D12143.id32441.diff

Index: sys/conf/files
===================================================================
--- sys/conf/files
+++ sys/conf/files
@@ -1622,12 +1622,12 @@
dev/e1000/e1000_osdep.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/et/if_et.c optional et
-dev/ena/ena.c optional ena \
+dev/ena/ena.c optional ena compat_linux \
compile-with "${NORMAL_C} -I$S/contrib"
-dev/ena/ena_sysctl.c optional ena \
+dev/ena/ena_sysctl.c optional ena compat_linux \
compile-with "${NORMAL_C} -I$S/contrib"
-contrib/ena-com/ena_com.c optional ena
-contrib/ena-com/ena_eth_com.c optional ena
+contrib/ena-com/ena_com.c optional ena compat_linux
+contrib/ena-com/ena_eth_com.c optional ena compat_linux
dev/ep/if_ep.c optional ep
dev/ep/if_ep_isa.c optional ep isa
dev/ep/if_ep_pccard.c optional ep pccard
Index: sys/contrib/ena-com/ena_com.h
===================================================================
--- sys/contrib/ena-com/ena_com.h
+++ sys/contrib/ena-com/ena_com.h
@@ -133,6 +133,15 @@
u16 l4_hdr_len; /* In words */
};
+struct ena_com_llq_info {
+ bool inline_header;
+ u16 desc_stride_ctrl;
+
+ u16 desc_list_entry_size;
+ u16 descs_num_before_header;
+ u16 descs_per_entry;
+};
+
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
void *bus;
@@ -171,6 +180,20 @@
} ____cacheline_aligned;
+struct ena_com_io_bounce_buffer_control {
+ u8 *base_buffer;
+ u16 next_to_use;
+ u16 buffer_size;
+ u16 buffers_num; /* Must be a power of 2 */
+};
+
+/* This struct is used to track the current location of the next llq entry */
+struct ena_com_llq_pkt_ctrl {
+ u8 *curr_bounce_buf;
+ u16 idx;
+ u16 descs_left_in_line;
+};
+
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
void *bus;
@@ -183,6 +206,9 @@
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
+ struct ena_com_llq_info llq_info;
+ struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+ struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
u16 q_depth;
u16 qid;
@@ -190,6 +216,7 @@
u16 idx;
u16 tail;
u16 next_to_comp;
+ u16 llq_last_copy_tail;
u32 tx_max_header_size;
u8 phase;
u8 desc_entry_size;
@@ -321,6 +348,7 @@
void __iomem *mem_bar;
void *dmadev;
void *bus;
+
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
u16 stats_func; /* Selected function for extended statistic dump */
@@ -337,6 +365,8 @@
u16 intr_delay_resolution;
u32 intr_moder_tx_interval;
struct ena_intr_moder_entry *intr_moder_tbl;
+
+ struct ena_com_llq_info llq_info;
};
struct ena_com_dev_get_features_ctx {
@@ -345,6 +375,7 @@
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
+ struct ena_admin_feature_llq_desc llq;
};
struct ena_com_create_io_ctx {
@@ -426,10 +457,12 @@
/* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
@@ -939,6 +972,15 @@
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
+
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq: LLQ feature descriptor, retrieved via ena_com_get_dev_attr_feat.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq);
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
@@ -1048,6 +1090,30 @@
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+ u16 size, buffers_num;
+ u8 *buf;
+
+ size = bounce_buf_ctrl->buffer_size;
+ buffers_num = bounce_buf_ctrl->buffers_num;
+
+ buf = bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+ prefetch(bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+ return buf;
+}
+
+#ifdef ENA_EXTENDED_STATS
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+ u32 len);
+
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+ u32 funct_queue);
+#endif
#if defined(__cplusplus)
}
#endif /* __cplusplus */
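
The bounce-buffer accessor above relies on buffers_num being a power of two: next_to_use & (buffers_num - 1) wraps the index with a mask instead of a modulo, and the prefetch warms the buffer that the next call will hand out. Below is a minimal user-space sketch of the same ring; the demo_ names and the 128-byte buffer size are illustrative, while the count of 4 matches ENA_COM_BOUNCE_BUFFER_CNTRL_CNT from ena_com.c.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_bounce_ctrl {
	uint8_t *base_buffer;
	uint16_t next_to_use;
	uint16_t buffer_size;
	uint16_t buffers_num;	/* must be a power of 2 for the mask to work */
};

static uint8_t *demo_next_bounce_buffer(struct demo_bounce_ctrl *c)
{
	/* same wrap-around as ena_com_get_next_bounce_buffer() */
	return c->base_buffer +
	    (c->next_to_use++ & (c->buffers_num - 1)) * c->buffer_size;
}

int main(void)
{
	struct demo_bounce_ctrl c = {
		.next_to_use = 0, .buffer_size = 128, .buffers_num = 4,
	};
	int i;

	c.base_buffer = malloc((size_t)c.buffer_size * c.buffers_num);
	if (c.base_buffer == NULL)
		return 1;

	/* eight calls cycle through buffer indices 0,1,2,3,0,1,2,3 */
	for (i = 0; i < 8; i++)
		printf("buffer index %td\n",
		    (demo_next_bounce_buffer(&c) - c.base_buffer) /
		    c.buffer_size);
	free(c.base_buffer);
	return 0;
}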
Index: sys/contrib/ena-com/ena_com.c
===================================================================
--- sys/contrib/ena-com/ena_com.c
+++ sys/contrib/ena-com/ena_com.c
@@ -45,6 +45,13 @@
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
| (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -65,6 +72,10 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
+
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -102,7 +113,7 @@
}
ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high = (u64)addr >> 32;
+ ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
return 0;
}
@@ -238,12 +249,9 @@
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail,
- admin_queue->sq.head,
- admin_queue->q_depth);
+ ena_trc_dbg("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@@ -362,21 +370,43 @@
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
}
- } else {
+
+ if (!io_sq->desc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+ }
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Allocate bounce buffers */
+ io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
+ io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+ size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
+
ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
size,
- io_sq->desc_addr.virt_addr,
+ io_sq->bounce_buf_ctrl.base_buffer,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr) {
- io_sq->desc_addr.virt_addr =
- ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+ ena_trc_err("bounce buffer memory allocation failed");
+ return ENA_COM_NO_MEM;
}
- }
- if (!io_sq->desc_addr.virt_addr) {
- ena_trc_err("memory allocation failed");
- return ENA_COM_NO_MEM;
+ memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
+
+ /* Initiate the first bounce buffer */
+ io_sq->llq_buf_ctrl.curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, io_sq->llq_info.desc_list_entry_size);
+ io_sq->llq_buf_ctrl.descs_left_in_line =
+ io_sq->llq_info.descs_num_before_header;
}
io_sq->tail = 0;
@@ -507,7 +537,7 @@
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return ENA_COM_NO_MEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -532,7 +562,7 @@
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
if (comp_ctx->status != ENA_CMD_SUBMITTED)
- break;
+ break;
if (ENA_TIME_EXPIRE(timeout)) {
ena_trc_err("Wait for completion (polling) timeout\n");
@@ -567,6 +597,75 @@
return ret;
}
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_desc)
+{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+
+ memset(llq_info, 0, sizeof(*llq_info));
+
+ switch (llq_desc->header_location_ctrl) {
+ case ENA_ADMIN_INLINE_HEADER:
+ llq_info->inline_header = true;
+ break;
+ case ENA_ADMIN_HEADER_RING:
+ llq_info->inline_header = false;
+ break;
+ default:
+ ena_trc_err("Invalid header location control\n");
+ return -EINVAL;
+ }
+
+ switch (llq_desc->entry_size_ctrl) {
+ case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
+ llq_info->desc_list_entry_size = 128;
+ break;
+ case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
+ llq_info->desc_list_entry_size = 192;
+ break;
+ case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
+ llq_info->desc_list_entry_size = 256;
+ break;
+ default:
+ ena_trc_err("Invalid entry_size_ctrl %d\n",
+ llq_desc->entry_size_ctrl);
+ return -EINVAL;
+ }
+
+ if ((llq_info->desc_list_entry_size & 0x7)) {
+ /* The desc list entry size should be a whole multiple of 8
+ * This requirement comes from __iowrite64_copy()
+ */
+ ena_trc_err("illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
+ return -EINVAL;
+ }
+
+ if (llq_info->inline_header) {
+ llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
+ if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
+ (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
+ ena_trc_err("Invalid desc_stride_ctrl %d\n",
+ llq_info->desc_stride_ctrl);
+ return -EINVAL;
+ }
+ } else {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+ }
+
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+ llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+ sizeof(struct ena_eth_io_tx_desc);
+ else
+ llq_info->descs_per_entry = 1;
+
+ llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;
+
+ return 0;
+}
+
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
@@ -614,13 +713,14 @@
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, timeout, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
ENA_MIGHT_SLEEP();
- timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
@@ -745,17 +845,20 @@
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr,
- io_sq->desc_addr.mem_handle);
- else
- ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
io_sq->desc_addr.virt_addr = NULL;
}
+
+ if (io_sq->bounce_buf_ctrl.base_buffer) {
+ size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+ io_sq->bounce_buf_ctrl.base_buffer = NULL;
+ }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -807,7 +910,7 @@
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
ena_trc_dbg("Feature %d isn't supported\n", feature_id);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -1366,7 +1469,7 @@
ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1480,7 +1583,6 @@
if (admin_queue->comp_ctx)
ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
-
admin_queue->comp_ctx = NULL;
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
@@ -1503,6 +1605,12 @@
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
@@ -1790,11 +1898,20 @@
if (!rc)
memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
sizeof(get_resp.u.hw_hints));
- else if (rc == ENA_COM_PERMISSION)
+ else if (rc == ENA_COM_UNSUPPORTED)
memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+ if (!rc)
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+ sizeof(get_resp.u.llq));
+ else if (rc == ENA_COM_UNSUPPORTED)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+ return rc;
+
return 0;
}
@@ -1838,7 +1955,7 @@
/* Go over all the events */
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
phase) {
- ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%jus]\n",
+ ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group,
aenq_common->syndrom,
(u64)aenq_common->timestamp_low +
@@ -1872,8 +1989,30 @@
mb();
ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
+#ifdef ENA_EXTENDED_STATS
+/*
+ * Sets the function index and queue index to be used by the
+ * get full statistics feature.
+ */
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+ u32 func_queue)
+{
+ /* Function and queue are acquired from the user in the following format:
+ * bottom half word: function
+ * top half word: queue
+ */
+ ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
+ ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
+
+ return 0;
+}
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+#endif /* ENA_EXTENDED_STATS */
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1901,6 +2040,8 @@
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
@@ -1973,6 +2114,51 @@
return ret;
}
+#ifdef ENA_EXTENDED_STATS
+
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+ u32 len)
+{
+ struct ena_com_stats_ctx ctx;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
+ ena_mem_handle_t mem_handle;
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ int ret;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
+ virt_addr, phys_addr, mem_handle);
+ if (!virt_addr) {
+ ret = ENA_COM_NO_MEM;
+ goto done;
+ }
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ goto free_ext_stats_mem;
+ }
+ get_cmd->u.control_buffer.length = len;
+
+ get_cmd->device_id = ena_dev->stats_func;
+ get_cmd->queue_idx = ena_dev->stats_queue;
+
+ ret = ena_get_dev_stats(ena_dev, &ctx,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
+ if (ret < 0)
+ goto free_ext_stats_mem;
+
+ ret = snprintf(buff, len, "%s", (char *)virt_addr);
+
+free_ext_stats_mem:
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
+ mem_handle);
+done:
+ return ret;
+}
+#endif
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
@@ -1983,7 +2169,7 @@
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2037,7 +2223,7 @@
ENA_ADMIN_RSS_HASH_FUNCTION)) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
/* Validate hash function is supported */
@@ -2049,7 +2235,7 @@
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
ena_trc_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2108,7 +2294,7 @@
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
switch (func) {
@@ -2201,7 +2387,7 @@
ENA_ADMIN_RSS_HASH_INPUT)) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2282,7 +2468,7 @@
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
}
@@ -2360,7 +2546,7 @@
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2636,7 +2822,7 @@
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == ENA_COM_PERMISSION) {
+ if (rc == ENA_COM_UNSUPPORTED) {
ena_trc_dbg("Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
@@ -2759,3 +2945,33 @@
intr_moder_tbl[level].pkts_per_interval;
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq)
+{
+ int rc;
+ int size;
+
+ if (llq->max_llq_num == 0) {
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ rc = ena_com_config_llq_info(ena_dev, llq);
+ if (rc)
+ return rc;
+
+ /* Validate the descriptor is not too big */
+ size = ena_dev->tx_max_header_size;
+ size += ena_dev->llq_info.descs_num_before_header *
+ sizeof(struct ena_eth_io_tx_desc);
+
+ if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ ena_trc_err("the size of the LLQ entry is smaller than needed\n");
+ return ENA_COM_INVAL;
+ }
+
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+ return 0;
+}
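
The validation at the end of ena_com_config_dev_mode() above checks that one LLQ entry can hold the inline header plus the descriptors that precede it. Here is a worked example of the same arithmetic under assumed values: 128-byte entries, two descriptors before the header, a 96-byte tx_max_header_size, and a 16-byte sizeof(struct ena_eth_io_tx_desc).

#include <stdio.h>

int main(void)
{
	/* assumed inputs for the example */
	int tx_max_header_size = 96;	/* bytes of header pushed inline */
	int descs_num_before_header = 2;
	int desc_list_entry_size = 128;	/* ENA_ADMIN_LIST_ENTRY_SIZE_128B */
	int tx_desc_size = 16;	/* assumed sizeof(struct ena_eth_io_tx_desc) */

	/* the same check ena_com_config_dev_mode() performs */
	int size = tx_max_header_size +
	    descs_num_before_header * tx_desc_size;

	if (desc_list_entry_size < size)
		printf("LLQ entry too small: need %d, have %d bytes\n",
		    size, desc_list_entry_size);
	else
		printf("fits: %d of %d bytes used\n",
		    size, desc_list_entry_size);
	return 0;
}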
Index: sys/contrib/ena-com/ena_defs/ena_admin_defs.h
===================================================================
--- sys/contrib/ena-com/ena_defs/ena_admin_defs.h
+++ sys/contrib/ena-com/ena_defs/ena_admin_defs.h
@@ -30,7 +30,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
@@ -74,6 +73,8 @@
ENA_ADMIN_HW_HINTS = 3,
+ ENA_ADMIN_LLQ = 4,
+
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
@@ -485,6 +486,75 @@
uint32_t max_mtu;
};
+enum ena_admin_llq_header_location {
+ /* header is in descriptor list */
+ ENA_ADMIN_INLINE_HEADER = 1,
+
+ /* header in a separate ring, implies 16B descriptor list entry */
+ ENA_ADMIN_HEADER_RING = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+ ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
+
+ ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
+
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located in the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode
+ */
+enum ena_admin_llq_stride_ctrl {
+ ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
+
+ ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+ uint32_t max_llq_num;
+
+ uint32_t max_llq_depth;
+
+ /* use enum ena_admin_llq_header_location */
+ uint16_t header_location_ctrl;
+
+ /* if inline header is specified - this is the size of descriptor
+ * list entry. If header in a separate ring is specified - this is
+ * the size of header ring entry. use enum
+ * ena_admin_llq_ring_entry_size
+ */
+ uint16_t entry_size_ctrl;
+
+ /* valid only if inline header is specified. First entry associated
+ * with the packet includes descriptors and header. Rest of the
+ * entries occupied by descriptors. This parameter defines the max
+ * number of descriptors preceding the header in the first entry.
+ * Values: use enum ena_admin_llq_num_descs_before_header
+ */
+ uint16_t desc_num_before_header_ctrl;
+
+ /* valid only if inline header is specified. Note: use enum
+ * ena_admin_llq_stride_ctrl
+ */
+ uint16_t descriptors_stride_ctrl;
+};
+
struct ena_admin_queue_feature_desc {
/* including LLQs */
uint32_t max_sq_num;
@@ -495,9 +565,9 @@
uint32_t max_cq_depth;
- uint32_t max_llq_num;
+ uint32_t max_legacy_llq_num;
- uint32_t max_llq_depth;
+ uint32_t max_legacy_llq_depth;
uint32_t max_header_size;
@@ -802,6 +872,8 @@
struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_llq_desc llq;
+
struct ena_admin_queue_feature_desc max_queue;
struct ena_admin_feature_aenq_desc aenq;
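
Per the stride comment above: in inline-header mode with ENA_ADMIN_SINGLE_DESC_PER_ENTRY, the first descriptor list entry carries desc_num_before_header_ctrl descriptors plus the header, and the remaining descriptors continue in subsequent entries, descs_per_entry at a time (desc_list_entry_size / sizeof(struct ena_eth_io_tx_desc), as computed in ena_com_config_llq_info()). A short sketch of that layout math, with a 16-byte descriptor size assumed and a hypothetical demo_ helper:

#include <stdio.h>

/* Entries needed for a packet of nr_descs descriptors: the first entry holds
 * the pre-header descriptors plus the header, the rest spill into following
 * entries at descs_per_entry each. Illustrative only, not driver code. */
static int demo_entries_for_packet(int nr_descs, int descs_before_header,
    int descs_per_entry)
{
	int remaining = nr_descs - descs_before_header;
	int entries = 1;

	while (remaining > 0) {
		entries++;
		remaining -= descs_per_entry;
	}
	return entries;
}

int main(void)
{
	int desc_list_entry_size = 128;	/* ENA_ADMIN_LIST_ENTRY_SIZE_128B */
	int descs_per_entry = desc_list_entry_size / 16;	/* 8 */

	/* a 10-descriptor packet with 2 descriptors before the header */
	printf("%d entries\n",
	    demo_entries_for_packet(10, 2, descs_per_entry));
	return 0;
}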
Index: sys/contrib/ena-com/ena_defs/ena_common_defs.h
===================================================================
--- sys/contrib/ena-com/ena_defs/ena_common_defs.h
+++ sys/contrib/ena-com/ena_defs/ena_common_defs.h
@@ -30,7 +30,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
Index: sys/contrib/ena-com/ena_defs/ena_efa_admin_defs.h
===================================================================
--- /dev/null
+++ sys/contrib/ena-com/ena_defs/ena_efa_admin_defs.h
@@ -0,0 +1,731 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_EFA_ADMIN_H_
+#define _ENA_EFA_ADMIN_H_
+
+/* EFA admin queue opcodes */
+enum ena_efa_admin_aq_opcode {
+ /* starting opcode of efa admin commands */
+ ENA_EFA_ADMIN_START_CMD_RANGE = 100,
+
+ /* Query device capabilities */
+ ENA_EFA_ADMIN_QUERY_DEV = ENA_EFA_ADMIN_START_CMD_RANGE,
+
+ /* Modify device attributes */
+ ENA_EFA_ADMIN_MODIFY_DEV,
+
+ /* Create SRQ */
+ ENA_EFA_ADMIN_CREATE_SRQ,
+
+ /* Destroy SRQ */
+ ENA_EFA_ADMIN_DESTROY_SRQ,
+
+ /* Create QP */
+ ENA_EFA_ADMIN_CREATE_QP,
+
+ /* Modify QP */
+ ENA_EFA_ADMIN_MODIFY_QP,
+
+ /* Query QP */
+ ENA_EFA_ADMIN_QUERY_QP,
+
+ /* Destroy QP */
+ ENA_EFA_ADMIN_DESTROY_QP,
+
+ /* Create Address Handle */
+ ENA_EFA_ADMIN_CREATE_AH,
+
+ /* Destroy Address Handle */
+ ENA_EFA_ADMIN_DESTROY_AH,
+
+ /* Register Memory Region */
+ ENA_EFA_ADMIN_REG_MR,
+
+ /* Deregister Memory Region */
+ ENA_EFA_ADMIN_DEREG_MR,
+
+ /* Create EFA completion queue */
+ ENA_EFA_ADMIN_CREATE_CQ,
+ ENA_EFA_ADMIN_DESTROY_CQ,
+
+ ENA_EFA_ADMIN_GET_IPV4_ADDR,
+};
+
+/* Packet format */
+enum ena_efa_admin_packet_format {
+ /* RoCEv2 with IPv4 */
+ ENA_EFA_ADMIN_ROCEV2_IPV4 = 1,
+};
+
+/* QP transport type */
+enum ena_efa_admin_qp_type {
+ /* Unreliable Datagram */
+ ENA_EFA_ADMIN_QP_TYPE_UD = 1,
+
+ /* GSI, aka QP1 */
+ ENA_EFA_ADMIN_QP_TYPE_GSI = 2,
+};
+
+/* QP state */
+enum ena_efa_admin_qp_state {
+ /* Reset queue */
+ ENA_EFA_ADMIN_QP_STATE_RESET = 0,
+
+ /* Init queue */
+ ENA_EFA_ADMIN_QP_STATE_INIT = 1,
+
+ /* Ready to receive */
+ ENA_EFA_ADMIN_QP_STATE_RTR = 2,
+
+ /* Ready to send */
+ ENA_EFA_ADMIN_QP_STATE_RTS = 3,
+
+ /* Send queue drain */
+ ENA_EFA_ADMIN_QP_STATE_SQD = 4,
+
+ /* Send queue error */
+ ENA_EFA_ADMIN_QP_STATE_SQE = 5,
+
+ /* Queue in error state */
+ ENA_EFA_ADMIN_QP_STATE_ERR = 6,
+};
+
+/* Device attributes */
+struct ena_efa_admin_dev_attr {
+ /* FW version */
+ uint32_t fw_ver;
+
+ uint32_t max_mr_size;
+
+ uint32_t max_qp;
+
+ uint32_t max_cq;
+
+ uint32_t max_mr;
+
+ uint32_t max_pd;
+
+ uint32_t max_ah;
+
+ uint32_t max_srq;
+
+ /* Enable low-latency queues */
+ uint32_t llq_en;
+};
+
+/* Query device command */
+struct ena_efa_admin_query_dev {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+};
+
+/* Query device response. */
+struct ena_efa_admin_query_dev_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* Device attributes */
+ struct ena_efa_admin_dev_attr dev_attr;
+};
+
+/* Modify device command */
+struct ena_efa_admin_modify_dev {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* Device attributes */
+ struct ena_efa_admin_dev_attr dev_attr;
+};
+
+/* Modify device response. */
+struct ena_efa_admin_modify_dev_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* Create Shared Receive Queue (SRQ) command. */
+struct ena_efa_admin_create_srq_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain associated with this SRQ */
+ uint16_t pd;
+
+ /* 0 : indirect - Queue ring is in indirect mode, the
+ * base address field points to a list of pages
+ * 15:1 : reserved17
+ */
+ uint16_t flags;
+
+ /* ring base address. This field is not used if this is a Low
+ * Latency Queue(LLQ)
+ */
+ uint64_t ring_base_addr;
+
+ /* descriptor ring size in bytes, sufficient for user-provided
+ * number of WQEs and SGL size
+ */
+ uint32_t ring_size;
+
+ uint32_t reserved_5;
+};
+
+/* Create SRQ response. */
+struct ena_efa_admin_create_srq_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* Opaque handle to be used for subsequent operations on the SRQ */
+ uint32_t srq_handle;
+
+ /* doorbell address, as offset to PCIe MMIO LLQ_MEM BAR */
+ uint32_t db_offset;
+
+ /* low latency send queue ring base address as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_descriptors_offset;
+};
+
+/* Destroy SRQ command */
+struct ena_efa_admin_destroy_srq_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* SRQ handle returned by the create_srq command */
+ uint32_t srq_handle;
+};
+
+/* Destroy SRQ response */
+struct ena_efa_admin_destroy_srq_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* QP allocation sizes, converted by fabric QueuePair (QP) create command
+ * from QP capabilities.
+ */
+struct ena_efa_admin_qp_alloc_size {
+ /* Send descriptor ring size in bytes, sufficient for user-provided
+ * number of WQEs and SGL size
+ */
+ uint32_t send_queue_ring_size;
+
+ /* Max number of WQEs that will be posted on send queue */
+ uint32_t send_queue_depth;
+
+ /* Size in bytes of push buffer associated with each send WQE, used
+ * for immediate data (4 or 8 bytes) and for short inline message
+ * data
+ */
+ uint32_t push_buf_size;
+
+ /* Recv descriptor ring size in bytes, sufficient for user-provided
+ * number of WQEs and SGL size
+ */
+ uint32_t recv_queue_ring_size;
+};
+
+/* Create QP command. */
+struct ena_efa_admin_create_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain associated with this QP */
+ uint16_t pd;
+
+ /* QP type, currently UD only */
+ uint8_t qp_type;
+
+ /* 0 : use_srq - Indicates that the QP should be
+ * associated with the specified SRQ
+ * 1 : sq_indirect - Send Queue ring is in indirect
+ * mode, the base address field points to a list of
+ * pages
+ * 2 : rq_indirect - Recv Queue ring is in indirect
+ * mode, the base address field points to a list of
+ * pages
+ * 7:3 : reserved27
+ */
+ uint8_t flags;
+
+ /* SendQueue (SQ) ring base address. This field is not used if this
+ * is a Low Latency Queue(LLQ)
+ */
+ uint64_t sq_base_addr;
+
+ /* ReceiveQueue (RQ) ring base address */
+ uint64_t rq_base_addr;
+
+ /* Index of CQ to be associated with Send Queue completions */
+ uint32_t send_cq_idx;
+
+ /* Index of CQ to be associated with Recv Queue completions */
+ uint32_t recv_cq_idx;
+
+ /* Index of SRQ to be associated with this QP */
+ uint32_t srq_idx;
+
+ /* Requested QP allocation sizes */
+ struct ena_efa_admin_qp_alloc_size qp_alloc_size;
+
+ uint32_t reserved_13;
+};
+
+/* Create QP response. */
+struct ena_efa_admin_create_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* Opaque handle to be used for subsequent operations on the QP */
+ uint32_t qp_handle;
+
+ /* QP number in the given ENA virtual device */
+ uint16_t qp_number;
+
+ uint16_t reserved;
+
+ /* SQ doorbell address, as offset to PCIe MMIO LLQ_MEM BAR */
+ uint32_t sq_db_offset;
+
+ /* RQ doorbell address, as offset to PCIe MMIO LLQ_MEM BAR */
+ uint32_t rq_db_offset;
+
+ /* low latency send queue ring base address as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_descriptors_offset;
+
+ /* low latency send queue push buffers' memory as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_push_bufs_offset;
+};
+
+/* attributes for UD QP, used in modify_qp_cmd */
+struct ena_efa_admin_ud_modify_qp_cmd {
+ /* QP state */
+ uint32_t qp_state;
+
+ /* Override current QP state (before applying the transition) */
+ uint32_t cur_qp_state;
+
+ /* QKey */
+ uint32_t qkey;
+
+ /* Enable async notification when SQ is drained */
+ uint32_t sq_drained_async_notify;
+};
+
+/* Modify QP command */
+struct ena_efa_admin_modify_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ uint32_t qp_handle;
+
+ /* QP attributes according to QP type */
+ union {
+ /* UD QP */
+ struct ena_efa_admin_ud_modify_qp_cmd ud;
+ } qp_attr;
+};
+
+/* Modify QP response */
+struct ena_efa_admin_modify_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* Query QP command */
+struct ena_efa_admin_query_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ uint32_t qp_handle;
+};
+
+/* query QP attributes for UD */
+struct ena_efa_admin_ud_query_qp_resp {
+ /* QP state */
+ uint32_t qp_state;
+
+ /* QKey */
+ uint32_t qkey;
+
+ /* Indicates that draining is in progress */
+ uint32_t sq_draining;
+};
+
+/* Query QP response */
+struct ena_efa_admin_query_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* QP attributes according to QP type */
+ union {
+ /* UD QP */
+ struct ena_efa_admin_ud_query_qp_resp ud;
+ } qp_attr;
+};
+
+/* Destroy QP command */
+struct ena_efa_admin_destroy_qp_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* QP handle returned by create_qp command */
+ uint32_t qp_handle;
+};
+
+/* Destroy QP response */
+struct ena_efa_admin_destroy_qp_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* Create Address Handle command parameters. Must not be called more than
+ * once for the same destination
+ */
+struct ena_efa_admin_create_ah_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* Destination IP address in network byte order */
+ uint8_t dest_ip_addr[4];
+};
+
+/* Create Address Handle response */
+struct ena_efa_admin_create_ah_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* Target interface address handle (opaque)
+ * 23:0 : address_handle - Target interface address
+ * handle (opaque)
+ * 31:24 : reserved24_w1 - Reserved, MBZ
+ */
+ uint32_t address_handle;
+};
+
+/* Destroy Address Handle command parameters. */
+struct ena_efa_admin_destroy_ah_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* Target interface address handle (opaque)
+ * 23:0 : address_handle - Target interface address
+ * handle (opaque)
+ * 31:24 : reserved24_w1 - Reserved, MBZ
+ */
+ uint32_t address_handle;
+};
+
+/* Destroy Address Handle response */
+struct ena_efa_admin_destroy_ah_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* Registration of MemoryRegion, required for QP working with Virtual
+ * Addresses. In standard verbs semantics, region length is limited to 2GB
+ * space, but ENA offers larger MR support for large memory space, easing
+ * work with very large datasets (e.g., full GPU memory mapping).
+ */
+struct ena_efa_admin_reg_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* Protection Domain */
+ uint16_t pd;
+
+ uint16_t reserved16_w1;
+
+ /* Physical Buffer List, each element is page-aligned. */
+ union {
+ /* Inline array of physical addresses of app pages
+ * (optimization for short region reservations)
+ */
+ uint64_t inline_pbl_array[4];
+
+ /* points to PBL (direct or indirect, chained if needed) */
+ struct ena_admin_ctrl_buff_info pbl;
+ } pbl;
+
+ /* Memory region length, in bytes. */
+ uint64_t mr_length;
+
+ /* flags and page size
+ * 0 : mem_addr_phy_mode_en - Enable bit for physical
+ * memory registration (no translation), can be used
+ * only by privileged clients. If set, PBL must
+ * contain a single entry.
+ * 5:1 : phys_page_size_shift - page size is (1 <<
+ * phys_page_size_shift). Supported values: 12 (4K),
+ * 16 (64K), 20 (1M), 21 (2M), 30 (1G). Page size is
+ * used for building the Virtual to Physical address
+ * mapping
+ * 7:6 : reserved
+ */
+ uint8_t flags;
+
+ /* permissions
+ * 0 : local_write_enable - Write permissions: value
+ * of 1 needed for RQ buffers and for RDMA write
+ * 7:1 : reserved1 - remote access flags, etc
+ */
+ uint8_t permissions;
+
+ uint16_t reserved16_w5;
+
+ /* number of pages in PBL (redundant, could be calculated) */
+ uint32_t page_num;
+
+ /* 48-bit IO Virtual Address associated with this MR. If
+ * mem_addr_phy_mode_en is set, contains the physical address of
+ * the region.
+ */
+ uint64_t iova;
+};
+
+struct ena_efa_admin_reg_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* L_Key, this l_key will be used in conjunction with local buffers
+ * references in SQ and RQ WQE
+ */
+ uint32_t l_key;
+
+ /* R_Key, to be used in incoming RDMA semantics messages to refer
+ * to remotely accessed memory region
+ */
+ uint32_t r_key;
+};
+
+/* Deregister a MemoryRegion */
+struct ena_efa_admin_dereg_mr_cmd {
+ /* Common Admin Queue descriptor */
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* L_Key, memory region's l_key */
+ uint32_t l_key;
+};
+
+struct ena_efa_admin_dereg_mr_resp {
+ /* Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_efa_admin_create_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ uint8_t cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ uint8_t cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ uint16_t cq_depth;
+
+ /* msix vector assigned to this cq */
+ uint32_t msix_vector;
+
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_efa_admin_create_cq_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ uint16_t cq_idx;
+
+ /* actual cq depth in number of entries */
+ uint16_t cq_actual_depth;
+
+ uint32_t numa_node_register_offset;
+
+ uint32_t cq_head_db_register_offset;
+
+ uint32_t cq_interrupt_unmask_register_offset;
+};
+
+struct ena_efa_admin_destroy_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_desc;
+
+ uint16_t cq_idx;
+
+ uint16_t reserved1;
+};
+
+struct ena_efa_admin_destroy_cq_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_efa_admin_get_ipv4_addr_cmd {
+ struct ena_admin_aq_common_desc aq_common_desc;
+};
+
+struct ena_efa_admin_get_ipv4_addr_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+ /* IPv4 address in network order. */
+ uint8_t ipv4_addr[4];
+};
+
+/* create_srq_cmd */
+#define ENA_EFA_ADMIN_CREATE_SRQ_CMD_INDIRECT_MASK BIT(0)
+
+/* create_qp_cmd */
+#define ENA_EFA_ADMIN_CREATE_QP_CMD_USE_SRQ_MASK BIT(0)
+#define ENA_EFA_ADMIN_CREATE_QP_CMD_SQ_INDIRECT_SHIFT 1
+#define ENA_EFA_ADMIN_CREATE_QP_CMD_SQ_INDIRECT_MASK BIT(1)
+#define ENA_EFA_ADMIN_CREATE_QP_CMD_RQ_INDIRECT_SHIFT 2
+#define ENA_EFA_ADMIN_CREATE_QP_CMD_RQ_INDIRECT_MASK BIT(2)
+
+/* create_ah_resp */
+#define ENA_EFA_ADMIN_CREATE_AH_RESP_ADDRESS_HANDLE_MASK GENMASK(23, 0)
+
+/* destroy_ah_cmd */
+#define ENA_EFA_ADMIN_DESTROY_AH_CMD_ADDRESS_HANDLE_MASK GENMASK(23, 0)
+
+/* reg_mr_cmd */
+#define ENA_EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(0)
+#define ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_SHIFT 1
+#define ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(5, 1)
+#define ENA_EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t get_ena_efa_admin_create_srq_cmd_indirect(const struct ena_efa_admin_create_srq_cmd *p)
+{
+ return p->flags & ENA_EFA_ADMIN_CREATE_SRQ_CMD_INDIRECT_MASK;
+}
+
+static inline void set_ena_efa_admin_create_srq_cmd_indirect(struct ena_efa_admin_create_srq_cmd *p, uint16_t val)
+{
+ p->flags |= val & ENA_EFA_ADMIN_CREATE_SRQ_CMD_INDIRECT_MASK;
+}
+
+static inline uint8_t get_ena_efa_admin_create_qp_cmd_use_srq(const struct ena_efa_admin_create_qp_cmd *p)
+{
+ return p->flags & ENA_EFA_ADMIN_CREATE_QP_CMD_USE_SRQ_MASK;
+}
+
+static inline void set_ena_efa_admin_create_qp_cmd_use_srq(struct ena_efa_admin_create_qp_cmd *p, uint8_t val)
+{
+ p->flags |= val & ENA_EFA_ADMIN_CREATE_QP_CMD_USE_SRQ_MASK;
+}
+
+static inline uint8_t get_ena_efa_admin_create_qp_cmd_sq_indirect(const struct ena_efa_admin_create_qp_cmd *p)
+{
+ return (p->flags & ENA_EFA_ADMIN_CREATE_QP_CMD_SQ_INDIRECT_MASK) >> ENA_EFA_ADMIN_CREATE_QP_CMD_SQ_INDIRECT_SHIFT;
+}
+
+static inline void set_ena_efa_admin_create_qp_cmd_sq_indirect(struct ena_efa_admin_create_qp_cmd *p, uint8_t val)
+{
+ p->flags |= (val << ENA_EFA_ADMIN_CREATE_QP_CMD_SQ_INDIRECT_SHIFT) & ENA_EFA_ADMIN_CREATE_QP_CMD_SQ_INDIRECT_MASK;
+}
+
+static inline uint8_t get_ena_efa_admin_create_qp_cmd_rq_indirect(const struct ena_efa_admin_create_qp_cmd *p)
+{
+ return (p->flags & ENA_EFA_ADMIN_CREATE_QP_CMD_RQ_INDIRECT_MASK) >> ENA_EFA_ADMIN_CREATE_QP_CMD_RQ_INDIRECT_SHIFT;
+}
+
+static inline void set_ena_efa_admin_create_qp_cmd_rq_indirect(struct ena_efa_admin_create_qp_cmd *p, uint8_t val)
+{
+ p->flags |= (val << ENA_EFA_ADMIN_CREATE_QP_CMD_RQ_INDIRECT_SHIFT) & ENA_EFA_ADMIN_CREATE_QP_CMD_RQ_INDIRECT_MASK;
+}
+
+static inline uint32_t get_ena_efa_admin_create_ah_resp_address_handle(const struct ena_efa_admin_create_ah_resp *p)
+{
+ return p->address_handle & ENA_EFA_ADMIN_CREATE_AH_RESP_ADDRESS_HANDLE_MASK;
+}
+
+static inline void set_ena_efa_admin_create_ah_resp_address_handle(struct ena_efa_admin_create_ah_resp *p, uint32_t val)
+{
+ p->address_handle |= val & ENA_EFA_ADMIN_CREATE_AH_RESP_ADDRESS_HANDLE_MASK;
+}
+
+static inline uint32_t get_ena_efa_admin_destroy_ah_cmd_address_handle(const struct ena_efa_admin_destroy_ah_cmd *p)
+{
+ return p->address_handle & ENA_EFA_ADMIN_DESTROY_AH_CMD_ADDRESS_HANDLE_MASK;
+}
+
+static inline void set_ena_efa_admin_destroy_ah_cmd_address_handle(struct ena_efa_admin_destroy_ah_cmd *p, uint32_t val)
+{
+ p->address_handle |= val & ENA_EFA_ADMIN_DESTROY_AH_CMD_ADDRESS_HANDLE_MASK;
+}
+
+static inline uint8_t get_ena_efa_admin_reg_mr_cmd_mem_addr_phy_mode_en(const struct ena_efa_admin_reg_mr_cmd *p)
+{
+ return p->flags & ENA_EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK;
+}
+
+static inline void set_ena_efa_admin_reg_mr_cmd_mem_addr_phy_mode_en(struct ena_efa_admin_reg_mr_cmd *p, uint8_t val)
+{
+ p->flags |= val & ENA_EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK;
+}
+
+static inline uint8_t get_ena_efa_admin_reg_mr_cmd_phys_page_size_shift(const struct ena_efa_admin_reg_mr_cmd *p)
+{
+ return (p->flags & ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK) >> ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_SHIFT;
+}
+
+static inline void set_ena_efa_admin_reg_mr_cmd_phys_page_size_shift(struct ena_efa_admin_reg_mr_cmd *p, uint8_t val)
+{
+ p->flags |= (val << ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_SHIFT) & ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
+}
+
+static inline uint8_t get_ena_efa_admin_reg_mr_cmd_local_write_enable(const struct ena_efa_admin_reg_mr_cmd *p)
+{
+ return p->permissions & ENA_EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+}
+
+static inline void set_ena_efa_admin_reg_mr_cmd_local_write_enable(struct ena_efa_admin_reg_mr_cmd *p, uint8_t val)
+{
+ p->permissions |= val & ENA_EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_EFA_ADMIN_H_ */
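
One usage subtlety of the generated accessors above: the setters OR the shifted value into the field rather than assigning it, so they assume the destination field starts zeroed. A self-contained sketch of the same pattern for the reg_mr page-size field follows; the DEMO_ macros and struct are stand-ins for the kernel's BIT/GENMASK and the real command struct.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

#define DEMO_PAGE_SIZE_SHIFT_SHIFT	1
#define DEMO_PAGE_SIZE_SHIFT_MASK	DEMO_GENMASK(5, 1)

struct demo_reg_mr_cmd {
	uint8_t flags;
};

static inline void demo_set_page_size_shift(struct demo_reg_mr_cmd *p,
    uint8_t val)
{
	/* |= like the generated setters: assumes the field starts zeroed */
	p->flags |= (val << DEMO_PAGE_SIZE_SHIFT_SHIFT) &
	    DEMO_PAGE_SIZE_SHIFT_MASK;
}

static inline uint8_t demo_get_page_size_shift(const struct demo_reg_mr_cmd *p)
{
	return (p->flags & DEMO_PAGE_SIZE_SHIFT_MASK) >>
	    DEMO_PAGE_SIZE_SHIFT_SHIFT;
}

int main(void)
{
	struct demo_reg_mr_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));		/* required before any setter */
	demo_set_page_size_shift(&cmd, 12);	/* 1 << 12 = 4K pages */
	printf("page size: %u\n", 1U << demo_get_page_size_shift(&cmd));
	return 0;
}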
Index: sys/contrib/ena-com/ena_defs/ena_efa_io_defs.h
===================================================================
--- /dev/null
+++ sys/contrib/ena-com/ena_defs/ena_efa_io_defs.h
@@ -0,0 +1,529 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ENA_EFA_IO_H_
+#define _ENA_EFA_IO_H_
+
+enum ena_efa_io_queue_type {
+ /* send queue (of a QP) */
+ ENA_EFA_IO_SEND_QUEUE = 1,
+
+ /* recv queue (of a QP) */
+ ENA_EFA_IO_RECV_QUEUE = 2,
+
+ /* SRQ */
+ ENA_EFA_IO_SHARED_RECV_QUEUE = 3,
+};
+
+/* UD Transport IO Queue Tx Meta descriptor. Must be the first (or only)
+ * descriptor for each UD message.
+ */
+struct ena_efa_io_ud_tx_meta_desc {
+ /* Verbs-generated Request ID */
+ uint16_t req_id;
+
+ /* control flags
+ * 3:0 : reserved16
+ * 4 : has_imm - Immediate data is present (in the
+ * first word of header area).
+ * 5 : push_msg - push mode - message data in header
+ * area, following the immediate word if present, no
+ * buffer descriptors.
+ * 6 : reserved22
+ * 7 : meta_desc - Indicates metadata descriptor.
+ * Must be set.
+ */
+ uint8_t ctrl1;
+
+ /* control flags
+ * 0 : phase - phase bit
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction. Must be set.
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req - Indicates whether completion should
+ * be posted, after packet is transmitted. Valid only
+ * for the first descriptor
+ * 7:5 : reserved29 - MBZ
+ */
+ uint8_t ctrl2;
+
+ /* destination QPn and push length
+ * 23:0 : dest_qp - destination QP number
+ * 31:24 : push_len - If push_msg bit is set, length
+ * of push message. 0 means 256.
+ */
+ uint32_t qp;
+
+ /* qkey of destination QP */
+ uint32_t qkey;
+
+ /* Address handle
+ * 23:0 : ah - address handle
+ * 31:24 : reserved24
+ */
+ uint32_t ah;
+};
+
+/* Transport IO Queue Tx buffer descriptor, for any transport type.
+ * Preceded by metadata descriptor.
+ */
+struct ena_efa_io_tx_buf_desc {
+ /* length in bytes */
+ uint16_t length;
+
+ /* control flags
+ * 6:0 : reserved16
+ * 7 : meta_desc - MBZ
+ */
+ uint8_t ctrl1;
+
+ /* control flags
+ * 0 : phase - phase bit
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction. MBZ
+ * 3 : last - Indicates last descriptor in transaction
+ * 7:4 : reserved28 - MBZ
+ */
+ uint8_t ctrl;
+
+ /* memory translation key */
+ uint32_t lkey;
+
+ /* Buffer address bits[31:0] */
+ uint32_t buf_addr_lo;
+
+ /* 15:0 : buf_addr_hi - Buffer Pointer[47:32]
+ * 31:16 : reserved - Reserved
+ */
+ uint32_t buf_addr_hi;
+};
+
+/* UD IO Queue Rx descriptor */
+struct ena_efa_io_ud_rx_desc {
+ /* Verbs-generated request id. Must be specified if comp_req flag is
+ * set.
+ */
+ uint16_t req_id;
+
+ /* MBZ */
+ uint8_t reserved16;
+
+ /* control flags
+ * 0 : phase
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req - Indicates whether completion should
+ * be posted, after packet is transmitted. Valid only
+ * for first descriptor
+ * 5 : reserved29 - MBO
+ * 7:6 : reserved30 - MBZ
+ */
+ uint8_t ctrl;
+
+ /* memory translation key */
+ uint32_t lkey;
+
+ /* Buffer address bits[31:0] */
+ uint32_t buf_addr_lo;
+
+ /* Buffer Pointer[47:32] */
+ uint16_t buf_addr_hi;
+
+ /* Length in bytes. 0 means 64KB */
+ uint16_t length;
+};
+
+/* Transport IO Queue common completion descriptor */
+struct ena_efa_io_cdesc_common {
+ /* verbs-generated request ID, as provided in the completed tx or rx
+ * descriptor.
+ */
+ uint16_t req_id;
+
+ /* status */
+ uint8_t status;
+
+ /* flags
+ * 0 : phase - Phase bit
+ * 2:1 : q_type - enum ena_efa_io_queue_type:
+ * send/recv/srq
+ * 3 : has_imm - indicates that immediate data is
+ * present - for RX completions only
+ * 4 : found_ah - Address Handle is provided instead
+ * of GRH (in the last word of GRH area) - for UD RX
+ * completions only
+ * 7:5 : reserved29
+ */
+ uint8_t flags;
+
+ /* submission queue number (QP number for send or recv queue, or SRQ
+ * number)
+ */
+ uint16_t q_num;
+
+ /* Transferred length. For UD RX messages, includes 40-byte GRH, even
+ * if AH index was provided instead of GRH
+ */
+ uint16_t length;
+};
+
+/* UD IO Queue Tx completion descriptor */
+struct ena_efa_io_tx_cdesc {
+ /* Common completion info */
+ struct ena_efa_io_cdesc_common common;
+};
+
+/* UD Transport IO Queue Rx Completion Descriptor */
+struct ena_efa_io_ud_rx_cdesc {
+ /* Common completion info */
+ struct ena_efa_io_cdesc_common common;
+
+ /* 23:0 : src_qp_num - Source QP number
+ * 25:24 : format - Format: RoCEv2/etc
+ * 27:26 : reserved26
+ * 31:28 : reserved28
+ */
+ uint32_t source;
+
+ /* Immediate data */
+ uint32_t imm;
+};
+
+/* ud_tx_meta_desc */
+#define ENA_EFA_IO_UD_TX_META_DESC_HAS_IMM_SHIFT 4
+#define ENA_EFA_IO_UD_TX_META_DESC_HAS_IMM_MASK BIT(4)
+#define ENA_EFA_IO_UD_TX_META_DESC_PUSH_MSG_SHIFT 5
+#define ENA_EFA_IO_UD_TX_META_DESC_PUSH_MSG_MASK BIT(5)
+#define ENA_EFA_IO_UD_TX_META_DESC_META_DESC_SHIFT 7
+#define ENA_EFA_IO_UD_TX_META_DESC_META_DESC_MASK BIT(7)
+#define ENA_EFA_IO_UD_TX_META_DESC_PHASE_MASK BIT(0)
+#define ENA_EFA_IO_UD_TX_META_DESC_FIRST_SHIFT 2
+#define ENA_EFA_IO_UD_TX_META_DESC_FIRST_MASK BIT(2)
+#define ENA_EFA_IO_UD_TX_META_DESC_LAST_SHIFT 3
+#define ENA_EFA_IO_UD_TX_META_DESC_LAST_MASK BIT(3)
+#define ENA_EFA_IO_UD_TX_META_DESC_COMP_REQ_SHIFT 4
+#define ENA_EFA_IO_UD_TX_META_DESC_COMP_REQ_MASK BIT(4)
+#define ENA_EFA_IO_UD_TX_META_DESC_DEST_QP_MASK GENMASK(23, 0)
+#define ENA_EFA_IO_UD_TX_META_DESC_PUSH_LEN_SHIFT 24
+#define ENA_EFA_IO_UD_TX_META_DESC_PUSH_LEN_MASK GENMASK(31, 24)
+#define ENA_EFA_IO_UD_TX_META_DESC_AH_MASK GENMASK(23, 0)
+
+/* tx_buf_desc */
+#define ENA_EFA_IO_TX_BUF_DESC_META_DESC_SHIFT 7
+#define ENA_EFA_IO_TX_BUF_DESC_META_DESC_MASK BIT(7)
+#define ENA_EFA_IO_TX_BUF_DESC_PHASE_MASK BIT(0)
+#define ENA_EFA_IO_TX_BUF_DESC_FIRST_SHIFT 2
+#define ENA_EFA_IO_TX_BUF_DESC_FIRST_MASK BIT(2)
+#define ENA_EFA_IO_TX_BUF_DESC_LAST_SHIFT 3
+#define ENA_EFA_IO_TX_BUF_DESC_LAST_MASK BIT(3)
+#define ENA_EFA_IO_TX_BUF_DESC_BUF_ADDR_HI_MASK GENMASK(15, 0)
+
+/* ud_rx_desc */
+#define ENA_EFA_IO_UD_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_EFA_IO_UD_RX_DESC_FIRST_SHIFT 2
+#define ENA_EFA_IO_UD_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_EFA_IO_UD_RX_DESC_LAST_SHIFT 3
+#define ENA_EFA_IO_UD_RX_DESC_LAST_MASK BIT(3)
+#define ENA_EFA_IO_UD_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_EFA_IO_UD_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* cdesc_common */
+#define ENA_EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
+#define ENA_EFA_IO_CDESC_COMMON_Q_TYPE_SHIFT 1
+#define ENA_EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
+#define ENA_EFA_IO_CDESC_COMMON_HAS_IMM_SHIFT 3
+#define ENA_EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
+#define ENA_EFA_IO_CDESC_COMMON_FOUND_AH_SHIFT 4
+#define ENA_EFA_IO_CDESC_COMMON_FOUND_AH_MASK BIT(4)
+
+/* ud_rx_cdesc */
+#define ENA_EFA_IO_UD_RX_CDESC_SRC_QP_NUM_MASK GENMASK(23, 0)
+#define ENA_EFA_IO_UD_RX_CDESC_FORMAT_SHIFT 24
+#define ENA_EFA_IO_UD_RX_CDESC_FORMAT_MASK GENMASK(25, 24)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_has_imm(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->ctrl1 & ENA_EFA_IO_UD_TX_META_DESC_HAS_IMM_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_HAS_IMM_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_has_imm(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl1 |= (val << ENA_EFA_IO_UD_TX_META_DESC_HAS_IMM_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_HAS_IMM_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_push_msg(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->ctrl1 & ENA_EFA_IO_UD_TX_META_DESC_PUSH_MSG_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_PUSH_MSG_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_push_msg(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl1 |= (val << ENA_EFA_IO_UD_TX_META_DESC_PUSH_MSG_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_PUSH_MSG_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_meta_desc(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->ctrl1 & ENA_EFA_IO_UD_TX_META_DESC_META_DESC_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_meta_desc(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl1 |= (val << ENA_EFA_IO_UD_TX_META_DESC_META_DESC_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_META_DESC_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_phase(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return p->ctrl2 & ENA_EFA_IO_UD_TX_META_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_phase(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl2 |= val & ENA_EFA_IO_UD_TX_META_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_first(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->ctrl2 & ENA_EFA_IO_UD_TX_META_DESC_FIRST_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_first(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl2 |= (val << ENA_EFA_IO_UD_TX_META_DESC_FIRST_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_last(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->ctrl2 & ENA_EFA_IO_UD_TX_META_DESC_LAST_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_last(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl2 |= (val << ENA_EFA_IO_UD_TX_META_DESC_LAST_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_tx_meta_desc_comp_req(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->ctrl2 & ENA_EFA_IO_UD_TX_META_DESC_COMP_REQ_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_comp_req(struct ena_efa_io_ud_tx_meta_desc *p, uint8_t val)
+{
+ p->ctrl2 |= (val << ENA_EFA_IO_UD_TX_META_DESC_COMP_REQ_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_efa_io_ud_tx_meta_desc_dest_qp(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return p->qp & ENA_EFA_IO_UD_TX_META_DESC_DEST_QP_MASK;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_dest_qp(struct ena_efa_io_ud_tx_meta_desc *p, uint32_t val)
+{
+ p->qp |= val & ENA_EFA_IO_UD_TX_META_DESC_DEST_QP_MASK;
+}
+
+static inline uint32_t get_ena_efa_io_ud_tx_meta_desc_push_len(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return (p->qp & ENA_EFA_IO_UD_TX_META_DESC_PUSH_LEN_MASK) >> ENA_EFA_IO_UD_TX_META_DESC_PUSH_LEN_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_push_len(struct ena_efa_io_ud_tx_meta_desc *p, uint32_t val)
+{
+ p->qp |= (val << ENA_EFA_IO_UD_TX_META_DESC_PUSH_LEN_SHIFT) & ENA_EFA_IO_UD_TX_META_DESC_PUSH_LEN_MASK;
+}
+
+static inline uint32_t get_ena_efa_io_ud_tx_meta_desc_ah(const struct ena_efa_io_ud_tx_meta_desc *p)
+{
+ return p->ah & ENA_EFA_IO_UD_TX_META_DESC_AH_MASK;
+}
+
+static inline void set_ena_efa_io_ud_tx_meta_desc_ah(struct ena_efa_io_ud_tx_meta_desc *p, uint32_t val)
+{
+ p->ah |= val & ENA_EFA_IO_UD_TX_META_DESC_AH_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_tx_buf_desc_meta_desc(const struct ena_efa_io_tx_buf_desc *p)
+{
+ return (p->ctrl1 & ENA_EFA_IO_TX_BUF_DESC_META_DESC_MASK) >> ENA_EFA_IO_TX_BUF_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_efa_io_tx_buf_desc_meta_desc(struct ena_efa_io_tx_buf_desc *p, uint8_t val)
+{
+ p->ctrl1 |= (val << ENA_EFA_IO_TX_BUF_DESC_META_DESC_SHIFT) & ENA_EFA_IO_TX_BUF_DESC_META_DESC_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_tx_buf_desc_phase(const struct ena_efa_io_tx_buf_desc *p)
+{
+ return p->ctrl & ENA_EFA_IO_TX_BUF_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_efa_io_tx_buf_desc_phase(struct ena_efa_io_tx_buf_desc *p, uint8_t val)
+{
+ p->ctrl |= val & ENA_EFA_IO_TX_BUF_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_tx_buf_desc_first(const struct ena_efa_io_tx_buf_desc *p)
+{
+ return (p->ctrl & ENA_EFA_IO_TX_BUF_DESC_FIRST_MASK) >> ENA_EFA_IO_TX_BUF_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_efa_io_tx_buf_desc_first(struct ena_efa_io_tx_buf_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_EFA_IO_TX_BUF_DESC_FIRST_SHIFT) & ENA_EFA_IO_TX_BUF_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_tx_buf_desc_last(const struct ena_efa_io_tx_buf_desc *p)
+{
+ return (p->ctrl & ENA_EFA_IO_TX_BUF_DESC_LAST_MASK) >> ENA_EFA_IO_TX_BUF_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_efa_io_tx_buf_desc_last(struct ena_efa_io_tx_buf_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_EFA_IO_TX_BUF_DESC_LAST_SHIFT) & ENA_EFA_IO_TX_BUF_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_efa_io_tx_buf_desc_buf_addr_hi(const struct ena_efa_io_tx_buf_desc *p)
+{
+ return p->buf_addr_hi & ENA_EFA_IO_TX_BUF_DESC_BUF_ADDR_HI_MASK;
+}
+
+static inline void set_ena_efa_io_tx_buf_desc_buf_addr_hi(struct ena_efa_io_tx_buf_desc *p, uint32_t val)
+{
+ p->buf_addr_hi |= val & ENA_EFA_IO_TX_BUF_DESC_BUF_ADDR_HI_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_rx_desc_phase(const struct ena_efa_io_ud_rx_desc *p)
+{
+ return p->ctrl & ENA_EFA_IO_UD_RX_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_efa_io_ud_rx_desc_phase(struct ena_efa_io_ud_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= val & ENA_EFA_IO_UD_RX_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_rx_desc_first(const struct ena_efa_io_ud_rx_desc *p)
+{
+ return (p->ctrl & ENA_EFA_IO_UD_RX_DESC_FIRST_MASK) >> ENA_EFA_IO_UD_RX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_rx_desc_first(struct ena_efa_io_ud_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_EFA_IO_UD_RX_DESC_FIRST_SHIFT) & ENA_EFA_IO_UD_RX_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_rx_desc_last(const struct ena_efa_io_ud_rx_desc *p)
+{
+ return (p->ctrl & ENA_EFA_IO_UD_RX_DESC_LAST_MASK) >> ENA_EFA_IO_UD_RX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_rx_desc_last(struct ena_efa_io_ud_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_EFA_IO_UD_RX_DESC_LAST_SHIFT) & ENA_EFA_IO_UD_RX_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_ud_rx_desc_comp_req(const struct ena_efa_io_ud_rx_desc *p)
+{
+ return (p->ctrl & ENA_EFA_IO_UD_RX_DESC_COMP_REQ_MASK) >> ENA_EFA_IO_UD_RX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_rx_desc_comp_req(struct ena_efa_io_ud_rx_desc *p, uint8_t val)
+{
+ p->ctrl |= (val << ENA_EFA_IO_UD_RX_DESC_COMP_REQ_SHIFT) & ENA_EFA_IO_UD_RX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_cdesc_common_phase(const struct ena_efa_io_cdesc_common *p)
+{
+ return p->flags & ENA_EFA_IO_CDESC_COMMON_PHASE_MASK;
+}
+
+static inline void set_ena_efa_io_cdesc_common_phase(struct ena_efa_io_cdesc_common *p, uint8_t val)
+{
+ p->flags |= val & ENA_EFA_IO_CDESC_COMMON_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_cdesc_common_q_type(const struct ena_efa_io_cdesc_common *p)
+{
+ return (p->flags & ENA_EFA_IO_CDESC_COMMON_Q_TYPE_MASK) >> ENA_EFA_IO_CDESC_COMMON_Q_TYPE_SHIFT;
+}
+
+static inline void set_ena_efa_io_cdesc_common_q_type(struct ena_efa_io_cdesc_common *p, uint8_t val)
+{
+ p->flags |= (val << ENA_EFA_IO_CDESC_COMMON_Q_TYPE_SHIFT) & ENA_EFA_IO_CDESC_COMMON_Q_TYPE_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_cdesc_common_has_imm(const struct ena_efa_io_cdesc_common *p)
+{
+ return (p->flags & ENA_EFA_IO_CDESC_COMMON_HAS_IMM_MASK) >> ENA_EFA_IO_CDESC_COMMON_HAS_IMM_SHIFT;
+}
+
+static inline void set_ena_efa_io_cdesc_common_has_imm(struct ena_efa_io_cdesc_common *p, uint8_t val)
+{
+ p->flags |= (val << ENA_EFA_IO_CDESC_COMMON_HAS_IMM_SHIFT) & ENA_EFA_IO_CDESC_COMMON_HAS_IMM_MASK;
+}
+
+static inline uint8_t get_ena_efa_io_cdesc_common_found_ah(const struct ena_efa_io_cdesc_common *p)
+{
+ return (p->flags & ENA_EFA_IO_CDESC_COMMON_FOUND_AH_MASK) >> ENA_EFA_IO_CDESC_COMMON_FOUND_AH_SHIFT;
+}
+
+static inline void set_ena_efa_io_cdesc_common_found_ah(struct ena_efa_io_cdesc_common *p, uint8_t val)
+{
+ p->flags |= (val << ENA_EFA_IO_CDESC_COMMON_FOUND_AH_SHIFT) & ENA_EFA_IO_CDESC_COMMON_FOUND_AH_MASK;
+}
+
+static inline uint32_t get_ena_efa_io_ud_rx_cdesc_src_qp_num(const struct ena_efa_io_ud_rx_cdesc *p)
+{
+ return p->source & ENA_EFA_IO_UD_RX_CDESC_SRC_QP_NUM_MASK;
+}
+
+static inline void set_ena_efa_io_ud_rx_cdesc_src_qp_num(struct ena_efa_io_ud_rx_cdesc *p, uint32_t val)
+{
+ p->source |= val & ENA_EFA_IO_UD_RX_CDESC_SRC_QP_NUM_MASK;
+}
+
+static inline uint32_t get_ena_efa_io_ud_rx_cdesc_format(const struct ena_efa_io_ud_rx_cdesc *p)
+{
+ return (p->source & ENA_EFA_IO_UD_RX_CDESC_FORMAT_MASK) >> ENA_EFA_IO_UD_RX_CDESC_FORMAT_SHIFT;
+}
+
+static inline void set_ena_efa_io_ud_rx_cdesc_format(struct ena_efa_io_ud_rx_cdesc *p, uint32_t val)
+{
+ p->source |= (val << ENA_EFA_IO_UD_RX_CDESC_FORMAT_SHIFT) & ENA_EFA_IO_UD_RX_CDESC_FORMAT_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /* _ENA_EFA_IO_H_ */
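
Every accessor in this generated header follows the same mask/shift idiom: get_* isolates a field with its MASK and shifts it down, set_* shifts the value up and ORs it in under the MASK. A minimal standalone sketch of the idiom, using a hypothetical FOO_CTRL_BAR field rather than any real ENA_EFA_* name:

#include <stdint.h>
#include <assert.h>

/* Hypothetical 2-bit field occupying bits 5:4 of a control byte. */
#define FOO_CTRL_BAR_SHIFT 4
#define FOO_CTRL_BAR_MASK  0x30

struct foo_desc {
	uint8_t ctrl;
};

static inline uint8_t get_foo_bar(const struct foo_desc *p)
{
	return (p->ctrl & FOO_CTRL_BAR_MASK) >> FOO_CTRL_BAR_SHIFT;
}

static inline void set_foo_bar(struct foo_desc *p, uint8_t val)
{
	/* OR-in only; like the ENA accessors, this assumes the field
	 * starts out zeroed. */
	p->ctrl |= (val << FOO_CTRL_BAR_SHIFT) & FOO_CTRL_BAR_MASK;
}

int main(void)
{
	struct foo_desc d = { 0 };

	set_foo_bar(&d, 0x3);
	assert(get_foo_bar(&d) == 0x3);
	return 0;
}

Note that set_* only ORs bits in rather than doing a read-modify-write, so callers must start from a zeroed descriptor; this matches the memset(desc, 0x0, ...) calls in the I/O path later in this diff.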
Index: sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
===================================================================
--- sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
+++ sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
@@ -30,7 +30,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
Index: sys/contrib/ena-com/ena_defs/ena_gen_info.h
===================================================================
--- /dev/null
+++ sys/contrib/ena-com/ena_defs/ena_gen_info.h
@@ -0,0 +1,34 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#define ENA_GEN_DATE "Sun Nov 20 11:22:05 IST 2016"
+#define ENA_GEN_COMMIT "44da4e8"
Index: sys/contrib/ena-com/ena_defs/ena_includes.h
===================================================================
--- /dev/null
+++ sys/contrib/ena-com/ena_defs/ena_includes.h
@@ -0,0 +1,6 @@
+#include "ena_common_defs.h"
+#include "ena_regs_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_efa_admin_defs.h"
+#include "ena_efa_io_defs.h"
Index: sys/contrib/ena-com/ena_defs/ena_regs_defs.h
===================================================================
--- sys/contrib/ena-com/ena_defs/ena_regs_defs.h
+++ sys/contrib/ena-com/ena_defs/ena_regs_defs.h
@@ -30,10 +30,39 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+};
+
/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
@@ -106,6 +135,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
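
The new RESET_REASON definitions pack the 4-bit enum above into bits 31:28 of the dev_ctl register. A small sketch of the encoding, reusing the two definitions just introduced (the helper name is hypothetical):

#include <stdint.h>
#include <assert.h>

#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
#define ENA_REGS_DEV_CTL_RESET_REASON_MASK  0xf0000000

/* Pack a reset reason (0..13 in the enum above) into a dev_ctl value. */
static uint32_t dev_ctl_with_reason(uint32_t dev_ctl, uint32_t reason)
{
	dev_ctl &= ~ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	dev_ctl |= (reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
	    ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	return dev_ctl;
}

int main(void)
{
	/* ENA_REGS_RESET_MISS_TX_CMPL == 3 */
	uint32_t v = dev_ctl_with_reason(0, 3);

	assert((v >> ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) == 3);
	return 0;
}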
Index: sys/contrib/ena-com/ena_efa_com.c
===================================================================
--- /dev/null
+++ sys/contrib/ena-com/ena_efa_com.c
@@ -0,0 +1,431 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ena_netdev.h"
+#include "ena_com.h"
+#include "ena_efa_com.h"
+#include "ena_efa_admin_defs.h"
+
+static inline int ena_efa_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+ ena_trc_err("dma address has more bits that the device supports\n");
+ return ENA_COM_INVAL;
+ }
+
+ ena_addr->mem_addr_low = (u32)addr;
+ ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
+
+ return 0;
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+int ena_efa_create_qp(struct pci_dev *pdev,
+ struct ena_efa_create_qp_params *params,
+ struct ena_efa_create_qp_result *res)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_admin_queue *admin_queue = &(ena_dev->admin_queue);
+ struct ena_efa_admin_create_qp_cmd create_qp_cmd;
+ struct ena_efa_admin_create_qp_resp cmd_completion;
+ int err;
+
+ memset(&create_qp_cmd, 0x0, sizeof(create_qp_cmd));
+
+ create_qp_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_CREATE_QP;
+
+ create_qp_cmd.pd = params->pd;
+ create_qp_cmd.qp_type = ENA_EFA_ADMIN_QP_TYPE_UD; /* UD QP */
+ create_qp_cmd.flags = 0;
+ create_qp_cmd.sq_base_addr = 0;
+ create_qp_cmd.rq_base_addr = params->rq_base_addr;
+ create_qp_cmd.send_cq_idx = params->send_cq_idx;
+ create_qp_cmd.recv_cq_idx = params->recv_cq_idx;
+ /* TODO: make use of sq_head (needs params->sq_head_addr,
+ * which must first be converted from a user address to a
+ * physical address). */
+ create_qp_cmd.srq_idx = 0;
+ create_qp_cmd.qp_alloc_size.send_queue_ring_size =
+ params->sq_ring_size_in_bytes;
+ create_qp_cmd.qp_alloc_size.send_queue_depth =
+ params->sq_depth;
+ create_qp_cmd.qp_alloc_size.push_buf_size =
+ params->push_buf_size;
+ create_qp_cmd.qp_alloc_size.recv_queue_ring_size =
+ params->rq_ring_size_in_bytes;
+
+ err = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_qp_cmd,
+ sizeof(create_qp_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(err))
+ ena_trc_err("Failed to create cq [%d]\n", err);
+ else {
+ res->qp_handle = cmd_completion.qp_handle;
+ res->qp_number = cmd_completion.qp_number;
+ res->sq_db_offset = cmd_completion.sq_db_offset;
+ res->rq_db_offset = cmd_completion.rq_db_offset;
+ res->llq_descriptors_offset =
+ cmd_completion.llq_descriptors_offset;
+ res->llq_push_bufs_offset =
+ cmd_completion.llq_push_bufs_offset;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_create_qp);
+
+/*****************************************************************************/
+
+int ena_efa_destroy_qp(struct pci_dev *pdev,
+ struct ena_efa_destroy_qp_params *params)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_admin_queue *admin_queue = &(ena_dev->admin_queue);
+ struct ena_efa_admin_destroy_qp_cmd qp_cmd;
+ struct ena_efa_admin_destroy_qp_resp cmd_completion;
+ int err;
+
+ memset(&qp_cmd, 0x0, sizeof(qp_cmd));
+
+ qp_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_DESTROY_QP;
+
+ qp_cmd.qp_handle = params->qp_handle;
+
+ err = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&qp_cmd,
+ sizeof(qp_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(err))
+ ena_trc_err("failed to destroy qp-0x%08x [%d]\n",
+ qp_cmd.qp_handle, err);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_destroy_qp);
+
+/*****************************************************************************/
+
+static int do_ena_efa_create_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_efa_admin_create_cq_cmd create_cmd;
+ struct ena_efa_admin_create_cq_resp cmd_completion;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
+
+ create_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_CREATE_CQ;
+
+ create_cmd.cq_caps_2 = (io_cq->cdesc_entry_size_in_bytes / 4) &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.msix_vector = 0;
+ create_cmd.cq_depth = io_cq->q_depth;
+
+ ret = ena_efa_com_mem_addr_set(ena_dev,
+ &create_cmd.cq_ba,
+ io_cq->cdesc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_cq->idx = cmd_completion.cq_idx;
+
+ io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_interrupt_unmask_register_offset);
+
+ if (cmd_completion.cq_head_db_register_offset)
+ io_cq->cq_head_db_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_head_db_register_offset);
+
+ if (cmd_completion.numa_node_register_offset)
+ io_cq->numa_node_cfg_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+ ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+}
+
+/*****************************************************************************/
+
+int ena_efa_create_cq(struct pci_dev *pdev,
+ struct ena_efa_create_cq_params *params,
+ struct ena_efa_create_cq_result *result)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_io_cq io_cq;
+ int err;
+
+ memset(&io_cq, 0, sizeof(io_cq));
+
+ io_cq.phase = 1;
+ io_cq.cdesc_addr.phys_addr = params->dma_addr;
+ io_cq.cdesc_entry_size_in_bytes = params->entry_size_in_bytes;
+ io_cq.q_depth = params->q_depth;
+
+ err = do_ena_efa_create_cq(ena_dev, &io_cq);
+ if (unlikely(err))
+ ena_trc_err("failed to create cq[%d]\n", err);
+ else {
+ result->q_idx = io_cq.idx;
+ result->actual_depth = io_cq.q_depth;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_create_cq);
+
+/*****************************************************************************/
+
+static int do_ena_efa_destroy_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_efa_admin_destroy_cq_cmd destroy_cmd;
+ struct ena_efa_admin_destroy_cq_resp destroy_resp;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ destroy_cmd.cq_idx = io_cq->idx;
+ destroy_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_DESTROY_CQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+}
+
+/*****************************************************************************/
+
+int ena_efa_destroy_cq(struct pci_dev *pdev,
+ struct ena_efa_destroy_cq_params *params)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_io_cq io_cq;
+ int err;
+
+ memset(&io_cq, 0, sizeof(io_cq));
+ io_cq.idx = params->qid;
+
+ err = do_ena_efa_destroy_cq(ena_dev, &io_cq);
+ if (unlikely(err))
+ ena_trc_err("failed to destory cq-%u [%d]\n", io_cq.idx, err);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_destroy_cq);
+
+/*****************************************************************************/
+
+int ena_efa_register_mr(struct pci_dev *pdev,
+ struct ena_efa_reg_mr_params *params,
+ struct ena_efa_reg_mr_result *result)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_admin_queue *admin_queue = &(ena_dev->admin_queue);
+ struct ena_efa_admin_reg_mr_cmd mr_cmd;
+ struct ena_efa_admin_reg_mr_resp cmd_completion;
+ int err;
+
+ BUILD_BUG_ON((sizeof(struct ena_admin_ctrl_buff_info) !=
+ sizeof(struct ena_efa_ctrl_buff_info)));
+
+ memset(&mr_cmd, 0x0, sizeof(mr_cmd));
+
+ mr_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_REG_MR;
+ mr_cmd.aq_common_desc.flags = 0;
+
+ mr_cmd.pd = params->pd;
+ mr_cmd.mr_length = params->mr_length_in_bytes;
+
+ mr_cmd.flags |= 0 &
+ ENA_EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK;
+ mr_cmd.flags |= (params->page_shift <<
+ ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_SHIFT) &
+ ENA_EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
+ mr_cmd.permissions |= 0 &
+ ENA_EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+ mr_cmd.iova = params->iova;
+
+ if (params->inline_pbl) {
+ memcpy(mr_cmd.pbl.inline_pbl_array,
+ params->pbl.inline_pbl_array,
+ sizeof(mr_cmd.pbl.inline_pbl_array));
+ } else {
+ mr_cmd.pbl.pbl.length = params->pbl.pbl.length;
+ mr_cmd.pbl.pbl.address.mem_addr_low =
+ params->pbl.pbl.address.mem_addr_low;
+ mr_cmd.pbl.pbl.address.mem_addr_high =
+ params->pbl.pbl.address.mem_addr_high;
+ mr_cmd.aq_common_desc.flags |=
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ if (params->indirect)
+ mr_cmd.aq_common_desc.flags |=
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ }
+
+ err = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&mr_cmd,
+ sizeof(mr_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(err))
+ ena_trc_err("failed to register mr [%d]\n", err);
+ else {
+ result->l_key = cmd_completion.l_key;
+ result->r_key = cmd_completion.r_key;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_register_mr);
+
+
+/*****************************************************************************/
+
+int ena_efa_dereg_mr(struct pci_dev *pdev,
+ struct ena_efa_dereg_mr_params *params)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_admin_queue *admin_queue = &(ena_dev->admin_queue);
+ struct ena_efa_admin_dereg_mr_cmd mr_cmd;
+ struct ena_efa_admin_dereg_mr_resp cmd_completion;
+ int err;
+
+ memset(&mr_cmd, 0x0, sizeof(mr_cmd));
+
+ mr_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_DEREG_MR;
+ mr_cmd.l_key = params->l_key;
+
+ err = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&mr_cmd,
+ sizeof(mr_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(err))
+ ena_trc_err("failed to deregister mr(lkey-0x%08X) [%d]\n",
+ mr_cmd.l_key, err);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_dereg_mr);
+
+/*****************************************************************************/
+
+int ena_efa_create_ah(struct pci_dev *pdev,
+ struct ena_efa_create_ah_params *params,
+ struct ena_efa_create_ah_result *result)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_admin_queue *admin_queue = &(ena_dev->admin_queue);
+ struct ena_efa_admin_create_ah_cmd ah_cmd;
+ struct ena_efa_admin_create_ah_resp cmd_completion;
+ int err;
+
+ memset(&ah_cmd, 0x0, sizeof(ah_cmd));
+
+ ah_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_CREATE_AH;
+
+ memcpy(ah_cmd.dest_ip_addr, params->dest_ip_addr, 4);
+
+ err = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&ah_cmd,
+ sizeof(ah_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(err))
+ ena_trc_err("failed to create ah [%d]\n", err);
+ else
+ result->address_handle = cmd_completion.address_handle;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_create_ah);
+
+/*****************************************************************************/
+
+int ena_efa_destroy_ah(struct pci_dev *pdev,
+ struct ena_efa_destroy_ah_params *params)
+{
+ struct ena_com_dev *ena_dev = ena_efa_enadev_get(pdev);
+ struct ena_com_admin_queue *admin_queue = &(ena_dev->admin_queue);
+ struct ena_efa_admin_destroy_ah_cmd ah_cmd;
+ struct ena_efa_admin_destroy_ah_resp cmd_completion;
+ int err;
+
+ memset(&ah_cmd, 0x0, sizeof(ah_cmd));
+
+ ah_cmd.aq_common_desc.opcode = ENA_EFA_ADMIN_DESTROY_AH;
+ ah_cmd.address_handle = params->address_handle;
+
+ err = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&ah_cmd,
+ sizeof(ah_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(err))
+ ena_trc_err("failed to destroy ah-0x%08X [%d]\n",
+ ah_cmd.address_handle, err);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ena_efa_destroy_ah);
+
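Every wrapper in this new file follows the same admin-command skeleton: zero the command, set the opcode, fill the parameters, hand command and completion buffers to ena_com_execute_admin_command(), then either log the error or copy the results out. A standalone model of that flow, with stand-in types since the real ena_admin_* structures live in the defs headers:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-ins for the admin-queue plumbing; the driver
 * uses struct ena_admin_aq_entry / ena_admin_acq_entry and
 * ena_com_execute_admin_command(). */
struct cmd  { uint8_t opcode; uint32_t arg; };
struct resp { uint32_t handle; };

static int execute_admin_command(const struct cmd *c, size_t clen,
    struct resp *r, size_t rlen)
{
	(void)clen; (void)rlen;
	r->handle = c->arg + 1;	/* pretend the device answered */
	return 0;
}

/* The skeleton every ena_efa_* wrapper above follows. */
static int do_admin_op(uint32_t arg, uint32_t *out_handle)
{
	struct cmd c;
	struct resp r;
	int err;

	memset(&c, 0, sizeof(c));	/* 1. zero the command */
	c.opcode = 42;			/* 2. set the opcode */
	c.arg = arg;			/* 3. fill parameters */

	err = execute_admin_command(&c, sizeof(c), &r, sizeof(r));
	if (err)			/* 4. log on failure ... */
		fprintf(stderr, "admin op failed [%d]\n", err);
	else				/* 5. ... or copy results out */
		*out_handle = r.handle;
	return err;
}

int main(void)
{
	uint32_t h;

	return do_admin_op(7, &h);
}
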
Index: sys/contrib/ena-com/ena_eth_com.h
===================================================================
--- sys/contrib/ena-com/ena_eth_com.h
+++ sys/contrib/ena-com/ena_eth_com.h
@@ -98,7 +98,7 @@
ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -109,6 +109,25 @@
return io_sq->q_depth - 1 - cnt;
}
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+ u16 required_buffers)
+{
+ int temp;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return ena_com_free_desc(io_sq) >= required_buffers;
+
+ /* This calculation doesn't need to be 100% accurate. To reduce
+ * the calculation overhead, just subtract 2 lines from the free
+ * descs (one for the header line and one to compensate for the
+ * round-down of the division).
+ */
+ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+ return ena_com_free_desc(io_sq) > temp;
+}
+
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
u16 tail;
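
For LLQ queues the space check above is deliberately approximate: each queue line holds llq_info.descs_per_entry descriptors, the integer division rounds down, and the header consumes a line of its own, hence the +2. A worked standalone model of the LLQ branch (ena_com_free_desc() replaced by a plain parameter):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the LLQ branch above; the host-placement
 * branch is omitted. */
static bool llq_have_enough_space(int free_desc, uint16_t required_buffers,
    uint16_t descs_per_entry)
{
	/* One extra line for the header, one to absorb the
	 * round-down of the integer division. */
	int temp = required_buffers / descs_per_entry + 2;

	return free_desc > temp;
}

int main(void)
{
	/* 6 buffers, 4 descriptors per LLQ entry: temp = 6/4 + 2 = 3,
	 * so strictly more than 3 free entries are required. */
	printf("%d\n", llq_have_enough_space(4, 6, 4));	/* 1 */
	printf("%d\n", llq_have_enough_space(3, 6, 4));	/* 0 */
	return 0;
}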
Index: sys/contrib/ena-com/ena_eth_com.c
===================================================================
--- sys/contrib/ena-com/ena_eth_com.c
+++ sys/contrib/ena-com/ena_eth_com.c
@@ -64,7 +64,7 @@
io_cq->phase ^= 1;
}
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@@ -76,22 +76,27 @@
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ u8 *bounce_buffer)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u32 offset = tail_masked * io_sq->desc_entry_size;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
- /* In case this queue isn't a LLQ */
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return;
+ u16 dst_tail_mask;
+ u32 dst_offset;
- memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
- io_sq->desc_addr.virt_addr + offset,
- io_sq->desc_entry_size);
-}
+ dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+ dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+ /* Make sure everything was written into the bounce buffer before
+ * writing the bounce buffer to the device
+ */
+ wmb();
+
+ /* The line is completed. Copy it to dev */
+ ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+ bounce_buffer,
+ llq_info->desc_list_entry_size);
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
io_sq->tail++;
/* Switch phase bit in case of wrap around */
@@ -99,26 +104,124 @@
io_sq->phase ^= 1;
}
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
- u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ u8 *header_src,
+ u16 header_len)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u8 __iomem *dev_head_addr =
- io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+ u16 header_offset;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return 0;
- if (unlikely(!io_sq->header_addr)) {
- ena_trc_err("Push buffer header ptr is NULL\n");
- return ENA_COM_INVAL;
+ header_offset =
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
+ ena_trc_err("trying to write header larger than llq entry can accommodate\n");
+ return ENA_COM_FAULT;
+ }
+
+ if (unlikely(!bounce_buffer)) {
+ ena_trc_err("bounce buffer is NULL\n");
+ return ENA_COM_FAULT;
}
- memcpy_toio(dev_head_addr, head_src, header_len);
+ memcpy(bounce_buffer + header_offset, header_src, header_len);
return 0;
}
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ u8 *bounce_buffer;
+ void *sq_desc;
+
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+ if (unlikely(!bounce_buffer)) {
+ ena_trc_err("bounce buffer is NULL\n");
+ return NULL;
+ }
+
+ sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+ pkt_ctrl->idx++;
+ pkt_ctrl->descs_left_in_line--;
+
+ return sq_desc;
+}
+
+static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return;
+
+ /* bounce buffer was used, so write it and get a new one */
+ if (pkt_ctrl->idx) {
+ ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+ }
+
+ pkt_ctrl->idx = 0;
+ pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return get_sq_desc_llq(io_sq);
+
+ return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+ if (!pkt_ctrl->descs_left_in_line) {
+ ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+
+ pkt_ctrl->idx = 0;
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+ pkt_ctrl->descs_left_in_line = 1;
+ else
+ pkt_ctrl->descs_left_in_line =
+ llq_info->desc_list_entry_size / io_sq->desc_entry_size;
+ }
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ ena_com_sq_update_llq_tail(io_sq);
+ return;
+ }
+
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
+}
+
static inline struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
@@ -228,7 +331,6 @@
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
ena_com_sq_update_tail(io_sq);
}
@@ -271,10 +373,11 @@
{
struct ena_eth_io_tx_desc *desc = NULL;
struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
- void *push_header = ena_tx_ctx->push_header;
+ void *buffer_to_push = ena_tx_ctx->push_header;
u16 header_len = ena_tx_ctx->header_len;
u16 num_bufs = ena_tx_ctx->num_bufs;
- int total_desc, i, rc;
+ u16 start_tail = io_sq->tail;
+ int i, rc;
bool have_meta;
u64 addr_hi;
@@ -282,7 +385,7 @@
"wrong Q type");
/* num_bufs +1 for potential meta desc */
- if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+ if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
ena_trc_err("Not enough space in the tx queue\n");
return ENA_COM_NO_MEM;
}
@@ -293,8 +396,10 @@
return ENA_COM_INVAL;
}
- /* start with pushing the header (if needed) */
- rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && !buffer_to_push))
+ return ENA_COM_INVAL;
+
+ rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;
@@ -305,11 +410,14 @@
/* If the caller doesn't want send packets */
if (unlikely(!num_bufs && !header_len)) {
- *nb_hw_desc = have_meta ? 0 : 1;
+ ena_com_close_bounce_buffer(io_sq);
+ *nb_hw_desc = io_sq->tail - start_tail;
return 0;
}
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return ENA_COM_FAULT;
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
/* Set first desc when we don't have meta descriptor */
@@ -361,10 +469,12 @@
for (i = 0; i < num_bufs; i++) {
/* The first desc share the same desc as the header */
if (likely(i != 0)) {
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
ena_com_sq_update_tail(io_sq);
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return ENA_COM_FAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
desc->len_ctrl |= (io_sq->phase <<
@@ -387,14 +497,11 @@
/* set the last desc indicator */
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
ena_com_sq_update_tail(io_sq);
- total_desc = ENA_MAX16(num_bufs, 1);
- total_desc += have_meta ? 1 : 0;
+ ena_com_close_bounce_buffer(io_sq);
- *nb_hw_desc = total_desc;
+ *nb_hw_desc = io_sq->tail - start_tail;
return 0;
}
@@ -456,10 +563,13 @@
ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
"wrong Q type");
- if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return ENA_COM_NO_SPACE;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return ENA_COM_FAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
desc->length = ena_buf->len;
@@ -501,6 +611,11 @@
if (cdesc_phase != expected_phase)
return ENA_COM_TRY_AGAIN;
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+ return ENA_COM_INVAL;
+ }
+
ena_com_cq_inc_head(io_cq);
*req_id = READ_ONCE(cdesc->req_id);
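
The LLQ rework above replaces the per-descriptor memcpy_toio() with line-granular writes: descriptors accumulate in a host-side bounce buffer (get_sq_desc_llq), a full line is flushed to device memory by ena_com_sq_update_llq_tail(), and a partially filled line is flushed at end of packet by ena_com_close_bounce_buffer(). A toy model of that line flow, with a hypothetical DESCS_PER_LINE in place of the negotiated llq_info values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A "line" holds DESCS_PER_LINE descriptor slots; when it fills,
 * or the packet ends, it is flushed to the device in one copy. */
#define DESCS_PER_LINE 4

struct pkt_ctrl {
	uint16_t idx;			/* next free slot in the line */
	uint16_t descs_left_in_line;
	uint32_t line[DESCS_PER_LINE];	/* stands in for curr_bounce_buf */
};

static void flush_line(struct pkt_ctrl *pc)
{
	printf("flush %u descs to device\n", pc->idx);
	memset(pc->line, 0, sizeof(pc->line));
	pc->idx = 0;
	pc->descs_left_in_line = DESCS_PER_LINE;
}

static void push_desc(struct pkt_ctrl *pc, uint32_t desc)
{
	pc->line[pc->idx++] = desc;
	if (--pc->descs_left_in_line == 0)	/* line full: flush now */
		flush_line(pc);
}

int main(void)
{
	struct pkt_ctrl pc = { .descs_left_in_line = DESCS_PER_LINE };
	int i;

	for (i = 0; i < 6; i++)		/* a 6-descriptor packet */
		push_desc(&pc, i);
	if (pc.idx)			/* close_bounce_buffer analogue */
		flush_line(&pc);
	return 0;
}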
Index: sys/contrib/ena-com/ena_plat.h
===================================================================
--- sys/contrib/ena-com/ena_plat.h
+++ sys/contrib/ena-com/ena_plat.h
@@ -90,6 +90,9 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
extern struct ena_bus_space ebs;
/* Levels */
@@ -130,9 +133,6 @@
#define ena_trc_warn(format, arg...) ena_trace(ENA_WARNING, format, ##arg)
#define ena_trc_err(format, arg...) ena_trace(ENA_ALERT, format, ##arg)
-#define unlikely(x) __predict_false(x)
-#define likely(x) __predict_true(x)
-
#define __iomem
#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
@@ -186,6 +186,7 @@
#define ENA_COM_NO_MEM ENOMEM
#define ENA_COM_NO_SPACE ENOSPC
#define ENA_COM_TRY_AGAIN -1
+#define ENA_COM_UNSUPPORTED EOPNOTSUPP
#define ENA_COM_NO_DEVICE ENODEV
#define ENA_COM_PERMISSION EPERM
#define ENA_COM_TIMER_EXPIRED ETIMEDOUT
@@ -256,7 +257,7 @@
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
-#define u64 uint64_t
+#define u64 unsigned long long
typedef struct {
bus_addr_t paddr;
@@ -281,6 +282,9 @@
int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
int mapflags);
+/* The size must be 8-byte aligned */
+#define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) __iowrite64_copy((dst), (src), (size) / 8)
+
#define ENA_MEM_ALLOC(dmadev, size) malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) (virt = NULL)
#define ENA_MEM_FREE(dmadev, ptr) free(ptr, M_DEVBUF)
@@ -342,26 +346,11 @@
#define dma_unmap_len(p, name) ((p)->name)
#define dma_unmap_len_set(p, name, v) (((p)->name) = (v))
-#define memcpy_toio memcpy
-
#define ATOMIC32_INC(I32_PTR) atomic_add_int(I32_PTR, 1)
#define ATOMIC32_DEC(I32_PTR) atomic_add_int(I32_PTR, -1)
#define ATOMIC32_READ(I32_PTR) atomic_load_acq_int(I32_PTR)
#define ATOMIC32_SET(I32_PTR, VAL) atomic_store_rel_int(I32_PTR, VAL)
-#define barrier() __asm__ __volatile__("": : :"memory")
-#define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x))
-#define READ_ONCE(x) ({ \
- __typeof(x) __var; \
- barrier(); \
- __var = ACCESS_ONCE(x); \
- barrier(); \
- __var; \
- })
-
-#include "ena_common_defs.h"
-#include "ena_admin_defs.h"
-#include "ena_eth_io_defs.h"
-#include "ena_regs_defs.h"
+#include "ena_defs/ena_includes.h"
#endif /* ENA_PLAT_H_ */
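
ENA_MEMCPY_TO_DEVICE_64() divides the byte count by 8 before calling __iowrite64_copy(), so any size that is not a multiple of 8 silently drops the tail bytes; that is the point of the alignment comment, and why the callers always pass desc_list_entry_size. A userspace model of that contract (iowrite64_copy_model is a stand-in for the linuxkpi __iowrite64_copy):

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Stand-in for __iowrite64_copy(): copy `count` 64-bit words. */
static void iowrite64_copy_model(void *dst, const void *src, size_t count)
{
	memcpy(dst, src, count * 8);
}

/* Mirrors the macro above: byte size is converted to a word count,
 * so (size % 8) trailing bytes would never be copied. */
#define MEMCPY_TO_DEVICE_64(dst, src, size) \
	iowrite64_copy_model((dst), (src), (size) / 8)

int main(void)
{
	uint8_t src[16] = "fifteen chars..", dst[16] = { 0 };

	MEMCPY_TO_DEVICE_64(dst, src, sizeof(src));	/* 16 / 8 = 2 words */
	assert(memcmp(dst, src, 16) == 0);
	return 0;
}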
Index: sys/dev/ena/ena.h
===================================================================
--- sys/dev/ena/ena.h
+++ sys/dev/ena/ena.h
@@ -40,7 +40,7 @@
#include "ena-com/ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 0
-#define DRV_MODULE_VER_MINOR 7
+#define DRV_MODULE_VER_MINOR 8
#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
@@ -90,7 +90,7 @@
#define ENA_RX_HASH_KEY_NUM 10
#define ENA_RX_THASH_TABLE_SIZE (1 << 8)
-#define ENA_TX_CLEANUP_TRESHOLD 128
+#define ENA_TX_CLEANUP_THRESHOLD 128
#define DB_THRESHOLD 64
@@ -410,6 +410,8 @@
/* Statistics */
struct ena_stats_dev dev_stats;
struct ena_hw_stats hw_stats;
+
+ enum ena_regs_reset_reason_types reset_reason;
};
Index: sys/dev/ena/ena.c
===================================================================
--- sys/dev/ena/ena.c
+++ sys/dev/ena/ena.c
@@ -1612,7 +1612,7 @@
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_sq_empty_space(io_sq);
+ refill_required = ena_com_free_desc(io_sq);
refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DEVIDER;
if (refill_required > refill_threshold) {
@@ -2047,17 +2047,17 @@
/* Set indirect table */
rc = ena_com_indirect_table_set(ena_dev);
- if (unlikely(rc && rc != EPERM))
+ if (unlikely(rc && rc != EOPNOTSUPP))
return rc;
/* Configure hash function (if supported) */
rc = ena_com_set_hash_function(ena_dev);
- if (unlikely(rc && (rc != EPERM)))
+ if (unlikely(rc && (rc != EOPNOTSUPP)))
return rc;
/* Configure hash inputs (if supported) */
rc = ena_com_set_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != EPERM)))
+ if (unlikely(rc && (rc != EOPNOTSUPP)))
return rc;
return 0;
@@ -2506,6 +2506,7 @@
static void
ena_down(struct ena_adapter *adapter)
{
+ int rc;
if (adapter->up) {
device_printf(adapter->pdev, "device is going DOWN\n");
@@ -2522,6 +2523,14 @@
ena_free_io_irq(adapter);
+ if (adapter->trigger_reset) {
+ rc = ena_com_dev_reset(adapter->ena_dev,
+ adapter->reset_reason);
+ if (rc)
+ device_printf(adapter->pdev,
+ "Device reset failed\n");
+ }
+
ena_destroy_all_io_queues(adapter);
ena_free_all_tx_bufs(adapter);
@@ -2789,7 +2798,8 @@
" header csum flags %#jx",
mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags);
- if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
+ if (!ena_com_sq_have_enough_space(io_sq,
+ ENA_TX_CLEANUP_THRESHOLD))
ena_tx_cleanup(tx_ring);
if ((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0) {
@@ -2831,7 +2841,8 @@
counter_u64_add(tx_ring->tx_stats.doorbells, 1);
}
- if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
+ if (!ena_com_sq_have_enough_space(io_sq,
+ ENA_TX_CLEANUP_THRESHOLD))
ena_tx_cleanup(tx_ring);
}
@@ -3000,7 +3011,7 @@
#endif
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(qid));
- if (unlikely(rc && (rc != EPERM))) {
+ if (unlikely(rc && (rc != EOPNOTSUPP))) {
device_printf(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -3008,13 +3019,13 @@
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
- if (unlikely(rc && (rc != EPERM))) {
+ if (unlikely(rc && (rc != EOPNOTSUPP))) {
device_printf(dev, "Cannot fill hash function\n");
goto err_fill_indir;
}
rc = ena_com_set_default_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != EPERM))) {
+ if (unlikely(rc && (rc != EOPNOTSUPP))) {
device_printf(dev, "Cannot fill hash control\n");
goto err_fill_indir;
}
@@ -3087,7 +3098,7 @@
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- if (rc == EPERM)
+ if (rc == EOPNOTSUPP)
ena_trace(ENA_WARNING, "Cannot set host attributes\n");
else
ena_trace(ENA_ALERT, "Cannot set host attributes\n");
@@ -3124,7 +3135,7 @@
readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
- rc = ena_com_dev_reset(ena_dev);
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
device_printf(pdev, "Can not reset device\n");
goto err_mmio_read_less;
@@ -3255,6 +3266,7 @@
device_printf(adapter->pdev,
"Keep alive watchdog timeout.\n");
counter_u64_add(adapter->dev_stats.wd_expired, 1);
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
adapter->trigger_reset = true;
}
}
@@ -3266,6 +3278,7 @@
device_printf(adapter->pdev,
"ENA admin queue is not in running state!\n");
counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
adapter->trigger_reset = true;
}
}
@@ -3331,6 +3344,8 @@
"is above the threshold (%d > %d). "
"Reset the device\n", missed_tx,
adapter->missing_tx_threshold);
+ adapter->reset_reason =
+ ENA_REGS_RESET_MISS_TX_CMPL;
adapter->trigger_reset = true;
return;
}
@@ -3398,15 +3413,15 @@
dev_up = adapter->up;
ena_com_set_admin_running_state(ena_dev, false);
- ena_free_mgmnt_irq(adapter);
ena_down(adapter);
- ena_com_dev_reset(ena_dev);
+ ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->trigger_reset = false;
/* Finished destroy part. Restart the device */
@@ -3443,7 +3458,6 @@
return;
err_msix_free:
- ena_com_dev_reset(ena_dev);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_com_free:
@@ -3588,6 +3602,8 @@
goto err_com_free;
}
+ adapter->reset_reason = ENA_REGS_RESET_NORMAL;
+
adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;
@@ -3664,6 +3680,7 @@
err_stats_tq:
taskqueue_free(adapter->reset_tq);
err_reset_tq:
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_ifp_free:
@@ -3745,7 +3762,7 @@
/* Reset the device only if the device is running. */
if (adapter->running)
- ena_com_dev_reset(ena_dev);
+ ena_com_dev_reset(ena_dev, adapter->reset_reason);
ena_com_delete_host_info(ena_dev);
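
Each watchdog path above now records why a reset is needed in adapter->reset_reason before arming trigger_reset, and the teardown paths pass that reason to ena_com_dev_reset() before re-arming the field to ENA_REGS_RESET_NORMAL. A toy sketch of that record-then-consume ordering (names abbreviated, flow condensed from the hunks above):

#include <stdbool.h>
#include <stdio.h>

enum reset_reason { RESET_NORMAL, RESET_KEEP_ALIVE_TO, RESET_ADMIN_TO };

struct adapter {
	bool trigger_reset;
	enum reset_reason reset_reason;
};

/* Watchdog side: record the reason before arming the trigger, so the
 * reset path never reads a stale value. */
static void report_keep_alive_timeout(struct adapter *a)
{
	a->reset_reason = RESET_KEEP_ALIVE_TO;
	a->trigger_reset = true;
}

/* Teardown side: consume the reason, then re-arm to NORMAL. */
static void down_path(struct adapter *a)
{
	if (a->trigger_reset) {
		printf("dev_reset(reason=%d)\n", a->reset_reason);
		a->trigger_reset = false;
		a->reset_reason = RESET_NORMAL;
	}
}

int main(void)
{
	struct adapter a = { .reset_reason = RESET_NORMAL };

	report_keep_alive_timeout(&a);
	down_path(&a);
	return 0;
}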
Index: sys/modules/ena/Makefile
===================================================================
--- sys/modules/ena/Makefile
+++ sys/modules/ena/Makefile
@@ -37,5 +37,6 @@
SRCS = ena.c ena_com.c ena_eth_com.c ena_sysctl.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += -I${SRCTOP}/sys/contrib
+CFLAGS += -I${SRCTOP}/sys/compat/linuxkpi/common/include
.include <bsd.kmod.mk>
