diff --git a/ena_com.c b/ena_com.c
index 986de6dba112..3934d9a98d6e 100644
--- a/ena_com.c
+++ b/ena_com.c
@@ -1,3434 +1,3455 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * * Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in
  * the documentation and/or other materials provided with the
  * distribution.
  * * Neither the name of copyright holder nor the names of its
  * contributors may be used to endorse or promote products derived
  * from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "ena_com.h"
 
 /*****************************************************************************/
 /*****************************************************************************/
 
 /* Timeout in micro-sec */
 #define ADMIN_CMD_TIMEOUT_US (3000000)
 
 #define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
 
 #ifdef ENA_EXTENDED_STATS
 
 #define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
 #define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
 
 #endif /* ENA_EXTENDED_STATS */
 
 #define ENA_CTRL_MAJOR		0
 #define ENA_CTRL_MINOR		0
 #define ENA_CTRL_SUB_MINOR	1
 
 #define MIN_ENA_CTRL_VER \
 	(((ENA_CTRL_MAJOR) << \
 	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
 	((ENA_CTRL_MINOR) << \
 	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
 	(ENA_CTRL_SUB_MINOR))
 
 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
 
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
 #define ENA_MIN_ADMIN_POLL_US 100
 
 #define ENA_MAX_ADMIN_POLL_US 5000
 
 /* PHC definitions */
 #define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 20
 #define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
 #define ENA_PHC_TIMESTAMP_ERROR 0xFFFFFFFFFFFFFFFF
 #define ENA_PHC_REQ_ID_OFFSET 0xDEAD
 
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
 
 enum ena_cmd_status {
 	ENA_CMD_SUBMITTED,
 	ENA_CMD_COMPLETED,
 	/* Abort - canceled by the driver */
 	ENA_CMD_ABORTED,
 };
 
 struct ena_comp_ctx {
 	ena_wait_event_t wait_event;
 	struct ena_admin_acq_entry *user_cqe;
 	u32 comp_size;
 	enum ena_cmd_status status;
 	/* status from the device */
 	u8 comp_status;
 	u8 cmd_opcode;
 	bool occupied;
 };
 
 struct ena_com_stats_ctx {
 	struct ena_admin_aq_get_stats_cmd get_cmd;
 	struct ena_admin_acq_get_stats_resp get_resp;
 };
 
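 /* Populate an ena_common_mem_addr with the given DMA address, failing if the
  * address has more bits than the device's supported DMA address width.
  */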
 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
 				       struct ena_common_mem_addr *ena_addr,
 				       dma_addr_t addr)
 {
 	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
 		ena_trc_err(ena_dev, "DMA address has more bits that the device supports\n");
 		return ENA_COM_INVAL;
 	}
 
 	ena_addr->mem_addr_low = lower_32_bits(addr);
 	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
 
 	return 0;
 }
 
 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
 {
 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 	struct ena_com_admin_sq *sq = &admin_queue->sq;
 	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 
 	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
 			       sq->mem_handle);
 
 	if (!sq->entries) {
 		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
 	sq->head = 0;
 	sq->tail = 0;
 	sq->phase = 1;
 
 	sq->db_addr = NULL;
 
 	return 0;
 }
 
 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
 {
 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 	struct ena_com_admin_cq *cq = &admin_queue->cq;
 	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 
 	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
 			       cq->mem_handle);
 
 	if (!cq->entries)  {
 		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
 	cq->head = 0;
 	cq->phase = 1;
 
 	return 0;
 }
 
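 /* Allocate the AENQ ring, program its base address, depth and entry size into
  * the device registers and register the caller-supplied AENQ handlers.
  */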
 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
 				   struct ena_aenq_handlers *aenq_handlers)
 {
 	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u32 addr_low, addr_high, aenq_caps;
 	u16 size;
 
 	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
 			aenq->entries,
 			aenq->dma_addr,
 			aenq->mem_handle);
 
 	if (!aenq->entries) {
 		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
 	aenq->head = aenq->q_depth;
 	aenq->phase = 1;
 
 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 
 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 
 	aenq_caps = 0;
 	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
 	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
 		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
 		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
 	ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 
 	if (unlikely(!aenq_handlers)) {
 		ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
 		return ENA_COM_INVAL;
 	}
 
 	aenq->aenq_handlers = aenq_handlers;
 
 	return 0;
 }
 
 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 				     struct ena_comp_ctx *comp_ctx)
 {
 	comp_ctx->occupied = false;
 	ATOMIC32_DEC(&queue->outstanding_cmds);
 }
 
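 /* Return the completion context matching command_id. When capture is true the
  * context is marked occupied and counted as an outstanding command.
  */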
 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
 					  u16 command_id, bool capture)
 {
 	if (unlikely(command_id >= admin_queue->q_depth)) {
 		ena_trc_err(admin_queue->ena_dev,
 			    "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
 			    command_id, admin_queue->q_depth);
 		return NULL;
 	}
 
 	if (unlikely(!admin_queue->comp_ctx)) {
 		ena_trc_err(admin_queue->ena_dev,
 			    "Completion context is NULL\n");
 		return NULL;
 	}
 
 	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
 		ena_trc_err(admin_queue->ena_dev,
 			    "Completion context is occupied\n");
 		return NULL;
 	}
 
 	if (capture) {
 		ATOMIC32_INC(&admin_queue->outstanding_cmds);
 		admin_queue->comp_ctx[command_id].occupied = true;
 	}
 
 	return &admin_queue->comp_ctx[command_id];
 }
 
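 /* Copy an admin command into the next SQ entry and ring the SQ doorbell.
  * Called with the admin queue lock held; returns the completion context that
  * tracks the command, or an error pointer if the queue is full.
  */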
 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 						       struct ena_admin_aq_entry *cmd,
 						       size_t cmd_size_in_bytes,
 						       struct ena_admin_acq_entry *comp,
 						       size_t comp_size_in_bytes)
 {
 	struct ena_comp_ctx *comp_ctx;
 	u16 tail_masked, cmd_id;
 	u16 queue_size_mask;
 	u16 cnt;
 
 	queue_size_mask = admin_queue->q_depth - 1;
 
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
 	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
 		ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(ENA_COM_NO_SPACE);
 	}
 
 	cmd_id = admin_queue->curr_cmd_id;
 
 	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
 		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
 
 	cmd->aq_common_descriptor.command_id |= cmd_id &
 		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
 
 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
 	if (unlikely(!comp_ctx))
 		return ERR_PTR(ENA_COM_INVAL);
 
 	comp_ctx->status = ENA_CMD_SUBMITTED;
 	comp_ctx->comp_size = (u32)comp_size_in_bytes;
 	comp_ctx->user_cqe = comp;
 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
 
 	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
 
 	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
 
 	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
 		queue_size_mask;
 
 	admin_queue->sq.tail++;
 	admin_queue->stats.submitted_cmd++;
 
 	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
 		admin_queue->sq.phase = !admin_queue->sq.phase;
 
 	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
 	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
 			admin_queue->sq.db_addr);
 
 	return comp_ctx;
 }
 
 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
 {
 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
 	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
 	struct ena_comp_ctx *comp_ctx;
 	u16 i;
 
 	admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
 	if (unlikely(!admin_queue->comp_ctx)) {
 		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
 	for (i = 0; i < admin_queue->q_depth; i++) {
 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
 		if (comp_ctx)
 			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
 	}
 
 	return 0;
 }
 
 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
 						     struct ena_admin_aq_entry *cmd,
 						     size_t cmd_size_in_bytes,
 						     struct ena_admin_acq_entry *comp,
 						     size_t comp_size_in_bytes)
 {
 	unsigned long flags = 0;
 	struct ena_comp_ctx *comp_ctx;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	if (unlikely(!admin_queue->running_state)) {
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 		return ERR_PTR(ENA_COM_NO_DEVICE);
 	}
 	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
 					      cmd_size_in_bytes,
 					      comp,
 					      comp_size_in_bytes);
 	if (IS_ERR(comp_ctx))
 		admin_queue->running_state = false;
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
 	return comp_ctx;
 }
 
 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 			      struct ena_com_create_io_ctx *ctx,
 			      struct ena_com_io_sq *io_sq)
 {
 	size_t size;
 	int dev_node = 0;
 
 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
 	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
 	io_sq->desc_entry_size =
 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 		sizeof(struct ena_eth_io_tx_desc) :
 		sizeof(struct ena_eth_io_rx_desc);
 
 	size = io_sq->desc_entry_size * io_sq->q_depth;
 	io_sq->bus = ena_dev->bus;
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
 		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
 					    size,
 					    io_sq->desc_addr.virt_addr,
 					    io_sq->desc_addr.phys_addr,
 					    io_sq->desc_addr.mem_handle,
 					    ctx->numa_node,
 					    dev_node);
 		if (!io_sq->desc_addr.virt_addr) {
 			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 					       size,
 					       io_sq->desc_addr.virt_addr,
 					       io_sq->desc_addr.phys_addr,
 					       io_sq->desc_addr.mem_handle);
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
 			ena_trc_err(ena_dev, "Memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 	}
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		/* Allocate bounce buffers */
 		io_sq->bounce_buf_ctrl.buffer_size =
 			ena_dev->llq_info.desc_list_entry_size;
 		io_sq->bounce_buf_ctrl.buffers_num =
 			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
 		io_sq->bounce_buf_ctrl.next_to_use = 0;
 
 		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
 			io_sq->bounce_buf_ctrl.buffers_num;
 
 		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
 				   size,
 				   io_sq->bounce_buf_ctrl.base_buffer,
 				   ctx->numa_node,
 				   dev_node);
 		if (!io_sq->bounce_buf_ctrl.base_buffer)
 			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
 
 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
 			ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 
 		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
 		       sizeof(io_sq->llq_info));
 
 		/* Initialize the first bounce buffer */
 		io_sq->llq_buf_ctrl.curr_bounce_buf =
 			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
 		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
 		       0x0, io_sq->llq_info.desc_list_entry_size);
 		io_sq->llq_buf_ctrl.descs_left_in_line =
 			io_sq->llq_info.descs_num_before_header;
 		io_sq->disable_meta_caching =
 			io_sq->llq_info.disable_meta_caching;
 
 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 			io_sq->entries_in_tx_burst_left =
 				io_sq->llq_info.max_entries_in_tx_burst;
 	}
 
 	io_sq->tail = 0;
 	io_sq->next_to_comp = 0;
 	io_sq->phase = 1;
 
 	return 0;
 }
 
 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 			      struct ena_com_create_io_ctx *ctx,
 			      struct ena_com_io_cq *io_cq)
 {
 	size_t size;
 	int prev_node = 0;
 
 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
 
 	/* Use the basic completion descriptor for Rx */
 	io_cq->cdesc_entry_size_in_bytes =
 		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 		sizeof(struct ena_eth_io_tx_cdesc) :
 		sizeof(struct ena_eth_io_rx_cdesc_base);
 
 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 	io_cq->bus = ena_dev->bus;
 
 	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
 					    size,
 					    io_cq->cdesc_addr.virt_addr,
 					    io_cq->cdesc_addr.phys_addr,
 					    io_cq->cdesc_addr.mem_handle,
 					    ctx->numa_node,
 					    prev_node,
 					    ENA_CDESC_RING_SIZE_ALIGNMENT);
 	if (!io_cq->cdesc_addr.virt_addr) {
 		ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
 					       size,
 					       io_cq->cdesc_addr.virt_addr,
 					       io_cq->cdesc_addr.phys_addr,
 					       io_cq->cdesc_addr.mem_handle,
 					       ENA_CDESC_RING_SIZE_ALIGNMENT);
 	}
 
 	if (!io_cq->cdesc_addr.virt_addr) {
 		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
 	io_cq->phase = 1;
 	io_cq->head = 0;
 
 	return 0;
 }
 
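 /* Handle a single admin completion: mark the matching completion context as
  * completed, copy the completion entry to the caller's buffer and, when not in
  * polling mode, wake up the waiter.
  */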
 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
 						   struct ena_admin_acq_entry *cqe)
 {
 	struct ena_comp_ctx *comp_ctx;
 	u16 cmd_id;
 
 	cmd_id = cqe->acq_common_descriptor.command &
 		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
 
 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
 	if (unlikely(!comp_ctx)) {
 		ena_trc_err(admin_queue->ena_dev,
 			    "comp_ctx is NULL. Changing the admin queue running state\n");
 		admin_queue->running_state = false;
 		return;
 	}
 
 	comp_ctx->status = ENA_CMD_COMPLETED;
 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
 
 	if (comp_ctx->user_cqe)
 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
 
 	if (!admin_queue->polling)
 		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
 }
 
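 /* Walk the admin completion queue, handling every entry whose phase bit
  * matches the current phase, then advance the CQ/SQ heads and statistics.
  */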
 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
 {
 	struct ena_admin_acq_entry *cqe = NULL;
 	u16 comp_num = 0;
 	u16 head_masked;
 	u8 phase;
 
 	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
 	phase = admin_queue->cq.phase;
 
 	cqe = &admin_queue->cq.entries[head_masked];
 
 	/* Go over all the completions */
 	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
 			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
 		/* Do not read the rest of the completion entry before the
 		 * phase bit has been validated
 		 */
 		dma_rmb();
 		ena_com_handle_single_admin_completion(admin_queue, cqe);
 
 		head_masked++;
 		comp_num++;
 		if (unlikely(head_masked == admin_queue->q_depth)) {
 			head_masked = 0;
 			phase = !phase;
 		}
 
 		cqe = &admin_queue->cq.entries[head_masked];
 	}
 
 	admin_queue->cq.head += comp_num;
 	admin_queue->cq.phase = phase;
 	admin_queue->sq.head += comp_num;
 	admin_queue->stats.completed_cmd += comp_num;
 }
 
 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
 					u8 comp_status)
 {
 	if (unlikely(comp_status != 0))
 		ena_trc_err(admin_queue->ena_dev,
 			    "Admin command failed[%u]\n", comp_status);
 
 	switch (comp_status) {
 	case ENA_ADMIN_SUCCESS:
 		return ENA_COM_OK;
 	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
 		return ENA_COM_NO_MEM;
 	case ENA_ADMIN_UNSUPPORTED_OPCODE:
 		return ENA_COM_UNSUPPORTED;
 	case ENA_ADMIN_BAD_OPCODE:
 	case ENA_ADMIN_MALFORMED_REQUEST:
 	case ENA_ADMIN_ILLEGAL_PARAMETER:
 	case ENA_ADMIN_UNKNOWN_ERROR:
 		return ENA_COM_INVAL;
 	case ENA_ADMIN_RESOURCE_BUSY:
 		return ENA_COM_TRY_AGAIN;
 	}
 
 	return ENA_COM_INVAL;
 }
 
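 /* Sleep for delay_us scaled by 2^exp, clamped to the
  * [ENA_MIN_ADMIN_POLL_US, ENA_MAX_ADMIN_POLL_US] range.
  */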
 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
 {
 	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
 	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
 	ENA_USLEEP(delay_us);
 }
 
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
 	unsigned long flags = 0;
 	ena_time_t timeout;
 	int ret;
 	u32 exp = 0;
 
 	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
 
 	while (1) {
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 		ena_com_handle_admin_completion(admin_queue);
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
 		if (comp_ctx->status != ENA_CMD_SUBMITTED)
 			break;
 
 		if (ENA_TIME_EXPIRE(timeout)) {
 			ena_trc_err(admin_queue->ena_dev,
 				    "Wait for completion (polling) timeout\n");
 			/* ENA didn't have any completion */
 			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 			admin_queue->stats.no_completion++;
 			admin_queue->running_state = false;
 			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
 			ret = ENA_COM_TIMER_EXPIRED;
 			goto err;
 		}
 
 		ena_delay_exponential_backoff_us(exp++,
 						 admin_queue->ena_dev->ena_min_poll_delay_us);
 	}
 
 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
 		ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 		admin_queue->stats.aborted_cmd++;
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 		ret = ENA_COM_NO_DEVICE;
 		goto err;
 	}
 
 	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
 		 admin_queue->ena_dev, "Invalid comp status %d\n",
 		 comp_ctx->status);
 
 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 err:
 	comp_ctxt_release(admin_queue, comp_ctx);
 	return ret;
 }
 
 /*
  * Set the LLQ configuration in the firmware.
  *
  * The driver provides only the enabled feature values to the device,
  * which in turn checks whether they are supported.
  */
 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 	int ret;
 
 	memset(&cmd, 0x0, sizeof(cmd));
 	admin_queue = &ena_dev->admin_queue;
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
 
 	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
 	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 
 	cmd.u.llq.accel_mode.u.set.enabled_flags =
 		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
 		BIT(ENA_ADMIN_LIMIT_TX_BURST);
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);
 
 	return ret;
 }
 
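 /* Negotiate the LLQ configuration: for each attribute, use the driver default
  * when the device supports it, otherwise fall back to a supported value, and
  * finally push the chosen configuration to the device.
  */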
 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 				   struct ena_admin_feature_llq_desc *llq_features,
 				   struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 	struct ena_admin_accel_mode_get llq_accel_mode_get;
 	u16 supported_feat;
 	int rc;
 
 	memset(llq_info, 0, sizeof(*llq_info));
 
 	supported_feat = llq_features->header_location_ctrl_supported;
 
 	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
 		llq_info->header_location_ctrl =
 			llq_default_cfg->llq_header_location;
 	} else {
 		ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
 			    supported_feat);
 		return ENA_COM_INVAL;
 	}
 
 	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
 		supported_feat = llq_features->descriptors_stride_ctrl_supported;
 		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
 			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
 		} else	{
 			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
 				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
 				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
 			} else {
 				ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
 					    supported_feat);
 				return ENA_COM_INVAL;
 			}
 
 			ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 				    llq_default_cfg->llq_stride_ctrl,
 				    supported_feat,
 				    llq_info->desc_stride_ctrl);
 		}
 	} else {
 		llq_info->desc_stride_ctrl = 0;
 	}
 
 	supported_feat = llq_features->entry_size_ctrl_supported;
 	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
 		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
 		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
 	} else {
 		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 			llq_info->desc_list_entry_size = 128;
 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
 			llq_info->desc_list_entry_size = 192;
 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
 			llq_info->desc_list_entry_size = 256;
 		} else {
 			ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
 				    supported_feat);
 			return ENA_COM_INVAL;
 		}
 
 		ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 			    llq_default_cfg->llq_ring_entry_size,
 			    supported_feat,
 			    llq_info->desc_list_entry_size);
 	}
 	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
 		/* The desc list entry size should be a whole multiple of 8.
 		 * This requirement comes from __iowrite64_copy()
 		 */
 		ena_trc_err(ena_dev, "Illegal entry size %d\n",
 			    llq_info->desc_list_entry_size);
 		return ENA_COM_INVAL;
 	}
 
 	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
 			sizeof(struct ena_eth_io_tx_desc);
 	else
 		llq_info->descs_per_entry = 1;
 
 	supported_feat = llq_features->desc_num_before_header_supported;
 	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
 		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
 	} else {
 		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
 		} else {
 			ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
 				    supported_feat);
 			return ENA_COM_INVAL;
 		}
 
 		ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
 			    llq_default_cfg->llq_num_decs_before_header,
 			    supported_feat,
 			    llq_info->descs_num_before_header);
 	}
 	/* Check whether the accelerated queue is supported */
 	llq_accel_mode_get = llq_features->accel_mode.u.get;
 
 	llq_info->disable_meta_caching =
 		!!(llq_accel_mode_get.supported_flags &
 		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
 
 	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
 		llq_info->max_entries_in_tx_burst =
 			llq_accel_mode_get.max_tx_burst_size /
 			llq_default_cfg->llq_ring_entry_size_value;
 
 	rc = ena_com_set_llq(ena_dev);
 	if (rc)
 		ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
 
 	return rc;
 }
 
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 							struct ena_com_admin_queue *admin_queue)
 {
 	unsigned long flags = 0;
 	int ret;
 
 	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
 			    admin_queue->completion_timeout);
 
 	/* In case the command wasn't completed, find out the root cause.
 	 * There might be 2 kinds of errors:
 	 * 1) No completion (timeout reached)
 	 * 2) There is a completion, but the driver didn't receive an MSI-X interrupt.
 	 */
 	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 		ena_com_handle_admin_completion(admin_queue);
 		admin_queue->stats.no_completion++;
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
 			ena_trc_err(admin_queue->ena_dev,
 				    "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
 				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
 			/* Check if fallback to polling is enabled */
 			if (admin_queue->auto_polling)
 				admin_queue->polling = true;
 		} else {
 			ena_trc_err(admin_queue->ena_dev,
 				    "The ena device didn't send a completion for the admin cmd %d status %d\n",
 				    comp_ctx->cmd_opcode, comp_ctx->status);
 		}
 		/* Check if the driver shifted to polling mode.
 		 * This will happen if there is a completion without an interrupt
 		 * and autopolling mode is enabled. Continue normal execution in such a case.
 		 */
 		if (!admin_queue->polling) {
 			admin_queue->running_state = false;
 			ret = ENA_COM_TIMER_EXPIRED;
 			goto err;
 		}
 	}
 
 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 err:
 	comp_ctxt_release(admin_queue, comp_ctx);
 	return ret;
 }
 
 /* This method reads a hardware device register by posting a write
  * and waiting for the response.
  * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
  */
 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
 		mmio_read->read_resp;
 	u32 mmio_read_reg, ret, i;
 	unsigned long flags = 0;
 	u32 timeout = mmio_read->reg_read_to;
 
 	ENA_MIGHT_SLEEP();
 
 	if (timeout == 0)
 		timeout = ENA_REG_READ_TIMEOUT;
 
 	/* If readless is disabled, perform regular read */
 	if (!mmio_read->readless_supported)
 		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
 
 	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
 	mmio_read->seq_num++;
 
 	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
 	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
 			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
 	mmio_read_reg |= mmio_read->seq_num &
 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
 	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
 			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
 	for (i = 0; i < timeout; i++) {
 		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
 			break;
 
 		ENA_UDELAY(1);
 	}
 
 	if (unlikely(i == timeout)) {
 		ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
 			    mmio_read->seq_num,
 			    offset,
 			    read_resp->req_id,
 			    read_resp->reg_off);
 		ret = ENA_MMIO_READ_TIMEOUT;
 		goto err;
 	}
 
 	if (read_resp->reg_off != offset) {
 		ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
 		ret = ENA_MMIO_READ_TIMEOUT;
 	} else {
 		ret = read_resp->reg_val;
 	}
 err:
 	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
 
 	return ret;
 }
 
 /* There are two ways to wait for a completion.
  * Polling mode - poll until the completion is available.
  * Async mode - wait on a wait queue until the completion is ready
  * (or the timeout expires).
  * In async mode it is expected that the IRQ handler called
  * ena_com_handle_admin_completion to mark the completions.
  */
 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
 					     struct ena_com_admin_queue *admin_queue)
 {
 	if (admin_queue->polling)
 		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
 								 admin_queue);
 
 	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
 							    admin_queue);
 }
 
 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
 				 struct ena_com_io_sq *io_sq)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
 	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
 	u8 direction;
 	int ret;
 
 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
 
 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
 	else
 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
 
 	destroy_cmd.sq.sq_identity |= (direction <<
 		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
 		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
 
 	destroy_cmd.sq.sq_idx = io_sq->idx;
 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&destroy_cmd,
 					    sizeof(destroy_cmd),
 					    (struct ena_admin_acq_entry *)&destroy_resp,
 					    sizeof(destroy_resp));
 
 	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
 		ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
 
 	return ret;
 }
 
 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 				  struct ena_com_io_sq *io_sq,
 				  struct ena_com_io_cq *io_cq)
 {
 	size_t size;
 
 	if (io_cq->cdesc_addr.virt_addr) {
 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      size,
 				      io_cq->cdesc_addr.virt_addr,
 				      io_cq->cdesc_addr.phys_addr,
 				      io_cq->cdesc_addr.mem_handle);
 
 		io_cq->cdesc_addr.virt_addr = NULL;
 	}
 
 	if (io_sq->desc_addr.virt_addr) {
 		size = io_sq->desc_entry_size * io_sq->q_depth;
 
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      size,
 				      io_sq->desc_addr.virt_addr,
 				      io_sq->desc_addr.phys_addr,
 				      io_sq->desc_addr.mem_handle);
 
 		io_sq->desc_addr.virt_addr = NULL;
 	}
 
 	if (io_sq->bounce_buf_ctrl.base_buffer) {
 		ENA_MEM_FREE(ena_dev->dmadev,
 			     io_sq->bounce_buf_ctrl.base_buffer,
 			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
 	}
 }
 
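 /* Poll the device status register until the reset-in-progress bit matches
  * exp_state or the timeout (given in units of 100 ms) expires.
  */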
 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 				u16 exp_state)
 {
 	u32 val, exp = 0;
 	ena_time_t timeout_stamp;
 
 	/* Convert timeout from resolution of 100ms to us resolution. */
 	timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
 
 	while (1) {
 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
 			ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 			return ENA_COM_TIMER_EXPIRED;
 		}
 
 		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
 			exp_state)
 			return 0;
 
 		if (ENA_TIME_EXPIRE(timeout_stamp))
 			return ENA_COM_TIMER_EXPIRED;
 
 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
 	}
 }
 
 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
 					       enum ena_admin_aq_feature_id feature_id)
 {
 	u32 feature_mask = 1 << feature_id;
 
 	/* Device attributes are always supported */
 	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
 	    !(ena_dev->supported_features & feature_mask))
 		return false;
 
 	return true;
 }
 
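 /* Execute an ENA_ADMIN_GET_FEATURE command for feature_id, optionally
  * attaching an indirect control buffer for feature data that doesn't fit in
  * the completion entry.
  */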
 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 				  struct ena_admin_get_feat_resp *get_resp,
 				  enum ena_admin_aq_feature_id feature_id,
 				  dma_addr_t control_buf_dma_addr,
 				  u32 control_buff_size,
 				  u8 feature_ver)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_get_feat_cmd get_cmd;
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&get_cmd, 0x0, sizeof(get_cmd));
 	admin_queue = &ena_dev->admin_queue;
 
 	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
 
 	if (control_buff_size)
 		get_cmd.aq_common_descriptor.flags =
 			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
 	else
 		get_cmd.aq_common_descriptor.flags = 0;
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &get_cmd.control_buffer.address,
 				   control_buf_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 
 	get_cmd.control_buffer.length = control_buff_size;
 	get_cmd.feat_common.feature_version = feature_ver;
 	get_cmd.feat_common.feature_id = feature_id;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)
 					    &get_cmd,
 					    sizeof(get_cmd),
 					    (struct ena_admin_acq_entry *)
 					    get_resp,
 					    sizeof(*get_resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
 			    feature_id, ret);
 
 	return ret;
 }
 
 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 			       struct ena_admin_get_feat_resp *get_resp,
 			       enum ena_admin_aq_feature_id feature_id,
 			       u8 feature_ver)
 {
 	return ena_com_get_feature_ex(ena_dev,
 				      get_resp,
 				      feature_id,
 				      0,
 				      0,
 				      feature_ver);
 }
 
 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->rss.hash_func;
 }
 
 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
 		(ena_dev->rss).hash_key;
 
 	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
 	/* The key buffer is stored in the device in an array of
 	 * uint32 elements.
 	 */
 	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
 }
 
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
 		return ENA_COM_UNSUPPORTED;
 
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       sizeof(*rss->hash_key),
 			       rss->hash_key,
 			       rss->hash_key_dma_addr,
 			       rss->hash_key_mem_handle);
 
 	if (unlikely(!rss->hash_key))
 		return ENA_COM_NO_MEM;
 
 	return 0;
 }
 
 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
 	if (rss->hash_key)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      sizeof(*rss->hash_key),
 				      rss->hash_key,
 				      rss->hash_key_dma_addr,
 				      rss->hash_key_mem_handle);
 	rss->hash_key = NULL;
 }
 
 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       sizeof(*rss->hash_ctrl),
 			       rss->hash_ctrl,
 			       rss->hash_ctrl_dma_addr,
 			       rss->hash_ctrl_mem_handle);
 
 	if (unlikely(!rss->hash_ctrl))
 		return ENA_COM_NO_MEM;
 
 	return 0;
 }
 
 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
 	if (rss->hash_ctrl)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      sizeof(*rss->hash_ctrl),
 				      rss->hash_ctrl,
 				      rss->hash_ctrl_dma_addr,
 				      rss->hash_ctrl_mem_handle);
 	rss->hash_ctrl = NULL;
 }
 
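 /* Validate the requested indirection table size against the device limits,
  * then allocate the DMA-coherent device table and the host shadow table.
  */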
 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
 					   u16 log_size)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_get_feat_resp get_resp;
 	size_t tbl_size;
 	int ret;
 
 	ret = ena_com_get_feature(ena_dev, &get_resp,
 				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
 	if (unlikely(ret))
 		return ret;
 
 	if ((get_resp.u.ind_table.min_size > log_size) ||
 	    (get_resp.u.ind_table.max_size < log_size)) {
 		ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
 			    1 << log_size,
 			    1 << get_resp.u.ind_table.min_size,
 			    1 << get_resp.u.ind_table.max_size);
 		return ENA_COM_INVAL;
 	}
 
 	tbl_size = (1ULL << log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			     tbl_size,
 			     rss->rss_ind_tbl,
 			     rss->rss_ind_tbl_dma_addr,
 			     rss->rss_ind_tbl_mem_handle);
 	if (unlikely(!rss->rss_ind_tbl))
 		goto mem_err1;
 
 	tbl_size = (1ULL << log_size) * sizeof(u16);
 	rss->host_rss_ind_tbl =
 		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
 	if (unlikely(!rss->host_rss_ind_tbl))
 		goto mem_err2;
 
 	rss->tbl_log_size = log_size;
 
 	return 0;
 
 mem_err2:
 	tbl_size = (1ULL << log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 			      tbl_size,
 			      rss->rss_ind_tbl,
 			      rss->rss_ind_tbl_dma_addr,
 			      rss->rss_ind_tbl_mem_handle);
 	rss->rss_ind_tbl = NULL;
 mem_err1:
 	rss->tbl_log_size = 0;
 	return ENA_COM_NO_MEM;
 }
 
 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	size_t tbl_size = (1ULL << rss->tbl_log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
 	if (rss->rss_ind_tbl)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      tbl_size,
 				      rss->rss_ind_tbl,
 				      rss->rss_ind_tbl_dma_addr,
 				      rss->rss_ind_tbl_mem_handle);
 	rss->rss_ind_tbl = NULL;
 
 	if (rss->host_rss_ind_tbl)
 		ENA_MEM_FREE(ena_dev->dmadev,
 			     rss->host_rss_ind_tbl,
 			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
 	rss->host_rss_ind_tbl = NULL;
 }
 
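 /* Build and execute an ENA_ADMIN_CREATE_SQ command for the given IO SQ, then
  * record the returned SQ index, doorbell address and, for LLQ, the device
  * memory descriptor area.
  */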
 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 				struct ena_com_io_sq *io_sq, u16 cq_idx)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_admin_aq_create_sq_cmd create_cmd;
 	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
 	u8 direction;
 	int ret;
 
 	memset(&create_cmd, 0x0, sizeof(create_cmd));
 
 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
 
 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
 	else
 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
 
 	create_cmd.sq_identity |= (direction <<
 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
 
 	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
 		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
 
 	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
 
 	create_cmd.sq_caps_3 |=
 		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
 
 	create_cmd.cq_idx = cq_idx;
 	create_cmd.sq_depth = io_sq->q_depth;
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
 		ret = ena_com_mem_addr_set(ena_dev,
 					   &create_cmd.sq_ba,
 					   io_sq->desc_addr.phys_addr);
 		if (unlikely(ret)) {
 			ena_trc_err(ena_dev, "Memory address set failed\n");
 			return ret;
 		}
 	}
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&create_cmd,
 					    sizeof(create_cmd),
 					    (struct ena_admin_acq_entry *)&cmd_completion,
 					    sizeof(cmd_completion));
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
 		return ret;
 	}
 
 	io_sq->idx = cmd_completion.sq_idx;
 
 	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 		(uintptr_t)cmd_completion.sq_doorbell_offset);
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		io_sq->desc_addr.pbuf_dev_addr =
 			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
 			cmd_completion.llq_descriptors_offset);
 	}
 
 	ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
 
 	return ret;
 }
 
 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_com_io_sq *io_sq;
 	u16 qid;
 	int i;
 
 	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
 		qid = rss->host_rss_ind_tbl[i];
 		if (qid >= ENA_TOTAL_NUM_QUEUES)
 			return ENA_COM_INVAL;
 
 		io_sq = &ena_dev->io_sq_queues[qid];
 
 		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
 			return ENA_COM_INVAL;
 
 		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
 	}
 
 	return 0;
 }
 
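 /* Rescale the stored Rx/Tx interrupt moderation intervals from the previous
  * delay resolution to the resolution reported by the device.
  */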
 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
 						 u16 intr_delay_resolution)
 {
 	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
 
 	if (unlikely(!intr_delay_resolution)) {
 		ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
 		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
 	}
 
 	/* update Rx */
 	ena_dev->intr_moder_rx_interval =
 		ena_dev->intr_moder_rx_interval *
 		prev_intr_delay_resolution /
 		intr_delay_resolution;
 
 	/* update Tx */
 	ena_dev->intr_moder_tx_interval =
 		ena_dev->intr_moder_tx_interval *
 		prev_intr_delay_resolution /
 		intr_delay_resolution;
 
 	ena_dev->intr_delay_resolution = intr_delay_resolution;
 }
 
 /*****************************************************************************/
 /*******************************      API       ******************************/
 /*****************************************************************************/
 
 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
 				  struct ena_admin_aq_entry *cmd,
 				  size_t cmd_size,
 				  struct ena_admin_acq_entry *comp,
 				  size_t comp_size)
 {
 	struct ena_comp_ctx *comp_ctx;
 	int ret;
 
 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
 					    comp, comp_size);
 	if (IS_ERR(comp_ctx)) {
 		ret = PTR_ERR(comp_ctx);
 		if (ret == ENA_COM_NO_DEVICE)
 			ena_trc_dbg(admin_queue->ena_dev,
 				    "Failed to submit command [%d]\n",
 				    ret);
 		else
 			ena_trc_err(admin_queue->ena_dev,
 				    "Failed to submit command [%d]\n",
 				    ret);
 
 		return ret;
 	}
 
 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
 	if (unlikely(ret)) {
 		if (admin_queue->running_state)
 			ena_trc_err(admin_queue->ena_dev,
 				    "Failed to process command. ret = %d\n", ret);
 		else
 			ena_trc_dbg(admin_queue->ena_dev,
 				    "Failed to process command. ret = %d\n", ret);
 	}
 	return ret;
 }
 
 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 			 struct ena_com_io_cq *io_cq)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_admin_aq_create_cq_cmd create_cmd;
 	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
 	int ret;
 
 	memset(&create_cmd, 0x0, sizeof(create_cmd));
 
 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
 
 	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
 		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
 	create_cmd.cq_caps_1 |=
 		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
 
 	create_cmd.msix_vector = io_cq->msix_vector;
 	create_cmd.cq_depth = io_cq->q_depth;
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &create_cmd.cq_ba,
 				   io_cq->cdesc_addr.phys_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&create_cmd,
 					    sizeof(create_cmd),
 					    (struct ena_admin_acq_entry *)&cmd_completion,
 					    sizeof(cmd_completion));
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
 		return ret;
 	}
 
 	io_cq->idx = cmd_completion.cq_idx;
 
 	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 		cmd_completion.cq_interrupt_unmask_register_offset);
 
 	if (cmd_completion.numa_node_register_offset)
 		io_cq->numa_node_cfg_reg =
 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 			cmd_completion.numa_node_register_offset);
 
 	ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
 
 	return ret;
 }
 
 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
 			    struct ena_com_io_sq **io_sq,
 			    struct ena_com_io_cq **io_cq)
 {
 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
 		ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
 			    qid, ENA_TOTAL_NUM_QUEUES);
 		return ENA_COM_INVAL;
 	}
 
 	*io_sq = &ena_dev->io_sq_queues[qid];
 	*io_cq = &ena_dev->io_cq_queues[qid];
 
 	return 0;
 }
 
 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_comp_ctx *comp_ctx;
 	u16 i;
 
 	if (!admin_queue->comp_ctx)
 		return;
 
 	for (i = 0; i < admin_queue->q_depth; i++) {
 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
 		if (unlikely(!comp_ctx))
 			break;
 
 		comp_ctx->status = ENA_CMD_ABORTED;
 
 		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
 	}
 }
 
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	unsigned long flags = 0;
 	u32 exp = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	}
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 }
 
 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
 			  struct ena_com_io_cq *io_cq)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
 	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
 	int ret;
 
 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
 
 	destroy_cmd.cq_idx = io_cq->idx;
 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&destroy_cmd,
 					    sizeof(destroy_cmd),
 					    (struct ena_admin_acq_entry *)&destroy_resp,
 					    sizeof(destroy_resp));
 
 	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
 		ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
 
 	return ret;
 }
 
 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->admin_queue.running_state;
 }
 
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	unsigned long flags = 0;
 
 	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 	ena_dev->admin_queue.running_state = state;
 	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 }
 
 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
 {
 	u16 depth = ena_dev->aenq.q_depth;
 
 	ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
 
 	/* Init head_db to mark that all entries in the queue
 	 * are initially available
 	 */
 	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
 }
 
 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 	struct ena_admin_get_feat_resp get_resp;
 	int ret;
 
 	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
 	if (ret) {
 		ena_trc_info(ena_dev, "Can't get aenq configuration\n");
 		return ret;
 	}
 
 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
 		ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
 			     get_resp.u.aenq.supported_groups,
 			     groups_flag);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&cmd, 0x0, sizeof(cmd));
 	admin_queue = &ena_dev->admin_queue;
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.aq_common_descriptor.flags = 0;
 	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
 	cmd.u.aenq.enabled_groups = groups_flag;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
 
 	return ret;
 }
 
 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
 {
 	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 	u32 width;
 
 	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
 		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
 
 	ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
 
 	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
 		ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
 		return ENA_COM_INVAL;
 	}
 
 	ena_dev->dma_addr_bits = width;
 
 	return width;
 }
 
 int ena_com_validate_version(struct ena_com_dev *ena_dev)
 {
 	u32 ver;
 	u32 ctrl_ver;
 	u32 ctrl_ver_masked;
 
 	/* Make sure the ENA version and the controller version are at least
 	 * the versions the driver expects
 	 */
 	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
 					  ENA_REGS_CONTROLLER_VERSION_OFF);
 
 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
 		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
 		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
 		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
 		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
 	ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
 		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
 		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
 		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
 
 	ctrl_ver_masked =
 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
 
 	/* Validate the ctrl version without the implementation ID */
 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
 		ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
 		return -1;
 	}
 
 	return 0;
 }
 
 static void
 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
 				      struct ena_com_admin_queue *admin_queue)
 
 {
 	if (!admin_queue->comp_ctx)
 		return;
 
 	ENA_WAIT_EVENTS_DESTROY(admin_queue);
 	ENA_MEM_FREE(ena_dev->dmadev,
 		     admin_queue->comp_ctx,
 		     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
 
 	admin_queue->comp_ctx = NULL;
 }
 
 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_com_admin_cq *cq = &admin_queue->cq;
 	struct ena_com_admin_sq *sq = &admin_queue->sq;
 	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u16 size;
 
 	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
 
 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 	if (sq->entries)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
 				      sq->dma_addr, sq->mem_handle);
 	sq->entries = NULL;
 
 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 	if (cq->entries)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
 				      cq->dma_addr, cq->mem_handle);
 	cq->entries = NULL;
 
 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
 	if (ena_dev->aenq.entries)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
 				      aenq->dma_addr, aenq->mem_handle);
 	aenq->entries = NULL;
 	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
 }
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
 	u32 mask_value = 0;
 
 	if (polling)
 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
 
 	ENA_REG_WRITE32(ena_dev->bus, mask_value,
 			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
 
 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->admin_queue.polling;
 }
 
 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
 					 bool polling)
 {
 	ena_dev->admin_queue.auto_polling = polling;
 }
 
 bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
 {
 	return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
 }
 
 int ena_com_phc_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_phc_info *phc = &ena_dev->phc;
 
 	memset(phc, 0x0, sizeof(*phc));
 
 	/* Allocate shared memory for the PHC timestamp retrieved from the device */
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       sizeof(*phc->virt_addr),
 			       phc->virt_addr,
 			       phc->phys_addr,
 			       phc->mem_handle);
 	if (unlikely(!phc->virt_addr))
 		return ENA_COM_NO_MEM;
 
 	ENA_SPINLOCK_INIT(phc->lock);
 
 	phc->virt_addr->req_id = 0;
 	phc->virt_addr->timestamp = 0;
 
 	return 0;
 }
 
 int ena_com_phc_config(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_phc_info *phc = &ena_dev->phc;
 	struct ena_admin_get_feat_resp get_feat_resp;
 	struct ena_admin_set_feat_resp set_feat_resp;
 	struct ena_admin_set_feat_cmd set_feat_cmd;
 	int ret = 0;
 
 	/* Get device PHC default configuration */
 	ret = ena_com_get_feature(ena_dev, &get_feat_resp, ENA_ADMIN_PHC_CONFIG, 0);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed to get PHC feature configuration, error: %d\n", ret);
 		return ret;
 	}
 
 	/* Supporting only readless PHC retrieval */
 	if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
 		ena_trc_err(ena_dev, "Unsupprted PHC type, error: %d\n", ENA_COM_UNSUPPORTED);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	/* Update PHC doorbell offset according to device value, used to write req_id to PHC bar */
 	phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
 
 	/* Update PHC expire timeout according to device or default driver value */
 	phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
 				    get_feat_resp.u.phc.expire_timeout_usec :
 				    ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
 
 	/* Update PHC block timeout according to device or default driver value */
 	phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
 				   get_feat_resp.u.phc.block_timeout_usec :
 				   ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
 
 	/* Sanity check - expire timeout must not be above block timeout */
 	if (phc->expire_timeout_usec > phc->block_timeout_usec)
 		phc->expire_timeout_usec = phc->block_timeout_usec;
 
 	/* Prepare PHC feature command with PHC output address */
 	memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
 	set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
 	set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
 	ret = ena_com_mem_addr_set(ena_dev, &set_feat_cmd.u.phc.output_address, phc->phys_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed setting PHC output address, error: %d\n", ret);
 		return ret;
 	}
 
 	/* Send PHC feature command to the device */
 	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
 					    (struct ena_admin_aq_entry *)&set_feat_cmd,
 					    sizeof(set_feat_cmd),
 					    (struct ena_admin_acq_entry *)&set_feat_resp,
 					    sizeof(set_feat_resp));
 
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed to enable PHC, error: %d\n", ret);
 		return ret;
 	}
 
 	phc->active = true;
 	ena_trc_dbg(ena_dev, "PHC is active in the device\n");
 
 	return ret;
 }
 
 void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_phc_info *phc = &ena_dev->phc;
 
 	phc->active = false;
 
 	/* In case PHC is not supported by the device, silently exit */
 	if (!phc->virt_addr)
 		return;
 
 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 			      sizeof(*phc->virt_addr),
 			      phc->virt_addr,
 			      phc->phys_addr,
 			      phc->mem_handle);
 	phc->virt_addr = NULL;
 
 	ENA_SPINLOCK_DESTROY(phc->lock);
 }
 
 int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
 {
 	volatile struct ena_admin_phc_resp *read_resp = ena_dev->phc.virt_addr;
 	struct ena_com_phc_info *phc = &ena_dev->phc;
 	ena_time_high_res_t initial_time = ENA_TIME_INIT_HIGH_RES();
 	static ena_time_high_res_t start_time;
 	unsigned long flags = 0;
 	ena_time_high_res_t expire_time;
 	ena_time_high_res_t block_time;
 	int ret = ENA_COM_OK;
 
 	if (!phc->active) {
 		ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	ENA_SPINLOCK_LOCK(phc->lock, flags);
 
 	/* Check if PHC is in blocked state */
 	if (unlikely(ENA_TIME_COMPARE_HIGH_RES(start_time, initial_time))) {
 		/* Check if blocking time expired */
 		block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->block_timeout_usec);
 		if (!ENA_TIME_EXPIRE_HIGH_RES(block_time)) {
 			/* PHC is still in blocked state, skip PHC request */
 			phc->stats.phc_skp++;
 			ret = ENA_COM_DEVICE_BUSY;
 			goto skip;
 		}
 
 		/* Blocking time has passed, PHC is back in active state; update statistics according to req_id and timestamp */
 		if ((READ_ONCE16(read_resp->req_id) != phc->req_id) ||
 		    (read_resp->timestamp == ENA_PHC_TIMESTAMP_ERROR)) {
 			/* Device didn't update req_id during blocking time or timestamp is invalid,
 			 * this indicates a device error
 			 */
 			phc->stats.phc_err++;
 		} else {
 			/* Device updated req_id during blocking time with valid timestamp */
 			phc->stats.phc_exp++;
 		}
 	}
 
 	/* Setting relative timeouts */
 	start_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
 	block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->block_timeout_usec);
 	expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->expire_timeout_usec);
 
 	/* We expect the device to return this req_id once the new PHC timestamp is updated */
 	phc->req_id++;
 
 	/* Initialize PHC shared memory with a different req_id value so it is possible to identify
 	 * when the device changes it to req_id
 	 */
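 	/* Illustrative example (hypothetical values): if req_id is 5, the shared
 	 * memory is primed with 5 + ENA_PHC_REQ_ID_OFFSET, which differs from the
 	 * expected req_id, so a later read of 5 can only have been written by the
 	 * device.
 	 */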
 	read_resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
 
 	/* Writing req_id to PHC bar */
 	ENA_REG_WRITE32(ena_dev->bus, phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
 
 	/* Stalling until the device updates req_id */
 	while (1) {
 		if (unlikely(ENA_TIME_EXPIRE_HIGH_RES(expire_time))) {
 			/* Gave up waiting for an updated req_id, PHC enters a blocked state until
 			 * the blocking time passes
 			 */
 			ret = ENA_COM_DEVICE_BUSY;
 			break;
 		}
 
 		/* Check if req_id was updated by the device */
 		if (READ_ONCE16(read_resp->req_id) != phc->req_id) {
 			/* req_id was not updated by the device, check again on next loop */
 			continue;
 		}
 
 		/* req_id was updated which indicates that PHC timestamp was updated too */
 		*timestamp = read_resp->timestamp;
 
 		/* PHC timestamp validity check */
 		if (unlikely(*timestamp == ENA_PHC_TIMESTAMP_ERROR)) {
 			/* Retrieved an invalid PHC timestamp, PHC enters a blocked state until
 			 * the blocking time passes
 			 */
 			ret = ENA_COM_DEVICE_BUSY;
 			break;
 		}
 
 		/* Retrieved valid PHC timestamp */
 		phc->stats.phc_cnt++;
 
 		/* This indicates PHC state is active */
 		start_time = initial_time;
 		break;
 	}
 
 skip:
 	ENA_SPINLOCK_UNLOCK(phc->lock, flags);
 
 	return ret;
 }
 
 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 
 	ENA_SPINLOCK_INIT(mmio_read->lock);
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       sizeof(*mmio_read->read_resp),
 			       mmio_read->read_resp,
 			       mmio_read->read_resp_dma_addr,
 			       mmio_read->read_resp_mem_handle);
 	if (unlikely(!mmio_read->read_resp))
 		goto err;
 
 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
 
 	mmio_read->read_resp->req_id = 0x0;
 	mmio_read->seq_num = 0x0;
 	mmio_read->readless_supported = true;
 
 	return 0;
 
 err:
 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
 	return ENA_COM_NO_MEM;
 }
 
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 
 	mmio_read->readless_supported = readless_supported;
 }
 
 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 
 	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
 	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
 
 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 			      sizeof(*mmio_read->read_resp),
 			      mmio_read->read_resp,
 			      mmio_read->read_resp_dma_addr,
 			      mmio_read->read_resp_mem_handle);
 
 	mmio_read->read_resp = NULL;
 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
 }
 
 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 	u32 addr_low, addr_high;
 
 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
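 	/* Illustrative example (hypothetical address): a 64-bit DMA address of
 	 * 0x0000001234567890 splits into addr_low = 0x34567890 and
 	 * addr_high = 0x00000012, which are programmed into the LO/HI registers
 	 * below.
 	 */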
 
 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
 }
 
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
 		       struct ena_aenq_handlers *aenq_handlers)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
 	int ret;
 
 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
 		ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
 		return ENA_COM_NO_DEVICE;
 	}
 
 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
 
 	admin_queue->bus = ena_dev->bus;
 	admin_queue->q_dmadev = ena_dev->dmadev;
 	admin_queue->polling = false;
 	admin_queue->curr_cmd_id = 0;
 
 	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
 
 	ENA_SPINLOCK_INIT(admin_queue->q_lock);
 
 	ret = ena_com_init_comp_ctxt(admin_queue);
 	if (ret)
 		goto error;
 
 	ret = ena_com_admin_init_sq(admin_queue);
 	if (ret)
 		goto error;
 
 	ret = ena_com_admin_init_cq(admin_queue);
 	if (ret)
 		goto error;
 
 	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 		ENA_REGS_AQ_DB_OFF);
 
 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
 
 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
 
 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
 
 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
 
 	aq_caps = 0;
 	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
 	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
 
 	acq_caps = 0;
 	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
 	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
 
 	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
 	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
 	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
 	if (ret)
 		goto error;
 
 	admin_queue->ena_dev = ena_dev;
 	admin_queue->running_state = true;
 
 	return 0;
 error:
 	ena_com_admin_destroy(ena_dev);
 
 	return ret;
 }
 
 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
 			    struct ena_com_create_io_ctx *ctx)
 {
 	struct ena_com_io_sq *io_sq;
 	struct ena_com_io_cq *io_cq;
 	int ret;
 
 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
 		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
 			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
 		return ENA_COM_INVAL;
 	}
 
 	io_sq = &ena_dev->io_sq_queues[ctx->qid];
 	io_cq = &ena_dev->io_cq_queues[ctx->qid];
 
 	memset(io_sq, 0x0, sizeof(*io_sq));
 	memset(io_cq, 0x0, sizeof(*io_cq));
 
 	/* Init CQ */
 	io_cq->q_depth = ctx->queue_size;
 	io_cq->direction = ctx->direction;
 	io_cq->qid = ctx->qid;
 
 	io_cq->msix_vector = ctx->msix_vector;
 
 	io_sq->q_depth = ctx->queue_size;
 	io_sq->direction = ctx->direction;
 	io_sq->qid = ctx->qid;
 
 	io_sq->mem_queue_type = ctx->mem_queue_type;
 
 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 		/* header length is limited to 8 bits */
 		io_sq->tx_max_header_size =
 			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
 
 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
 	if (ret)
 		goto error;
 	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
 	if (ret)
 		goto error;
 
 	ret = ena_com_create_io_cq(ena_dev, io_cq);
 	if (ret)
 		goto error;
 
 	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
 	if (ret)
 		goto destroy_io_cq;
 
 	return 0;
 
 destroy_io_cq:
 	ena_com_destroy_io_cq(ena_dev, io_cq);
 error:
 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
 	return ret;
 }
 
 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
 {
 	struct ena_com_io_sq *io_sq;
 	struct ena_com_io_cq *io_cq;
 
 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
 		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
 			    qid, ENA_TOTAL_NUM_QUEUES);
 		return;
 	}
 
 	io_sq = &ena_dev->io_sq_queues[qid];
 	io_cq = &ena_dev->io_cq_queues[qid];
 
 	ena_com_destroy_io_sq(ena_dev, io_sq);
 	ena_com_destroy_io_cq(ena_dev, io_cq);
 
 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
 }
 
 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
 			    struct ena_admin_get_feat_resp *resp)
 {
 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
 }
 
 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
 			     struct ena_com_stats_ctx *ctx,
 			     enum ena_admin_get_stats_type type)
 {
 	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
 	struct ena_com_admin_queue *admin_queue;
 	int ret;
 
 	admin_queue = &ena_dev->admin_queue;
 
 	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
 	get_cmd->aq_common_descriptor.flags = 0;
 	get_cmd->type = type;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)get_cmd,
 					    sizeof(*get_cmd),
 					    (struct ena_admin_acq_entry *)get_resp,
 					    sizeof(*get_resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
 
 	return ret;
 }
 
 static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
 {
 	struct ena_customer_metrics *customer_metrics;
 	struct ena_com_stats_ctx ctx;
 	int ret;
 
 	customer_metrics = &ena_dev->customer_metrics;
 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
 		customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
 		return;
 	}
 
 	memset(&ctx, 0x0, sizeof(ctx));
 	ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
 	if (likely(ret == 0))
 		customer_metrics->supported_metrics =
 			ctx.get_resp.u.customer_metrics.reported_metrics;
 	else
 		ena_trc_err(ena_dev, "Failed to query customer metrics support. error: %d\n", ret);
 }
 
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
 	struct ena_admin_get_feat_resp get_resp;
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
 				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
 	if (rc)
 		return rc;
 
 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
 	       sizeof(get_resp.u.dev_attr));
 
 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
 	ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
 
 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
 		rc = ena_com_get_feature(ena_dev, &get_resp,
 					 ENA_ADMIN_MAX_QUEUES_EXT,
 					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
 		if (rc)
 			return rc;
 
 		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
 			return ENA_COM_INVAL;
 
 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
 		       sizeof(get_resp.u.max_queue_ext));
 		ena_dev->tx_max_header_size =
 			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
 	} else {
 		rc = ena_com_get_feature(ena_dev, &get_resp,
 					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
 		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
 		       sizeof(get_resp.u.max_queue));
 		ena_dev->tx_max_header_size =
 			get_resp.u.max_queue.max_header_size;
 
 		if (rc)
 			return rc;
 	}
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
 				 ENA_ADMIN_AENQ_CONFIG, 0);
 	if (rc)
 		return rc;
 
 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
 	       sizeof(get_resp.u.aenq));
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
 				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (rc)
 		return rc;
 
 	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
 	       sizeof(get_resp.u.offload));
 
 	/* Driver hints is not a mandatory admin command, so in case the
 	 * command isn't supported, set driver hints to 0
 	 */
 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
 
 	if (!rc)
 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
 		       sizeof(get_resp.u.hw_hints));
 	else if (rc == ENA_COM_UNSUPPORTED)
 		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
 	else
 		return rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
 				 ENA_ADMIN_LLQ, ENA_ADMIN_LLQ_FEATURE_VERSION_1);
 	if (!rc)
 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
 		       sizeof(get_resp.u.llq));
 	else if (rc == ENA_COM_UNSUPPORTED)
 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
 	else
 		return rc;
 
 	ena_com_set_supported_customer_metrics(ena_dev);
 
 	return 0;
 }
 
 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
 {
 	ena_com_handle_admin_completion(&ena_dev->admin_queue);
 }
 
 /* ena_handle_specific_aenq_event:
  * return the handler that is relevant to the specific event group
  */
 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
 						     u16 group)
 {
 	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
 
 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
 		return aenq_handlers->handlers[group];
 
 	return aenq_handlers->unimplemented_handler;
 }
 
 /* ena_aenq_intr_handler:
  * handles the aenq incoming events.
  * pop events from the queue and apply the specific handler
  */
 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
 {
 	struct ena_admin_aenq_entry *aenq_e;
 	struct ena_admin_aenq_common_desc *aenq_common;
 	struct ena_com_aenq *aenq  = &ena_dev->aenq;
 	u64 timestamp;
 	ena_aenq_handler handler_cb;
 	u16 masked_head, processed = 0;
 	u8 phase;
 
 	masked_head = aenq->head & (aenq->q_depth - 1);
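 	/* Illustrative example (hypothetical values): with q_depth = 16 and
 	 * head = 17, masked_head = 17 & 15 = 1; the phase (ownership) bit is
 	 * flipped each time the masked index wraps around the ring.
 	 */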
 	phase = aenq->phase;
 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
 	aenq_common = &aenq_e->aenq_common_desc;
 
 	/* Go over all the events */
 	while ((READ_ONCE8(aenq_common->flags) &
 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
 		/* Make sure the phase bit (ownership) is as expected before
 		 * reading the rest of the descriptor.
 		 */
 		dma_rmb();
 
 		timestamp = (u64)aenq_common->timestamp_low |
 			((u64)aenq_common->timestamp_high << 32);
 
 		ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
 			    aenq_common->group,
 			    aenq_common->syndrome,
 			    timestamp);
 
 		/* Handle specific event */
 		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
 							  aenq_common->group);
 		handler_cb(data, aenq_e); /* Call the actual event handler */
 
 		/* Get next event entry */
 		masked_head++;
 		processed++;
 
 		if (unlikely(masked_head == aenq->q_depth)) {
 			masked_head = 0;
 			phase = !phase;
 		}
 		aenq_e = &aenq->entries[masked_head];
 		aenq_common = &aenq_e->aenq_common_desc;
 	}
 
 	aenq->head += processed;
 	aenq->phase = phase;
 
 	/* Don't update aenq doorbell if there weren't any processed events */
 	if (!processed)
 		return;
 
 	/* write the aenq doorbell after all AENQ descriptors were read */
 	mb();
 	ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
 				ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
 	mmiowb();
 }
 #ifdef ENA_EXTENDED_STATS
 /*
  * Sets the function index and queue index to be used for
  * the get full statistics feature
  */
 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
 					  u32 func_queue)
 {
 	/* Function & queue are acquired from the user in the following format:
 	 * Bottom half word:	function
 	 * Top half word:	queue
 	 */
 	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
 	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
 
 	return 0;
 }
 
 #endif /* ENA_EXTENDED_STATS */
 
 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 		      enum ena_regs_reset_reason_types reset_reason)
 {
+	u32 reset_reason_msb, reset_reason_lsb;
 	u32 stat, timeout, cap, reset_val;
 	int rc;
 
 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 
 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
 		     (cap == ENA_MMIO_READ_TIMEOUT))) {
 		ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
 		ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
 		return ENA_COM_INVAL;
 	}
 
 	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
 			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
 	if (timeout == 0) {
 		ena_trc_err(ena_dev, "Invalid timeout value\n");
 		return ENA_COM_INVAL;
 	}
 
 	/* start reset */
 	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
-	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
-			ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+
+	/* For backward compatibility, the device interprets
+	 * bits 24-27 as the MSB and bits 28-31 as the LSB
+	 */
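+	/* Illustrative example (hypothetical value): assuming ENA_FIELD_GET(val,
+	 * mask, offset) evaluates to ((val) & (mask)) >> (offset), a reset reason
+	 * of 0x2A yields reset_reason_lsb = 0xA (written at bits 28-31) and
+	 * reset_reason_msb = 0x2 (written at bits 24-27 when extended reset
+	 * reasons are supported).
+	 */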
+	reset_reason_lsb = ENA_FIELD_GET(reset_reason, ENA_RESET_REASON_LSB_MASK,
+					 ENA_RESET_REASON_LSB_OFFSET);
+
+	reset_reason_msb = ENA_FIELD_GET(reset_reason, ENA_RESET_REASON_MSB_MASK,
+					 ENA_RESET_REASON_MSB_OFFSET);
+
+	reset_val |= reset_reason_lsb << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT;
+
+	if (ena_com_get_cap(ena_dev, ENA_ADMIN_EXTENDED_RESET_REASONS))
+		reset_val |= reset_reason_msb << ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT;
+	else if (reset_reason_msb) {
+		/* In case the device does not support the intended
+		 * extended reset reason, fall back to the generic reason
+		 */
+		reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+		reset_val |= (ENA_REGS_RESET_GENERIC << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+			      ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+	}
 	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
 
 	/* Write again the MMIO read request address */
 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
 
 	rc = wait_for_reset_state(ena_dev, timeout,
 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
 	if (rc != 0) {
 		ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
 		return rc;
 	}
 
 	/* reset done */
 	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
 	rc = wait_for_reset_state(ena_dev, timeout, 0);
 	if (rc != 0) {
 		ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
 		return rc;
 	}
 
 	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
 		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
 	if (timeout)
 		/* the resolution of timeout reg is 100ms */
 		ena_dev->admin_queue.completion_timeout = timeout * 100000;
 	else
 		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
 
 	return 0;
 }
 
 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
 			  struct ena_admin_eni_stats *stats)
 {
 	struct ena_com_stats_ctx ctx;
 	int ret;
 
 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
 		ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENI_STATS);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&ctx, 0x0, sizeof(ctx));
 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
 	if (likely(ret == 0))
 		memcpy(stats, &ctx.get_resp.u.eni_stats,
 		       sizeof(ctx.get_resp.u.eni_stats));
 
 	return ret;
 }
 
 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
 			      struct ena_admin_ena_srd_info *info)
 {
 	struct ena_com_stats_ctx ctx;
 	int ret;
 
 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
 		ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENA_SRD_INFO);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&ctx, 0x0, sizeof(ctx));
 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
 	if (likely(ret == 0))
 		memcpy(info, &ctx.get_resp.u.ena_srd_info,
 		       sizeof(ctx.get_resp.u.ena_srd_info));
 
 	return ret;
 }
 
 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
 				struct ena_admin_basic_stats *stats)
 {
 	struct ena_com_stats_ctx ctx;
 	int ret;
 
 	memset(&ctx, 0x0, sizeof(ctx));
 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
 	if (likely(ret == 0))
 		memcpy(stats, &ctx.get_resp.u.basic_stats,
 		       sizeof(ctx.get_resp.u.basic_stats));
 
 	return ret;
 }
 #ifdef ENA_EXTENDED_STATS
 
 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
 				   u32 len)
 {
 	struct ena_com_stats_ctx ctx;
 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
 	ena_mem_handle_t mem_handle;
 	void *virt_addr;
 	dma_addr_t phys_addr;
 	int ret;
 
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
 			       virt_addr, phys_addr, mem_handle);
 	if (!virt_addr) {
 		ret = ENA_COM_NO_MEM;
 		goto done;
 	}
 	memset(&ctx, 0x0, sizeof(ctx));
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &get_cmd->u.control_buffer.address,
 				   phys_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		goto free_ext_stats_mem;
 	}
 	get_cmd->u.control_buffer.length = len;
 
 	get_cmd->device_id = ena_dev->stats_func;
 	get_cmd->queue_idx = ena_dev->stats_queue;
 
 	ret = ena_get_dev_stats(ena_dev, &ctx,
 				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
 	if (ret < 0)
 		goto free_ext_stats_mem;
 
 	ret = snprintf(buff, len, "%s", (char *)virt_addr);
 
 free_ext_stats_mem:
 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
 			      mem_handle);
 done:
 	return ret;
 }
 #endif
 
 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
 {
 	struct ena_admin_aq_get_stats_cmd *get_cmd;
 	struct ena_com_stats_ctx ctx;
 	int ret;
 
 	if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
 		ena_trc_err(ena_dev, "Invalid buffer size %u. The given buffer is too big.\n", len);
 		return ENA_COM_INVAL;
 	}
 
 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
 		ena_trc_err(ena_dev, "Capability %d not supported.\n", ENA_ADMIN_CUSTOMER_METRICS);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	if (!ena_dev->customer_metrics.supported_metrics) {
 		ena_trc_err(ena_dev, "No supported customer metrics.\n");
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	get_cmd = &ctx.get_cmd;
 	memset(&ctx, 0x0, sizeof(ctx));
 	ret = ena_com_mem_addr_set(ena_dev,
 		&get_cmd->u.control_buffer.address,
 		ena_dev->customer_metrics.buffer_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed.\n");
 		return ret;
 	}
 
 	get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
 	get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
 	if (likely(ret == 0))
 		memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
 	else
 		ena_trc_err(ena_dev, "Failed to get customer metrics. error: %d\n", ret);
 
 	return ret;
 }
 
 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&cmd, 0x0, sizeof(cmd));
 	admin_queue = &ena_dev->admin_queue;
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.aq_common_descriptor.flags = 0;
 	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
 	cmd.u.mtu.mtu = mtu;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
 
 	return ret;
 }
 
 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
 				 struct ena_admin_feature_offload_desc *offload)
 {
 	int ret;
 	struct ena_admin_get_feat_resp resp;
 
 	ret = ena_com_get_feature(ena_dev, &resp,
 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
 		return ret;
 	}
 
 	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
 
 	return 0;
 }
 
 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 	struct ena_admin_get_feat_resp get_resp;
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev,
 						ENA_ADMIN_RSS_HASH_FUNCTION)) {
 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
 			    ENA_ADMIN_RSS_HASH_FUNCTION);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	/* Validate hash function is supported */
 	ret = ena_com_get_feature(ena_dev, &get_resp,
 				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
 	if (unlikely(ret))
 		return ret;
 
 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
 		ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
 			    rss->hash_func);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&cmd, 0x0, sizeof(cmd));
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.aq_common_descriptor.flags =
 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
 	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
 	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &cmd.control_buffer.address,
 				   rss->hash_key_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 
 	cmd.control_buffer.length = sizeof(*rss->hash_key);
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
 			    rss->hash_func, ret);
 		return ENA_COM_INVAL;
 	}
 
 	return 0;
 }
 
 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 			       enum ena_admin_hash_functions func,
 			       const u8 *key, u16 key_len, u32 init_val)
 {
 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
 	struct ena_admin_get_feat_resp get_resp;
 	enum ena_admin_hash_functions old_func;
 	struct ena_rss *rss = &ena_dev->rss;
 	int rc;
 
 	hash_key = rss->hash_key;
 
 	/* Make sure the key size is a multiple of DWORDs */
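 	/* Illustrative note: ENA_HASH_KEY_SIZE (40 bytes) satisfies this
 	 * requirement, since 40 & 0x3 == 0.
 	 */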
 	if (unlikely(key_len & 0x3))
 		return ENA_COM_INVAL;
 
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
 				    sizeof(*rss->hash_key), 0);
 	if (unlikely(rc))
 		return rc;
 
 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
 		ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
 		if (key_len != sizeof(hash_key->key)) {
 			ena_trc_err(ena_dev, "key len (%u) doesn't equal the supported size (%zu)\n",
 				    key_len, sizeof(hash_key->key));
 			return ENA_COM_INVAL;
 		}
 		memcpy(hash_key->key, key, key_len);
 		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
 	}
 
 	rss->hash_init_val = init_val;
 	old_func = rss->hash_func;
 	rss->hash_func = func;
 	rc = ena_com_set_hash_function(ena_dev);
 
 	/* Restore the old function */
 	if (unlikely(rc))
 		rss->hash_func = old_func;
 
 	return rc;
 }
 
 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
 			      enum ena_admin_hash_functions *func)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_get_feat_resp get_resp;
 	int rc;
 
 	if (unlikely(!func))
 		return ENA_COM_INVAL;
 
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
 				    sizeof(*rss->hash_key), 0);
 	if (unlikely(rc))
 		return rc;
 
 	/* ENA_FFS() returns 1 in case the lsb is set */
 	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
 	if (rss->hash_func)
 		rss->hash_func--;
 
 	*func = rss->hash_func;
 
 	return 0;
 }
 
 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
 {
 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
 		ena_dev->rss.hash_key;
 
 	if (key)
 		memcpy(key, hash_key->key,
 		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
 
 	return 0;
 }
 
 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
 			  enum ena_admin_flow_hash_proto proto,
 			  u16 *fields)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_get_feat_resp get_resp;
 	int rc;
 
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_INPUT,
 				    rss->hash_ctrl_dma_addr,
 				    sizeof(*rss->hash_ctrl), 0);
 	if (unlikely(rc))
 		return rc;
 
 	if (fields)
 		*fields = rss->hash_ctrl->selected_fields[proto].fields;
 
 	return 0;
 }
 
 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev,
 						ENA_ADMIN_RSS_HASH_INPUT)) {
 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
 			    ENA_ADMIN_RSS_HASH_INPUT);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	memset(&cmd, 0x0, sizeof(cmd));
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.aq_common_descriptor.flags =
 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
 	cmd.u.flow_hash_input.enabled_input_sort =
 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &cmd.control_buffer.address,
 				   rss->hash_ctrl_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 	cmd.control_buffer.length = sizeof(*hash_ctrl);
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
 
 	return ret;
 }
 
 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_feature_rss_hash_control *hash_ctrl =
 		rss->hash_ctrl;
 	u16 available_fields = 0;
 	int rc, i;
 
 	/* Get the supported hash input */
 	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
 	if (unlikely(rc))
 		return rc;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
 
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
 		available_fields = hash_ctrl->selected_fields[i].fields &
 				hash_ctrl->supported_fields[i].fields;
 		if (available_fields != hash_ctrl->selected_fields[i].fields) {
 			ena_trc_err(ena_dev, "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
 				    i, hash_ctrl->supported_fields[i].fields,
 				    hash_ctrl->selected_fields[i].fields);
 			return ENA_COM_UNSUPPORTED;
 		}
 	}
 
 	rc = ena_com_set_hash_ctrl(ena_dev);
 
 	/* In case of failure, restore the old hash ctrl */
 	if (unlikely(rc))
 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
 
 	return rc;
 }
 
 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
 			   enum ena_admin_flow_hash_proto proto,
 			   u16 hash_fields)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
 	u16 supported_fields;
 	int rc;
 
 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
 		ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
 		return ENA_COM_INVAL;
 	}
 
 	/* Get the ctrl table */
 	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
 	if (unlikely(rc))
 		return rc;
 
 	/* Make sure all the fields are supported */
 	supported_fields = hash_ctrl->supported_fields[proto].fields;
 	if ((hash_fields & supported_fields) != hash_fields) {
 		ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
 			    proto, hash_fields, supported_fields);
 	}
 
 	hash_ctrl->selected_fields[proto].fields = hash_fields;
 
 	rc = ena_com_set_hash_ctrl(ena_dev);
 
 	/* In case of failure, restore the old hash ctrl */
 	if (unlikely(rc))
 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
 
 	return 0;
 }
 
 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
 				      u16 entry_idx, u16 entry_value)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
 	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
 		return ENA_COM_INVAL;
 
 	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
 		return ENA_COM_INVAL;
 
 	rss->host_rss_ind_tbl[entry_idx] = entry_value;
 
 	return 0;
 }
 
 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev,
 						ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
 			    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
 		return ENA_COM_UNSUPPORTED;
 	}
 
 	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
 	if (ret) {
 		ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
 		return ret;
 	}
 
 	memset(&cmd, 0x0, sizeof(cmd));
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.aq_common_descriptor.flags =
 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
 	cmd.u.ind_table.size = rss->tbl_log_size;
 	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &cmd.control_buffer.address,
 				   rss->rss_ind_tbl_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 
 	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
 
 	return ret;
 }
 
 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 	struct ena_admin_get_feat_resp get_resp;
 	u32 tbl_size;
 	int i, rc;
 
 	tbl_size = (1ULL << rss->tbl_log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
 				    rss->rss_ind_tbl_dma_addr,
 				    tbl_size, 0);
 	if (unlikely(rc))
 		return rc;
 
 	if (!ind_tbl)
 		return 0;
 
 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
 
 	return 0;
 }
 
 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
 {
 	int rc;
 
 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
 
 	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
 	if (unlikely(rc))
 		goto err_indr_tbl;
 
 	/* The following function might return unsupported in case the
 	 * device doesn't support setting the key / hash function. We can safely
 	 * ignore this error and have indirection table support only.
 	 */
 	rc = ena_com_hash_key_allocate(ena_dev);
 	if (likely(!rc))
 		ena_com_hash_key_fill_default_key(ena_dev);
 	else if (rc != ENA_COM_UNSUPPORTED)
 		goto err_hash_key;
 
 	rc = ena_com_hash_ctrl_init(ena_dev);
 	if (unlikely(rc))
 		goto err_hash_ctrl;
 
 	return 0;
 
 err_hash_ctrl:
 	ena_com_hash_key_destroy(ena_dev);
 err_hash_key:
 	ena_com_indirect_table_destroy(ena_dev);
 err_indr_tbl:
 
 	return rc;
 }
 
 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
 {
 	ena_com_indirect_table_destroy(ena_dev);
 	ena_com_hash_key_destroy(ena_dev);
 	ena_com_hash_ctrl_destroy(ena_dev);
 
 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
 }
 
 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
 {
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       SZ_4K,
 			       host_attr->host_info,
 			       host_attr->host_info_dma_addr,
 			       host_attr->host_info_dma_handle);
 	if (unlikely(!host_attr->host_info))
 		return ENA_COM_NO_MEM;
 
 	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
 		(ENA_COMMON_SPEC_VERSION_MINOR));
 
 	return 0;
 }
 
 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
 				u32 debug_area_size)
 {
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       debug_area_size,
 			       host_attr->debug_area_virt_addr,
 			       host_attr->debug_area_dma_addr,
 			       host_attr->debug_area_dma_handle);
 	if (unlikely(!host_attr->debug_area_virt_addr)) {
 		host_attr->debug_area_size = 0;
 		return ENA_COM_NO_MEM;
 	}
 
 	host_attr->debug_area_size = debug_area_size;
 
 	return 0;
 }
 
 int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
 {
 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
 
 	customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
 			       customer_metrics->buffer_len,
 			       customer_metrics->buffer_virt_addr,
 			       customer_metrics->buffer_dma_addr,
 			       customer_metrics->buffer_dma_handle);
 	if (!customer_metrics->buffer_virt_addr)
 		return ENA_COM_NO_MEM;
 
 	return 0;
 }
 
 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
 {
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
 	if (host_attr->host_info) {
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      SZ_4K,
 				      host_attr->host_info,
 				      host_attr->host_info_dma_addr,
 				      host_attr->host_info_dma_handle);
 		host_attr->host_info = NULL;
 	}
 }
 
 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
 {
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
 	if (host_attr->debug_area_virt_addr) {
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      host_attr->debug_area_size,
 				      host_attr->debug_area_virt_addr,
 				      host_attr->debug_area_dma_addr,
 				      host_attr->debug_area_dma_handle);
 		host_attr->debug_area_virt_addr = NULL;
 	}
 }
 
 void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
 {
 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
 
 	if (customer_metrics->buffer_virt_addr) {
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
 				      customer_metrics->buffer_len,
 				      customer_metrics->buffer_virt_addr,
 				      customer_metrics->buffer_dma_addr,
 				      customer_metrics->buffer_dma_handle);
 		customer_metrics->buffer_virt_addr = NULL;
 	}
 }
 
 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
 {
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_set_feat_cmd cmd;
 	struct ena_admin_set_feat_resp resp;
 
 	int ret;
 
 	/* Host attribute config is called before ena_com_get_dev_attr_feat
 	 * so ena_com can't check if the feature is supported.
 	 */
 
 	memset(&cmd, 0x0, sizeof(cmd));
 	admin_queue = &ena_dev->admin_queue;
 
 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &cmd.u.host_attr.debug_ba,
 				   host_attr->debug_area_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 
 	ret = ena_com_mem_addr_set(ena_dev,
 				   &cmd.u.host_attr.os_info_ba,
 				   host_attr->host_info_dma_addr);
 	if (unlikely(ret)) {
 		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
 
 	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
 
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 
 	if (unlikely(ret))
 		ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);
 
 	return ret;
 }
 
 /* Interrupt moderation */
 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
 {
 	return ena_com_check_supported_feature_id(ena_dev,
 						  ENA_ADMIN_INTERRUPT_MODERATION);
 }
 
 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
 							  u32 coalesce_usecs,
 							  u32 intr_delay_resolution,
 							  u32 *intr_moder_interval)
 {
 	if (!intr_delay_resolution) {
 		ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
 		return ENA_COM_FAULT;
 	}
 
 	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
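 	/* Illustrative example (hypothetical values): with an interrupt delay
 	 * resolution of 2 usec, a requested 64 usec coalescing interval is
 	 * programmed as 64 / 2 = 32 device units.
 	 */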
 
 	return 0;
 }
 
 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
 						      u32 tx_coalesce_usecs)
 {
 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
 							      tx_coalesce_usecs,
 							      ena_dev->intr_delay_resolution,
 							      &ena_dev->intr_moder_tx_interval);
 }
 
 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
 						      u32 rx_coalesce_usecs)
 {
 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
 							      rx_coalesce_usecs,
 							      ena_dev->intr_delay_resolution,
 							      &ena_dev->intr_moder_rx_interval);
 }
 
 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_get_feat_resp get_resp;
 	u16 delay_resolution;
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
 				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
 
 	if (rc) {
 		if (rc == ENA_COM_UNSUPPORTED) {
 			ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
 				    ENA_ADMIN_INTERRUPT_MODERATION);
 			rc = 0;
 		} else {
 			ena_trc_err(ena_dev,
 				    "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
 		}
 
 		/* no moderation supported, disable adaptive support */
 		ena_com_disable_adaptive_moderation(ena_dev);
 		return rc;
 	}
 
 	/* if moderation is supported by the device, update the interrupt delay resolution */
 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
 
 	/* Disable adaptive moderation by default - can be enabled later */
 	ena_com_disable_adaptive_moderation(ena_dev);
 
 	return 0;
 }
 
 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->intr_moder_tx_interval;
 }
 
 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->intr_moder_rx_interval;
 }
 
 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 			    struct ena_admin_feature_llq_desc *llq_features,
 			    struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
 	int rc;
 
 	if (!llq_features->max_llq_num) {
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 		return 0;
 	}
 
 	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
 	if (rc)
 		return rc;
 
 	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
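 	/* Illustrative example (hypothetical values): with a 128-byte LLQ entry
 	 * and 2 descriptors placed before the header, each of
 	 * sizeof(struct ena_eth_io_tx_desc) bytes (assumed to be 16 here), the
 	 * maximum pushed header size is 128 - 2 * 16 = 96 bytes.
 	 */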
 
 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
 		ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
 		return ENA_COM_INVAL;
 	}
 
 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
 
 	return 0;
 }
diff --git a/ena_com.h b/ena_com.h
index 5b091c64572c..347ca4031fda 100644
--- a/ena_com.h
+++ b/ena_com.h
@@ -1,1227 +1,1235 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * * Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in
  * the documentation and/or other materials provided with the
  * distribution.
  * * Neither the name of copyright holder nor the names of its
  * contributors may be used to endorse or promote products derived
  * from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ENA_COM
 #define ENA_COM
 
 #include "ena_plat.h"
 
 #define ENA_MAX_NUM_IO_QUEUES 128U
 /* We need two queues for each IO (one for Tx and one for Rx) */
 #define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
 
 #define ENA_MAX_HANDLERS 256
 
 #define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
 
 /* Unit in usec */
 #define ENA_REG_READ_TIMEOUT 200000
 
 #define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
 #define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
 #define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))
 
+/* Macros used to extract LSB/MSB from the
+ * enums defining the reset reasons
+ */
+#define ENA_RESET_REASON_LSB_OFFSET			    0
+#define ENA_RESET_REASON_LSB_MASK			    0xf
+#define ENA_RESET_REASON_MSB_OFFSET			    4
+#define ENA_RESET_REASON_MSB_MASK			    0xf0
+
 #define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
 
 /*****************************************************************************/
 /*****************************************************************************/
 /* ENA adaptive interrupt moderation settings */
 
 #define ENA_INTR_INITIAL_TX_INTERVAL_USECS ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT
 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS ENA_INTR_INITIAL_RX_INTERVAL_USECS_PLAT
 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
 
 #define ENA_HASH_KEY_SIZE 40
 
 #define ENA_HW_HINTS_NO_TIMEOUT	0xFFFF
 
 #define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
 
 struct ena_llq_configurations {
 	enum ena_admin_llq_header_location llq_header_location;
 	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
 	enum ena_admin_llq_stride_ctrl  llq_stride_ctrl;
 	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
 	u16 llq_ring_entry_size_value;
 };
 
 enum queue_direction {
 	ENA_COM_IO_QUEUE_DIRECTION_TX,
 	ENA_COM_IO_QUEUE_DIRECTION_RX
 };
 
 struct ena_com_buf {
 	dma_addr_t paddr; /**< Buffer physical address */
 	u16 len; /**< Buffer length in bytes */
 };
 
 struct ena_com_rx_buf_info {
 	u16 len;
 	u16 req_id;
 };
 
 struct ena_com_io_desc_addr {
 	u8 __iomem *pbuf_dev_addr; /* LLQ address */
 	u8 *virt_addr;
 	dma_addr_t phys_addr;
 	ena_mem_handle_t mem_handle;
 };
 
 struct ena_com_tx_meta {
 	u16 mss;
 	u16 l3_hdr_len;
 	u16 l3_hdr_offset;
 	u16 l4_hdr_len; /* In words */
 };
 
 struct ena_com_llq_info {
 	u16 header_location_ctrl;
 	u16 desc_stride_ctrl;
 	u16 desc_list_entry_size_ctrl;
 	u16 desc_list_entry_size;
 	u16 descs_num_before_header;
 	u16 descs_per_entry;
 	u16 max_entries_in_tx_burst;
 	bool disable_meta_caching;
 };
 
 struct ena_com_io_cq {
 	struct ena_com_io_desc_addr cdesc_addr;
 	void *bus;
 
 	/* Interrupt unmask register */
 	u32 __iomem *unmask_reg;
 
 
 	/* numa configuration register (for TPH) */
 	u32 __iomem *numa_node_cfg_reg;
 
 	/* The value to write to the above register to unmask
 	 * the interrupt of this queue
 	 */
 	u32 msix_vector ____cacheline_aligned;
 
 	enum queue_direction direction;
 
 	/* holds the number of cdesc of the current packet */
 	u16 cur_rx_pkt_cdesc_count;
 	/* save the first cdesc idx of the current packet */
 	u16 cur_rx_pkt_cdesc_start_idx;
 
 	u16 q_depth;
 	/* Caller qid */
 	u16 qid;
 
 	/* Device queue index */
 	u16 idx;
 	u16 head;
 	u8 phase;
 	u8 cdesc_entry_size_in_bytes;
 
 } ____cacheline_aligned;
 
 struct ena_com_io_bounce_buffer_control {
 	u8 *base_buffer;
 	u16 next_to_use;
 	u16 buffer_size;
 	u16 buffers_num;  /* Must be a power of 2 */
 };
 
 /* This struct is to keep tracking the current location of the next llq entry */
 struct ena_com_llq_pkt_ctrl {
 	u8 *curr_bounce_buf;
 	u16 idx;
 	u16 descs_left_in_line;
 };
 
 struct ena_com_io_sq {
 	struct ena_com_io_desc_addr desc_addr;
 	void *bus;
 
 	u32 __iomem *db_addr;
 
 	enum queue_direction direction;
 	enum ena_admin_placement_policy_type mem_queue_type;
 
 	bool disable_meta_caching;
 
 	u32 msix_vector;
 	struct ena_com_tx_meta cached_tx_meta;
 	struct ena_com_llq_info llq_info;
 	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
 	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
 
 	u16 q_depth;
 	u16 qid;
 
 	u16 idx;
 	u16 tail;
 	u16 next_to_comp;
 	u16 llq_last_copy_tail;
 	u32 tx_max_header_size;
 	u8 phase;
 	u8 desc_entry_size;
 	u8 dma_addr_bits;
 	u16 entries_in_tx_burst_left;
 } ____cacheline_aligned;
 
 struct ena_com_admin_cq {
 	struct ena_admin_acq_entry *entries;
 	ena_mem_handle_t mem_handle;
 	dma_addr_t dma_addr;
 
 	u16 head;
 	u8 phase;
 };
 
 struct ena_com_admin_sq {
 	struct ena_admin_aq_entry *entries;
 	ena_mem_handle_t mem_handle;
 	dma_addr_t dma_addr;
 
 	u32 __iomem *db_addr;
 
 	u16 head;
 	u16 tail;
 	u8 phase;
 
 };
 
 struct ena_com_stats_admin {
 	u64 aborted_cmd;
 	u64 submitted_cmd;
 	u64 completed_cmd;
 	u64 out_of_space;
 	u64 no_completion;
 };
 
 struct ena_com_stats_phc {
 	u64 phc_cnt;
 	u64 phc_exp;
 	u64 phc_skp;
 	u64 phc_err;
 };
 
 struct ena_com_admin_queue {
 	void *q_dmadev;
 	void *bus;
 	struct ena_com_dev *ena_dev;
 	ena_spinlock_t q_lock; /* spinlock for the admin queue */
 
 	struct ena_comp_ctx *comp_ctx;
 	u32 completion_timeout;
 	u16 q_depth;
 	struct ena_com_admin_cq cq;
 	struct ena_com_admin_sq sq;
 
 	/* Indicate if the admin queue should poll for completion */
 	bool polling;
 
 	/* Define if fallback to polling mode should occur */
 	bool auto_polling;
 
 	u16 curr_cmd_id;
 
 	/* Indicate that the ena was initialized and can
 	 * process new admin commands
 	 */
 	bool running_state;
 
 	/* Count the number of outstanding admin commands */
 	ena_atomic32_t outstanding_cmds;
 
 	struct ena_com_stats_admin stats;
 };
 
 struct ena_aenq_handlers;
 
 struct ena_com_aenq {
 	u16 head;
 	u8 phase;
 	struct ena_admin_aenq_entry *entries;
 	dma_addr_t dma_addr;
 	ena_mem_handle_t mem_handle;
 	u16 q_depth;
 	struct ena_aenq_handlers *aenq_handlers;
 };
 
 struct ena_com_mmio_read {
 	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
 	dma_addr_t read_resp_dma_addr;
 	ena_mem_handle_t read_resp_mem_handle;
 	u32 reg_read_to; /* in us */
 	u16 seq_num;
 	bool readless_supported;
 	/* spin lock to ensure a single outstanding read */
 	ena_spinlock_t lock;
 };
 
 /* PTP hardware clock (PHC) MMIO read data info */
 struct ena_com_phc_info {
 	/* Internal PHC statistics */
 	struct ena_com_stats_phc stats;
 
 	/* PHC shared memory - virtual address */
 	struct ena_admin_phc_resp *virt_addr;
 
 	/* Spin lock to ensure a single outstanding PHC read */
 	ena_spinlock_t lock;
 
 	/* PHC doorbell address as an offset to PCIe MMIO REG BAR */
 	u32 doorbell_offset;
 
 	/* Shared memory read expire timeout (usec)
 	 * Max time for a valid PHC retrieval; passing this threshold fails the get time request
 	 * and blocks new PHC requests for block_timeout_usec in order to prevent floods on a
 	 * busy device
 	 */
 	u32 expire_timeout_usec;
 
 	/* Shared memory read abort timeout (usec)
 	 * PHC requests block period; blocking starts once a PHC request expires, in order to
 	 * prevent floods on a busy device. Any PHC request during the block period is skipped
 	 */
 	u32 block_timeout_usec;
 
 	/* Request id sent to the device */
 	u16 req_id;
 
 	/* True if PHC is active in the device */
 	bool active;
 
 	/* PHC shared memory - memory handle */
 	ena_mem_handle_t mem_handle;
 
 	/* PHC shared memory - physical address */
 	dma_addr_t phys_addr;
 };
 
 struct ena_rss {
 	/* Indirect table */
 	u16 *host_rss_ind_tbl;
 	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
 	dma_addr_t rss_ind_tbl_dma_addr;
 	ena_mem_handle_t rss_ind_tbl_mem_handle;
 	u16 tbl_log_size;
 
 	/* Hash key */
 	enum ena_admin_hash_functions hash_func;
 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
 	dma_addr_t hash_key_dma_addr;
 	ena_mem_handle_t hash_key_mem_handle;
 	u32 hash_init_val;
 
 	/* Flow Control */
 	struct ena_admin_feature_rss_hash_control *hash_ctrl;
 	dma_addr_t hash_ctrl_dma_addr;
 	ena_mem_handle_t hash_ctrl_mem_handle;
 
 };
 
 struct ena_customer_metrics {
 	/* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
 	 * and ena_admin_customer_metrics_id
 	 */
 	uint64_t supported_metrics;
 	dma_addr_t buffer_dma_addr;
 	void *buffer_virt_addr;
 	ena_mem_handle_t buffer_dma_handle;
 	u32 buffer_len;
 };
 
 struct ena_host_attribute {
 	/* Debug area */
 	u8 *debug_area_virt_addr;
 	dma_addr_t debug_area_dma_addr;
 	ena_mem_handle_t debug_area_dma_handle;
 	u32 debug_area_size;
 
 	/* Host information */
 	struct ena_admin_host_info *host_info;
 	dma_addr_t host_info_dma_addr;
 	ena_mem_handle_t host_info_dma_handle;
 };
 
 /* Each ena_dev is a PCI function. */
 struct ena_com_dev {
 	struct ena_com_admin_queue admin_queue;
 	struct ena_com_aenq aenq;
 	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
 	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
 	u8 __iomem *reg_bar;
 	void __iomem *mem_bar;
 	void *dmadev;
 	void *bus;
 	ena_netdev *net_device;
 
 	enum ena_admin_placement_policy_type tx_mem_queue_type;
 	u32 tx_max_header_size;
 	u16 stats_func; /* Selected function for extended statistic dump */
 	u16 stats_queue; /* Selected queue for extended statistic dump */
 
 	u32 ena_min_poll_delay_us;
 
 	struct ena_com_mmio_read mmio_read;
 	struct ena_com_phc_info phc;
 
 	struct ena_rss rss;
 	u32 supported_features;
 	u32 capabilities;
 	u32 dma_addr_bits;
 
 	struct ena_host_attribute host_attr;
 	bool adaptive_coalescing;
 	u16 intr_delay_resolution;
 
 	/* interrupt moderation intervals are in usec divided by
 	 * intr_delay_resolution, which is supplied by the device.
 	 */
 	u32 intr_moder_tx_interval;
 	u32 intr_moder_rx_interval;
 
 	struct ena_intr_moder_entry *intr_moder_tbl;
 
 	struct ena_com_llq_info llq_info;
 
 	struct ena_customer_metrics customer_metrics;
 };
 
 struct ena_com_dev_get_features_ctx {
 	struct ena_admin_queue_feature_desc max_queues;
 	struct ena_admin_queue_ext_feature_desc max_queue_ext;
 	struct ena_admin_device_attr_feature_desc dev_attr;
 	struct ena_admin_feature_aenq_desc aenq;
 	struct ena_admin_feature_offload_desc offload;
 	struct ena_admin_ena_hw_hints hw_hints;
 	struct ena_admin_feature_llq_desc llq;
 };
 
 struct ena_com_create_io_ctx {
 	enum ena_admin_placement_policy_type mem_queue_type;
 	enum queue_direction direction;
 	int numa_node;
 	u32 msix_vector;
 	u16 queue_size;
 	u16 qid;
 };
 
 typedef void (*ena_aenq_handler)(void *data,
 	struct ena_admin_aenq_entry *aenq_e);
 
 /* Holds aenq handlers. Indexed by AENQ event group */
 struct ena_aenq_handlers {
 	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
 	ena_aenq_handler unimplemented_handler;
 };
 
 /*****************************************************************************/
 /*****************************************************************************/
 #if defined(__cplusplus)
 extern "C" {
 #endif
 
 /* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
  * @ena_dev: ENA communication layer struct
  *
  * Initialize the register read mechanism.
  *
  * @note: This method must be the first stage in the initialization sequence.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
 
 /* ena_com_phc_init - Allocate and initialize PHC feature
  * @ena_dev: ENA communication layer struct
  * @note: This method assumes PHC is supported by the device
  * @return - 0 on success, negative value on failure
  */
 int ena_com_phc_init(struct ena_com_dev *ena_dev);
 
 /* ena_com_phc_supported - Return if PHC feature is supported by the device
  * @ena_dev: ENA communication layer struct
  * @note: This method must be called after getting supported features
  * @return - supported or not
  */
 bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
 
 /* ena_com_phc_config - Configure PHC feature
  * @ena_dev: ENA communication layer struct
  * Configure PHC feature in driver and device
  * @note: This method assumes PHC is supported by the device
  * @return - 0 on success, negative value on failure
  */
 int ena_com_phc_config(struct ena_com_dev *ena_dev);
 
 /* ena_com_phc_destroy - Destroy PHC feature
  * @ena_dev: ENA communication layer struct
  */
 void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
 
 /* ena_com_phc_get - Retrieve PHC timestamp
  * @ena_dev: ENA communication layer struct
  * @timestamp: Retrieve PHC timestamp
  * @return - 0 on success, negative value on failure
  */
 int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp);
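 
 /* Illustrative PHC usage sketch (an assumption about how a driver might wire
  * these calls together, not a mandated sequence): check support, initialize
  * and configure once, read timestamps as needed, and destroy on teardown.
  * The rc/timestamp locals and the error handling are placeholders.
  *
  *	u64 timestamp;
  *	int rc;
  *
  *	if (ena_com_phc_supported(ena_dev)) {
  *		rc = ena_com_phc_init(ena_dev);
  *		if (!rc)
  *			rc = ena_com_phc_config(ena_dev);
  *		if (!rc)
  *			rc = ena_com_phc_get(ena_dev, &timestamp);
  *	}
  */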
 
 /* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
  * @ena_dev: ENA communication layer struct
  * @readless_supported: readless mode (enable/disable)
  */
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
 				bool readless_supported);
 
 /* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
  * value physical address.
  * @ena_dev: ENA communication layer struct
  */
 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
 
 /* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
  * @ena_dev: ENA communication layer struct
  */
 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
 
 /* ena_com_admin_init - Init the admin and the async queues
  * @ena_dev: ENA communication layer struct
  * @aenq_handlers: The handlers to be called upon an AENQ event.
  *
  * Initialize the admin submission and completion queues.
  * Initialize the asynchronous events notification queues.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
 		       struct ena_aenq_handlers *aenq_handlers);
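 
 /* Illustrative bring-up ordering sketch: the register read mechanism is
  * documented above as the first stage of initialization, followed by the
  * admin queue init with the driver's AENQ handler table. The handler name
  * my_unimplemented_aenq_handler and the error handling are placeholders,
  * not part of the API.
  *
  *	static struct ena_aenq_handlers aenq_handlers = {
  *		.unimplemented_handler = my_unimplemented_aenq_handler,
  *	};
  *
  *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
  *	if (!rc)
  *		rc = ena_com_admin_init(ena_dev, &aenq_handlers);
  */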
 
 /* ena_com_admin_destroy - Destroy the admin and the async events queues.
  * @ena_dev: ENA communication layer struct
  *
  * @note: Before calling this method, the caller must validate that the device
  * won't send any additional admin completions/aenq.
  * To achieve that, a FLR is recommended.
  */
 void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
 
 /* ena_com_dev_reset - Perform an FLR on the device.
  * @ena_dev: ENA communication layer struct
  * @reset_reason: Specify the trigger for the reset in case of an error.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 		      enum ena_regs_reset_reason_types reset_reason);
 
 /* ena_com_create_io_queue - Create io queue.
  * @ena_dev: ENA communication layer struct
  * @ctx - create context structure
  *
  * Create the submission and the completion queues.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
 			    struct ena_com_create_io_ctx *ctx);
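 
 /* Illustrative sketch of creating one Tx IO queue pair; the field values are
  * example assumptions, and the Tx enumerator of enum queue_direction is
  * assumed to be ENA_COM_IO_QUEUE_DIRECTION_TX as defined earlier in this
  * header.
  *
  *	struct ena_com_create_io_ctx ctx = {
  *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
  *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
  *		.numa_node	= 0,
  *		.msix_vector	= 0,
  *		.queue_size	= 1024,
  *		.qid		= 0,
  *	};
  *
  *	rc = ena_com_create_io_queue(ena_dev, &ctx);
  *	if (!rc)
  *		rc = ena_com_get_io_handlers(ena_dev, ctx.qid, &io_sq, &io_cq);
  */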
 
 /* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
  * @ena_dev: ENA communication layer struct
  * @qid - the caller virtual queue id.
  */
 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
 
 /* ena_com_get_io_handlers - Return the io queue handlers
  * @ena_dev: ENA communication layer struct
  * @qid - the caller virtual queue id.
  * @io_sq - IO submission queue handler
  * @io_cq - IO completion queue handler.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
 			    struct ena_com_io_sq **io_sq,
 			    struct ena_com_io_cq **io_cq);
 
 /* ena_com_admin_aenq_enable - Enable asynchronous event notifications
  * @ena_dev: ENA communication layer struct
  *
  * After this method is called, AENQ events can be received via the AENQ.
  */
 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
 
 /* ena_com_set_admin_running_state - Set the state of the admin queue
  * @ena_dev: ENA communication layer struct
  *
  * Change the state of the admin queue (enable/disable)
  */
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
 
 /* ena_com_get_admin_running_state - Get the admin queue state
  * @ena_dev: ENA communication layer struct
  *
  * Retrieve the state of the admin queue (enabled/disabled)
  *
  * @return - current admin queue running state (enabled/disabled)
  */
 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
 
 /* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
  * @ena_dev: ENA communication layer struct
  * @polling: Enable/Disable polling mode
  *
  * Set the admin completion mode.
  */
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
 
 /* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode
  * @ena_dev: ENA communication layer struct
  *
  * Get the admin completion mode.
  * If polling mode is on, ena_com_execute_admin_command will poll the
  * admin completion queue for command completion; otherwise it will wait
  * on a wait event.
  *
  * @return state
  */
 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev);
 
 /* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
  * @ena_dev: ENA communication layer struct
  * @polling: Enable/Disable polling mode
  *
  * Set the auto-polling mode.
  * If auto-polling is on, the driver falls back to polling the admin
  * completion queue when an interrupt is missed while data is available.
  */
 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
 					 bool polling);
 
 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
  * @ena_dev: ENA communication layer struct
  *
  * This method goes over the admin completion queue and wakes up all the pending
  * threads that wait on the commands' wait event.
  *
  * @note: Should be called after MSI-X interrupt.
  */
 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
 
 /* ena_com_aenq_intr_handler - AENQ interrupt handler
  * @ena_dev: ENA communication layer struct
  *
  * This method goes over the async event notification queue and calls the proper
  * aenq handler.
  */
 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
 
 /* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
  * @ena_dev: ENA communication layer struct
  *
  * This method aborts all the outstanding admin commands.
  * The caller should then call ena_com_wait_for_abort_completion to make sure
  * all the commands were completed.
  */
 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
 
 /* ena_com_wait_for_abort_completion - Wait for admin commands abort.
  * @ena_dev: ENA communication layer struct
  *
  * This method waits until all the outstanding admin commands are completed.
  */
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
 
 /* ena_com_validate_version - Validate the device parameters
  * @ena_dev: ENA communication layer struct
  *
  * This method verifies the device parameters are the same as the saved
  * parameters in ena_dev.
  * This method is useful after device reset, to validate the device mac address
  * and the device offloads are the same as before the reset.
  *
  * @return - 0 on success, negative value otherwise.
  */
 int ena_com_validate_version(struct ena_com_dev *ena_dev);
 
 /* ena_com_get_link_params - Retrieve physical link parameters.
  * @ena_dev: ENA communication layer struct
  * @resp: Link parameters
  *
  * Retrieve the physical link parameters,
  * like speed, auto-negotiation and full duplex support.
  *
  * @return - 0 on Success and negative value otherwise.
  */
 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
 			    struct ena_admin_get_feat_resp *resp);
 
 /* ena_com_get_dma_width - Retrieve the physical DMA address width the device
  * supports.
  * @ena_dev: ENA communication layer struct
  *
  * Retrieve the maximum physical address bits the device can handle.
  *
  * @return: > 0 on Success and negative value otherwise.
  */
 int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
 
 /* ena_com_set_aenq_config - Set aenq groups configurations
  * @ena_dev: ENA communication layer struct
  * @groups_flag: bit field of flags from enum ena_admin_aenq_group.
  *
  * Configure which aenq event group the driver would like to receive.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
 
 /* ena_com_get_dev_attr_feat - Get device features
  * @ena_dev: ENA communication layer struct
  * @get_feat_ctx: returned context that contains the device features.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx);
 
 /* ena_com_get_dev_basic_stats - Get device basic statistics
  * @ena_dev: ENA communication layer struct
  * @stats: stats return value
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
 				struct ena_admin_basic_stats *stats);
 
 /* ena_com_get_eni_stats - Get extended network interface statistics
  * @ena_dev: ENA communication layer struct
  * @stats: stats return value
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
 			  struct ena_admin_eni_stats *stats);
 
 /* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
  * @ena_dev: ENA communication layer struct
  * @info: ena srd stats and flags
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
 			     struct ena_admin_ena_srd_info *info);
 
 /* ena_com_get_customer_metrics - Get customer metrics for network interface
  * @ena_dev: ENA communication layer struct
  * @buffer: buffer for returned customer metrics
  * @len: size of the buffer
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
 
 /* ena_com_set_dev_mtu - Configure the device mtu.
  * @ena_dev: ENA communication layer struct
  * @mtu: mtu value
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
 
 /* ena_com_get_offload_settings - Retrieve the device offloads capabilities
  * @ena_dev: ENA communication layer struct
  * @offload: offload return value
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
 				 struct ena_admin_feature_offload_desc *offload);
 
 /* ena_com_rss_init - Init RSS
  * @ena_dev: ENA communication layer struct
  * @log_size: indirection log size
  *
  * Allocate RSS/RFS resources.
  * The caller can then configure RSS using ena_com_set_hash_function,
  * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
 
 /* ena_com_rss_destroy - Destroy rss
  * @ena_dev: ENA communication layer struct
  *
  * Free all the RSS/RFS resources.
  */
 void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
 
 /* ena_com_get_current_hash_function - Get RSS hash function
  * @ena_dev: ENA communication layer struct
  *
  * Return the current hash function.
  * @return: 0 or one of the ena_admin_hash_functions values.
  */
 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
 
 /* ena_com_fill_hash_function - Fill RSS hash function
  * @ena_dev: ENA communication layer struct
  * @func: The hash function (Toeplitz or crc)
  * @key: Hash key (for toeplitz hash)
  * @key_len: key length (max length 10 DW)
  * @init_val: initial value for the hash function
  *
  * Fill the ena_dev resources with the desired hash function, hash key, key_len
  * and key initial value (if needed by the hash function).
  * To flush the key into the device the caller should call
  * ena_com_set_hash_function.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 			       enum ena_admin_hash_functions func,
 			       const u8 *key, u16 key_len, u32 init_val);
 
 /* ena_com_set_hash_function - Flush the hash function and its dependencies to
  * the device.
  * @ena_dev: ENA communication layer struct
  *
  * Flush the hash function and its dependencies (key, key length and
  * initial value) if needed.
  *
  * @note: Prior to this method the caller should call ena_com_fill_hash_function
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
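 
 /* Illustrative fill-then-flush sketch for the RSS hash function. The key
  * contents are placeholders (an all-zero key is only a stand-in); a Toeplitz
  * key of ENA_ADMIN_RSS_KEY_PARTS dwords (40 bytes) is assumed, with key_len
  * passed per the ena_com_fill_hash_function contract above.
  *
  *	u8 rss_key[ENA_ADMIN_RSS_KEY_PARTS * sizeof(u32)] = { 0 };
  *
  *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
  *					rss_key, sizeof(rss_key), 0);
  *	if (!rc)
  *		rc = ena_com_set_hash_function(ena_dev);
  */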
 
 /* ena_com_get_hash_function - Retrieve the hash function from the device.
  * @ena_dev: ENA communication layer struct
  * @func: hash function
  *
  * Retrieve the hash function from the device.
  *
  * @note: If the caller called ena_com_fill_hash_function but didn't flush
  * it to the device, the new configuration will be lost.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
 			      enum ena_admin_hash_functions *func);
 
 /* ena_com_get_hash_key - Retrieve the hash key
  * @ena_dev: ENA communication layer struct
  * @key: hash key
  *
  * Retrieve the hash key.
  *
  * @note: If the caller called ena_com_fill_hash_key but didn't flush
  * it to the device, the new configuration will be lost.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
 /* ena_com_fill_hash_ctrl - Fill RSS hash control
  * @ena_dev: ENA communication layer struct.
  * @proto: The protocol to configure.
  * @hash_fields: bit mask of ena_admin_flow_hash_fields
  *
  * Fill the ena_dev resources with the desired hash control (the Ethernet
  * fields that take part in the hash) for a specific protocol.
  * To flush the hash control to the device, the caller should call
  * ena_com_set_hash_ctrl.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
 			   enum ena_admin_flow_hash_proto proto,
 			   u16 hash_fields);
 
 /* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
  * @ena_dev: ENA communication layer struct
  *
  * Flush the hash control (the Ethernet fields that take part in the hash)
  *
  * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
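 
 /* Illustrative fill-then-flush sketch for the hash control of one protocol;
  * the chosen protocol and field mask are arbitrary examples.
  *
  *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
  *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
  *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
  *	if (!rc)
  *		rc = ena_com_set_hash_ctrl(ena_dev);
  */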
 
 /* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
  * @ena_dev: ENA communication layer struct
  * @proto: The protocol to retrieve.
  * @fields: bit mask of ena_admin_flow_hash_fields.
  *
  * Retrieve the hash control from the device.
  *
  * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
  * it to the device, the new configuration will be lost.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
 			  enum ena_admin_flow_hash_proto proto,
 			  u16 *fields);
 
 /* ena_com_set_default_hash_ctrl - Set the hash control to a default
  * configuration.
  * @ena_dev: ENA communication layer struct
  *
  * Fill the ena_dev resources with the default hash control configuration.
  * To flush the hash control to the device, the caller should call
  * ena_com_set_hash_ctrl.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
 
 /* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
  * indirection table
  * @ena_dev: ENA communication layer struct.
  * @entry_idx - indirection table entry.
  * @entry_value - redirection value
  *
  * Fill a single entry of the RSS indirection table in the ena_dev resources.
  * To flush the indirection table to the device, the caller should call
  * ena_com_indirect_table_set.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
 				      u16 entry_idx, u16 entry_value);
 
 /* ena_com_indirect_table_set - Flush the indirection table to the device.
  * @ena_dev: ENA communication layer struct
  *
  * Flush the RSS indirection table to the device.
  * Prior to this method the caller should call ena_com_indirect_table_fill_entry
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
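 
 /* Illustrative sketch of programming the indirection table after a prior
  * ena_com_rss_init(ena_dev, log_size): fill every entry, then flush. The
  * log_size/num_rx_queues/rc locals, and the round-robin mapping of entries
  * to Rx queue ids, are assumptions of this sketch rather than requirements.
  *
  *	u16 i, tbl_size = 1U << log_size;
  *
  *	for (i = 0; i < tbl_size; i++) {
  *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
  *						       i % num_rx_queues);
  *		if (rc)
  *			break;
  *	}
  *	if (!rc)
  *		rc = ena_com_indirect_table_set(ena_dev);
  */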
 
 /* ena_com_indirect_table_get - Retrieve the indirection table from the device.
  * @ena_dev: ENA communication layer struct
  * @ind_tbl: indirection table
  *
  * Retrieve the RSS indirection table from the device.
  *
  * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
  * it to the device, the new configuration will be lost.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
 
 /* ena_com_allocate_host_info - Allocate host info resources.
  * @ena_dev: ENA communication layer struct
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
 
 /* ena_com_allocate_debug_area - Allocate debug area.
  * @ena_dev: ENA communication layer struct
  * @debug_area_size - debug area size.
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
 				u32 debug_area_size);
 
 /* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
  * @ena_dev: ENA communication layer struct
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
 
 /* ena_com_delete_debug_area - Free the debug area resources.
  * @ena_dev: ENA communication layer struct
  *
  * Free the allocated debug area.
  */
 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
 
 /* ena_com_delete_host_info - Free the host info resources.
  * @ena_dev: ENA communication layer struct
  *
  * Free the allocated host info.
  */
 void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
 
 /* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
  * @ena_dev: ENA communication layer struct
  *
  * Free the allocated customer metrics area.
  */
 void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
 
 /* ena_com_set_host_attributes - Update the device with the host
  * attributes (debug area and host info) base address.
  * @ena_dev: ENA communication layer struct
  *
  * @return: 0 on Success and negative value otherwise.
  */
 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
 
 /* ena_com_create_io_cq - Create io completion queue.
  * @ena_dev: ENA communication layer struct
  * @io_cq - io completion queue handler
  *
  * Create IO completion queue.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 			 struct ena_com_io_cq *io_cq);
 
 /* ena_com_destroy_io_cq - Destroy io completion queue.
  * @ena_dev: ENA communication layer struct
  * @io_cq - io completion queue handler
  *
  * Destroy IO completion queue.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
 			  struct ena_com_io_cq *io_cq);
 
 /* ena_com_execute_admin_command - Execute admin command
  * @admin_queue: admin queue.
  * @cmd: the admin command to execute.
  * @cmd_size: the command size.
  * @cmd_comp: command completion return value.
  * @cmd_comp_size: command completion size.
  *
  * Submit an admin command and then wait until the device returns a
  * completion.
  * The completion will be copied into cmd_comp.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
 				  struct ena_admin_aq_entry *cmd,
 				  size_t cmd_size,
 				  struct ena_admin_acq_entry *cmd_comp,
 				  size_t cmd_comp_size);
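 
 /* Illustrative sketch of issuing a raw admin command; this is essentially
  * what the ena_com_get_* helpers above do internally, so a driver would
  * normally use those helpers instead. The basic-stats command shown here
  * reuses the descriptors defined in ena_admin_defs.h below.
  *
  *	struct ena_admin_aq_get_stats_cmd cmd = { 0 };
  *	struct ena_admin_acq_get_stats_resp resp = { 0 };
  *
  *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
  *	cmd.type = ENA_ADMIN_GET_STATS_TYPE_BASIC;
  *	cmd.scope = ENA_ADMIN_ETH_TRAFFIC;
  *	cmd.device_id = 0xFFFF;
  *
  *	rc = ena_com_execute_admin_command(&ena_dev->admin_queue,
  *					   (struct ena_admin_aq_entry *)&cmd,
  *					   sizeof(cmd),
  *					   (struct ena_admin_acq_entry *)&resp,
  *					   sizeof(resp));
  */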
 
 /* ena_com_init_interrupt_moderation - Init interrupt moderation
  * @ena_dev: ENA communication layer struct
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
 
 /* ena_com_interrupt_moderation_supported - Return if interrupt moderation
  * capability is supported by the device.
  *
  * @return - supported or not.
  */
 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
 
 /* ena_com_update_nonadaptive_moderation_interval_tx - Update the
  * non-adaptive interval in Tx direction.
  * @ena_dev: ENA communication layer struct
  * @tx_coalesce_usecs: Interval in usec.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
 						      u32 tx_coalesce_usecs);
 
 /* ena_com_update_nonadaptive_moderation_interval_rx - Update the
  * non-adaptive interval in Rx direction.
  * @ena_dev: ENA communication layer struct
  * @rx_coalesce_usecs: Interval in usec.
  *
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
 						      u32 rx_coalesce_usecs);
 
 /* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
  * non-adaptive interval in Tx direction.
  * @ena_dev: ENA communication layer struct
  *
  * @return - interval in usec
  */
 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
 
 /* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
  * non-adaptive interval in Rx direction.
  * @ena_dev: ENA communication layer struct
  *
  * @return - interval in usec
  */
 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
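 
 /* Illustrative non-adaptive moderation setup sketch; the 64/0 usec values
  * are arbitrary examples. The programmed intervals later feed
  * ena_com_update_intr_reg() below.
  *
  *	rc = ena_com_init_interrupt_moderation(ena_dev);
  *	if (!rc) {
  *		ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
  *		ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 0);
  *	}
  */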
 
 /* ena_com_config_dev_mode - Configure the placement policy of the device.
  * @ena_dev: ENA communication layer struct
  * @llq_features: LLQ feature descriptor, retrieved via
  *		   ena_com_get_dev_attr_feat.
  * @llq_default_config: The default driver LLQ parameters configuration
  */
 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 			    struct ena_admin_feature_llq_desc *llq_features,
 			    struct ena_llq_configurations *llq_default_config);
 
 /* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
  * @io_sq: IO submit queue struct
  *
  * @return - ena_com_dev struct extracted from io_sq
  */
 static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
 {
 	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
 }
 
 /* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
  * @io_cq: IO completion queue struct
  *
  * @return - ena_com_dev struct extracted from io_cq
  */
 static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
 {
 	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
 }
 
 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->adaptive_coalescing;
 }
 
 static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
 {
 	ena_dev->adaptive_coalescing = true;
 }
 
 static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
 {
 	ena_dev->adaptive_coalescing = false;
 }
 
 /* ena_com_get_cap - query whether device supports a capability.
  * @ena_dev: ENA communication layer struct
  * @cap_id: enum value representing the capability
  *
  * @return - true if capability is supported or false otherwise
  */
 static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
 				   enum ena_admin_aq_caps_id cap_id)
 {
 	return !!(ena_dev->capabilities & BIT(cap_id));
 }
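 
 /* Illustrative capability check sketch: the same pattern applies to any
  * ena_admin_aq_caps_id value, including ENA_ADMIN_EXTENDED_RESET_REASONS
  * added in ena_admin_defs.h below; whether a driver gates reset-reason
  * reporting on it is an assumption of this sketch.
  *
  *	bool has_ext_reset_reasons =
  *		ena_com_get_cap(ena_dev, ENA_ADMIN_EXTENDED_RESET_REASONS);
  */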
 
 /* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
  * @ena_dev: ENA communication layer struct
  * @metric_id: enum value representing the customer metric
  *
  * @return - true if customer metric is supported or false otherwise
  */
 static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
 						       enum ena_admin_customer_metrics_id metric_id)
 {
 	return !!(ena_dev->customer_metrics.supported_metrics & BIT64(metric_id));
 }
 
 /* ena_com_get_customer_metric_count - return the number of supported customer metrics.
  * @ena_dev: ENA communication layer struct
  *
  * @return - the number of supported customer metrics
  */
 static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
 {
 	return ENA_BITS_PER_U64(ena_dev->customer_metrics.supported_metrics);
 }
 
 /* ena_com_update_intr_reg - Prepare interrupt register
  * @intr_reg: interrupt register to update.
  * @rx_delay_interval: Rx interval in usecs
  * @tx_delay_interval: Tx interval in usecs
  * @unmask: unmask enable/disable
  * @no_moderation_update: false - indicates that any of the TX/RX intervals
  *                        was updated, true - otherwise
  *
  * Prepare interrupt update register with the supplied parameters.
  */
 static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
 					   u32 rx_delay_interval,
 					   u32 tx_delay_interval,
 					   bool unmask,
 					   bool no_moderation_update)
 {
 	intr_reg->intr_control = 0;
 	intr_reg->intr_control |= rx_delay_interval &
 		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
 
 	intr_reg->intr_control |=
 		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
 		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
 
 	if (unmask)
 		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
 
 	intr_reg->intr_control |=
 		(((u32)no_moderation_update) << ENA_ETH_IO_INTR_REG_NO_MODERATION_UPDATE_SHIFT) &
 			ENA_ETH_IO_INTR_REG_NO_MODERATION_UPDATE_MASK;
 }
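 
 /* Illustrative usage sketch for ena_com_update_intr_reg(): prepare the
  * register image from the configured non-adaptive intervals and request an
  * unmask. How the prepared value is then written to the device is handled
  * by the I/O path helpers outside this header.
  *
  *	struct ena_eth_io_intr_reg intr_reg;
  *
  *	ena_com_update_intr_reg(&intr_reg,
  *				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev),
  *				ena_com_get_nonadaptive_moderation_interval_tx(ena_dev),
  *				true, false);
  */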
 
 static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
 {
 	u16 size, buffers_num;
 	u8 *buf;
 
 	size = bounce_buf_ctrl->buffer_size;
 	buffers_num = bounce_buf_ctrl->buffers_num;
 
 	buf = bounce_buf_ctrl->base_buffer +
 		(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
 
 	prefetchw(bounce_buf_ctrl->base_buffer +
 		(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
 
 	return buf;
 }
 
 #ifdef ENA_EXTENDED_STATS
 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
 				   u32 len);
 
 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
 					  u32 funct_queue);
 #endif
 #if defined(__cplusplus)
 }
 #endif /* __cplusplus */
 #endif /* !(ENA_COM) */
diff --git a/ena_defs/ena_admin_defs.h b/ena_defs/ena_admin_defs.h
index 302ab6c2efa1..edcb9ef72e48 100644
--- a/ena_defs/ena_admin_defs.h
+++ b/ena_defs/ena_admin_defs.h
@@ -1,1901 +1,1902 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * * Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in
  * the documentation and/or other materials provided with the
  * distribution.
  * * Neither the name of copyright holder nor the names of its
  * contributors may be used to endorse or promote products derived
  * from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _ENA_ADMIN_H_
 #define _ENA_ADMIN_H_
 
 #define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32
 #define ENA_ADMIN_EXTRA_PROPERTIES_COUNT     32
 
 #define ENA_ADMIN_RSS_KEY_PARTS              10
 
 #define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F
 #define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F
 
  /* customer metrics - in correlation with
   * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
   */
 enum ena_admin_customer_metrics_id {
 	ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED         = 0,
 	ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED        = 1,
 	ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED           = 2,
 	ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED     = 3,
 	ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED     = 4,
 	ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE    = 5,
 };
 
 enum ena_admin_aq_opcode {
 	ENA_ADMIN_CREATE_SQ                         = 1,
 	ENA_ADMIN_DESTROY_SQ                        = 2,
 	ENA_ADMIN_CREATE_CQ                         = 3,
 	ENA_ADMIN_DESTROY_CQ                        = 4,
 	ENA_ADMIN_GET_FEATURE                       = 8,
 	ENA_ADMIN_SET_FEATURE                       = 9,
 	ENA_ADMIN_GET_STATS                         = 11,
 };
 
 enum ena_admin_aq_completion_status {
 	ENA_ADMIN_SUCCESS                           = 0,
 	ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE       = 1,
 	ENA_ADMIN_BAD_OPCODE                        = 2,
 	ENA_ADMIN_UNSUPPORTED_OPCODE                = 3,
 	ENA_ADMIN_MALFORMED_REQUEST                 = 4,
 	/* Additional status is provided in ACQ entry extended_status */
 	ENA_ADMIN_ILLEGAL_PARAMETER                 = 5,
 	ENA_ADMIN_UNKNOWN_ERROR                     = 6,
 	ENA_ADMIN_RESOURCE_BUSY                     = 7,
 };
 
 /* subcommands for the set/get feature admin commands */
 enum ena_admin_aq_feature_id {
 	ENA_ADMIN_DEVICE_ATTRIBUTES                 = 1,
 	ENA_ADMIN_MAX_QUEUES_NUM                    = 2,
 	ENA_ADMIN_HW_HINTS                          = 3,
 	ENA_ADMIN_LLQ                               = 4,
 	ENA_ADMIN_EXTRA_PROPERTIES_STRINGS          = 5,
 	ENA_ADMIN_EXTRA_PROPERTIES_FLAGS            = 6,
 	ENA_ADMIN_MAX_QUEUES_EXT                    = 7,
 	ENA_ADMIN_RSS_HASH_FUNCTION                 = 10,
 	ENA_ADMIN_STATELESS_OFFLOAD_CONFIG          = 11,
 	ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG      = 12,
 	ENA_ADMIN_MTU                               = 14,
 	ENA_ADMIN_RSS_HASH_INPUT                    = 18,
 	ENA_ADMIN_INTERRUPT_MODERATION              = 20,
 	ENA_ADMIN_AENQ_CONFIG                       = 26,
 	ENA_ADMIN_LINK_CONFIG                       = 27,
 	ENA_ADMIN_HOST_ATTR_CONFIG                  = 28,
 	ENA_ADMIN_PHC_CONFIG                        = 29,
 	ENA_ADMIN_FEATURES_OPCODE_NUM               = 32,
 };
 
 /* feature version for the set/get ENA_ADMIN_LLQ feature admin commands */
 enum ena_admin_llq_feature_version {
 	/* legacy base version in older drivers */
 	ENA_ADMIN_LLQ_FEATURE_VERSION_0_LEGACY      = 0,
 	/* support entry_size recommendation by device */
 	ENA_ADMIN_LLQ_FEATURE_VERSION_1             = 1,
 };
 
 /* device capabilities */
 enum ena_admin_aq_caps_id {
 	ENA_ADMIN_ENI_STATS                         = 0,
 	/* ENA SRD customer metrics */
 	ENA_ADMIN_ENA_SRD_INFO                      = 1,
 	ENA_ADMIN_CUSTOMER_METRICS                  = 2,
+	ENA_ADMIN_EXTENDED_RESET_REASONS	    = 3,
 };
 
 enum ena_admin_placement_policy_type {
 	/* descriptors and headers are in host memory */
 	ENA_ADMIN_PLACEMENT_POLICY_HOST             = 1,
 	/* descriptors and headers are in device memory (a.k.a Low Latency
 	 * Queue)
 	 */
 	ENA_ADMIN_PLACEMENT_POLICY_DEV              = 3,
 };
 
 enum ena_admin_link_types {
 	ENA_ADMIN_LINK_SPEED_1G                     = 0x1,
 	ENA_ADMIN_LINK_SPEED_2_HALF_G               = 0x2,
 	ENA_ADMIN_LINK_SPEED_5G                     = 0x4,
 	ENA_ADMIN_LINK_SPEED_10G                    = 0x8,
 	ENA_ADMIN_LINK_SPEED_25G                    = 0x10,
 	ENA_ADMIN_LINK_SPEED_40G                    = 0x20,
 	ENA_ADMIN_LINK_SPEED_50G                    = 0x40,
 	ENA_ADMIN_LINK_SPEED_100G                   = 0x80,
 	ENA_ADMIN_LINK_SPEED_200G                   = 0x100,
 	ENA_ADMIN_LINK_SPEED_400G                   = 0x200,
 };
 
 enum ena_admin_completion_policy_type {
 	/* completion queue entry for each sq descriptor */
 	ENA_ADMIN_COMPLETION_POLICY_DESC            = 0,
 	/* completion queue entry upon request in sq descriptor */
 	ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND  = 1,
 	/* current queue head pointer is updated in OS memory upon sq
 	 * descriptor request
 	 */
 	ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND  = 2,
 	/* current queue head pointer is updated in OS memory for each sq
 	 * descriptor
 	 */
 	ENA_ADMIN_COMPLETION_POLICY_HEAD            = 3,
 };
 
 /* basic stats return ena_admin_basic_stats while extended stats return a
  * buffer (string format) with additional statistics per queue and per
  * device id
  */
 enum ena_admin_get_stats_type {
 	ENA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
 	ENA_ADMIN_GET_STATS_TYPE_EXTENDED           = 1,
 	/* extra HW stats for specific network interface */
 	ENA_ADMIN_GET_STATS_TYPE_ENI                = 2,
 	/* extra HW stats for ENA SRD */
 	ENA_ADMIN_GET_STATS_TYPE_ENA_SRD            = 3,
 	ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS   = 4,
 
 };
 
 enum ena_admin_get_stats_scope {
 	ENA_ADMIN_SPECIFIC_QUEUE                    = 0,
 	ENA_ADMIN_ETH_TRAFFIC                       = 1,
 };
 
 enum ena_admin_get_phc_type {
 	ENA_ADMIN_PHC_TYPE_READLESS                 = 0,
 };
 
 /* ENA SRD configuration for ENI */
 enum ena_admin_ena_srd_flags {
 	/* Feature enabled */
 	ENA_ADMIN_ENA_SRD_ENABLED                   = BIT(0),
 	/* UDP support enabled */
 	ENA_ADMIN_ENA_SRD_UDP_ENABLED               = BIT(1),
 	/* Bypass Rx UDP ordering */
 	ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2),
 };
 
 struct ena_admin_aq_common_desc {
 	/* 11:0 : command_id
 	 * 15:12 : reserved12
 	 */
 	uint16_t command_id;
 
 	/* as appears in ena_admin_aq_opcode */
 	uint8_t opcode;
 
 	/* 0 : phase
 	 * 1 : ctrl_data - control buffer address valid
 	 * 2 : ctrl_data_indirect - control buffer address
 	 *    points to list of pages with addresses of control
 	 *    buffers
 	 * 7:3 : reserved3
 	 */
 	uint8_t flags;
 };
 
 /* used in ena_admin_aq_entry. Can point directly to control data, or to a
  * page list chunk. Used also at the end of indirect mode page list chunks,
  * for chaining.
  */
 struct ena_admin_ctrl_buff_info {
 	uint32_t length;
 
 	struct ena_common_mem_addr address;
 };
 
 struct ena_admin_sq {
 	uint16_t sq_idx;
 
 	/* 4:0 : reserved
 	 * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
 	 */
 	uint8_t sq_identity;
 
 	uint8_t reserved1;
 };
 
 struct ena_admin_aq_entry {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	union {
 		uint32_t inline_data_w1[3];
 
 		struct ena_admin_ctrl_buff_info control_buffer;
 	} u;
 
 	uint32_t inline_data_w4[12];
 };
 
 struct ena_admin_acq_common_desc {
 	/* command identifier to associate it with the aq descriptor
 	 * 11:0 : command_id
 	 * 15:12 : reserved12
 	 */
 	uint16_t command;
 
 	uint8_t status;
 
 	/* 0 : phase
 	 * 7:1 : reserved1
 	 */
 	uint8_t flags;
 
 	uint16_t extended_status;
 
 	/* indicates to the driver which AQ entry has been consumed by the
 	 * device and could be reused
 	 */
 	uint16_t sq_head_indx;
 };
 
 struct ena_admin_acq_entry {
 	struct ena_admin_acq_common_desc acq_common_descriptor;
 
 	uint32_t response_specific_data[14];
 };
 
 struct ena_admin_aq_create_sq_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	/* 4:0 : reserved0_w1
 	 * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
 	 */
 	uint8_t sq_identity;
 
 	uint8_t reserved8_w1;
 
 	/* 3:0 : placement_policy - Describing where the SQ
 	 *    descriptor ring and the SQ packet headers reside:
 	 *    0x1 - descriptors and headers are in OS memory,
 	 *    0x3 - descriptors and headers in device memory
 	 *    (a.k.a Low Latency Queue)
 	 * 6:4 : completion_policy - Describing what policy
 	 *    to use for generating completion entries (cqe) in
 	 *    the CQ associated with this SQ: 0x0 - cqe for each
 	 *    sq descriptor, 0x1 - cqe upon request in sq
 	 *    descriptor, 0x2 - current queue head pointer is
 	 *    updated in OS memory upon sq descriptor request
 	 *    0x3 - current queue head pointer is updated in OS
 	 *    memory for each sq descriptor
 	 * 7 : reserved15_w1
 	 */
 	uint8_t sq_caps_2;
 
 	/* 0 : is_physically_contiguous - Describes whether the
 	 *    queue ring memory is allocated in physically
 	 *    contiguous pages or split.
 	 * 7:1 : reserved17_w1
 	 */
 	uint8_t sq_caps_3;
 
 	/* associated completion queue id. This CQ must be created prior to SQ
 	 * creation
 	 */
 	uint16_t cq_idx;
 
 	/* submission queue depth in entries */
 	uint16_t sq_depth;
 
 	/* SQ physical base address in OS memory. This field should not be
 	 * used for Low Latency queues. Has to be page aligned.
 	 */
 	struct ena_common_mem_addr sq_ba;
 
 	/* specifies queue head writeback location in OS memory. Valid if
 	 * completion_policy is set to completion_policy_head_on_demand or
 	 * completion_policy_head. Has to be cache aligned
 	 */
 	struct ena_common_mem_addr sq_head_writeback;
 
 	uint32_t reserved0_w7;
 
 	uint32_t reserved0_w8;
 };
 
 enum ena_admin_sq_direction {
 	ENA_ADMIN_SQ_DIRECTION_TX                   = 1,
 	ENA_ADMIN_SQ_DIRECTION_RX                   = 2,
 };
 
 struct ena_admin_acq_create_sq_resp_desc {
 	struct ena_admin_acq_common_desc acq_common_desc;
 
 	uint16_t sq_idx;
 
 	uint16_t reserved;
 
 	/* queue doorbell address as an offset to PCIe MMIO REG BAR */
 	uint32_t sq_doorbell_offset;
 
 	/* low latency queue ring base address as an offset to PCIe MMIO
 	 * LLQ_MEM BAR
 	 */
 	uint32_t llq_descriptors_offset;
 
 	/* low latency queue headers' memory as an offset to PCIe MMIO
 	 * LLQ_MEM BAR
 	 */
 	uint32_t llq_headers_offset;
 };
 
 struct ena_admin_aq_destroy_sq_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	struct ena_admin_sq sq;
 };
 
 struct ena_admin_acq_destroy_sq_resp_desc {
 	struct ena_admin_acq_common_desc acq_common_desc;
 };
 
 struct ena_admin_aq_create_cq_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	/* 4:0 : reserved5
 	 * 5 : interrupt_mode_enabled - if set, cq operates
 	 *    in interrupt mode, otherwise - polling
 	 * 7:6 : reserved6
 	 */
 	uint8_t cq_caps_1;
 
 	/* 4:0 : cq_entry_size_words - size of CQ entry in
 	 *    32-bit words, valid values: 4, 8.
 	 * 7:5 : reserved7
 	 */
 	uint8_t cq_caps_2;
 
 	/* completion queue depth in # of entries. must be power of 2 */
 	uint16_t cq_depth;
 
 	/* msix vector assigned to this cq */
 	uint32_t msix_vector;
 
 	/* cq physical base address in OS memory. CQ must be physically
 	 * contiguous
 	 */
 	struct ena_common_mem_addr cq_ba;
 };
 
 struct ena_admin_acq_create_cq_resp_desc {
 	struct ena_admin_acq_common_desc acq_common_desc;
 
 	uint16_t cq_idx;
 
 	/* actual cq depth in number of entries */
 	uint16_t cq_actual_depth;
 
 	uint32_t numa_node_register_offset;
 
 	uint32_t cq_head_db_register_offset;
 
 	uint32_t cq_interrupt_unmask_register_offset;
 };
 
 struct ena_admin_aq_destroy_cq_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	uint16_t cq_idx;
 
 	uint16_t reserved1;
 };
 
 struct ena_admin_acq_destroy_cq_resp_desc {
 	struct ena_admin_acq_common_desc acq_common_desc;
 };
 
 /* ENA AQ Get Statistics command. Extended statistics are placed in control
  * buffer pointed by AQ entry
  */
 struct ena_admin_aq_get_stats_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	union {
 		/* command specific inline data */
 		uint32_t inline_data_w1[3];
 
 		struct ena_admin_ctrl_buff_info control_buffer;
 	} u;
 
 	/* stats type as defined in enum ena_admin_get_stats_type */
 	uint8_t type;
 
 	/* stats scope defined in enum ena_admin_get_stats_scope */
 	uint8_t scope;
 
 	uint16_t reserved3;
 
 	/* queue id. used when scope is specific_queue */
 	uint16_t queue_idx;
 
 	/* device id, value 0xFFFF means the current device. Only a privileged
 	 * device can get stats of another device
 	 */
 	uint16_t device_id;
 
 	/* a bitmap representing the requested metric values */
 	uint64_t requested_metrics;
 };
 
 /* Basic Statistics Command. */
 struct ena_admin_basic_stats {
 	uint32_t tx_bytes_low;
 
 	uint32_t tx_bytes_high;
 
 	uint32_t tx_pkts_low;
 
 	uint32_t tx_pkts_high;
 
 	uint32_t rx_bytes_low;
 
 	uint32_t rx_bytes_high;
 
 	uint32_t rx_pkts_low;
 
 	uint32_t rx_pkts_high;
 
 	uint32_t rx_drops_low;
 
 	uint32_t rx_drops_high;
 
 	uint32_t tx_drops_low;
 
 	uint32_t tx_drops_high;
 
 	uint32_t rx_overruns_low;
 
 	uint32_t rx_overruns_high;
 };
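 
 /* The 64-bit counters above are split into low/high 32-bit halves; an
  * illustrative way a consumer might reassemble one of them (the local name
  * is an example only):
  *
  *	uint64_t rx_bytes = ((uint64_t)basic_stats.rx_bytes_high << 32) |
  *			    basic_stats.rx_bytes_low;
  */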
 
 /* ENI Statistics Command. */
 struct ena_admin_eni_stats {
 	/* The number of packets shaped due to inbound aggregate BW
 	 * allowance being exceeded
 	 */
 	uint64_t bw_in_allowance_exceeded;
 
 	/* The number of packets shaped due to outbound aggregate BW
 	 * allowance being exceeded
 	 */
 	uint64_t bw_out_allowance_exceeded;
 
 	/* The number of packets shaped due to PPS allowance being exceeded */
 	uint64_t pps_allowance_exceeded;
 
 	/* The number of packets shaped due to connection tracking
 	 * allowance being exceeded and leading to failure in establishment
 	 * of new connections
 	 */
 	uint64_t conntrack_allowance_exceeded;
 
 	/* The number of packets shaped due to linklocal packet rate
 	 * allowance being exceeded
 	 */
 	uint64_t linklocal_allowance_exceeded;
 };
 
 struct ena_admin_ena_srd_stats {
 	/* Number of packets transmitted over ENA SRD */
 	uint64_t ena_srd_tx_pkts;
 
 	/* Number of packets transmitted or could have been
 	 * transmitted over ENA SRD
 	 */
 	uint64_t ena_srd_eligible_tx_pkts;
 
 	/* Number of packets received over ENA SRD */
 	uint64_t ena_srd_rx_pkts;
 
 	/* Percentage of the ENA SRD resources that is in use */
 	uint64_t ena_srd_resource_utilization;
 };
 
 /* ENA SRD Statistics Command */
 struct ena_admin_ena_srd_info {
 	/* ENA SRD configuration bitmap. See ena_admin_ena_srd_flags for
 	 * details
 	 */
 	uint64_t flags;
 
 	struct ena_admin_ena_srd_stats ena_srd_stats;
 };
 
 /* Customer Metrics Command. */
 struct ena_admin_customer_metrics {
 	/* A bitmap representing the reported customer metrics according to
 	 * the order they are reported
 	 */
 	uint64_t reported_metrics;
 };
 
 struct ena_admin_acq_get_stats_resp {
 	struct ena_admin_acq_common_desc acq_common_desc;
 
 	union {
 		uint64_t raw[7];
 
 		struct ena_admin_basic_stats basic_stats;
 
 		struct ena_admin_eni_stats eni_stats;
 
 		struct ena_admin_ena_srd_info ena_srd_info;
 
 		struct ena_admin_customer_metrics customer_metrics;
 	} u;
 };
 
 struct ena_admin_get_set_feature_common_desc {
 	/* 1:0 : select - 0x1 - current value; 0x3 - default
 	 *    value
 	 * 7:3 : reserved3
 	 */
 	uint8_t flags;
 
 	/* as appears in ena_admin_aq_feature_id */
 	uint8_t feature_id;
 
 	/* The driver specifies the max feature version it supports and the
 	 * device responds with the currently supported feature version. The
 	 * field is zero based
 	 */
 	uint8_t feature_version;
 
 	uint8_t reserved8;
 };
 
 struct ena_admin_device_attr_feature_desc {
 	uint32_t impl_id;
 
 	uint32_t device_version;
 
 	/* bitmap of ena_admin_aq_feature_id, which represents supported
 	 * subcommands for the set/get feature admin commands.
 	 */
 	uint32_t supported_features;
 
 	/* bitmap of ena_admin_aq_caps_id, which represents device
 	 * capabilities.
 	 */
 	uint32_t capabilities;
 
 	/* Indicates how many bits are used for physical address access. */
 	uint32_t phys_addr_width;
 
 	/* Indicates how many bits are used for virtual address access. */
 	uint32_t virt_addr_width;
 
 	/* unicast MAC address (in Network byte order) */
 	uint8_t mac_addr[6];
 
 	uint8_t reserved7[2];
 
 	uint32_t max_mtu;
 };
 
 enum ena_admin_llq_header_location {
 	/* header is in descriptor list */
 	ENA_ADMIN_INLINE_HEADER                     = 1,
 	/* header in a separate ring, implies 16B descriptor list entry */
 	ENA_ADMIN_HEADER_RING                       = 2,
 };
 
 enum ena_admin_llq_ring_entry_size {
 	ENA_ADMIN_LIST_ENTRY_SIZE_128B              = 1,
 	ENA_ADMIN_LIST_ENTRY_SIZE_192B              = 2,
 	ENA_ADMIN_LIST_ENTRY_SIZE_256B              = 4,
 };
 
 enum ena_admin_llq_num_descs_before_header {
 	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0     = 0,
 	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1     = 1,
 	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2     = 2,
 	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4     = 4,
 	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8     = 8,
 };
 
 /* packet descriptor list entry always starts with one or more descriptors,
  * followed by a header. The rest of the descriptors are located in the
  * beginning of the subsequent entry. Stride refers to how the rest of the
  * descriptors are placed. This field is relevant only for inline header
  * mode
  */
 enum ena_admin_llq_stride_ctrl {
 	ENA_ADMIN_SINGLE_DESC_PER_ENTRY             = 1,
 	ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY          = 2,
 };
 
 enum ena_admin_accel_mode_feat {
 	ENA_ADMIN_DISABLE_META_CACHING              = 0,
 	ENA_ADMIN_LIMIT_TX_BURST                    = 1,
 };
 
 struct ena_admin_accel_mode_get {
 	/* bit field of enum ena_admin_accel_mode_feat */
 	uint16_t supported_flags;
 
 	/* maximum burst size between two doorbells. The size is in bytes */
 	uint16_t max_tx_burst_size;
 };
 
 struct ena_admin_accel_mode_set {
 	/* bit field of enum ena_admin_accel_mode_feat */
 	uint16_t enabled_flags;
 
 	uint16_t reserved;
 };
 
 struct ena_admin_accel_mode_req {
 	union {
 		uint32_t raw[2];
 
 		struct ena_admin_accel_mode_get get;
 
 		struct ena_admin_accel_mode_set set;
 	} u;
 };
 
 struct ena_admin_feature_llq_desc {
 	uint32_t max_llq_num;
 
 	uint32_t max_llq_depth;
 
 	/* specify the header locations the device supports. bitfield of enum
 	 * ena_admin_llq_header_location.
 	 */
 	uint16_t header_location_ctrl_supported;
 
 	/* the header location the driver selected to use. */
 	uint16_t header_location_ctrl_enabled;
 
 	/* if inline header is specified - this is the size of descriptor list
 	 * entry. If header in a separate ring is specified - this is the size
 	 * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size.
 	 * specify the entry sizes the device supports
 	 */
 	uint16_t entry_size_ctrl_supported;
 
 	/* the entry size the driver selected to use. */
 	uint16_t entry_size_ctrl_enabled;
 
 	/* valid only if inline header is specified. The first entry associated
 	 * with the packet includes descriptors and header. The rest of the
 	 * entries are occupied by descriptors. This parameter defines the max
 	 * number of descriptors preceding the header in the first entry. The
 	 * field is a bitfield of enum ena_admin_llq_num_descs_before_header
 	 * and specifies the values the device supports
 	 */
 	uint16_t desc_num_before_header_supported;
 
 	/* the desired value the driver selected to use */
 	uint16_t desc_num_before_header_enabled;
 
 	/* valid only if inline was chosen. bitfield of enum
 	 * ena_admin_llq_stride_ctrl
 	 */
 	uint16_t descriptors_stride_ctrl_supported;
 
 	/* the stride control the driver selected to use */
 	uint16_t descriptors_stride_ctrl_enabled;
 
 	/* feature version of the device response to GET/SET commands. */
 	uint8_t feature_version;
 
 	/* llq entry size recommended by the device,
 	 * values correlated to enum ena_admin_llq_ring_entry_size.
 	 * used only for GET command.
 	 */
 	uint8_t entry_size_recommended;
 
 	/* reserved */
 	uint8_t reserved1[2];
 
 	/* accelerated low latency queues requirement. driver needs to
 	 * support those requirements in order to use accelerated llq
 	 */
 	struct ena_admin_accel_mode_req accel_mode;
 };
 
 struct ena_admin_queue_ext_feature_fields {
 	uint32_t max_tx_sq_num;
 
 	uint32_t max_tx_cq_num;
 
 	uint32_t max_rx_sq_num;
 
 	uint32_t max_rx_cq_num;
 
 	uint32_t max_tx_sq_depth;
 
 	uint32_t max_tx_cq_depth;
 
 	uint32_t max_rx_sq_depth;
 
 	uint32_t max_rx_cq_depth;
 
 	uint32_t max_tx_header_size;
 
 	/* Maximum Descriptors number, including meta descriptor, allowed for a
 	 * single Tx packet
 	 */
 	uint16_t max_per_packet_tx_descs;
 
 	/* Maximum Descriptors number allowed for a single Rx packet */
 	uint16_t max_per_packet_rx_descs;
 };
 
 struct ena_admin_queue_feature_desc {
 	uint32_t max_sq_num;
 
 	uint32_t max_sq_depth;
 
 	uint32_t max_cq_num;
 
 	uint32_t max_cq_depth;
 
 	uint32_t max_legacy_llq_num;
 
 	uint32_t max_legacy_llq_depth;
 
 	uint32_t max_header_size;
 
 	/* Maximum Descriptors number, including meta descriptor, allowed for a
 	 * single Tx packet
 	 */
 	uint16_t max_packet_tx_descs;
 
 	/* Maximum Descriptors number allowed for a single Rx packet */
 	uint16_t max_packet_rx_descs;
 };
 
 struct ena_admin_set_feature_mtu_desc {
 	/* exclude L2 */
 	uint32_t mtu;
 };
 
 struct ena_admin_get_extra_properties_strings_desc {
 	uint32_t count;
 };
 
 struct ena_admin_get_extra_properties_flags_desc {
 	uint32_t flags;
 };
 
 struct ena_admin_set_feature_host_attr_desc {
 	/* host OS info base address in OS memory. host info is 4KB of
 	 * physically contiguous memory
 	 */
 	struct ena_common_mem_addr os_info_ba;
 
 	/* host debug area base address in OS memory. debug area must be
 	 * physically contiguous
 	 */
 	struct ena_common_mem_addr debug_ba;
 
 	/* debug area size */
 	uint32_t debug_area_size;
 };
 
 struct ena_admin_feature_intr_moder_desc {
 	/* interrupt delay granularity in usec */
 	uint16_t intr_delay_resolution;
 
 	uint16_t reserved;
 };
 
 struct ena_admin_get_feature_link_desc {
 	/* Link speed in Mbps */
 	uint32_t speed;
 
 	/* bit field of enum ena_admin_link types */
 	uint32_t supported;
 
 	/* 0 : autoneg
 	 * 1 : duplex - Full Duplex
 	 * 31:2 : reserved2
 	 */
 	uint32_t flags;
 };
 
 struct ena_admin_feature_aenq_desc {
 	/* bitmask for AENQ groups the device can report */
 	uint32_t supported_groups;
 
 	/* bitmask for AENQ groups to report */
 	uint32_t enabled_groups;
 };
 
 struct ena_admin_feature_offload_desc {
 	/* 0 : TX_L3_csum_ipv4
 	 * 1 : TX_L4_ipv4_csum_part - The checksum field
 	 *    should be initialized with pseudo header checksum
 	 * 2 : TX_L4_ipv4_csum_full
 	 * 3 : TX_L4_ipv6_csum_part - The checksum field
 	 *    should be initialized with pseudo header checksum
 	 * 4 : TX_L4_ipv6_csum_full
 	 * 5 : tso_ipv4
 	 * 6 : tso_ipv6
 	 * 7 : tso_ecn
 	 */
 	uint32_t tx;
 
 	/* Receive side supported stateless offload
 	 * 0 : RX_L3_csum_ipv4 - IPv4 checksum
 	 * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
 	 * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
 	 * 3 : RX_hash - Hash calculation
 	 */
 	uint32_t rx_supported;
 
 	uint32_t rx_enabled;
 };
 
 enum ena_admin_hash_functions {
 	ENA_ADMIN_TOEPLITZ                          = 1,
 	ENA_ADMIN_CRC32                             = 2,
 };
 
 struct ena_admin_feature_rss_flow_hash_control {
 	uint32_t key_parts;
 
 	uint32_t reserved;
 
 	uint32_t key[ENA_ADMIN_RSS_KEY_PARTS];
 };
 
 struct ena_admin_feature_rss_flow_hash_function {
 	/* 7:0 : funcs - bitmask of ena_admin_hash_functions */
 	uint32_t supported_func;
 
 	/* 7:0 : selected_func - bitmask of
 	 *    ena_admin_hash_functions
 	 */
 	uint32_t selected_func;
 
 	/* initial value */
 	uint32_t init_val;
 };
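
 /* Example sketch (hypothetical helper): per the comments above, both
  * supported_func and selected_func are bitmasks indexed by
  * enum ena_admin_hash_functions, so checking for Toeplitz support and
  * selecting it could look as follows. BIT() is assumed to be provided by
  * the platform header.
  */
 static inline int
 ena_example_select_toeplitz(struct ena_admin_feature_rss_flow_hash_function *p)
 {
 	if (!(p->supported_func & BIT(ENA_ADMIN_TOEPLITZ)))
 		return -1;

 	p->selected_func = BIT(ENA_ADMIN_TOEPLITZ);
 	return 0;
 }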
 
 /* RSS flow hash protocols */
 enum ena_admin_flow_hash_proto {
 	ENA_ADMIN_RSS_TCP4                          = 0,
 	ENA_ADMIN_RSS_UDP4                          = 1,
 	ENA_ADMIN_RSS_TCP6                          = 2,
 	ENA_ADMIN_RSS_UDP6                          = 3,
 	ENA_ADMIN_RSS_IP4                           = 4,
 	ENA_ADMIN_RSS_IP6                           = 5,
 	ENA_ADMIN_RSS_IP4_FRAG                      = 6,
 	ENA_ADMIN_RSS_NOT_IP                        = 7,
 	/* TCPv6 with extension header */
 	ENA_ADMIN_RSS_TCP6_EX                       = 8,
 	/* IPv6 with extension header */
 	ENA_ADMIN_RSS_IP6_EX                        = 9,
 	ENA_ADMIN_RSS_PROTO_NUM                     = 16,
 };
 
 /* RSS flow hash fields */
 enum ena_admin_flow_hash_fields {
 	/* Ethernet Dest Addr */
 	ENA_ADMIN_RSS_L2_DA                         = BIT(0),
 	/* Ethernet Src Addr */
 	ENA_ADMIN_RSS_L2_SA                         = BIT(1),
 	/* ipv4/6 Dest Addr */
 	ENA_ADMIN_RSS_L3_DA                         = BIT(2),
 	/* ipv4/6 Src Addr */
 	ENA_ADMIN_RSS_L3_SA                         = BIT(3),
 	/* tcp/udp Dest Port */
 	ENA_ADMIN_RSS_L4_DP                         = BIT(4),
 	/* tcp/udp Src Port */
 	ENA_ADMIN_RSS_L4_SP                         = BIT(5),
 };
 
 struct ena_admin_proto_input {
 	/* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
 	uint16_t fields;
 
 	uint16_t reserved2;
 };
 
 struct ena_admin_feature_rss_hash_control {
 	struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
 
 	struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
 
 	struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
 
 	struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
 };
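
 /* Illustrative sketch (hypothetical helper): a hash input configuration is
  * an OR of enum ena_admin_flow_hash_fields bits, stored per protocol in
  * selected_fields. Hashing TCP/IPv4 on the 4-tuple could be expressed as
  * below; the requested bits are intersected with supported_fields so only
  * device-supported inputs remain selected.
  */
 static inline void
 ena_example_hash_tcp4_on_4tuple(struct ena_admin_feature_rss_hash_control *hc)
 {
 	uint16_t fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 	    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP;

 	hc->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
 	    fields & hc->supported_fields[ENA_ADMIN_RSS_TCP4].fields;
 }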
 
 struct ena_admin_feature_rss_flow_hash_input {
 	/* supported hash input sorting
 	 * 1 : L3_sort - support swapping L3 addresses if DA is
 	 *    smaller than SA
 	 * 2 : L4_sort - support swapping L4 ports if DP is
 	 *    smaller than SP
 	 */
 	uint16_t supported_input_sort;
 
 	/* enabled hash input sorting
 	 * 1 : enable_L3_sort - enable swapping L3 addresses if
 	 *    DA is smaller than SA
 	 * 2 : enable_L4_sort - enable swapping L4 ports if DP
 	 *    is smaller than SP
 	 */
 	uint16_t enabled_input_sort;
 };
 
 enum ena_admin_os_type {
 	ENA_ADMIN_OS_LINUX                          = 1,
 	ENA_ADMIN_OS_WIN                            = 2,
 	ENA_ADMIN_OS_DPDK                           = 3,
 	ENA_ADMIN_OS_FREEBSD                        = 4,
 	ENA_ADMIN_OS_IPXE                           = 5,
 	ENA_ADMIN_OS_ESXI                           = 6,
 	ENA_ADMIN_OS_MACOS                          = 7,
 	ENA_ADMIN_OS_GROUPS_NUM                     = 7,
 };
 
 struct ena_admin_host_info {
 	/* defined in enum ena_admin_os_type */
 	uint32_t os_type;
 
 	/* os distribution string format */
 	uint8_t os_dist_str[128];
 
 	/* OS distribution numeric format */
 	uint32_t os_dist;
 
 	/* kernel version string format */
 	uint8_t kernel_ver_str[32];
 
 	/* Kernel version numeric format */
 	uint32_t kernel_ver;
 
 	/* 7:0 : major
 	 * 15:8 : minor
 	 * 23:16 : sub_minor
 	 * 31:24 : module_type
 	 */
 	uint32_t driver_version;
 
 	/* features bitmap */
 	uint32_t supported_network_features[2];
 
 	/* ENA spec version of driver */
 	uint16_t ena_spec_version;
 
 	/* ENA device's Bus, Device and Function
 	 * 2:0 : function
 	 * 7:3 : device
 	 * 15:8 : bus
 	 */
 	uint16_t bdf;
 
 	/* Number of CPUs */
 	uint16_t num_cpus;
 
 	uint16_t reserved;
 
 	/* 0 : reserved
 	 * 1 : rx_offset
 	 * 2 : interrupt_moderation
 	 * 3 : rx_buf_mirroring
 	 * 4 : rss_configurable_function_key
 	 * 5 : reserved
 	 * 6 : rx_page_reuse
 	 * 31:7 : reserved
 	 */
 	uint32_t driver_supported_features;
 };
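
 /* Sketch of the packed layout (hypothetical helper): driver_version is a
  * bit field laid out as documented above (7:0 major, 15:8 minor, 23:16
  * sub_minor, 31:24 module_type). A driver could assemble it as shown;
  * dedicated shift/mask macros for these fields are also defined further
  * down in this header.
  */
 static inline uint32_t
 ena_example_pack_driver_version(uint8_t major, uint8_t minor,
     uint8_t sub_minor, uint8_t module_type)
 {
 	return (uint32_t)major |
 	    ((uint32_t)minor << 8) |
 	    ((uint32_t)sub_minor << 16) |
 	    ((uint32_t)module_type << 24);
 }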
 
 struct ena_admin_rss_ind_table_entry {
 	uint16_t cq_idx;
 
 	uint16_t reserved;
 };
 
 struct ena_admin_feature_rss_ind_table {
 	/* min supported table size (2^min_size) */
 	uint16_t min_size;
 
 	/* max supported table size (2^max_size) */
 	uint16_t max_size;
 
 	/* table size (2^size) */
 	uint16_t size;
 
 	/* 0 : one_entry_update - The ENA device supports
 	 *    setting a single RSS table entry
 	 */
 	uint8_t flags;
 
 	uint8_t reserved;
 
 	/* index of the inline entry. 0xFFFFFFFF means invalid */
 	uint32_t inline_index;
 
 	/* used for updating a single entry, ignored when setting the entire
 	 * table through the control buffer.
 	 */
 	struct ena_admin_rss_ind_table_entry inline_entry;
 };
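
 /* Illustrative sketch (hypothetical helper): when the device reports the
  * one_entry_update capability (bit 0 of flags, see the comment above), a
  * single indirection table slot can be rewritten through inline_index and
  * inline_entry instead of re-sending the whole 2^size table via the
  * control buffer.
  */
 static inline void
 ena_example_ind_table_update_one(struct ena_admin_feature_rss_ind_table *tbl,
     uint32_t index, uint16_t cq_idx)
 {
 	tbl->inline_index = index;
 	tbl->inline_entry.cq_idx = cq_idx;
 	tbl->inline_entry.reserved = 0;
 }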
 
 /* When a hint value is 0, the driver should use its own predefined value */
 struct ena_admin_ena_hw_hints {
 	/* value in ms */
 	uint16_t mmio_read_timeout;
 
 	/* value in ms */
 	uint16_t driver_watchdog_timeout;
 
 	/* Per packet tx completion timeout. value in ms */
 	uint16_t missing_tx_completion_timeout;
 
 	uint16_t missed_tx_completion_count_threshold_to_reset;
 
 	/* value in ms */
 	uint16_t admin_completion_tx_timeout;
 
 	uint16_t netdev_wd_timeout;
 
 	uint16_t max_tx_sgl_size;
 
 	uint16_t max_rx_sgl_size;
 
 	uint16_t reserved[8];
 };
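
 /* Minimal sketch (hypothetical helper): per the comment above the
  * structure, a hint of 0 means "no hint", so a driver falls back to its
  * own default rather than using the zero value directly.
  */
 static inline uint16_t
 ena_example_hint_or_default(uint16_t hint, uint16_t fallback)
 {
 	return hint ? hint : fallback;
 }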
 
 struct ena_admin_get_feat_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	struct ena_admin_ctrl_buff_info control_buffer;
 
 	struct ena_admin_get_set_feature_common_desc feat_common;
 
 	uint32_t raw[11];
 };
 
 struct ena_admin_queue_ext_feature_desc {
 	/* version */
 	uint8_t version;
 
 	uint8_t reserved1[3];
 
 	union {
 		struct ena_admin_queue_ext_feature_fields max_queue_ext;
 
 		uint32_t raw[10];
 	};
 };
 
 struct ena_admin_feature_phc_desc {
 	/* PHC type as defined in enum ena_admin_get_phc_type,
 	 * used only for GET command.
 	 */
 	uint8_t type;
 
 	/* Reserved - MBZ */
 	uint8_t reserved1[3];
 
 	/* PHC doorbell address as an offset to PCIe MMIO REG BAR,
 	 * used only for GET command.
 	 */
 	uint32_t doorbell_offset;
 
 	/* Max time for a valid PHC retrieval; passing this threshold will
 	 * fail the get-time request and block PHC requests for
 	 * block_timeout_usec. Used only for the GET command.
 	 */
 	uint32_t expire_timeout_usec;
 
 	/* PHC requests block period; blocking starts if a PHC request
 	 * expires, in order to prevent floods on a busy device.
 	 * Used only for the GET command.
 	 */
 	uint32_t block_timeout_usec;
 
 	/* Shared PHC physical address (ena_admin_phc_resp),
 	 * used only for SET command.
 	 */
 	struct ena_common_mem_addr output_address;
 
 	/* Shared PHC Size (ena_admin_phc_resp),
 	 * used only for SET command.
 	 */
 	uint32_t output_length;
 };
 
 struct ena_admin_get_feat_resp {
 	struct ena_admin_acq_common_desc acq_common_desc;
 
 	union {
 		uint32_t raw[14];
 
 		struct ena_admin_device_attr_feature_desc dev_attr;
 
 		struct ena_admin_feature_llq_desc llq;
 
 		struct ena_admin_queue_feature_desc max_queue;
 
 		struct ena_admin_queue_ext_feature_desc max_queue_ext;
 
 		struct ena_admin_feature_aenq_desc aenq;
 
 		struct ena_admin_get_feature_link_desc link;
 
 		struct ena_admin_feature_offload_desc offload;
 
 		struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
 
 		struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
 
 		struct ena_admin_feature_rss_ind_table ind_table;
 
 		struct ena_admin_feature_intr_moder_desc intr_moderation;
 
 		struct ena_admin_ena_hw_hints hw_hints;
 
 		struct ena_admin_feature_phc_desc phc;
 
 		struct ena_admin_get_extra_properties_strings_desc extra_properties_strings;
 
 		struct ena_admin_get_extra_properties_flags_desc extra_properties_flags;
 	} u;
 };
 
 struct ena_admin_set_feat_cmd {
 	struct ena_admin_aq_common_desc aq_common_descriptor;
 
 	struct ena_admin_ctrl_buff_info control_buffer;
 
 	struct ena_admin_get_set_feature_common_desc feat_common;
 
 	union {
 		uint32_t raw[11];
 
 		/* mtu size */
 		struct ena_admin_set_feature_mtu_desc mtu;
 
 		/* host attributes */
 		struct ena_admin_set_feature_host_attr_desc host_attr;
 
 		/* AENQ configuration */
 		struct ena_admin_feature_aenq_desc aenq;
 
 		/* rss flow hash function */
 		struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
 
 		/* rss flow hash input */
 		struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
 
 		/* rss indirection table */
 		struct ena_admin_feature_rss_ind_table ind_table;
 
 		/* LLQ configuration */
 		struct ena_admin_feature_llq_desc llq;
 
 		/* PHC configuration */
 		struct ena_admin_feature_phc_desc phc;
 	} u;
 };
 
 struct ena_admin_set_feat_resp {
 	struct ena_admin_acq_common_desc acq_common_desc;
 
 	union {
 		uint32_t raw[14];
 	} u;
 };
 
 struct ena_admin_aenq_common_desc {
 	uint16_t group;
 
 	uint16_t syndrome;
 
 	/* 0 : phase
 	 * 7:1 : reserved - MBZ
 	 */
 	uint8_t flags;
 
 	uint8_t reserved1[3];
 
 	uint32_t timestamp_low;
 
 	uint32_t timestamp_high;
 };
 
 /* asynchronous event notification groups */
 enum ena_admin_aenq_group {
 	ENA_ADMIN_LINK_CHANGE                       = 0,
 	ENA_ADMIN_FATAL_ERROR                       = 1,
 	ENA_ADMIN_WARNING                           = 2,
 	ENA_ADMIN_NOTIFICATION                      = 3,
 	ENA_ADMIN_KEEP_ALIVE                        = 4,
 	ENA_ADMIN_REFRESH_CAPABILITIES              = 5,
 	ENA_ADMIN_AENQ_GROUPS_NUM                   = 6,
 };
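
 /* Example sketch (hypothetical group selection): enabled_groups in
  * struct ena_admin_feature_aenq_desc must be a subset of supported_groups,
  * with each group occupying the bit matching its enum value. BIT() is
  * assumed to come from the platform header.
  */
 static inline void
 ena_example_aenq_enable(struct ena_admin_feature_aenq_desc *aenq)
 {
 	uint32_t wanted = BIT(ENA_ADMIN_LINK_CHANGE) |
 	    BIT(ENA_ADMIN_FATAL_ERROR) |
 	    BIT(ENA_ADMIN_NOTIFICATION) |
 	    BIT(ENA_ADMIN_KEEP_ALIVE);

 	/* Never request a group the device does not support. */
 	aenq->enabled_groups = aenq->supported_groups & wanted;
 }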
 
 enum ena_admin_aenq_notification_syndrome {
 	ENA_ADMIN_UPDATE_HINTS                      = 2,
 };
 
 struct ena_admin_aenq_entry {
 	struct ena_admin_aenq_common_desc aenq_common_desc;
 
 	/* command specific inline data */
 	uint32_t inline_data_w4[12];
 };
 
 struct ena_admin_aenq_link_change_desc {
 	struct ena_admin_aenq_common_desc aenq_common_desc;
 
 	/* 0 : link_status */
 	uint32_t flags;
 };
 
 struct ena_admin_aenq_keep_alive_desc {
 	struct ena_admin_aenq_common_desc aenq_common_desc;
 
 	uint32_t rx_drops_low;
 
 	uint32_t rx_drops_high;
 
 	uint32_t tx_drops_low;
 
 	uint32_t tx_drops_high;
 
 	uint32_t rx_overruns_low;
 
 	uint32_t rx_overruns_high;
 };
 
 struct ena_admin_ena_mmio_req_read_less_resp {
 	uint16_t req_id;
 
 	uint16_t reg_off;
 
 	/* value is valid when poll is cleared */
 	uint32_t reg_val;
 };
 
 struct ena_admin_phc_resp {
 	uint16_t req_id;
 
 	uint8_t reserved1[6];
 
 	uint64_t timestamp;
 
 	uint8_t reserved2[48];
 };
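
 /* Conceptual sketch (hypothetical helper): the PHC flow implied by the
  * definitions above is request/response based - the driver rings the
  * doorbell at doorbell_offset with a request id and then polls the shared
  * ena_admin_phc_resp buffer until the device echoes that id, giving up
  * after expire_timeout_usec. Only the completion check is shown here.
  */
 static inline int
 ena_example_phc_sample_ready(const volatile struct ena_admin_phc_resp *resp,
     uint16_t expected_req_id)
 {
 	/* The sample is usable only once the device has written back the
 	 * request id that the driver used when ringing the doorbell.
 	 */
 	return resp->req_id == expected_req_id;
 }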
 
 /* aq_common_desc */
 #define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK            GENMASK(11, 0)
 #define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK                 BIT(0)
 #define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT            1
 #define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK             BIT(1)
 #define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT   2
 #define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK    BIT(2)
 
 /* sq */
 #define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT                     5
 #define ENA_ADMIN_SQ_SQ_DIRECTION_MASK                      GENMASK(7, 5)
 
 /* acq_common_desc */
 #define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK           GENMASK(11, 0)
 #define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK                BIT(0)
 
 /* aq_create_sq_cmd */
 #define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT       5
 #define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK        GENMASK(7, 5)
 #define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK    GENMASK(3, 0)
 #define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT  4
 #define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK   GENMASK(6, 4)
 #define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
 
 /* aq_create_cq_cmd */
 #define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
 #define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
 #define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
 
 /* get_set_feature_common_desc */
 #define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK   GENMASK(1, 0)
 
 /* get_feature_link_desc */
 #define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK        BIT(0)
 #define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT        1
 #define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK         BIT(1)
 
 /* feature_offload_desc */
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT       5
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK        BIT(5)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT       6
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK        BIT(6)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT        7
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK         BIT(7)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT        3
 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK         BIT(3)
 
 /* feature_rss_flow_hash_function */
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
 
 /* feature_rss_flow_hash_input */
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK  BIT(1)
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK  BIT(2)
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
 
 /* host_info */
 #define ENA_ADMIN_HOST_INFO_MAJOR_MASK                      GENMASK(7, 0)
 #define ENA_ADMIN_HOST_INFO_MINOR_SHIFT                     8
 #define ENA_ADMIN_HOST_INFO_MINOR_MASK                      GENMASK(15, 8)
 #define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT                 16
 #define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK                  GENMASK(23, 16)
 #define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT               24
 #define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK                GENMASK(31, 24)
 #define ENA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
 #define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT                    3
 #define ENA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
 #define ENA_ADMIN_HOST_INFO_BUS_SHIFT                       8
 #define ENA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
 #define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT                 1
 #define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK                  BIT(1)
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT      2
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK       BIT(2)
 #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT          3
 #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK           BIT(3)
 #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
 #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
 #define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT             6
 #define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK              BIT(6)
 
 /* feature_rss_ind_table */
 #define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0)
 
 /* aenq_common_desc */
 #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK               BIT(0)
 
 /* aenq_link_change_desc */
 #define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK    BIT(0)
 
 #if !defined(DEFS_LINUX_MAINLINE)
 static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
 {
 	return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
 }
 
 static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
 {
 	p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
 {
 	return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
 }
 
 static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
 {
 	p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
 {
 	return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
 }
 
 static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
 {
 	p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
 {
 	return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
 }
 
 static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
 {
 	p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
 }
 
 static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
 {
 	return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
 }
 
 static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
 {
 	p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
 }
 
 static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
 {
 	return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
 }
 
 static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
 {
 	p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
 }
 
 static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
 {
 	return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
 }
 
 static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
 {
 	p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
 {
 	return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
 }
 
 static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
 {
 	p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
 {
 	return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
 }
 
 static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
 {
 	p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
 {
 	return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
 }
 
 static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
 {
 	p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
 {
 	return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
 }
 
 static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
 {
 	p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
 {
 	return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
 }
 
 static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
 {
 	p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
 }
 
 static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
 {
 	return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
 }
 
 static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
 {
 	p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
 }
 
 static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
 {
 	return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
 }
 
 static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
 {
 	p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
 }
 
 static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
 {
 	return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
 }
 
 static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
 {
 	p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
 }
 
 static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
 {
 	return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
 }
 
 static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
 {
 	p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
 {
 	return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
 }
 
 static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
 {
 	return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
 }
 
 static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
 {
 	return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
 }
 
 static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
 {
 	p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
 {
 	return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
 }
 
 static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
 {
 	p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
 }
 
 static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
 {
 	return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
 }
 
 static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
 {
 	p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
 }
 
 static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
 {
 	return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
 }
 
 static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
 {
 	p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
 }
 
 static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
 {
 	return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
 }
 
 static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
 {
 	p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
 }
 
 static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
 {
 	return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
 }
 
 static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
 {
 	p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
 }
 
 static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
 {
 	return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
 }
 
 static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
 {
 	p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
 {
 	return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
 }
 
 static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
 {
 	return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
 {
 	return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_module_type(const struct ena_admin_host_info *p)
 {
 	return (p->driver_version & ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK) >> ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_module_type(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT) & ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK;
 }
 
 static inline uint16_t get_ena_admin_host_info_function(const struct ena_admin_host_info *p)
 {
 	return p->bdf & ENA_ADMIN_HOST_INFO_FUNCTION_MASK;
 }
 
 static inline void set_ena_admin_host_info_function(struct ena_admin_host_info *p, uint16_t val)
 {
 	p->bdf |= val & ENA_ADMIN_HOST_INFO_FUNCTION_MASK;
 }
 
 static inline uint16_t get_ena_admin_host_info_device(const struct ena_admin_host_info *p)
 {
 	return (p->bdf & ENA_ADMIN_HOST_INFO_DEVICE_MASK) >> ENA_ADMIN_HOST_INFO_DEVICE_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_device(struct ena_admin_host_info *p, uint16_t val)
 {
 	p->bdf |= (val << ENA_ADMIN_HOST_INFO_DEVICE_SHIFT) & ENA_ADMIN_HOST_INFO_DEVICE_MASK;
 }
 
 static inline uint16_t get_ena_admin_host_info_bus(const struct ena_admin_host_info *p)
 {
 	return (p->bdf & ENA_ADMIN_HOST_INFO_BUS_MASK) >> ENA_ADMIN_HOST_INFO_BUS_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_bus(struct ena_admin_host_info *p, uint16_t val)
 {
 	p->bdf |= (val << ENA_ADMIN_HOST_INFO_BUS_SHIFT) & ENA_ADMIN_HOST_INFO_BUS_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_rx_offset(const struct ena_admin_host_info *p)
 {
 	return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK) >> ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_rx_offset(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT) & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_interrupt_moderation(const struct ena_admin_host_info *p)
 {
 	return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK) >> ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_interrupt_moderation(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT) & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_rx_buf_mirroring(const struct ena_admin_host_info *p)
 {
 	return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK) >> ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_rx_buf_mirroring(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT) & ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_rss_configurable_function_key(const struct ena_admin_host_info *p)
 {
 	return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK) >> ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_rss_configurable_function_key(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT) & ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
 }
 
 static inline uint32_t get_ena_admin_host_info_rx_page_reuse(const struct ena_admin_host_info *p)
 {
 	return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK) >> ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT;
 }
 
 static inline void set_ena_admin_host_info_rx_page_reuse(struct ena_admin_host_info *p, uint32_t val)
 {
 	p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT) & ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
 }
 
 static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p)
 {
 	return p->flags & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK;
 }
 
 static inline void set_ena_admin_feature_rss_ind_table_one_entry_update(struct ena_admin_feature_rss_ind_table *p, uint8_t val)
 {
 	p->flags |= val & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK;
 }
 
 static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
 {
 	return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
 }
 
 static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
 {
 	p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
 }
 
 static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
 {
 	return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
 }
 
 static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
 {
 	p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
 }
 
 #endif /* !defined(DEFS_LINUX_MAINLINE) */
 #endif /* _ENA_ADMIN_H_ */
diff --git a/ena_defs/ena_regs_defs.h b/ena_defs/ena_regs_defs.h
index f42a1074c12e..d66fb43ba2ab 100644
--- a/ena_defs/ena_regs_defs.h
+++ b/ena_defs/ena_regs_defs.h
@@ -1,170 +1,172 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * * Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in
  * the documentation and/or other materials provided with the
  * distribution.
  * * Neither the name of copyright holder nor the names of its
  * contributors may be used to endorse or promote products derived
  * from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _ENA_REGS_H_
 #define _ENA_REGS_H_
 
 enum ena_regs_reset_reason_types {
 	ENA_REGS_RESET_NORMAL                       = 0,
 	ENA_REGS_RESET_KEEP_ALIVE_TO                = 1,
 	ENA_REGS_RESET_ADMIN_TO                     = 2,
 	ENA_REGS_RESET_MISS_TX_CMPL                 = 3,
 	ENA_REGS_RESET_INV_RX_REQ_ID                = 4,
 	ENA_REGS_RESET_INV_TX_REQ_ID                = 5,
 	ENA_REGS_RESET_TOO_MANY_RX_DESCS            = 6,
 	ENA_REGS_RESET_INIT_ERR                     = 7,
 	ENA_REGS_RESET_DRIVER_INVALID_STATE         = 8,
 	ENA_REGS_RESET_OS_TRIGGER                   = 9,
 	ENA_REGS_RESET_OS_NETDEV_WD                 = 10,
 	ENA_REGS_RESET_SHUTDOWN                     = 11,
 	ENA_REGS_RESET_USER_TRIGGER                 = 12,
 	ENA_REGS_RESET_GENERIC                      = 13,
 	ENA_REGS_RESET_MISS_INTERRUPT               = 14,
 	ENA_REGS_RESET_SUSPECTED_POLL_STARVATION    = 15,
 	ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED	    = 16,
 	ENA_REGS_RESET_LAST,
 };
 
 /* ena_registers offsets */
 
 /* 0 base */
 #define ENA_REGS_VERSION_OFF                                0x0
 #define ENA_REGS_CONTROLLER_VERSION_OFF                     0x4
 #define ENA_REGS_CAPS_OFF                                   0x8
 #define ENA_REGS_CAPS_EXT_OFF                               0xc
 #define ENA_REGS_AQ_BASE_LO_OFF                             0x10
 #define ENA_REGS_AQ_BASE_HI_OFF                             0x14
 #define ENA_REGS_AQ_CAPS_OFF                                0x18
 #define ENA_REGS_ACQ_BASE_LO_OFF                            0x20
 #define ENA_REGS_ACQ_BASE_HI_OFF                            0x24
 #define ENA_REGS_ACQ_CAPS_OFF                               0x28
 #define ENA_REGS_AQ_DB_OFF                                  0x2c
 #define ENA_REGS_ACQ_TAIL_OFF                               0x30
 #define ENA_REGS_AENQ_CAPS_OFF                              0x34
 #define ENA_REGS_AENQ_BASE_LO_OFF                           0x38
 #define ENA_REGS_AENQ_BASE_HI_OFF                           0x3c
 #define ENA_REGS_AENQ_HEAD_DB_OFF                           0x40
 #define ENA_REGS_AENQ_TAIL_OFF                              0x44
 #define ENA_REGS_INTR_MASK_OFF                              0x4c
 #define ENA_REGS_DEV_CTL_OFF                                0x54
 #define ENA_REGS_DEV_STS_OFF                                0x58
 #define ENA_REGS_MMIO_REG_READ_OFF                          0x5c
 #define ENA_REGS_MMIO_RESP_LO_OFF                           0x60
 #define ENA_REGS_MMIO_RESP_HI_OFF                           0x64
 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF                   0x68
 
 /* phc_registers offsets */
 
 /* 100 base */
 #define ENA_REGS_PHC_DB_OFF                                 0x100
 
 /* version register */
 #define ENA_REGS_VERSION_MINOR_VERSION_MASK                 0xff
 #define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT                8
 #define ENA_REGS_VERSION_MAJOR_VERSION_MASK                 0xff00
 
 /* controller_version register */
 #define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK   0xff
 #define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT     8
 #define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK      0xff00
 #define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT     16
 #define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK      0xff0000
 #define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT           24
 #define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK            0xff000000
 
 /* caps register */
 #define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK        0x1
 #define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT                   1
 #define ENA_REGS_CAPS_RESET_TIMEOUT_MASK                    0x3e
 #define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT                  8
 #define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK                   0xff00
 #define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT                    16
 #define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK                     0xf0000
 
 /* aq_caps register */
 #define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK                      0xffff
 #define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT                16
 #define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK                 0xffff0000
 
 /* acq_caps register */
 #define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK                    0xffff
 #define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT              16
 #define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK               0xffff0000
 
 /* aenq_caps register */
 #define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK                  0xffff
 #define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT            16
 #define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK             0xffff0000
 
 /* dev_ctl register */
 #define ENA_REGS_DEV_CTL_DEV_RESET_MASK                     0x1
 #define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT                   1
 #define ENA_REGS_DEV_CTL_AQ_RESTART_MASK                    0x2
 #define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT                    2
 #define ENA_REGS_DEV_CTL_QUIESCENT_MASK                     0x4
 #define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT                    3
 #define ENA_REGS_DEV_CTL_IO_RESUME_MASK                     0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT             24
+#define ENA_REGS_DEV_CTL_RESET_REASON_EXT_MASK              0xf000000
 #define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT                 28
 #define ENA_REGS_DEV_CTL_RESET_REASON_MASK                  0xf0000000
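
 /* Encoding sketch (assumption, hypothetical macro): reset_reason carries
  * the low 4 bits of the reset reason and reset_reason_ext the next 4 bits,
  * so an extended reason value could be split into dev_ctl as follows.
  */
 #define ENA_EXAMPLE_DEV_CTL_ENCODE_RESET_REASON(reason)			\
 	((((reason) & 0xf) << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) |	\
 	 ((((reason) >> 4) & 0xf) << ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT))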
 
 /* dev_sts register */
 #define ENA_REGS_DEV_STS_READY_MASK                         0x1
 #define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT       1
 #define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK        0x2
 #define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT          2
 #define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK           0x4
 #define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT            3
 #define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK             0x8
 #define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT               4
 #define ENA_REGS_DEV_STS_RESET_FINISHED_MASK                0x10
 #define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT                  5
 #define ENA_REGS_DEV_STS_FATAL_ERROR_MASK                   0x20
 #define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT  6
 #define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK   0x40
 #define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT     7
 #define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK      0x80
 
 /* mmio_reg_read register */
 #define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK                  0xffff
 #define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT                16
 #define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK                 0xffff0000
 
 /* rss_ind_entry_update register */
 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK            0xffff
 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT          16
 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK           0xffff0000
 
 /* phc_db_req_id register */
 #define ENA_REGS_PHC_DB_REQ_ID_MASK                         0xffff
 
 #endif /* _ENA_REGS_H_ */
diff --git a/ena_plat.h b/ena_plat.h
index b5dc4e91a73c..c21331bfc10f 100644
--- a/ena_plat.h
+++ b/ena_plat.h
@@ -1,476 +1,478 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * * Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in
  * the documentation and/or other materials provided with the
  * distribution.
  * * Neither the name of copyright holder nor the names of its
  * contributors may be used to endorse or promote products derived
  * from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ENA_PLAT_H_
 #define ENA_PLAT_H_
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 
 #include <sys/bus.h>
 #include <sys/condvar.h>
 #include <sys/domainset.h>
 #include <sys/endian.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/malloc.h>
 #include <sys/mbuf.h>
 #include <sys/module.h>
 #include <sys/rman.h>
 #include <sys/proc.h>
 #include <sys/smp.h>
 #include <sys/socket.h>
 #include <sys/sockio.h>
 #include <sys/sysctl.h>
 #include <sys/taskqueue.h>
 #include <sys/eventhandler.h>
 #include <sys/types.h>
 #include <sys/timetc.h>
 #include <sys/cdefs.h>
 
 #include <machine/atomic.h>
 #include <machine/bus.h>
 #include <machine/in_cksum.h>
 #include <machine/pcpu.h>
 #include <machine/resource.h>
 #include <machine/_inttypes.h>
 
 #include <net/bpf.h>
 #include <net/ethernet.h>
 #include <net/if.h>
 #include <net/if_var.h>
 #include <net/if_arp.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 
 #include <net/if_types.h>
 #include <net/if_vlan_var.h>
 
 #include <netinet/in_systm.h>
 #include <netinet/in.h>
 #include <netinet/if_ether.h>
 #include <netinet/ip.h>
 #include <netinet/ip6.h>
 #include <netinet/tcp.h>
 #include <netinet/tcp_lro.h>
 #include <netinet/udp.h>
 
 #include <dev/led/led.h>
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcireg.h>
 
 enum ena_log_t {
 	ENA_ERR = 0,
 	ENA_WARN,
 	ENA_INFO,
 	ENA_DBG,
 };
 
 extern int ena_log_level;
 
 #define ena_log(dev, level, fmt, args...)			\
 	do {							\
 		if (ENA_ ## level <= ena_log_level)		\
 			device_printf((dev), fmt, ##args);	\
 	} while (0)
 
 #define ena_log_raw(level, fmt, args...)			\
 	do {							\
 		if (ENA_ ## level <= ena_log_level)		\
 			printf(fmt, ##args);			\
 	} while (0)
 
 #define ena_log_unused(dev, level, fmt, args...)		\
 	do {							\
 		(void)(dev);					\
 	} while (0)
 
 #ifdef ENA_LOG_IO_ENABLE
 #define ena_log_io(dev, level, fmt, args...)			\
 	ena_log((dev), level, fmt, ##args)
 #else
 #define ena_log_io(dev, level, fmt, args...)			\
 	ena_log_unused((dev), level, fmt, ##args)
 #endif
 
 #define ena_log_nm(dev, level, fmt, args...)			\
 	ena_log((dev), level, "[nm] " fmt, ##args)
 
 extern struct ena_bus_space ebs;
 
 #define DEFAULT_ALLOC_ALIGNMENT	8
 #define ENA_CDESC_RING_SIZE_ALIGNMENT  (1 << 12) /* 4K */
 
 #define container_of(ptr, type, member)					\
 	({								\
 		const __typeof(((type *)0)->member) *__p = (ptr);	\
 		(type *)((uintptr_t)__p - offsetof(type, member));	\
 	})
 
 #define ena_trace(ctx, level, fmt, args...)			\
 	ena_log((ctx)->dmadev, level, "%s() [TID:%d]: "		\
 	    fmt, __func__, curthread->td_tid, ##args)
 
 #define ena_trc_dbg(ctx, format, arg...)	\
 	ena_trace(ctx, DBG, format, ##arg)
 #define ena_trc_info(ctx, format, arg...)	\
 	ena_trace(ctx, INFO, format, ##arg)
 #define ena_trc_warn(ctx, format, arg...)	\
 	ena_trace(ctx, WARN, format, ##arg)
 #define ena_trc_err(ctx, format, arg...)	\
 	ena_trace(ctx, ERR, format, ##arg)
 
 #define unlikely(x)	__predict_false(!!(x))
 #define likely(x)  	__predict_true(!!(x))
 
 #define __iomem
 #define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
 
 #define MAX_ERRNO 4095
 #define IS_ERR_VALUE(x) unlikely((x) <= (unsigned long)MAX_ERRNO)
 
 #define ENA_WARN(cond, ctx, format, arg...)				\
 	do {								\
 		if (unlikely((cond))) {					\
 			ena_trc_warn(ctx, format, ##arg);		\
 		}							\
 	} while (0)
 
 static inline long IS_ERR(const void *ptr)
 {
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
 static inline void *ERR_PTR(long error)
 {
 	return (void *)error;
 }
 
 static inline long PTR_ERR(const void *ptr)
 {
 	return (long) ptr;
 }
 
 #define GENMASK(h, l)	(((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h))))
 #define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (64 - 1 - (h))))
 #define BIT(x)			(1UL << (x))
 #define BIT64(x)		BIT(x)
 #define ENA_ABORT() 		BUG()
 #define BUG() 			panic("ENA BUG")
 
 #define SZ_256			(256)
 #define SZ_4K			(4096)
 
 #define	ENA_COM_OK		0
 #define ENA_COM_FAULT		EFAULT
 #define	ENA_COM_INVAL		EINVAL
 #define ENA_COM_NO_MEM		ENOMEM
 #define	ENA_COM_NO_SPACE	ENOSPC
 #define ENA_COM_TRY_AGAIN	-1
 #define	ENA_COM_UNSUPPORTED	EOPNOTSUPP
 #define	ENA_COM_NO_DEVICE	ENODEV
 #define	ENA_COM_PERMISSION	EPERM
 #define ENA_COM_TIMER_EXPIRED	ETIMEDOUT
 #define ENA_COM_EIO		EIO
 #define ENA_COM_DEVICE_BUSY	EBUSY
 
 #define ENA_NODE_ANY		(-1)
 
 #define ENA_MSLEEP(x) 		pause_sbt("ena", SBT_1MS * (x), SBT_1MS, 0)
 #define ENA_USLEEP(x) 		pause_sbt("ena", SBT_1US * (x), SBT_1US, 0)
 #define ENA_UDELAY(x) 		DELAY(x)
 #define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
     ((long)cputick2usec(cpu_ticks()) + (timeout_us))
 #define ENA_TIME_EXPIRE(timeout)  ((timeout) < cputick2usec(cpu_ticks()))
 #define ENA_TIME_EXPIRE_HIGH_RES ENA_TIME_EXPIRE
 #define ENA_TIME_INIT_HIGH_RES() (0)
 #define ENA_TIME_COMPARE_HIGH_RES(time1, time2)			\
 	((time1 < time2) ? -1 : ((time1 > time2) ? 1 : 0))
 #define ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(current_time, timeout_us)	\
     ((long)cputick2usec(cpu_ticks()) + (timeout_us))
 #define ENA_GET_SYSTEM_TIME_HIGH_RES() ENA_GET_SYSTEM_TIMEOUT(0)
 #define ENA_MIGHT_SLEEP()
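
 /*
  * Usage sketch (hypothetical helper): the timeout macros above are meant
  * for polling loops - take a deadline once, then test it on every
  * iteration and sleep between samples.
  */
 static inline int
 ena_example_poll_until(volatile uint32_t *flag, unsigned long timeout_us)
 {
 	long deadline = ENA_GET_SYSTEM_TIMEOUT(timeout_us);

 	while (*flag == 0) {
 		if (ENA_TIME_EXPIRE(deadline))
 			return (ENA_COM_TIMER_EXPIRED);
 		ENA_MSLEEP(1);
 	}

 	return (ENA_COM_OK);
 }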
 
 #define min_t(type, _x, _y) ((type)(_x) < (type)(_y) ? (type)(_x) : (type)(_y))
 #define max_t(type, _x, _y) ((type)(_x) > (type)(_y) ? (type)(_x) : (type)(_y))
 
 #define ENA_MIN32(x,y) 	MIN(x, y)
 #define ENA_MIN16(x,y)	MIN(x, y)
 #define ENA_MIN8(x,y)	MIN(x, y)
 
 #define ENA_MAX32(x,y) 	MAX(x, y)
 #define ENA_MAX16(x,y) 	MAX(x, y)
 #define ENA_MAX8(x,y) 	MAX(x, y)
 
 /* Spinlock related methods */
 #define ena_spinlock_t 	struct mtx
 #define ENA_SPINLOCK_INIT(spinlock)				\
 	mtx_init(&(spinlock), "ena_spin", NULL, MTX_SPIN)
 #define ENA_SPINLOCK_DESTROY(spinlock)				\
 	do {							\
 		if (mtx_initialized(&(spinlock)))		\
 		    mtx_destroy(&(spinlock));			\
 	} while (0)
 #define ENA_SPINLOCK_LOCK(spinlock, flags)			\
 	do {							\
 		(void)(flags);					\
 		mtx_lock_spin(&(spinlock));			\
 	} while (0)
 #define ENA_SPINLOCK_UNLOCK(spinlock, flags)			\
 	do {							\
 		(void)(flags);					\
 		mtx_unlock_spin(&(spinlock));			\
 	} while (0)
 
 
 /* Wait queue related methods */
 #define ena_wait_event_t struct { struct cv wq; struct mtx mtx; }
 #define ENA_WAIT_EVENT_INIT(waitqueue)					\
 	do {								\
 		cv_init(&((waitqueue).wq), "cv");			\
 		mtx_init(&((waitqueue).mtx), "wq", NULL, MTX_DEF);	\
 	} while (0)
 #define ENA_WAIT_EVENTS_DESTROY(admin_queue)				\
 	do {								\
 		struct ena_comp_ctx *comp_ctx;				\
 		int i;							\
 		for (i = 0; i < admin_queue->q_depth; i++) {		\
 			comp_ctx = get_comp_ctxt(admin_queue, i, false); \
 			if (comp_ctx != NULL) {				\
 				cv_destroy(&((comp_ctx->wait_event).wq)); \
 				mtx_destroy(&((comp_ctx->wait_event).mtx)); \
 			}						\
 		}							\
 	} while (0)
 #define ENA_WAIT_EVENT_CLEAR(waitqueue)					\
 	cv_init(&((waitqueue).wq), (waitqueue).wq.cv_description)
 #define ENA_WAIT_EVENT_WAIT(waitqueue, timeout_us)			\
 	do {								\
 		mtx_lock(&((waitqueue).mtx));				\
 		cv_timedwait(&((waitqueue).wq), &((waitqueue).mtx),	\
 		    (timeout_us) * hz / 1000 / 1000);			\
 		mtx_unlock(&((waitqueue).mtx));				\
 	} while (0)
 #define ENA_WAIT_EVENT_SIGNAL(waitqueue)		\
 	do {						\
 		mtx_lock(&((waitqueue).mtx));		\
 		cv_broadcast(&((waitqueue).wq));	\
 		mtx_unlock(&((waitqueue).mtx));		\
 	} while (0)
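 /*
  * Usage sketch for the wait-event wrappers above (illustrative only).
  * ENA_WAIT_EVENT_WAIT() converts its microsecond argument to kernel
  * ticks and sleeps until the timeout expires or until another context
  * calls ENA_WAIT_EVENT_SIGNAL():
  *
  *	ena_wait_event_t ev;
  *
  *	ENA_WAIT_EVENT_INIT(ev);
  *	ENA_WAIT_EVENT_WAIT(ev, 3000000);	(waiter, ~3 second budget)
  *	ENA_WAIT_EVENT_SIGNAL(ev);		(signaller, other context)
  */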
 
 #define dma_addr_t 	bus_addr_t
 #define u8 		uint8_t
 #define u16 		uint16_t
 #define u32 		uint32_t
 #define u64 		uint64_t
 
 typedef struct {
 	bus_addr_t		paddr;
 	caddr_t			vaddr;
 	bus_dma_tag_t		tag;
 	bus_dmamap_t		map;
 	bus_dma_segment_t	seg;
 	int			nseg;
 } ena_mem_handle_t;
 
 struct ena_bus {
 	bus_space_handle_t 	reg_bar_h;
 	bus_space_tag_t 	reg_bar_t;
 	bus_space_handle_t	mem_bar_h;
 	bus_space_tag_t 	mem_bar_t;
 };
 
 typedef uint32_t ena_atomic32_t;
 
 #define ENA_PRIu64 PRIu64
 
 typedef uint64_t ena_time_t;
 typedef uint64_t ena_time_high_res_t;
 typedef struct ifnet ena_netdev;
 
 void	ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg,
     int error);
 int	ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
     int mapflags, bus_size_t alignment, int domain);
 
 static inline uint32_t
 ena_reg_read32(struct ena_bus *bus, bus_size_t offset)
 {
 	uint32_t v = bus_space_read_4(bus->reg_bar_t, bus->reg_bar_h, offset);
 	rmb();
 	return v;
 }
 
 /*
  * Copies to device memory in whole 64-bit words through a volatile
  * pointer; 'size' is expected to be a multiple of 8 bytes.
  */
 #define ENA_MEMCPY_TO_DEVICE_64(dst, src, size)				\
 	do {								\
 		int count, i;						\
 		volatile uint64_t *to = (volatile uint64_t *)(dst);	\
 		const uint64_t *from = (const uint64_t *)(src);		\
 		count = (size) / 8;					\
 									\
 		for (i = 0; i < count; i++, from++, to++)		\
 			*to = *from;					\
 	} while (0)
 
 #define ENA_MEM_ALLOC(dmadev, size) malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO)
 
 #define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node)		\
 	do {								\
 		(virt) = malloc_domainset((size), M_DEVBUF,		\
 		    (node) < 0 ? DOMAINSET_RR() : DOMAINSET_PREF(node),	\
 		    M_NOWAIT | M_ZERO);					\
 		(void)(dev_node);					\
 	} while (0)
 
 #define ENA_MEM_FREE(dmadev, ptr, size)					\
 	do { 								\
 		(void)(size);						\
 		free(ptr, M_DEVBUF);					\
 	} while (0)
 #define ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(dmadev, size, virt, phys,	\
     dma, node, dev_node, alignment) 					\
 	do {								\
 		ena_dma_alloc((dmadev), (size), &(dma), 0, (alignment),	\
 		    (node));						\
 		(virt) = (void *)(dma).vaddr;				\
 		(phys) = (dma).paddr;					\
 		(void)(dev_node);					\
 	} while (0)
 
 #define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, handle,	\
     node, dev_node)							\
 	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(dmadev, size, virt,		\
 	    phys, handle, node, dev_node, DEFAULT_ALLOC_ALIGNMENT)
 
 #define ENA_MEM_ALLOC_COHERENT_ALIGNED(dmadev, size, virt, phys, dma,	\
     alignment)								\
 	do {								\
 		ena_dma_alloc((dmadev), (size), &(dma), 0, (alignment),	\
 		    ENA_NODE_ANY);					\
 		(virt) = (void *)(dma).vaddr;				\
 		(phys) = (dma).paddr;					\
 	} while (0)
 
 #define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, dma)		\
 	ENA_MEM_ALLOC_COHERENT_ALIGNED(dmadev, size, virt,		\
 	    phys, dma, DEFAULT_ALLOC_ALIGNMENT)
 
 #define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, dma)		\
 	do {								\
 		(void)size;						\
 		bus_dmamap_unload((dma).tag, (dma).map);		\
 		bus_dmamem_free((dma).tag, (virt), (dma).map);		\
 		bus_dma_tag_destroy((dma).tag);				\
 		(dma).tag = NULL;					\
 		(virt) = NULL;						\
 	} while (0)
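 /*
  * Allocation/free pairing sketch for the coherent-DMA helpers above
  * (illustrative only; 'dev' stands for the device_t owning the DMA tag):
  *
  *	ena_mem_handle_t handle;
  *	void *virt;
  *	dma_addr_t phys;
  *
  *	ENA_MEM_ALLOC_COHERENT(dev, SZ_4K, virt, phys, handle);
  *	if (virt != NULL) {
  *		... use the buffer ...
  *		ENA_MEM_FREE_COHERENT(dev, SZ_4K, virt, phys, handle);
  *	}
  */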
 
 /* Register R/W methods */
 #define ENA_REG_WRITE32(bus, value, offset)				\
 	do {								\
 		wmb();							\
 		ENA_REG_WRITE32_RELAXED(bus, value, offset);		\
 	} while (0)
 
 #define ENA_REG_WRITE32_RELAXED(bus, value, offset)			\
 	bus_space_write_4(						\
 			  ((struct ena_bus*)bus)->reg_bar_t,		\
 			  ((struct ena_bus*)bus)->reg_bar_h,		\
 			  (bus_size_t)(offset), (value))
 
 #define ENA_REG_READ32(bus, offset)					\
 	ena_reg_read32((struct ena_bus*)(bus), (bus_size_t)(offset))
 
 #define ENA_DB_SYNC_WRITE(mem_handle) bus_dmamap_sync(			\
 	(mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_PREWRITE)
 #define ENA_DB_SYNC_PREREAD(mem_handle) bus_dmamap_sync(		\
 	(mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_PREREAD)
 #define ENA_DB_SYNC_POSTREAD(mem_handle) bus_dmamap_sync(		\
 	(mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_POSTREAD)
 #define ENA_DB_SYNC(mem_handle) ENA_DB_SYNC_WRITE(mem_handle)
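 /*
  * Register access sketch (illustrative only; 'bus' is a struct ena_bus
  * pointer and 'some_reg_offset' is hypothetical). ENA_REG_WRITE32()
  * issues a write barrier before the store, while ena_reg_read32()
  * follows the load with a read barrier:
  *
  *	u32 val = ENA_REG_READ32(bus, some_reg_offset);
  *	ENA_REG_WRITE32(bus, val | BIT(0), some_reg_offset);
  */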
 
 #define time_after(a,b)	((long)((unsigned long)(b) - (unsigned long)(a)) < 0)
 
 #define VLAN_HLEN 	sizeof(struct ether_vlan_header)
 
 #define prefetch(x)	(void)(x)
 #define prefetchw(x)	(void)(x)
 
 /* DMA buffers access */
 #define	dma_unmap_addr(p, name)			((p)->dma->name)
 #define	dma_unmap_addr_set(p, name, v)		(((p)->dma->name) = (v))
 #define	dma_unmap_len(p, name)			((p)->name)
 #define	dma_unmap_len_set(p, name, v)		(((p)->name) = (v))
 
 #define memcpy_toio memcpy
 
 #define ATOMIC32_INC(I32_PTR)		atomic_add_int(I32_PTR, 1)
 #define ATOMIC32_DEC(I32_PTR) 		atomic_add_int(I32_PTR, -1)
 #define ATOMIC32_READ(I32_PTR) 		atomic_load_acq_int(I32_PTR)
 #define ATOMIC32_SET(I32_PTR, VAL) 	atomic_store_rel_int(I32_PTR, VAL)
 
 #define	barrier() __asm__ __volatile__("": : :"memory")
 #define dma_rmb() barrier()
 #define mmiowb() barrier()
 
 #define	ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x))
 #define READ_ONCE(x)  ({			\
 			__typeof(x) __var;	\
 			barrier();		\
 			__var = ACCESS_ONCE(x);	\
 			barrier();		\
 			__var;			\
 		})
 #define READ_ONCE8(x) READ_ONCE(x)
 #define READ_ONCE16(x) READ_ONCE(x)
 #define READ_ONCE32(x) READ_ONCE(x)
 
 /* Two 16-bit shifts keep upper_32_bits() well defined for 32-bit 'n'. */
 #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
 #define lower_32_bits(n) ((uint32_t)(n))
 
 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
 
 #define ENA_FFS(x) ffs(x)
 
 void	ena_rss_key_fill(void *key, size_t size);
 
 #define ENA_RSS_FILL_KEY(key, size) ena_rss_key_fill(key, size)
 
+#define ENA_FIELD_GET(value, mask, offset) (((value) & (mask)) >> (offset))
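+/*
+ * Worked example (illustrative only): with mask GENMASK(7, 4) == 0xF0 and
+ * offset 4, ENA_FIELD_GET(0xABCD, 0xF0, 4) yields (0xABCD & 0xF0) >> 4,
+ * i.e. 0xC.
+ */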
+
 #include "ena_defs/ena_includes.h"
 
 #define ENA_BITS_PER_U64(bitmap) (bitcount64(bitmap))
 
 #endif /* ENA_PLAT_H_ */