diff --git a/share/man/man4/ena.4 b/share/man/man4/ena.4 --- a/share/man/man4/ena.4 +++ b/share/man/man4/ena.4 @@ -133,17 +133,17 @@ timeouts. If network performance is critical and memory capacity is sufficient, the 9k mbufs can be used. -.It Va hw.ena.force_large_llq_headers -Force the driver to use large LLQ headers (224 bytes). -The default is 0. +.It Va hw.ena.force_large_llq_header +Force the driver to use large (224 bytes) or regular (96 bytes) LLQ header size. +The default value is 2, in which case the device-recommended LLQ header size will be used. If the node value is set to 0, the regular size LLQ header will be used, which is 96B. In some cases, the packet header can be bigger than this (for example - IPv6 with multiple extensions). -In such a situation, the large LLQ headers should be used by setting this node -value to 1. -This will take effect only if the device supports both LLQ and large LLQ -headers. +In such a situation, the large LLQ header size, which is 224B, should be used, +and can be forced by setting this node value to 1. +Using the large LLQ header size will take effect only if the device supports +both LLQ and large LLQ headers. Otherwise, it will fallback to the no LLQ mode or regular header size. 
.Pp Increasing LLQ header size reduces the size of the Tx queue by half, so it may diff --git a/sys/dev/ena/ena.h b/sys/dev/ena/ena.h --- a/sys/dev/ena/ena.h +++ b/sys/dev/ena/ena.h @@ -171,6 +171,15 @@ ENA_FLAGS_NUMBER = ENA_FLAG_RSS_ACTIVE }; +enum ena_llq_header_size_policy_t { + /* Policy for Regular LLQ entry size (128B) */ + ENA_LLQ_HEADER_SIZE_POLICY_REGULAR, + /* Policy for Large LLQ entry size (256B) */ + ENA_LLQ_HEADER_SIZE_POLICY_LARGE, + /* Policy for device recommended LLQ entry size */ + ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT +}; + BITSET_DEFINE(_ena_state, ENA_FLAGS_NUMBER); typedef struct _ena_state ena_state_t; @@ -457,6 +466,8 @@ uint8_t mac_addr[ETHER_ADDR_LEN]; /* mdio and phy*/ + uint8_t llq_policy; + ena_state_t flags; /* IRQ CPU affinity */ diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c --- a/sys/dev/ena/ena.c +++ b/sys/dev/ena/ena.c @@ -156,7 +156,7 @@ static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *); static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *, struct ena_com_dev_get_features_ctx *); -static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *); +static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *, struct ena_adapter *); static void ena_config_host_info(struct ena_com_dev *, device_t); static int ena_attach(device_t); static int ena_detach(device_t); @@ -2756,27 +2756,32 @@ } static inline void -set_default_llq_configurations(struct ena_llq_configurations *llq_config, - struct ena_admin_feature_llq_desc *llq) +ena_set_llq_configurations(struct ena_llq_configurations *llq_config, + struct ena_admin_feature_llq_desc *llq, struct ena_adapter *adapter) { llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; - if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != - 0 && ena_force_large_llq_header) { - 
llq_config->llq_ring_entry_size = - ENA_ADMIN_LIST_ENTRY_SIZE_256B; - llq_config->llq_ring_entry_size_value = 256; + if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0) { + if ((ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) || + (ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT && + llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { + llq_config->llq_ring_entry_size = + ENA_ADMIN_LIST_ENTRY_SIZE_256B; + llq_config->llq_ring_entry_size_value = 256; + adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B; + } } else { llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; llq_config->llq_ring_entry_size_value = 128; + adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B; } } static int -ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) +ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, struct ena_adapter *adapter) { struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; struct ena_com_dev *ena_dev = ctx->ena_dev; @@ -2831,22 +2836,20 @@ max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1); /* - * When forcing large headers, we multiply the entry size by 2, + * When using large headers, we multiply the entry size by 2, * and therefore divide the queue size by 2, leaving the amount * of memory used by the queues unchanged. 
*/ - if (ena_force_large_llq_header) { - if ((llq->entry_size_ctrl_supported & - ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && - ena_dev->tx_mem_queue_type == + if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) { + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { max_tx_queue_size /= 2; ena_log(ctx->pdev, INFO, - "Forcing large headers and decreasing maximum Tx queue size to %d\n", + "Using large headers and decreasing maximum Tx queue size to %d\n", max_tx_queue_size); } else { ena_log(ctx->pdev, WARN, - "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); + "Using large headers failed: LLQ is disabled or device does not support large headers\n"); } } @@ -3002,7 +3005,7 @@ *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); - set_default_llq_configurations(&llq_config, &get_feat_ctx->llq); + ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter); rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, &llq_config); @@ -3859,7 +3862,7 @@ /* Calculate initial and maximum IO queue number and size */ max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); - rc = ena_calc_io_queue_size(&calc_queue_ctx); + rc = ena_calc_io_queue_size(&calc_queue_ctx, adapter); if (unlikely((rc != 0) || (max_num_io_queues <= 0))) { rc = EFAULT; goto err_com_free; diff --git a/sys/dev/ena/ena_sysctl.h b/sys/dev/ena/ena_sysctl.h --- a/sys/dev/ena/ena_sysctl.h +++ b/sys/dev/ena/ena_sysctl.h @@ -46,6 +46,6 @@ #define ena_mbuf_sz (ena_enable_9k_mbufs ? MJUM9BYTES : MJUMPAGESIZE) /* Force the driver to use large LLQ (Low Latency Queue) headers. 
*/ -extern bool ena_force_large_llq_header; +extern int ena_force_large_llq_header; #endif /* !(ENA_SYSCTL_H) */ diff --git a/sys/dev/ena/ena_sysctl.c b/sys/dev/ena/ena_sysctl.c --- a/sys/dev/ena/ena_sysctl.c +++ b/sys/dev/ena/ena_sysctl.c @@ -148,17 +148,17 @@ &ena_enable_9k_mbufs, 0, "Use 9 kB mbufs for Rx descriptors"); /* - * Force the driver to use large LLQ (Low Latency Queue) header. Defaults to - * false. This option may be important for platforms, which often handle packet - * headers on Tx with total header size greater than 96B, as it may - * reduce the latency. + * Force the driver to use large or regular LLQ (Low Latency Queue) header size. + * Defaults to ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT. This option may be + * important for platforms that often handle packet headers on Tx with total + * header size greater than 96B, as it may reduce the latency. * It also reduces the maximum Tx queue size by half, so it may cause more Tx * packet drops. */ -bool ena_force_large_llq_header = false; -SYSCTL_BOOL(_hw_ena, OID_AUTO, force_large_llq_header, CTLFLAG_RDTUN, +int ena_force_large_llq_header = ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT; +SYSCTL_INT(_hw_ena, OID_AUTO, force_large_llq_header, CTLFLAG_RDTUN, &ena_force_large_llq_header, 0, - "Increases maximum supported header size in LLQ mode to 224 bytes, while reducing the maximum Tx queue size by half.\n"); + "Change default LLQ entry size received from the device\n"); int ena_rss_table_size = ENA_RX_RSS_TABLE_SIZE;