diff --git a/sys/dev/ice/ice_ddp_common.c b/sys/dev/ice/ice_ddp_common.c index 87ecdad5e7bf..dfc50cc1f966 100644 --- a/sys/dev/ice/ice_ddp_common.c +++ b/sys/dev/ice/ice_ddp_common.c @@ -1,2528 +1,2551 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "ice_ddp_common.h" #include "ice_type.h" #include "ice_common.h" #include "ice_sched.h" /** * ice_aq_download_pkg * @hw: pointer to the hardware structure * @pkg_buf: the package buffer to transfer * @buf_size: the size of the package buffer * @last_buf: last buffer indicator * @error_offset: returns error offset * @error_info: returns error information * @cd: pointer to command details structure or NULL * * Download Package (0x0C40) */ static int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; int status; if (error_offset) *error_offset = 0; if (error_info) *error_info = 0; cmd = &desc.params.download_pkg; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); if (last_buf) cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); if (status == ICE_ERR_AQ_ERROR) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; if (error_offset) *error_offset = LE32_TO_CPU(resp->error_offset); if (error_info) *error_info = LE32_TO_CPU(resp->error_info); } return status; } /** * ice_aq_upload_section * @hw: pointer to the hardware structure * @pkg_buf: the package buffer which will receive the section * @buf_size: the size of the package buffer * @cd: pointer to command details structure or NULL * * Upload Section (0x0C41) */ int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); } /** * ice_aq_update_pkg * @hw: pointer to the hardware structure * @pkg_buf: 
the package cmd buffer * @buf_size: the size of the package cmd buffer * @last_buf: last buffer indicator * @error_offset: returns error offset * @error_info: returns error information * @cd: pointer to command details structure or NULL * * Update Package (0x0C42) */ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; int status; if (error_offset) *error_offset = 0; if (error_info) *error_info = 0; cmd = &desc.params.download_pkg; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); if (last_buf) cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); if (status == ICE_ERR_AQ_ERROR) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; if (error_offset) *error_offset = LE32_TO_CPU(resp->error_offset); if (error_info) *error_info = LE32_TO_CPU(resp->error_info); } return status; } /** * ice_find_seg_in_pkg * @hw: pointer to the hardware structure * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) * @pkg_hdr: pointer to the package header to be searched * * This function searches a package file for a particular segment type. On * success it returns a pointer to the segment header, otherwise it will * return NULL. 
*/ struct ice_generic_seg_hdr * ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, struct ice_pkg_hdr *pkg_hdr) { u32 i; ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, pkg_hdr->pkg_format_ver.update, pkg_hdr->pkg_format_ver.draft); /* Search all package segments for the requested segment type */ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { struct ice_generic_seg_hdr *seg; seg = (struct ice_generic_seg_hdr *) ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); if (LE32_TO_CPU(seg->seg_type) == seg_type) return seg; } return NULL; } /** * ice_get_pkg_seg_by_idx * @pkg_hdr: pointer to the package header to be searched * @idx: index of segment */ static struct ice_generic_seg_hdr * ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) { struct ice_generic_seg_hdr *seg = NULL; if (idx < LE32_TO_CPU(pkg_hdr->seg_count)) seg = (struct ice_generic_seg_hdr *) ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[idx])); return seg; } /** * ice_is_signing_seg_at_idx - determine if segment is a signing segment * @pkg_hdr: pointer to package header * @idx: segment index */ static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) { struct ice_generic_seg_hdr *seg; bool retval = false; seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); if (seg) retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING; return retval; } /** * ice_is_signing_seg_type_at_idx * @pkg_hdr: pointer to package header * @idx: segment index * @seg_id: segment id that is expected * @sign_type: signing type * * Determine if a segment is a signing segment of the correct type */ static bool ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 seg_id, u32 sign_type) { bool result = false; if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) { struct ice_sign_seg *seg; seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); if (seg && LE32_TO_CPU(seg->seg_id) == seg_id && 
LE32_TO_CPU(seg->sign_type) == sign_type) result = true; } return result; } /** * ice_update_pkg_no_lock * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @count: the number of buffers in the array */ int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { int status = 0; u32 i; for (i = 0; i < count; i++) { struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); bool last = ((i + 1) == count); u32 offset, info; status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), last, &offset, &info, NULL); if (status) { ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", status, offset, info); break; } } return status; } /** * ice_update_pkg * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @count: the number of buffers in the array * * Obtains change lock and updates package. */ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { int status; status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) return status; status = ice_update_pkg_no_lock(hw, bufs, count); ice_release_change_lock(hw); return status; } static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) { switch (aq_err) { case ICE_AQ_RC_ENOSEC: return ICE_DDP_PKG_NO_SEC_MANIFEST; case ICE_AQ_RC_EBADSIG: return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; case ICE_AQ_RC_ESVN: return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW; case ICE_AQ_RC_EBADMAN: return ICE_DDP_PKG_MANIFEST_INVALID; case ICE_AQ_RC_EBADBUF: return ICE_DDP_PKG_BUFFER_INVALID; default: return ICE_DDP_PKG_ERR; } } /** * ice_is_buffer_metadata - determine if package buffer is a metadata buffer * @buf: pointer to buffer header */ static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) { bool metadata = false; if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF) metadata = true; return metadata; } /** * ice_is_last_download_buffer * @buf: pointer to current buffer header * @idx: index 
of the buffer in the current sequence * @count: the buffer count in the current sequence * * Note: this routine should only be called if the buffer is not the last buffer */ static bool ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) { bool last = ((idx + 1) == count); /* A set metadata flag in the next buffer will signal that the current * buffer will be the last buffer downloaded */ if (!last) { struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1; last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); } return last; } /** * ice_dwnld_cfg_bufs_no_lock * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @start: buffer index of first buffer to download * @count: the number of buffers to download * @indicate_last: if true, then set last buffer flag on last buffer download * * Downloads package configuration buffers to the firmware. Metadata buffers * are skipped, and the first metadata buffer found indicates that the rest * of the buffers are all metadata buffers. */ static enum ice_ddp_state ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, u32 count, bool indicate_last) { enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; enum ice_aq_err err; u32 offset, info, i; if (!bufs || !count) return ICE_DDP_PKG_ERR; /* If the first buffer's first section has its metadata bit set * then there are no buffers to be downloaded, and the operation is * considered a success. 
*/ bh = (struct ice_buf_hdr *)(bufs + start); if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) return ICE_DDP_PKG_SUCCESS; for (i = 0; i < count; i++) { bool last = false; int status; bh = (struct ice_buf_hdr *)(bufs + start + i); if (indicate_last) last = ice_is_last_download_buffer(bh, i, count); status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, &offset, &info, NULL); /* Save AQ status from download package */ if (status) { ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); err = hw->adminq.sq_last_status; state = ice_map_aq_err_to_ddp_state(err); break; } if (last) break; } return state; } /** * ice_aq_get_pkg_info_list * @hw: pointer to the hardware structure * @pkg_info: the buffer which will receive the information list * @buf_size: the size of the pkg_info information buffer * @cd: pointer to command details structure or NULL * * Get Package Info List (0x0C43) */ static int ice_aq_get_pkg_info_list(struct ice_hw *hw, struct ice_aqc_get_pkg_info_resp *pkg_info, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); } /** * ice_get_pkg_segment_id - get correct package segment id, based on device * @mac_type: MAC type of the device */ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) { u32 seg_id; switch (mac_type) { case ICE_MAC_E830: seg_id = SEGMENT_TYPE_ICE_E830; break; case ICE_MAC_GENERIC: case ICE_MAC_GENERIC_3K: case ICE_MAC_GENERIC_3K_E825: default: seg_id = SEGMENT_TYPE_ICE_E810; break; } return seg_id; } /** * ice_get_pkg_sign_type - get package segment sign type, based on device * @mac_type: MAC type of the device */ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) { u32 sign_type; switch (mac_type) { case ICE_MAC_E830: sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB; break; case ICE_MAC_GENERIC_3K: sign_type = 
SEGMENT_SIGN_TYPE_RSA3K; break; case ICE_MAC_GENERIC_3K_E825: sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825; break; case ICE_MAC_GENERIC: default: sign_type = SEGMENT_SIGN_TYPE_RSA2K; break; } return sign_type; } /** * ice_get_signing_req - get correct package requirements, based on device * @hw: pointer to the hardware structure */ static void ice_get_signing_req(struct ice_hw *hw) { hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); } /** * ice_download_pkg_sig_seg - download a signature segment * @hw: pointer to the hardware structure * @seg: pointer to signature segment */ static enum ice_ddp_state ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) { enum ice_ddp_state state; state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, LE32_TO_CPU(seg->buf_tbl.buf_count), false); return state; } /** * ice_download_pkg_config_seg - download a config segment * @hw: pointer to the hardware structure * @pkg_hdr: pointer to package header * @idx: segment index * @start: starting buffer * @count: buffer count + * @last_seg: last segment being downloaded * * Note: idx must reference a ICE segment */ static enum ice_ddp_state ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, - u32 idx, u32 start, u32 count) + u32 idx, u32 start, u32 count, bool last_seg) { struct ice_buf_table *bufs; enum ice_ddp_state state; struct ice_seg *seg; u32 buf_count; seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); if (!seg) return ICE_DDP_PKG_ERR; bufs = ice_find_buf_table(seg); buf_count = LE32_TO_CPU(bufs->buf_count); if (start >= buf_count || start + count > buf_count) return ICE_DDP_PKG_ERR; state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, - true); + last_seg); return state; } /** * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment * @hw: pointer to the hardware structure * @pkg_hdr: pointer to package header * @idx: 
segment index (must be a signature segment) * * Note: idx must reference a signature segment */ static enum ice_ddp_state ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, u32 idx) { enum ice_ddp_state state; struct ice_sign_seg *seg; + bool last_seg = true; u32 conf_idx; u32 start; u32 count; + u32 flags; seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); if (!seg) { state = ICE_DDP_PKG_ERR; goto exit; } conf_idx = LE32_TO_CPU(seg->signed_seg_idx); start = LE32_TO_CPU(seg->signed_buf_start); count = LE32_TO_CPU(seg->signed_buf_count); + flags = LE32_TO_CPU(seg->flags); + + if (flags & ICE_SIGN_SEG_FLAGS_VALID) + last_seg = !!(flags & ICE_SIGN_SEG_FLAGS_LAST); state = ice_download_pkg_sig_seg(hw, seg); if (state) goto exit; if (count == 0) { /* this is a "Reference Signature Segment" and download should * be only for the buffers in the signature segment (and not * the hardware configuration segment) */ goto exit; } state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, - count); + count, last_seg); exit: return state; } /** * ice_match_signing_seg - determine if a matching signing segment exists * @pkg_hdr: pointer to package header * @seg_id: segment id that is expected * @sign_type: signing type */ static bool ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) { bool match = false; u32 i; for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, sign_type)) { match = true; break; } } return match; } /** * ice_post_dwnld_pkg_actions - perform post download package actions * @hw: pointer to the hardware structure */ static enum ice_ddp_state ice_post_dwnld_pkg_actions(struct ice_hw *hw) { enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; int status; status = ice_set_vlan_mode(hw); if (status) { ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", status); state = ICE_DDP_PKG_ERR; } return state; } /** * 
ice_download_pkg_with_sig_seg - download package using signature segments * @hw: pointer to the hardware structure * @pkg_hdr: pointer to package header */ static enum ice_ddp_state ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { enum ice_aq_err aq_err = hw->adminq.sq_last_status; enum ice_ddp_state state = ICE_DDP_PKG_ERR; int status; u32 i; ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); if (status) { if (status == ICE_ERR_AQ_NO_WORK) state = ICE_DDP_PKG_ALREADY_LOADED; else state = ice_map_aq_err_to_ddp_state(aq_err); return state; } for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, hw->pkg_sign_type)) continue; state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); if (state) break; } if (!state) state = ice_post_dwnld_pkg_actions(hw); ice_release_global_cfg_lock(hw); return state; } /** * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @count: the number of buffers in the array * * Obtains global config lock and downloads the package configuration buffers * to the firmware. */ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; int status; if (!bufs || !count) return ICE_DDP_PKG_ERR; /* If the first buffer's first section has its metadata bit set * then there are no buffers to be downloaded, and the operation is * considered a success. 
*/ bh = (struct ice_buf_hdr *)bufs; if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) return ICE_DDP_PKG_SUCCESS; status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); if (status) { if (status == ICE_ERR_AQ_NO_WORK) return ICE_DDP_PKG_ALREADY_LOADED; return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); } state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); if (!state) state = ice_post_dwnld_pkg_actions(hw); ice_release_global_cfg_lock(hw); return state; } /** * ice_download_pkg_without_sig_seg * @hw: pointer to the hardware structure * @ice_seg: pointer to the segment of the package to be downloaded * * Handles the download of a complete package without signature segment. */ static enum ice_ddp_state ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; enum ice_ddp_state state; ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", ice_seg->hdr.seg_format_ver.major, ice_seg->hdr.seg_format_ver.minor, ice_seg->hdr.seg_format_ver.update, ice_seg->hdr.seg_format_ver.draft); ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", LE32_TO_CPU(ice_seg->hdr.seg_type), LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); ice_buf_tbl = ice_find_buf_table(ice_seg); ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", LE32_TO_CPU(ice_buf_tbl->buf_count)); state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, LE32_TO_CPU(ice_buf_tbl->buf_count)); return state; } /** * ice_download_pkg * @hw: pointer to the hardware structure * @pkg_hdr: pointer to package header * @ice_seg: pointer to the segment of the package to be downloaded * * Handles the download of a complete package. 
*/ static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, struct ice_seg *ice_seg) { enum ice_ddp_state state; if (ice_match_signing_seg(pkg_hdr, hw->pkg_seg_id, hw->pkg_sign_type)) state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); else state = ice_download_pkg_without_sig_seg(hw, ice_seg); ice_post_pkg_dwnld_vlan_mode_cfg(hw); return state; } /** * ice_init_pkg_info * @hw: pointer to the hardware structure * @pkg_hdr: pointer to the driver's package hdr * * Saves off the package details into the HW structure. */ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { struct ice_generic_seg_hdr *seg_hdr; if (!pkg_hdr) return ICE_DDP_PKG_ERR; ice_get_signing_req(hw); ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", hw->pkg_seg_id); seg_hdr = (struct ice_generic_seg_hdr *) ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); if (seg_hdr) { struct ice_meta_sect *meta; struct ice_pkg_enum state; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); /* Get package information from the Metadata Section */ meta = (struct ice_meta_sect *) ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, ICE_SID_METADATA); if (!meta) { ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); return ICE_DDP_PKG_INVALID_FILE; } hw->pkg_ver = meta->ver; ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), ICE_NONDMA_TO_NONDMA); ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", meta->ver.major, meta->ver.minor, meta->ver.update, meta->ver.draft, meta->name); hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", seg_hdr->seg_format_ver.major, seg_hdr->seg_format_ver.minor, seg_hdr->seg_format_ver.update, seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); } else { ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); 
return ICE_DDP_PKG_INVALID_FILE; } return ICE_DDP_PKG_SUCCESS; } /** * ice_get_pkg_info * @hw: pointer to the hardware structure * * Store details of the package currently loaded in HW into the HW structure. */ enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) { enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_aqc_get_pkg_info_resp *pkg_info; u16 size; u32 i; size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); if (!pkg_info) return ICE_DDP_PKG_ERR; if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { state = ICE_DDP_PKG_ERR; goto init_pkg_free_alloc; } for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; u8 place = 0; if (pkg_info->pkg_info[i].is_active) { flags[place++] = 'A'; hw->active_pkg_ver = pkg_info->pkg_info[i].ver; hw->active_track_id = LE32_TO_CPU(pkg_info->pkg_info[i].track_id); ice_memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, sizeof(pkg_info->pkg_info[i].name), ICE_NONDMA_TO_NONDMA); hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; } if (pkg_info->pkg_info[i].is_active_at_boot) flags[place++] = 'B'; if (pkg_info->pkg_info[i].is_modified) flags[place++] = 'M'; if (pkg_info->pkg_info[i].is_in_nvm) flags[place++] = 'N'; ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i, pkg_info->pkg_info[i].ver.major, pkg_info->pkg_info[i].ver.minor, pkg_info->pkg_info[i].ver.update, pkg_info->pkg_info[i].ver.draft, pkg_info->pkg_info[i].name, flags); } init_pkg_free_alloc: ice_free(hw, pkg_info); return state; } /** * ice_label_enum_handler * @sect_type: section type * @section: pointer to section * @index: index of the label entry to be returned * @offset: pointer to receive absolute offset, always zero for label sections * * This is a callback function that can be passed to ice_pkg_enum_entry. * Handles enumeration of individual label entries. 
*/ static void * ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, u32 *offset) { struct ice_label_section *labels; if (!section) return NULL; if (index > ICE_MAX_LABELS_IN_BUF) return NULL; if (offset) *offset = 0; labels = (struct ice_label_section *)section; if (index >= LE16_TO_CPU(labels->count)) return NULL; return labels->label + index; } /** * ice_enum_labels * @ice_seg: pointer to the ice segment (NULL on subsequent calls) * @type: the section type that will contain the label (0 on subsequent calls) * @state: ice_pkg_enum structure that will hold the state of the enumeration * @value: pointer to a value that will return the label's value if found * * Enumerates a list of labels in the package. The caller will call * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL * the end of the list has been reached. */ static char * ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, u16 *value) { struct ice_label *label; /* Check for valid label section on first call */ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) return NULL; label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, NULL, ice_label_enum_handler); if (!label) return NULL; *value = LE16_TO_CPU(label->value); return label->name; } /** * ice_find_label_value * @ice_seg: pointer to the ice segment (non-NULL) * @name: name of the label to search for * @type: the section type that will contain the label * @value: pointer to a value that will return the label's value if found * * Finds a label's value given the label name and the section type to search. * The ice_seg parameter must not be NULL since the first call to * ice_enum_labels requires a pointer to an actual ice_seg structure. 
*/ int ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, u16 *value) { struct ice_pkg_enum state; char *label_name; u16 val; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!ice_seg) return ICE_ERR_PARAM; do { label_name = ice_enum_labels(ice_seg, type, &state, &val); if (label_name && !strcmp(label_name, name)) { *value = val; return 0; } ice_seg = NULL; } while (label_name); return ICE_ERR_CFG; } /** * ice_verify_pkg - verify package * @pkg: pointer to the package buffer * @len: size of the package buffer * * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. */ enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; if (len < ice_struct_size(pkg, seg_offset, 1)) return ICE_DDP_PKG_INVALID_FILE; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) return ICE_DDP_PKG_INVALID_FILE; /* pkg must have at least one segment */ seg_count = LE32_TO_CPU(pkg->seg_count); if (seg_count < 1) return ICE_DDP_PKG_INVALID_FILE; /* make sure segment array fits in package length */ if (len < ice_struct_size(pkg, seg_offset, seg_count)) return ICE_DDP_PKG_INVALID_FILE; /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { u32 off = LE32_TO_CPU(pkg->seg_offset[i]); struct ice_generic_seg_hdr *seg; /* segment header must fit */ if (len < off + sizeof(*seg)) return ICE_DDP_PKG_INVALID_FILE; seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); /* segment body must fit */ if (len < off + LE32_TO_CPU(seg->seg_size)) return ICE_DDP_PKG_INVALID_FILE; } return ICE_DDP_PKG_SUCCESS; } /** * ice_free_seg - free package segment pointer * @hw: pointer to the hardware structure * * Frees the package segment pointer in the proper manner, depending on if the * segment was 
allocated or just the passed in pointer was stored. */ void ice_free_seg(struct ice_hw *hw) { if (hw->pkg_copy) { ice_free(hw, hw->pkg_copy); hw->pkg_copy = NULL; hw->pkg_size = 0; } hw->seg = NULL; } /** * ice_chk_pkg_version - check package version for compatibility with driver * @pkg_ver: pointer to a version structure to check * * Check to make sure that the package about to be downloaded is compatible with * the driver. To be compatible, the major and minor components of the package * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR * definitions. */ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) { if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; return ICE_DDP_PKG_SUCCESS; } /** * ice_chk_pkg_compat * @hw: pointer to the hardware structure * @ospkg: pointer to the package hdr * @seg: pointer to the package segment hdr * * This function checks the package version compatibility with driver and NVM */ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { struct ice_aqc_get_pkg_info_resp *pkg; enum ice_ddp_state state; u16 size; u32 i; /* Check package version compatibility */ state = ice_chk_pkg_version(&hw->pkg_ver); if (state) { ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); return state; } /* find ICE segment in given package */ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); return ICE_DDP_PKG_INVALID_FILE; } /* Check if FW is compatible with the OS package */ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = (struct 
ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); if (!pkg) return ICE_DDP_PKG_ERR; if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { state = ICE_DDP_PKG_ERR; goto fw_ddp_compat_free_alloc; } for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { /* loop till we find the NVM package */ if (!pkg->pkg_info[i].is_in_nvm) continue; if ((*seg)->hdr.seg_format_ver.major != pkg->pkg_info[i].ver.major || (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { state = ICE_DDP_PKG_FW_MISMATCH; ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ break; } fw_ddp_compat_free_alloc: ice_free(hw, pkg); return state; } /** * ice_sw_fv_handler * @sect_type: section type * @section: pointer to section * @index: index of the field vector entry to be returned * @offset: ptr to variable that receives the offset in the field vector table * * This is a callback function that can be passed to ice_pkg_enum_entry. * This function treats the given section as of type ice_sw_fv_section and * enumerates offset field. "offset" is an index into the field vector table. */ static void * ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) { struct ice_sw_fv_section *fv_section = (struct ice_sw_fv_section *)section; if (!section || sect_type != ICE_SID_FLD_VEC_SW) return NULL; if (index >= LE16_TO_CPU(fv_section->count)) return NULL; if (offset) /* "index" passed in to this function is relative to a given * 4k block. To get to the true index into the field vector * table need to add the relative index to the base_offset * field of this section */ *offset = LE16_TO_CPU(fv_section->base_offset) + index; return fv_section->fv + index; } /** * ice_get_prof_index_max - get the max profile index for used profile * @hw: pointer to the HW struct * * Calling this function will get the max profile index for used profile * and store the index number in struct ice_switch_info *switch_info * in hw for following use. 
*/
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	/* No segment cached means no package was downloaded/parsed yet */
	if (!hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;

	/* Walk every SW field vector; enumeration starts with a non-NULL
	 * segment and continues with NULL on subsequent iterations.
	 */
	do {
		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* in the profile that not be used, the prot_id is set to 0xff
		 * and the off is set to 0x1ff for all the field vectors.
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;

		/* Remember the highest index that held a used profile */
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return 0;
}

/**
 * ice_get_ddp_pkg_state - get DDP pkg state after download
 * @hw: pointer to the HW struct
 * @already_loaded: indicates if pkg was already loaded onto the device
 *
 * Maps the (driver pkg version, active pkg version) pair to one of the
 * ice_ddp_state result codes after a download attempt.
 */
static enum ice_ddp_state
ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
{
	/* Exact version and name match: success, or "same version already
	 * loaded" when another PF got there first.
	 */
	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
	    !memcmp(hw->pkg_name, hw->active_pkg_name,
		    sizeof(hw->pkg_name))) {
		if (already_loaded)
			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
		else
			return ICE_DDP_PKG_SUCCESS;
	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
		/* Active package major.minor outside the supported range */
		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
	} else {
		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
	}
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX 0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note, that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
*/
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	bool already_loaded = false;
	enum ice_ddp_state state;
	struct ice_pkg_hdr *pkg;
	struct ice_seg *seg;

	/* Reject a missing or zero-length package buffer up front */
	if (!buf || !len)
		return ICE_DDP_PKG_ERR;

	pkg = (struct ice_pkg_hdr *)buf;
	state = ice_verify_pkg(pkg, len);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  state);
		return state;
	}

	/* initialize package info */
	state = ice_init_pkg_info(hw, pkg);
	if (state)
		return state;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	state = ice_chk_pkg_compat(hw, pkg, &seg);
	if (state)
		return state;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	state = ice_download_pkg(hw, pkg, seg);
	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
		/* Not an error: another PF downloaded the same package */
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		already_loaded = true;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
		state = ice_get_pkg_info(hw);
		if (!state)
			state = ice_get_ddp_pkg_state(hw, already_loaded);
	}

	if (ice_is_init_pkg_successful(state)) {
		/* Cache the segment pointer; caller must keep buf alive */
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_get_prof_index_max(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  state);
	}

	return state;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
* * If the package buffer resides in the data segment and can be modified, the * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). * * However, if the package buffer needs to be copied first, such as when being * read from a file, the caller should use ice_copy_and_init_pkg(). * * This function will first copy the package buffer, before calling * ice_init_pkg(). The caller is free to immediately destroy the original * package buffer, as the new copy will be managed by this function and * related routines. */ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) { enum ice_ddp_state state; u8 *buf_copy; if (!buf || !len) return ICE_DDP_PKG_ERR; buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); state = ice_init_pkg(hw, buf_copy, len); if (!ice_is_init_pkg_successful(state)) { /* Free the copy, since we failed to initialize the package */ ice_free(hw, buf_copy); } else { /* Track the copied pkg so we can free it later */ hw->pkg_copy = buf_copy; hw->pkg_size = len; } return state; } /** * ice_is_init_pkg_successful - check if DDP init was successful * @state: state of the DDP pkg after download */ bool ice_is_init_pkg_successful(enum ice_ddp_state state) { switch (state) { case ICE_DDP_PKG_SUCCESS: case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: return true; default: return false; } } /** * ice_pkg_buf_alloc * @hw: pointer to the HW structure * * Allocates a package buffer and returns a pointer to the buffer header. * Note: all package contents must be in Little Endian form. 
*/ struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) { struct ice_buf_build *bld; struct ice_buf_hdr *buf; bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); if (!bld) return NULL; buf = (struct ice_buf_hdr *)bld; buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, section_entry)); return bld; } static bool ice_is_gtp_u_profile(u32 prof_idx) { return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) || prof_idx == ICE_PROFID_IPV4_GTPU_TEID; } static bool ice_is_gtp_c_profile(u32 prof_idx) { switch (prof_idx) { case ICE_PROFID_IPV4_GTPC_TEID: case ICE_PROFID_IPV4_GTPC_NO_TEID: case ICE_PROFID_IPV6_GTPC_TEID: case ICE_PROFID_IPV6_GTPC_NO_TEID: return true; default: return false; } } /** * ice_get_sw_prof_type - determine switch profile type * @hw: pointer to the HW structure * @fv: pointer to the switch field vector * @prof_idx: profile index to check */ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) { bool valid_prof = false; u16 i; if (ice_is_gtp_c_profile(prof_idx)) return ICE_PROF_TUN_GTPC; if (ice_is_gtp_u_profile(prof_idx)) return ICE_PROF_TUN_GTPU; for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { if (fv->ew[i].off != ICE_NAN_OFFSET) valid_prof = true; /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && fv->ew[i].off == ICE_VNI_OFFSET) return ICE_PROF_TUN_UDP; /* GRE tunnel will have GRE protocol */ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) return ICE_PROF_TUN_GRE; } return valid_prof ? 
ICE_PROF_NON_TUN : ICE_PROF_INVALID; } /** * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type * @hw: pointer to hardware structure * @req_profs: type of profiles requested * @bm: pointer to memory for returning the bitmap of field vectors */ void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, ice_bitmap_t *bm) { struct ice_pkg_enum state; struct ice_seg *ice_seg; struct ice_fv *fv; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); ice_seg = hw->seg; do { enum ice_prof_type prof_type; u32 offset; fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &offset, ice_sw_fv_handler); ice_seg = NULL; if (fv) { /* Determine field vector type */ prof_type = ice_get_sw_prof_type(hw, fv, offset); if (req_profs & prof_type) ice_set_bit((u16)offset, bm); } } while (fv); } /** * ice_get_sw_fv_list * @hw: pointer to the HW structure * @lkups: lookup elements or match criteria for the advanced recipe, one * structure per protocol header * @bm: bitmap of field vectors to consider * @fv_list: Head of a list * * Finds all the field vector entries from switch block that contain * a given protocol ID and offset and returns a list of structures of type * "ice_sw_fv_list_entry". Every structure in the list has a field vector * definition and profile ID information * NOTE: The caller of the function is responsible for freeing the memory * allocated for every list entry. 
*/ int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) { struct ice_sw_fv_list_entry *fvl; struct ice_sw_fv_list_entry *tmp; struct ice_pkg_enum state; struct ice_seg *ice_seg; struct ice_fv *fv; u32 offset; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!lkups->n_val_words || !hw->seg) return ICE_ERR_PARAM; ice_seg = hw->seg; do { u16 i; fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &offset, ice_sw_fv_handler); if (!fv) break; ice_seg = NULL; /* If field vector is not in the bitmap list, then skip this * profile. */ if (!ice_is_bit_set(bm, (u16)offset)) continue; for (i = 0; i < lkups->n_val_words; i++) { int j; for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) if (fv->ew[j].prot_id == lkups->fv_words[i].prot_id && fv->ew[j].off == lkups->fv_words[i].off) break; if (j >= hw->blk[ICE_BLK_SW].es.fvw) break; if (i + 1 == lkups->n_val_words) { fvl = (struct ice_sw_fv_list_entry *) ice_malloc(hw, sizeof(*fvl)); if (!fvl) goto err; fvl->fv_ptr = fv; fvl->profile_id = offset; LIST_ADD(&fvl->list_entry, fv_list); break; } } } while (fv); if (LIST_EMPTY(fv_list)) { ice_warn(hw, "Required profiles not found in currently loaded DDP package"); return ICE_ERR_CFG; } return 0; err: LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, list_entry) { LIST_DEL(&fvl->list_entry); ice_free(hw, fvl); } return ICE_ERR_NO_MEMORY; } /** * ice_init_prof_result_bm - Initialize the profile result index bitmap * @hw: pointer to hardware structure */ void ice_init_prof_result_bm(struct ice_hw *hw) { struct ice_pkg_enum state; struct ice_seg *ice_seg; struct ice_fv *fv; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!hw->seg) return; ice_seg = hw->seg; do { u32 off; u16 i; fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &off, ice_sw_fv_handler); ice_seg = NULL; if (!fv) break; 
ice_zero_bitmap(hw->switch_info->prof_res_bm[off], ICE_MAX_FV_WORDS); /* Determine empty field vector indices, these can be * used for recipe results. Skip index 0, since it is * always used for Switch ID. */ for (i = 1; i < ICE_MAX_FV_WORDS; i++) if (fv->ew[i].prot_id == ICE_PROT_INVALID && fv->ew[i].off == ICE_FV_OFFSET_INVAL) ice_set_bit(i, hw->switch_info->prof_res_bm[off]); } while (fv); } /** * ice_pkg_buf_free * @hw: pointer to the HW structure * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * * Frees a package buffer */ void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) { ice_free(hw, bld); } /** * ice_pkg_buf_reserve_section * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * @count: the number of sections to reserve * * Reserves one or more section table entries in a package buffer. This routine * can be called multiple times as long as they are made before calling * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() * is called once, the number of sections that can be allocated will not be able * to be increased; not using all reserved sections is fine, but this will * result in some wasted space in the buffer. * Note: all package contents must be in Little Endian form. 
*/
int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *hdr;
	u16 active_cnt;
	u16 new_end;

	if (!bld)
		return ICE_ERR_PARAM;

	hdr = (struct ice_buf_hdr *)&bld->buf;

	/* Once a section has been allocated the table size is frozen */
	active_cnt = LE16_TO_CPU(hdr->section_count);
	if (active_cnt > 0)
		return ICE_ERR_CFG;

	/* Refuse a reservation that would overflow the section table */
	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	/* Push data_end past the newly reserved table entries */
	new_end = LE16_TO_CPU(hdr->data_end) +
	    FLEX_ARRAY_SIZE(hdr, section_entry, count);
	hdr->data_end = CPU_TO_LE16(new_end);

	return 0;
}

/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffers' status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
*/
void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = LE16_TO_CPU(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ICE_ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = LE16_TO_CPU(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		/* Record offset/size/type in the next free table entry */
		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
		buf->section_entry[sect_count].type = CPU_TO_LE32(type);

		/* Claim the space and publish the updated counters */
		data_end += size;
		buf->data_end = CPU_TO_LE16(data_end);

		buf->section_count = CPU_TO_LE16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_alloc_single_section
 * @hw: pointer to the HW structure
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 * @section: returns pointer to the section
 *
 * Allocates a package buffer with a single section.
 * Note: all package contents must be in Little Endian form.
*/ struct ice_buf_build * ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, void **section) { struct ice_buf_build *buf; if (!section) return NULL; buf = ice_pkg_buf_alloc(hw); if (!buf) return NULL; if (ice_pkg_buf_reserve_section(buf, 1)) goto ice_pkg_buf_alloc_single_section_err; *section = ice_pkg_buf_alloc_section(buf, type, size); if (!*section) goto ice_pkg_buf_alloc_single_section_err; return buf; ice_pkg_buf_alloc_single_section_err: ice_pkg_buf_free(hw, buf); return NULL; } /** * ice_pkg_buf_unreserve_section * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * @count: the number of sections to unreserve * * Unreserves one or more section table entries in a package buffer, releasing * space that can be used for section data. This routine can be called * multiple times as long as they are made before calling * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() * is called once, the number of sections that can be allocated will not be able * to be increased; not using all reserved sections is fine, but this will * result in some wasted space in the buffer. * Note: all package contents must be in Little Endian form. */ int ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) { struct ice_buf_hdr *buf; u16 section_count; u16 data_end; if (!bld) return ICE_ERR_PARAM; buf = (struct ice_buf_hdr *)&bld->buf; /* already an active section, can't decrease table size */ section_count = LE16_TO_CPU(buf->section_count); if (section_count > 0) return ICE_ERR_CFG; if (count > bld->reserved_section_table_entries) return ICE_ERR_CFG; bld->reserved_section_table_entries -= count; data_end = LE16_TO_CPU(buf->data_end) - FLEX_ARRAY_SIZE(buf, section_entry, count); buf->data_end = CPU_TO_LE16(data_end); return 0; } /** * ice_pkg_buf_get_free_space * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * * Returns the number of free bytes remaining in the buffer. 
* Note: all package contents must be in Little Endian form.
 */
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	/* Free space is everything between data_end and the hard limit */
	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
}

/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return LE16_TO_CPU(buf->section_count);
}

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (bld)
		return &bld->buf;
	return NULL;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	/* The buffer table immediately follows the variable-length device
	 * table and NVM version table within the segment.
	 */
	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 LE32_TO_CPU(ice_seg->device_table_count));

	return (_FORCE_ struct ice_buf_table *)
		(nvms->vers + LE32_TO_CPU(nvms->table_count));
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
*/
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;

	/* verify data: section count and data_end must be within bounds */
	section_count = LE16_TO_CPU(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = LE16_TO_CPU(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		/* First call: locate the buffer table and start at index 0 */
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	/* Continuation call: advance to the next buffer, if any remain */
	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
*/ bool ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) { if (!ice_seg && !state->buf) return false; if (!ice_seg && state->buf) if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) return true; state->buf = ice_pkg_enum_buf(ice_seg, state); if (!state->buf) return false; /* start of new buffer, reset section index */ state->sect_idx = 0; return true; } /** * ice_pkg_enum_section * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) * @state: pointer to the enum state * @sect_type: section type to enumerate * * This function will enumerate all the sections of a particular type in the * ice segment. The first call is made with the ice_seg parameter non-NULL; * on subsequent calls, ice_seg is set to NULL which continues the enumeration. * When the function returns a NULL pointer, then the end of the matching * sections has been reached. */ void * ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type) { u16 offset, size; if (ice_seg) state->type = sect_type; if (!ice_pkg_advance_sect(ice_seg, state)) return NULL; /* scan for next matching section */ while (state->buf->section_entry[state->sect_idx].type != CPU_TO_LE32(state->type)) if (!ice_pkg_advance_sect(NULL, state)) return NULL; /* validate section */ offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) return NULL; size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) return NULL; /* make sure the section fits in the buffer */ if (offset + size > ICE_PKG_BUF_SIZE) return NULL; state->sect_type = LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); /* calc pointer to this section */ state->sect = ((u8 *)state->buf) + LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); return state->sect; } /** * ice_pkg_enum_entry * @ice_seg: pointer to the ice segment (or NULL on 
subsequent calls) * @state: pointer to the enum state * @sect_type: section type to enumerate * @offset: pointer to variable that receives the offset in the table (optional) * @handler: function that handles access to the entries into the section type * * This function will enumerate all the entries in particular section type in * the ice segment. The first call is made with the ice_seg parameter non-NULL; * on subsequent calls, ice_seg is set to NULL which continues the enumeration. * When the function returns a NULL pointer, then the end of the entries has * been reached. * * Since each section may have a different header and entry size, the handler * function is needed to determine the number and location entries in each * section. * * The offset parameter is optional, but should be used for sections that * contain an offset for each section table. For such cases, the section handler * function must return the appropriate offset + index to give the absolution * offset for each entry. For example, if the base for a section's header * indicates a base offset of 10, and the index for the entry is 2, then * section handler function should set the offset to 10 + 2 = 12. 
*/ void * ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type, u32 *offset, void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset)) { void *entry; if (ice_seg) { if (!handler) return NULL; if (!ice_pkg_enum_section(ice_seg, state, sect_type)) return NULL; state->entry_idx = 0; state->handler = handler; } else { state->entry_idx++; } if (!state->handler) return NULL; /* get entry */ entry = state->handler(state->sect_type, state->sect, state->entry_idx, offset); if (!entry) { /* end of a section, look for another section of this type */ if (!ice_pkg_enum_section(NULL, state, 0)) return NULL; state->entry_idx = 0; entry = state->handler(state->sect_type, state->sect, state->entry_idx, offset); } return entry; } /** * ice_boost_tcam_handler * @sect_type: section type * @section: pointer to section * @index: index of the boost TCAM entry to be returned * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections * * This is a callback function that can be passed to ice_pkg_enum_entry. * Handles enumeration of individual boost TCAM entries. */ static void * ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) { struct ice_boost_tcam_section *boost; if (!section) return NULL; if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) return NULL; if (index > ICE_MAX_BST_TCAMS_IN_BUF) return NULL; if (offset) *offset = 0; boost = (struct ice_boost_tcam_section *)section; if (index >= LE16_TO_CPU(boost->count)) return NULL; return boost->tcam + index; } /** * ice_find_boost_entry * @ice_seg: pointer to the ice segment (non-NULL) * @addr: Boost TCAM address of entry to search for * @entry: returns pointer to the entry * * Finds a particular Boost TCAM entry and returns a pointer to that entry * if it is found. The ice_seg parameter must not be NULL since the first call * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. 
*/ static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, struct ice_boost_tcam_entry **entry) { struct ice_boost_tcam_entry *tcam; struct ice_pkg_enum state; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!ice_seg) return ICE_ERR_PARAM; do { tcam = (struct ice_boost_tcam_entry *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_RXPARSER_BOOST_TCAM, NULL, ice_boost_tcam_handler); if (tcam && LE16_TO_CPU(tcam->addr) == addr) { *entry = tcam; return 0; } ice_seg = NULL; } while (tcam); *entry = NULL; return ICE_ERR_CFG; } /** * ice_init_pkg_hints * @hw: pointer to the HW structure * @ice_seg: pointer to the segment of the package scan (non-NULL) * * This function will scan the package and save off relevant information * (hints or metadata) for driver use. The ice_seg parameter must not be NULL * since the first call to ice_enum_labels requires a pointer to an actual * ice_seg structure. */ void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_pkg_enum state; char *label_name; u16 val; int i; ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!ice_seg) return; label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, &val); while (label_name) { /* TODO: Replace !strnsmp() with wrappers like match_some_pre() */ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) /* check for a tunnel entry */ ice_add_tunnel_hint(hw, label_name, val); label_name = ice_enum_labels(NULL, 0, &state, &val); } /* Cache the appropriate boost TCAM entry pointers for tunnels */ for (i = 0; i < hw->tnl.count; i++) { ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, &hw->tnl.tbl[i].boost_entry); if (hw->tnl.tbl[i].boost_entry) hw->tnl.tbl[i].valid = true; } } /** * ice_acquire_global_cfg_lock * @hw: pointer to the HW structure * @access: access type (read or write) * * This function will request ownership of the global config lock for reading * or writing of 
the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * 0 - Means the caller has acquired the global config lock
 *     and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *     package or has found that no update was necessary; in
 *     this case, the caller can just skip performing any
 *     update of the package.
 */
int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	int status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	/* ICE_ERR_AQ_NO_WORK is an expected outcome, not a failure */
	if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
*/ void ice_release_change_lock(struct ice_hw *hw) { ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); } +/** + * ice_is_get_tx_sched_new_format + * @hw: pointer to the HW struct + * + * Determines if the new format for the Tx scheduler get api is supported + */ +static bool +ice_is_get_tx_sched_new_format(struct ice_hw *hw) +{ + if (ice_is_e830(hw)) + return true; + if (ice_is_e825c(hw)) + return true; + return false; +} + /** * ice_get_set_tx_topo - get or set tx topology * @hw: pointer to the HW struct * @buf: pointer to tx topology buffer * @buf_size: buffer size * @cd: pointer to command details structure or NULL * @flags: pointer to descriptor flags * @set: 0-get, 1-set topology * * The function will get or set tx topology */ static int ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, struct ice_sq_cd *cd, u8 *flags, bool set) { struct ice_aqc_get_set_tx_topo *cmd; struct ice_aq_desc desc; int status; cmd = &desc.params.get_set_tx_topo; if (set) { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; /* requested to update a new topology, not a default topolgy */ if (buf) cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; - if (!ice_is_e830(hw)) + if (!ice_is_get_tx_sched_new_format(hw)) desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (status) return status; /* read the return flag values (first byte) for get operation */ if (!set && flags) *flags = desc.params.get_set_tx_topo.set_flags; return 0; } /** * ice_cfg_tx_topo - Initialize new tx topology if available * @hw: pointer to the HW struct * @buf: pointer to Tx topology buffer * @len: buffer size * * The function will apply the new Tx topology from the package buffer * if available. 
*/ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) { u8 *current_topo, *new_topo = NULL; struct ice_run_time_cfg_seg *seg; struct ice_buf_hdr *section; struct ice_pkg_hdr *pkg_hdr; enum ice_ddp_state state; u16 i, size = 0, offset; u32 reg = 0; int status; u8 flags; if (!buf || !len) return ICE_ERR_PARAM; /* Does FW support new Tx topology mode ? */ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); return ICE_ERR_NOT_SUPPORTED; } current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); if (!current_topo) return ICE_ERR_NO_MEMORY; /* get the current Tx topology */ status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags, false); ice_free(hw, current_topo); if (status) { ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); return status; } /* Is default topology already applied ? */ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && hw->num_tx_sched_layers == 9) { ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n"); /* Already default topology is loaded */ return ICE_ERR_ALREADY_EXISTS; } /* Is new topology already applied ? */ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && hw->num_tx_sched_layers == 5) { ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n"); /* Already new topology is loaded */ return ICE_ERR_ALREADY_EXISTS; } /* Is set topology issued already ? 
*/ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) { ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n"); /* add a small delay before exiting */ for (i = 0; i < 20; i++) ice_msec_delay(100, true); return ICE_ERR_ALREADY_EXISTS; } /* Change the topology from new to default (5 to 9) */ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && hw->num_tx_sched_layers == 5) { ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n"); goto update_topo; } pkg_hdr = (struct ice_pkg_hdr *)buf; state = ice_verify_pkg(pkg_hdr, len); if (state) { ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", state); return ICE_ERR_CFG; } /* find run time configuration segment */ seg = (struct ice_run_time_cfg_seg *) ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); if (!seg) { ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); return ICE_ERR_CFG; } if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n", seg->buf_table.buf_count); return ICE_ERR_CFG; } section = ice_pkg_val_buf(seg->buf_table.buf_array); if (!section || LE32_TO_CPU(section->section_entry[0].type) != ICE_SID_TX_5_LAYER_TOPO) { ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n"); return ICE_ERR_CFG; } size = LE16_TO_CPU(section->section_entry[0].size); offset = LE16_TO_CPU(section->section_entry[0].offset); if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) { ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n"); return ICE_ERR_CFG; } /* make sure the section fits in the buffer */ if (offset + size > ICE_PKG_BUF_SIZE) { ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n"); return ICE_ERR_CFG; } /* Get the new topology buffer */ new_topo = ((u8 *)section) + offset; update_topo: /* acquire global lock to make sure that set topology issued * by one PF */ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, 
ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); return status; } /* check reset was triggered already or not */ reg = rd32(hw, GLGEN_RSTAT); if (reg & GLGEN_RSTAT_DEVSTATE_M) { /* Reset is in progress, re-init the hw again */ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n"); ice_check_reset(hw); return 0; } /* set new topology */ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); if (status) { ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n"); return status; } /* new topology is updated, delay 1 second before issuing the CORRER */ for (i = 0; i < 10; i++) ice_msec_delay(100, true); ice_reset(hw, ICE_RESET_CORER); /* CORER will clear the global lock, so no explicit call * required for release */ return 0; } diff --git a/sys/dev/ice/ice_ddp_common.h b/sys/dev/ice/ice_ddp_common.h index b7dae1f526f0..a7b717c3e15e 100644 --- a/sys/dev/ice/ice_ddp_common.h +++ b/sys/dev/ice/ice_ddp_common.h @@ -1,479 +1,482 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _ICE_DDP_COMMON_H_ #define _ICE_DDP_COMMON_H_ #include "ice_osdep.h" #include "ice_adminq_cmd.h" #include "ice_controlq.h" #include "ice_status.h" #include "ice_flex_type.h" #include "ice_protocol_type.h" /* Package minimal version supported */ #define ICE_PKG_SUPP_VER_MAJ 1 #define ICE_PKG_SUPP_VER_MNR 3 /* Package format version */ #define ICE_PKG_FMT_VER_MAJ 1 #define ICE_PKG_FMT_VER_MNR 0 #define ICE_PKG_FMT_VER_UPD 0 #define ICE_PKG_FMT_VER_DFT 0 #define ICE_PKG_CNT 4 enum ice_ddp_state { /* Indicates that this call to ice_init_pkg * successfully loaded the requested DDP package */ ICE_DDP_PKG_SUCCESS = 0, /* Generic error for already loaded errors, it is mapped later to * the more specific one (one of the next 3) */ ICE_DDP_PKG_ALREADY_LOADED = -1, /* Indicates that a DDP package of the same version has already been * loaded onto the device by a previous call or by another PF */ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, /* The device has a DDP package that is not supported by the driver */ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, /* The device has a compatible package * (but different from the request) already loaded */ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, /* The 
firmware loaded on the device is not compatible with * the DDP package loaded */ ICE_DDP_PKG_FW_MISMATCH = -5, /* The DDP package file is invalid */ ICE_DDP_PKG_INVALID_FILE = -6, /* The version of the DDP package provided is higher than * the driver supports */ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, /* The version of the DDP package provided is lower than the * driver supports */ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, /* Missing security manifest in DDP pkg */ ICE_DDP_PKG_NO_SEC_MANIFEST = -9, /* The RSA signature of the DDP package file provided is invalid */ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10, /* The DDP package file security revision is too low and not * supported by firmware */ ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11, /* Manifest hash mismatch */ ICE_DDP_PKG_MANIFEST_INVALID = -12, /* Buffer hash mismatches manifest */ ICE_DDP_PKG_BUFFER_INVALID = -13, /* Other errors */ ICE_DDP_PKG_ERR = -14, }; /* Package and segment headers and tables */ struct ice_pkg_hdr { struct ice_pkg_ver pkg_format_ver; __le32 seg_count; __le32 seg_offset[STRUCT_HACK_VAR_LEN]; }; /* Package signing algorithm types */ #define SEGMENT_SIGN_TYPE_INVALID 0x00000000 #define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 #define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 #define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */ #define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005 /* generic segment */ struct ice_generic_seg_hdr { #define SEGMENT_TYPE_INVALID 0x00000000 #define SEGMENT_TYPE_METADATA 0x00000001 #define SEGMENT_TYPE_ICE_E810 0x00000010 #define SEGMENT_TYPE_SIGNING 0x00001001 #define SEGMENT_TYPE_ICE_E830 0x00000017 #define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020 __le32 seg_type; struct ice_pkg_ver seg_format_ver; __le32 seg_size; char seg_id[ICE_PKG_NAME_SIZE]; }; /* ice specific segment */ union ice_device_id { struct { __le16 device_id; __le16 vendor_id; } dev_vend_id; __le32 id; }; struct ice_device_id_entry { union ice_device_id device; union ice_device_id sub_device; }; 
struct ice_seg { struct ice_generic_seg_hdr hdr; __le32 device_table_count; struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN]; }; struct ice_nvm_table { __le32 table_count; __le32 vers[STRUCT_HACK_VAR_LEN]; }; struct ice_buf { #define ICE_PKG_BUF_SIZE 4096 u8 buf[ICE_PKG_BUF_SIZE]; }; struct ice_buf_table { __le32 buf_count; struct ice_buf buf_array[STRUCT_HACK_VAR_LEN]; }; struct ice_run_time_cfg_seg { struct ice_generic_seg_hdr hdr; u8 rsvd[8]; struct ice_buf_table buf_table; }; /* global metadata specific segment */ struct ice_global_metadata_seg { struct ice_generic_seg_hdr hdr; struct ice_pkg_ver pkg_ver; __le32 rsvd; char pkg_name[ICE_PKG_NAME_SIZE]; }; #define ICE_MIN_S_OFF 12 #define ICE_MAX_S_OFF 4095 #define ICE_MIN_S_SZ 1 #define ICE_MAX_S_SZ 4084 struct ice_sign_seg { struct ice_generic_seg_hdr hdr; __le32 seg_id; __le32 sign_type; __le32 signed_seg_idx; __le32 signed_buf_start; __le32 signed_buf_count; -#define ICE_SIGN_SEG_RESERVED_COUNT 44 +#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000 +#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001 + __le32 flags; +#define ICE_SIGN_SEG_RESERVED_COUNT 40 u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; struct ice_buf_table buf_tbl; }; /* section information */ struct ice_section_entry { __le32 type; __le16 offset; __le16 size; }; #define ICE_MIN_S_COUNT 1 #define ICE_MAX_S_COUNT 511 #define ICE_MIN_S_DATA_END 12 #define ICE_MAX_S_DATA_END 4096 #define ICE_METADATA_BUF 0x80000000 struct ice_buf_hdr { __le16 section_count; __le16 data_end; struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN]; }; #define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ (ent_sz)) /* ice package section IDs */ #define ICE_SID_METADATA 1 #define ICE_SID_XLT0_SW 10 #define ICE_SID_XLT_KEY_BUILDER_SW 11 #define ICE_SID_XLT1_SW 12 #define ICE_SID_XLT2_SW 13 #define ICE_SID_PROFID_TCAM_SW 14 #define ICE_SID_PROFID_REDIR_SW 15 #define ICE_SID_FLD_VEC_SW 16 
#define ICE_SID_CDID_KEY_BUILDER_SW 17 #define ICE_SID_CDID_REDIR_SW 18 #define ICE_SID_XLT0_ACL 20 #define ICE_SID_XLT_KEY_BUILDER_ACL 21 #define ICE_SID_XLT1_ACL 22 #define ICE_SID_XLT2_ACL 23 #define ICE_SID_PROFID_TCAM_ACL 24 #define ICE_SID_PROFID_REDIR_ACL 25 #define ICE_SID_FLD_VEC_ACL 26 #define ICE_SID_CDID_KEY_BUILDER_ACL 27 #define ICE_SID_CDID_REDIR_ACL 28 #define ICE_SID_XLT0_FD 30 #define ICE_SID_XLT_KEY_BUILDER_FD 31 #define ICE_SID_XLT1_FD 32 #define ICE_SID_XLT2_FD 33 #define ICE_SID_PROFID_TCAM_FD 34 #define ICE_SID_PROFID_REDIR_FD 35 #define ICE_SID_FLD_VEC_FD 36 #define ICE_SID_CDID_KEY_BUILDER_FD 37 #define ICE_SID_CDID_REDIR_FD 38 #define ICE_SID_XLT0_RSS 40 #define ICE_SID_XLT_KEY_BUILDER_RSS 41 #define ICE_SID_XLT1_RSS 42 #define ICE_SID_XLT2_RSS 43 #define ICE_SID_PROFID_TCAM_RSS 44 #define ICE_SID_PROFID_REDIR_RSS 45 #define ICE_SID_FLD_VEC_RSS 46 #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 #define ICE_SID_RXPARSER_CAM 50 #define ICE_SID_RXPARSER_NOMATCH_CAM 51 #define ICE_SID_RXPARSER_IMEM 52 #define ICE_SID_RXPARSER_XLT0_BUILDER 53 #define ICE_SID_RXPARSER_NODE_PTYPE 54 #define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 #define ICE_SID_RXPARSER_PROTO_GRP 57 #define ICE_SID_RXPARSER_METADATA_INIT 58 #define ICE_SID_RXPARSER_XLT0 59 #define ICE_SID_TXPARSER_CAM 60 #define ICE_SID_TXPARSER_NOMATCH_CAM 61 #define ICE_SID_TXPARSER_IMEM 62 #define ICE_SID_TXPARSER_XLT0_BUILDER 63 #define ICE_SID_TXPARSER_NODE_PTYPE 64 #define ICE_SID_TXPARSER_MARKER_PTYPE 65 #define ICE_SID_TXPARSER_BOOST_TCAM 66 #define ICE_SID_TXPARSER_PROTO_GRP 67 #define ICE_SID_TXPARSER_METADATA_INIT 68 #define ICE_SID_TXPARSER_XLT0 69 #define ICE_SID_RXPARSER_INIT_REDIR 70 #define ICE_SID_TXPARSER_INIT_REDIR 71 #define ICE_SID_RXPARSER_MARKER_GRP 72 #define ICE_SID_TXPARSER_MARKER_GRP 73 #define ICE_SID_RXPARSER_LAST_PROTO 74 #define ICE_SID_TXPARSER_LAST_PROTO 75 #define ICE_SID_RXPARSER_PG_SPILL 76 #define 
ICE_SID_TXPARSER_PG_SPILL 77 #define ICE_SID_RXPARSER_NOMATCH_SPILL 78 #define ICE_SID_TXPARSER_NOMATCH_SPILL 79 #define ICE_SID_XLT0_PE 80 #define ICE_SID_XLT_KEY_BUILDER_PE 81 #define ICE_SID_XLT1_PE 82 #define ICE_SID_XLT2_PE 83 #define ICE_SID_PROFID_TCAM_PE 84 #define ICE_SID_PROFID_REDIR_PE 85 #define ICE_SID_FLD_VEC_PE 86 #define ICE_SID_CDID_KEY_BUILDER_PE 87 #define ICE_SID_CDID_REDIR_PE 88 #define ICE_SID_RXPARSER_FLAG_REDIR 97 /* Label Metadata section IDs */ #define ICE_SID_LBL_FIRST 0x80000010 #define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 #define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 #define ICE_SID_LBL_RESERVED_12 0x80000012 #define ICE_SID_LBL_RESERVED_13 0x80000013 #define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 #define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 #define ICE_SID_LBL_PTYPE 0x80000016 #define ICE_SID_LBL_PROTOCOL_ID 0x80000017 #define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 #define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 #define ICE_SID_LBL_RXPARSER_PG 0x8000001A #define ICE_SID_LBL_TXPARSER_PG 0x8000001B #define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C #define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D #define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E #define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F #define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 #define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 #define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 #define ICE_SID_LBL_FLAG 0x80000023 #define ICE_SID_LBL_REG 0x80000024 #define ICE_SID_LBL_SW_PTG 0x80000025 #define ICE_SID_LBL_ACL_PTG 0x80000026 #define ICE_SID_LBL_PE_PTG 0x80000027 #define ICE_SID_LBL_RSS_PTG 0x80000028 #define ICE_SID_LBL_FD_PTG 0x80000029 #define ICE_SID_LBL_SW_VSIG 0x8000002A #define ICE_SID_LBL_ACL_VSIG 0x8000002B #define ICE_SID_LBL_PE_VSIG 0x8000002C #define ICE_SID_LBL_RSS_VSIG 0x8000002D #define ICE_SID_LBL_FD_VSIG 0x8000002E #define ICE_SID_LBL_PTYPE_META 0x8000002F #define ICE_SID_LBL_SW_PROFID 0x80000030 #define ICE_SID_LBL_ACL_PROFID 0x80000031 #define ICE_SID_LBL_PE_PROFID 0x80000032 #define 
ICE_SID_LBL_RSS_PROFID 0x80000033 #define ICE_SID_LBL_FD_PROFID 0x80000034 #define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 #define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 #define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 #define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 /* The following define MUST be updated to reflect the last label section ID */ #define ICE_SID_LBL_LAST 0x80000038 /* Label ICE runtime configuration section IDs */ #define ICE_SID_TX_5_LAYER_TOPO 0x10 enum ice_block { ICE_BLK_SW = 0, ICE_BLK_ACL, ICE_BLK_FD, ICE_BLK_RSS, ICE_BLK_PE, ICE_BLK_COUNT }; enum ice_sect { ICE_XLT0 = 0, ICE_XLT_KB, ICE_XLT1, ICE_XLT2, ICE_PROF_TCAM, ICE_PROF_REDIR, ICE_VEC_TBL, ICE_CDID_KB, ICE_CDID_REDIR, ICE_SECT_COUNT }; /* package buffer building */ struct ice_buf_build { struct ice_buf buf; u16 reserved_section_table_entries; }; struct ice_pkg_enum { struct ice_buf_table *buf_table; u32 buf_idx; u32 type; struct ice_buf_hdr *buf; u32 sect_idx; void *sect; u32 sect_type; u32 entry_idx; void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); }; struct ice_hw; int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); void * ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size); int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); int ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); void ice_release_global_cfg_lock(struct ice_hw *hw); struct ice_generic_seg_hdr * 
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, struct ice_pkg_hdr *pkg_hdr); enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw); void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg); struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); int ice_acquire_global_cfg_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); struct ice_buf_hdr * ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state); bool ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state); void * ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type, u32 *offset, void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset)); void * ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); bool ice_is_init_pkg_successful(enum ice_ddp_state state); void ice_free_seg(struct ice_hw *hw); struct ice_buf_build * ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, void **section); struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); #endif /* _ICE_DDP_H_ */ diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h index 396f59b9d6d9..9b142a1110b2 100644 --- a/sys/dev/ice/ice_devids.h +++ b/sys/dev/ice/ice_devids.h @@ -1,120 +1,120 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_ /* Device IDs */ #define ICE_DEV_ID_E822_SI_DFLT 0x1888 /* Intel(R) Ethernet Connection E823-L for backplane */ #define ICE_DEV_ID_E823L_BACKPLANE 0x124C /* Intel(R) Ethernet Connection E823-L for SFP */ #define ICE_DEV_ID_E823L_SFP 0x124D /* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E823L_10G_BASE_T 0x124E /* Intel(R) Ethernet Connection E823-L 1GbE */ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D /* Intel(R) Ethernet Controller E830-CC for backplane */ #define ICE_DEV_ID_E830_BACKPLANE 0x12D1 /* Intel(R) Ethernet Controller E830-CC for QSFP */ #define ICE_DEV_ID_E830_QSFP56 0x12D2 /* Intel(R) Ethernet Controller E830-CC for SFP */ #define ICE_DEV_ID_E830_SFP 0x12D3 /* Intel(R) Ethernet Controller E830-C for backplane */ #define ICE_DEV_ID_E830C_BACKPLANE 0x12D5 -/* Intel(R) Ethernet Controller E830-XXV for backplane */ -#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC +/* Intel(R) Ethernet Controller E830-L for backplane */ +#define ICE_DEV_ID_E830_L_BACKPLANE 0x12DC /* Intel(R) Ethernet Controller E830-C for QSFP */ #define ICE_DEV_ID_E830C_QSFP 0x12D8 -/* Intel(R) Ethernet Controller E830-XXV for QSFP */ -#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD +/* Intel(R) Ethernet Controller E830-L for QSFP */ +#define ICE_DEV_ID_E830_L_QSFP 0x12DD /* Intel(R) Ethernet Controller E830-C for SFP */ #define ICE_DEV_ID_E830C_SFP 0x12DA -/* Intel(R) Ethernet Controller E830-XXV for SFP */ -#define ICE_DEV_ID_E830_XXV_SFP 0x12DE +/* Intel(R) Ethernet Controller E830-L for SFP */ +#define ICE_DEV_ID_E830_L_SFP 0x12DE /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ #define ICE_DEV_ID_E810C_QSFP 0x1592 /* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_E810C_SFP 0x1593 #define ICE_SUBDEV_ID_E810T 0x000E #define 
ICE_SUBDEV_ID_E810T2 0x000F #define ICE_SUBDEV_ID_E810T3 0x0010 #define ICE_SUBDEV_ID_E810T4 0x0011 #define ICE_SUBDEV_ID_E810T5 0x0012 #define ICE_SUBDEV_ID_E810T6 0x02E9 #define ICE_SUBDEV_ID_E810T7 0x02EA /* Intel(R) Ethernet Controller E810-XXV for backplane */ #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 /* Intel(R) Ethernet Controller E810-XXV for QSFP */ #define ICE_DEV_ID_E810_XXV_QSFP 0x159A /* Intel(R) Ethernet Controller E810-XXV for SFP */ #define ICE_DEV_ID_E810_XXV_SFP 0x159B /* Intel(R) Ethernet Connection E823-C for backplane */ #define ICE_DEV_ID_E823C_BACKPLANE 0x188A /* Intel(R) Ethernet Connection E823-C for QSFP */ #define ICE_DEV_ID_E823C_QSFP 0x188B /* Intel(R) Ethernet Connection E823-C for SFP */ #define ICE_DEV_ID_E823C_SFP 0x188C /* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E823C_10G_BASE_T 0x188D /* Intel(R) Ethernet Connection E823-C 1GbE */ #define ICE_DEV_ID_E823C_SGMII 0x188E /* Intel(R) Ethernet Connection E822-C for backplane */ #define ICE_DEV_ID_E822C_BACKPLANE 0x1890 /* Intel(R) Ethernet Connection E822-C for QSFP */ #define ICE_DEV_ID_E822C_QSFP 0x1891 /* Intel(R) Ethernet Connection E822-C for SFP */ #define ICE_DEV_ID_E822C_SFP 0x1892 /* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E822C_10G_BASE_T 0x1893 /* Intel(R) Ethernet Connection E822-C 1GbE */ #define ICE_DEV_ID_E822C_SGMII 0x1894 /* Intel(R) Ethernet Connection E822-L for backplane */ #define ICE_DEV_ID_E822L_BACKPLANE 0x1897 /* Intel(R) Ethernet Connection E822-L for SFP */ #define ICE_DEV_ID_E822L_SFP 0x1898 /* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 /* Intel(R) Ethernet Connection E822-L 1GbE */ #define ICE_DEV_ID_E822L_SGMII 0x189A /* Intel(R) Ethernet Connection E825-C for backplane */ #define ICE_DEV_ID_E825C_BACKPLANE 0x579C /* Intel(R) Ethernet Connection E825-C for QSFP */ #define ICE_DEV_ID_E825C_QSFP 0x579D /* Intel(R) Ethernet 
Connection E825-C for SFP */ #define ICE_DEV_ID_E825C_SFP 0x579E /* Intel(R) Ethernet Connection E825-C 1GbE */ #define ICE_DEV_ID_E825C_SGMII 0x579F #endif /* _ICE_DEVIDS_H_ */ diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h index 6f4d5f05edd0..22e23ee53491 100644 --- a/sys/dev/ice/ice_drv_info.h +++ b/sys/dev/ice/ice_drv_info.h @@ -1,234 +1,234 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file ice_drv_info.h * @brief device IDs and driver version * * Contains the device IDs tables and the driver version string. * * This file contains static or constant definitions intended to be included * exactly once in the main driver interface file. It implicitly depends on * the main driver header file. * * These definitions could be placed directly in the interface file, but are * kept separate for organizational purposes. */ /** * @var ice_driver_version * @brief driver version string * * Driver version information, used for display as part of an informational * sysctl, and as part of the driver information sent to the firmware at load. * * @var ice_major_version * @brief driver major version number * * @var ice_minor_version * @brief driver minor version number * * @var ice_patch_version * @brief driver patch version number * * @var ice_rc_version * @brief driver release candidate version number */ -const char ice_driver_version[] = "1.42.1-k"; +const char ice_driver_version[] = "1.42.5-k"; const uint8_t ice_major_version = 1; const uint8_t ice_minor_version = 42; -const uint8_t ice_patch_version = 1; +const uint8_t ice_patch_version = 5; const uint8_t ice_rc_version = 0; #define PVIDV(vendor, devid, name) \ - PVID(vendor, devid, name " - 1.42.1-k") + PVID(vendor, devid, name " - 1.42.5-k") #define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \ - PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.42.1-k") + PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.42.5-k") /** * @var ice_vendor_info_array * @brief array of PCI devices supported by this driver * * Array of PCI devices which are supported by this driver. Used to determine * whether a given device should be loaded by this driver. This information is * also exported as part of the module information for other tools to analyze. * * @remark Each type of device ID needs to be listed from most-specific entry * to most-generic entry; e.g. 
PVIDV_OEM()s for a device ID must come before * the PVIDV() for it. */ static const pci_vendor_info_t ice_vendor_info_array[] = { PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE, "Intel(R) Ethernet Controller E810-C for backplane"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0001, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0002, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0008, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x000D, 0, "Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x000E, 0, "Intel(R) Ethernet Network Adapter E810-2C-Q2"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, "Intel(R) Ethernet Controller E810-C for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter 
E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x000C, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, "Intel(R) Ethernet Controller E810-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE, "Intel(R) Ethernet Connection E822-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP, "Intel(R) Ethernet Connection E822-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP, "Intel(R) Ethernet Connection E822-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T, "Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII, "Intel(R) Ethernet Connection E822-C 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE, "Intel(R) Ethernet Connection E822-L for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP, "Intel(R) Ethernet Connection E822-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T, "Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII, "Intel(R) Ethernet Connection E822-L 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE, "Intel(R) Ethernet Connection E823-L for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP, "Intel(R) Ethernet Connection E823-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP, "Intel(R) Ethernet Connection E823-L for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T, "Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE, "Intel(R) Ethernet Connection E823-L 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE, "Intel(R) Ethernet Connection E823-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP, 
"Intel(R) Ethernet Connection E823-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP, "Intel(R) Ethernet Connection E823-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T, "Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII, "Intel(R) Ethernet Connection E823-C 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE, "Intel(R) Ethernet Controller E810-XXV for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP, "Intel(R) Ethernet Controller E810-XXV for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, "Intel(R) Ethernet Controller E810-XXV for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_BACKPLANE, "Intel(R) Ethernet Connection E830-CC for backplane"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56, ICE_INTEL_VENDOR_ID, 0x0002, 0, "Intel(R) Ethernet Network Adapter E830-C-Q2 for OCP 3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E830-CC-Q1 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56, "Intel(R) Ethernet Connection E830-CC for QSFP56"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP, ICE_INTEL_VENDOR_ID, 0x0001, 0, "Intel(R) Ethernet Network Adapter E830-XXV-2 for OCP 3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) 
Ethernet Network Adapter E830-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E830-XXV-4 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP, "Intel(R) Ethernet Connection E830-CC for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_BACKPLANE, "Intel(R) Ethernet Connection E830-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_QSFP, "Intel(R) Ethernet Connection E830-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_SFP, "Intel(R) Ethernet Connection E830-C for SFP"), - PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_XXV_BACKPLANE, - "Intel(R) Ethernet Connection E830-XXV for backplane"), - PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_XXV_QSFP, - "Intel(R) Ethernet Connection E830-XXV for QSFP"), - PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_XXV_SFP, - "Intel(R) Ethernet Connection E830-XXV for SFP"), + PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_BACKPLANE, + "Intel(R) Ethernet Connection E830-L for backplane"), + PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_QSFP, + "Intel(R) Ethernet Connection E830-L for QSFP"), + PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_SFP, + "Intel(R) Ethernet Connection E830-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE, "Intel(R) Ethernet Connection E825-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP, "Intel(R) Ethernet Connection E825-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP, "Intel(R) Ethernet Connection E825-C for SFP"), PVID_END }; diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c index 8e52e34b2752..0025a65d73fc 100644 --- a/sys/dev/ice/ice_fw_logging.c +++ b/sys/dev/ice/ice_fw_logging.c @@ -1,424 +1,427 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file ice_fw_logging.c * @brief firmware logging sysctls * * Contains sysctls to enable and configure firmware logging debug support. */ #include "ice_lib.h" #include "ice_iflib.h" #include #include /* * SDT provider for DTrace probes related to firmware logging events */ SDT_PROVIDER_DEFINE(ice_fwlog); /* * SDT DTrace probe fired when a firmware log message is received over the * AdminQ. It passes the buffer of the firwmare log message along with its * length in bytes to the DTrace framework. 
*/ SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int"); /* * Helper function prototypes */ static int ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg); /* * dynamic sysctl handlers */ static int ice_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS); /** * ice_reconfig_fw_log - Re-program firmware logging configuration * @sc: private softc structure * @cfg: firmware log configuration to latch * * If the adminq is currently active, ask firmware to update the logging * configuration. If the adminq is currently down, then do nothing. In this * case, ice_init_hw() will re-configure firmware logging as soon as it brings * up the adminq. */ static int ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg) { int status; ice_fwlog_init(&sc->hw, cfg); if (!ice_check_sq_alive(&sc->hw, &sc->hw.adminq)) return (0); if (!ice_fwlog_supported(&sc->hw)) return (0); status = ice_fwlog_set(&sc->hw, cfg); if (status) { device_printf(sc->dev, "Failed to reconfigure firmware logging, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); return (ENODEV); } return (0); } #define ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION \ "\nControl firmware message limit to send per ARQ event" \ "\t\nMin: 1" \ "\t\nMax: 128" #define ICE_SYSCTL_HELP_FWLOG_ARQ_ENA \ "\nControl whether to enable/disable reporting to admin Rx queue" \ "\n0 - Enable firmware reporting via ARQ" \ "\n1 - Disable firmware reporting via ARQ" #define ICE_SYSCTL_HELP_FWLOG_UART_ENA \ "\nControl whether to enable/disable reporting to UART" \ "\n0 - Enable firmware reporting via UART" \ "\n1 - Disable firmware reporting via UART" #define ICE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD \ "\nControl whether to enable logging during the attach phase" \ "\n0 - Enable firmware logging 
during attach phase" \ "\n1 - Disable firmware logging during attach phase" #define ICE_SYSCTL_HELP_FWLOG_REGISTER \ "\nControl whether to enable/disable firmware logging" \ "\n0 - Enable firmware logging" \ "\n1 - Disable firmware logging" #define ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY \ "\nControl the level of log output messages for this module" \ "\n\tverbose <4> - Verbose messages + (Error|Warning|Normal)" \ "\n\tnormal <3> - Normal messages + (Error|Warning)" \ "\n\twarning <2> - Warning messages + (Error)" \ "\n\terror <1> - Error messages" \ "\n\tnone <0> - Disables all logging for this module" /** * ice_sysctl_fwlog_set_cfg_options - Sysctl for setting fwlog cfg options * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: option to adjust * @req: sysctl request pointer * * On read: displays whether firmware logging was reported during attachment * On write: enables/disables firmware logging during attach phase * * This has no effect on the legacy (V1) version of firmware logging. */ static int ice_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; int error; u16 option = (u16)arg2; bool enabled; enabled = !!(cfg->options & option); error = sysctl_handle_bool(oidp, &enabled, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (enabled) cfg->options |= option; else cfg->options &= ~option; return ice_reconfig_fw_log(sc, cfg); } /** * ice_sysctl_fwlog_log_resolution - Sysctl for setting log message resolution * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: __unused__ * @req: sysctl request pointer * * On read: displays message queue limit before posting * On write: sets message queue limit before posting * * This has no effect on the legacy (V1) version of firmware logging. 
*/ static int ice_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; int error; u8 resolution; UNREFERENCED_PARAMETER(arg2); resolution = cfg->log_resolution; error = sysctl_handle_8(oidp, &resolution, 0, req); if ((error) || (req->newptr == NULL)) return (error); if ((resolution < ICE_AQC_FW_LOG_MIN_RESOLUTION) || (resolution > ICE_AQC_FW_LOG_MAX_RESOLUTION)) { device_printf(sc->dev, "Log resolution out-of-bounds\n"); return (EINVAL); } cfg->log_resolution = resolution; return ice_reconfig_fw_log(sc, cfg); } /** * ice_sysctl_fwlog_register - Sysctl for (de)registering firmware logs * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: __unused__ * @req: sysctl request pointer * * On read: displays whether firmware logging is registered * On write: (de)registers firmware logging. */ static int ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; int status; int error; u8 enabled; UNREFERENCED_PARAMETER(arg2); if (ice_test_state(&sc->state, ICE_STATE_ATTACHING)) { device_printf(sc->dev, "Registering FW Logging via kenv is supported with the on_load option\n"); return (EIO); } if (cfg->options & ICE_FWLOG_OPTION_IS_REGISTERED) enabled = true; else enabled = false; error = sysctl_handle_bool(oidp, &enabled, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (!ice_check_sq_alive(&sc->hw, &sc->hw.adminq)) return (0); if (enabled) { status = ice_fwlog_register(&sc->hw); if (!status) ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en); } else { status = ice_fwlog_unregister(&sc->hw); if (!status) ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en); } if (status) return (EIO); return (0); } /** * ice_sysctl_fwlog_module_log_severity - Add tunables for a FW logging module * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: index to logging 
module * @req: sysctl request pointer */ static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; struct sbuf *sbuf; char *sev_str_end; enum ice_aqc_fw_logging_mod module = (enum ice_aqc_fw_logging_mod)arg2; int error, ll_num; u8 log_level; char sev_str[16]; bool sev_set = false; log_level = cfg->module_entries[module].log_level; sbuf = sbuf_new(NULL, sev_str, sizeof(sev_str), SBUF_FIXEDLEN); sbuf_printf(sbuf, "%d<%s>", log_level, ice_log_sev_str(log_level)); sbuf_finish(sbuf); sbuf_delete(sbuf); error = sysctl_handle_string(oidp, sev_str, sizeof(sev_str), req); if ((error) || (req->newptr == NULL)) return (error); if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_VERBOSE), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_VERBOSE; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_NORMAL), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_NORMAL; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_WARNING), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_WARNING; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_ERROR), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_ERROR; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_NONE), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_NONE; sev_set = true; } if (!sev_set) { ll_num = strtol(sev_str, &sev_str_end, 0); if (sev_str_end == sev_str) ll_num = -1; if ((ll_num >= ICE_FWLOG_LEVEL_NONE) && (ll_num < ICE_FWLOG_LEVEL_INVALID)) log_level = ll_num; else { device_printf(sc->dev, "%s: \"%s\" is not a valid log level\n", __func__, sev_str); return (EINVAL); } } cfg->module_entries[module].log_level = log_level; return ice_reconfig_fw_log(sc, cfg); } /** * ice_add_fw_logging_tunables - Add tunables to configure FW logging events * @sc: private softc structure * @parent: parent node to add the tunables under * * Add tunables for configuring the firmware logging 
support. This includes * a control to enable the logging, and controls for each module to configure * which events to receive. */ void ice_add_fw_logging_tunables(struct ice_softc *sc, struct sysctl_oid *parent) { struct sysctl_oid_list *parent_list, *fwlog_list, *module_list; struct sysctl_oid *fwlog_node, *module_node; struct sysctl_ctx_list *ctx; struct ice_hw *hw = &sc->hw; struct ice_fwlog_cfg *cfg; device_t dev = sc->dev; enum ice_aqc_fw_logging_mod module; u16 i; cfg = &hw->fwlog_cfg; ctx = device_get_sysctl_ctx(dev); parent_list = SYSCTL_CHILDREN(parent); fwlog_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "fw_log", ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, "Firmware Logging"); fwlog_list = SYSCTL_CHILDREN(fwlog_node); - cfg->log_resolution = 10; - SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution", - ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, - 0, ice_sysctl_fwlog_log_resolution, - "CU", ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION); - - cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; - SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en", - ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, - ICE_FWLOG_OPTION_ARQ_ENA, ice_sysctl_fwlog_set_cfg_options, - "CU", ICE_SYSCTL_HELP_FWLOG_ARQ_ENA); - - SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en", - ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, - ICE_FWLOG_OPTION_UART_ENA, ice_sysctl_fwlog_set_cfg_options, - "CU", ICE_SYSCTL_HELP_FWLOG_UART_ENA); - SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "on_load", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, ICE_FWLOG_OPTION_REGISTER_ON_INIT, ice_sysctl_fwlog_set_cfg_options, "CU", ICE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD); SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "register", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, 0, ice_sysctl_fwlog_register, "CU", ICE_SYSCTL_HELP_FWLOG_REGISTER); - module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity", - ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, - "Level of log output"); - - module_list = 
SYSCTL_CHILDREN(module_node); - - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - /* Setup some defaults */ - cfg->module_entries[i].module_id = i; - cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NONE; - module = (enum ice_aqc_fw_logging_mod)i; + hw->pf_id = ice_get_pf_id(hw); + if (hw->pf_id == 0) { + module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity", + ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, + "Level of log output"); + + module_list = SYSCTL_CHILDREN(module_node); + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + /* Setup some defaults */ + cfg->module_entries[i].module_id = i; + cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NONE; + module = (enum ice_aqc_fw_logging_mod)i; + + SYSCTL_ADD_PROC(ctx, module_list, + OID_AUTO, ice_fw_module_str(module), + ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RWTUN, sc, + module, ice_sysctl_fwlog_module_log_severity, + "A", ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY); + } - SYSCTL_ADD_PROC(ctx, module_list, - OID_AUTO, ice_fw_module_str(module), - ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RWTUN, sc, - module, ice_sysctl_fwlog_module_log_severity, - "A", ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY); + cfg->log_resolution = 10; + SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution", + ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, + 0, ice_sysctl_fwlog_log_resolution, + "CU", ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION); + + cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; + SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en", + ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, + ICE_FWLOG_OPTION_ARQ_ENA, ice_sysctl_fwlog_set_cfg_options, + "CU", ICE_SYSCTL_HELP_FWLOG_ARQ_ENA); + + SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en", + ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, + ICE_FWLOG_OPTION_UART_ENA, ice_sysctl_fwlog_set_cfg_options, + "CU", ICE_SYSCTL_HELP_FWLOG_UART_ENA); } } /** * ice_handle_fw_log_event - Handle a firmware logging event from the AdminQ * @sc: pointer to private softc 
structure * @desc: the AdminQ descriptor for this firmware event * @buf: pointer to the buffer accompanying the AQ message */ void ice_handle_fw_log_event(struct ice_softc *sc, struct ice_aq_desc *desc, void *buf) { /* Trigger a DTrace probe event for this firmware message */ SDT_PROBE2(ice_fwlog, , , message, (const u8 *)buf, desc->datalen); /* Possibly dump the firmware message to the console, if enabled */ ice_fwlog_event_dump(&sc->hw, desc, buf); } diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h index 47256263d66c..3f2778d91a4b 100644 --- a/sys/dev/ice/ice_hw_autogen.h +++ b/sys/dev/ice/ice_hw_autogen.h @@ -1,11285 +1,11299 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* Machine generated file. Do not edit. */ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ #define PRTMAC_CTL_TX_PAUSE_ENABLE_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE) #define PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S) #define PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M) #define PRTMAC_CTL_RX_PAUSE_ENABLE_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE) #define PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S) #define PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M) +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE(_i) (0x000FD000 + ((_i) * 64)) /* _i=0...7 */ /* Reset Source: CORER */ +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_MAX_INDEX 7 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_START_S 0 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_START_M MAKEMASK(0x3F, 0) +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_END_S 6 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_END_M MAKEMASK(0x3F, 6) +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_TYPE_S 12 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_TYPE_M MAKEMASK(0x3, 12) +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_NUM_S 14 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_NUM_M MAKEMASK(0x3FF, 14) +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_PF_NUM_S 24 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_PF_NUM_M MAKEMASK(0x7, 24) +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_ENABLE_S 31 +#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_ENABLE_M BIT(31) #define GL_HIDA(_i) (0x00082000 + ((_i) * 4)) #define GL_HIBA(_i) (0x00081000 + ((_i) * 4)) #define GL_HICR 0x00082040 #define GL_HICR_EN 0x00082044 #define GLGEN_CSR_DEBUG_C 0x00075750 #define GLNVM_GENS 0x000B6100 #define GLNVM_FLA 0x000B6108 #define GL_HIDA_MAX_INDEX 15 #define GL_HIBA_MAX_INDEX 1023 #define GL_MNG_FWSM_FW_LOADING_M BIT(30) #define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */ #define GL_RDPU_CNTRL_RX_PAD_EN_S 0 #define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0) #define GL_RDPU_CNTRL_UDP_ZERO_EN_S 1 #define GL_RDPU_CNTRL_UDP_ZERO_EN_M BIT(1) #define GL_RDPU_CNTRL_BLNC_EN_S 2 #define GL_RDPU_CNTRL_BLNC_EN_M BIT(2) #define GL_RDPU_CNTRL_RECIPE_BYPASS_S 3 #define GL_RDPU_CNTRL_RECIPE_BYPASS_M BIT(3) #define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_S 4 #define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 4) #define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_S 10 #define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 10) #define 
GL_RDPU_CNTRL_REQ_WB_PM_TH_S 16 #define GL_RDPU_CNTRL_REQ_WB_PM_TH_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_REQ_WB_PM_TH_M : E800_GL_RDPU_CNTRL_REQ_WB_PM_TH_M) #define E800_GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16) #define E830_GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x3F, 16) #define GL_RDPU_CNTRL_ECO_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_ECO_S : E800_GL_RDPU_CNTRL_ECO_S) #define E800_GL_RDPU_CNTRL_ECO_S 21 #define E830_GL_RDPU_CNTRL_ECO_S 23 #define GL_RDPU_CNTRL_ECO_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_ECO_M : E800_GL_RDPU_CNTRL_ECO_M) #define E800_GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21) #define E830_GL_RDPU_CNTRL_ECO_M MAKEMASK(0x1FF, 23) #define MSIX_PBA(_i) (0x00008000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: FLR */ #define MSIX_PBA_MAX_INDEX 2 #define MSIX_PBA_PENBIT_S 0 #define MSIX_PBA_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ #define MSIX_TADD_MAX_INDEX 64 #define MSIX_TADD_MSIXTADD10_S 0 #define MSIX_TADD_MSIXTADD10_M MAKEMASK(0x3, 0) #define MSIX_TADD_MSIXTADD_S 2 #define MSIX_TADD_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) #define MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ #define MSIX_TUADD_MAX_INDEX 64 #define MSIX_TUADD_MSIXTUADD_S 0 #define MSIX_TUADD_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ #define MSIX_TVCTRL_MAX_INDEX 64 #define MSIX_TVCTRL_MASK_S 0 #define MSIX_TVCTRL_MASK_M BIT(0) #define PF0_FW_HLP_ARQBAH_PAGE 0x02D00180 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_HLP_ARQBAL_PAGE 0x02D00080 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_S 
6 #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_HLP_ARQH_PAGE 0x02D00380 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQH_PAGE_ARQH_S 0 #define PF0_FW_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ARQLEN_PAGE 0x02D00280 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_FW_HLP_ARQT_PAGE 0x02D00480 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQT_PAGE_ARQT_S 0 #define PF0_FW_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQBAH_PAGE 0x02D00100 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_HLP_ATQBAL_PAGE 0x02D00000 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_S 0 #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_HLP_ATQH_PAGE 0x02D00300 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQH_PAGE_ATQH_S 0 #define PF0_FW_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQLEN_PAGE 0x02D00200 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define 
PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_FW_HLP_ATQT_PAGE 0x02D00400 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQT_PAGE_ATQT_S 0 #define PF0_FW_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQBAH_PAGE 0x02D40180 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_PSM_ARQBAL_PAGE 0x02D40080 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_PSM_ARQH_PAGE 0x02D40380 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQH_PAGE_ARQH_S 0 #define PF0_FW_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQLEN_PAGE 0x02D40280 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_FW_PSM_ARQT_PAGE 0x02D40480 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQT_PAGE_ARQT_S 0 #define PF0_FW_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQBAH_PAGE 0x02D40100 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_PSM_ATQBAL_PAGE 0x02D40000 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_S 0 #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_M 
MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_PSM_ATQH_PAGE 0x02D40300 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQH_PAGE_ATQH_S 0 #define PF0_FW_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQLEN_PAGE 0x02D40200 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_FW_PSM_ATQT_PAGE 0x02D40400 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQT_PAGE_ATQT_S 0 #define PF0_FW_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQBAH_PAGE 0x02D80190 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_CPM_ARQBAL_PAGE 0x02D80090 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_CPM_ARQH_PAGE 0x02D80390 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQH_PAGE_ARQH_S 0 #define PF0_MBX_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQLEN_PAGE 0x02D80290 /* Reset Source: PFR */ #define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define 
PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_MBX_CPM_ARQT_PAGE 0x02D80490 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQT_PAGE_ARQT_S 0 #define PF0_MBX_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQBAH_PAGE 0x02D80110 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_CPM_ATQBAL_PAGE 0x02D80010 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_CPM_ATQH_PAGE 0x02D80310 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQH_PAGE_ATQH_S 0 #define PF0_MBX_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQLEN_PAGE 0x02D80210 /* Reset Source: PFR */ #define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_MBX_CPM_ATQT_PAGE 0x02D80410 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQT_PAGE_ATQT_S 0 #define PF0_MBX_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQBAH_PAGE 0x02D00190 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_HLP_ARQBAL_PAGE 0x02D00090 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_HLP_ARQH_PAGE 0x02D00390 /* Reset 
Source: CORER */ #define PF0_MBX_HLP_ARQH_PAGE_ARQH_S 0 #define PF0_MBX_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQLEN_PAGE 0x02D00290 /* Reset Source: PFR */ #define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_MBX_HLP_ARQT_PAGE 0x02D00490 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQT_PAGE_ARQT_S 0 #define PF0_MBX_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQBAH_PAGE 0x02D00110 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_HLP_ATQBAL_PAGE 0x02D00010 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_HLP_ATQH_PAGE 0x02D00310 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQH_PAGE_ATQH_S 0 #define PF0_MBX_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQLEN_PAGE 0x02D00210 /* Reset Source: PFR */ #define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_MBX_HLP_ATQT_PAGE 0x02D00410 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQT_PAGE_ATQT_S 0 
#define PF0_MBX_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQBAH_PAGE 0x02D40190 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_PSM_ARQBAL_PAGE 0x02D40090 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_PSM_ARQH_PAGE 0x02D40390 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQH_PAGE_ARQH_S 0 #define PF0_MBX_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQLEN_PAGE 0x02D40290 /* Reset Source: PFR */ #define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_MBX_PSM_ARQT_PAGE 0x02D40490 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQT_PAGE_ARQT_S 0 #define PF0_MBX_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQBAH_PAGE 0x02D40110 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_PSM_ATQBAL_PAGE 0x02D40010 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_PSM_ATQH_PAGE 0x02D40310 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQH_PAGE_ATQH_S 0 #define PF0_MBX_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQLEN_PAGE 0x02D40210 /* Reset Source: PFR */ #define 
PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_MBX_PSM_ATQT_PAGE 0x02D40410 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQT_PAGE_ATQT_S 0 #define PF0_MBX_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQBAH_PAGE 0x02D801A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_CPM_ARQBAL_PAGE 0x02D800A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_CPM_ARQH_PAGE 0x02D803A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQH_PAGE_ARQH_S 0 #define PF0_SB_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQLEN_PAGE 0x02D802A0 /* Reset Source: PFR */ #define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_SB_CPM_ARQT_PAGE 0x02D804A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQT_PAGE_ARQT_S 0 #define PF0_SB_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQBAH_PAGE 0x02D80120 /* 
Reset Source: CORER */ #define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_CPM_ATQBAL_PAGE 0x02D80020 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_CPM_ATQH_PAGE 0x02D80320 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQH_PAGE_ATQH_S 0 #define PF0_SB_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQLEN_PAGE 0x02D80220 /* Reset Source: PFR */ #define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_SB_CPM_ATQT_PAGE 0x02D80420 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQT_PAGE_ATQT_S 0 #define PF0_SB_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQBAH_PAGE 0x02D001A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_HLP_ARQBAL_PAGE 0x02D000A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_HLP_ARQH_PAGE 0x02D003A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQH_PAGE_ARQH_S 0 #define PF0_SB_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQLEN_PAGE 0x02D002A0 /* Reset Source: PFR */ #define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_S 28 #define 
PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_SB_HLP_ARQT_PAGE 0x02D004A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQT_PAGE_ARQT_S 0 #define PF0_SB_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQBAH_PAGE 0x02D00120 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_HLP_ATQBAL_PAGE 0x02D00020 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_HLP_ATQH_PAGE 0x02D00320 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQH_PAGE_ATQH_S 0 #define PF0_SB_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQLEN_PAGE 0x02D00220 /* Reset Source: PFR */ #define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_SB_HLP_ATQT_PAGE 0x02D00420 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQT_PAGE_ATQT_S 0 #define PF0_SB_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0INT_DYN_CTL(_i) (0x03000000 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_DYN_CTL_MAX_INDEX 2047 #define PF0INT_DYN_CTL_INTENA_S 0 #define PF0INT_DYN_CTL_INTENA_M BIT(0) #define PF0INT_DYN_CTL_CLEARPBA_S 1 #define PF0INT_DYN_CTL_CLEARPBA_M BIT(1) #define 
PF0INT_DYN_CTL_SWINT_TRIG_S 2 #define PF0INT_DYN_CTL_SWINT_TRIG_M BIT(2) #define PF0INT_DYN_CTL_ITR_INDX_S 3 #define PF0INT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3) #define PF0INT_DYN_CTL_INTERVAL_S 5 #define PF0INT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5) #define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_S 24 #define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) #define PF0INT_DYN_CTL_SW_ITR_INDX_S 25 #define PF0INT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25) #define PF0INT_DYN_CTL_WB_ON_ITR_S 30 #define PF0INT_DYN_CTL_WB_ON_ITR_M BIT(30) #define PF0INT_DYN_CTL_INTENA_MSK_S 31 #define PF0INT_DYN_CTL_INTENA_MSK_M BIT(31) #define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_ITR_0_MAX_INDEX 2047 #define PF0INT_ITR_0_INTERVAL_S 0 #define PF0INT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0) #define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_ITR_1_MAX_INDEX 2047 #define PF0INT_ITR_1_INTERVAL_S 0 #define PF0INT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0) #define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_ITR_2_MAX_INDEX 2047 #define PF0INT_ITR_2_INTERVAL_S 0 #define PF0INT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0) #define PF0INT_OICR_CPM_PAGE 0x02D03000 /* Reset Source: CORER */ #define PF0INT_OICR_CPM_PAGE_INTEVENT_S 0 #define PF0INT_OICR_CPM_PAGE_INTEVENT_M BIT(0) #define PF0INT_OICR_CPM_PAGE_QUEUE_S 1 #define PF0INT_OICR_CPM_PAGE_QUEUE_M BIT(1) #define PF0INT_OICR_CPM_PAGE_RSV1_S 2 #define PF0INT_OICR_CPM_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PF0INT_OICR_CPM_PAGE_RSV1_M : E800_PF0INT_OICR_CPM_PAGE_RSV1_M) #define E800_PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2) #define E830_PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0x3F, 2) #define E800_PF0INT_OICR_CPM_PAGE_HH_COMP_S 10 #define E800_PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10) #define PF0INT_OICR_CPM_PAGE_TSYN_TX_S 11 #define PF0INT_OICR_CPM_PAGE_TSYN_TX_M BIT(11) #define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_S 12 #define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_CPM_PAGE_TSYN_TGT_S 13 #define PF0INT_OICR_CPM_PAGE_TSYN_TGT_M BIT(13) #define PF0INT_OICR_CPM_PAGE_HLP_RDY_S 14 #define PF0INT_OICR_CPM_PAGE_HLP_RDY_M BIT(14) #define PF0INT_OICR_CPM_PAGE_CPM_RDY_S 15 #define PF0INT_OICR_CPM_PAGE_CPM_RDY_M BIT(15) #define PF0INT_OICR_CPM_PAGE_ECC_ERR_S 16 #define PF0INT_OICR_CPM_PAGE_ECC_ERR_M BIT(16) #define PF0INT_OICR_CPM_PAGE_RSV2_S 17 #define PF0INT_OICR_CPM_PAGE_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_CPM_PAGE_MAL_DETECT_S 19 #define PF0INT_OICR_CPM_PAGE_MAL_DETECT_M BIT(19) #define PF0INT_OICR_CPM_PAGE_GRST_S 20 #define PF0INT_OICR_CPM_PAGE_GRST_M BIT(20) #define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_S 21 #define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_CPM_PAGE_GPIO_S 22 #define PF0INT_OICR_CPM_PAGE_GPIO_M BIT(22) #define PF0INT_OICR_CPM_PAGE_RSV3_S 23 #define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24) #define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27 #define PF0INT_OICR_CPM_PAGE_PE_PUSH_M BIT(27) #define PF0INT_OICR_CPM_PAGE_PE_CRITERR_S 28 #define PF0INT_OICR_CPM_PAGE_PE_CRITERR_M BIT(28) #define PF0INT_OICR_CPM_PAGE_VFLR_S 29 #define PF0INT_OICR_CPM_PAGE_VFLR_M BIT(29) #define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_S 30 #define 
PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_CPM_PAGE_SWINT_S 31 #define PF0INT_OICR_CPM_PAGE_SWINT_M BIT(31) #define PF0INT_OICR_ENA_CPM_PAGE 0x02D03100 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_CPM_PAGE_RSV0_S 0 #define PF0INT_OICR_ENA_CPM_PAGE_RSV0_M BIT(0) #define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_S 1 #define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_ENA_HLP_PAGE 0x02D01100 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_HLP_PAGE_RSV0_S 0 #define PF0INT_OICR_ENA_HLP_PAGE_RSV0_M BIT(0) #define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_S 1 #define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_ENA_PSM_PAGE 0x02D02100 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_PSM_PAGE_RSV0_S 0 #define PF0INT_OICR_ENA_PSM_PAGE_RSV0_M BIT(0) #define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_S 1 #define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_HLP_PAGE 0x02D01000 /* Reset Source: CORER */ #define PF0INT_OICR_HLP_PAGE_INTEVENT_S 0 #define PF0INT_OICR_HLP_PAGE_INTEVENT_M BIT(0) #define PF0INT_OICR_HLP_PAGE_QUEUE_S 1 #define PF0INT_OICR_HLP_PAGE_QUEUE_M BIT(1) #define PF0INT_OICR_HLP_PAGE_RSV1_S 2 #define PF0INT_OICR_HLP_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PF0INT_OICR_HLP_PAGE_RSV1_M : E800_PF0INT_OICR_HLP_PAGE_RSV1_M) #define E800_PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2) #define E830_PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0x3F, 2) #define E800_PF0INT_OICR_HLP_PAGE_HH_COMP_S 10 #define E800_PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10) #define PF0INT_OICR_HLP_PAGE_TSYN_TX_S 11 #define PF0INT_OICR_HLP_PAGE_TSYN_TX_M BIT(11) #define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_S 12 #define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_HLP_PAGE_TSYN_TGT_S 13 #define PF0INT_OICR_HLP_PAGE_TSYN_TGT_M BIT(13) #define PF0INT_OICR_HLP_PAGE_HLP_RDY_S 14 #define PF0INT_OICR_HLP_PAGE_HLP_RDY_M BIT(14) #define PF0INT_OICR_HLP_PAGE_CPM_RDY_S 15 #define PF0INT_OICR_HLP_PAGE_CPM_RDY_M BIT(15) #define PF0INT_OICR_HLP_PAGE_ECC_ERR_S 16 #define PF0INT_OICR_HLP_PAGE_ECC_ERR_M BIT(16) #define PF0INT_OICR_HLP_PAGE_RSV2_S 17 #define PF0INT_OICR_HLP_PAGE_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_HLP_PAGE_MAL_DETECT_S 19 #define PF0INT_OICR_HLP_PAGE_MAL_DETECT_M BIT(19) #define PF0INT_OICR_HLP_PAGE_GRST_S 20 #define PF0INT_OICR_HLP_PAGE_GRST_M BIT(20) #define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_S 21 #define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_HLP_PAGE_GPIO_S 22 #define PF0INT_OICR_HLP_PAGE_GPIO_M BIT(22) #define PF0INT_OICR_HLP_PAGE_RSV3_S 23 #define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24) #define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27 #define PF0INT_OICR_HLP_PAGE_PE_PUSH_M BIT(27) #define PF0INT_OICR_HLP_PAGE_PE_CRITERR_S 28 #define PF0INT_OICR_HLP_PAGE_PE_CRITERR_M BIT(28) #define PF0INT_OICR_HLP_PAGE_VFLR_S 29 #define PF0INT_OICR_HLP_PAGE_VFLR_M BIT(29) #define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_S 30 #define 
PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_HLP_PAGE_SWINT_S 31 #define PF0INT_OICR_HLP_PAGE_SWINT_M BIT(31) #define PF0INT_OICR_PSM_PAGE 0x02D02000 /* Reset Source: CORER */ #define PF0INT_OICR_PSM_PAGE_INTEVENT_S 0 #define PF0INT_OICR_PSM_PAGE_INTEVENT_M BIT(0) #define PF0INT_OICR_PSM_PAGE_QUEUE_S 1 #define PF0INT_OICR_PSM_PAGE_QUEUE_M BIT(1) #define PF0INT_OICR_PSM_PAGE_RSV1_S 2 #define PF0INT_OICR_PSM_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_PSM_PAGE_RSV1_M : E800_PF0INT_OICR_PSM_PAGE_RSV1_M) #define E800_PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2) #define E830_PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0x3F, 2) #define E800_PF0INT_OICR_PSM_PAGE_HH_COMP_S 10 #define E800_PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10) #define PF0INT_OICR_PSM_PAGE_TSYN_TX_S 11 #define PF0INT_OICR_PSM_PAGE_TSYN_TX_M BIT(11) #define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_S 12 #define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_PSM_PAGE_TSYN_TGT_S 13 #define PF0INT_OICR_PSM_PAGE_TSYN_TGT_M BIT(13) #define PF0INT_OICR_PSM_PAGE_HLP_RDY_S 14 #define PF0INT_OICR_PSM_PAGE_HLP_RDY_M BIT(14) #define PF0INT_OICR_PSM_PAGE_CPM_RDY_S 15 #define PF0INT_OICR_PSM_PAGE_CPM_RDY_M BIT(15) #define PF0INT_OICR_PSM_PAGE_ECC_ERR_S 16 #define PF0INT_OICR_PSM_PAGE_ECC_ERR_M BIT(16) #define PF0INT_OICR_PSM_PAGE_RSV2_S 17 #define PF0INT_OICR_PSM_PAGE_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_PSM_PAGE_MAL_DETECT_S 19 #define PF0INT_OICR_PSM_PAGE_MAL_DETECT_M BIT(19) #define PF0INT_OICR_PSM_PAGE_GRST_S 20 #define PF0INT_OICR_PSM_PAGE_GRST_M BIT(20) #define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_S 21 #define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_PSM_PAGE_GPIO_S 22 #define PF0INT_OICR_PSM_PAGE_GPIO_M BIT(22) #define PF0INT_OICR_PSM_PAGE_RSV3_S 23 #define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24) #define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25 
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27 #define PF0INT_OICR_PSM_PAGE_PE_PUSH_M BIT(27) #define PF0INT_OICR_PSM_PAGE_PE_CRITERR_S 28 #define PF0INT_OICR_PSM_PAGE_PE_CRITERR_M BIT(28) #define PF0INT_OICR_PSM_PAGE_VFLR_S 29 #define PF0INT_OICR_PSM_PAGE_VFLR_M BIT(29) #define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_S 30 #define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_PSM_PAGE_SWINT_S 31 #define PF0INT_OICR_PSM_PAGE_SWINT_M BIT(31) #define QRX_TAIL_PAGE(_QRX) (0x03800000 + ((_QRX) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define QRX_TAIL_PAGE_MAX_INDEX 2047 #define QRX_TAIL_PAGE_TAIL_S 0 #define QRX_TAIL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0) #define QTX_COMM_DBELL_PAGE(_DBQM) (0x04000000 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */ #define QTX_COMM_DBELL_PAGE_MAX_INDEX 16383 #define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_S 0 #define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define E800_QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */ #define E800_QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255 #define E800_QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0 #define E800_QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0) #define VSI_MBX_ARQBAH(_VSI) (0x02000018 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQBAH_MAX_INDEX 767 #define VSI_MBX_ARQBAH_ARQBAH_S 0 #define VSI_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VSI_MBX_ARQBAL(_VSI) (0x02000014 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQBAL_MAX_INDEX 767 #define VSI_MBX_ARQBAL_ARQBAL_LSB_S 0 #define VSI_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VSI_MBX_ARQBAL_ARQBAL_S 6 #define VSI_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VSI_MBX_ARQH(_VSI) (0x02000020 + ((_VSI) * 4096)) /* _i=0...767 */ /* 
Reset Source: CORER */ #define VSI_MBX_ARQH_MAX_INDEX 767 #define VSI_MBX_ARQH_ARQH_S 0 #define VSI_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ARQLEN(_VSI) (0x0200001C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */ #define VSI_MBX_ARQLEN_MAX_INDEX 767 #define VSI_MBX_ARQLEN_ARQLEN_S 0 #define VSI_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ARQLEN_ARQVFE_S 28 #define VSI_MBX_ARQLEN_ARQVFE_M BIT(28) #define VSI_MBX_ARQLEN_ARQOVFL_S 29 #define VSI_MBX_ARQLEN_ARQOVFL_M BIT(29) #define VSI_MBX_ARQLEN_ARQCRIT_S 30 #define VSI_MBX_ARQLEN_ARQCRIT_M BIT(30) #define VSI_MBX_ARQLEN_ARQENABLE_S 31 #define VSI_MBX_ARQLEN_ARQENABLE_M BIT(31) #define VSI_MBX_ARQT(_VSI) (0x02000024 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQT_MAX_INDEX 767 #define VSI_MBX_ARQT_ARQT_S 0 #define VSI_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ATQBAH(_VSI) (0x02000004 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ATQBAH_MAX_INDEX 767 #define VSI_MBX_ATQBAH_ATQBAH_S 0 #define VSI_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VSI_MBX_ATQBAL(_VSI) (0x02000000 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ATQBAL_MAX_INDEX 767 #define VSI_MBX_ATQBAL_ATQBAL_S 6 #define VSI_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VSI_MBX_ATQH(_VSI) (0x0200000C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ATQH_MAX_INDEX 767 #define VSI_MBX_ATQH_ATQH_S 0 #define VSI_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ATQLEN(_VSI) (0x02000008 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */ #define VSI_MBX_ATQLEN_MAX_INDEX 767 #define VSI_MBX_ATQLEN_ATQLEN_S 0 #define VSI_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ATQLEN_ATQVFE_S 28 #define VSI_MBX_ATQLEN_ATQVFE_M BIT(28) #define VSI_MBX_ATQLEN_ATQOVFL_S 29 #define VSI_MBX_ATQLEN_ATQOVFL_M BIT(29) #define VSI_MBX_ATQLEN_ATQCRIT_S 30 #define VSI_MBX_ATQLEN_ATQCRIT_M 
BIT(30) #define VSI_MBX_ATQLEN_ATQENABLE_S 31 #define VSI_MBX_ATQLEN_ATQENABLE_M BIT(31) #define VSI_MBX_ATQT(_VSI) (0x02000010 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ATQT_MAX_INDEX 767 #define VSI_MBX_ATQT_ATQT_S 0 #define VSI_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define GL_ACL_ACCESS_CMD 0x00391000 /* Reset Source: CORER */ #define GL_ACL_ACCESS_CMD_TABLE_ID_S 0 #define GL_ACL_ACCESS_CMD_TABLE_ID_M MAKEMASK(0xFF, 0) #define GL_ACL_ACCESS_CMD_ENTRY_INDEX_S 8 #define GL_ACL_ACCESS_CMD_ENTRY_INDEX_M MAKEMASK(0xFFF, 8) #define GL_ACL_ACCESS_CMD_OPERATION_S 20 #define GL_ACL_ACCESS_CMD_OPERATION_M BIT(20) #define GL_ACL_ACCESS_CMD_OBJ_TYPE_S 24 #define GL_ACL_ACCESS_CMD_OBJ_TYPE_M MAKEMASK(0xF, 24) #define GL_ACL_ACCESS_CMD_EXECUTE_S 31 #define GL_ACL_ACCESS_CMD_EXECUTE_M BIT(31) #define GL_ACL_ACCESS_STATUS 0x00391004 /* Reset Source: CORER */ #define GL_ACL_ACCESS_STATUS_BUSY_S 0 #define GL_ACL_ACCESS_STATUS_BUSY_M BIT(0) #define GL_ACL_ACCESS_STATUS_DONE_S 1 #define GL_ACL_ACCESS_STATUS_DONE_M BIT(1) #define GL_ACL_ACCESS_STATUS_ERROR_S 2 #define GL_ACL_ACCESS_STATUS_ERROR_M BIT(2) #define GL_ACL_ACCESS_STATUS_OPERATION_S 3 #define GL_ACL_ACCESS_STATUS_OPERATION_M BIT(3) #define GL_ACL_ACCESS_STATUS_ERROR_CODE_S 4 #define GL_ACL_ACCESS_STATUS_ERROR_CODE_M MAKEMASK(0xF, 4) #define GL_ACL_ACCESS_STATUS_TABLE_ID_S 8 #define GL_ACL_ACCESS_STATUS_TABLE_ID_M MAKEMASK(0xFF, 8) #define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_S 16 #define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_M MAKEMASK(0xFFF, 16) #define GL_ACL_ACCESS_STATUS_OBJ_TYPE_S 28 #define GL_ACL_ACCESS_STATUS_OBJ_TYPE_M MAKEMASK(0xF, 28) #define GL_ACL_ACTMEM_ACT(_i) (0x00393824 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GL_ACL_ACTMEM_ACT_MAX_INDEX 1 #define GL_ACL_ACTMEM_ACT_VALUE_S 0 #define GL_ACL_ACTMEM_ACT_VALUE_M MAKEMASK(0xFFFF, 0) #define GL_ACL_ACTMEM_ACT_MDID_S 20 #define GL_ACL_ACTMEM_ACT_MDID_M MAKEMASK(0x3F, 20) #define GL_ACL_ACTMEM_ACT_PRIORITY_S 28 
#define GL_ACL_ACTMEM_ACT_PRIORITY_M MAKEMASK(0x7, 28) #define GL_ACL_CHICKEN_REGISTER 0x00393810 /* Reset Source: CORER */ #define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_S 0 #define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_M BIT(0) #define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_S 1 #define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_M BIT(1) #define GL_ACL_DEFAULT_ACT(_i) (0x00391168 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GL_ACL_DEFAULT_ACT_MAX_INDEX 15 #define GL_ACL_DEFAULT_ACT_VALUE_S 0 #define GL_ACL_DEFAULT_ACT_VALUE_M MAKEMASK(0xFFFF, 0) #define GL_ACL_DEFAULT_ACT_MDID_S 20 #define GL_ACL_DEFAULT_ACT_MDID_M MAKEMASK(0x3F, 20) #define GL_ACL_DEFAULT_ACT_PRIORITY_S 28 #define GL_ACL_DEFAULT_ACT_PRIORITY_M MAKEMASK(0x7, 28) #define GL_ACL_PROFILE_BWSB_SEL(_i) (0x00391008 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_BWSB_SEL_MAX_INDEX 31 #define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_S 0 #define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_M MAKEMASK(0x3F, 0) #define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_S 8 #define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M MAKEMASK(0x1F, 8) #define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15 #define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0 #define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M MAKEMASK(0xF, 0) #define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7 #define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0 #define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_M MAKEMASK(0x3F, 0) #define GL_ACL_PROFILE_RC_CFG(_i) (0x003910E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_RC_CFG_MAX_INDEX 7 #define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_S 0 #define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_M MAKEMASK(0xFFFF, 0) #define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_S 16 #define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_M 
MAKEMASK(0xFFFF, 16) #define GL_ACL_PROFILE_RCF_MASK(_i) (0x00391108 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_ACL_PROFILE_RCF_MASK_MAX_INDEX 7 #define GL_ACL_PROFILE_RCF_MASK_MASK_S 0 #define GL_ACL_PROFILE_RCF_MASK_MASK_M MAKEMASK(0xFFFF, 0) #define GL_ACL_SCENARIO_ACT_CFG(_i) (0x003938AC + ((_i) * 4)) /* _i=0...19 */ /* Reset Source: CORER */ #define GL_ACL_SCENARIO_ACT_CFG_MAX_INDEX 19 #define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_S 0 #define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_M MAKEMASK(0xF, 0) #define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_S 8 #define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_M BIT(8) #define GL_ACL_SCENARIO_CFG_H(_i) (0x0039386C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GL_ACL_SCENARIO_CFG_H_MAX_INDEX 15 #define GL_ACL_SCENARIO_CFG_H_SELECT4_S 0 #define GL_ACL_SCENARIO_CFG_H_SELECT4_M MAKEMASK(0x1F, 0) #define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_S 8 #define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_M MAKEMASK(0xFF, 8) #define GL_ACL_SCENARIO_CFG_H_START_COMPARE_S 24 #define GL_ACL_SCENARIO_CFG_H_START_COMPARE_M BIT(24) #define GL_ACL_SCENARIO_CFG_H_START_SET_S 28 #define GL_ACL_SCENARIO_CFG_H_START_SET_M BIT(28) #define GL_ACL_SCENARIO_CFG_L(_i) (0x0039382C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GL_ACL_SCENARIO_CFG_L_MAX_INDEX 15 #define GL_ACL_SCENARIO_CFG_L_SELECT0_S 0 #define GL_ACL_SCENARIO_CFG_L_SELECT0_M MAKEMASK(0x7F, 0) #define GL_ACL_SCENARIO_CFG_L_SELECT1_S 8 #define GL_ACL_SCENARIO_CFG_L_SELECT1_M MAKEMASK(0x7F, 8) #define GL_ACL_SCENARIO_CFG_L_SELECT2_S 16 #define GL_ACL_SCENARIO_CFG_L_SELECT2_M MAKEMASK(0x7F, 16) #define GL_ACL_SCENARIO_CFG_L_SELECT3_S 24 #define GL_ACL_SCENARIO_CFG_L_SELECT3_M MAKEMASK(0x7F, 24) #define GL_ACL_TCAM_KEY_H 0x00393818 /* Reset Source: CORER */ #define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_S 0 #define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_M MAKEMASK(0xFF, 0) #define GL_ACL_TCAM_KEY_INV_H 0x00393820 /* Reset Source: CORER */ #define 
GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_S 0 #define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_M MAKEMASK(0xFF, 0) #define GL_ACL_TCAM_KEY_INV_L 0x0039381C /* Reset Source: CORER */ #define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_S 0 #define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_M MAKEMASK(0xFFFFFFFF, 0) #define GL_ACL_TCAM_KEY_L 0x00393814 /* Reset Source: CORER */ #define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_S 0 #define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_M MAKEMASK(0xFFFFFFFF, 0) #define VSI_ACL_DEF_SEL(_VSI) (0x00391800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_ACL_DEF_SEL_MAX_INDEX 767 #define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_S 0 #define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 0) #define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_S 4 #define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_M MAKEMASK(0x3, 4) #define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_S 8 #define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 8) #define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_S 12 #define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_M MAKEMASK(0x3, 12) #define GL_SWT_L2TAG0(_i) (0x000492A8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_SWT_L2TAG0_MAX_INDEX 7 #define GL_SWT_L2TAG0_DATA_S 0 #define GL_SWT_L2TAG0_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_SWT_L2TAG1(_i) (0x000492C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_SWT_L2TAG1_MAX_INDEX 7 #define GL_SWT_L2TAG1_DATA_S 0 #define GL_SWT_L2TAG1_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_SWT_L2TAGCTRL(_i) (0x001D2660 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_SWT_L2TAGCTRL_MAX_INDEX 7 #define GL_SWT_L2TAGCTRL_LENGTH_S 0 #define GL_SWT_L2TAGCTRL_LENGTH_M MAKEMASK(0x7F, 0) #define GL_SWT_L2TAGCTRL_HAS_UP_S 7 #define GL_SWT_L2TAGCTRL_HAS_UP_M BIT(7) #define GL_SWT_L2TAGCTRL_ISVLAN_S 9 #define GL_SWT_L2TAGCTRL_ISVLAN_M BIT(9) #define GL_SWT_L2TAGCTRL_INNERUP_S 10 #define GL_SWT_L2TAGCTRL_INNERUP_M BIT(10) #define 
GL_SWT_L2TAGCTRL_OUTERUP_S 11 #define GL_SWT_L2TAGCTRL_OUTERUP_M BIT(11) #define GL_SWT_L2TAGCTRL_LONG_S 12 #define GL_SWT_L2TAGCTRL_LONG_M BIT(12) #define GL_SWT_L2TAGCTRL_ISMPLS_S 13 #define GL_SWT_L2TAGCTRL_ISMPLS_M BIT(13) #define GL_SWT_L2TAGCTRL_ISNSH_S 14 #define GL_SWT_L2TAGCTRL_ISNSH_M BIT(14) #define GL_SWT_L2TAGCTRL_ETHERTYPE_S 16 #define GL_SWT_L2TAGCTRL_ETHERTYPE_M MAKEMASK(0xFFFF, 16) #define GL_SWT_L2TAGRXEB(_i) (0x00052000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_SWT_L2TAGRXEB_MAX_INDEX 7 #define GL_SWT_L2TAGRXEB_OFFSET_S 0 #define GL_SWT_L2TAGRXEB_OFFSET_M MAKEMASK(0xFF, 0) #define GL_SWT_L2TAGRXEB_LENGTH_S 8 #define GL_SWT_L2TAGRXEB_LENGTH_M MAKEMASK(0x3, 8) #define GL_SWT_L2TAGTXIB(_i) (0x000492E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_SWT_L2TAGTXIB_MAX_INDEX 7 #define GL_SWT_L2TAGTXIB_OFFSET_S 0 #define GL_SWT_L2TAGTXIB_OFFSET_M MAKEMASK(0xFF, 0) #define GL_SWT_L2TAGTXIB_LENGTH_S 8 #define GL_SWT_L2TAGTXIB_LENGTH_M MAKEMASK(0x3, 8) #define GLCM_PE_CACHESIZE 0x005046B4 /* Reset Source: CORER */ #define GLCM_PE_CACHESIZE_WORD_SIZE_S 0 #define GLCM_PE_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFFF, 0) #define GLCM_PE_CACHESIZE_SETS_S 12 #define GLCM_PE_CACHESIZE_SETS_M MAKEMASK(0xF, 12) #define GLCM_PE_CACHESIZE_WAYS_S 16 #define GLCM_PE_CACHESIZE_WAYS_M MAKEMASK(0x1FF, 16) #define GLCOMM_CQ_CTL(_CQ) (0x000F0000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLCOMM_CQ_CTL_MAX_INDEX 511 #define GLCOMM_CQ_CTL_COMP_TYPE_S 0 #define GLCOMM_CQ_CTL_COMP_TYPE_M MAKEMASK(0x7, 0) #define GLCOMM_CQ_CTL_CMD_S 4 #define GLCOMM_CQ_CTL_CMD_M MAKEMASK(0x7, 4) #define GLCOMM_CQ_CTL_ID_S 16 #define GLCOMM_CQ_CTL_ID_M MAKEMASK(0x3FFF, 16) #define GLCOMM_MIN_MAX_PKT 0x000FC064 /* Reset Source: CORER */ #define GLCOMM_MIN_MAX_PKT_MAHDL_S 0 #define GLCOMM_MIN_MAX_PKT_MAHDL_M MAKEMASK(0x3FFF, 0) #define GLCOMM_MIN_MAX_PKT_MIHDL_S 16 #define GLCOMM_MIN_MAX_PKT_MIHDL_M MAKEMASK(0x3F, 16) #define 
GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_S 22 #define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_M MAKEMASK(0x3FF, 22) #define GLCOMM_PKT_SHAPER_PROF(_i) (0x002D2DA8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLCOMM_PKT_SHAPER_PROF_MAX_INDEX 7 #define GLCOMM_PKT_SHAPER_PROF_PKTCNT_S 0 #define GLCOMM_PKT_SHAPER_PROF_PKTCNT_M MAKEMASK(0x3F, 0) #define GLCOMM_QTX_CNTX_CTL 0x002D2DC8 /* Reset Source: CORER */ #define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_S 0 #define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x3FFF, 0) #define GLCOMM_QTX_CNTX_CTL_CMD_S 16 #define GLCOMM_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16) #define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_S 19 #define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19) #define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: CORER */ #define GLCOMM_QTX_CNTX_DATA_MAX_INDEX 9 #define GLCOMM_QTX_CNTX_DATA_DATA_S 0 #define GLCOMM_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GLCOMM_QTX_CNTX_STAT 0x002D2DCC /* Reset Source: CORER */ #define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_S 0 #define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0) #define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLCOMM_QUANTA_PROF_MAX_INDEX 15 #define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0 #define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M MAKEMASK(0x3FFF, 0) #define GLCOMM_QUANTA_PROF_MAX_CMD_S 16 #define GLCOMM_QUANTA_PROF_MAX_CMD_M MAKEMASK(0xFF, 16) #define GLCOMM_QUANTA_PROF_MAX_DESC_S 24 #define GLCOMM_QUANTA_PROF_MAX_DESC_M MAKEMASK(0x3F, 24) #define GLLAN_TCLAN_CACHE_CTL 0x000FC0B8 /* Reset Source: CORER */ #define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_S 0 #define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_M MAKEMASK(0x3F, 0) #define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_S 6 #define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6) #define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7 #define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M MAKEMASK(0x7F, 7) #define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14 #define 
GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M MAKEMASK(0xFF, 14) #define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22 #define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M MAKEMASK(0x3FF, 22) #define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX0_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_S 0 #define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX1(_CQ) (0x000F1000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX1_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_S 0 #define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_M MAKEMASK(0x1FFFFFF, 0) #define GLTCLAN_CQ_CNTX10(_CQ) (0x000F5800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX10_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX11(_CQ) (0x000F6000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX11_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX12(_CQ) (0x000F6800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX12_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX13(_CQ) (0x000F7000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX13_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX14(_CQ) (0x000F7800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX14_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX15(_CQ) (0x000F8000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: 
CORER */ #define GLTCLAN_CQ_CNTX15_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX16(_CQ) (0x000F8800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX16_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX17(_CQ) (0x000F9000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX17_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX18(_CQ) (0x000F9800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX18_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX19(_CQ) (0x000FA000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX19_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX2(_CQ) (0x000F1800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX2_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX2_RING_LEN_S 0 #define GLTCLAN_CQ_CNTX2_RING_LEN_M MAKEMASK(0x3FFFF, 0) #define GLTCLAN_CQ_CNTX20(_CQ) (0x000FA800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX20_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX21(_CQ) (0x000FB000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX21_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX3(_CQ) (0x000F2000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define 
GLTCLAN_CQ_CNTX3_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX3_GENERATION_S 0 #define GLTCLAN_CQ_CNTX3_GENERATION_M BIT(0) #define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_S 1 #define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_M MAKEMASK(0x3FFFFF, 1) #define GLTCLAN_CQ_CNTX4(_CQ) (0x000F2800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX4_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX4_PF_NUM_S 0 #define GLTCLAN_CQ_CNTX4_PF_NUM_M MAKEMASK(0x7, 0) #define GLTCLAN_CQ_CNTX4_VMVF_NUM_S 3 #define GLTCLAN_CQ_CNTX4_VMVF_NUM_M MAKEMASK(0x3FF, 3) #define GLTCLAN_CQ_CNTX4_VMVF_TYPE_S 13 #define GLTCLAN_CQ_CNTX4_VMVF_TYPE_M MAKEMASK(0x3, 13) #define GLTCLAN_CQ_CNTX5(_CQ) (0x000F3000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX5_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX5_TPH_EN_S 0 #define GLTCLAN_CQ_CNTX5_TPH_EN_M BIT(0) #define GLTCLAN_CQ_CNTX5_CPU_ID_S 1 #define GLTCLAN_CQ_CNTX5_CPU_ID_M MAKEMASK(0xFF, 1) #define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_S 9 #define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_M BIT(9) #define GLTCLAN_CQ_CNTX6(_CQ) (0x000F3800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX6_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX7(_CQ) (0x000F4000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX7_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX8(_CQ) (0x000F4800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX8_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCLAN_CQ_CNTX9(_CQ) (0x000F5000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLTCLAN_CQ_CNTX9_MAX_INDEX 511 #define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_S 0 #define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 
0) #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */ #define QTX_COMM_DBELL_MAX_INDEX 16383 #define QTX_COMM_DBELL_QTX_COMM_DBELL_S 0 #define QTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define QTX_COMM_DBLQ_CNTX(_i, _DBLQ) (0x002D0000 + ((_i) * 1024 + (_DBLQ) * 4)) /* _i=0...4, _DBLQ=0...255 */ /* Reset Source: CORER */ #define QTX_COMM_DBLQ_CNTX_MAX_INDEX 4 #define QTX_COMM_DBLQ_CNTX_DATA_S 0 #define QTX_COMM_DBLQ_CNTX_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define QTX_COMM_DBLQ_DBELL(_DBLQ) (0x002D1400 + ((_DBLQ) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define QTX_COMM_DBLQ_DBELL_MAX_INDEX 255 #define QTX_COMM_DBLQ_DBELL_TAIL_S 0 #define QTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0) #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */ #define QTX_COMM_HEAD_MAX_INDEX 16383 #define QTX_COMM_HEAD_HEAD_S 0 #define QTX_COMM_HEAD_HEAD_M MAKEMASK(0x1FFF, 0) #define QTX_COMM_HEAD_RS_PENDING_S 16 #define QTX_COMM_HEAD_RS_PENDING_M BIT(16) #define GL_FW_TOOL_ARQBAH 0x000801C0 /* Reset Source: EMPR */ #define GL_FW_TOOL_ARQBAH_ARQBAH_S 0 #define GL_FW_TOOL_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define GL_FW_TOOL_ARQBAL 0x000800C0 /* Reset Source: EMPR */ #define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_S 0 #define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define GL_FW_TOOL_ARQBAL_ARQBAL_S 6 #define GL_FW_TOOL_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define GL_FW_TOOL_ARQH 0x000803C0 /* Reset Source: EMPR */ #define GL_FW_TOOL_ARQH_ARQH_S 0 #define GL_FW_TOOL_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define GL_FW_TOOL_ARQLEN 0x000802C0 /* Reset Source: EMPR */ #define GL_FW_TOOL_ARQLEN_ARQLEN_S 0 #define GL_FW_TOOL_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define GL_FW_TOOL_ARQLEN_ARQVFE_S 28 #define GL_FW_TOOL_ARQLEN_ARQVFE_M BIT(28) #define GL_FW_TOOL_ARQLEN_ARQOVFL_S 29 #define GL_FW_TOOL_ARQLEN_ARQOVFL_M BIT(29) #define GL_FW_TOOL_ARQLEN_ARQCRIT_S 30 #define 
GL_FW_TOOL_ARQLEN_ARQCRIT_M BIT(30) #define GL_FW_TOOL_ARQLEN_ARQENABLE_S 31 #define GL_FW_TOOL_ARQLEN_ARQENABLE_M BIT(31) #define GL_FW_TOOL_ARQT 0x000804C0 /* Reset Source: EMPR */ #define GL_FW_TOOL_ARQT_ARQT_S 0 #define GL_FW_TOOL_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define GL_FW_TOOL_ATQBAH 0x00080140 /* Reset Source: EMPR */ #define GL_FW_TOOL_ATQBAH_ATQBAH_S 0 #define GL_FW_TOOL_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define GL_FW_TOOL_ATQBAL 0x00080040 /* Reset Source: EMPR */ #define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_S 0 #define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define GL_FW_TOOL_ATQBAL_ATQBAL_S 6 #define GL_FW_TOOL_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define GL_FW_TOOL_ATQH 0x00080340 /* Reset Source: EMPR */ #define GL_FW_TOOL_ATQH_ATQH_S 0 #define GL_FW_TOOL_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define GL_FW_TOOL_ATQLEN 0x00080240 /* Reset Source: EMPR */ #define GL_FW_TOOL_ATQLEN_ATQLEN_S 0 #define GL_FW_TOOL_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define GL_FW_TOOL_ATQLEN_ATQVFE_S 28 #define GL_FW_TOOL_ATQLEN_ATQVFE_M BIT(28) #define GL_FW_TOOL_ATQLEN_ATQOVFL_S 29 #define GL_FW_TOOL_ATQLEN_ATQOVFL_M BIT(29) #define GL_FW_TOOL_ATQLEN_ATQCRIT_S 30 #define GL_FW_TOOL_ATQLEN_ATQCRIT_M BIT(30) #define GL_FW_TOOL_ATQLEN_ATQENABLE_S 31 #define GL_FW_TOOL_ATQLEN_ATQENABLE_M BIT(31) #define GL_FW_TOOL_ATQT 0x00080440 /* Reset Source: EMPR */ #define GL_FW_TOOL_ATQT_ATQT_S 0 #define GL_FW_TOOL_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define GL_MBX_PASID 0x00231EC0 /* Reset Source: CORER */ #define GL_MBX_PASID_PASID_MODE_S 0 #define GL_MBX_PASID_PASID_MODE_M BIT(0) #define GL_MBX_PASID_PASID_MODE_VALID_S 1 #define GL_MBX_PASID_PASID_MODE_VALID_M BIT(1) #define PF_FW_ARQBAH 0x00080180 /* Reset Source: EMPR */ #define PF_FW_ARQBAH_ARQBAH_S 0 #define PF_FW_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF_FW_ARQBAL 0x00080080 /* Reset Source: EMPR */ #define PF_FW_ARQBAL_ARQBAL_LSB_S 0 #define PF_FW_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF_FW_ARQBAL_ARQBAL_S 6 
#define PF_FW_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF_FW_ARQH 0x00080380 /* Reset Source: EMPR */ #define PF_FW_ARQH_ARQH_S 0 #define PF_FW_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF_FW_ARQLEN 0x00080280 /* Reset Source: EMPR */ #define PF_FW_ARQLEN_ARQLEN_S 0 #define PF_FW_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF_FW_ARQLEN_ARQVFE_S 28 #define PF_FW_ARQLEN_ARQVFE_M BIT(28) #define PF_FW_ARQLEN_ARQOVFL_S 29 #define PF_FW_ARQLEN_ARQOVFL_M BIT(29) #define PF_FW_ARQLEN_ARQCRIT_S 30 #define PF_FW_ARQLEN_ARQCRIT_M BIT(30) #define PF_FW_ARQLEN_ARQENABLE_S 31 #define PF_FW_ARQLEN_ARQENABLE_M BIT(31) #define PF_FW_ARQT 0x00080480 /* Reset Source: EMPR */ #define PF_FW_ARQT_ARQT_S 0 #define PF_FW_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF_FW_ATQBAH 0x00080100 /* Reset Source: EMPR */ #define PF_FW_ATQBAH_ATQBAH_S 0 #define PF_FW_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF_FW_ATQBAL 0x00080000 /* Reset Source: EMPR */ #define PF_FW_ATQBAL_ATQBAL_LSB_S 0 #define PF_FW_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF_FW_ATQBAL_ATQBAL_S 6 #define PF_FW_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF_FW_ATQH 0x00080300 /* Reset Source: EMPR */ #define PF_FW_ATQH_ATQH_S 0 #define PF_FW_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF_FW_ATQLEN 0x00080200 /* Reset Source: EMPR */ #define PF_FW_ATQLEN_ATQLEN_S 0 #define PF_FW_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF_FW_ATQLEN_ATQVFE_S 28 #define PF_FW_ATQLEN_ATQVFE_M BIT(28) #define PF_FW_ATQLEN_ATQOVFL_S 29 #define PF_FW_ATQLEN_ATQOVFL_M BIT(29) #define PF_FW_ATQLEN_ATQCRIT_S 30 #define PF_FW_ATQLEN_ATQCRIT_M BIT(30) #define PF_FW_ATQLEN_ATQENABLE_S 31 #define PF_FW_ATQLEN_ATQENABLE_M BIT(31) #define PF_FW_ATQT 0x00080400 /* Reset Source: EMPR */ #define PF_FW_ATQT_ATQT_S 0 #define PF_FW_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF_MBX_ARQBAH 0x0022E400 /* Reset Source: CORER */ #define PF_MBX_ARQBAH_ARQBAH_S 0 #define PF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF_MBX_ARQBAL 0x0022E380 /* Reset Source: 
CORER */ #define PF_MBX_ARQBAL_ARQBAL_LSB_S 0 #define PF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF_MBX_ARQBAL_ARQBAL_S 6 #define PF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF_MBX_ARQH 0x0022E500 /* Reset Source: CORER */ #define PF_MBX_ARQH_ARQH_S 0 #define PF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF_MBX_ARQLEN 0x0022E480 /* Reset Source: PFR */ #define PF_MBX_ARQLEN_ARQLEN_S 0 #define PF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF_MBX_ARQLEN_ARQVFE_S 28 #define PF_MBX_ARQLEN_ARQVFE_M BIT(28) #define PF_MBX_ARQLEN_ARQOVFL_S 29 #define PF_MBX_ARQLEN_ARQOVFL_M BIT(29) #define PF_MBX_ARQLEN_ARQCRIT_S 30 #define PF_MBX_ARQLEN_ARQCRIT_M BIT(30) #define PF_MBX_ARQLEN_ARQENABLE_S 31 #define PF_MBX_ARQLEN_ARQENABLE_M BIT(31) #define PF_MBX_ARQT 0x0022E580 /* Reset Source: CORER */ #define PF_MBX_ARQT_ARQT_S 0 #define PF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF_MBX_ATQBAH 0x0022E180 /* Reset Source: CORER */ #define PF_MBX_ATQBAH_ATQBAH_S 0 #define PF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF_MBX_ATQBAL 0x0022E100 /* Reset Source: CORER */ #define PF_MBX_ATQBAL_ATQBAL_S 6 #define PF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF_MBX_ATQH 0x0022E280 /* Reset Source: CORER */ #define PF_MBX_ATQH_ATQH_S 0 #define PF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF_MBX_ATQLEN 0x0022E200 /* Reset Source: PFR */ #define PF_MBX_ATQLEN_ATQLEN_S 0 #define PF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF_MBX_ATQLEN_ATQVFE_S 28 #define PF_MBX_ATQLEN_ATQVFE_M BIT(28) #define PF_MBX_ATQLEN_ATQOVFL_S 29 #define PF_MBX_ATQLEN_ATQOVFL_M BIT(29) #define PF_MBX_ATQLEN_ATQCRIT_S 30 #define PF_MBX_ATQLEN_ATQCRIT_M BIT(30) #define PF_MBX_ATQLEN_ATQENABLE_S 31 #define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) #define PF_MBX_ATQT 0x0022E300 /* Reset Source: CORER */ #define PF_MBX_ATQT_ATQT_S 0 #define PF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF_SB_ARQBAH 0x0022FF00 /* Reset Source: CORER */ #define PF_SB_ARQBAH_ARQBAH_S 0 #define 
PF_SB_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF_SB_ARQBAL 0x0022FE80 /* Reset Source: CORER */ #define PF_SB_ARQBAL_ARQBAL_LSB_S 0 #define PF_SB_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF_SB_ARQBAL_ARQBAL_S 6 #define PF_SB_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF_SB_ARQH 0x00230000 /* Reset Source: CORER */ #define PF_SB_ARQH_ARQH_S 0 #define PF_SB_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF_SB_ARQLEN 0x0022FF80 /* Reset Source: PFR */ #define PF_SB_ARQLEN_ARQLEN_S 0 #define PF_SB_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF_SB_ARQLEN_ARQVFE_S 28 #define PF_SB_ARQLEN_ARQVFE_M BIT(28) #define PF_SB_ARQLEN_ARQOVFL_S 29 #define PF_SB_ARQLEN_ARQOVFL_M BIT(29) #define PF_SB_ARQLEN_ARQCRIT_S 30 #define PF_SB_ARQLEN_ARQCRIT_M BIT(30) #define PF_SB_ARQLEN_ARQENABLE_S 31 #define PF_SB_ARQLEN_ARQENABLE_M BIT(31) #define PF_SB_ARQT 0x00230080 /* Reset Source: CORER */ #define PF_SB_ARQT_ARQT_S 0 #define PF_SB_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF_SB_ATQBAH 0x0022FC80 /* Reset Source: CORER */ #define PF_SB_ATQBAH_ATQBAH_S 0 #define PF_SB_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF_SB_ATQBAL 0x0022FC00 /* Reset Source: CORER */ #define PF_SB_ATQBAL_ATQBAL_S 6 #define PF_SB_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF_SB_ATQH 0x0022FD80 /* Reset Source: CORER */ #define PF_SB_ATQH_ATQH_S 0 #define PF_SB_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF_SB_ATQLEN 0x0022FD00 /* Reset Source: PFR */ #define PF_SB_ATQLEN_ATQLEN_S 0 #define PF_SB_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF_SB_ATQLEN_ATQVFE_S 28 #define PF_SB_ATQLEN_ATQVFE_M BIT(28) #define PF_SB_ATQLEN_ATQOVFL_S 29 #define PF_SB_ATQLEN_ATQOVFL_M BIT(29) #define PF_SB_ATQLEN_ATQCRIT_S 30 #define PF_SB_ATQLEN_ATQCRIT_M BIT(30) #define PF_SB_ATQLEN_ATQENABLE_S 31 #define PF_SB_ATQLEN_ATQENABLE_M BIT(31) #define PF_SB_ATQT 0x0022FE00 /* Reset Source: CORER */ #define PF_SB_ATQT_ATQT_S 0 #define PF_SB_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF_SB_REM_DEV_CTL 0x002300F0 /* Reset Source: 
CORER */ #define PF_SB_REM_DEV_CTL_DEST_EN_S 0 #define PF_SB_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) #define PF0_FW_HLP_ARQBAH 0x000801C8 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQBAH_ARQBAH_S 0 #define PF0_FW_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_HLP_ARQBAL 0x000800C8 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_S 0 #define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_HLP_ARQBAL_ARQBAL_S 6 #define PF0_FW_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_HLP_ARQH 0x000803C8 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQH_ARQH_S 0 #define PF0_FW_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ARQLEN 0x000802C8 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQLEN_ARQLEN_S 0 #define PF0_FW_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ARQLEN_ARQVFE_S 28 #define PF0_FW_HLP_ARQLEN_ARQVFE_M BIT(28) #define PF0_FW_HLP_ARQLEN_ARQOVFL_S 29 #define PF0_FW_HLP_ARQLEN_ARQOVFL_M BIT(29) #define PF0_FW_HLP_ARQLEN_ARQCRIT_S 30 #define PF0_FW_HLP_ARQLEN_ARQCRIT_M BIT(30) #define PF0_FW_HLP_ARQLEN_ARQENABLE_S 31 #define PF0_FW_HLP_ARQLEN_ARQENABLE_M BIT(31) #define PF0_FW_HLP_ARQT 0x000804C8 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQT_ARQT_S 0 #define PF0_FW_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQBAH 0x00080148 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQBAH_ATQBAH_S 0 #define PF0_FW_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_HLP_ATQBAL 0x00080048 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_S 0 #define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_HLP_ATQBAL_ATQBAL_S 6 #define PF0_FW_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_HLP_ATQH 0x00080348 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQH_ATQH_S 0 #define PF0_FW_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQLEN 0x00080248 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQLEN_ATQLEN_S 0 #define PF0_FW_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) 
#define PF0_FW_HLP_ATQLEN_ATQVFE_S 28 #define PF0_FW_HLP_ATQLEN_ATQVFE_M BIT(28) #define PF0_FW_HLP_ATQLEN_ATQOVFL_S 29 #define PF0_FW_HLP_ATQLEN_ATQOVFL_M BIT(29) #define PF0_FW_HLP_ATQLEN_ATQCRIT_S 30 #define PF0_FW_HLP_ATQLEN_ATQCRIT_M BIT(30) #define PF0_FW_HLP_ATQLEN_ATQENABLE_S 31 #define PF0_FW_HLP_ATQLEN_ATQENABLE_M BIT(31) #define PF0_FW_HLP_ATQT 0x00080448 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQT_ATQT_S 0 #define PF0_FW_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQBAH 0x000801C4 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQBAH_ARQBAH_S 0 #define PF0_FW_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_PSM_ARQBAL 0x000800C4 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_S 0 #define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_PSM_ARQBAL_ARQBAL_S 6 #define PF0_FW_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_PSM_ARQH 0x000803C4 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQH_ARQH_S 0 #define PF0_FW_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQLEN 0x000802C4 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQLEN_ARQLEN_S 0 #define PF0_FW_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQLEN_ARQVFE_S 28 #define PF0_FW_PSM_ARQLEN_ARQVFE_M BIT(28) #define PF0_FW_PSM_ARQLEN_ARQOVFL_S 29 #define PF0_FW_PSM_ARQLEN_ARQOVFL_M BIT(29) #define PF0_FW_PSM_ARQLEN_ARQCRIT_S 30 #define PF0_FW_PSM_ARQLEN_ARQCRIT_M BIT(30) #define PF0_FW_PSM_ARQLEN_ARQENABLE_S 31 #define PF0_FW_PSM_ARQLEN_ARQENABLE_M BIT(31) #define PF0_FW_PSM_ARQT 0x000804C4 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQT_ARQT_S 0 #define PF0_FW_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQBAH 0x00080144 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQBAH_ATQBAH_S 0 #define PF0_FW_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_PSM_ATQBAL 0x00080044 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_S 0 #define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define 
PF0_FW_PSM_ATQBAL_ATQBAL_S 6 #define PF0_FW_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_PSM_ATQH 0x00080344 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQH_ATQH_S 0 #define PF0_FW_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQLEN 0x00080244 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQLEN_ATQLEN_S 0 #define PF0_FW_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQLEN_ATQVFE_S 28 #define PF0_FW_PSM_ATQLEN_ATQVFE_M BIT(28) #define PF0_FW_PSM_ATQLEN_ATQOVFL_S 29 #define PF0_FW_PSM_ATQLEN_ATQOVFL_M BIT(29) #define PF0_FW_PSM_ATQLEN_ATQCRIT_S 30 #define PF0_FW_PSM_ATQLEN_ATQCRIT_M BIT(30) #define PF0_FW_PSM_ATQLEN_ATQENABLE_S 31 #define PF0_FW_PSM_ATQLEN_ATQENABLE_M BIT(31) #define PF0_FW_PSM_ATQT 0x00080444 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQT_ATQT_S 0 #define PF0_FW_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQBAH 0x0022E5D8 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQBAH_ARQBAH_S 0 #define PF0_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_CPM_ARQBAL 0x0022E5D4 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0 #define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_CPM_ARQBAL_ARQBAL_S 6 #define PF0_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_CPM_ARQH 0x0022E5E0 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQH_ARQH_S 0 #define PF0_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQLEN 0x0022E5DC /* Reset Source: PFR */ #define PF0_MBX_CPM_ARQLEN_ARQLEN_S 0 #define PF0_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQLEN_ARQVFE_S 28 #define PF0_MBX_CPM_ARQLEN_ARQVFE_M BIT(28) #define PF0_MBX_CPM_ARQLEN_ARQOVFL_S 29 #define PF0_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29) #define PF0_MBX_CPM_ARQLEN_ARQCRIT_S 30 #define PF0_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30) #define PF0_MBX_CPM_ARQLEN_ARQENABLE_S 31 #define PF0_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31) #define PF0_MBX_CPM_ARQT 0x0022E5E4 /* Reset Source: CORER */ #define 
PF0_MBX_CPM_ARQT_ARQT_S 0 #define PF0_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQBAH 0x0022E5C4 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQBAH_ATQBAH_S 0 #define PF0_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_CPM_ATQBAL 0x0022E5C0 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQBAL_ATQBAL_S 6 #define PF0_MBX_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_CPM_ATQH 0x0022E5CC /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQH_ATQH_S 0 #define PF0_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQLEN 0x0022E5C8 /* Reset Source: PFR */ #define PF0_MBX_CPM_ATQLEN_ATQLEN_S 0 #define PF0_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQLEN_ATQVFE_S 28 #define PF0_MBX_CPM_ATQLEN_ATQVFE_M BIT(28) #define PF0_MBX_CPM_ATQLEN_ATQOVFL_S 29 #define PF0_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29) #define PF0_MBX_CPM_ATQLEN_ATQCRIT_S 30 #define PF0_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30) #define PF0_MBX_CPM_ATQLEN_ATQENABLE_S 31 #define PF0_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31) #define PF0_MBX_CPM_ATQT 0x0022E5D0 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQT_ATQT_S 0 #define PF0_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQBAH 0x0022E600 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQBAH_ARQBAH_S 0 #define PF0_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_HLP_ARQBAL 0x0022E5FC /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0 #define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_HLP_ARQBAL_ARQBAL_S 6 #define PF0_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_HLP_ARQH 0x0022E608 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQH_ARQH_S 0 #define PF0_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQLEN 0x0022E604 /* Reset Source: PFR */ #define PF0_MBX_HLP_ARQLEN_ARQLEN_S 0 #define PF0_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQLEN_ARQVFE_S 28 #define PF0_MBX_HLP_ARQLEN_ARQVFE_M BIT(28) 
#define PF0_MBX_HLP_ARQLEN_ARQOVFL_S 29 #define PF0_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29) #define PF0_MBX_HLP_ARQLEN_ARQCRIT_S 30 #define PF0_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30) #define PF0_MBX_HLP_ARQLEN_ARQENABLE_S 31 #define PF0_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31) #define PF0_MBX_HLP_ARQT 0x0022E60C /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQT_ARQT_S 0 #define PF0_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQBAH 0x0022E5EC /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQBAH_ATQBAH_S 0 #define PF0_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_HLP_ATQBAL 0x0022E5E8 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQBAL_ATQBAL_S 6 #define PF0_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_HLP_ATQH 0x0022E5F4 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQH_ATQH_S 0 #define PF0_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQLEN 0x0022E5F0 /* Reset Source: PFR */ #define PF0_MBX_HLP_ATQLEN_ATQLEN_S 0 #define PF0_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQLEN_ATQVFE_S 28 #define PF0_MBX_HLP_ATQLEN_ATQVFE_M BIT(28) #define PF0_MBX_HLP_ATQLEN_ATQOVFL_S 29 #define PF0_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29) #define PF0_MBX_HLP_ATQLEN_ATQCRIT_S 30 #define PF0_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30) #define PF0_MBX_HLP_ATQLEN_ATQENABLE_S 31 #define PF0_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31) #define PF0_MBX_HLP_ATQT 0x0022E5F8 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQT_ATQT_S 0 #define PF0_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQBAH 0x0022E628 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQBAH_ARQBAH_S 0 #define PF0_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_PSM_ARQBAL 0x0022E624 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0 #define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_PSM_ARQBAL_ARQBAL_S 6 #define PF0_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_PSM_ARQH 0x0022E630 /* Reset Source: 
CORER */ #define PF0_MBX_PSM_ARQH_ARQH_S 0 #define PF0_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQLEN 0x0022E62C /* Reset Source: PFR */ #define PF0_MBX_PSM_ARQLEN_ARQLEN_S 0 #define PF0_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQLEN_ARQVFE_S 28 #define PF0_MBX_PSM_ARQLEN_ARQVFE_M BIT(28) #define PF0_MBX_PSM_ARQLEN_ARQOVFL_S 29 #define PF0_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29) #define PF0_MBX_PSM_ARQLEN_ARQCRIT_S 30 #define PF0_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30) #define PF0_MBX_PSM_ARQLEN_ARQENABLE_S 31 #define PF0_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31) #define PF0_MBX_PSM_ARQT 0x0022E634 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQT_ARQT_S 0 #define PF0_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQBAH 0x0022E614 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQBAH_ATQBAH_S 0 #define PF0_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_PSM_ATQBAL 0x0022E610 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQBAL_ATQBAL_S 6 #define PF0_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_PSM_ATQH 0x0022E61C /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQH_ATQH_S 0 #define PF0_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQLEN 0x0022E618 /* Reset Source: PFR */ #define PF0_MBX_PSM_ATQLEN_ATQLEN_S 0 #define PF0_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQLEN_ATQVFE_S 28 #define PF0_MBX_PSM_ATQLEN_ATQVFE_M BIT(28) #define PF0_MBX_PSM_ATQLEN_ATQOVFL_S 29 #define PF0_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29) #define PF0_MBX_PSM_ATQLEN_ATQCRIT_S 30 #define PF0_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30) #define PF0_MBX_PSM_ATQLEN_ATQENABLE_S 31 #define PF0_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31) #define PF0_MBX_PSM_ATQT 0x0022E620 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQT_ATQT_S 0 #define PF0_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQBAH 0x0022E650 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQBAH_ARQBAH_S 0 #define PF0_SB_CPM_ARQBAH_ARQBAH_M 
MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_CPM_ARQBAL 0x0022E64C /* Reset Source: CORER */ #define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_S 0 #define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_SB_CPM_ARQBAL_ARQBAL_S 6 #define PF0_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_CPM_ARQH 0x0022E658 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQH_ARQH_S 0 #define PF0_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQLEN 0x0022E654 /* Reset Source: PFR */ #define PF0_SB_CPM_ARQLEN_ARQLEN_S 0 #define PF0_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQLEN_ARQVFE_S 28 #define PF0_SB_CPM_ARQLEN_ARQVFE_M BIT(28) #define PF0_SB_CPM_ARQLEN_ARQOVFL_S 29 #define PF0_SB_CPM_ARQLEN_ARQOVFL_M BIT(29) #define PF0_SB_CPM_ARQLEN_ARQCRIT_S 30 #define PF0_SB_CPM_ARQLEN_ARQCRIT_M BIT(30) #define PF0_SB_CPM_ARQLEN_ARQENABLE_S 31 #define PF0_SB_CPM_ARQLEN_ARQENABLE_M BIT(31) #define PF0_SB_CPM_ARQT 0x0022E65C /* Reset Source: CORER */ #define PF0_SB_CPM_ARQT_ARQT_S 0 #define PF0_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQBAH 0x0022E63C /* Reset Source: CORER */ #define PF0_SB_CPM_ATQBAH_ATQBAH_S 0 #define PF0_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_CPM_ATQBAL 0x0022E638 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQBAL_ATQBAL_S 6 #define PF0_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_CPM_ATQH 0x0022E644 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQH_ATQH_S 0 #define PF0_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQLEN 0x0022E640 /* Reset Source: PFR */ #define PF0_SB_CPM_ATQLEN_ATQLEN_S 0 #define PF0_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQLEN_ATQVFE_S 28 #define PF0_SB_CPM_ATQLEN_ATQVFE_M BIT(28) #define PF0_SB_CPM_ATQLEN_ATQOVFL_S 29 #define PF0_SB_CPM_ATQLEN_ATQOVFL_M BIT(29) #define PF0_SB_CPM_ATQLEN_ATQCRIT_S 30 #define PF0_SB_CPM_ATQLEN_ATQCRIT_M BIT(30) #define PF0_SB_CPM_ATQLEN_ATQENABLE_S 31 #define PF0_SB_CPM_ATQLEN_ATQENABLE_M 
BIT(31) #define PF0_SB_CPM_ATQT 0x0022E648 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQT_ATQT_S 0 #define PF0_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_REM_DEV_CTL 0x002300F4 /* Reset Source: CORER */ #define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_S 0 #define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) #define PF0_SB_HLP_ARQBAH 0x002300D8 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQBAH_ARQBAH_S 0 #define PF0_SB_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_HLP_ARQBAL 0x002300D4 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQBAL_ARQBAL_LSB_S 0 #define PF0_SB_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_SB_HLP_ARQBAL_ARQBAL_S 6 #define PF0_SB_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_HLP_ARQH 0x002300E0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQH_ARQH_S 0 #define PF0_SB_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQLEN 0x002300DC /* Reset Source: PFR */ #define PF0_SB_HLP_ARQLEN_ARQLEN_S 0 #define PF0_SB_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQLEN_ARQVFE_S 28 #define PF0_SB_HLP_ARQLEN_ARQVFE_M BIT(28) #define PF0_SB_HLP_ARQLEN_ARQOVFL_S 29 #define PF0_SB_HLP_ARQLEN_ARQOVFL_M BIT(29) #define PF0_SB_HLP_ARQLEN_ARQCRIT_S 30 #define PF0_SB_HLP_ARQLEN_ARQCRIT_M BIT(30) #define PF0_SB_HLP_ARQLEN_ARQENABLE_S 31 #define PF0_SB_HLP_ARQLEN_ARQENABLE_M BIT(31) #define PF0_SB_HLP_ARQT 0x002300E4 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQT_ARQT_S 0 #define PF0_SB_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQBAH 0x002300C4 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQBAH_ATQBAH_S 0 #define PF0_SB_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_HLP_ATQBAL 0x002300C0 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQBAL_ATQBAL_S 6 #define PF0_SB_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_HLP_ATQH 0x002300CC /* Reset Source: CORER */ #define PF0_SB_HLP_ATQH_ATQH_S 0 #define PF0_SB_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQLEN 
0x002300C8 /* Reset Source: PFR */ #define PF0_SB_HLP_ATQLEN_ATQLEN_S 0 #define PF0_SB_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQLEN_ATQVFE_S 28 #define PF0_SB_HLP_ATQLEN_ATQVFE_M BIT(28) #define PF0_SB_HLP_ATQLEN_ATQOVFL_S 29 #define PF0_SB_HLP_ATQLEN_ATQOVFL_M BIT(29) #define PF0_SB_HLP_ATQLEN_ATQCRIT_S 30 #define PF0_SB_HLP_ATQLEN_ATQCRIT_M BIT(30) #define PF0_SB_HLP_ATQLEN_ATQENABLE_S 31 #define PF0_SB_HLP_ATQLEN_ATQENABLE_M BIT(31) #define PF0_SB_HLP_ATQT 0x002300D0 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQT_ATQT_S 0 #define PF0_SB_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_REM_DEV_CTL 0x002300E8 /* Reset Source: CORER */ #define PF0_SB_HLP_REM_DEV_CTL_DEST_EN_S 0 #define PF0_SB_HLP_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) #define SB_REM_DEV_DEST(_i) (0x002300F8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define SB_REM_DEV_DEST_MAX_INDEX 7 #define SB_REM_DEV_DEST_DEST_S 0 #define SB_REM_DEV_DEST_DEST_M MAKEMASK(0xF, 0) #define SB_REM_DEV_DEST_DEST_VALID_S 31 #define SB_REM_DEV_DEST_DEST_VALID_M BIT(31) #define VF_MBX_ARQBAH(_VF) (0x0022B800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ARQBAH_MAX_INDEX 255 #define VF_MBX_ARQBAH_ARQBAH_S 0 #define VF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_ARQBAL(_VF) (0x0022B400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ARQBAL_MAX_INDEX 255 #define VF_MBX_ARQBAL_ARQBAL_LSB_S 0 #define VF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VF_MBX_ARQBAL_ARQBAL_S 6 #define VF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_ARQH(_VF) (0x0022C000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ARQH_MAX_INDEX 255 #define VF_MBX_ARQH_ARQH_S 0 #define VF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VF_MBX_ARQLEN_MAX_INDEX 255 #define VF_MBX_ARQLEN_ARQLEN_S 0 #define 
VF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_ARQLEN_ARQVFE_S 28 #define VF_MBX_ARQLEN_ARQVFE_M BIT(28) #define VF_MBX_ARQLEN_ARQOVFL_S 29 #define VF_MBX_ARQLEN_ARQOVFL_M BIT(29) #define VF_MBX_ARQLEN_ARQCRIT_S 30 #define VF_MBX_ARQLEN_ARQCRIT_M BIT(30) #define VF_MBX_ARQLEN_ARQENABLE_S 31 #define VF_MBX_ARQLEN_ARQENABLE_M BIT(31) #define VF_MBX_ARQT(_VF) (0x0022C400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ARQT_MAX_INDEX 255 #define VF_MBX_ARQT_ARQT_S 0 #define VF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_ATQBAH(_VF) (0x0022A400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ATQBAH_MAX_INDEX 255 #define VF_MBX_ATQBAH_ATQBAH_S 0 #define VF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_ATQBAL(_VF) (0x0022A000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ATQBAL_MAX_INDEX 255 #define VF_MBX_ATQBAL_ATQBAL_S 6 #define VF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_ATQH(_VF) (0x0022AC00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ATQH_MAX_INDEX 255 #define VF_MBX_ATQH_ATQH_S 0 #define VF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VF_MBX_ATQLEN_MAX_INDEX 255 #define VF_MBX_ATQLEN_ATQLEN_S 0 #define VF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_ATQLEN_ATQVFE_S 28 #define VF_MBX_ATQLEN_ATQVFE_M BIT(28) #define VF_MBX_ATQLEN_ATQOVFL_S 29 #define VF_MBX_ATQLEN_ATQOVFL_M BIT(29) #define VF_MBX_ATQLEN_ATQCRIT_S 30 #define VF_MBX_ATQLEN_ATQCRIT_M BIT(30) #define VF_MBX_ATQLEN_ATQENABLE_S 31 #define VF_MBX_ATQLEN_ATQENABLE_M BIT(31) #define VF_MBX_ATQT(_VF) (0x0022B000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VF_MBX_ATQT_MAX_INDEX 255 #define VF_MBX_ATQT_ATQT_S 0 #define VF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_CPM_ARQBAH(_VF128) (0x0022D400 + ((_VF128) * 4)) /* _i=0...127 */ /* 
Reset Source: CORER */ #define VF_MBX_CPM_ARQBAH_MAX_INDEX 127 #define VF_MBX_CPM_ARQBAH_ARQBAH_S 0 #define VF_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_CPM_ARQBAL(_VF128) (0x0022D200 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ARQBAL_MAX_INDEX 127 #define VF_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0 #define VF_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VF_MBX_CPM_ARQBAL_ARQBAL_S 6 #define VF_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_CPM_ARQH(_VF128) (0x0022D800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ARQH_MAX_INDEX 127 #define VF_MBX_CPM_ARQH_ARQH_S 0 #define VF_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_CPM_ARQLEN(_VF128) (0x0022D600 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ #define VF_MBX_CPM_ARQLEN_MAX_INDEX 127 #define VF_MBX_CPM_ARQLEN_ARQLEN_S 0 #define VF_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_CPM_ARQLEN_ARQVFE_S 28 #define VF_MBX_CPM_ARQLEN_ARQVFE_M BIT(28) #define VF_MBX_CPM_ARQLEN_ARQOVFL_S 29 #define VF_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29) #define VF_MBX_CPM_ARQLEN_ARQCRIT_S 30 #define VF_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30) #define VF_MBX_CPM_ARQLEN_ARQENABLE_S 31 #define VF_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31) #define VF_MBX_CPM_ARQT(_VF128) (0x0022DA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ARQT_MAX_INDEX 127 #define VF_MBX_CPM_ARQT_ARQT_S 0 #define VF_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_CPM_ATQBAH(_VF128) (0x0022CA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ATQBAH_MAX_INDEX 127 #define VF_MBX_CPM_ATQBAH_ATQBAH_S 0 #define VF_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_CPM_ATQBAL(_VF128) (0x0022C800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ATQBAL_MAX_INDEX 127 #define VF_MBX_CPM_ATQBAL_ATQBAL_S 6 #define VF_MBX_CPM_ATQBAL_ATQBAL_M 
MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_CPM_ATQH(_VF128) (0x0022CE00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ATQH_MAX_INDEX 127 #define VF_MBX_CPM_ATQH_ATQH_S 0 #define VF_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_CPM_ATQLEN(_VF128) (0x0022CC00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ #define VF_MBX_CPM_ATQLEN_MAX_INDEX 127 #define VF_MBX_CPM_ATQLEN_ATQLEN_S 0 #define VF_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_CPM_ATQLEN_ATQVFE_S 28 #define VF_MBX_CPM_ATQLEN_ATQVFE_M BIT(28) #define VF_MBX_CPM_ATQLEN_ATQOVFL_S 29 #define VF_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29) #define VF_MBX_CPM_ATQLEN_ATQCRIT_S 30 #define VF_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30) #define VF_MBX_CPM_ATQLEN_ATQENABLE_S 31 #define VF_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31) #define VF_MBX_CPM_ATQT(_VF128) (0x0022D000 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_MBX_CPM_ATQT_MAX_INDEX 127 #define VF_MBX_CPM_ATQT_ATQT_S 0 #define VF_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_HLP_ARQBAH(_VF16) (0x0022DD80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ARQBAH_MAX_INDEX 15 #define VF_MBX_HLP_ARQBAH_ARQBAH_S 0 #define VF_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_HLP_ARQBAL(_VF16) (0x0022DD40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ARQBAL_MAX_INDEX 15 #define VF_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0 #define VF_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VF_MBX_HLP_ARQBAL_ARQBAL_S 6 #define VF_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_HLP_ARQH(_VF16) (0x0022DE00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ARQH_MAX_INDEX 15 #define VF_MBX_HLP_ARQH_ARQH_S 0 #define VF_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_HLP_ARQLEN(_VF16) (0x0022DDC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ #define VF_MBX_HLP_ARQLEN_MAX_INDEX 15 #define 
VF_MBX_HLP_ARQLEN_ARQLEN_S 0 #define VF_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_HLP_ARQLEN_ARQVFE_S 28 #define VF_MBX_HLP_ARQLEN_ARQVFE_M BIT(28) #define VF_MBX_HLP_ARQLEN_ARQOVFL_S 29 #define VF_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29) #define VF_MBX_HLP_ARQLEN_ARQCRIT_S 30 #define VF_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30) #define VF_MBX_HLP_ARQLEN_ARQENABLE_S 31 #define VF_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31) #define VF_MBX_HLP_ARQT(_VF16) (0x0022DE40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ARQT_MAX_INDEX 15 #define VF_MBX_HLP_ARQT_ARQT_S 0 #define VF_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_HLP_ATQBAH(_VF16) (0x0022DC40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ATQBAH_MAX_INDEX 15 #define VF_MBX_HLP_ATQBAH_ATQBAH_S 0 #define VF_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_HLP_ATQBAL(_VF16) (0x0022DC00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ATQBAL_MAX_INDEX 15 #define VF_MBX_HLP_ATQBAL_ATQBAL_S 6 #define VF_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_HLP_ATQH(_VF16) (0x0022DCC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_HLP_ATQH_MAX_INDEX 15 #define VF_MBX_HLP_ATQH_ATQH_S 0 #define VF_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_HLP_ATQLEN(_VF16) (0x0022DC80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ #define VF_MBX_HLP_ATQLEN_MAX_INDEX 15 #define VF_MBX_HLP_ATQLEN_ATQLEN_S 0 #define VF_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_HLP_ATQLEN_ATQVFE_S 28 #define VF_MBX_HLP_ATQLEN_ATQVFE_M BIT(28) #define VF_MBX_HLP_ATQLEN_ATQOVFL_S 29 #define VF_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29) #define VF_MBX_HLP_ATQLEN_ATQCRIT_S 30 #define VF_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30) #define VF_MBX_HLP_ATQLEN_ATQENABLE_S 31 #define VF_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31) #define VF_MBX_HLP_ATQT(_VF16) (0x0022DD00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: 
CORER */ #define VF_MBX_HLP_ATQT_MAX_INDEX 15 #define VF_MBX_HLP_ATQT_ATQT_S 0 #define VF_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_PSM_ARQBAH(_VF16) (0x0022E000 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ARQBAH_MAX_INDEX 15 #define VF_MBX_PSM_ARQBAH_ARQBAH_S 0 #define VF_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_PSM_ARQBAL(_VF16) (0x0022DFC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ARQBAL_MAX_INDEX 15 #define VF_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0 #define VF_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VF_MBX_PSM_ARQBAL_ARQBAL_S 6 #define VF_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_PSM_ARQH(_VF16) (0x0022E080 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ARQH_MAX_INDEX 15 #define VF_MBX_PSM_ARQH_ARQH_S 0 #define VF_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_PSM_ARQLEN(_VF16) (0x0022E040 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ #define VF_MBX_PSM_ARQLEN_MAX_INDEX 15 #define VF_MBX_PSM_ARQLEN_ARQLEN_S 0 #define VF_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_PSM_ARQLEN_ARQVFE_S 28 #define VF_MBX_PSM_ARQLEN_ARQVFE_M BIT(28) #define VF_MBX_PSM_ARQLEN_ARQOVFL_S 29 #define VF_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29) #define VF_MBX_PSM_ARQLEN_ARQCRIT_S 30 #define VF_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30) #define VF_MBX_PSM_ARQLEN_ARQENABLE_S 31 #define VF_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31) #define VF_MBX_PSM_ARQT(_VF16) (0x0022E0C0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ARQT_MAX_INDEX 15 #define VF_MBX_PSM_ARQT_ARQT_S 0 #define VF_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define VF_MBX_PSM_ATQBAH(_VF16) (0x0022DEC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ATQBAH_MAX_INDEX 15 #define VF_MBX_PSM_ATQBAH_ATQBAH_S 0 #define VF_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_MBX_PSM_ATQBAL(_VF16) (0x0022DE80 + 
((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ATQBAL_MAX_INDEX 15 #define VF_MBX_PSM_ATQBAL_ATQBAL_S 6 #define VF_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_MBX_PSM_ATQH(_VF16) (0x0022DF40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ATQH_MAX_INDEX 15 #define VF_MBX_PSM_ATQH_ATQH_S 0 #define VF_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define VF_MBX_PSM_ATQLEN(_VF16) (0x0022DF00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ #define VF_MBX_PSM_ATQLEN_MAX_INDEX 15 #define VF_MBX_PSM_ATQLEN_ATQLEN_S 0 #define VF_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define VF_MBX_PSM_ATQLEN_ATQVFE_S 28 #define VF_MBX_PSM_ATQLEN_ATQVFE_M BIT(28) #define VF_MBX_PSM_ATQLEN_ATQOVFL_S 29 #define VF_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29) #define VF_MBX_PSM_ATQLEN_ATQCRIT_S 30 #define VF_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30) #define VF_MBX_PSM_ATQLEN_ATQENABLE_S 31 #define VF_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31) #define VF_MBX_PSM_ATQT(_VF16) (0x0022DF80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VF_MBX_PSM_ATQT_MAX_INDEX 15 #define VF_MBX_PSM_ATQT_ATQT_S 0 #define VF_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define VF_SB_CPM_ARQBAH(_VF128) (0x0022F400 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ARQBAH_MAX_INDEX 127 #define VF_SB_CPM_ARQBAH_ARQBAH_S 0 #define VF_SB_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_SB_CPM_ARQBAL(_VF128) (0x0022F200 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ARQBAL_MAX_INDEX 127 #define VF_SB_CPM_ARQBAL_ARQBAL_LSB_S 0 #define VF_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VF_SB_CPM_ARQBAL_ARQBAL_S 6 #define VF_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_SB_CPM_ARQH(_VF128) (0x0022F800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ARQH_MAX_INDEX 127 #define VF_SB_CPM_ARQH_ARQH_S 0 #define VF_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) 
#define VF_SB_CPM_ARQLEN(_VF128) (0x0022F600 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ #define VF_SB_CPM_ARQLEN_MAX_INDEX 127 #define VF_SB_CPM_ARQLEN_ARQLEN_S 0 #define VF_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VF_SB_CPM_ARQLEN_ARQVFE_S 28 #define VF_SB_CPM_ARQLEN_ARQVFE_M BIT(28) #define VF_SB_CPM_ARQLEN_ARQOVFL_S 29 #define VF_SB_CPM_ARQLEN_ARQOVFL_M BIT(29) #define VF_SB_CPM_ARQLEN_ARQCRIT_S 30 #define VF_SB_CPM_ARQLEN_ARQCRIT_M BIT(30) #define VF_SB_CPM_ARQLEN_ARQENABLE_S 31 #define VF_SB_CPM_ARQLEN_ARQENABLE_M BIT(31) #define VF_SB_CPM_ARQT(_VF128) (0x0022FA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ARQT_MAX_INDEX 127 #define VF_SB_CPM_ARQT_ARQT_S 0 #define VF_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define VF_SB_CPM_ATQBAH(_VF128) (0x0022EA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ATQBAH_MAX_INDEX 127 #define VF_SB_CPM_ATQBAH_ATQBAH_S 0 #define VF_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VF_SB_CPM_ATQBAL(_VF128) (0x0022E800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ATQBAL_MAX_INDEX 127 #define VF_SB_CPM_ATQBAL_ATQBAL_S 6 #define VF_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VF_SB_CPM_ATQH(_VF128) (0x0022EE00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ATQH_MAX_INDEX 127 #define VF_SB_CPM_ATQH_ATQH_S 0 #define VF_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) #define VF_SB_CPM_ATQLEN(_VF128) (0x0022EC00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ #define VF_SB_CPM_ATQLEN_MAX_INDEX 127 #define VF_SB_CPM_ATQLEN_ATQLEN_S 0 #define VF_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) #define VF_SB_CPM_ATQLEN_ATQVFE_S 28 #define VF_SB_CPM_ATQLEN_ATQVFE_M BIT(28) #define VF_SB_CPM_ATQLEN_ATQOVFL_S 29 #define VF_SB_CPM_ATQLEN_ATQOVFL_M BIT(29) #define VF_SB_CPM_ATQLEN_ATQCRIT_S 30 #define VF_SB_CPM_ATQLEN_ATQCRIT_M BIT(30) #define VF_SB_CPM_ATQLEN_ATQENABLE_S 31 #define 
VF_SB_CPM_ATQLEN_ATQENABLE_M BIT(31) #define VF_SB_CPM_ATQT(_VF128) (0x0022F000 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VF_SB_CPM_ATQT_MAX_INDEX 127 #define VF_SB_CPM_ATQT_ATQT_S 0 #define VF_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) #define VF_SB_CPM_REM_DEV_CTL 0x002300EC /* Reset Source: CORER */ #define VF_SB_CPM_REM_DEV_CTL_DEST_EN_S 0 #define VF_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) #define VP_MBX_CPM_PF_VF_CTRL(_VP128) (0x00231800 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VP_MBX_CPM_PF_VF_CTRL_MAX_INDEX 127 #define VP_MBX_CPM_PF_VF_CTRL_QUEUE_EN_S 0 #define VP_MBX_CPM_PF_VF_CTRL_QUEUE_EN_M BIT(0) #define VP_MBX_HLP_PF_VF_CTRL(_VP16) (0x00231A00 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VP_MBX_HLP_PF_VF_CTRL_MAX_INDEX 15 #define VP_MBX_HLP_PF_VF_CTRL_QUEUE_EN_S 0 #define VP_MBX_HLP_PF_VF_CTRL_QUEUE_EN_M BIT(0) #define VP_MBX_PF_VF_CTRL(_VSI) (0x00230800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VP_MBX_PF_VF_CTRL_MAX_INDEX 767 #define VP_MBX_PF_VF_CTRL_QUEUE_EN_S 0 #define VP_MBX_PF_VF_CTRL_QUEUE_EN_M BIT(0) #define VP_MBX_PSM_PF_VF_CTRL(_VP16) (0x00231A40 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VP_MBX_PSM_PF_VF_CTRL_MAX_INDEX 15 #define VP_MBX_PSM_PF_VF_CTRL_QUEUE_EN_S 0 #define VP_MBX_PSM_PF_VF_CTRL_QUEUE_EN_M BIT(0) #define VP_SB_CPM_PF_VF_CTRL(_VP128) (0x00231C00 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VP_SB_CPM_PF_VF_CTRL_MAX_INDEX 127 #define VP_SB_CPM_PF_VF_CTRL_QUEUE_EN_S 0 #define VP_SB_CPM_PF_VF_CTRL_QUEUE_EN_M BIT(0) #define GL_DCB_TDSCP2TC_BLOCK_DIS 0x00049218 /* Reset Source: CORER */ #define GL_DCB_TDSCP2TC_BLOCK_DIS_DSCP2TC_BLOCK_DIS_S 0 #define GL_DCB_TDSCP2TC_BLOCK_DIS_DSCP2TC_BLOCK_DIS_M BIT(0) #define GL_DCB_TDSCP2TC_BLOCK_IPV4(_i) (0x00049018 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GL_DCB_TDSCP2TC_BLOCK_IPV4_MAX_INDEX 63 #define 
GL_DCB_TDSCP2TC_BLOCK_IPV4_TC_BLOCK_LUT_S 0 #define GL_DCB_TDSCP2TC_BLOCK_IPV4_TC_BLOCK_LUT_M MAKEMASK(0xFFFFFFFF, 0) #define GL_DCB_TDSCP2TC_BLOCK_IPV6(_i) (0x00049118 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GL_DCB_TDSCP2TC_BLOCK_IPV6_MAX_INDEX 63 #define GL_DCB_TDSCP2TC_BLOCK_IPV6_TC_BLOCK_LUT_S 0 #define GL_DCB_TDSCP2TC_BLOCK_IPV6_TC_BLOCK_LUT_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_GENC 0x00083044 /* Reset Source: CORER */ #define GLDCB_GENC_PCIRTT_S 0 #define GLDCB_GENC_PCIRTT_M MAKEMASK(0xFFFF, 0) #define GLDCB_PRS_RETSTCC(_i) (0x002000B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLDCB_PRS_RETSTCC_MAX_INDEX 31 #define GLDCB_PRS_RETSTCC_BWSHARE_S 0 #define GLDCB_PRS_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0) #define GLDCB_PRS_RETSTCC_ETSTC_S 31 #define GLDCB_PRS_RETSTCC_ETSTC_M BIT(31) #define GLDCB_PRS_RSPMC 0x00200160 /* Reset Source: CORER */ #define GLDCB_PRS_RSPMC_RSPM_S 0 #define GLDCB_PRS_RSPMC_RSPM_M MAKEMASK(0xFF, 0) #define GLDCB_PRS_RSPMC_RPM_MODE_S 8 #define GLDCB_PRS_RSPMC_RPM_MODE_M MAKEMASK(0x3, 8) #define GLDCB_PRS_RSPMC_PRR_MAX_EXP_S 10 #define GLDCB_PRS_RSPMC_PRR_MAX_EXP_M MAKEMASK(0xF, 10) #define GLDCB_PRS_RSPMC_PFCTIMER_S 14 #define GLDCB_PRS_RSPMC_PFCTIMER_M MAKEMASK(0x3FFF, 14) #define GLDCB_PRS_RSPMC_RPM_DIS_S 31 #define GLDCB_PRS_RSPMC_RPM_DIS_M BIT(31) #define GLDCB_RETSTCC(_i) (0x00122140 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLDCB_RETSTCC_MAX_INDEX 31 #define GLDCB_RETSTCC_BWSHARE_S 0 #define GLDCB_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0) #define GLDCB_RETSTCC_ETSTC_S 31 #define GLDCB_RETSTCC_ETSTC_M BIT(31) #define GLDCB_RETSTCS(_i) (0x001221C0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLDCB_RETSTCS_MAX_INDEX 31 #define GLDCB_RETSTCS_CREDITS_S 0 #define GLDCB_RETSTCS_CREDITS_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_RTC2PFC_RCB 0x00122100 /* Reset Source: CORER */ #define GLDCB_RTC2PFC_RCB_TC2PFC_S 0 #define GLDCB_RTC2PFC_RCB_TC2PFC_M 
MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_SWT_RETSTCC(_i) (0x0020A040 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLDCB_SWT_RETSTCC_MAX_INDEX 31 #define GLDCB_SWT_RETSTCC_BWSHARE_S 0 #define GLDCB_SWT_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0) #define GLDCB_SWT_RETSTCC_ETSTC_S 31 #define GLDCB_SWT_RETSTCC_ETSTC_M BIT(31) #define GLDCB_TC2PFC 0x001D2694 /* Reset Source: CORER */ #define GLDCB_TC2PFC_TC2PFC_S 0 #define GLDCB_TC2PFC_TC2PFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TCB_MNG_SP 0x000AE12C /* Reset Source: CORER */ #define GLDCB_TCB_MNG_SP_MNG_SP_S 0 #define GLDCB_TCB_MNG_SP_MNG_SP_M BIT(0) #define GLDCB_TCB_TCLL_CFG 0x000AE134 /* Reset Source: CORER */ #define GLDCB_TCB_TCLL_CFG_LLTC_S 0 #define GLDCB_TCB_TCLL_CFG_LLTC_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TCB_WB_SP 0x000AE310 /* Reset Source: CORER */ #define GLDCB_TCB_WB_SP_WB_SP_S 0 #define GLDCB_TCB_WB_SP_WB_SP_M BIT(0) #define GLDCB_TCUPM_IMM_EN 0x000BC824 /* Reset Source: CORER */ #define GLDCB_TCUPM_IMM_EN_IMM_EN_S 0 #define GLDCB_TCUPM_IMM_EN_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TCUPM_LEGACY_TC 0x000BC828 /* Reset Source: CORER */ #define GLDCB_TCUPM_LEGACY_TC_LEGTC_S 0 #define GLDCB_TCUPM_LEGACY_TC_LEGTC_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TCUPM_NO_EXCEED_DIS 0x000BC830 /* Reset Source: CORER */ #define GLDCB_TCUPM_NO_EXCEED_DIS_NON_EXCEED_DIS_S 0 #define GLDCB_TCUPM_NO_EXCEED_DIS_NON_EXCEED_DIS_M BIT(0) #define GLDCB_TCUPM_WB_DIS 0x000BC834 /* Reset Source: CORER */ #define GLDCB_TCUPM_WB_DIS_PORT_DISABLE_S 0 #define GLDCB_TCUPM_WB_DIS_PORT_DISABLE_M BIT(0) #define GLDCB_TCUPM_WB_DIS_TC_DISABLE_S 1 #define GLDCB_TCUPM_WB_DIS_TC_DISABLE_M BIT(1) #define GLDCB_TFPFCI 0x0009949C /* Reset Source: CORER */ #define GLDCB_TFPFCI_GLDCB_TFPFCI_S 0 #define GLDCB_TFPFCI_GLDCB_TFPFCI_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TLPM_IMM_TCB 0x000A0190 /* Reset Source: CORER */ #define GLDCB_TLPM_IMM_TCB_IMM_EN_S 0 #define GLDCB_TLPM_IMM_TCB_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLDCB_TLPM_IMM_TCUPM 0x000A018C /* Reset Source: CORER */ #define GLDCB_TLPM_IMM_TCUPM_IMM_EN_S 0 #define GLDCB_TLPM_IMM_TCUPM_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TLPM_PCI_DM 0x000A0180 /* Reset Source: CORER */ #define GLDCB_TLPM_PCI_DM_MONITOR_S 0 #define GLDCB_TLPM_PCI_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define GLDCB_TLPM_PCI_DTHR 0x000A0184 /* Reset Source: CORER */ #define GLDCB_TLPM_PCI_DTHR_PCI_TDATA_S 0 #define GLDCB_TLPM_PCI_DTHR_PCI_TDATA_M MAKEMASK(0xFFF, 0) #define GLDCB_TPB_IMM_TLPM 0x00099468 /* Reset Source: CORER */ #define GLDCB_TPB_IMM_TLPM_IMM_EN_S 0 #define GLDCB_TPB_IMM_TLPM_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TPB_IMM_TPB 0x0009946C /* Reset Source: CORER */ #define GLDCB_TPB_IMM_TPB_IMM_EN_S 0 #define GLDCB_TPB_IMM_TPB_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_TPB_TCLL_CFG 0x00099464 /* Reset Source: CORER */ #define GLDCB_TPB_TCLL_CFG_LLTC_S 0 #define GLDCB_TPB_TCLL_CFG_LLTC_M MAKEMASK(0xFFFFFFFF, 0) #define GLTCB_BULK_DWRR_REG_QUANTA 0x000AE0E0 /* Reset Source: CORER */ #define GLTCB_BULK_DWRR_REG_QUANTA_QUANTA_S 0 #define GLTCB_BULK_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define GLTCB_BULK_DWRR_REG_SAT 0x000AE0F0 /* Reset Source: CORER */ #define GLTCB_BULK_DWRR_REG_SAT_SATURATION_S 0 #define GLTCB_BULK_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define GLTCB_BULK_DWRR_WB_QUANTA 0x000AE0E4 /* Reset Source: CORER */ #define GLTCB_BULK_DWRR_WB_QUANTA_QUANTA_S 0 #define GLTCB_BULK_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define GLTCB_BULK_DWRR_WB_SAT 0x000AE0F4 /* Reset Source: CORER */ #define GLTCB_BULK_DWRR_WB_SAT_SATURATION_S 0 #define GLTCB_BULK_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define GLTCB_CREDIT_EXP_CTL 0x000AE120 /* Reset Source: CORER */ #define GLTCB_CREDIT_EXP_CTL_EN_S 0 #define GLTCB_CREDIT_EXP_CTL_EN_M BIT(0) #define GLTCB_CREDIT_EXP_CTL_MIN_PKT_S 1 #define GLTCB_CREDIT_EXP_CTL_MIN_PKT_M MAKEMASK(0x1FF, 1) #define GLTCB_LL_DWRR_REG_QUANTA 0x000AE0E8 /* Reset Source: CORER */ 
#define GLTCB_LL_DWRR_REG_QUANTA_QUANTA_S 0 #define GLTCB_LL_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define GLTCB_LL_DWRR_REG_SAT 0x000AE0F8 /* Reset Source: CORER */ #define GLTCB_LL_DWRR_REG_SAT_SATURATION_S 0 #define GLTCB_LL_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define GLTCB_LL_DWRR_WB_QUANTA 0x000AE0EC /* Reset Source: CORER */ #define GLTCB_LL_DWRR_WB_QUANTA_QUANTA_S 0 #define GLTCB_LL_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define GLTCB_LL_DWRR_WB_SAT 0x000AE0FC /* Reset Source: CORER */ #define GLTCB_LL_DWRR_WB_SAT_SATURATION_S 0 #define GLTCB_LL_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define GLTCB_WB_RL 0x000AE238 /* Reset Source: CORER */ #define GLTCB_WB_RL_PERIOD_S 0 #define GLTCB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0) #define GLTCB_WB_RL_EN_S 16 #define GLTCB_WB_RL_EN_M BIT(16) #define GLTPB_WB_RL 0x00099460 /* Reset Source: CORER */ #define GLTPB_WB_RL_PERIOD_S 0 #define GLTPB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0) #define GLTPB_WB_RL_EN_S 16 #define GLTPB_WB_RL_EN_M BIT(16) #define E800_PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */ #define E800_PRTDCB_FCCFG_TFCE_S 3 #define E800_PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3) #define E800_PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */ #define E800_PRTDCB_FCRTV_FC_REFRESH_TH_S 0 #define E800_PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0) #define E800_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */ #define E800_PRTDCB_FCTTVN_MAX_INDEX 3 #define E800_PRTDCB_FCTTVN_TTV_2N_S 0 #define E800_PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0) #define E800_PRTDCB_FCTTVN_TTV_2N_P1_S 16 #define E800_PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16) #define PRTDCB_GENC 0x00083000 /* Reset Source: CORER */ #define PRTDCB_GENC_NUMTC_S 2 #define PRTDCB_GENC_NUMTC_M MAKEMASK(0xF, 2) #define PRTDCB_GENC_FCOEUP_S 6 #define PRTDCB_GENC_FCOEUP_M MAKEMASK(0x7, 6) #define PRTDCB_GENC_FCOEUP_VALID_S 9 #define PRTDCB_GENC_FCOEUP_VALID_M BIT(9) #define PRTDCB_GENC_PFCLDA_S 16 
#define PRTDCB_GENC_PFCLDA_M MAKEMASK(0xFFFF, 16) #define PRTDCB_GENS 0x00083020 /* Reset Source: CORER */ #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M MAKEMASK(0x7, 0) #define PRTDCB_PRS_RETSC 0x002001A0 /* Reset Source: CORER */ #define PRTDCB_PRS_RETSC_ETS_MODE_S 0 #define PRTDCB_PRS_RETSC_ETS_MODE_M BIT(0) #define PRTDCB_PRS_RETSC_NON_ETS_MODE_S 1 #define PRTDCB_PRS_RETSC_NON_ETS_MODE_M BIT(1) #define PRTDCB_PRS_RETSC_ETS_MAX_EXP_S 2 #define PRTDCB_PRS_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2) #define PRTDCB_PRS_RPRRC 0x00200180 /* Reset Source: CORER */ #define PRTDCB_PRS_RPRRC_BWSHARE_S 0 #define PRTDCB_PRS_RPRRC_BWSHARE_M MAKEMASK(0x3FF, 0) #define PRTDCB_PRS_RPRRC_BWSHARE_DIS_S 31 #define PRTDCB_PRS_RPRRC_BWSHARE_DIS_M BIT(31) #define PRTDCB_RETSC 0x001222A0 /* Reset Source: CORER */ #define PRTDCB_RETSC_ETS_MODE_S 0 #define PRTDCB_RETSC_ETS_MODE_M BIT(0) #define PRTDCB_RETSC_NON_ETS_MODE_S 1 #define PRTDCB_RETSC_NON_ETS_MODE_M BIT(1) #define PRTDCB_RETSC_ETS_MAX_EXP_S 2 #define PRTDCB_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2) #define PRTDCB_RPRRC 0x001220C0 /* Reset Source: CORER */ #define PRTDCB_RPRRC_BWSHARE_S 0 #define PRTDCB_RPRRC_BWSHARE_M MAKEMASK(0x3FF, 0) #define PRTDCB_RPRRC_BWSHARE_DIS_S 31 #define PRTDCB_RPRRC_BWSHARE_DIS_M BIT(31) #define PRTDCB_RPRRS 0x001220E0 /* Reset Source: CORER */ #define PRTDCB_RPRRS_CREDITS_S 0 #define PRTDCB_RPRRS_CREDITS_M MAKEMASK(0xFFFFFFFF, 0) #define PRTDCB_RUP_TDPU 0x00040960 /* Reset Source: CORER */ #define PRTDCB_RUP_TDPU_NOVLANUP_S 0 #define PRTDCB_RUP_TDPU_NOVLANUP_M MAKEMASK(0x7, 0) #define PRTDCB_RUP2TC 0x001D2640 /* Reset Source: CORER */ #define PRTDCB_RUP2TC_UP0TC_S 0 #define PRTDCB_RUP2TC_UP0TC_M MAKEMASK(0x7, 0) #define PRTDCB_RUP2TC_UP1TC_S 3 #define PRTDCB_RUP2TC_UP1TC_M MAKEMASK(0x7, 3) #define PRTDCB_RUP2TC_UP2TC_S 6 #define PRTDCB_RUP2TC_UP2TC_M MAKEMASK(0x7, 6) #define PRTDCB_RUP2TC_UP3TC_S 9 #define PRTDCB_RUP2TC_UP3TC_M MAKEMASK(0x7, 9) #define PRTDCB_RUP2TC_UP4TC_S 12 #define 
PRTDCB_RUP2TC_UP4TC_M MAKEMASK(0x7, 12) #define PRTDCB_RUP2TC_UP5TC_S 15 #define PRTDCB_RUP2TC_UP5TC_M MAKEMASK(0x7, 15) #define PRTDCB_RUP2TC_UP6TC_S 18 #define PRTDCB_RUP2TC_UP6TC_M MAKEMASK(0x7, 18) #define PRTDCB_RUP2TC_UP7TC_S 21 #define PRTDCB_RUP2TC_UP7TC_M MAKEMASK(0x7, 21) #define PRTDCB_SWT_RETSC 0x0020A140 /* Reset Source: CORER */ #define PRTDCB_SWT_RETSC_ETS_MODE_S 0 #define PRTDCB_SWT_RETSC_ETS_MODE_M BIT(0) #define PRTDCB_SWT_RETSC_NON_ETS_MODE_S 1 #define PRTDCB_SWT_RETSC_NON_ETS_MODE_M BIT(1) #define PRTDCB_SWT_RETSC_ETS_MAX_EXP_S 2 #define PRTDCB_SWT_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2) #define PRTDCB_TCB_DWRR_CREDITS 0x000AE000 /* Reset Source: CORER */ #define PRTDCB_TCB_DWRR_CREDITS_CREDITS_S 0 #define PRTDCB_TCB_DWRR_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define PRTDCB_TCB_DWRR_QUANTA 0x000AE020 /* Reset Source: CORER */ #define PRTDCB_TCB_DWRR_QUANTA_QUANTA_S 0 #define PRTDCB_TCB_DWRR_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define PRTDCB_TCB_DWRR_SAT 0x000AE040 /* Reset Source: CORER */ #define PRTDCB_TCB_DWRR_SAT_SATURATION_S 0 #define PRTDCB_TCB_DWRR_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define PRTDCB_TCUPM_NO_EXCEED_DM 0x000BC3C0 /* Reset Source: CORER */ #define PRTDCB_TCUPM_NO_EXCEED_DM_MONITOR_S 0 #define PRTDCB_TCUPM_NO_EXCEED_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define PRTDCB_TCUPM_REG_CM 0x000BC360 /* Reset Source: CORER */ #define PRTDCB_TCUPM_REG_CM_MONITOR_S 0 #define PRTDCB_TCUPM_REG_CM_MONITOR_M MAKEMASK(0x7FFF, 0) #define PRTDCB_TCUPM_REG_CTHR 0x000BC380 /* Reset Source: CORER */ #define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_H_S 0 #define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_H_M MAKEMASK(0x7FFF, 0) #define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_L_S 15 #define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_L_M MAKEMASK(0x7FFF, 15) #define PRTDCB_TCUPM_REG_DM 0x000BC3A0 /* Reset Source: CORER */ #define PRTDCB_TCUPM_REG_DM_MONITOR_S 0 #define PRTDCB_TCUPM_REG_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define PRTDCB_TCUPM_REG_DTHR 0x000BC3E0 /* Reset Source: CORER */ #define 
PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_H_S 0 #define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0) #define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_L_S 12 #define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12) #define PRTDCB_TCUPM_REG_PE_HB_DM 0x000BC400 /* Reset Source: CORER */ #define PRTDCB_TCUPM_REG_PE_HB_DM_MONITOR_S 0 #define PRTDCB_TCUPM_REG_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0) #define PRTDCB_TCUPM_REG_PE_HB_DTHR 0x000BC420 /* Reset Source: CORER */ #define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_H_S 0 #define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0) #define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_L_S 12 #define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12) #define PRTDCB_TCUPM_WAIT_PFC_CM 0x000BC440 /* Reset Source: CORER */ #define PRTDCB_TCUPM_WAIT_PFC_CM_MONITOR_S 0 #define PRTDCB_TCUPM_WAIT_PFC_CM_MONITOR_M MAKEMASK(0x7FFF, 0) #define PRTDCB_TCUPM_WAIT_PFC_CTHR 0x000BC460 /* Reset Source: CORER */ #define PRTDCB_TCUPM_WAIT_PFC_CTHR_PORTOFFTH_S 0 #define PRTDCB_TCUPM_WAIT_PFC_CTHR_PORTOFFTH_M MAKEMASK(0x7FFF, 0) #define PRTDCB_TCUPM_WAIT_PFC_DM 0x000BC480 /* Reset Source: CORER */ #define PRTDCB_TCUPM_WAIT_PFC_DM_MONITOR_S 0 #define PRTDCB_TCUPM_WAIT_PFC_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define PRTDCB_TCUPM_WAIT_PFC_DTHR 0x000BC4A0 /* Reset Source: CORER */ #define PRTDCB_TCUPM_WAIT_PFC_DTHR_PORTOFFTH_S 0 #define PRTDCB_TCUPM_WAIT_PFC_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0) #define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM 0x000BC4C0 /* Reset Source: CORER */ #define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM_MONITOR_S 0 #define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0) #define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR 0x000BC4E0 /* Reset Source: CORER */ #define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR_PORTOFFTH_S 0 #define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0) #define PRTDCB_TDPUC 0x00040940 /* Reset Source: CORER */ #define PRTDCB_TDPUC_MAX_TXFRAME_S 0 #define PRTDCB_TDPUC_MAX_TXFRAME_M MAKEMASK(0xFFFF, 0) #define 
PRTDCB_TDPUC_MAL_LENGTH_S 16 #define PRTDCB_TDPUC_MAL_LENGTH_M BIT(16) #define PRTDCB_TDPUC_MAL_CMD_S 17 #define PRTDCB_TDPUC_MAL_CMD_M BIT(17) #define PRTDCB_TDPUC_TTL_DROP_S 18 #define PRTDCB_TDPUC_TTL_DROP_M BIT(18) #define PRTDCB_TDPUC_UR_DROP_S 19 #define PRTDCB_TDPUC_UR_DROP_M BIT(19) #define PRTDCB_TDPUC_DUMMY_S 20 #define PRTDCB_TDPUC_DUMMY_M BIT(20) #define PRTDCB_TDPUC_BIG_PKT_SIZE_S 21 #define PRTDCB_TDPUC_BIG_PKT_SIZE_M BIT(21) #define PRTDCB_TDPUC_L2_ACCEPT_FAIL_S 22 #define PRTDCB_TDPUC_L2_ACCEPT_FAIL_M BIT(22) #define PRTDCB_TDPUC_DSCP_CHECK_FAIL_S 23 #define PRTDCB_TDPUC_DSCP_CHECK_FAIL_M BIT(23) #define PRTDCB_TDPUC_RCU_ANTISPOOF_S 24 #define PRTDCB_TDPUC_RCU_ANTISPOOF_M BIT(24) #define PRTDCB_TDPUC_NIC_DSI_S 25 #define PRTDCB_TDPUC_NIC_DSI_M BIT(25) #define PRTDCB_TDPUC_NIC_IPSEC_S 26 #define PRTDCB_TDPUC_NIC_IPSEC_M BIT(26) #define PRTDCB_TDPUC_CLEAR_DROP_S 31 #define PRTDCB_TDPUC_CLEAR_DROP_M BIT(31) #define PRTDCB_TFCS 0x001E4560 /* Reset Source: GLOBR */ #define PRTDCB_TFCS_TXOFF_S 0 #define PRTDCB_TFCS_TXOFF_M BIT(0) #define PRTDCB_TFCS_TXOFF0_S 8 #define PRTDCB_TFCS_TXOFF0_M BIT(8) #define PRTDCB_TFCS_TXOFF1_S 9 #define PRTDCB_TFCS_TXOFF1_M BIT(9) #define PRTDCB_TFCS_TXOFF2_S 10 #define PRTDCB_TFCS_TXOFF2_M BIT(10) #define PRTDCB_TFCS_TXOFF3_S 11 #define PRTDCB_TFCS_TXOFF3_M BIT(11) #define PRTDCB_TFCS_TXOFF4_S 12 #define PRTDCB_TFCS_TXOFF4_M BIT(12) #define PRTDCB_TFCS_TXOFF5_S 13 #define PRTDCB_TFCS_TXOFF5_M BIT(13) #define PRTDCB_TFCS_TXOFF6_S 14 #define PRTDCB_TFCS_TXOFF6_M BIT(14) #define PRTDCB_TFCS_TXOFF7_S 15 #define PRTDCB_TFCS_TXOFF7_M BIT(15) #define PRTDCB_TLPM_REG_DM 0x000A0000 /* Reset Source: CORER */ #define PRTDCB_TLPM_REG_DM_MONITOR_S 0 #define PRTDCB_TLPM_REG_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define PRTDCB_TLPM_REG_DTHR 0x000A0020 /* Reset Source: CORER */ #define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_H_S 0 #define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0) #define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_L_S 12 #define 
PRTDCB_TLPM_REG_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12) #define PRTDCB_TLPM_WAIT_PFC_DM 0x000A0040 /* Reset Source: CORER */ #define PRTDCB_TLPM_WAIT_PFC_DM_MONITOR_S 0 #define PRTDCB_TLPM_WAIT_PFC_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define PRTDCB_TLPM_WAIT_PFC_DTHR 0x000A0060 /* Reset Source: CORER */ #define PRTDCB_TLPM_WAIT_PFC_DTHR_PORTOFFTH_S 0 #define PRTDCB_TLPM_WAIT_PFC_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0) #define PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */ #define PRTDCB_TPFCTS_MAX_INDEX 7 #define PRTDCB_TPFCTS_PFCTIMER_S 0 #define PRTDCB_TPFCTS_PFCTIMER_M MAKEMASK(0x3FFF, 0) #define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ #define PRTDCB_TUP2TC_UP0TC_S 0 #define PRTDCB_TUP2TC_UP0TC_M MAKEMASK(0x7, 0) #define PRTDCB_TUP2TC_UP1TC_S 3 #define PRTDCB_TUP2TC_UP1TC_M MAKEMASK(0x7, 3) #define PRTDCB_TUP2TC_UP2TC_S 6 #define PRTDCB_TUP2TC_UP2TC_M MAKEMASK(0x7, 6) #define PRTDCB_TUP2TC_UP3TC_S 9 #define PRTDCB_TUP2TC_UP3TC_M MAKEMASK(0x7, 9) #define PRTDCB_TUP2TC_UP4TC_S 12 #define PRTDCB_TUP2TC_UP4TC_M MAKEMASK(0x7, 12) #define PRTDCB_TUP2TC_UP5TC_S 15 #define PRTDCB_TUP2TC_UP5TC_M MAKEMASK(0x7, 15) #define PRTDCB_TUP2TC_UP6TC_S 18 #define PRTDCB_TUP2TC_UP6TC_M MAKEMASK(0x7, 18) #define PRTDCB_TUP2TC_UP7TC_S 21 #define PRTDCB_TUP2TC_UP7TC_M MAKEMASK(0x7, 21) #define PRTDCB_TX_DSCP2UP_CTL 0x00040980 /* Reset Source: CORER */ #define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_S 0 #define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_M BIT(0) #define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1 #define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M MAKEMASK(0x7, 1) #define PRTDCB_TX_DSCP2UP_IPV4_LUT(_i) (0x000409A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */ #define PRTDCB_TX_DSCP2UP_IPV4_LUT_MAX_INDEX 7 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_S 0 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_M MAKEMASK(0x7, 0) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_1_S 4 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_1_M 
MAKEMASK(0x7, 4) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_2_S 8 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_2_M MAKEMASK(0x7, 8) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_3_S 12 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_3_M MAKEMASK(0x7, 12) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_4_S 16 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_4_M MAKEMASK(0x7, 16) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_5_S 20 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_5_M MAKEMASK(0x7, 20) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_6_S 24 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_6_M MAKEMASK(0x7, 24) #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_7_S 28 #define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_7_M MAKEMASK(0x7, 28) #define PRTDCB_TX_DSCP2UP_IPV6_LUT(_i) (0x00040AA0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */ #define PRTDCB_TX_DSCP2UP_IPV6_LUT_MAX_INDEX 7 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_0_S 0 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_0_M MAKEMASK(0x7, 0) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_1_S 4 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_1_M MAKEMASK(0x7, 4) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_2_S 8 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_2_M MAKEMASK(0x7, 8) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_3_S 12 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_3_M MAKEMASK(0x7, 12) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_4_S 16 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_4_M MAKEMASK(0x7, 16) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_5_S 20 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_5_M MAKEMASK(0x7, 20) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_6_S 24 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_6_M MAKEMASK(0x7, 24) #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_7_S 28 #define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_7_M MAKEMASK(0x7, 28) #define PRTTCB_BULK_DWRR_REG_CREDITS 0x000AE060 /* Reset Source: CORER */ #define 
PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_S 0 #define PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define PRTTCB_BULK_DWRR_WB_CREDITS 0x000AE080 /* Reset Source: CORER */ #define PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_S 0 #define PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define PRTTCB_CREDIT_EXP 0x000AE100 /* Reset Source: CORER */ #define PRTTCB_CREDIT_EXP_EXPANSION_S 0 #define PRTTCB_CREDIT_EXP_EXPANSION_M MAKEMASK(0xFF, 0) #define PRTTCB_LL_DWRR_REG_CREDITS 0x000AE0A0 /* Reset Source: CORER */ #define PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0 #define PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define PRTTCB_LL_DWRR_WB_CREDITS 0x000AE0C0 /* Reset Source: CORER */ #define PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0 #define PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define TCDCB_TCUPM_WAIT_CM(_i) (0x000BC520 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TCUPM_WAIT_CM_MAX_INDEX 31 #define TCDCB_TCUPM_WAIT_CM_MONITOR_S 0 #define TCDCB_TCUPM_WAIT_CM_MONITOR_M MAKEMASK(0x7FFF, 0) #define TCDCB_TCUPM_WAIT_CTHR(_i) (0x000BC5A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TCUPM_WAIT_CTHR_MAX_INDEX 31 #define TCDCB_TCUPM_WAIT_CTHR_TCOFFTH_S 0 #define TCDCB_TCUPM_WAIT_CTHR_TCOFFTH_M MAKEMASK(0x7FFF, 0) #define TCDCB_TCUPM_WAIT_DM(_i) (0x000BC620 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TCUPM_WAIT_DM_MAX_INDEX 31 #define TCDCB_TCUPM_WAIT_DM_MONITOR_S 0 #define TCDCB_TCUPM_WAIT_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define TCDCB_TCUPM_WAIT_DTHR(_i) (0x000BC6A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TCUPM_WAIT_DTHR_MAX_INDEX 31 #define TCDCB_TCUPM_WAIT_DTHR_TCOFFTH_S 0 #define TCDCB_TCUPM_WAIT_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0) #define TCDCB_TCUPM_WAIT_PE_HB_DM(_i) (0x000BC720 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TCUPM_WAIT_PE_HB_DM_MAX_INDEX 31 #define TCDCB_TCUPM_WAIT_PE_HB_DM_MONITOR_S 0 
#define TCDCB_TCUPM_WAIT_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0) #define TCDCB_TCUPM_WAIT_PE_HB_DTHR(_i) (0x000BC7A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TCUPM_WAIT_PE_HB_DTHR_MAX_INDEX 31 #define TCDCB_TCUPM_WAIT_PE_HB_DTHR_TCOFFTH_S 0 #define TCDCB_TCUPM_WAIT_PE_HB_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0) #define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TLPM_WAIT_DM_MAX_INDEX 31 #define TCDCB_TLPM_WAIT_DM_MONITOR_S 0 #define TCDCB_TLPM_WAIT_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) #define TCDCB_TLPM_WAIT_DTHR(_i) (0x000A0100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCDCB_TLPM_WAIT_DTHR_MAX_INDEX 31 #define TCDCB_TLPM_WAIT_DTHR_TCOFFTH_S 0 #define TCDCB_TLPM_WAIT_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0) #define TCTCB_WB_RL_TC_CFG(_i) (0x000AE138 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCTCB_WB_RL_TC_CFG_MAX_INDEX 31 #define TCTCB_WB_RL_TC_CFG_TOKENS_S 0 #define TCTCB_WB_RL_TC_CFG_TOKENS_M MAKEMASK(0xFFF, 0) #define TCTCB_WB_RL_TC_CFG_BURST_SIZE_S 12 #define TCTCB_WB_RL_TC_CFG_BURST_SIZE_M MAKEMASK(0x3FF, 12) #define TCTCB_WB_RL_TC_STAT(_i) (0x000AE1B8 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TCTCB_WB_RL_TC_STAT_MAX_INDEX 31 #define TCTCB_WB_RL_TC_STAT_BUCKET_S 0 #define TCTCB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0) #define TPB_BULK_DWRR_REG_QUANTA 0x00099340 /* Reset Source: CORER */ #define TPB_BULK_DWRR_REG_QUANTA_QUANTA_S 0 #define TPB_BULK_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define TPB_BULK_DWRR_REG_SAT 0x00099350 /* Reset Source: CORER */ #define TPB_BULK_DWRR_REG_SAT_SATURATION_S 0 #define TPB_BULK_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define TPB_BULK_DWRR_WB_QUANTA 0x00099344 /* Reset Source: CORER */ #define TPB_BULK_DWRR_WB_QUANTA_QUANTA_S 0 #define TPB_BULK_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define TPB_BULK_DWRR_WB_SAT 0x00099354 /* Reset Source: CORER */ #define 
TPB_BULK_DWRR_WB_SAT_SATURATION_S 0 #define TPB_BULK_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define TPB_GLDCB_TCB_WB_SP 0x0009966C /* Reset Source: CORER */ #define TPB_GLDCB_TCB_WB_SP_WB_SP_S 0 #define TPB_GLDCB_TCB_WB_SP_WB_SP_M BIT(0) #define TPB_GLTCB_CREDIT_EXP_CTL 0x00099664 /* Reset Source: CORER */ #define TPB_GLTCB_CREDIT_EXP_CTL_EN_S 0 #define TPB_GLTCB_CREDIT_EXP_CTL_EN_M BIT(0) #define TPB_GLTCB_CREDIT_EXP_CTL_MIN_PKT_S 1 #define TPB_GLTCB_CREDIT_EXP_CTL_MIN_PKT_M MAKEMASK(0x1FF, 1) #define TPB_LL_DWRR_REG_QUANTA 0x00099348 /* Reset Source: CORER */ #define TPB_LL_DWRR_REG_QUANTA_QUANTA_S 0 #define TPB_LL_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define TPB_LL_DWRR_REG_SAT 0x00099358 /* Reset Source: CORER */ #define TPB_LL_DWRR_REG_SAT_SATURATION_S 0 #define TPB_LL_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define TPB_LL_DWRR_WB_QUANTA 0x0009934C /* Reset Source: CORER */ #define TPB_LL_DWRR_WB_QUANTA_QUANTA_S 0 #define TPB_LL_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define TPB_LL_DWRR_WB_SAT 0x0009935C /* Reset Source: CORER */ #define TPB_LL_DWRR_WB_SAT_SATURATION_S 0 #define TPB_LL_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define TPB_PRTDCB_TCB_DWRR_CREDITS 0x000991C0 /* Reset Source: CORER */ #define TPB_PRTDCB_TCB_DWRR_CREDITS_CREDITS_S 0 #define TPB_PRTDCB_TCB_DWRR_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define TPB_PRTDCB_TCB_DWRR_QUANTA 0x00099220 /* Reset Source: CORER */ #define TPB_PRTDCB_TCB_DWRR_QUANTA_QUANTA_S 0 #define TPB_PRTDCB_TCB_DWRR_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) #define TPB_PRTDCB_TCB_DWRR_SAT 0x00099260 /* Reset Source: CORER */ #define TPB_PRTDCB_TCB_DWRR_SAT_SATURATION_S 0 #define TPB_PRTDCB_TCB_DWRR_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) #define TPB_PRTTCB_BULK_DWRR_REG_CREDITS 0x000992A0 /* Reset Source: CORER */ #define TPB_PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_S 0 #define TPB_PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define TPB_PRTTCB_BULK_DWRR_WB_CREDITS 0x000992C0 /* Reset 
Source: CORER */ #define TPB_PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_S 0 #define TPB_PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define TPB_PRTTCB_CREDIT_EXP 0x00099644 /* Reset Source: CORER */ #define TPB_PRTTCB_CREDIT_EXP_EXPANSION_S 0 #define TPB_PRTTCB_CREDIT_EXP_EXPANSION_M MAKEMASK(0xFF, 0) #define TPB_PRTTCB_LL_DWRR_REG_CREDITS 0x00099300 /* Reset Source: CORER */ #define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0 #define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define TPB_PRTTCB_LL_DWRR_WB_CREDITS 0x00099320 /* Reset Source: CORER */ #define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0 #define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) #define TPB_WB_RL_TC_CFG(_i) (0x00099360 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TPB_WB_RL_TC_CFG_MAX_INDEX 31 #define TPB_WB_RL_TC_CFG_TOKENS_S 0 #define TPB_WB_RL_TC_CFG_TOKENS_M MAKEMASK(0xFFF, 0) #define TPB_WB_RL_TC_CFG_BURST_SIZE_S 12 #define TPB_WB_RL_TC_CFG_BURST_SIZE_M MAKEMASK(0x3FF, 12) #define TPB_WB_RL_TC_STAT(_i) (0x000993E0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define TPB_WB_RL_TC_STAT_MAX_INDEX 31 #define TPB_WB_RL_TC_STAT_BUCKET_S 0 #define TPB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0) #define E800_GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2 #define E800_GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0 #define E800_GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0) #define E800_GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8 #define E800_GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8) #define E800_GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16 #define E800_GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16) #define E800_GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24 #define E800_GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24) #define E800_GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30 #define E800_GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30) #define E800_GL_ACLEXT_CTLTBL_L2ADDR(_i) 
(0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0 #define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0) #define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8 #define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8) #define E800_GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2 #define E800_GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0 #define E800_GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2 #define E800_GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0 #define E800_GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) #define GL_ACLEXT_DFLT_L2PRFL_ACL(_i) (0x00393800 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_ACLEXT_DFLT_L2PRFL_ACL_MAX_INDEX 2 #define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_S 0 #define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) #define E800_GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2 #define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0 #define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0) #define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16 #define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16) #define E800_GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2 #define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0 #define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0) #define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16 #define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M 
MAKEMASK(0x1FF, 16) #define E800_GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2 #define E800_GL_ACLEXT_FLGS_L1TBL_LSB_S 0 #define E800_GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0) #define E800_GL_ACLEXT_FLGS_L1TBL_MSB_S 16 #define E800_GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16) #define E800_GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2 #define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0 #define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0) #define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 #define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) #define E800_GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_FORCE_PID_MAX_INDEX 2 #define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_S 0 #define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0) #define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31 #define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31) #define E800_GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0) #define E800_GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2 #define E800_GL_ACLEXT_K2N_L2DATA_DATA0_S 0 #define E800_GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_K2N_L2DATA_DATA1_S 8 #define E800_GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) #define E800_GL_ACLEXT_K2N_L2DATA_DATA2_S 16 #define E800_GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) #define 
E800_GL_ACLEXT_K2N_L2DATA_DATA3_S 24 #define E800_GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) #define E800_GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2_PMASK0_MAX_INDEX 2 #define E800_GL_ACLEXT_L2_PMASK0_BITMASK_S 0 #define E800_GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2_PMASK1_MAX_INDEX 2 #define E800_GL_ACLEXT_L2_PMASK1_BITMASK_S 0 #define E800_GL_ACLEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0) #define E800_GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2_TMASK0_MAX_INDEX 2 #define E800_GL_ACLEXT_L2_TMASK0_BITMASK_S 0 #define E800_GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2_TMASK1_MAX_INDEX 2 #define E800_GL_ACLEXT_L2_TMASK1_BITMASK_S 0 #define E800_GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2BMP0_3_MAX_INDEX 2 #define E800_GL_ACLEXT_L2BMP0_3_BMP0_S 0 #define E800_GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_L2BMP0_3_BMP1_S 8 #define E800_GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8) #define E800_GL_ACLEXT_L2BMP0_3_BMP2_S 16 #define E800_GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16) #define E800_GL_ACLEXT_L2BMP0_3_BMP3_S 24 #define E800_GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24) #define E800_GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2BMP4_7_MAX_INDEX 2 #define E800_GL_ACLEXT_L2BMP4_7_BMP4_S 0 #define E800_GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_L2BMP4_7_BMP5_S 8 #define E800_GL_ACLEXT_L2BMP4_7_BMP5_M 
MAKEMASK(0xFF, 8) #define E800_GL_ACLEXT_L2BMP4_7_BMP6_S 16 #define E800_GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16) #define E800_GL_ACLEXT_L2BMP4_7_BMP7_S 24 #define E800_GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24) #define E800_GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_L2PRTMOD_MAX_INDEX 2 #define E800_GL_ACLEXT_L2PRTMOD_XLT1_S 0 #define E800_GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0) #define E800_GL_ACLEXT_L2PRTMOD_XLT2_S 8 #define E800_GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8) #define E800_GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0) #define E800_GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2 #define E800_GL_ACLEXT_N2N_L2DATA_DATA0_S 0 #define E800_GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_N2N_L2DATA_DATA1_S 8 #define E800_GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) #define E800_GL_ACLEXT_N2N_L2DATA_DATA2_S 16 #define E800_GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) #define E800_GL_ACLEXT_N2N_L2DATA_DATA3_S 24 #define E800_GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) #define E800_GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0) #define E800_GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2 
#define E800_GL_ACLEXT_P2P_L1DATA_DATA_S 0 #define E800_GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2 #define E800_GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0 #define E800_GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0) #define E800_GL_ACLEXT_PLVL_SEL(_i) (0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_PLVL_SEL_MAX_INDEX 2 #define E800_GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0 #define E800_GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0) #define E800_GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0) #define E800_GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2 #define E800_GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0 #define E800_GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2 #define E800_GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0 #define E800_GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0) #define E800_GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* 
Reset Source: CORER */ #define E800_GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2 #define E800_GL_ACLEXT_XLT0_L1DATA_DATA_S 0 #define E800_GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0) #define E800_GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2 #define E800_GL_ACLEXT_XLT1_L2DATA_DATA_S 0 #define E800_GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define E800_GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2 #define E800_GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0 #define E800_GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0) #define E800_GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31 #define E800_GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31) #define E800_GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define E800_GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2 #define E800_GL_ACLEXT_XLT2_L2DATA_DATA_S 0 #define E800_GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_CDMD_L1SEL(_i) (0x0020F054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_CDMD_L1SEL_MAX_INDEX 2 #define GL_PREEXT_CDMD_L1SEL_RX_SEL_S 0 #define GL_PREEXT_CDMD_L1SEL_RX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GL_PREEXT_CDMD_L1SEL_RX_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_RX_SEL_M) #define E800_GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0) #define E830_GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x3F, 0) #define GL_PREEXT_CDMD_L1SEL_TX_SEL_S 8 #define GL_PREEXT_CDMD_L1SEL_TX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_TX_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_TX_SEL_M) #define E800_GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8) #define E830_GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x3F, 8) #define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_S 16 #define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M) #define E800_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16) #define E830_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x3F, 16) #define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_S 24 #define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M) #define E800_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24) #define E830_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x3F, 24) #define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_S 30 #define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30) #define GL_PREEXT_CTLTBL_L2ADDR(_i) (0x0020F084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_CTLTBL_L2ADDR_MAX_INDEX 2 #define GL_PREEXT_CTLTBL_L2ADDR_LINE_OFF_S 0 #define GL_PREEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0) #define GL_PREEXT_CTLTBL_L2ADDR_LINE_IDX_S 8 #define GL_PREEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8) #define GL_PREEXT_CTLTBL_L2ADDR_AUTO_INC_S 31 #define GL_PREEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_CTLTBL_L2DATA(_i) (0x0020F090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_CTLTBL_L2DATA_MAX_INDEX 2 #define GL_PREEXT_CTLTBL_L2DATA_DATA_S 0 #define GL_PREEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define 
GL_PREEXT_DFLT_L2PRFL(_i) (0x0020F138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_DFLT_L2PRFL_MAX_INDEX 2 #define GL_PREEXT_DFLT_L2PRFL_DFLT_PRFL_S 0 #define GL_PREEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) #define GL_PREEXT_FLGS_L1SEL0_1(_i) (0x0020F06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_FLGS_L1SEL0_1_MAX_INDEX 2 #define GL_PREEXT_FLGS_L1SEL0_1_FLS0_S 0 #define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M : E800_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M) #define E800_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0) #define E830_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x3FF, 0) #define GL_PREEXT_FLGS_L1SEL0_1_FLS1_S 16 #define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M : E800_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M) #define E800_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16) #define E830_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x3FF, 16) #define GL_PREEXT_FLGS_L1SEL2_3(_i) (0x0020F078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_FLGS_L1SEL2_3_MAX_INDEX 2 #define GL_PREEXT_FLGS_L1SEL2_3_FLS2_S 0 #define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M : E800_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M) #define E800_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0) #define E830_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x3FF, 0) #define GL_PREEXT_FLGS_L1SEL2_3_FLS3_S 16 #define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M : E800_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M) #define E800_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16) #define E830_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x3FF, 16) #define GL_PREEXT_FLGS_L1TBL(_i) (0x0020F060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_FLGS_L1TBL_MAX_INDEX 2 #define GL_PREEXT_FLGS_L1TBL_LSB_S 0 #define GL_PREEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0) #define GL_PREEXT_FLGS_L1TBL_MSB_S 16 #define GL_PREEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16) #define GL_PREEXT_FORCE_L1CDID(_i) (0x0020F018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_FORCE_L1CDID_MAX_INDEX 2 #define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_S 0 #define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0) #define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 #define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) #define GL_PREEXT_FORCE_PID(_i) (0x0020F000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_FORCE_PID_MAX_INDEX 2 #define GL_PREEXT_FORCE_PID_STATIC_PID_S 0 #define GL_PREEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0) #define GL_PREEXT_FORCE_PID_STATIC_PID_EN_S 31 #define GL_PREEXT_FORCE_PID_STATIC_PID_EN_M BIT(31) #define GL_PREEXT_K2N_L2ADDR(_i) (0x0020F144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_K2N_L2ADDR_MAX_INDEX 2 #define GL_PREEXT_K2N_L2ADDR_LINE_IDX_S 0 #define GL_PREEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0) #define GL_PREEXT_K2N_L2ADDR_AUTO_INC_S 31 #define GL_PREEXT_K2N_L2ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_K2N_L2DATA(_i) (0x0020F150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_K2N_L2DATA_MAX_INDEX 2 #define GL_PREEXT_K2N_L2DATA_DATA0_S 0 #define GL_PREEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) #define GL_PREEXT_K2N_L2DATA_DATA1_S 8 #define GL_PREEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) #define GL_PREEXT_K2N_L2DATA_DATA2_S 16 #define GL_PREEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) #define 
GL_PREEXT_K2N_L2DATA_DATA3_S 24 #define GL_PREEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2_PMASK0_MAX_INDEX 2 #define GL_PREEXT_L2_PMASK0_BITMASK_S 0 #define GL_PREEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2_PMASK1_MAX_INDEX 2 #define GL_PREEXT_L2_PMASK1_BITMASK_S 0 #define GL_PREEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0) #define GL_PREEXT_L2_TMASK0(_i) (0x0020F498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2_TMASK0_MAX_INDEX 2 #define GL_PREEXT_L2_TMASK0_BITMASK_S 0 #define GL_PREEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_L2_TMASK1(_i) (0x0020F4A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2_TMASK1_MAX_INDEX 2 #define GL_PREEXT_L2_TMASK1_BITMASK_S 0 #define GL_PREEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0) #define GL_PREEXT_L2BMP0_3(_i) (0x0020F0A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2BMP0_3_MAX_INDEX 2 #define GL_PREEXT_L2BMP0_3_BMP0_S 0 #define GL_PREEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0) #define GL_PREEXT_L2BMP0_3_BMP1_S 8 #define GL_PREEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8) #define GL_PREEXT_L2BMP0_3_BMP2_S 16 #define GL_PREEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16) #define GL_PREEXT_L2BMP0_3_BMP3_S 24 #define GL_PREEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24) #define GL_PREEXT_L2BMP4_7(_i) (0x0020F0B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2BMP4_7_MAX_INDEX 2 #define GL_PREEXT_L2BMP4_7_BMP4_S 0 #define GL_PREEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0) #define GL_PREEXT_L2BMP4_7_BMP5_S 8 #define GL_PREEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8) #define GL_PREEXT_L2BMP4_7_BMP6_S 16 #define GL_PREEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16) #define GL_PREEXT_L2BMP4_7_BMP7_S 24 #define 
GL_PREEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24) #define GL_PREEXT_L2PRTMOD(_i) (0x0020F09C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_L2PRTMOD_MAX_INDEX 2 #define GL_PREEXT_L2PRTMOD_XLT1_S 0 #define GL_PREEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0) #define GL_PREEXT_L2PRTMOD_XLT2_S 8 #define GL_PREEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8) #define GL_PREEXT_N2N_L2ADDR(_i) (0x0020F15C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_N2N_L2ADDR_MAX_INDEX 2 #define GL_PREEXT_N2N_L2ADDR_LINE_IDX_S 0 #define GL_PREEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0) #define GL_PREEXT_N2N_L2ADDR_AUTO_INC_S 31 #define GL_PREEXT_N2N_L2ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_N2N_L2DATA(_i) (0x0020F168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_N2N_L2DATA_MAX_INDEX 2 #define GL_PREEXT_N2N_L2DATA_DATA0_S 0 #define GL_PREEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) #define GL_PREEXT_N2N_L2DATA_DATA1_S 8 #define GL_PREEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) #define GL_PREEXT_N2N_L2DATA_DATA2_S 16 #define GL_PREEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) #define GL_PREEXT_N2N_L2DATA_DATA3_S 24 #define GL_PREEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) #define GL_PREEXT_P2P_L1ADDR(_i) (0x0020F024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_P2P_L1ADDR_MAX_INDEX 2 #define GL_PREEXT_P2P_L1ADDR_LINE_IDX_S 0 #define GL_PREEXT_P2P_L1ADDR_LINE_IDX_M BIT(0) #define GL_PREEXT_P2P_L1ADDR_AUTO_INC_S 31 #define GL_PREEXT_P2P_L1ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_P2P_L1DATA(_i) (0x0020F030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_P2P_L1DATA_MAX_INDEX 2 #define GL_PREEXT_P2P_L1DATA_DATA_S 0 #define GL_PREEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_PID_L2GKTYPE(_i) (0x0020F0F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_PID_L2GKTYPE_MAX_INDEX 2 #define GL_PREEXT_PID_L2GKTYPE_PID_GKTYPE_S 0 #define 
GL_PREEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0) #define GL_PREEXT_PLVL_SEL(_i) (0x0020F00C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_PLVL_SEL_MAX_INDEX 2 #define GL_PREEXT_PLVL_SEL_PLVL_SEL_S 0 #define GL_PREEXT_PLVL_SEL_PLVL_SEL_M BIT(0) #define GL_PREEXT_TCAM_L2ADDR(_i) (0x0020F114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_TCAM_L2ADDR_MAX_INDEX 2 #define GL_PREEXT_TCAM_L2ADDR_LINE_IDX_S 0 #define GL_PREEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0) #define GL_PREEXT_TCAM_L2ADDR_AUTO_INC_S 31 #define GL_PREEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_TCAM_L2DATALSB(_i) (0x0020F120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_TCAM_L2DATALSB_MAX_INDEX 2 #define GL_PREEXT_TCAM_L2DATALSB_DATALSB_S 0 #define GL_PREEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_TCAM_L2DATAMSB(_i) (0x0020F12C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_TCAM_L2DATAMSB_MAX_INDEX 2 #define GL_PREEXT_TCAM_L2DATAMSB_DATAMSB_S 0 #define GL_PREEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0) #define GL_PREEXT_XLT0_L1ADDR(_i) (0x0020F03C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_XLT0_L1ADDR_MAX_INDEX 2 #define GL_PREEXT_XLT0_L1ADDR_LINE_IDX_S 0 #define GL_PREEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0) #define GL_PREEXT_XLT0_L1ADDR_AUTO_INC_S 31 #define GL_PREEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_XLT0_L1DATA(_i) (0x0020F048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_XLT0_L1DATA_MAX_INDEX 2 #define GL_PREEXT_XLT0_L1DATA_DATA_S 0 #define GL_PREEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_XLT1_L2ADDR(_i) (0x0020F0C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_XLT1_L2ADDR_MAX_INDEX 2 #define GL_PREEXT_XLT1_L2ADDR_LINE_IDX_S 0 #define GL_PREEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0) #define GL_PREEXT_XLT1_L2ADDR_AUTO_INC_S 
31 #define GL_PREEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_XLT1_L2DATA(_i) (0x0020F0CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_XLT1_L2DATA_MAX_INDEX 2 #define GL_PREEXT_XLT1_L2DATA_DATA_S 0 #define GL_PREEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PREEXT_XLT2_L2ADDR(_i) (0x0020F0D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_XLT2_L2ADDR_MAX_INDEX 2 #define GL_PREEXT_XLT2_L2ADDR_LINE_IDX_S 0 #define GL_PREEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0) #define GL_PREEXT_XLT2_L2ADDR_AUTO_INC_S 31 #define GL_PREEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31) #define GL_PREEXT_XLT2_L2DATA(_i) (0x0020F0E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PREEXT_XLT2_L2DATA_MAX_INDEX 2 #define GL_PREEXT_XLT2_L2DATA_DATA_S 0 #define GL_PREEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_CDMD_L1SEL(_i) (0x0020E054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_CDMD_L1SEL_MAX_INDEX 2 #define GL_PSTEXT_CDMD_L1SEL_RX_SEL_S 0 #define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M) #define E800_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0) #define E830_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x3F, 0) #define GL_PSTEXT_CDMD_L1SEL_TX_SEL_S 8 #define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M) #define E800_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8) #define E830_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x3F, 8) #define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_S 16 #define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M) #define E800_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16) #define E830_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x3F, 16) #define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_S 24 #define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M) #define E800_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24) #define E830_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x3F, 24) #define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_S 30 #define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30) #define GL_PSTEXT_CTLTBL_L2ADDR(_i) (0x0020E084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_CTLTBL_L2ADDR_MAX_INDEX 2 #define GL_PSTEXT_CTLTBL_L2ADDR_LINE_OFF_S 0 #define GL_PSTEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0) #define GL_PSTEXT_CTLTBL_L2ADDR_LINE_IDX_S 8 #define GL_PSTEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8) #define GL_PSTEXT_CTLTBL_L2ADDR_AUTO_INC_S 31 #define GL_PSTEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_CTLTBL_L2DATA(_i) (0x0020E090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_CTLTBL_L2DATA_MAX_INDEX 2 #define GL_PSTEXT_CTLTBL_L2DATA_DATA_S 0 #define GL_PSTEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_DFLT_L2PRFL(_i) (0x0020E138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_DFLT_L2PRFL_MAX_INDEX 2 #define GL_PSTEXT_DFLT_L2PRFL_DFLT_PRFL_S 0 #define GL_PSTEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) #define GL_PSTEXT_FL15_BMPLSB(_i) (0x0020E480 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FL15_BMPLSB_MAX_INDEX 2 #define GL_PSTEXT_FL15_BMPLSB_BMPLSB_S 0 #define GL_PSTEXT_FL15_BMPLSB_BMPLSB_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_FL15_BMPMSB(_i) (0x0020E48C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FL15_BMPMSB_MAX_INDEX 2 #define 
GL_PSTEXT_FL15_BMPMSB_BMPMSB_S 0 #define GL_PSTEXT_FL15_BMPMSB_BMPMSB_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_FLGS_L1SEL0_1(_i) (0x0020E06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FLGS_L1SEL0_1_MAX_INDEX 2 #define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_S 0 #define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M : E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M) #define E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0) #define E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x3FF, 0) #define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_S 16 #define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M : E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M) #define E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16) #define E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x3FF, 16) #define GL_PSTEXT_FLGS_L1SEL2_3(_i) (0x0020E078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FLGS_L1SEL2_3_MAX_INDEX 2 #define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_S 0 #define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M : E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M) #define E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0) #define E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x3FF, 0) #define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_S 16 #define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M : E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M) #define E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16) #define E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x3FF, 16) #define GL_PSTEXT_FLGS_L1TBL(_i) (0x0020E060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FLGS_L1TBL_MAX_INDEX 2 #define GL_PSTEXT_FLGS_L1TBL_LSB_S 0 #define GL_PSTEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0) #define GL_PSTEXT_FLGS_L1TBL_MSB_S 16 #define GL_PSTEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16) #define GL_PSTEXT_FORCE_L1CDID(_i) (0x0020E018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FORCE_L1CDID_MAX_INDEX 2 #define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_S 0 #define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0) #define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 #define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) #define GL_PSTEXT_FORCE_PID(_i) (0x0020E000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_FORCE_PID_MAX_INDEX 2 #define GL_PSTEXT_FORCE_PID_STATIC_PID_S 0 #define GL_PSTEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0) #define GL_PSTEXT_FORCE_PID_STATIC_PID_EN_S 31 #define GL_PSTEXT_FORCE_PID_STATIC_PID_EN_M BIT(31) #define GL_PSTEXT_K2N_L2ADDR(_i) (0x0020E144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_K2N_L2ADDR_MAX_INDEX 2 #define GL_PSTEXT_K2N_L2ADDR_LINE_IDX_S 0 #define GL_PSTEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0) #define GL_PSTEXT_K2N_L2ADDR_AUTO_INC_S 31 #define GL_PSTEXT_K2N_L2ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_K2N_L2DATA(_i) (0x0020E150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_K2N_L2DATA_MAX_INDEX 2 #define GL_PSTEXT_K2N_L2DATA_DATA0_S 0 #define GL_PSTEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_K2N_L2DATA_DATA1_S 8 #define GL_PSTEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) #define GL_PSTEXT_K2N_L2DATA_DATA2_S 16 #define GL_PSTEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) #define 
GL_PSTEXT_K2N_L2DATA_DATA3_S 24 #define GL_PSTEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) #define GL_PSTEXT_L2_PMASK0(_i) (0x0020E0FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_L2_PMASK0_MAX_INDEX 2 #define GL_PSTEXT_L2_PMASK0_BITMASK_S 0 #define GL_PSTEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_L2_PMASK1(_i) (0x0020E108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_L2_PMASK1_MAX_INDEX 2 #define GL_PSTEXT_L2_PMASK1_BITMASK_S 0 #define GL_PSTEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0) #define GL_PSTEXT_L2_TMASK0(_i) (0x0020E498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_L2_TMASK0_MAX_INDEX 2 #define GL_PSTEXT_L2_TMASK0_BITMASK_S 0 #define GL_PSTEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_L2_TMASK1(_i) (0x0020E4A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_L2_TMASK1_MAX_INDEX 2 #define GL_PSTEXT_L2_TMASK1_BITMASK_S 0 #define GL_PSTEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_L2PRTMOD(_i) (0x0020E09C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_L2PRTMOD_MAX_INDEX 2 #define GL_PSTEXT_L2PRTMOD_XLT1_S 0 #define GL_PSTEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0) #define GL_PSTEXT_L2PRTMOD_XLT2_S 8 #define GL_PSTEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8) #define GL_PSTEXT_N2N_L2ADDR(_i) (0x0020E15C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_N2N_L2ADDR_MAX_INDEX 2 #define GL_PSTEXT_N2N_L2ADDR_LINE_IDX_S 0 #define GL_PSTEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0) #define GL_PSTEXT_N2N_L2ADDR_AUTO_INC_S 31 #define GL_PSTEXT_N2N_L2ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_N2N_L2DATA(_i) (0x0020E168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_N2N_L2DATA_MAX_INDEX 2 #define GL_PSTEXT_N2N_L2DATA_DATA0_S 0 #define GL_PSTEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_N2N_L2DATA_DATA1_S 8 #define GL_PSTEXT_N2N_L2DATA_DATA1_M 
MAKEMASK(0xFF, 8) #define GL_PSTEXT_N2N_L2DATA_DATA2_S 16 #define GL_PSTEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) #define GL_PSTEXT_N2N_L2DATA_DATA3_S 24 #define GL_PSTEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) #define GL_PSTEXT_P2P_L1ADDR(_i) (0x0020E024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_P2P_L1ADDR_MAX_INDEX 2 #define GL_PSTEXT_P2P_L1ADDR_LINE_IDX_S 0 #define GL_PSTEXT_P2P_L1ADDR_LINE_IDX_M BIT(0) #define GL_PSTEXT_P2P_L1ADDR_AUTO_INC_S 31 #define GL_PSTEXT_P2P_L1ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_P2P_L1DATA(_i) (0x0020E030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_P2P_L1DATA_MAX_INDEX 2 #define GL_PSTEXT_P2P_L1DATA_DATA_S 0 #define GL_PSTEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_PID_L2GKTYPE(_i) (0x0020E0F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_PID_L2GKTYPE_MAX_INDEX 2 #define GL_PSTEXT_PID_L2GKTYPE_PID_GKTYPE_S 0 #define GL_PSTEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0) #define GL_PSTEXT_PLVL_SEL(_i) (0x0020E00C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_PLVL_SEL_MAX_INDEX 2 #define GL_PSTEXT_PLVL_SEL_PLVL_SEL_S 0 #define GL_PSTEXT_PLVL_SEL_PLVL_SEL_M BIT(0) #define GL_PSTEXT_PRFLM_CTRL(_i) (0x0020E474 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_PRFLM_CTRL_MAX_INDEX 2 #define GL_PSTEXT_PRFLM_CTRL_PRFL_IDX_S 0 #define GL_PSTEXT_PRFLM_CTRL_PRFL_IDX_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_PRFLM_CTRL_RD_REQ_S 30 #define GL_PSTEXT_PRFLM_CTRL_RD_REQ_M BIT(30) #define GL_PSTEXT_PRFLM_CTRL_WR_REQ_S 31 #define GL_PSTEXT_PRFLM_CTRL_WR_REQ_M BIT(31) #define GL_PSTEXT_PRFLM_DATA_0(_i) (0x0020E174 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GL_PSTEXT_PRFLM_DATA_0_MAX_INDEX 63 #define GL_PSTEXT_PRFLM_DATA_0_PROT_S 0 #define GL_PSTEXT_PRFLM_DATA_0_PROT_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_PRFLM_DATA_0_OFF_S 16 #define GL_PSTEXT_PRFLM_DATA_0_OFF_M MAKEMASK(0x1FF, 16) 
#define GL_PSTEXT_PRFLM_DATA_1(_i) (0x0020E274 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GL_PSTEXT_PRFLM_DATA_1_MAX_INDEX 63 #define GL_PSTEXT_PRFLM_DATA_1_PROT_S 0 #define GL_PSTEXT_PRFLM_DATA_1_PROT_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_PRFLM_DATA_1_OFF_S 16 #define GL_PSTEXT_PRFLM_DATA_1_OFF_M MAKEMASK(0x1FF, 16) #define GL_PSTEXT_PRFLM_DATA_2(_i) (0x0020E374 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GL_PSTEXT_PRFLM_DATA_2_MAX_INDEX 63 #define GL_PSTEXT_PRFLM_DATA_2_PROT_S 0 #define GL_PSTEXT_PRFLM_DATA_2_PROT_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_PRFLM_DATA_2_OFF_S 16 #define GL_PSTEXT_PRFLM_DATA_2_OFF_M MAKEMASK(0x1FF, 16) #define GL_PSTEXT_TCAM_L2ADDR(_i) (0x0020E114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_TCAM_L2ADDR_MAX_INDEX 2 #define GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_S 0 #define GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0) #define GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_S 31 #define GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_TCAM_L2DATALSB(_i) (0x0020E120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_TCAM_L2DATALSB_MAX_INDEX 2 #define GL_PSTEXT_TCAM_L2DATALSB_DATALSB_S 0 #define GL_PSTEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_TCAM_L2DATAMSB(_i) (0x0020E12C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_TCAM_L2DATAMSB_MAX_INDEX 2 #define GL_PSTEXT_TCAM_L2DATAMSB_DATAMSB_S 0 #define GL_PSTEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_XLT0_L1ADDR(_i) (0x0020E03C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_XLT0_L1ADDR_MAX_INDEX 2 #define GL_PSTEXT_XLT0_L1ADDR_LINE_IDX_S 0 #define GL_PSTEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0) #define GL_PSTEXT_XLT0_L1ADDR_AUTO_INC_S 31 #define GL_PSTEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_XLT0_L1DATA(_i) (0x0020E048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define 
GL_PSTEXT_XLT0_L1DATA_MAX_INDEX 2 #define GL_PSTEXT_XLT0_L1DATA_DATA_S 0 #define GL_PSTEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_XLT1_L2ADDR(_i) (0x0020E0C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_XLT1_L2ADDR_MAX_INDEX 2 #define GL_PSTEXT_XLT1_L2ADDR_LINE_IDX_S 0 #define GL_PSTEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0) #define GL_PSTEXT_XLT1_L2ADDR_AUTO_INC_S 31 #define GL_PSTEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_XLT1_L2DATA(_i) (0x0020E0CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_XLT1_L2DATA_MAX_INDEX 2 #define GL_PSTEXT_XLT1_L2DATA_DATA_S 0 #define GL_PSTEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PSTEXT_XLT2_L2ADDR(_i) (0x0020E0D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_XLT2_L2ADDR_MAX_INDEX 2 #define GL_PSTEXT_XLT2_L2ADDR_LINE_IDX_S 0 #define GL_PSTEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0) #define GL_PSTEXT_XLT2_L2ADDR_AUTO_INC_S 31 #define GL_PSTEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31) #define GL_PSTEXT_XLT2_L2DATA(_i) (0x0020E0E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GL_PSTEXT_XLT2_L2DATA_MAX_INDEX 2 #define GL_PSTEXT_XLT2_L2DATA_DATA_S 0 #define GL_PSTEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GLFLXP_PTYPE_TRANSLATION(_i) (0x0045C000 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define GLFLXP_PTYPE_TRANSLATION_MAX_INDEX 255 #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_S 0 #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_M MAKEMASK(0xFF, 0) #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_1_S 8 #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_1_M MAKEMASK(0xFF, 8) #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_2_S 16 #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_2_M MAKEMASK(0xFF, 16) #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_3_S 24 #define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_3_M MAKEMASK(0xFF, 24) #define GLFLXP_RX_CMD_LX_PROT_IDX(_i) (0x0045C400 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: 
CORER */ #define GLFLXP_RX_CMD_LX_PROT_IDX_MAX_INDEX 255 #define GLFLXP_RX_CMD_LX_PROT_IDX_INNER_CLOUD_OFFSET_INDEX_S 0 #define GLFLXP_RX_CMD_LX_PROT_IDX_INNER_CLOUD_OFFSET_INDEX_M MAKEMASK(0x7, 0) #define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_S 4 #define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_M MAKEMASK(0x7, 4) #define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_S 8 #define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_M MAKEMASK(0x7, 8) #define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12 #define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M MAKEMASK(0x3, 12) #define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14 #define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M MAKEMASK(0x3, 14) #define GLFLXP_RX_CMD_PROTIDS(_i, _j) (0x0045A000 + ((_i) * 4 + (_j) * 1024)) /* _i=0...255, _j=0...5 */ /* Reset Source: CORER */ #define GLFLXP_RX_CMD_PROTIDS_MAX_INDEX 255 #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_S 0 #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_M MAKEMASK(0xFF, 0) #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_1_S 8 #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_1_M MAKEMASK(0xFF, 8) #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_2_S 16 #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_2_M MAKEMASK(0xFF, 16) #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_3_S 24 #define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_3_M MAKEMASK(0xFF, 24) #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...4 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLAGS_MAX_INDEX 63 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M MAKEMASK(0x3F, 0) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M MAKEMASK(0x3F, 8) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M MAKEMASK(0x3F, 16) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M MAKEMASK(0x3F, 24) #define GLFLXP_RXDID_FLAGS1_OVERRIDE(_i) (0x0045D600 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER 
*/ #define GLFLXP_RXDID_FLAGS1_OVERRIDE_MAX_INDEX 63 #define GLFLXP_RXDID_FLAGS1_OVERRIDE_FLEXIFLAGS1_OVERRIDE_S 0 #define GLFLXP_RXDID_FLAGS1_OVERRIDE_FLEXIFLAGS1_OVERRIDE_M MAKEMASK(0xF, 0) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045C800 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLX_WRD_0_MAX_INDEX 63 #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0 #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M MAKEMASK(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_0_EXTRACTION_OFFSET_S 8 #define GLFLXP_RXDID_FLX_WRD_0_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30 #define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045C900 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLX_WRD_1_MAX_INDEX 63 #define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0 #define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M MAKEMASK(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_1_EXTRACTION_OFFSET_S 8 #define GLFLXP_RXDID_FLX_WRD_1_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30 #define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045CA00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLX_WRD_2_MAX_INDEX 63 #define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0 #define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M MAKEMASK(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_2_EXTRACTION_OFFSET_S 8 #define GLFLXP_RXDID_FLX_WRD_2_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30 #define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045CB00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLX_WRD_3_MAX_INDEX 63 #define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0 #define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M MAKEMASK(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_3_EXTRACTION_OFFSET_S 8 #define 
GLFLXP_RXDID_FLX_WRD_3_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30 #define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_4(_i) (0x0045CC00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLX_WRD_4_MAX_INDEX 63 #define GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S 0 #define GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M MAKEMASK(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_4_EXTRACTION_OFFSET_S 8 #define GLFLXP_RXDID_FLX_WRD_4_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define GLFLXP_RXDID_FLX_WRD_4_RXDID_OPCODE_S 30 #define GLFLXP_RXDID_FLX_WRD_4_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_5(_i) (0x0045CD00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLFLXP_RXDID_FLX_WRD_5_MAX_INDEX 63 #define GLFLXP_RXDID_FLX_WRD_5_PROT_MDID_S 0 #define GLFLXP_RXDID_FLX_WRD_5_PROT_MDID_M MAKEMASK(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_5_EXTRACTION_OFFSET_S 8 #define GLFLXP_RXDID_FLX_WRD_5_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define GLFLXP_RXDID_FLX_WRD_5_RXDID_OPCODE_S 30 #define GLFLXP_RXDID_FLX_WRD_5_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define GLFLXP_TX_SCHED_CORRECT(_i, _j) (0x00458000 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...31 */ /* Reset Source: CORER */ #define GLFLXP_TX_SCHED_CORRECT_MAX_INDEX 63 #define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_S 0 #define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_M MAKEMASK(0xFF, 0) #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_S 8 #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_M MAKEMASK(0x1F, 8) #define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16 #define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M MAKEMASK(0xFF, 16) #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_S 24 #define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_M MAKEMASK(0x1F, 24) #define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define QRXFLXP_CNTXT_MAX_INDEX 2047 #define QRXFLXP_CNTXT_RXDID_IDX_S 0 #define QRXFLXP_CNTXT_RXDID_IDX_M 
MAKEMASK(0x3F, 0) #define QRXFLXP_CNTXT_RXDID_PRIO_S 8 #define QRXFLXP_CNTXT_RXDID_PRIO_M MAKEMASK(0x7, 8) #define QRXFLXP_CNTXT_TS_S 11 #define QRXFLXP_CNTXT_TS_M BIT(11) #define GL_FWSTS 0x00083048 /* Reset Source: POR */ #define GL_FWSTS_FWS0B_S 0 #define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0) #define GL_FWSTS_FWROWD_S 8 #define GL_FWSTS_FWROWD_M BIT(8) #define GL_FWSTS_FWRI_S 9 #define GL_FWSTS_FWRI_M BIT(9) #define GL_FWSTS_FWS1B_S 16 #define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16) #define GL_TCVMLR_DRAIN_CNTR_CTL 0x000A21E0 /* Reset Source: CORER */ #define GL_TCVMLR_DRAIN_CNTR_CTL_OP_S 0 #define GL_TCVMLR_DRAIN_CNTR_CTL_OP_M BIT(0) #define GL_TCVMLR_DRAIN_CNTR_CTL_PORT_S 1 #define GL_TCVMLR_DRAIN_CNTR_CTL_PORT_M MAKEMASK(0x7, 1) #define GL_TCVMLR_DRAIN_CNTR_CTL_VALUE_S 4 #define GL_TCVMLR_DRAIN_CNTR_CTL_VALUE_M MAKEMASK(0x3FFF, 4) #define GL_TCVMLR_DRAIN_DONE_DEC 0x000A21A8 /* Reset Source: CORER */ #define GL_TCVMLR_DRAIN_DONE_DEC_TARGET_S 0 #define GL_TCVMLR_DRAIN_DONE_DEC_TARGET_M BIT(0) #define GL_TCVMLR_DRAIN_DONE_DEC_INDEX_S 1 #define GL_TCVMLR_DRAIN_DONE_DEC_INDEX_M MAKEMASK(0x1F, 1) #define GL_TCVMLR_DRAIN_DONE_DEC_VALUE_S 6 #define GL_TCVMLR_DRAIN_DONE_DEC_VALUE_M MAKEMASK(0xFF, 6) #define GL_TCVMLR_DRAIN_DONE_TCLAN(_i) (0x000A20A8 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GL_TCVMLR_DRAIN_DONE_TCLAN_MAX_INDEX 31 #define GL_TCVMLR_DRAIN_DONE_TCLAN_COUNT_S 0 #define GL_TCVMLR_DRAIN_DONE_TCLAN_COUNT_M MAKEMASK(0xFF, 0) #define GL_TCVMLR_DRAIN_DONE_TPB(_i) (0x000A2128 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GL_TCVMLR_DRAIN_DONE_TPB_MAX_INDEX 31 #define GL_TCVMLR_DRAIN_DONE_TPB_COUNT_S 0 #define GL_TCVMLR_DRAIN_DONE_TPB_COUNT_M MAKEMASK(0xFF, 0) #define GL_TCVMLR_DRAIN_MARKER 0x000A2008 /* Reset Source: CORER */ #define GL_TCVMLR_DRAIN_MARKER_PORT_S 0 #define GL_TCVMLR_DRAIN_MARKER_PORT_M MAKEMASK(0x7, 0) #define GL_TCVMLR_DRAIN_MARKER_TC_S 3 #define GL_TCVMLR_DRAIN_MARKER_TC_M MAKEMASK(0x1F, 3) #define 
GL_TCVMLR_ERR_STAT 0x000A2024 /* Reset Source: CORER */ #define GL_TCVMLR_ERR_STAT_ERROR_S 0 #define GL_TCVMLR_ERR_STAT_ERROR_M BIT(0) #define GL_TCVMLR_ERR_STAT_FW_REQ_S 1 #define GL_TCVMLR_ERR_STAT_FW_REQ_M BIT(1) #define GL_TCVMLR_ERR_STAT_STAT_S 2 #define GL_TCVMLR_ERR_STAT_STAT_M MAKEMASK(0x7, 2) #define GL_TCVMLR_ERR_STAT_ENT_TYPE_S 5 #define GL_TCVMLR_ERR_STAT_ENT_TYPE_M MAKEMASK(0x7, 5) #define GL_TCVMLR_ERR_STAT_ENT_ID_S 8 #define GL_TCVMLR_ERR_STAT_ENT_ID_M MAKEMASK(0x3FFF, 8) #define GL_TCVMLR_QCFG 0x000A2010 /* Reset Source: CORER */ #define GL_TCVMLR_QCFG_QID_S 0 #define GL_TCVMLR_QCFG_QID_M MAKEMASK(0x3FFF, 0) #define GL_TCVMLR_QCFG_OP_S 14 #define GL_TCVMLR_QCFG_OP_M BIT(14) #define GL_TCVMLR_QCFG_PORT_S 15 #define GL_TCVMLR_QCFG_PORT_M MAKEMASK(0x7, 15) #define GL_TCVMLR_QCFG_TC_S 18 #define GL_TCVMLR_QCFG_TC_M MAKEMASK(0x1F, 18) #define GL_TCVMLR_QCFG_RD 0x000A2014 /* Reset Source: CORER */ #define GL_TCVMLR_QCFG_RD_QID_S 0 #define GL_TCVMLR_QCFG_RD_QID_M MAKEMASK(0x3FFF, 0) #define GL_TCVMLR_QCFG_RD_PORT_S 14 #define GL_TCVMLR_QCFG_RD_PORT_M MAKEMASK(0x7, 14) #define GL_TCVMLR_QCFG_RD_TC_S 17 #define GL_TCVMLR_QCFG_RD_TC_M MAKEMASK(0x1F, 17) #define GL_TCVMLR_QCNTR 0x000A200C /* Reset Source: CORER */ #define GL_TCVMLR_QCNTR_CNTR_S 0 #define GL_TCVMLR_QCNTR_CNTR_M MAKEMASK(0x7FFF, 0) #define GL_TCVMLR_QCTL 0x000A2004 /* Reset Source: CORER */ #define GL_TCVMLR_QCTL_QID_S 0 #define GL_TCVMLR_QCTL_QID_M MAKEMASK(0x3FFF, 0) #define GL_TCVMLR_QCTL_OP_S 14 #define GL_TCVMLR_QCTL_OP_M BIT(14) #define GL_TCVMLR_REQ_STAT 0x000A2018 /* Reset Source: CORER */ #define GL_TCVMLR_REQ_STAT_ENT_TYPE_S 0 #define GL_TCVMLR_REQ_STAT_ENT_TYPE_M MAKEMASK(0x7, 0) #define GL_TCVMLR_REQ_STAT_ENT_ID_S 3 #define GL_TCVMLR_REQ_STAT_ENT_ID_M MAKEMASK(0x3FFF, 3) #define GL_TCVMLR_REQ_STAT_OP_S 17 #define GL_TCVMLR_REQ_STAT_OP_M BIT(17) #define GL_TCVMLR_REQ_STAT_WRITE_STATUS_S 18 #define GL_TCVMLR_REQ_STAT_WRITE_STATUS_M MAKEMASK(0x7, 18) #define GL_TCVMLR_STAT 0x000A201C /* 
Reset Source: CORER */ #define GL_TCVMLR_STAT_ENT_TYPE_S 0 #define GL_TCVMLR_STAT_ENT_TYPE_M MAKEMASK(0x7, 0) #define GL_TCVMLR_STAT_ENT_ID_S 3 #define GL_TCVMLR_STAT_ENT_ID_M MAKEMASK(0x3FFF, 3) #define GL_TCVMLR_STAT_STATUS_S 17 #define GL_TCVMLR_STAT_STATUS_M MAKEMASK(0x7, 17) #define GL_XLR_MARKER_TRIG_TCVMLR 0x000A2000 /* Reset Source: CORER */ #define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_NUM_S 0 #define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0) #define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_TYPE_S 10 #define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_TYPE_M MAKEMASK(0x3, 10) #define GL_XLR_MARKER_TRIG_TCVMLR_PF_NUM_S 12 #define GL_XLR_MARKER_TRIG_TCVMLR_PF_NUM_M MAKEMASK(0x7, 12) #define GL_XLR_MARKER_TRIG_TCVMLR_PORT_NUM_S 16 #define GL_XLR_MARKER_TRIG_TCVMLR_PORT_NUM_M MAKEMASK(0x7, 16) #define GL_XLR_MARKER_TRIG_VMLR 0x00093804 /* Reset Source: CORER */ #define GL_XLR_MARKER_TRIG_VMLR_VM_VF_NUM_S 0 #define GL_XLR_MARKER_TRIG_VMLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0) #define GL_XLR_MARKER_TRIG_VMLR_VM_VF_TYPE_S 10 #define GL_XLR_MARKER_TRIG_VMLR_VM_VF_TYPE_M MAKEMASK(0x3, 10) #define GL_XLR_MARKER_TRIG_VMLR_PF_NUM_S 12 #define GL_XLR_MARKER_TRIG_VMLR_PF_NUM_M MAKEMASK(0x7, 12) #define GL_XLR_MARKER_TRIG_VMLR_PORT_NUM_S 16 #define GL_XLR_MARKER_TRIG_VMLR_PORT_NUM_M MAKEMASK(0x7, 16) #define GLGEN_ANA_ABORT_PTYPE 0x0020C21C /* Reset Source: CORER */ #define GLGEN_ANA_ABORT_PTYPE_ABORT_S 0 #define GLGEN_ANA_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0) #define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT 0x0020C208 /* Reset Source: CORER */ #define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT_NPC_S 0 #define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0) #define GLGEN_ANA_CFG_CTRL 0x0020C104 /* Reset Source: CORER */ #define GLGEN_ANA_CFG_CTRL_LINE_IDX_S 0 #define GLGEN_ANA_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0) #define GLGEN_ANA_CFG_CTRL_TABLE_ID_S 18 #define GLGEN_ANA_CFG_CTRL_TABLE_ID_M MAKEMASK(0xFF, 18) #define GLGEN_ANA_CFG_CTRL_RESRVED_S 26 #define GLGEN_ANA_CFG_CTRL_RESRVED_M MAKEMASK(0x7, 
26) #define GLGEN_ANA_CFG_CTRL_OPERATION_ID_S 29 #define GLGEN_ANA_CFG_CTRL_OPERATION_ID_M MAKEMASK(0x7, 29) #define GLGEN_ANA_CFG_HTBL_LU_RESULT 0x0020C158 /* Reset Source: CORER */ #define GLGEN_ANA_CFG_HTBL_LU_RESULT_HIT_S 0 #define GLGEN_ANA_CFG_HTBL_LU_RESULT_HIT_M BIT(0) #define GLGEN_ANA_CFG_HTBL_LU_RESULT_PG_MEM_IDX_S 1 #define GLGEN_ANA_CFG_HTBL_LU_RESULT_PG_MEM_IDX_M MAKEMASK(0x7, 1) #define GLGEN_ANA_CFG_HTBL_LU_RESULT_ADDR_S 4 #define GLGEN_ANA_CFG_HTBL_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) #define GLGEN_ANA_CFG_LU_KEY(_i) (0x0020C14C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GLGEN_ANA_CFG_LU_KEY_MAX_INDEX 2 #define GLGEN_ANA_CFG_LU_KEY_LU_KEY_S 0 #define GLGEN_ANA_CFG_LU_KEY_LU_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_CFG_RDDATA(_i) (0x0020C10C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLGEN_ANA_CFG_RDDATA_MAX_INDEX 15 #define GLGEN_ANA_CFG_RDDATA_RD_DATA_S 0 #define GLGEN_ANA_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT 0x0020C15C /* Reset Source: CORER */ #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_HIT_S 0 #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0) #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_RSV_S 1 #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1) #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_ADDR_S 4 #define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) #define GLGEN_ANA_CFG_WRDATA 0x0020C108 /* Reset Source: CORER */ #define GLGEN_ANA_CFG_WRDATA_WR_DATA_S 0 #define GLGEN_ANA_CFG_WRDATA_WR_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_DEF_PTYPE 0x0020C100 /* Reset Source: CORER */ #define GLGEN_ANA_DEF_PTYPE_DEF_PTYPE_S 0 #define GLGEN_ANA_DEF_PTYPE_DEF_PTYPE_M MAKEMASK(0x3FF, 0) #define GLGEN_ANA_ERR_CTRL 0x0020C220 /* Reset Source: CORER */ #define GLGEN_ANA_ERR_CTRL_ERR_MASK_EN_S 0 #define GLGEN_ANA_ERR_CTRL_ERR_MASK_EN_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_FLAG_MAP(_i) (0x0020C000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ 
#define GLGEN_ANA_FLAG_MAP_MAX_INDEX 63 #define GLGEN_ANA_FLAG_MAP_FLAG_EN_S 0 #define GLGEN_ANA_FLAG_MAP_FLAG_EN_M BIT(0) #define GLGEN_ANA_FLAG_MAP_EXT_FLAG_ID_S 1 #define GLGEN_ANA_FLAG_MAP_EXT_FLAG_ID_M MAKEMASK(0x3F, 1) #define GLGEN_ANA_INV_NODE_PTYPE 0x0020C210 /* Reset Source: CORER */ #define GLGEN_ANA_INV_NODE_PTYPE_INV_NODE_PTYPE_S 0 #define GLGEN_ANA_INV_NODE_PTYPE_INV_NODE_PTYPE_M MAKEMASK(0x7FF, 0) #define GLGEN_ANA_INV_PTYPE_MARKER 0x0020C218 /* Reset Source: CORER */ #define GLGEN_ANA_INV_PTYPE_MARKER_INV_PTYPE_MARKER_S 0 #define GLGEN_ANA_INV_PTYPE_MARKER_INV_PTYPE_MARKER_M MAKEMASK(0x7F, 0) #define GLGEN_ANA_LAST_PROT_ID(_i) (0x0020C1E4 + ((_i) * 4)) /* _i=0...5 */ /* Reset Source: CORER */ #define GLGEN_ANA_LAST_PROT_ID_MAX_INDEX 5 #define GLGEN_ANA_LAST_PROT_ID_EN_S 0 #define GLGEN_ANA_LAST_PROT_ID_EN_M BIT(0) #define GLGEN_ANA_LAST_PROT_ID_PROT_ID_S 1 #define GLGEN_ANA_LAST_PROT_ID_PROT_ID_M MAKEMASK(0xFF, 1) #define GLGEN_ANA_NMPG_KEYMASK(_i) (0x0020C1D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_NMPG_KEYMASK_MAX_INDEX 3 #define GLGEN_ANA_NMPG_KEYMASK_HASH_KEY_S 0 #define GLGEN_ANA_NMPG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_NMPG0_HASHKEY(_i) (0x0020C1B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_NMPG0_HASHKEY_MAX_INDEX 3 #define GLGEN_ANA_NMPG0_HASHKEY_HASH_KEY_S 0 #define GLGEN_ANA_NMPG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_NO_HIT_PG_NM_PG 0x0020C204 /* Reset Source: CORER */ #define GLGEN_ANA_NO_HIT_PG_NM_PG_NPC_S 0 #define GLGEN_ANA_NO_HIT_PG_NM_PG_NPC_M MAKEMASK(0xFF, 0) #define GLGEN_ANA_OUT_OF_PKT 0x0020C200 /* Reset Source: CORER */ #define GLGEN_ANA_OUT_OF_PKT_NPC_S 0 #define GLGEN_ANA_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0) #define GLGEN_ANA_P2P(_i) (0x0020C160 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLGEN_ANA_P2P_MAX_INDEX 15 #define GLGEN_ANA_P2P_TARGET_PROF_S 0 #define GLGEN_ANA_P2P_TARGET_PROF_M 
MAKEMASK(0xF, 0) #define GLGEN_ANA_PG_KEYMASK(_i) (0x0020C1C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_PG_KEYMASK_MAX_INDEX 3 #define GLGEN_ANA_PG_KEYMASK_HASH_KEY_S 0 #define GLGEN_ANA_PG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_PG0_HASHKEY(_i) (0x0020C1A0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_PG0_HASHKEY_MAX_INDEX 3 #define GLGEN_ANA_PG0_HASHKEY_HASH_KEY_S 0 #define GLGEN_ANA_PG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_PROFIL_CTRL 0x0020C1FC /* Reset Source: CORER */ #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDID_S 0 #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDID_M MAKEMASK(0x1F, 0) #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_S 5 #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5) #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9 #define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9) #define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14 #define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14) #define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_S 16 #define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16) #define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20 #define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20) #define GLGEN_ANA_TX_ABORT_PTYPE 0x0020D21C /* Reset Source: CORER */ #define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_S 0 #define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0) #define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT 0x0020D208 /* Reset Source: CORER */ #define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0 #define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0) #define GLGEN_ANA_TX_CFG_CTRL 0x0020D104 /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_S 0 #define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0) #define GLGEN_ANA_TX_CFG_CTRL_TABLE_ID_S 18 #define GLGEN_ANA_TX_CFG_CTRL_TABLE_ID_M MAKEMASK(0xFF, 18) #define GLGEN_ANA_TX_CFG_CTRL_RESRVED_S 26 #define 
GLGEN_ANA_TX_CFG_CTRL_RESRVED_M MAKEMASK(0x7, 26) #define GLGEN_ANA_TX_CFG_CTRL_OPERATION_ID_S 29 #define GLGEN_ANA_TX_CFG_CTRL_OPERATION_ID_M MAKEMASK(0x7, 29) #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT 0x0020D158 /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_HIT_S 0 #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_HIT_M BIT(0) #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_PG_MEM_IDX_S 1 #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_PG_MEM_IDX_M MAKEMASK(0x7, 1) #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_ADDR_S 4 #define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) #define GLGEN_ANA_TX_CFG_LU_KEY(_i) (0x0020D14C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_LU_KEY_MAX_INDEX 2 #define GLGEN_ANA_TX_CFG_LU_KEY_LU_KEY_S 0 #define GLGEN_ANA_TX_CFG_LU_KEY_LU_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_CFG_RDDATA(_i) (0x0020D10C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_RDDATA_MAX_INDEX 15 #define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_S 0 #define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT 0x0020D15C /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0 #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0) #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1 #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1) #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_S 4 #define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) #define GLGEN_ANA_TX_CFG_WRDATA 0x0020D108 /* Reset Source: CORER */ #define GLGEN_ANA_TX_CFG_WRDATA_WR_DATA_S 0 #define GLGEN_ANA_TX_CFG_WRDATA_WR_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_DEF_PTYPE 0x0020D100 /* Reset Source: CORER */ #define GLGEN_ANA_TX_DEF_PTYPE_DEF_PTYPE_S 0 #define GLGEN_ANA_TX_DEF_PTYPE_DEF_PTYPE_M MAKEMASK(0x3FF, 0) #define GLGEN_ANA_TX_DFD_PACE_OUT 0x0020D4CC /* Reset Source: CORER */ #define GLGEN_ANA_TX_DFD_PACE_OUT_PUSH_S 0 #define 
GLGEN_ANA_TX_DFD_PACE_OUT_PUSH_M BIT(0) #define GLGEN_ANA_TX_ERR_CTRL 0x0020D220 /* Reset Source: CORER */ #define GLGEN_ANA_TX_ERR_CTRL_ERR_MASK_EN_S 0 #define GLGEN_ANA_TX_ERR_CTRL_ERR_MASK_EN_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_FLAG_MAP(_i) (0x0020D000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_FLAG_MAP_MAX_INDEX 63 #define GLGEN_ANA_TX_FLAG_MAP_FLAG_EN_S 0 #define GLGEN_ANA_TX_FLAG_MAP_FLAG_EN_M BIT(0) #define GLGEN_ANA_TX_FLAG_MAP_EXT_FLAG_ID_S 1 #define GLGEN_ANA_TX_FLAG_MAP_EXT_FLAG_ID_M MAKEMASK(0x3F, 1) #define GLGEN_ANA_TX_INV_NODE_PTYPE 0x0020D210 /* Reset Source: CORER */ #define GLGEN_ANA_TX_INV_NODE_PTYPE_INV_NODE_PTYPE_S 0 #define GLGEN_ANA_TX_INV_NODE_PTYPE_INV_NODE_PTYPE_M MAKEMASK(0x7FF, 0) #define GLGEN_ANA_TX_INV_PROT_ID 0x0020D214 /* Reset Source: CORER */ #define GLGEN_ANA_TX_INV_PROT_ID_INV_PROT_ID_S 0 #define GLGEN_ANA_TX_INV_PROT_ID_INV_PROT_ID_M MAKEMASK(0xFF, 0) #define GLGEN_ANA_TX_INV_PTYPE_MARKER 0x0020D218 /* Reset Source: CORER */ #define GLGEN_ANA_TX_INV_PTYPE_MARKER_INV_PTYPE_MARKER_S 0 #define GLGEN_ANA_TX_INV_PTYPE_MARKER_INV_PTYPE_MARKER_M MAKEMASK(0x7F, 0) #define GLGEN_ANA_TX_NMPG_KEYMASK(_i) (0x0020D1D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_NMPG_KEYMASK_MAX_INDEX 3 #define GLGEN_ANA_TX_NMPG_KEYMASK_HASH_KEY_S 0 #define GLGEN_ANA_TX_NMPG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_NMPG0_HASHKEY(_i) (0x0020D1B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_NMPG0_HASHKEY_MAX_INDEX 3 #define GLGEN_ANA_TX_NMPG0_HASHKEY_HASH_KEY_S 0 #define GLGEN_ANA_TX_NMPG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_NO_HIT_PG_NM_PG 0x0020D204 /* Reset Source: CORER */ #define GLGEN_ANA_TX_NO_HIT_PG_NM_PG_NPC_S 0 #define GLGEN_ANA_TX_NO_HIT_PG_NM_PG_NPC_M MAKEMASK(0xFF, 0) #define GLGEN_ANA_TX_P2P(_i) (0x0020D160 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define 
GLGEN_ANA_TX_P2P_MAX_INDEX 15 #define GLGEN_ANA_TX_P2P_TARGET_PROF_S 0 #define GLGEN_ANA_TX_P2P_TARGET_PROF_M MAKEMASK(0xF, 0) #define GLGEN_ANA_TX_PG_KEYMASK(_i) (0x0020D1C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_PG_KEYMASK_MAX_INDEX 3 #define GLGEN_ANA_TX_PG_KEYMASK_HASH_KEY_S 0 #define GLGEN_ANA_TX_PG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_PG0_HASHKEY(_i) (0x0020D1A0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLGEN_ANA_TX_PG0_HASHKEY_MAX_INDEX 3 #define GLGEN_ANA_TX_PG0_HASHKEY_HASH_KEY_S 0 #define GLGEN_ANA_TX_PG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ANA_TX_PROFIL_CTRL 0x0020D1FC /* Reset Source: CORER */ #define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDID_S 0 #define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDID_M MAKEMASK(0x1F, 0) #define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDSTART_S 5 #define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5) #define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9 #define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9) #define GLGEN_ANA_TX_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14 #define GLGEN_ANA_TX_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14) #define GLGEN_ANA_TX_PROFIL_CTRL_DEF_PROF_ID_S 16 #define GLGEN_ANA_TX_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16) #define GLGEN_ANA_TX_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20 #define GLGEN_ANA_TX_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20) #define GLGEN_ASSERT_HLP 0x000B81E4 /* Reset Source: POR */ #define GLGEN_ASSERT_HLP_CORE_ON_RST_S 0 #define GLGEN_ASSERT_HLP_CORE_ON_RST_M BIT(0) #define GLGEN_ASSERT_HLP_FULL_ON_RST_S 1 #define GLGEN_ASSERT_HLP_FULL_ON_RST_M BIT(1) #define GLGEN_CLKSTAT 0x000B8184 /* Reset Source: POR */ #define GLGEN_CLKSTAT_U_CLK_SPEED_S 0 #define GLGEN_CLKSTAT_U_CLK_SPEED_M MAKEMASK(0x7, 0) #define GLGEN_CLKSTAT_L_CLK_SPEED_S 3 #define GLGEN_CLKSTAT_L_CLK_SPEED_M MAKEMASK(0x7, 3) #define GLGEN_CLKSTAT_PSM_CLK_SPEED_S 6 #define 
GLGEN_CLKSTAT_PSM_CLK_SPEED_M MAKEMASK(0x7, 6) #define GLGEN_CLKSTAT_RXCTL_CLK_SPEED_S 9 #define GLGEN_CLKSTAT_RXCTL_CLK_SPEED_M MAKEMASK(0x7, 9) #define GLGEN_CLKSTAT_UANA_CLK_SPEED_S 12 #define GLGEN_CLKSTAT_UANA_CLK_SPEED_M MAKEMASK(0x7, 12) #define GLGEN_CLKSTAT_PE_CLK_SPEED_S 18 #define GLGEN_CLKSTAT_PE_CLK_SPEED_M MAKEMASK(0x7, 18) #define GLGEN_CLKSTAT_SRC 0x000B826C /* Reset Source: POR */ #define GLGEN_CLKSTAT_SRC_U_CLK_SRC_S 0 #define GLGEN_CLKSTAT_SRC_U_CLK_SRC_M MAKEMASK(0x3, 0) #define GLGEN_CLKSTAT_SRC_L_CLK_SRC_S 2 #define GLGEN_CLKSTAT_SRC_L_CLK_SRC_M MAKEMASK(0x3, 2) #define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S 4 #define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M MAKEMASK(0x3, 4) #define GLGEN_CLKSTAT_SRC_RXCTL_CLK_SRC_S 6 #define GLGEN_CLKSTAT_SRC_RXCTL_CLK_SRC_M MAKEMASK(0x3, 6) #define GLGEN_CLKSTAT_SRC_UANA_CLK_SRC_S 8 #define GLGEN_CLKSTAT_SRC_UANA_CLK_SRC_M MAKEMASK(0xF, 8) #define GLGEN_ECC_ERR_INT_TOG_MASK_H 0x00093A00 /* Reset Source: CORER */ #define GLGEN_ECC_ERR_INT_TOG_MASK_H_CLIENT_NUM_S 0 #define GLGEN_ECC_ERR_INT_TOG_MASK_H_CLIENT_NUM_M MAKEMASK(0x7F, 0) #define GLGEN_ECC_ERR_INT_TOG_MASK_L 0x000939FC /* Reset Source: CORER */ #define GLGEN_ECC_ERR_INT_TOG_MASK_L_CLIENT_NUM_S 0 #define GLGEN_ECC_ERR_INT_TOG_MASK_L_CLIENT_NUM_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_ECC_ERR_RST_MASK_H 0x000939F8 /* Reset Source: CORER */ #define GLGEN_ECC_ERR_RST_MASK_H_CLIENT_NUM_S 0 #define GLGEN_ECC_ERR_RST_MASK_H_CLIENT_NUM_M MAKEMASK(0x7F, 0) #define GLGEN_ECC_ERR_RST_MASK_L 0x000939F4 /* Reset Source: CORER */ #define GLGEN_ECC_ERR_RST_MASK_L_CLIENT_NUM_S 0 #define GLGEN_ECC_ERR_RST_MASK_L_CLIENT_NUM_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_GPIO_CTL(_i) (0x000880C8 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: POR */ #define GLGEN_GPIO_CTL_MAX_INDEX 6 #define GLGEN_GPIO_CTL_IN_VALUE_S 0 #define GLGEN_GPIO_CTL_IN_VALUE_M BIT(0) #define GLGEN_GPIO_CTL_IN_TRANSIT_S 1 #define GLGEN_GPIO_CTL_IN_TRANSIT_M BIT(1) #define GLGEN_GPIO_CTL_OUT_VALUE_S 2 #define 
GLGEN_GPIO_CTL_OUT_VALUE_M BIT(2) #define GLGEN_GPIO_CTL_NO_P_UP_S 3 #define GLGEN_GPIO_CTL_NO_P_UP_M BIT(3) #define GLGEN_GPIO_CTL_PIN_DIR_S 4 #define GLGEN_GPIO_CTL_PIN_DIR_M BIT(4) #define GLGEN_GPIO_CTL_TRI_CTL_S 5 #define GLGEN_GPIO_CTL_TRI_CTL_M BIT(5) #define GLGEN_GPIO_CTL_PIN_FUNC_S 8 #define GLGEN_GPIO_CTL_PIN_FUNC_M MAKEMASK(0xF, 8) #define GLGEN_GPIO_CTL_INT_MODE_S 12 #define GLGEN_GPIO_CTL_INT_MODE_M MAKEMASK(0x3, 12) #define GLGEN_MARKER_COUNT 0x000939E8 /* Reset Source: CORER */ #define GLGEN_MARKER_COUNT_MARKER_COUNT_S 0 #define GLGEN_MARKER_COUNT_MARKER_COUNT_M MAKEMASK(0xFF, 0) #define GLGEN_MARKER_COUNT_MARKER_COUNT_EN_S 31 #define GLGEN_MARKER_COUNT_MARKER_COUNT_EN_M BIT(31) #define GLGEN_RSTAT 0x000B8188 /* Reset Source: POR */ #define GLGEN_RSTAT_DEVSTATE_S 0 #define GLGEN_RSTAT_DEVSTATE_M MAKEMASK(0x3, 0) #define GLGEN_RSTAT_RESET_TYPE_S 2 #define GLGEN_RSTAT_RESET_TYPE_M MAKEMASK(0x3, 2) #define GLGEN_RSTAT_CORERCNT_S 4 #define GLGEN_RSTAT_CORERCNT_M MAKEMASK(0x3, 4) #define GLGEN_RSTAT_GLOBRCNT_S 6 #define GLGEN_RSTAT_GLOBRCNT_M MAKEMASK(0x3, 6) #define GLGEN_RSTAT_EMPRCNT_S 8 #define GLGEN_RSTAT_EMPRCNT_M MAKEMASK(0x3, 8) #define GLGEN_RSTAT_TIME_TO_RST_S 10 #define GLGEN_RSTAT_TIME_TO_RST_M MAKEMASK(0x3F, 10) #define GLGEN_RSTAT_RTRIG_FLR_S 16 #define GLGEN_RSTAT_RTRIG_FLR_M BIT(16) #define GLGEN_RSTAT_RTRIG_ECC_S 17 #define GLGEN_RSTAT_RTRIG_ECC_M BIT(17) #define GLGEN_RSTAT_RTRIG_FW_AUX_S 18 #define GLGEN_RSTAT_RTRIG_FW_AUX_M BIT(18) #define GLGEN_RSTCTL 0x000B8180 /* Reset Source: POR */ #define GLGEN_RSTCTL_GRSTDEL_S 0 #define GLGEN_RSTCTL_GRSTDEL_M MAKEMASK(0x3F, 0) #define GLGEN_RSTCTL_ECC_RST_ENA_S 8 #define GLGEN_RSTCTL_ECC_RST_ENA_M BIT(8) #define GLGEN_RSTCTL_ECC_RT_EN_S 30 #define GLGEN_RSTCTL_ECC_RT_EN_M BIT(30) #define GLGEN_RSTCTL_FLR_RT_EN_S 31 #define GLGEN_RSTCTL_FLR_RT_EN_M BIT(31) #define GLGEN_RTRIG 0x000B8190 /* Reset Source: CORER */ #define GLGEN_RTRIG_CORER_S 0 #define GLGEN_RTRIG_CORER_M BIT(0) #define 
GLGEN_RTRIG_GLOBR_S 1 #define GLGEN_RTRIG_GLOBR_M BIT(1) #define GLGEN_RTRIG_EMPFWR_S 2 #define GLGEN_RTRIG_EMPFWR_M BIT(2) #define GLGEN_STAT 0x000B612C /* Reset Source: POR */ #define GLGEN_STAT_RSVD4FW_S 0 #define GLGEN_STAT_RSVD4FW_M MAKEMASK(0xFF, 0) #define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLGEN_VFLRSTAT_MAX_INDEX 7 #define GLGEN_VFLRSTAT_VFLRS_S 0 #define GLGEN_VFLRSTAT_VFLRS_M MAKEMASK(0xFFFFFFFF, 0) #define GLGEN_XLR_MSK2HLP_RDY 0x000939F0 /* Reset Source: CORER */ #define GLGEN_XLR_MSK2HLP_RDY_GLGEN_XLR_MSK2HLP_RDY_S 0 #define GLGEN_XLR_MSK2HLP_RDY_GLGEN_XLR_MSK2HLP_RDY_M BIT(0) #define GLGEN_XLR_TRNS_WAIT_COUNT 0x000939EC /* Reset Source: CORER */ #define GLGEN_XLR_TRNS_WAIT_COUNT_W_BTWN_TRNS_COUNT_S 0 #define GLGEN_XLR_TRNS_WAIT_COUNT_W_BTWN_TRNS_COUNT_M MAKEMASK(0x1F, 0) #define GLGEN_XLR_TRNS_WAIT_COUNT_W_PEND_TRNS_COUNT_S 8 #define GLGEN_XLR_TRNS_WAIT_COUNT_W_PEND_TRNS_COUNT_M MAKEMASK(0xFF, 8) #define GLVFGEN_TIMER 0x000B8214 /* Reset Source: POR */ #define GLVFGEN_TIMER_GTIME_S 0 #define GLVFGEN_TIMER_GTIME_M MAKEMASK(0xFFFFFFFF, 0) #define PFGEN_CTRL 0x00091000 /* Reset Source: CORER */ #define PFGEN_CTRL_PFSWR_S 0 #define PFGEN_CTRL_PFSWR_M BIT(0) #define PFGEN_DRUN 0x00091180 /* Reset Source: CORER */ #define PFGEN_DRUN_DRVUNLD_S 0 #define PFGEN_DRUN_DRVUNLD_M BIT(0) #define PFGEN_PFRSTAT 0x00091080 /* Reset Source: CORER */ #define PFGEN_PFRSTAT_PFRD_S 0 #define PFGEN_PFRSTAT_PFRD_M BIT(0) #define PFGEN_PORTNUM 0x001D2400 /* Reset Source: CORER */ #define PFGEN_PORTNUM_PORT_NUM_S 0 #define PFGEN_PORTNUM_PORT_NUM_M MAKEMASK(0x7, 0) #define PFGEN_STATE 0x00088000 /* Reset Source: CORER */ #define PFGEN_STATE_PFPEEN_S 0 #define PFGEN_STATE_PFPEEN_M BIT(0) #define PFGEN_STATE_RSVD_S 1 #define PFGEN_STATE_RSVD_M BIT(1) #define PFGEN_STATE_PFLINKEN_S 2 #define PFGEN_STATE_PFLINKEN_M BIT(2) #define PFGEN_STATE_PFSCEN_S 3 #define PFGEN_STATE_PFSCEN_M BIT(3) #define PRT_TCVMLR_DRAIN_CNTR 
0x000A21C0 /* Reset Source: CORER */ #define PRT_TCVMLR_DRAIN_CNTR_CNTR_S 0 #define PRT_TCVMLR_DRAIN_CNTR_CNTR_M MAKEMASK(0x3FFF, 0) #define PRTGEN_CNF 0x000B8120 /* Reset Source: POR */ #define PRTGEN_CNF_PORT_DIS_S 0 #define PRTGEN_CNF_PORT_DIS_M BIT(0) #define PRTGEN_CNF_ALLOW_PORT_DIS_S 1 #define PRTGEN_CNF_ALLOW_PORT_DIS_M BIT(1) #define PRTGEN_CNF_EMP_PORT_DIS_S 2 #define PRTGEN_CNF_EMP_PORT_DIS_M BIT(2) #define PRTGEN_CNF2 0x000B8160 /* Reset Source: POR */ #define PRTGEN_CNF2_ACTIVATE_PORT_LINK_S 0 #define PRTGEN_CNF2_ACTIVATE_PORT_LINK_M BIT(0) #define PRTGEN_CNF3 0x000B8280 /* Reset Source: POR */ #define PRTGEN_CNF3_PORT_STAGERING_EN_S 0 #define PRTGEN_CNF3_PORT_STAGERING_EN_M BIT(0) #define PRTGEN_STATUS 0x000B8100 /* Reset Source: POR */ #define PRTGEN_STATUS_PORT_VALID_S 0 #define PRTGEN_STATUS_PORT_VALID_M BIT(0) #define PRTGEN_STATUS_PORT_ACTIVE_S 1 #define PRTGEN_STATUS_PORT_ACTIVE_M BIT(1) #define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: VFR */ #define VFGEN_RSTAT_MAX_INDEX 255 #define VFGEN_RSTAT_VFR_STATE_S 0 #define VFGEN_RSTAT_VFR_STATE_M MAKEMASK(0x3, 0) #define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPGEN_VFRSTAT_MAX_INDEX 255 #define VPGEN_VFRSTAT_VFRD_S 0 #define VPGEN_VFRSTAT_VFRD_M BIT(0) #define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPGEN_VFRTRIG_MAX_INDEX 255 #define VPGEN_VFRTRIG_VFSWR_S 0 #define VPGEN_VFRTRIG_VFSWR_M BIT(0) #define VSIGEN_RSTAT(_VSI) (0x00092800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSIGEN_RSTAT_MAX_INDEX 767 #define VSIGEN_RSTAT_VMRD_S 0 #define VSIGEN_RSTAT_VMRD_M BIT(0) #define VSIGEN_RTRIG(_VSI) (0x00091800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSIGEN_RTRIG_MAX_INDEX 767 #define VSIGEN_RTRIG_VMSWR_S 0 #define VSIGEN_RTRIG_VMSWR_M BIT(0) #define GLHMC_APBVTINUSEBASE(_i) (0x00524A00 + ((_i) * 4)) /* 
_i=0...7 */ /* Reset Source: CORER */ #define GLHMC_APBVTINUSEBASE_MAX_INDEX 7 #define GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_S 0 #define GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_CEQPART(_i) (0x005031C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_CEQPART_MAX_INDEX 7 #define GLHMC_CEQPART_PMCEQBASE_S 0 #define GLHMC_CEQPART_PMCEQBASE_M MAKEMASK(0x3FF, 0) #define GLHMC_CEQPART_PMCEQSIZE_S 16 #define GLHMC_CEQPART_PMCEQSIZE_M MAKEMASK(0x3FF, 16) #define GLHMC_DBCQMAX 0x005220F0 /* Reset Source: CORER */ #define GLHMC_DBCQMAX_GLHMC_DBCQMAX_S 0 #define GLHMC_DBCQMAX_GLHMC_DBCQMAX_M MAKEMASK(0xFFFFF, 0) #define GLHMC_DBCQPART(_i) (0x00503180 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_DBCQPART_MAX_INDEX 7 #define GLHMC_DBCQPART_PMDBCQBASE_S 0 #define GLHMC_DBCQPART_PMDBCQBASE_M MAKEMASK(0x3FFF, 0) #define GLHMC_DBCQPART_PMDBCQSIZE_S 16 #define GLHMC_DBCQPART_PMDBCQSIZE_M MAKEMASK(0x7FFF, 16) #define GLHMC_DBQPMAX 0x005220EC /* Reset Source: CORER */ #define GLHMC_DBQPMAX_GLHMC_DBQPMAX_S 0 #define GLHMC_DBQPMAX_GLHMC_DBQPMAX_M MAKEMASK(0x7FFFF, 0) #define GLHMC_DBQPPART(_i) (0x005044C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_DBQPPART_MAX_INDEX 7 #define GLHMC_DBQPPART_PMDBQPBASE_S 0 #define GLHMC_DBQPPART_PMDBQPBASE_M MAKEMASK(0x3FFF, 0) #define GLHMC_DBQPPART_PMDBQPSIZE_S 16 #define GLHMC_DBQPPART_PMDBQPSIZE_M MAKEMASK(0x7FFF, 16) #define GLHMC_FSIAVBASE(_i) (0x00525600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_FSIAVBASE_MAX_INDEX 7 #define GLHMC_FSIAVBASE_FPMFSIAVBASE_S 0 #define GLHMC_FSIAVBASE_FPMFSIAVBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_FSIAVCNT(_i) (0x00525700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_FSIAVCNT_MAX_INDEX 7 #define GLHMC_FSIAVCNT_FPMFSIAVCNT_S 0 #define GLHMC_FSIAVCNT_FPMFSIAVCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_FSIAVMAX 0x00522068 /* Reset Source: CORER */ #define 
GLHMC_FSIAVMAX_PMFSIAVMAX_S 0 #define GLHMC_FSIAVMAX_PMFSIAVMAX_M MAKEMASK(0x3FFFF, 0) #define GLHMC_FSIAVOBJSZ 0x00522064 /* Reset Source: CORER */ #define GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_S 0 #define GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_FSIMCBASE(_i) (0x00526000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_FSIMCBASE_MAX_INDEX 7 #define GLHMC_FSIMCBASE_FPMFSIMCBASE_S 0 #define GLHMC_FSIMCBASE_FPMFSIMCBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_FSIMCCNT(_i) (0x00526100 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_FSIMCCNT_MAX_INDEX 7 #define GLHMC_FSIMCCNT_FPMFSIMCSZ_S 0 #define GLHMC_FSIMCCNT_FPMFSIMCSZ_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_FSIMCMAX 0x00522060 /* Reset Source: CORER */ #define GLHMC_FSIMCMAX_PMFSIMCMAX_S 0 #define GLHMC_FSIMCMAX_PMFSIMCMAX_M MAKEMASK(0x3FFF, 0) #define GLHMC_FSIMCOBJSZ 0x0052205C /* Reset Source: CORER */ #define GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_S 0 #define GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_FWPDINV 0x0052207C /* Reset Source: CORER */ #define GLHMC_FWPDINV_PMSDIDX_S 0 #define GLHMC_FWPDINV_PMSDIDX_M MAKEMASK(0xFFF, 0) #define GLHMC_FWPDINV_PMSDPARTSEL_S 15 #define GLHMC_FWPDINV_PMSDPARTSEL_M BIT(15) #define GLHMC_FWPDINV_PMPDIDX_S 16 #define GLHMC_FWPDINV_PMPDIDX_M MAKEMASK(0x1FF, 16) #define GLHMC_FWPDINV_FPMAT 0x0010207C /* Reset Source: CORER */ #define GLHMC_FWPDINV_FPMAT_PMSDIDX_S 0 #define GLHMC_FWPDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) #define GLHMC_FWPDINV_FPMAT_PMSDPARTSEL_S 15 #define GLHMC_FWPDINV_FPMAT_PMSDPARTSEL_M BIT(15) #define GLHMC_FWPDINV_FPMAT_PMPDIDX_S 16 #define GLHMC_FWPDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16) #define GLHMC_FWSDDATAHIGH 0x00522078 /* Reset Source: CORER */ #define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_S 0 #define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_FWSDDATAHIGH_FPMAT 0x00102078 /* Reset Source: CORER */ #define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 #define 
GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_FWSDDATALOW 0x00522074 /* Reset Source: CORER */ #define GLHMC_FWSDDATALOW_PMSDVALID_S 0 #define GLHMC_FWSDDATALOW_PMSDVALID_M BIT(0) #define GLHMC_FWSDDATALOW_PMSDTYPE_S 1 #define GLHMC_FWSDDATALOW_PMSDTYPE_M BIT(1) #define GLHMC_FWSDDATALOW_PMSDBPCOUNT_S 2 #define GLHMC_FWSDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) #define GLHMC_FWSDDATALOW_PMSDDATALOW_S 12 #define GLHMC_FWSDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) #define GLHMC_FWSDDATALOW_FPMAT 0x00102074 /* Reset Source: CORER */ #define GLHMC_FWSDDATALOW_FPMAT_PMSDVALID_S 0 #define GLHMC_FWSDDATALOW_FPMAT_PMSDVALID_M BIT(0) #define GLHMC_FWSDDATALOW_FPMAT_PMSDTYPE_S 1 #define GLHMC_FWSDDATALOW_FPMAT_PMSDTYPE_M BIT(1) #define GLHMC_FWSDDATALOW_FPMAT_PMSDBPCOUNT_S 2 #define GLHMC_FWSDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) #define GLHMC_FWSDDATALOW_FPMAT_PMSDDATALOW_S 12 #define GLHMC_FWSDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) #define GLHMC_PEARPBASE(_i) (0x00524800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEARPBASE_MAX_INDEX 7 #define GLHMC_PEARPBASE_FPMPEARPBASE_S 0 #define GLHMC_PEARPBASE_FPMPEARPBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEARPCNT(_i) (0x00524900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEARPCNT_MAX_INDEX 7 #define GLHMC_PEARPCNT_FPMPEARPCNT_S 0 #define GLHMC_PEARPCNT_FPMPEARPCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEARPMAX 0x00522038 /* Reset Source: CORER */ #define GLHMC_PEARPMAX_PMPEARPMAX_S 0 #define GLHMC_PEARPMAX_PMPEARPMAX_M MAKEMASK(0x1FFFF, 0) #define GLHMC_PEARPOBJSZ 0x00522034 /* Reset Source: CORER */ #define GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_S 0 #define GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_M MAKEMASK(0x7, 0) #define GLHMC_PECQBASE(_i) (0x00524200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PECQBASE_MAX_INDEX 7 #define GLHMC_PECQBASE_FPMPECQBASE_S 0 #define GLHMC_PECQBASE_FPMPECQBASE_M MAKEMASK(0xFFFFFF, 0) 
#define GLHMC_PECQCNT(_i) (0x00524300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PECQCNT_MAX_INDEX 7 #define GLHMC_PECQCNT_FPMPECQCNT_S 0 #define GLHMC_PECQCNT_FPMPECQCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PECQOBJSZ 0x00522020 /* Reset Source: CORER */ #define GLHMC_PECQOBJSZ_PMPECQOBJSZ_S 0 #define GLHMC_PECQOBJSZ_PMPECQOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEHDRBASE(_i) (0x00526200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEHDRBASE_MAX_INDEX 7 #define GLHMC_PEHDRBASE_GLHMC_PEHDRBASE_S 0 #define GLHMC_PEHDRBASE_GLHMC_PEHDRBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEHDRCNT(_i) (0x00526300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEHDRCNT_MAX_INDEX 7 #define GLHMC_PEHDRCNT_GLHMC_PEHDRCNT_S 0 #define GLHMC_PEHDRCNT_GLHMC_PEHDRCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEHDRMAX 0x00522008 /* Reset Source: CORER */ #define GLHMC_PEHDRMAX_PMPEHDRMAX_S 0 #define GLHMC_PEHDRMAX_PMPEHDRMAX_M MAKEMASK(0x7FFFF, 0) #define GLHMC_PEHDRMAX_RSVD_S 19 #define GLHMC_PEHDRMAX_RSVD_M MAKEMASK(0x1FFF, 19) #define GLHMC_PEHDROBJSZ 0x00522004 /* Reset Source: CORER */ #define GLHMC_PEHDROBJSZ_PMPEHDROBJSZ_S 0 #define GLHMC_PEHDROBJSZ_PMPEHDROBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEHDROBJSZ_RSVD_S 4 #define GLHMC_PEHDROBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) #define GLHMC_PEHTCNT(_i) (0x00524700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEHTCNT_MAX_INDEX 7 #define GLHMC_PEHTCNT_FPMPEHTCNT_S 0 #define GLHMC_PEHTCNT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEHTCNT_FPMAT(_i) (0x00104700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEHTCNT_FPMAT_MAX_INDEX 7 #define GLHMC_PEHTCNT_FPMAT_FPMPEHTCNT_S 0 #define GLHMC_PEHTCNT_FPMAT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEHTEBASE(_i) (0x00524600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEHTEBASE_MAX_INDEX 7 #define GLHMC_PEHTEBASE_FPMPEHTEBASE_S 0 #define 
GLHMC_PEHTEBASE_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEHTEBASE_FPMAT(_i) (0x00104600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEHTEBASE_FPMAT_MAX_INDEX 7 #define GLHMC_PEHTEBASE_FPMAT_FPMPEHTEBASE_S 0 #define GLHMC_PEHTEBASE_FPMAT_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEHTEOBJSZ 0x0052202C /* Reset Source: CORER */ #define GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_S 0 #define GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEHTEOBJSZ_FPMAT 0x0010202C /* Reset Source: CORER */ #define GLHMC_PEHTEOBJSZ_FPMAT_PMPEHTEOBJSZ_S 0 #define GLHMC_PEHTEOBJSZ_FPMAT_PMPEHTEOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEHTMAX 0x00522030 /* Reset Source: CORER */ #define GLHMC_PEHTMAX_PMPEHTMAX_S 0 #define GLHMC_PEHTMAX_PMPEHTMAX_M MAKEMASK(0x1FFFFF, 0) #define GLHMC_PEHTMAX_FPMAT 0x00102030 /* Reset Source: CORER */ #define GLHMC_PEHTMAX_FPMAT_PMPEHTMAX_S 0 #define GLHMC_PEHTMAX_FPMAT_PMPEHTMAX_M MAKEMASK(0x1FFFFF, 0) #define GLHMC_PEMDBASE(_i) (0x00526400 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEMDBASE_MAX_INDEX 7 #define GLHMC_PEMDBASE_GLHMC_PEMDBASE_S 0 #define GLHMC_PEMDBASE_GLHMC_PEMDBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEMDCNT(_i) (0x00526500 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEMDCNT_MAX_INDEX 7 #define GLHMC_PEMDCNT_GLHMC_PEMDCNT_S 0 #define GLHMC_PEMDCNT_GLHMC_PEMDCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEMDMAX 0x00522010 /* Reset Source: CORER */ #define GLHMC_PEMDMAX_PMPEMDMAX_S 0 #define GLHMC_PEMDMAX_PMPEMDMAX_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEMDMAX_RSVD_S 24 #define GLHMC_PEMDMAX_RSVD_M MAKEMASK(0xFF, 24) #define GLHMC_PEMDOBJSZ 0x0052200C /* Reset Source: CORER */ #define GLHMC_PEMDOBJSZ_PMPEMDOBJSZ_S 0 #define GLHMC_PEMDOBJSZ_PMPEMDOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEMDOBJSZ_RSVD_S 4 #define GLHMC_PEMDOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) #define GLHMC_PEMRBASE(_i) (0x00524C00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER 
*/ #define GLHMC_PEMRBASE_MAX_INDEX 7 #define GLHMC_PEMRBASE_FPMPEMRBASE_S 0 #define GLHMC_PEMRBASE_FPMPEMRBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEMRCNT(_i) (0x00524D00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEMRCNT_MAX_INDEX 7 #define GLHMC_PEMRCNT_FPMPEMRSZ_S 0 #define GLHMC_PEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEMRMAX 0x00522040 /* Reset Source: CORER */ #define GLHMC_PEMRMAX_PMPEMRMAX_S 0 #define GLHMC_PEMRMAX_PMPEMRMAX_M MAKEMASK(0x7FFFFF, 0) #define GLHMC_PEMROBJSZ 0x0052203C /* Reset Source: CORER */ #define GLHMC_PEMROBJSZ_PMPEMROBJSZ_S 0 #define GLHMC_PEMROBJSZ_PMPEMROBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEOOISCBASE(_i) (0x00526600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEOOISCBASE_MAX_INDEX 7 #define GLHMC_PEOOISCBASE_GLHMC_PEOOISCBASE_S 0 #define GLHMC_PEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEOOISCCNT(_i) (0x00526700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEOOISCCNT_MAX_INDEX 7 #define GLHMC_PEOOISCCNT_GLHMC_PEOOISCCNT_S 0 #define GLHMC_PEOOISCCNT_GLHMC_PEOOISCCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEOOISCFFLBASE(_i) (0x00526C00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEOOISCFFLBASE_MAX_INDEX 7 #define GLHMC_PEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_S 0 #define GLHMC_PEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PEOOISCFFLCNT_PMAT(_i) (0x00526D00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEOOISCFFLCNT_PMAT_MAX_INDEX 7 #define GLHMC_PEOOISCFFLCNT_PMAT_FPMPEOOISCFLCNT_S 0 #define GLHMC_PEOOISCFFLCNT_PMAT_FPMPEOOISCFLCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEOOISCFFLMAX 0x005220A4 /* Reset Source: CORER */ #define GLHMC_PEOOISCFFLMAX_PMPEOOISCFFLMAX_S 0 #define GLHMC_PEOOISCFFLMAX_PMPEOOISCFFLMAX_M MAKEMASK(0x7FFFF, 0) #define GLHMC_PEOOISCFFLMAX_RSVD_S 19 #define GLHMC_PEOOISCFFLMAX_RSVD_M MAKEMASK(0x1FFF, 19) #define 
GLHMC_PEOOISCMAX 0x00522018 /* Reset Source: CORER */ #define GLHMC_PEOOISCMAX_PMPEOOISCMAX_S 0 #define GLHMC_PEOOISCMAX_PMPEOOISCMAX_M MAKEMASK(0x7FFFF, 0) #define GLHMC_PEOOISCMAX_RSVD_S 19 #define GLHMC_PEOOISCMAX_RSVD_M MAKEMASK(0x1FFF, 19) #define GLHMC_PEOOISCOBJSZ 0x00522014 /* Reset Source: CORER */ #define GLHMC_PEOOISCOBJSZ_PMPEOOISCOBJSZ_S 0 #define GLHMC_PEOOISCOBJSZ_PMPEOOISCOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEOOISCOBJSZ_RSVD_S 4 #define GLHMC_PEOOISCOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) #define GLHMC_PEPBLBASE(_i) (0x00525800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEPBLBASE_MAX_INDEX 7 #define GLHMC_PEPBLBASE_FPMPEPBLBASE_S 0 #define GLHMC_PEPBLBASE_FPMPEPBLBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEPBLCNT(_i) (0x00525900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEPBLCNT_MAX_INDEX 7 #define GLHMC_PEPBLCNT_FPMPEPBLCNT_S 0 #define GLHMC_PEPBLCNT_FPMPEPBLCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEPBLMAX 0x0052206C /* Reset Source: CORER */ #define GLHMC_PEPBLMAX_PMPEPBLMAX_S 0 #define GLHMC_PEPBLMAX_PMPEPBLMAX_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEQ1BASE(_i) (0x00525200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEQ1BASE_MAX_INDEX 7 #define GLHMC_PEQ1BASE_FPMPEQ1BASE_S 0 #define GLHMC_PEQ1BASE_FPMPEQ1BASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEQ1CNT(_i) (0x00525300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEQ1CNT_MAX_INDEX 7 #define GLHMC_PEQ1CNT_FPMPEQ1CNT_S 0 #define GLHMC_PEQ1CNT_FPMPEQ1CNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEQ1FLBASE(_i) (0x00525400 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEQ1FLBASE_MAX_INDEX 7 #define GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_S 0 #define GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEQ1FLMAX 0x00522058 /* Reset Source: CORER */ #define GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_S 0 #define GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_M MAKEMASK(0x3FFFFFF, 0) #define 
GLHMC_PEQ1MAX 0x00522054 /* Reset Source: CORER */ #define GLHMC_PEQ1MAX_PMPEQ1MAX_S 0 #define GLHMC_PEQ1MAX_PMPEQ1MAX_M MAKEMASK(0xFFFFFFF, 0) #define GLHMC_PEQ1OBJSZ 0x00522050 /* Reset Source: CORER */ #define GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_S 0 #define GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEQPBASE(_i) (0x00524000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEQPBASE_MAX_INDEX 7 #define GLHMC_PEQPBASE_FPMPEQPBASE_S 0 #define GLHMC_PEQPBASE_FPMPEQPBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEQPCNT(_i) (0x00524100 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEQPCNT_MAX_INDEX 7 #define GLHMC_PEQPCNT_FPMPEQPCNT_S 0 #define GLHMC_PEQPCNT_FPMPEQPCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEQPOBJSZ 0x0052201C /* Reset Source: CORER */ #define GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_S 0 #define GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PERRFBASE(_i) (0x00526800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PERRFBASE_MAX_INDEX 7 #define GLHMC_PERRFBASE_GLHMC_PERRFBASE_S 0 #define GLHMC_PERRFBASE_GLHMC_PERRFBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PERRFCNT(_i) (0x00526900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PERRFCNT_MAX_INDEX 7 #define GLHMC_PERRFCNT_GLHMC_PERRFCNT_S 0 #define GLHMC_PERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PERRFFLBASE(_i) (0x00526A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PERRFFLBASE_MAX_INDEX 7 #define GLHMC_PERRFFLBASE_GLHMC_PERRFFLBASE_S 0 #define GLHMC_PERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_PERRFFLCNT_PMAT(_i) (0x00526B00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PERRFFLCNT_PMAT_MAX_INDEX 7 #define GLHMC_PERRFFLCNT_PMAT_FPMPERRFFLCNT_S 0 #define GLHMC_PERRFFLCNT_PMAT_FPMPERRFFLCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PERRFFLMAX 0x005220A0 /* Reset Source: CORER */ #define GLHMC_PERRFFLMAX_PMPERRFFLMAX_S 0 
#define GLHMC_PERRFFLMAX_PMPERRFFLMAX_M MAKEMASK(0x3FFFFFF, 0) #define GLHMC_PERRFFLMAX_RSVD_S 26 #define GLHMC_PERRFFLMAX_RSVD_M MAKEMASK(0x3F, 26) #define GLHMC_PERRFMAX 0x0052209C /* Reset Source: CORER */ #define GLHMC_PERRFMAX_PMPERRFMAX_S 0 #define GLHMC_PERRFMAX_PMPERRFMAX_M MAKEMASK(0xFFFFFFF, 0) #define GLHMC_PERRFMAX_RSVD_S 28 #define GLHMC_PERRFMAX_RSVD_M MAKEMASK(0xF, 28) #define GLHMC_PERRFOBJSZ 0x00522098 /* Reset Source: CORER */ #define GLHMC_PERRFOBJSZ_PMPERRFOBJSZ_S 0 #define GLHMC_PERRFOBJSZ_PMPERRFOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PERRFOBJSZ_RSVD_S 4 #define GLHMC_PERRFOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) #define GLHMC_PETIMERBASE(_i) (0x00525A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PETIMERBASE_MAX_INDEX 7 #define GLHMC_PETIMERBASE_FPMPETIMERBASE_S 0 #define GLHMC_PETIMERBASE_FPMPETIMERBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PETIMERCNT(_i) (0x00525B00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PETIMERCNT_MAX_INDEX 7 #define GLHMC_PETIMERCNT_FPMPETIMERCNT_S 0 #define GLHMC_PETIMERCNT_FPMPETIMERCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PETIMERMAX 0x00522084 /* Reset Source: CORER */ #define GLHMC_PETIMERMAX_PMPETIMERMAX_S 0 #define GLHMC_PETIMERMAX_PMPETIMERMAX_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PETIMEROBJSZ 0x00522080 /* Reset Source: CORER */ #define GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_S 0 #define GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PEXFBASE(_i) (0x00524E00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEXFBASE_MAX_INDEX 7 #define GLHMC_PEXFBASE_FPMPEXFBASE_S 0 #define GLHMC_PEXFBASE_FPMPEXFBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEXFCNT(_i) (0x00524F00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PEXFCNT_MAX_INDEX 7 #define GLHMC_PEXFCNT_FPMPEXFCNT_S 0 #define GLHMC_PEXFCNT_FPMPEXFCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_PEXFFLBASE(_i) (0x00525000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: 
CORER */ #define GLHMC_PEXFFLBASE_MAX_INDEX 7 #define GLHMC_PEXFFLBASE_FPMPEXFFLBASE_S 0 #define GLHMC_PEXFFLBASE_FPMPEXFFLBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_PEXFFLMAX 0x0052204C /* Reset Source: CORER */ #define GLHMC_PEXFFLMAX_PMPEXFFLMAX_S 0 #define GLHMC_PEXFFLMAX_PMPEXFFLMAX_M MAKEMASK(0xFFFFFFF, 0) #define GLHMC_PEXFMAX 0x00522048 /* Reset Source: CORER */ #define GLHMC_PEXFMAX_PMPEXFMAX_S 0 #define GLHMC_PEXFMAX_PMPEXFMAX_M MAKEMASK(0xFFFFFFF, 0) #define GLHMC_PEXFOBJSZ 0x00522044 /* Reset Source: CORER */ #define GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_S 0 #define GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_M MAKEMASK(0xF, 0) #define GLHMC_PFPESDPART(_i) (0x00520880 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PFPESDPART_MAX_INDEX 7 #define GLHMC_PFPESDPART_PMSDBASE_S 0 #define GLHMC_PFPESDPART_PMSDBASE_M MAKEMASK(0xFFF, 0) #define GLHMC_PFPESDPART_PMSDSIZE_S 16 #define GLHMC_PFPESDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16) #define GLHMC_PFPESDPART_FPMAT(_i) (0x00100880 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_PFPESDPART_FPMAT_MAX_INDEX 7 #define GLHMC_PFPESDPART_FPMAT_PMSDBASE_S 0 #define GLHMC_PFPESDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0) #define GLHMC_PFPESDPART_FPMAT_PMSDSIZE_S 16 #define GLHMC_PFPESDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16) #define GLHMC_SDPART(_i) (0x00520800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_SDPART_MAX_INDEX 7 #define GLHMC_SDPART_PMSDBASE_S 0 #define GLHMC_SDPART_PMSDBASE_M MAKEMASK(0xFFF, 0) #define GLHMC_SDPART_PMSDSIZE_S 16 #define GLHMC_SDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16) #define GLHMC_SDPART_FPMAT(_i) (0x00100800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLHMC_SDPART_FPMAT_MAX_INDEX 7 #define GLHMC_SDPART_FPMAT_PMSDBASE_S 0 #define GLHMC_SDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0) #define GLHMC_SDPART_FPMAT_PMSDSIZE_S 16 #define GLHMC_SDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16) #define GLHMC_VFAPBVTINUSEBASE(_i) (0x0052CA00 + ((_i) * 4)) /* 
_i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 #define GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_S 0 #define GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFCEQPART(_i) (0x00502F00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFCEQPART_MAX_INDEX 31 #define GLHMC_VFCEQPART_PMCEQBASE_S 0 #define GLHMC_VFCEQPART_PMCEQBASE_M MAKEMASK(0x3FF, 0) #define GLHMC_VFCEQPART_PMCEQSIZE_S 16 #define GLHMC_VFCEQPART_PMCEQSIZE_M MAKEMASK(0x3FF, 16) #define GLHMC_VFDBCQPART(_i) (0x00502E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFDBCQPART_MAX_INDEX 31 #define GLHMC_VFDBCQPART_PMDBCQBASE_S 0 #define GLHMC_VFDBCQPART_PMDBCQBASE_M MAKEMASK(0x3FFF, 0) #define GLHMC_VFDBCQPART_PMDBCQSIZE_S 16 #define GLHMC_VFDBCQPART_PMDBCQSIZE_M MAKEMASK(0x7FFF, 16) #define GLHMC_VFDBQPPART(_i) (0x00504520 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFDBQPPART_MAX_INDEX 31 #define GLHMC_VFDBQPPART_PMDBQPBASE_S 0 #define GLHMC_VFDBQPPART_PMDBQPBASE_M MAKEMASK(0x3FFF, 0) #define GLHMC_VFDBQPPART_PMDBQPSIZE_S 16 #define GLHMC_VFDBQPPART_PMDBQPSIZE_M MAKEMASK(0x7FFF, 16) #define GLHMC_VFFSIAVBASE(_i) (0x0052D600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFFSIAVBASE_MAX_INDEX 31 #define GLHMC_VFFSIAVBASE_FPMFSIAVBASE_S 0 #define GLHMC_VFFSIAVBASE_FPMFSIAVBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFFSIAVCNT(_i) (0x0052D700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFFSIAVCNT_MAX_INDEX 31 #define GLHMC_VFFSIAVCNT_FPMFSIAVCNT_S 0 #define GLHMC_VFFSIAVCNT_FPMFSIAVCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFFSIMCBASE(_i) (0x0052E000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFFSIMCBASE_MAX_INDEX 31 #define GLHMC_VFFSIMCBASE_FPMFSIMCBASE_S 0 #define GLHMC_VFFSIMCBASE_FPMFSIMCBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFFSIMCCNT(_i) (0x0052E100 + ((_i) * 4)) /* _i=0...31 */ /* Reset 
Source: CORER */ #define GLHMC_VFFSIMCCNT_MAX_INDEX 31 #define GLHMC_VFFSIMCCNT_FPMFSIMCSZ_S 0 #define GLHMC_VFFSIMCCNT_FPMFSIMCSZ_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPDINV_MAX_INDEX 31 #define GLHMC_VFPDINV_PMSDIDX_S 0 #define GLHMC_VFPDINV_PMSDIDX_M MAKEMASK(0xFFF, 0) #define GLHMC_VFPDINV_PMSDPARTSEL_S 15 #define GLHMC_VFPDINV_PMSDPARTSEL_M BIT(15) #define GLHMC_VFPDINV_PMPDIDX_S 16 #define GLHMC_VFPDINV_PMPDIDX_M MAKEMASK(0x1FF, 16) #define GLHMC_VFPDINV_FPMAT(_i) (0x00108300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPDINV_FPMAT_MAX_INDEX 31 #define GLHMC_VFPDINV_FPMAT_PMSDIDX_S 0 #define GLHMC_VFPDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) #define GLHMC_VFPDINV_FPMAT_PMSDPARTSEL_S 15 #define GLHMC_VFPDINV_FPMAT_PMSDPARTSEL_M BIT(15) #define GLHMC_VFPDINV_FPMAT_PMPDIDX_S 16 #define GLHMC_VFPDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16) #define GLHMC_VFPEARPBASE(_i) (0x0052C800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEARPBASE_MAX_INDEX 31 #define GLHMC_VFPEARPBASE_FPMPEARPBASE_S 0 #define GLHMC_VFPEARPBASE_FPMPEARPBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEARPCNT(_i) (0x0052C900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEARPCNT_MAX_INDEX 31 #define GLHMC_VFPEARPCNT_FPMPEARPCNT_S 0 #define GLHMC_VFPEARPCNT_FPMPEARPCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPECQBASE(_i) (0x0052C200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPECQBASE_MAX_INDEX 31 #define GLHMC_VFPECQBASE_FPMPECQBASE_S 0 #define GLHMC_VFPECQBASE_FPMPECQBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPECQCNT(_i) (0x0052C300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPECQCNT_MAX_INDEX 31 #define GLHMC_VFPECQCNT_FPMPECQCNT_S 0 #define GLHMC_VFPECQCNT_FPMPECQCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEHDRBASE(_i) (0x0052E200 + ((_i) * 4)) /* _i=0...31 */ /* 
Reset Source: CORER */ #define GLHMC_VFPEHDRBASE_MAX_INDEX 31 #define GLHMC_VFPEHDRBASE_GLHMC_PEHDRBASE_S 0 #define GLHMC_VFPEHDRBASE_GLHMC_PEHDRBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEHDRCNT(_i) (0x0052E300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEHDRCNT_MAX_INDEX 31 #define GLHMC_VFPEHDRCNT_GLHMC_PEHDRCNT_S 0 #define GLHMC_VFPEHDRCNT_GLHMC_PEHDRCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEHTCNT(_i) (0x0052C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEHTCNT_MAX_INDEX 31 #define GLHMC_VFPEHTCNT_FPMPEHTCNT_S 0 #define GLHMC_VFPEHTCNT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEHTCNT_FPMAT(_i) (0x0010C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEHTCNT_FPMAT_MAX_INDEX 31 #define GLHMC_VFPEHTCNT_FPMAT_FPMPEHTCNT_S 0 #define GLHMC_VFPEHTCNT_FPMAT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEHTEBASE(_i) (0x0052C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEHTEBASE_MAX_INDEX 31 #define GLHMC_VFPEHTEBASE_FPMPEHTEBASE_S 0 #define GLHMC_VFPEHTEBASE_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEHTEBASE_FPMAT(_i) (0x0010C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEHTEBASE_FPMAT_MAX_INDEX 31 #define GLHMC_VFPEHTEBASE_FPMAT_FPMPEHTEBASE_S 0 #define GLHMC_VFPEHTEBASE_FPMAT_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEMDBASE(_i) (0x0052E400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEMDBASE_MAX_INDEX 31 #define GLHMC_VFPEMDBASE_GLHMC_PEMDBASE_S 0 #define GLHMC_VFPEMDBASE_GLHMC_PEMDBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEMDCNT(_i) (0x0052E500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEMDCNT_MAX_INDEX 31 #define GLHMC_VFPEMDCNT_GLHMC_PEMDCNT_S 0 #define GLHMC_VFPEMDCNT_GLHMC_PEMDCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEMRBASE(_i) (0x0052CC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER 
*/ #define GLHMC_VFPEMRBASE_MAX_INDEX 31 #define GLHMC_VFPEMRBASE_FPMPEMRBASE_S 0 #define GLHMC_VFPEMRBASE_FPMPEMRBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEMRCNT(_i) (0x0052CD00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEMRCNT_MAX_INDEX 31 #define GLHMC_VFPEMRCNT_FPMPEMRSZ_S 0 #define GLHMC_VFPEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCBASE_MAX_INDEX 31 #define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0 #define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCCNT_MAX_INDEX 31 #define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0 #define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEOOISCFFLBASE(_i) (0x0052EC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEOOISCFFLBASE_MAX_INDEX 31 #define GLHMC_VFPEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_S 0 #define GLHMC_VFPEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPEPBLBASE(_i) (0x0052D800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEPBLBASE_MAX_INDEX 31 #define GLHMC_VFPEPBLBASE_FPMPEPBLBASE_S 0 #define GLHMC_VFPEPBLBASE_FPMPEPBLBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEPBLCNT(_i) (0x0052D900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEPBLCNT_MAX_INDEX 31 #define GLHMC_VFPEPBLCNT_FPMPEPBLCNT_S 0 #define GLHMC_VFPEPBLCNT_FPMPEPBLCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEQ1BASE(_i) (0x0052D200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEQ1BASE_MAX_INDEX 31 #define GLHMC_VFPEQ1BASE_FPMPEQ1BASE_S 0 #define GLHMC_VFPEQ1BASE_FPMPEQ1BASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEQ1CNT(_i) (0x0052D300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define 
GLHMC_VFPEQ1CNT_MAX_INDEX 31 #define GLHMC_VFPEQ1CNT_FPMPEQ1CNT_S 0 #define GLHMC_VFPEQ1CNT_FPMPEQ1CNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEQ1FLBASE(_i) (0x0052D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 #define GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_S 0 #define GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEQPBASE(_i) (0x0052C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEQPBASE_MAX_INDEX 31 #define GLHMC_VFPEQPBASE_FPMPEQPBASE_S 0 #define GLHMC_VFPEQPBASE_FPMPEQPBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEQPCNT(_i) (0x0052C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEQPCNT_MAX_INDEX 31 #define GLHMC_VFPEQPCNT_FPMPEQPCNT_S 0 #define GLHMC_VFPEQPCNT_FPMPEQPCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPERRFBASE(_i) (0x0052E800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPERRFBASE_MAX_INDEX 31 #define GLHMC_VFPERRFBASE_GLHMC_PERRFBASE_S 0 #define GLHMC_VFPERRFBASE_GLHMC_PERRFBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPERRFCNT(_i) (0x0052E900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPERRFCNT_MAX_INDEX 31 #define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_S 0 #define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPERRFFLBASE_MAX_INDEX 31 #define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0 #define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPETIMERBASE_MAX_INDEX 31 #define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0 #define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPETIMERCNT(_i) (0x0052DB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPETIMERCNT_MAX_INDEX 
31 #define GLHMC_VFPETIMERCNT_FPMPETIMERCNT_S 0 #define GLHMC_VFPETIMERCNT_FPMPETIMERCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEXFBASE(_i) (0x0052CE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEXFBASE_MAX_INDEX 31 #define GLHMC_VFPEXFBASE_FPMPEXFBASE_S 0 #define GLHMC_VFPEXFBASE_FPMPEXFBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFPEXFCNT(_i) (0x0052CF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEXFCNT_MAX_INDEX 31 #define GLHMC_VFPEXFCNT_FPMPEXFCNT_S 0 #define GLHMC_VFPEXFCNT_FPMPEXFCNT_M MAKEMASK(0x1FFFFFFF, 0) #define GLHMC_VFPEXFFLBASE(_i) (0x0052D000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFPEXFFLBASE_MAX_INDEX 31 #define GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_S 0 #define GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_M MAKEMASK(0xFFFFFF, 0) #define GLHMC_VFSDDATAHIGH(_i) (0x00528200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATAHIGH_MAX_INDEX 31 #define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_S 0 #define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31 #define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 #define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDDATALOW_MAX_INDEX 31 #define GLHMC_VFSDDATALOW_PMSDVALID_S 0 #define GLHMC_VFSDDATALOW_PMSDVALID_M BIT(0) #define GLHMC_VFSDDATALOW_PMSDTYPE_S 1 #define GLHMC_VFSDDATALOW_PMSDTYPE_M BIT(1) #define GLHMC_VFSDDATALOW_PMSDBPCOUNT_S 2 #define GLHMC_VFSDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) #define GLHMC_VFSDDATALOW_PMSDDATALOW_S 12 #define GLHMC_VFSDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) #define GLHMC_VFSDDATALOW_FPMAT(_i) (0x00108100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define 
GLHMC_VFSDDATALOW_FPMAT_MAX_INDEX 31 #define GLHMC_VFSDDATALOW_FPMAT_PMSDVALID_S 0 #define GLHMC_VFSDDATALOW_FPMAT_PMSDVALID_M BIT(0) #define GLHMC_VFSDDATALOW_FPMAT_PMSDTYPE_S 1 #define GLHMC_VFSDDATALOW_FPMAT_PMSDTYPE_M BIT(1) #define GLHMC_VFSDDATALOW_FPMAT_PMSDBPCOUNT_S 2 #define GLHMC_VFSDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) #define GLHMC_VFSDDATALOW_FPMAT_PMSDDATALOW_S 12 #define GLHMC_VFSDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) #define GLHMC_VFSDPART(_i) (0x00528800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDPART_MAX_INDEX 31 #define GLHMC_VFSDPART_PMSDBASE_S 0 #define GLHMC_VFSDPART_PMSDBASE_M MAKEMASK(0xFFF, 0) #define GLHMC_VFSDPART_PMSDSIZE_S 16 #define GLHMC_VFSDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16) #define GLHMC_VFSDPART_FPMAT(_i) (0x00108800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLHMC_VFSDPART_FPMAT_MAX_INDEX 31 #define GLHMC_VFSDPART_FPMAT_PMSDBASE_S 0 #define GLHMC_VFSDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0) #define GLHMC_VFSDPART_FPMAT_PMSDSIZE_S 16 #define GLHMC_VFSDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16) #define GLMDOC_CACHESIZE 0x0051C06C /* Reset Source: CORER */ #define GLMDOC_CACHESIZE_WORD_SIZE_S 0 #define GLMDOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLMDOC_CACHESIZE_SETS_S 8 #define GLMDOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLMDOC_CACHESIZE_WAYS_S 20 #define GLMDOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define GLPBLOC0_CACHESIZE 0x00518074 /* Reset Source: CORER */ #define GLPBLOC0_CACHESIZE_WORD_SIZE_S 0 #define GLPBLOC0_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLPBLOC0_CACHESIZE_SETS_S 8 #define GLPBLOC0_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLPBLOC0_CACHESIZE_WAYS_S 20 #define GLPBLOC0_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define GLPBLOC1_CACHESIZE 0x0051A074 /* Reset Source: CORER */ #define GLPBLOC1_CACHESIZE_WORD_SIZE_S 0 #define GLPBLOC1_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLPBLOC1_CACHESIZE_SETS_S 8 #define 
GLPBLOC1_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLPBLOC1_CACHESIZE_WAYS_S 20 #define GLPBLOC1_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define GLPDOC_CACHESIZE 0x00530048 /* Reset Source: CORER */ #define GLPDOC_CACHESIZE_WORD_SIZE_S 0 #define GLPDOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLPDOC_CACHESIZE_SETS_S 8 #define GLPDOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLPDOC_CACHESIZE_WAYS_S 20 #define GLPDOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define GLPDOC_CACHESIZE_FPMAT 0x00110088 /* Reset Source: CORER */ #define GLPDOC_CACHESIZE_FPMAT_WORD_SIZE_S 0 #define GLPDOC_CACHESIZE_FPMAT_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLPDOC_CACHESIZE_FPMAT_SETS_S 8 #define GLPDOC_CACHESIZE_FPMAT_SETS_M MAKEMASK(0xFFF, 8) #define GLPDOC_CACHESIZE_FPMAT_WAYS_S 20 #define GLPDOC_CACHESIZE_FPMAT_WAYS_M MAKEMASK(0xF, 20) #define GLPEOC0_CACHESIZE 0x005140A8 /* Reset Source: CORER */ #define GLPEOC0_CACHESIZE_WORD_SIZE_S 0 #define GLPEOC0_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLPEOC0_CACHESIZE_SETS_S 8 #define GLPEOC0_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLPEOC0_CACHESIZE_WAYS_S 20 #define GLPEOC0_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define GLPEOC1_CACHESIZE 0x005160A8 /* Reset Source: CORER */ #define GLPEOC1_CACHESIZE_WORD_SIZE_S 0 #define GLPEOC1_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLPEOC1_CACHESIZE_SETS_S 8 #define GLPEOC1_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLPEOC1_CACHESIZE_WAYS_S 20 #define GLPEOC1_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define PFHMC_ERRORDATA 0x00520500 /* Reset Source: PFR */ #define PFHMC_ERRORDATA_HMC_ERROR_DATA_S 0 #define PFHMC_ERRORDATA_HMC_ERROR_DATA_M MAKEMASK(0x3FFFFFFF, 0) #define PFHMC_ERRORDATA_FPMAT 0x00100500 /* Reset Source: PFR */ #define PFHMC_ERRORDATA_FPMAT_HMC_ERROR_DATA_S 0 #define PFHMC_ERRORDATA_FPMAT_HMC_ERROR_DATA_M MAKEMASK(0x3FFFFFFF, 0) #define PFHMC_ERRORINFO 0x00520400 /* Reset Source: PFR */ #define PFHMC_ERRORINFO_PMF_INDEX_S 0 #define PFHMC_ERRORINFO_PMF_INDEX_M MAKEMASK(0x1F, 0) 
#define PFHMC_ERRORINFO_PMF_ISVF_S 7 #define PFHMC_ERRORINFO_PMF_ISVF_M BIT(7) #define PFHMC_ERRORINFO_HMC_ERROR_TYPE_S 8 #define PFHMC_ERRORINFO_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8) #define PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S 16 #define PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16) #define PFHMC_ERRORINFO_ERROR_DETECTED_S 31 #define PFHMC_ERRORINFO_ERROR_DETECTED_M BIT(31) #define PFHMC_ERRORINFO_FPMAT 0x00100400 /* Reset Source: PFR */ #define PFHMC_ERRORINFO_FPMAT_PMF_INDEX_S 0 #define PFHMC_ERRORINFO_FPMAT_PMF_INDEX_M MAKEMASK(0x1F, 0) #define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_S 7 #define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7) #define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8 #define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8) #define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16 #define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16) #define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31 #define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31) #define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */ #define PFHMC_PDINV_PMSDIDX_S 0 #define PFHMC_PDINV_PMSDIDX_M MAKEMASK(0xFFF, 0) #define PFHMC_PDINV_PMSDPARTSEL_S 15 #define PFHMC_PDINV_PMSDPARTSEL_M BIT(15) #define PFHMC_PDINV_PMPDIDX_S 16 #define PFHMC_PDINV_PMPDIDX_M MAKEMASK(0x1FF, 16) #define PFHMC_PDINV_FPMAT 0x00100300 /* Reset Source: PFR */ #define PFHMC_PDINV_FPMAT_PMSDIDX_S 0 #define PFHMC_PDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) #define PFHMC_PDINV_FPMAT_PMSDPARTSEL_S 15 #define PFHMC_PDINV_FPMAT_PMSDPARTSEL_M BIT(15) #define PFHMC_PDINV_FPMAT_PMPDIDX_S 16 #define PFHMC_PDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16) #define PFHMC_SDCMD 0x00520000 /* Reset Source: PFR */ #define PFHMC_SDCMD_PMSDIDX_S 0 #define PFHMC_SDCMD_PMSDIDX_M MAKEMASK(0xFFF, 0) #define PFHMC_SDCMD_PMSDPARTSEL_S 15 #define PFHMC_SDCMD_PMSDPARTSEL_M BIT(15) #define PFHMC_SDCMD_PMSDWR_S 31 #define PFHMC_SDCMD_PMSDWR_M BIT(31) #define PFHMC_SDCMD_FPMAT 0x00100000 /* Reset Source: PFR */ #define PFHMC_SDCMD_FPMAT_PMSDIDX_S 0 
#define PFHMC_SDCMD_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) #define PFHMC_SDCMD_FPMAT_PMSDPARTSEL_S 15 #define PFHMC_SDCMD_FPMAT_PMSDPARTSEL_M BIT(15) #define PFHMC_SDCMD_FPMAT_PMSDWR_S 31 #define PFHMC_SDCMD_FPMAT_PMSDWR_M BIT(31) #define PFHMC_SDDATAHIGH 0x00520200 /* Reset Source: PFR */ #define PFHMC_SDDATAHIGH_PMSDDATAHIGH_S 0 #define PFHMC_SDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define PFHMC_SDDATAHIGH_FPMAT 0x00100200 /* Reset Source: PFR */ #define PFHMC_SDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 #define PFHMC_SDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define PFHMC_SDDATALOW 0x00520100 /* Reset Source: PFR */ #define PFHMC_SDDATALOW_PMSDVALID_S 0 #define PFHMC_SDDATALOW_PMSDVALID_M BIT(0) #define PFHMC_SDDATALOW_PMSDTYPE_S 1 #define PFHMC_SDDATALOW_PMSDTYPE_M BIT(1) #define PFHMC_SDDATALOW_PMSDBPCOUNT_S 2 #define PFHMC_SDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) #define PFHMC_SDDATALOW_PMSDDATALOW_S 12 #define PFHMC_SDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) #define PFHMC_SDDATALOW_FPMAT 0x00100100 /* Reset Source: PFR */ #define PFHMC_SDDATALOW_FPMAT_PMSDVALID_S 0 #define PFHMC_SDDATALOW_FPMAT_PMSDVALID_M BIT(0) #define PFHMC_SDDATALOW_FPMAT_PMSDTYPE_S 1 #define PFHMC_SDDATALOW_FPMAT_PMSDTYPE_M BIT(1) #define PFHMC_SDDATALOW_FPMAT_PMSDBPCOUNT_S 2 #define PFHMC_SDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) #define PFHMC_SDDATALOW_FPMAT_PMSDDATALOW_S 12 #define PFHMC_SDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) #define GL_DSI_REPC 0x00294208 /* Reset Source: CORER */ #define GL_DSI_REPC_NO_DESC_CNT_S 0 #define GL_DSI_REPC_NO_DESC_CNT_M MAKEMASK(0xFFFF, 0) #define GL_DSI_REPC_ERROR_CNT_S 16 #define GL_DSI_REPC_ERROR_CNT_M MAKEMASK(0xFFFF, 16) #define GL_MDCK_TDAT_TCLAN 0x000FC0DC /* Reset Source: CORER */ #define GL_MDCK_TDAT_TCLAN_WRONG_ORDER_FORMAT_DESC_S 0 #define GL_MDCK_TDAT_TCLAN_WRONG_ORDER_FORMAT_DESC_M BIT(0) #define GL_MDCK_TDAT_TCLAN_UR_S 1 #define GL_MDCK_TDAT_TCLAN_UR_M BIT(1) #define 
GL_MDCK_TDAT_TCLAN_TAIL_DESC_NOT_DDESC_EOP_NOP_S 2 #define GL_MDCK_TDAT_TCLAN_TAIL_DESC_NOT_DDESC_EOP_NOP_M BIT(2) #define GL_MDCK_TDAT_TCLAN_FALSE_SCHEDULING_S 3 #define GL_MDCK_TDAT_TCLAN_FALSE_SCHEDULING_M BIT(3) #define GL_MDCK_TDAT_TCLAN_TAIL_VALUE_BIGGER_THAN_RING_LEN_S 4 #define GL_MDCK_TDAT_TCLAN_TAIL_VALUE_BIGGER_THAN_RING_LEN_M BIT(4) #define GL_MDCK_TDAT_TCLAN_MORE_THAN_8_DCMDS_IN_PKT_S 5 #define GL_MDCK_TDAT_TCLAN_MORE_THAN_8_DCMDS_IN_PKT_M BIT(5) #define GL_MDCK_TDAT_TCLAN_NO_HEAD_UPDATE_IN_QUANTA_S 6 #define GL_MDCK_TDAT_TCLAN_NO_HEAD_UPDATE_IN_QUANTA_M BIT(6) #define GL_MDCK_TDAT_TCLAN_PKT_LEN_NOT_LEGAL_S 7 #define GL_MDCK_TDAT_TCLAN_PKT_LEN_NOT_LEGAL_M BIT(7) #define GL_MDCK_TDAT_TCLAN_TSO_TLEN_NOT_COHERENT_WITH_SUM_BUFS_S 8 #define GL_MDCK_TDAT_TCLAN_TSO_TLEN_NOT_COHERENT_WITH_SUM_BUFS_M BIT(8) #define GL_MDCK_TDAT_TCLAN_TSO_TAIL_REACHED_BEFORE_TLEN_END_S 9 #define GL_MDCK_TDAT_TCLAN_TSO_TAIL_REACHED_BEFORE_TLEN_END_M BIT(9) #define GL_MDCK_TDAT_TCLAN_TSO_MORE_THAN_3_HDRS_S 10 #define GL_MDCK_TDAT_TCLAN_TSO_MORE_THAN_3_HDRS_M BIT(10) #define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_S 11 #define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11) #define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12 #define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12) #define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13 #define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13) #define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14 #define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14) #define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15 #define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_M BIT(15) #define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_CMDS_EXCEED_S 16 #define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_CMDS_EXCEED_M BIT(16) #define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_LAST_LSO_QUANTA_S 17 #define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_LAST_LSO_QUANTA_M BIT(17) #define 
GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_TLEN_S 18 #define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_TLEN_M BIT(18) #define GL_MDCK_TDAT_TCLAN_TSO_COMS_QUANTA_FINISHED_TOO_EARLY_S 19 #define GL_MDCK_TDAT_TCLAN_TSO_COMS_QUANTA_FINISHED_TOO_EARLY_M BIT(19) #define GL_MDCK_TDAT_TCLAN_COMS_NUM_PKTS_IN_QUANTA_S 20 #define GL_MDCK_TDAT_TCLAN_COMS_NUM_PKTS_IN_QUANTA_M BIT(20) #define GLCORE_CLKCTL_H 0x000B81E8 /* Reset Source: POR */ #define GLCORE_CLKCTL_H_UPPER_CLK_SRC_H_S 0 #define GLCORE_CLKCTL_H_UPPER_CLK_SRC_H_M MAKEMASK(0x3, 0) #define GLCORE_CLKCTL_H_LOWER_CLK_SRC_H_S 2 #define GLCORE_CLKCTL_H_LOWER_CLK_SRC_H_M MAKEMASK(0x3, 2) #define GLCORE_CLKCTL_H_PSM_CLK_SRC_H_S 4 #define GLCORE_CLKCTL_H_PSM_CLK_SRC_H_M MAKEMASK(0x3, 4) #define GLCORE_CLKCTL_H_RXCTL_CLK_SRC_H_S 6 #define GLCORE_CLKCTL_H_RXCTL_CLK_SRC_H_M MAKEMASK(0x3, 6) #define GLCORE_CLKCTL_H_UANA_CLK_SRC_H_S 8 #define GLCORE_CLKCTL_H_UANA_CLK_SRC_H_M MAKEMASK(0x7, 8) #define GLCORE_CLKCTL_L 0x000B8254 /* Reset Source: POR */ #define GLCORE_CLKCTL_L_UPPER_CLK_SRC_L_S 0 #define GLCORE_CLKCTL_L_UPPER_CLK_SRC_L_M MAKEMASK(0x3, 0) #define GLCORE_CLKCTL_L_LOWER_CLK_SRC_L_S 2 #define GLCORE_CLKCTL_L_LOWER_CLK_SRC_L_M MAKEMASK(0x3, 2) #define GLCORE_CLKCTL_L_PSM_CLK_SRC_L_S 4 #define GLCORE_CLKCTL_L_PSM_CLK_SRC_L_M MAKEMASK(0x3, 4) #define GLCORE_CLKCTL_L_RXCTL_CLK_SRC_L_S 6 #define GLCORE_CLKCTL_L_RXCTL_CLK_SRC_L_M MAKEMASK(0x3, 6) #define GLCORE_CLKCTL_L_UANA_CLK_SRC_L_S 8 #define GLCORE_CLKCTL_L_UANA_CLK_SRC_L_M MAKEMASK(0x7, 8) #define GLCORE_CLKCTL_M 0x000B8258 /* Reset Source: POR */ #define GLCORE_CLKCTL_M_UPPER_CLK_SRC_M_S 0 #define GLCORE_CLKCTL_M_UPPER_CLK_SRC_M_M MAKEMASK(0x3, 0) #define GLCORE_CLKCTL_M_LOWER_CLK_SRC_M_S 2 #define GLCORE_CLKCTL_M_LOWER_CLK_SRC_M_M MAKEMASK(0x3, 2) #define GLCORE_CLKCTL_M_PSM_CLK_SRC_M_S 4 #define GLCORE_CLKCTL_M_PSM_CLK_SRC_M_M MAKEMASK(0x3, 4) #define GLCORE_CLKCTL_M_RXCTL_CLK_SRC_M_S 6 #define GLCORE_CLKCTL_M_RXCTL_CLK_SRC_M_M MAKEMASK(0x3, 6) #define 
GLCORE_CLKCTL_M_UANA_CLK_SRC_M_S 8 #define GLCORE_CLKCTL_M_UANA_CLK_SRC_M_M MAKEMASK(0x7, 8) #define GLFOC_CACHESIZE 0x000AA074 /* Reset Source: CORER */ #define GLFOC_CACHESIZE_WORD_SIZE_S 0 #define GLFOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) #define GLFOC_CACHESIZE_SETS_S 8 #define GLFOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) #define GLFOC_CACHESIZE_WAYS_S 20 #define GLFOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) #define GLMAC_CLKSTAT 0x000B8210 /* Reset Source: POR */ #define GLMAC_CLKSTAT_P0_CLK_SPEED_S 0 #define GLMAC_CLKSTAT_P0_CLK_SPEED_M MAKEMASK(0xF, 0) #define GLMAC_CLKSTAT_P1_CLK_SPEED_S 4 #define GLMAC_CLKSTAT_P1_CLK_SPEED_M MAKEMASK(0xF, 4) #define GLMAC_CLKSTAT_P2_CLK_SPEED_S 8 #define GLMAC_CLKSTAT_P2_CLK_SPEED_M MAKEMASK(0xF, 8) #define GLMAC_CLKSTAT_P3_CLK_SPEED_S 12 #define GLMAC_CLKSTAT_P3_CLK_SPEED_M MAKEMASK(0xF, 12) #define GLMAC_CLKSTAT_P4_CLK_SPEED_S 16 #define GLMAC_CLKSTAT_P4_CLK_SPEED_M MAKEMASK(0xF, 16) #define GLMAC_CLKSTAT_P5_CLK_SPEED_S 20 #define GLMAC_CLKSTAT_P5_CLK_SPEED_M MAKEMASK(0xF, 20) #define GLMAC_CLKSTAT_P6_CLK_SPEED_S 24 #define GLMAC_CLKSTAT_P6_CLK_SPEED_M MAKEMASK(0xF, 24) #define GLMAC_CLKSTAT_P7_CLK_SPEED_S 28 #define GLMAC_CLKSTAT_P7_CLK_SPEED_M MAKEMASK(0xF, 28) #define GLTPB_100G_MAC_FC_THRESH 0x00099510 /* Reset Source: CORER */ #define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_S 0 #define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_S 16 #define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define E800_GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */ #define E800_GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0 #define E800_GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define E800_GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16 #define E800_GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define GLTPB_PACING_10G 0x000994E4 /* Reset Source: CORER */ #define GLTPB_PACING_10G_N_S 0 #define 
GLTPB_PACING_10G_N_M MAKEMASK(0xFF, 0) #define GLTPB_PACING_10G_K_S 8 #define GLTPB_PACING_10G_K_M MAKEMASK(0xFF, 8) #define GLTPB_PACING_10G_S_S 16 #define GLTPB_PACING_10G_S_M MAKEMASK(0x1FF, 16) #define GLTPB_PACING_25G 0x000994E0 /* Reset Source: CORER */ #define GLTPB_PACING_25G_N_S 0 #define GLTPB_PACING_25G_N_M MAKEMASK(0xFF, 0) #define GLTPB_PACING_25G_K_S 8 #define GLTPB_PACING_25G_K_M MAKEMASK(0xFF, 8) #define GLTPB_PACING_25G_S_S 16 #define GLTPB_PACING_25G_S_M MAKEMASK(0x1FF, 16) #define GLTPB_PORT_PACING_SPEED 0x000994E8 /* Reset Source: CORER */ #define GLTPB_PORT_PACING_SPEED_PORT0_SPEED_S 0 #define GLTPB_PORT_PACING_SPEED_PORT0_SPEED_M BIT(0) #define GLTPB_PORT_PACING_SPEED_PORT1_SPEED_S 1 #define GLTPB_PORT_PACING_SPEED_PORT1_SPEED_M BIT(1) #define GLTPB_PORT_PACING_SPEED_PORT2_SPEED_S 2 #define GLTPB_PORT_PACING_SPEED_PORT2_SPEED_M BIT(2) #define GLTPB_PORT_PACING_SPEED_PORT3_SPEED_S 3 #define GLTPB_PORT_PACING_SPEED_PORT3_SPEED_M BIT(3) #define GLTPB_PORT_PACING_SPEED_PORT4_SPEED_S 4 #define GLTPB_PORT_PACING_SPEED_PORT4_SPEED_M BIT(4) #define GLTPB_PORT_PACING_SPEED_PORT5_SPEED_S 5 #define GLTPB_PORT_PACING_SPEED_PORT5_SPEED_M BIT(5) #define GLTPB_PORT_PACING_SPEED_PORT6_SPEED_S 6 #define GLTPB_PORT_PACING_SPEED_PORT6_SPEED_M BIT(6) #define GLTPB_PORT_PACING_SPEED_PORT7_SPEED_S 7 #define GLTPB_PORT_PACING_SPEED_PORT7_SPEED_M BIT(7) #define TPB_CFG_SCHEDULED_BC_THRESHOLD 0x00099494 /* Reset Source: CORER */ #define TPB_CFG_SCHEDULED_BC_THRESHOLD_THRESHOLD_S 0 #define TPB_CFG_SCHEDULED_BC_THRESHOLD_THRESHOLD_M MAKEMASK(0x7FFF, 0) #define GL_UFUSE_SOC 0x000A400C /* Reset Source: POR */ #define GL_UFUSE_SOC_PORT_MODE_S 0 #define GL_UFUSE_SOC_PORT_MODE_M MAKEMASK(0x3, 0) #define GL_UFUSE_SOC_BANDWIDTH_S 2 #define GL_UFUSE_SOC_BANDWIDTH_M MAKEMASK(0x3, 2) #define GL_UFUSE_SOC_PE_DISABLE_S 4 #define GL_UFUSE_SOC_PE_DISABLE_M BIT(4) #define GL_UFUSE_SOC_SWITCH_MODE_S 5 #define GL_UFUSE_SOC_SWITCH_MODE_M BIT(5) #define 
GL_UFUSE_SOC_CSR_PROTECTION_ENABLE_S 6 #define GL_UFUSE_SOC_CSR_PROTECTION_ENABLE_M BIT(6) #define GL_UFUSE_SOC_SERIAL_50G_S 7 #define GL_UFUSE_SOC_SERIAL_50G_M BIT(7) #define GL_UFUSE_SOC_NIC_ID_S 8 #define GL_UFUSE_SOC_NIC_ID_M BIT(8) #define GL_UFUSE_SOC_BLOCK_BME_TO_FW_S 9 #define GL_UFUSE_SOC_BLOCK_BME_TO_FW_M BIT(9) #define GL_UFUSE_SOC_SOC_TYPE_S 10 #define GL_UFUSE_SOC_SOC_TYPE_M BIT(10) #define GL_UFUSE_SOC_BTS_MODE_S 11 #define GL_UFUSE_SOC_BTS_MODE_M BIT(11) #define E800_GL_UFUSE_SOC_SPARE_FUSES_S 12 #define E800_GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12) #define EMPINT_GPIO_ENA 0x000880C0 /* Reset Source: POR */ #define EMPINT_GPIO_ENA_GPIO0_ENA_S 0 #define EMPINT_GPIO_ENA_GPIO0_ENA_M BIT(0) #define EMPINT_GPIO_ENA_GPIO1_ENA_S 1 #define EMPINT_GPIO_ENA_GPIO1_ENA_M BIT(1) #define EMPINT_GPIO_ENA_GPIO2_ENA_S 2 #define EMPINT_GPIO_ENA_GPIO2_ENA_M BIT(2) #define EMPINT_GPIO_ENA_GPIO3_ENA_S 3 #define EMPINT_GPIO_ENA_GPIO3_ENA_M BIT(3) #define EMPINT_GPIO_ENA_GPIO4_ENA_S 4 #define EMPINT_GPIO_ENA_GPIO4_ENA_M BIT(4) #define EMPINT_GPIO_ENA_GPIO5_ENA_S 5 #define EMPINT_GPIO_ENA_GPIO5_ENA_M BIT(5) #define EMPINT_GPIO_ENA_GPIO6_ENA_S 6 #define EMPINT_GPIO_ENA_GPIO6_ENA_M BIT(6) #define GLGEN_MAC_LINK_TOPO 0x000B81DC /* Reset Source: GLOBR */ #define GLGEN_MAC_LINK_TOPO_LINK_TOPO_S 0 #define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M MAKEMASK(0x3, 0) #define GLINT_CEQCTL(_INT) (0x0015C000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define GLINT_CEQCTL_MAX_INDEX 2047 #define GLINT_CEQCTL_MSIX_INDX_S 0 #define GLINT_CEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define GLINT_CEQCTL_ITR_INDX_S 11 #define GLINT_CEQCTL_ITR_INDX_M MAKEMASK(0x3, 11) #define GLINT_CEQCTL_CAUSE_ENA_S 30 #define GLINT_CEQCTL_CAUSE_ENA_M BIT(30) #define GLINT_CEQCTL_INTEVENT_S 31 #define GLINT_CEQCTL_INTEVENT_M BIT(31) #define GLINT_CTL 0x0016CC54 /* Reset Source: CORER */ #define GLINT_CTL_DIS_AUTOMASK_S 0 #define GLINT_CTL_DIS_AUTOMASK_M BIT(0) #define GLINT_CTL_RSVD_S 1 #define 
GLINT_CTL_RSVD_M MAKEMASK(0x7FFF, 1) #define GLINT_CTL_ITR_GRAN_200_S 16 #define GLINT_CTL_ITR_GRAN_200_M MAKEMASK(0xF, 16) #define GLINT_CTL_ITR_GRAN_100_S 20 #define GLINT_CTL_ITR_GRAN_100_M MAKEMASK(0xF, 20) #define GLINT_CTL_ITR_GRAN_50_S 24 #define GLINT_CTL_ITR_GRAN_50_M MAKEMASK(0xF, 24) #define GLINT_CTL_ITR_GRAN_25_S 28 #define GLINT_CTL_ITR_GRAN_25_M MAKEMASK(0xF, 28) #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define GLINT_DYN_CTL_MAX_INDEX 2047 #define GLINT_DYN_CTL_INTENA_S 0 #define GLINT_DYN_CTL_INTENA_M BIT(0) #define GLINT_DYN_CTL_CLEARPBA_S 1 #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) #define GLINT_DYN_CTL_SWINT_TRIG_S 2 #define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) #define GLINT_DYN_CTL_ITR_INDX_S 3 #define GLINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3) #define GLINT_DYN_CTL_INTERVAL_S 5 #define GLINT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5) #define GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24 #define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) #define GLINT_DYN_CTL_SW_ITR_INDX_S 25 #define GLINT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25) #define GLINT_DYN_CTL_WB_ON_ITR_S 30 #define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_S 31 #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) #define GLINT_FW_TOOL_CTL 0x0016C840 /* Reset Source: CORER */ #define GLINT_FW_TOOL_CTL_MSIX_INDX_S 0 #define GLINT_FW_TOOL_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define GLINT_FW_TOOL_CTL_ITR_INDX_S 11 #define GLINT_FW_TOOL_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define GLINT_FW_TOOL_CTL_CAUSE_ENA_S 30 #define GLINT_FW_TOOL_CTL_CAUSE_ENA_M BIT(30) #define GLINT_FW_TOOL_CTL_INTEVENT_S 31 #define GLINT_FW_TOOL_CTL_INTEVENT_M BIT(31) #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) /* _i=0...2, _INT=0...2047 */ /* Reset Source: CORER */ #define GLINT_ITR_MAX_INDEX 2 #define GLINT_ITR_INTERVAL_S 0 #define GLINT_ITR_INTERVAL_M MAKEMASK(0xFFF, 0) #define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* 
Reset Source: CORER */ #define GLINT_RATE_MAX_INDEX 2047 #define GLINT_RATE_INTERVAL_S 0 #define GLINT_RATE_INTERVAL_M MAKEMASK(0x3F, 0) #define GLINT_RATE_INTRL_ENA_S 6 #define GLINT_RATE_INTRL_ENA_M BIT(6) #define GLINT_TSYN_PFMSTR(_i) (0x0016CCC0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLINT_TSYN_PFMSTR_MAX_INDEX 1 #define GLINT_TSYN_PFMSTR_PF_MASTER_S 0 #define GLINT_TSYN_PFMSTR_PF_MASTER_M MAKEMASK(0x7, 0) #define GLINT_TSYN_PHY 0x0016CC50 /* Reset Source: CORER */ #define GLINT_TSYN_PHY_PHY_INDX_S 0 #define GLINT_TSYN_PHY_PHY_INDX_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLINT_TSYN_PHY_PHY_INDX_M : E800_GLINT_TSYN_PHY_PHY_INDX_M) #define E800_GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0) #define E830_GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0xFF, 0) #define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define GLINT_VECT2FUNC_MAX_INDEX 2047 #define GLINT_VECT2FUNC_VF_NUM_S 0 #define GLINT_VECT2FUNC_VF_NUM_M MAKEMASK(0xFF, 0) #define GLINT_VECT2FUNC_PF_NUM_S 12 #define GLINT_VECT2FUNC_PF_NUM_M MAKEMASK(0x7, 12) #define GLINT_VECT2FUNC_IS_PF_S 16 #define GLINT_VECT2FUNC_IS_PF_M BIT(16) #define PF0INT_FW_HLP_CTL 0x0016C844 /* Reset Source: CORER */ #define PF0INT_FW_HLP_CTL_MSIX_INDX_S 0 #define PF0INT_FW_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_FW_HLP_CTL_ITR_INDX_S 11 #define PF0INT_FW_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_FW_HLP_CTL_CAUSE_ENA_S 30 #define PF0INT_FW_HLP_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_FW_HLP_CTL_INTEVENT_S 31 #define PF0INT_FW_HLP_CTL_INTEVENT_M BIT(31) #define PF0INT_FW_PSM_CTL 0x0016C848 /* Reset Source: CORER */ #define PF0INT_FW_PSM_CTL_MSIX_INDX_S 0 #define PF0INT_FW_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_FW_PSM_CTL_ITR_INDX_S 11 #define PF0INT_FW_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_FW_PSM_CTL_CAUSE_ENA_S 30 #define PF0INT_FW_PSM_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_FW_PSM_CTL_INTEVENT_S 31 #define 
PF0INT_FW_PSM_CTL_INTEVENT_M BIT(31) #define PF0INT_MBX_CPM_CTL 0x0016B2C0 /* Reset Source: CORER */ #define PF0INT_MBX_CPM_CTL_MSIX_INDX_S 0 #define PF0INT_MBX_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_MBX_CPM_CTL_ITR_INDX_S 11 #define PF0INT_MBX_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_MBX_CPM_CTL_CAUSE_ENA_S 30 #define PF0INT_MBX_CPM_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_MBX_CPM_CTL_INTEVENT_S 31 #define PF0INT_MBX_CPM_CTL_INTEVENT_M BIT(31) #define PF0INT_MBX_HLP_CTL 0x0016B2C4 /* Reset Source: CORER */ #define PF0INT_MBX_HLP_CTL_MSIX_INDX_S 0 #define PF0INT_MBX_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_MBX_HLP_CTL_ITR_INDX_S 11 #define PF0INT_MBX_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_MBX_HLP_CTL_CAUSE_ENA_S 30 #define PF0INT_MBX_HLP_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_MBX_HLP_CTL_INTEVENT_S 31 #define PF0INT_MBX_HLP_CTL_INTEVENT_M BIT(31) #define PF0INT_MBX_PSM_CTL 0x0016B2C8 /* Reset Source: CORER */ #define PF0INT_MBX_PSM_CTL_MSIX_INDX_S 0 #define PF0INT_MBX_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_MBX_PSM_CTL_ITR_INDX_S 11 #define PF0INT_MBX_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_MBX_PSM_CTL_CAUSE_ENA_S 30 #define PF0INT_MBX_PSM_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_MBX_PSM_CTL_INTEVENT_S 31 #define PF0INT_MBX_PSM_CTL_INTEVENT_M BIT(31) #define PF0INT_OICR_CPM 0x0016CC40 /* Reset Source: CORER */ #define PF0INT_OICR_CPM_INTEVENT_S 0 #define PF0INT_OICR_CPM_INTEVENT_M BIT(0) #define PF0INT_OICR_CPM_QUEUE_S 1 #define PF0INT_OICR_CPM_QUEUE_M BIT(1) #define PF0INT_OICR_CPM_RSV1_S 2 #define PF0INT_OICR_CPM_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PF0INT_OICR_CPM_RSV1_M : E800_PF0INT_OICR_CPM_RSV1_M) #define E800_PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2) #define E830_PF0INT_OICR_CPM_RSV1_M MAKEMASK(0x3F, 2) #define E800_PF0INT_OICR_CPM_HH_COMP_S 10 #define E800_PF0INT_OICR_CPM_HH_COMP_M BIT(10) #define PF0INT_OICR_CPM_TSYN_TX_S 11 #define PF0INT_OICR_CPM_TSYN_TX_M BIT(11) #define PF0INT_OICR_CPM_TSYN_EVNT_S 12 #define PF0INT_OICR_CPM_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_CPM_TSYN_TGT_S 13 #define PF0INT_OICR_CPM_TSYN_TGT_M BIT(13) #define PF0INT_OICR_CPM_HLP_RDY_S 14 #define PF0INT_OICR_CPM_HLP_RDY_M BIT(14) #define PF0INT_OICR_CPM_CPM_RDY_S 15 #define PF0INT_OICR_CPM_CPM_RDY_M BIT(15) #define PF0INT_OICR_CPM_ECC_ERR_S 16 #define PF0INT_OICR_CPM_ECC_ERR_M BIT(16) #define PF0INT_OICR_CPM_RSV2_S 17 #define PF0INT_OICR_CPM_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_CPM_MAL_DETECT_S 19 #define PF0INT_OICR_CPM_MAL_DETECT_M BIT(19) #define PF0INT_OICR_CPM_GRST_S 20 #define PF0INT_OICR_CPM_GRST_M BIT(20) #define PF0INT_OICR_CPM_PCI_EXCEPTION_S 21 #define PF0INT_OICR_CPM_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_CPM_GPIO_S 22 #define PF0INT_OICR_CPM_GPIO_M BIT(22) #define PF0INT_OICR_CPM_RSV3_S 23 #define PF0INT_OICR_CPM_RSV3_M BIT(23) #define PF0INT_OICR_CPM_STORM_DETECT_S 24 #define PF0INT_OICR_CPM_STORM_DETECT_M BIT(24) #define PF0INT_OICR_CPM_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_CPM_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_CPM_HMC_ERR_S 26 #define PF0INT_OICR_CPM_HMC_ERR_M BIT(26) #define PF0INT_OICR_CPM_PE_PUSH_S 27 #define PF0INT_OICR_CPM_PE_PUSH_M BIT(27) #define PF0INT_OICR_CPM_PE_CRITERR_S 28 #define PF0INT_OICR_CPM_PE_CRITERR_M BIT(28) #define PF0INT_OICR_CPM_VFLR_S 29 #define PF0INT_OICR_CPM_VFLR_M BIT(29) #define PF0INT_OICR_CPM_XLR_HW_DONE_S 30 #define PF0INT_OICR_CPM_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_CPM_SWINT_S 31 #define PF0INT_OICR_CPM_SWINT_M BIT(31) #define PF0INT_OICR_CTL_CPM 0x0016CC48 /* Reset Source: CORER */ #define PF0INT_OICR_CTL_CPM_MSIX_INDX_S 0 #define 
PF0INT_OICR_CTL_CPM_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_OICR_CTL_CPM_ITR_INDX_S 11 #define PF0INT_OICR_CTL_CPM_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_OICR_CTL_CPM_CAUSE_ENA_S 30 #define PF0INT_OICR_CTL_CPM_CAUSE_ENA_M BIT(30) #define PF0INT_OICR_CTL_CPM_INTEVENT_S 31 #define PF0INT_OICR_CTL_CPM_INTEVENT_M BIT(31) #define PF0INT_OICR_CTL_HLP 0x0016CC5C /* Reset Source: CORER */ #define PF0INT_OICR_CTL_HLP_MSIX_INDX_S 0 #define PF0INT_OICR_CTL_HLP_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_OICR_CTL_HLP_ITR_INDX_S 11 #define PF0INT_OICR_CTL_HLP_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_OICR_CTL_HLP_CAUSE_ENA_S 30 #define PF0INT_OICR_CTL_HLP_CAUSE_ENA_M BIT(30) #define PF0INT_OICR_CTL_HLP_INTEVENT_S 31 #define PF0INT_OICR_CTL_HLP_INTEVENT_M BIT(31) #define PF0INT_OICR_CTL_PSM 0x0016CC64 /* Reset Source: CORER */ #define PF0INT_OICR_CTL_PSM_MSIX_INDX_S 0 #define PF0INT_OICR_CTL_PSM_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_OICR_CTL_PSM_ITR_INDX_S 11 #define PF0INT_OICR_CTL_PSM_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_OICR_CTL_PSM_CAUSE_ENA_S 30 #define PF0INT_OICR_CTL_PSM_CAUSE_ENA_M BIT(30) #define PF0INT_OICR_CTL_PSM_INTEVENT_S 31 #define PF0INT_OICR_CTL_PSM_INTEVENT_M BIT(31) #define PF0INT_OICR_ENA_CPM 0x0016CC60 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_CPM_RSV0_S 0 #define PF0INT_OICR_ENA_CPM_RSV0_M BIT(0) #define PF0INT_OICR_ENA_CPM_INT_ENA_S 1 #define PF0INT_OICR_ENA_CPM_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_ENA_HLP 0x0016CC4C /* Reset Source: CORER */ #define PF0INT_OICR_ENA_HLP_RSV0_S 0 #define PF0INT_OICR_ENA_HLP_RSV0_M BIT(0) #define PF0INT_OICR_ENA_HLP_INT_ENA_S 1 #define PF0INT_OICR_ENA_HLP_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_ENA_PSM 0x0016CC58 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_PSM_RSV0_S 0 #define PF0INT_OICR_ENA_PSM_RSV0_M BIT(0) #define PF0INT_OICR_ENA_PSM_INT_ENA_S 1 #define PF0INT_OICR_ENA_PSM_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_HLP 0x0016CC68 /* 
Reset Source: CORER */ #define PF0INT_OICR_HLP_INTEVENT_S 0 #define PF0INT_OICR_HLP_INTEVENT_M BIT(0) #define PF0INT_OICR_HLP_QUEUE_S 1 #define PF0INT_OICR_HLP_QUEUE_M BIT(1) #define PF0INT_OICR_HLP_RSV1_S 2 #define PF0INT_OICR_HLP_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_HLP_RSV1_M : E800_PF0INT_OICR_HLP_RSV1_M) #define E800_PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2) #define E830_PF0INT_OICR_HLP_RSV1_M MAKEMASK(0x3F, 2) #define E800_PF0INT_OICR_HLP_HH_COMP_S 10 #define E800_PF0INT_OICR_HLP_HH_COMP_M BIT(10) #define PF0INT_OICR_HLP_TSYN_TX_S 11 #define PF0INT_OICR_HLP_TSYN_TX_M BIT(11) #define PF0INT_OICR_HLP_TSYN_EVNT_S 12 #define PF0INT_OICR_HLP_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_HLP_TSYN_TGT_S 13 #define PF0INT_OICR_HLP_TSYN_TGT_M BIT(13) #define PF0INT_OICR_HLP_HLP_RDY_S 14 #define PF0INT_OICR_HLP_HLP_RDY_M BIT(14) #define PF0INT_OICR_HLP_CPM_RDY_S 15 #define PF0INT_OICR_HLP_CPM_RDY_M BIT(15) #define PF0INT_OICR_HLP_ECC_ERR_S 16 #define PF0INT_OICR_HLP_ECC_ERR_M BIT(16) #define PF0INT_OICR_HLP_RSV2_S 17 #define PF0INT_OICR_HLP_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_HLP_MAL_DETECT_S 19 #define PF0INT_OICR_HLP_MAL_DETECT_M BIT(19) #define PF0INT_OICR_HLP_GRST_S 20 #define PF0INT_OICR_HLP_GRST_M BIT(20) #define PF0INT_OICR_HLP_PCI_EXCEPTION_S 21 #define PF0INT_OICR_HLP_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_HLP_GPIO_S 22 #define PF0INT_OICR_HLP_GPIO_M BIT(22) #define PF0INT_OICR_HLP_RSV3_S 23 #define PF0INT_OICR_HLP_RSV3_M BIT(23) #define PF0INT_OICR_HLP_STORM_DETECT_S 24 #define PF0INT_OICR_HLP_STORM_DETECT_M BIT(24) #define PF0INT_OICR_HLP_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_HLP_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_HLP_HMC_ERR_S 26 #define PF0INT_OICR_HLP_HMC_ERR_M BIT(26) #define PF0INT_OICR_HLP_PE_PUSH_S 27 #define PF0INT_OICR_HLP_PE_PUSH_M BIT(27) #define PF0INT_OICR_HLP_PE_CRITERR_S 28 #define PF0INT_OICR_HLP_PE_CRITERR_M BIT(28) #define PF0INT_OICR_HLP_VFLR_S 29 #define PF0INT_OICR_HLP_VFLR_M BIT(29) 
#define PF0INT_OICR_HLP_XLR_HW_DONE_S 30 #define PF0INT_OICR_HLP_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_HLP_SWINT_S 31 #define PF0INT_OICR_HLP_SWINT_M BIT(31) #define PF0INT_OICR_PSM 0x0016CC44 /* Reset Source: CORER */ #define PF0INT_OICR_PSM_INTEVENT_S 0 #define PF0INT_OICR_PSM_INTEVENT_M BIT(0) #define PF0INT_OICR_PSM_QUEUE_S 1 #define PF0INT_OICR_PSM_QUEUE_M BIT(1) #define PF0INT_OICR_PSM_RSV1_S 2 #define PF0INT_OICR_PSM_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_PSM_RSV1_M : E800_PF0INT_OICR_PSM_RSV1_M) #define E800_PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2) #define E830_PF0INT_OICR_PSM_RSV1_M MAKEMASK(0x3F, 2) #define E800_PF0INT_OICR_PSM_HH_COMP_S 10 #define E800_PF0INT_OICR_PSM_HH_COMP_M BIT(10) #define PF0INT_OICR_PSM_TSYN_TX_S 11 #define PF0INT_OICR_PSM_TSYN_TX_M BIT(11) #define PF0INT_OICR_PSM_TSYN_EVNT_S 12 #define PF0INT_OICR_PSM_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_PSM_TSYN_TGT_S 13 #define PF0INT_OICR_PSM_TSYN_TGT_M BIT(13) #define PF0INT_OICR_PSM_HLP_RDY_S 14 #define PF0INT_OICR_PSM_HLP_RDY_M BIT(14) #define PF0INT_OICR_PSM_CPM_RDY_S 15 #define PF0INT_OICR_PSM_CPM_RDY_M BIT(15) #define PF0INT_OICR_PSM_ECC_ERR_S 16 #define PF0INT_OICR_PSM_ECC_ERR_M BIT(16) #define PF0INT_OICR_PSM_RSV2_S 17 #define PF0INT_OICR_PSM_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_PSM_MAL_DETECT_S 19 #define PF0INT_OICR_PSM_MAL_DETECT_M BIT(19) #define PF0INT_OICR_PSM_GRST_S 20 #define PF0INT_OICR_PSM_GRST_M BIT(20) #define PF0INT_OICR_PSM_PCI_EXCEPTION_S 21 #define PF0INT_OICR_PSM_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_PSM_GPIO_S 22 #define PF0INT_OICR_PSM_GPIO_M BIT(22) #define PF0INT_OICR_PSM_RSV3_S 23 #define PF0INT_OICR_PSM_RSV3_M BIT(23) #define PF0INT_OICR_PSM_STORM_DETECT_S 24 #define PF0INT_OICR_PSM_STORM_DETECT_M BIT(24) #define PF0INT_OICR_PSM_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_PSM_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_PSM_HMC_ERR_S 26 #define PF0INT_OICR_PSM_HMC_ERR_M BIT(26) #define PF0INT_OICR_PSM_PE_PUSH_S 27 
#define PF0INT_OICR_PSM_PE_PUSH_M BIT(27) #define PF0INT_OICR_PSM_PE_CRITERR_S 28 #define PF0INT_OICR_PSM_PE_CRITERR_M BIT(28) #define PF0INT_OICR_PSM_VFLR_S 29 #define PF0INT_OICR_PSM_VFLR_M BIT(29) #define PF0INT_OICR_PSM_XLR_HW_DONE_S 30 #define PF0INT_OICR_PSM_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_PSM_SWINT_S 31 #define PF0INT_OICR_PSM_SWINT_M BIT(31) #define PF0INT_SB_CPM_CTL 0x0016B2CC /* Reset Source: CORER */ #define PF0INT_SB_CPM_CTL_MSIX_INDX_S 0 #define PF0INT_SB_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_SB_CPM_CTL_ITR_INDX_S 11 #define PF0INT_SB_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_SB_CPM_CTL_CAUSE_ENA_S 30 #define PF0INT_SB_CPM_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_SB_CPM_CTL_INTEVENT_S 31 #define PF0INT_SB_CPM_CTL_INTEVENT_M BIT(31) #define PF0INT_SB_HLP_CTL 0x0016B640 /* Reset Source: CORER */ #define PF0INT_SB_HLP_CTL_MSIX_INDX_S 0 #define PF0INT_SB_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PF0INT_SB_HLP_CTL_ITR_INDX_S 11 #define PF0INT_SB_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PF0INT_SB_HLP_CTL_CAUSE_ENA_S 30 #define PF0INT_SB_HLP_CTL_CAUSE_ENA_M BIT(30) #define PF0INT_SB_HLP_CTL_INTEVENT_S 31 #define PF0INT_SB_HLP_CTL_INTEVENT_M BIT(31) #define PFINT_AEQCTL 0x0016CB00 /* Reset Source: CORER */ #define PFINT_AEQCTL_MSIX_INDX_S 0 #define PFINT_AEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PFINT_AEQCTL_ITR_INDX_S 11 #define PFINT_AEQCTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PFINT_AEQCTL_CAUSE_ENA_S 30 #define PFINT_AEQCTL_CAUSE_ENA_M BIT(30) #define PFINT_AEQCTL_INTEVENT_S 31 #define PFINT_AEQCTL_INTEVENT_M BIT(31) #define PFINT_ALLOC 0x001D2600 /* Reset Source: CORER */ #define PFINT_ALLOC_FIRST_S 0 #define PFINT_ALLOC_FIRST_M MAKEMASK(0x7FF, 0) #define PFINT_ALLOC_LAST_S 12 #define PFINT_ALLOC_LAST_M MAKEMASK(0x7FF, 12) #define PFINT_ALLOC_VALID_S 31 #define PFINT_ALLOC_VALID_M BIT(31) #define PFINT_ALLOC_PCI 0x0009D800 /* Reset Source: PCIR */ #define PFINT_ALLOC_PCI_FIRST_S 0 #define 
PFINT_ALLOC_PCI_FIRST_M MAKEMASK(0x7FF, 0) #define PFINT_ALLOC_PCI_LAST_S 12 #define PFINT_ALLOC_PCI_LAST_M MAKEMASK(0x7FF, 12) #define PFINT_ALLOC_PCI_VALID_S 31 #define PFINT_ALLOC_PCI_VALID_M BIT(31) #define PFINT_FW_CTL 0x0016C800 /* Reset Source: CORER */ #define PFINT_FW_CTL_MSIX_INDX_S 0 #define PFINT_FW_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PFINT_FW_CTL_ITR_INDX_S 11 #define PFINT_FW_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(30) #define PFINT_FW_CTL_INTEVENT_S 31 #define PFINT_FW_CTL_INTEVENT_M BIT(31) #define PFINT_GPIO_ENA 0x00088080 /* Reset Source: CORER */ #define PFINT_GPIO_ENA_GPIO0_ENA_S 0 #define PFINT_GPIO_ENA_GPIO0_ENA_M BIT(0) #define PFINT_GPIO_ENA_GPIO1_ENA_S 1 #define PFINT_GPIO_ENA_GPIO1_ENA_M BIT(1) #define PFINT_GPIO_ENA_GPIO2_ENA_S 2 #define PFINT_GPIO_ENA_GPIO2_ENA_M BIT(2) #define PFINT_GPIO_ENA_GPIO3_ENA_S 3 #define PFINT_GPIO_ENA_GPIO3_ENA_M BIT(3) #define PFINT_GPIO_ENA_GPIO4_ENA_S 4 #define PFINT_GPIO_ENA_GPIO4_ENA_M BIT(4) #define PFINT_GPIO_ENA_GPIO5_ENA_S 5 #define PFINT_GPIO_ENA_GPIO5_ENA_M BIT(5) #define PFINT_GPIO_ENA_GPIO6_ENA_S 6 #define PFINT_GPIO_ENA_GPIO6_ENA_M BIT(6) #define PFINT_MBX_CTL 0x0016B280 /* Reset Source: CORER */ #define PFINT_MBX_CTL_MSIX_INDX_S 0 #define PFINT_MBX_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PFINT_MBX_CTL_ITR_INDX_S 11 #define PFINT_MBX_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PFINT_MBX_CTL_CAUSE_ENA_S 30 #define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30) #define PFINT_MBX_CTL_INTEVENT_S 31 #define PFINT_MBX_CTL_INTEVENT_M BIT(31) #define PFINT_OICR 0x0016CA00 /* Reset Source: CORER */ #define PFINT_OICR_INTEVENT_S 0 #define PFINT_OICR_INTEVENT_M BIT(0) #define PFINT_OICR_QUEUE_S 1 #define PFINT_OICR_QUEUE_M BIT(1) #define PFINT_OICR_RSV1_S 2 #define PFINT_OICR_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PFINT_OICR_RSV1_M : E800_PFINT_OICR_RSV1_M) #define E800_PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2) #define E830_PFINT_OICR_RSV1_M MAKEMASK(0x3F, 2) #define E800_PFINT_OICR_HH_COMP_S 10 #define E800_PFINT_OICR_HH_COMP_M BIT(10) #define PFINT_OICR_TSYN_TX_S 11 #define PFINT_OICR_TSYN_TX_M BIT(11) #define PFINT_OICR_TSYN_EVNT_S 12 #define PFINT_OICR_TSYN_EVNT_M BIT(12) #define PFINT_OICR_TSYN_TGT_S 13 #define PFINT_OICR_TSYN_TGT_M BIT(13) #define PFINT_OICR_HLP_RDY_S 14 #define PFINT_OICR_HLP_RDY_M BIT(14) #define PFINT_OICR_CPM_RDY_S 15 #define PFINT_OICR_CPM_RDY_M BIT(15) #define PFINT_OICR_ECC_ERR_S 16 #define PFINT_OICR_ECC_ERR_M BIT(16) #define PFINT_OICR_RSV2_S 17 #define PFINT_OICR_RSV2_M MAKEMASK(0x3, 17) #define PFINT_OICR_MAL_DETECT_S 19 #define PFINT_OICR_MAL_DETECT_M BIT(19) #define PFINT_OICR_GRST_S 20 #define PFINT_OICR_GRST_M BIT(20) #define PFINT_OICR_PCI_EXCEPTION_S 21 #define PFINT_OICR_PCI_EXCEPTION_M BIT(21) #define PFINT_OICR_GPIO_S 22 #define PFINT_OICR_GPIO_M BIT(22) #define PFINT_OICR_RSV3_S 23 #define PFINT_OICR_RSV3_M BIT(23) #define PFINT_OICR_STORM_DETECT_S 24 #define PFINT_OICR_STORM_DETECT_M BIT(24) #define PFINT_OICR_LINK_STAT_CHANGE_S 25 #define PFINT_OICR_LINK_STAT_CHANGE_M BIT(25) #define PFINT_OICR_HMC_ERR_S 26 #define PFINT_OICR_HMC_ERR_M BIT(26) #define PFINT_OICR_PE_PUSH_S 27 #define PFINT_OICR_PE_PUSH_M BIT(27) #define PFINT_OICR_PE_CRITERR_S 28 #define PFINT_OICR_PE_CRITERR_M BIT(28) #define PFINT_OICR_VFLR_S 29 #define PFINT_OICR_VFLR_M BIT(29) #define PFINT_OICR_XLR_HW_DONE_S 30 #define PFINT_OICR_XLR_HW_DONE_M BIT(30) #define PFINT_OICR_SWINT_S 31 #define PFINT_OICR_SWINT_M BIT(31) #define PFINT_OICR_CTL 0x0016CA80 /* Reset Source: CORER */ #define PFINT_OICR_CTL_MSIX_INDX_S 0 #define PFINT_OICR_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PFINT_OICR_CTL_ITR_INDX_S 11 #define PFINT_OICR_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PFINT_OICR_CTL_CAUSE_ENA_S 30 #define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30) #define 
PFINT_OICR_CTL_INTEVENT_S 31 #define PFINT_OICR_CTL_INTEVENT_M BIT(31) #define PFINT_OICR_ENA 0x0016C900 /* Reset Source: CORER */ #define PFINT_OICR_ENA_RSV0_S 0 #define PFINT_OICR_ENA_RSV0_M BIT(0) #define PFINT_OICR_ENA_INT_ENA_S 1 #define PFINT_OICR_ENA_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PFINT_SB_CTL 0x0016B600 /* Reset Source: CORER */ #define PFINT_SB_CTL_MSIX_INDX_S 0 #define PFINT_SB_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define PFINT_SB_CTL_ITR_INDX_S 11 #define PFINT_SB_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define PFINT_SB_CTL_CAUSE_ENA_S 30 #define PFINT_SB_CTL_CAUSE_ENA_M BIT(30) #define PFINT_SB_CTL_INTEVENT_S 31 #define PFINT_SB_CTL_INTEVENT_M BIT(31) #define PFINT_TSYN_MSK 0x0016C980 /* Reset Source: CORER */ #define PFINT_TSYN_MSK_PHY_INDX_S 0 #define PFINT_TSYN_MSK_PHY_INDX_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFINT_TSYN_MSK_PHY_INDX_M : E800_PFINT_TSYN_MSK_PHY_INDX_M) #define E800_PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0) #define E830_PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0xFF, 0) #define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define QINT_RQCTL_MAX_INDEX 2047 #define QINT_RQCTL_MSIX_INDX_S 0 #define QINT_RQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define QINT_RQCTL_ITR_INDX_S 11 #define QINT_RQCTL_ITR_INDX_M MAKEMASK(0x3, 11) #define QINT_RQCTL_CAUSE_ENA_S 30 #define QINT_RQCTL_CAUSE_ENA_M BIT(30) #define QINT_RQCTL_INTEVENT_S 31 #define QINT_RQCTL_INTEVENT_M BIT(31) #define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */ #define QINT_TQCTL_MAX_INDEX 16383 #define QINT_TQCTL_MSIX_INDX_S 0 #define QINT_TQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define QINT_TQCTL_ITR_INDX_S 11 #define QINT_TQCTL_ITR_INDX_M MAKEMASK(0x3, 11) #define QINT_TQCTL_CAUSE_ENA_S 30 #define QINT_TQCTL_CAUSE_ENA_M BIT(30) #define QINT_TQCTL_INTEVENT_S 31 #define QINT_TQCTL_INTEVENT_M BIT(31) #define VPINT_AEQCTL(_VF) (0x0016B800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: 
CORER */ #define VPINT_AEQCTL_MAX_INDEX 255 #define VPINT_AEQCTL_MSIX_INDX_S 0 #define VPINT_AEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define VPINT_AEQCTL_ITR_INDX_S 11 #define VPINT_AEQCTL_ITR_INDX_M MAKEMASK(0x3, 11) #define VPINT_AEQCTL_CAUSE_ENA_S 30 #define VPINT_AEQCTL_CAUSE_ENA_M BIT(30) #define VPINT_AEQCTL_INTEVENT_S 31 #define VPINT_AEQCTL_INTEVENT_M BIT(31) #define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPINT_ALLOC_MAX_INDEX 255 #define VPINT_ALLOC_FIRST_S 0 #define VPINT_ALLOC_FIRST_M MAKEMASK(0x7FF, 0) #define VPINT_ALLOC_LAST_S 12 #define VPINT_ALLOC_LAST_M MAKEMASK(0x7FF, 12) #define VPINT_ALLOC_VALID_S 31 #define VPINT_ALLOC_VALID_M BIT(31) #define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PCIR */ #define VPINT_ALLOC_PCI_MAX_INDEX 255 #define VPINT_ALLOC_PCI_FIRST_S 0 #define VPINT_ALLOC_PCI_FIRST_M MAKEMASK(0x7FF, 0) #define VPINT_ALLOC_PCI_LAST_S 12 #define VPINT_ALLOC_PCI_LAST_M MAKEMASK(0x7FF, 12) #define VPINT_ALLOC_PCI_VALID_S 31 #define VPINT_ALLOC_PCI_VALID_M BIT(31) #define VPINT_MBX_CPM_CTL(_VP128) (0x0016B000 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VPINT_MBX_CPM_CTL_MAX_INDEX 127 #define VPINT_MBX_CPM_CTL_MSIX_INDX_S 0 #define VPINT_MBX_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define VPINT_MBX_CPM_CTL_ITR_INDX_S 11 #define VPINT_MBX_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define VPINT_MBX_CPM_CTL_CAUSE_ENA_S 30 #define VPINT_MBX_CPM_CTL_CAUSE_ENA_M BIT(30) #define VPINT_MBX_CPM_CTL_INTEVENT_S 31 #define VPINT_MBX_CPM_CTL_INTEVENT_M BIT(31) #define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VPINT_MBX_CTL_MAX_INDEX 767 #define VPINT_MBX_CTL_MSIX_INDX_S 0 #define VPINT_MBX_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define VPINT_MBX_CTL_ITR_INDX_S 11 #define VPINT_MBX_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define VPINT_MBX_CTL_CAUSE_ENA_S 30 #define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30) 
#define VPINT_MBX_CTL_INTEVENT_S 31 #define VPINT_MBX_CTL_INTEVENT_M BIT(31) #define VPINT_MBX_HLP_CTL(_VP16) (0x0016B200 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VPINT_MBX_HLP_CTL_MAX_INDEX 15 #define VPINT_MBX_HLP_CTL_MSIX_INDX_S 0 #define VPINT_MBX_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define VPINT_MBX_HLP_CTL_ITR_INDX_S 11 #define VPINT_MBX_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define VPINT_MBX_HLP_CTL_CAUSE_ENA_S 30 #define VPINT_MBX_HLP_CTL_CAUSE_ENA_M BIT(30) #define VPINT_MBX_HLP_CTL_INTEVENT_S 31 #define VPINT_MBX_HLP_CTL_INTEVENT_M BIT(31) #define VPINT_MBX_PSM_CTL(_VP16) (0x0016B240 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VPINT_MBX_PSM_CTL_MAX_INDEX 15 #define VPINT_MBX_PSM_CTL_MSIX_INDX_S 0 #define VPINT_MBX_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define VPINT_MBX_PSM_CTL_ITR_INDX_S 11 #define VPINT_MBX_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define VPINT_MBX_PSM_CTL_CAUSE_ENA_S 30 #define VPINT_MBX_PSM_CTL_CAUSE_ENA_M BIT(30) #define VPINT_MBX_PSM_CTL_INTEVENT_S 31 #define VPINT_MBX_PSM_CTL_INTEVENT_M BIT(31) #define VPINT_SB_CPM_CTL(_VP128) (0x0016B400 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define VPINT_SB_CPM_CTL_MAX_INDEX 127 #define VPINT_SB_CPM_CTL_MSIX_INDX_S 0 #define VPINT_SB_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) #define VPINT_SB_CPM_CTL_ITR_INDX_S 11 #define VPINT_SB_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) #define VPINT_SB_CPM_CTL_CAUSE_ENA_S 30 #define VPINT_SB_CPM_CTL_CAUSE_ENA_M BIT(30) #define VPINT_SB_CPM_CTL_INTEVENT_S 31 #define VPINT_SB_CPM_CTL_INTEVENT_M BIT(31) #define GL_HLP_PRT_IPG_PREAMBLE_SIZE(_i) (0x00049240 + ((_i) * 4)) /* _i=0...20 */ /* Reset Source: CORER */ #define GL_HLP_PRT_IPG_PREAMBLE_SIZE_MAX_INDEX 20 #define GL_HLP_PRT_IPG_PREAMBLE_SIZE_IPG_PREAMBLE_SIZE_S 0 #define GL_HLP_PRT_IPG_PREAMBLE_SIZE_IPG_PREAMBLE_SIZE_M MAKEMASK(0xFF, 0) #define GL_TDPU_PSM_DEFAULT_RECIPE(_i) (0x00049294 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define 
GL_TDPU_PSM_DEFAULT_RECIPE_MAX_INDEX 3 #define GL_TDPU_PSM_DEFAULT_RECIPE_ADD_IPG_S 0 #define GL_TDPU_PSM_DEFAULT_RECIPE_ADD_IPG_M BIT(0) #define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_CRC_S 1 #define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_CRC_M BIT(1) #define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_ESP_TRAILER_S 2 #define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_ESP_TRAILER_M BIT(2) #define GL_TDPU_PSM_DEFAULT_RECIPE_INCLUDE_L2_PAD_S 3 #define GL_TDPU_PSM_DEFAULT_RECIPE_INCLUDE_L2_PAD_M BIT(3) #define GL_TDPU_PSM_DEFAULT_RECIPE_DEFAULT_UPDATE_MODE_S 4 #define GL_TDPU_PSM_DEFAULT_RECIPE_DEFAULT_UPDATE_MODE_M BIT(4) #define GLLAN_PF_RECIPE(_i) (0x0029420C + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLLAN_PF_RECIPE_MAX_INDEX 7 #define GLLAN_PF_RECIPE_RECIPE_S 0 #define GLLAN_PF_RECIPE_RECIPE_M MAKEMASK(0x3, 0) #define GLLAN_RCTL_0 0x002941F8 /* Reset Source: CORER */ #define GLLAN_RCTL_0_PXE_MODE_S 0 #define GLLAN_RCTL_0_PXE_MODE_M BIT(0) #define GLLAN_RCTL_1 0x002941FC /* Reset Source: CORER */ #define GLLAN_RCTL_1_RXMAX_EXPANSION_S 12 #define GLLAN_RCTL_1_RXMAX_EXPANSION_M MAKEMASK(0xF, 12) #define GLLAN_RCTL_1_RXDRDCTL_S 17 #define GLLAN_RCTL_1_RXDRDCTL_M BIT(17) #define GLLAN_RCTL_1_RXDESCRDROEN_S 18 #define GLLAN_RCTL_1_RXDESCRDROEN_M BIT(18) #define GLLAN_RCTL_1_RXDATAWRROEN_S 19 #define GLLAN_RCTL_1_RXDATAWRROEN_M BIT(19) #define GLLAN_TSOMSK_F 0x00049308 /* Reset Source: CORER */ #define GLLAN_TSOMSK_F_TCPMSKF_S 0 #define GLLAN_TSOMSK_F_TCPMSKF_M MAKEMASK(0xFFF, 0) #define GLLAN_TSOMSK_L 0x00049310 /* Reset Source: CORER */ #define GLLAN_TSOMSK_L_TCPMSKL_S 0 #define GLLAN_TSOMSK_L_TCPMSKL_M MAKEMASK(0xFFF, 0) #define GLLAN_TSOMSK_M 0x0004930C /* Reset Source: CORER */ #define GLLAN_TSOMSK_M_TCPMSKM_S 0 #define GLLAN_TSOMSK_M_TCPMSKM_M MAKEMASK(0xFFF, 0) #define PFLAN_CP_QALLOC 0x00075700 /* Reset Source: CORER */ #define PFLAN_CP_QALLOC_FIRSTQ_S 0 #define PFLAN_CP_QALLOC_FIRSTQ_M MAKEMASK(0x1FF, 0) #define PFLAN_CP_QALLOC_LASTQ_S 16 #define PFLAN_CP_QALLOC_LASTQ_M 
MAKEMASK(0x1FF, 16) #define PFLAN_CP_QALLOC_VALID_S 31 #define PFLAN_CP_QALLOC_VALID_M BIT(31) #define PFLAN_DB_QALLOC 0x00075680 /* Reset Source: CORER */ #define PFLAN_DB_QALLOC_FIRSTQ_S 0 #define PFLAN_DB_QALLOC_FIRSTQ_M MAKEMASK(0xFF, 0) #define PFLAN_DB_QALLOC_LASTQ_S 16 #define PFLAN_DB_QALLOC_LASTQ_M MAKEMASK(0xFF, 16) #define PFLAN_DB_QALLOC_VALID_S 31 #define PFLAN_DB_QALLOC_VALID_M BIT(31) #define PFLAN_RX_QALLOC 0x001D2500 /* Reset Source: CORER */ #define PFLAN_RX_QALLOC_FIRSTQ_S 0 #define PFLAN_RX_QALLOC_FIRSTQ_M MAKEMASK(0x7FF, 0) #define PFLAN_RX_QALLOC_LASTQ_S 16 #define PFLAN_RX_QALLOC_LASTQ_M MAKEMASK(0x7FF, 16) #define PFLAN_RX_QALLOC_VALID_S 31 #define PFLAN_RX_QALLOC_VALID_M BIT(31) #define PFLAN_TX_QALLOC 0x001D2580 /* Reset Source: CORER */ #define PFLAN_TX_QALLOC_FIRSTQ_S 0 #define PFLAN_TX_QALLOC_FIRSTQ_M MAKEMASK(0x3FFF, 0) #define PFLAN_TX_QALLOC_LASTQ_S 16 #define PFLAN_TX_QALLOC_LASTQ_M MAKEMASK(0x3FFF, 16) #define PFLAN_TX_QALLOC_VALID_S 31 #define PFLAN_TX_QALLOC_VALID_M BIT(31) #define PRT_TDPUL2TAGSEN 0x00040BA0 /* Reset Source: CORER */ #define PRT_TDPUL2TAGSEN_ENABLE_S 0 #define PRT_TDPUL2TAGSEN_ENABLE_M MAKEMASK(0xFF, 0) #define PRT_TDPUL2TAGSEN_NONLAST_TAG_S 8 #define PRT_TDPUL2TAGSEN_NONLAST_TAG_M MAKEMASK(0xFF, 8) #define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) /* _i=0...7, _QRX=0...2047 */ /* Reset Source: CORER */ #define QRX_CONTEXT_MAX_INDEX 7 #define QRX_CONTEXT_RXQ_CONTEXT_S 0 #define QRX_CONTEXT_RXQ_CONTEXT_M MAKEMASK(0xFFFFFFFF, 0) #define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: PFR */ #define QRX_CTRL_MAX_INDEX 2047 #define QRX_CTRL_QENA_REQ_S 0 #define QRX_CTRL_QENA_REQ_M BIT(0) #define QRX_CTRL_FAST_QDIS_S 1 #define QRX_CTRL_FAST_QDIS_M BIT(1) #define QRX_CTRL_QENA_STAT_S 2 #define QRX_CTRL_QENA_STAT_M BIT(2) #define QRX_CTRL_CDE_S 3 #define QRX_CTRL_CDE_M BIT(3) #define QRX_CTRL_CDS_S 4 #define QRX_CTRL_CDS_M BIT(4) #define QRX_ITR(_QRX) (0x00292000 + 
((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define QRX_ITR_MAX_INDEX 2047 #define QRX_ITR_NO_EXPR_S 0 #define QRX_ITR_NO_EXPR_M BIT(0) #define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define QRX_TAIL_MAX_INDEX 2047 #define QRX_TAIL_TAIL_S 0 #define QRX_TAIL_TAIL_M MAKEMASK(0x1FFF, 0) #define VPDSI_RX_QTABLE(_i, _VP16) (0x00074C00 + ((_i) * 64 + (_VP16) * 4)) /* _i=0...15, _VP16=0...15 */ /* Reset Source: CORER */ #define VPDSI_RX_QTABLE_MAX_INDEX 15 #define VPDSI_RX_QTABLE_PAGE_INDEX0_S 0 #define VPDSI_RX_QTABLE_PAGE_INDEX0_M MAKEMASK(0x7F, 0) #define VPDSI_RX_QTABLE_PAGE_INDEX1_S 8 #define VPDSI_RX_QTABLE_PAGE_INDEX1_M MAKEMASK(0x7F, 8) #define VPDSI_RX_QTABLE_PAGE_INDEX2_S 16 #define VPDSI_RX_QTABLE_PAGE_INDEX2_M MAKEMASK(0x7F, 16) #define VPDSI_RX_QTABLE_PAGE_INDEX3_S 24 #define VPDSI_RX_QTABLE_PAGE_INDEX3_M MAKEMASK(0x7F, 24) #define VPDSI_TX_QTABLE(_i, _VP16) (0x001D2000 + ((_i) * 64 + (_VP16) * 4)) /* _i=0...15, _VP16=0...15 */ /* Reset Source: CORER */ #define VPDSI_TX_QTABLE_MAX_INDEX 15 #define VPDSI_TX_QTABLE_PAGE_INDEX0_S 0 #define VPDSI_TX_QTABLE_PAGE_INDEX0_M MAKEMASK(0x7F, 0) #define VPDSI_TX_QTABLE_PAGE_INDEX1_S 8 #define VPDSI_TX_QTABLE_PAGE_INDEX1_M MAKEMASK(0x7F, 8) #define VPDSI_TX_QTABLE_PAGE_INDEX2_S 16 #define VPDSI_TX_QTABLE_PAGE_INDEX2_M MAKEMASK(0x7F, 16) #define VPDSI_TX_QTABLE_PAGE_INDEX3_S 24 #define VPDSI_TX_QTABLE_PAGE_INDEX3_M MAKEMASK(0x7F, 24) #define VPLAN_DB_QTABLE(_i, _VF) (0x00070000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...3, _VF=0...255 */ /* Reset Source: CORER */ #define VPLAN_DB_QTABLE_MAX_INDEX 3 #define VPLAN_DB_QTABLE_QINDEX_S 0 #define VPLAN_DB_QTABLE_QINDEX_M MAKEMASK(0x1FF, 0) #define VPLAN_DSI_VF_MODE(_VP16) (0x002D2C00 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define VPLAN_DSI_VF_MODE_MAX_INDEX 15 #define VPLAN_DSI_VF_MODE_LAN_DSI_VF_MODE_S 0 #define VPLAN_DSI_VF_MODE_LAN_DSI_VF_MODE_M BIT(0) #define VPLAN_RX_QBASE(_VF) (0x00072000 + 
((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPLAN_RX_QBASE_MAX_INDEX 255 #define VPLAN_RX_QBASE_VFFIRSTQ_S 0 #define VPLAN_RX_QBASE_VFFIRSTQ_M MAKEMASK(0x7FF, 0) #define VPLAN_RX_QBASE_VFNUMQ_S 16 #define VPLAN_RX_QBASE_VFNUMQ_M MAKEMASK(0xFF, 16) #define VPLAN_RX_QBASE_VFQTABLE_ENA_S 31 #define VPLAN_RX_QBASE_VFQTABLE_ENA_M BIT(31) #define VPLAN_RX_QTABLE(_i, _VF) (0x00060000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...15, _VF=0...255 */ /* Reset Source: CORER */ #define VPLAN_RX_QTABLE_MAX_INDEX 15 #define VPLAN_RX_QTABLE_QINDEX_S 0 #define VPLAN_RX_QTABLE_QINDEX_M MAKEMASK(0xFFF, 0) #define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPLAN_RXQ_MAPENA_MAX_INDEX 255 #define VPLAN_RXQ_MAPENA_RX_ENA_S 0 #define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0) #define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPLAN_TX_QBASE_MAX_INDEX 255 #define VPLAN_TX_QBASE_VFFIRSTQ_S 0 #define VPLAN_TX_QBASE_VFFIRSTQ_M MAKEMASK(0x3FFF, 0) #define VPLAN_TX_QBASE_VFNUMQ_S 16 #define VPLAN_TX_QBASE_VFNUMQ_M MAKEMASK(0xFF, 16) #define VPLAN_TX_QBASE_VFQTABLE_ENA_S 31 #define VPLAN_TX_QBASE_VFQTABLE_ENA_M BIT(31) #define VPLAN_TX_QTABLE(_i, _VF) (0x001C0000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...15, _VF=0...255 */ /* Reset Source: CORER */ #define VPLAN_TX_QTABLE_MAX_INDEX 15 #define VPLAN_TX_QTABLE_QINDEX_S 0 #define VPLAN_TX_QTABLE_QINDEX_M MAKEMASK(0x7FFF, 0) #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPLAN_TXQ_MAPENA_MAX_INDEX 255 #define VPLAN_TXQ_MAPENA_TX_ENA_S 0 #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) #define VSILAN_QBASE(_VSI) (0x0044C000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ #define VSILAN_QBASE_MAX_INDEX 767 #define VSILAN_QBASE_VSIBASE_S 0 #define VSILAN_QBASE_VSIBASE_M MAKEMASK(0x7FF, 0) #define VSILAN_QBASE_VSIQTABLE_ENA_S 11 #define VSILAN_QBASE_VSIQTABLE_ENA_M BIT(11) #define 
VSILAN_QTABLE(_i, _VSI) (0x00440000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...7, _VSI=0...767 */ /* Reset Source: PFR */ #define VSILAN_QTABLE_MAX_INDEX 7 #define VSILAN_QTABLE_QINDEX_0_S 0 #define VSILAN_QTABLE_QINDEX_0_M MAKEMASK(0x7FF, 0) #define VSILAN_QTABLE_QINDEX_1_S 16 #define VSILAN_QTABLE_QINDEX_1_M MAKEMASK(0x7FF, 16) #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0 #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0) #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0 #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0) #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0 #define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0) #define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0 #define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0) #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0 #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0) #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0 #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0) #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0 #define 
E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0) #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0 #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0) #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0 #define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0) #define E800_PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0 #define E800_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0) #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0 #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0) #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0 #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0 #define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0) #define E800_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0 #define E800_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0) 
#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */ #define E800_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0 #define E800_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0) #define PRTMAC_LINK_DOWN_COUNTER_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_LINK_DOWN_COUNTER : E800_PRTMAC_LINK_DOWN_COUNTER) #define E800_PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */ #define E830_PRTMAC_LINK_DOWN_COUNTER 0x001E2460 /* Reset Source: GLOBR */ #define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_S 0 #define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_M MAKEMASK(0xFFFF, 0) #define PRTMAC_MD_OVRRIDE_ENABLE_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_ENABLE(_i) : E800_PRTMAC_MD_OVRRIDE_ENABLE(_i)) #define E800_PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */ #define E830_PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E2500 + ((_i) * 32)) /* _i=0...1 */ /* Reset Source: GLOBR */ #define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX : E800_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX) #define E800_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7 #define E830_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 1 #define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_S 0 #define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0) #define PRTMAC_MD_OVRRIDE_VAL_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_VAL(_i) : E800_PRTMAC_MD_OVRRIDE_VAL(_i)) #define E800_PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */ #define E830_PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E2600 + ((_i) * 32)) /* _i=0...1 */ /* Reset Source: GLOBR */ #define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX : E800_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX) #define E800_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7 #define E830_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 1 #define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_S 0 #define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0) #define PRTMAC_RX_CNT_MRKR 0x001E48E0 /* Reset Source: GLOBR */ #define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_S 0 #define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_M MAKEMASK(0xFFFF, 0) #define PRTMAC_RX_PKT_DRP_CNT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT : E800_PRTMAC_RX_PKT_DRP_CNT) #define E800_PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */ #define E830_PRTMAC_RX_PKT_DRP_CNT 0x001E2420 /* Reset Source: GLOBR */ #define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_S 0 #define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M : E800_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M) #define E800_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFF, 0) #define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S : E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S) #define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16 #define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 28 #define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M : E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M) #define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xF, 28) #define PRTMAC_TX_CNT_MRKR 0x001E48C0 /* Reset Source: GLOBR */ #define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_S 0 #define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_M MAKEMASK(0xFFFF, 0) #define PRTMAC_TX_LNK_UP_CNT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_TX_LNK_UP_CNT : E800_PRTMAC_TX_LNK_UP_CNT) #define E800_PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */ #define E830_PRTMAC_TX_LNK_UP_CNT 0x001E2480 /* Reset Source: GLOBR */ #define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_S 0 #define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_M MAKEMASK(0xFFFF, 0) #define GL_MDCK_CFG1_TX_PQM 0x002D2DF4 /* Reset Source: CORER */ #define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DATA_LEN_S 0 #define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DATA_LEN_M MAKEMASK(0xFF, 0) #define GL_MDCK_CFG1_TX_PQM_SSO_MAX_PKT_CNT_S 8 #define GL_MDCK_CFG1_TX_PQM_SSO_MAX_PKT_CNT_M MAKEMASK(0x3F, 8) #define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DESC_CNT_S 16 #define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DESC_CNT_M MAKEMASK(0x3F, 16) #define GL_MDCK_EN_TX_PQM 0x002D2DFC /* Reset Source: CORER */ #define GL_MDCK_EN_TX_PQM_PCI_DUMMY_COMP_S 0 #define GL_MDCK_EN_TX_PQM_PCI_DUMMY_COMP_M BIT(0) #define GL_MDCK_EN_TX_PQM_PCI_UR_COMP_S 1 #define GL_MDCK_EN_TX_PQM_PCI_UR_COMP_M BIT(1) #define GL_MDCK_EN_TX_PQM_RCV_SH_BE_LSO_S 3 #define GL_MDCK_EN_TX_PQM_RCV_SH_BE_LSO_M BIT(3) #define GL_MDCK_EN_TX_PQM_Q_FL_MNG_EPY_CH_S 4 #define GL_MDCK_EN_TX_PQM_Q_FL_MNG_EPY_CH_M BIT(4) #define GL_MDCK_EN_TX_PQM_Q_EPY_MNG_FL_CH_S 5 #define GL_MDCK_EN_TX_PQM_Q_EPY_MNG_FL_CH_M BIT(5) #define GL_MDCK_EN_TX_PQM_LSO_NUMDESCS_ZERO_S 6 #define GL_MDCK_EN_TX_PQM_LSO_NUMDESCS_ZERO_M BIT(6) #define GL_MDCK_EN_TX_PQM_LSO_LENGTH_ZERO_S 7 #define GL_MDCK_EN_TX_PQM_LSO_LENGTH_ZERO_M BIT(7) #define GL_MDCK_EN_TX_PQM_LSO_MSS_BELOW_MIN_S 8 #define 
GL_MDCK_EN_TX_PQM_LSO_MSS_BELOW_MIN_M BIT(8) #define GL_MDCK_EN_TX_PQM_LSO_MSS_ABOVE_MAX_S 9 #define GL_MDCK_EN_TX_PQM_LSO_MSS_ABOVE_MAX_M BIT(9) #define GL_MDCK_EN_TX_PQM_LSO_HDR_SIZE_ZERO_S 10 #define GL_MDCK_EN_TX_PQM_LSO_HDR_SIZE_ZERO_M BIT(10) #define GL_MDCK_EN_TX_PQM_RCV_CNT_BE_LSO_S 11 #define GL_MDCK_EN_TX_PQM_RCV_CNT_BE_LSO_M BIT(11) #define GL_MDCK_EN_TX_PQM_SKIP_ONE_QT_ONLY_S 12 #define GL_MDCK_EN_TX_PQM_SKIP_ONE_QT_ONLY_M BIT(12) #define GL_MDCK_EN_TX_PQM_LSO_PKTCNT_ZERO_S 13 #define GL_MDCK_EN_TX_PQM_LSO_PKTCNT_ZERO_M BIT(13) #define GL_MDCK_EN_TX_PQM_SSO_LENGTH_ZERO_S 14 #define GL_MDCK_EN_TX_PQM_SSO_LENGTH_ZERO_M BIT(14) #define GL_MDCK_EN_TX_PQM_SSO_LENGTH_EXCEED_S 15 #define GL_MDCK_EN_TX_PQM_SSO_LENGTH_EXCEED_M BIT(15) #define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_ZERO_S 16 #define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_ZERO_M BIT(16) #define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_S 17 #define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17) #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18 #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18) #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19 #define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19) #define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20 #define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20) #define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21 #define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21) #define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22 #define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_M BIT(22) #define GL_MDCK_EN_TX_PQM_LSO_OVER_COMMS_Q_S 23 #define GL_MDCK_EN_TX_PQM_LSO_OVER_COMMS_Q_M BIT(23) #define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_S 24 #define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_M BIT(24) #define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_S 25 #define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_M BIT(25) #define E800_GL_MDCK_EN_TX_PQM_RSVD_S 26 #define E800_GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26) #define GL_MDCK_RX 0x0029422C /* Reset Source: CORER */ #define GL_MDCK_RX_DESC_ADDR_S 0 #define 
GL_MDCK_RX_DESC_ADDR_M BIT(0) #define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */ #define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0 #define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0) #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2 #define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2) #define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3 #define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_M BIT(3) #define GL_MDCK_TX_TDPU_MAL_CMD_ITR_DIS_S 4 #define GL_MDCK_TX_TDPU_MAL_CMD_ITR_DIS_M BIT(4) #define GL_MDCK_TX_TDPU_BIG_PKT_SIZE_ITR_DIS_S 5 #define GL_MDCK_TX_TDPU_BIG_PKT_SIZE_ITR_DIS_M BIT(5) #define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_S 6 #define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6) #define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7 #define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7) #define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8 #define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8) #define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9 #define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9) #define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10 #define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_M BIT(10) #define GL_MDET_RX 0x00294C00 /* Reset Source: CORER */ #define GL_MDET_RX_QNUM_S 0 #define GL_MDET_RX_QNUM_M MAKEMASK(0x7FFF, 0) #define GL_MDET_RX_VF_NUM_S 15 #define GL_MDET_RX_VF_NUM_M MAKEMASK(0xFF, 15) #define GL_MDET_RX_PF_NUM_S 23 #define GL_MDET_RX_PF_NUM_M MAKEMASK(0x7, 23) #define GL_MDET_RX_MAL_TYPE_S 26 #define GL_MDET_RX_MAL_TYPE_M MAKEMASK(0x1F, 26) #define GL_MDET_RX_VALID_S 31 #define GL_MDET_RX_VALID_M BIT(31) #define GL_MDET_TX_PQM 0x002D2E00 /* Reset Source: CORER */ #define GL_MDET_TX_PQM_PF_NUM_S 0 #define GL_MDET_TX_PQM_PF_NUM_M MAKEMASK(0x7, 0) #define GL_MDET_TX_PQM_VF_NUM_S 4 #define GL_MDET_TX_PQM_VF_NUM_M MAKEMASK(0xFF, 4) #define GL_MDET_TX_PQM_QNUM_S 12 #define GL_MDET_TX_PQM_QNUM_M MAKEMASK(0x3FFF, 12) #define GL_MDET_TX_PQM_MAL_TYPE_S 26 #define 
GL_MDET_TX_PQM_MAL_TYPE_M MAKEMASK(0x1F, 26) #define GL_MDET_TX_PQM_VALID_S 31 #define GL_MDET_TX_PQM_VALID_M BIT(31) #define GL_MDET_TX_TCLAN 0x000FC068 /* Reset Source: CORER */ #define GL_MDET_TX_TCLAN_QNUM_S 0 #define GL_MDET_TX_TCLAN_QNUM_M MAKEMASK(0x7FFF, 0) #define GL_MDET_TX_TCLAN_VF_NUM_S 15 #define GL_MDET_TX_TCLAN_VF_NUM_M MAKEMASK(0xFF, 15) #define GL_MDET_TX_TCLAN_PF_NUM_S 23 #define GL_MDET_TX_TCLAN_PF_NUM_M MAKEMASK(0x7, 23) #define GL_MDET_TX_TCLAN_MAL_TYPE_S 26 #define GL_MDET_TX_TCLAN_MAL_TYPE_M MAKEMASK(0x1F, 26) #define GL_MDET_TX_TCLAN_VALID_S 31 #define GL_MDET_TX_TCLAN_VALID_M BIT(31) #define GL_MDET_TX_TDPU 0x00049350 /* Reset Source: CORER */ #define GL_MDET_TX_TDPU_QNUM_S 0 #define GL_MDET_TX_TDPU_QNUM_M MAKEMASK(0x7FFF, 0) #define GL_MDET_TX_TDPU_VF_NUM_S 15 #define GL_MDET_TX_TDPU_VF_NUM_M MAKEMASK(0xFF, 15) #define GL_MDET_TX_TDPU_PF_NUM_S 23 #define GL_MDET_TX_TDPU_PF_NUM_M MAKEMASK(0x7, 23) #define GL_MDET_TX_TDPU_MAL_TYPE_S 26 #define GL_MDET_TX_TDPU_MAL_TYPE_M MAKEMASK(0x1F, 26) #define GL_MDET_TX_TDPU_VALID_S 31 #define GL_MDET_TX_TDPU_VALID_M BIT(31) #define GLRLAN_MDET 0x00294200 /* Reset Source: CORER */ #define GLRLAN_MDET_PCKT_EXTRCT_ERR_S 0 #define GLRLAN_MDET_PCKT_EXTRCT_ERR_M BIT(0) #define PF_MDET_RX 0x00294280 /* Reset Source: CORER */ #define PF_MDET_RX_VALID_S 0 #define PF_MDET_RX_VALID_M BIT(0) #define PF_MDET_TX_PQM 0x002D2C80 /* Reset Source: CORER */ #define PF_MDET_TX_PQM_VALID_S 0 #define PF_MDET_TX_PQM_VALID_M BIT(0) #define PF_MDET_TX_TCLAN 0x000FC000 /* Reset Source: CORER */ #define PF_MDET_TX_TCLAN_VALID_S 0 #define PF_MDET_TX_TCLAN_VALID_M BIT(0) #define PF_MDET_TX_TDPU 0x00040800 /* Reset Source: CORER */ #define PF_MDET_TX_TDPU_VALID_S 0 #define PF_MDET_TX_TDPU_VALID_M BIT(0) #define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VP_MDET_RX_MAX_INDEX 255 #define VP_MDET_RX_VALID_S 0 #define VP_MDET_RX_VALID_M BIT(0) #define VP_MDET_TX_PQM(_VF) (0x002D2000 + 
((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VP_MDET_TX_PQM_MAX_INDEX 255 #define VP_MDET_TX_PQM_VALID_S 0 #define VP_MDET_TX_PQM_VALID_M BIT(0) #define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VP_MDET_TX_TCLAN_MAX_INDEX 255 #define VP_MDET_TX_TCLAN_VALID_S 0 #define VP_MDET_TX_TCLAN_VALID_M BIT(0) #define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VP_MDET_TX_TDPU_MAX_INDEX 255 #define VP_MDET_TX_TDPU_VALID_S 0 #define VP_MDET_TX_TDPU_VALID_M BIT(0) #define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */ #define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9 #define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0 #define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M MAKEMASK(0xFFFFFFFF, 0) #define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */ #define GL_FWRESETCNT_FWRESETCNT_S 0 #define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GL_MNG_FW_RAM_STAT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FW_RAM_STAT : E800_GL_MNG_FW_RAM_STAT) #define E800_GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */ #define E830_GL_MNG_FW_RAM_STAT 0x000830F4 /* Reset Source: POR */ #define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_S 0 #define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_M BIT(0) #define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1 #define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1) #define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */ #define GL_MNG_FWSM_FW_MODES_S 0 #define GL_MNG_FWSM_FW_MODES_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_FW_MODES_M : E800_GL_MNG_FWSM_FW_MODES_M) #define E800_GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0) #define E830_GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x3, 0) #define GL_MNG_FWSM_RSV0_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GL_MNG_FWSM_RSV0_S : E800_GL_MNG_FWSM_RSV0_S) #define E800_GL_MNG_FWSM_RSV0_S 3 #define E830_GL_MNG_FWSM_RSV0_S 2 #define GL_MNG_FWSM_RSV0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_RSV0_M : E800_GL_MNG_FWSM_RSV0_M) #define E800_GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3) #define E830_GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2) #define GL_MNG_FWSM_EEP_RELOAD_IND_S 10 #define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10) #define GL_MNG_FWSM_RSV1_S 11 #define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11) #define GL_MNG_FWSM_RSV2_S 15 #define GL_MNG_FWSM_RSV2_M BIT(15) #define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16 #define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16) #define GL_MNG_FWSM_POR_AL_FAILURE_S 17 #define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17) #define GL_MNG_FWSM_RSV3_S 18 #define GL_MNG_FWSM_RSV3_M BIT(18) #define GL_MNG_FWSM_EXT_ERR_IND_S 19 #define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19) #define GL_MNG_FWSM_RSV4_S 25 #define GL_MNG_FWSM_RSV4_M BIT(25) #define GL_MNG_FWSM_RESERVED_11_S 26 #define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26) #define GL_MNG_FWSM_RSV5_S 30 #define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30) #define GL_MNG_HWARB_CTRL 0x000B6130 /* Reset Source: POR */ #define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_S 0 #define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_M BIT(0) #define GL_MNG_SHA_EXTEND_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND(_i) : E800_GL_MNG_SHA_EXTEND(_i)) #define E800_GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */ #define E830_GL_MNG_SHA_EXTEND(_i) (0x00083340 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */ #define GL_MNG_SHA_EXTEND_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GL_MNG_SHA_EXTEND_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_MAX_INDEX) #define E800_GL_MNG_SHA_EXTEND_MAX_INDEX 7 #define E830_GL_MNG_SHA_EXTEND_MAX_INDEX 11 #define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_S 0 #define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_M MAKEMASK(0xFFFFFFFF, 0) #define GL_MNG_SHA_EXTEND_ROM_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_ROM(_i) : E800_GL_MNG_SHA_EXTEND_ROM(_i)) #define E800_GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */ #define E830_GL_MNG_SHA_EXTEND_ROM(_i) (0x000832C0 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */ #define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX) #define E800_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7 #define E830_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 11 #define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_S 0 #define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_M MAKEMASK(0xFFFFFFFF, 0) #define GL_MNG_SHA_EXTEND_STATUS 0x00083148 /* Reset Source: EMPR */ #define GL_MNG_SHA_EXTEND_STATUS_STAGE_S 0 #define GL_MNG_SHA_EXTEND_STATUS_STAGE_M MAKEMASK(0x7, 0) #define GL_MNG_SHA_EXTEND_STATUS_FW_HALTED_S 30 #define GL_MNG_SHA_EXTEND_STATUS_FW_HALTED_M BIT(30) #define GL_MNG_SHA_EXTEND_STATUS_DONE_S 31 #define GL_MNG_SHA_EXTEND_STATUS_DONE_M BIT(31) #define GL_SWT_PRT2MDEF(_i) (0x00216018 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: POR */ #define GL_SWT_PRT2MDEF_MAX_INDEX 31 #define GL_SWT_PRT2MDEF_MDEFIDX_S 0 #define GL_SWT_PRT2MDEF_MDEFIDX_M MAKEMASK(0x7, 0) #define GL_SWT_PRT2MDEF_MDEFENA_S 31 #define GL_SWT_PRT2MDEF_MDEFENA_M BIT(31) #define PRT_MNG_MANC 0x00214720 /* Reset Source: POR */ #define PRT_MNG_MANC_FLOW_CONTROL_DISCARD_S 0 #define PRT_MNG_MANC_FLOW_CONTROL_DISCARD_M BIT(0) #define PRT_MNG_MANC_NCSI_DISCARD_S 1 #define PRT_MNG_MANC_NCSI_DISCARD_M BIT(1) #define PRT_MNG_MANC_RCV_TCO_EN_S 17 #define PRT_MNG_MANC_RCV_TCO_EN_M BIT(17) #define 
PRT_MNG_MANC_RCV_ALL_S 19 #define PRT_MNG_MANC_RCV_ALL_M BIT(19) #define PRT_MNG_MANC_FIXED_NET_TYPE_S 25 #define PRT_MNG_MANC_FIXED_NET_TYPE_M BIT(25) #define PRT_MNG_MANC_NET_TYPE_S 26 #define PRT_MNG_MANC_NET_TYPE_M BIT(26) #define PRT_MNG_MANC_EN_BMC2OS_S 28 #define PRT_MNG_MANC_EN_BMC2OS_M BIT(28) #define PRT_MNG_MANC_EN_BMC2NET_S 29 #define PRT_MNG_MANC_EN_BMC2NET_M BIT(29) #define PRT_MNG_MAVTV(_i) (0x00214780 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */ #define PRT_MNG_MAVTV_MAX_INDEX 7 #define PRT_MNG_MAVTV_VID_S 0 #define PRT_MNG_MAVTV_VID_M MAKEMASK(0xFFF, 0) #define PRT_MNG_MDEF(_i) (0x00214880 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */ #define PRT_MNG_MDEF_MAX_INDEX 7 #define PRT_MNG_MDEF_MAC_EXACT_AND_S 0 #define PRT_MNG_MDEF_MAC_EXACT_AND_M MAKEMASK(0xF, 0) #define PRT_MNG_MDEF_BROADCAST_AND_S 4 #define PRT_MNG_MDEF_BROADCAST_AND_M BIT(4) #define PRT_MNG_MDEF_VLAN_AND_S 5 #define PRT_MNG_MDEF_VLAN_AND_M MAKEMASK(0xFF, 5) #define PRT_MNG_MDEF_IPV4_ADDRESS_AND_S 13 #define PRT_MNG_MDEF_IPV4_ADDRESS_AND_M MAKEMASK(0xF, 13) #define PRT_MNG_MDEF_IPV6_ADDRESS_AND_S 17 #define PRT_MNG_MDEF_IPV6_ADDRESS_AND_M MAKEMASK(0xF, 17) #define PRT_MNG_MDEF_MAC_EXACT_OR_S 21 #define PRT_MNG_MDEF_MAC_EXACT_OR_M MAKEMASK(0xF, 21) #define PRT_MNG_MDEF_BROADCAST_OR_S 25 #define PRT_MNG_MDEF_BROADCAST_OR_M BIT(25) #define PRT_MNG_MDEF_MULTICAST_AND_S 26 #define PRT_MNG_MDEF_MULTICAST_AND_M BIT(26) #define PRT_MNG_MDEF_ARP_REQUEST_OR_S 27 #define PRT_MNG_MDEF_ARP_REQUEST_OR_M BIT(27) #define PRT_MNG_MDEF_ARP_RESPONSE_OR_S 28 #define PRT_MNG_MDEF_ARP_RESPONSE_OR_M BIT(28) #define PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_S 29 #define PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_M BIT(29) #define PRT_MNG_MDEF_PORT_0X298_OR_S 30 #define PRT_MNG_MDEF_PORT_0X298_OR_M BIT(30) #define PRT_MNG_MDEF_PORT_0X26F_OR_S 31 #define PRT_MNG_MDEF_PORT_0X26F_OR_M BIT(31) #define PRT_MNG_MDEF_EXT(_i) (0x00214A00 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */ #define 
PRT_MNG_MDEF_EXT_MAX_INDEX 7 #define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_S 0 #define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_M MAKEMASK(0xF, 0) #define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_S 4 #define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_M MAKEMASK(0xF, 4) #define PRT_MNG_MDEF_EXT_FLEX_PORT_OR_S 8 #define PRT_MNG_MDEF_EXT_FLEX_PORT_OR_M MAKEMASK(0xFFFF, 8) #define PRT_MNG_MDEF_EXT_FLEX_TCO_S 24 #define PRT_MNG_MDEF_EXT_FLEX_TCO_M BIT(24) #define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_S 25 #define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_M BIT(25) #define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_S 26 #define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_M BIT(26) #define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_S 27 #define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_M BIT(27) #define PRT_MNG_MDEF_EXT_ICMP_OR_S 28 #define PRT_MNG_MDEF_EXT_ICMP_OR_M BIT(28) #define PRT_MNG_MDEF_EXT_MLD_S 29 #define PRT_MNG_MDEF_EXT_MLD_M BIT(29) #define PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_S 30 #define PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_M BIT(30) #define PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_S 31 #define PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_M BIT(31) #define PRT_MNG_MDEFVSI(_i) (0x00214980 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ #define PRT_MNG_MDEFVSI_MAX_INDEX 3 #define PRT_MNG_MDEFVSI_MDEFVSI_2N_S 0 #define PRT_MNG_MDEFVSI_MDEFVSI_2N_M MAKEMASK(0xFFFF, 0) #define PRT_MNG_MDEFVSI_MDEFVSI_2NP1_S 16 #define PRT_MNG_MDEFVSI_MDEFVSI_2NP1_M MAKEMASK(0xFFFF, 16) #define PRT_MNG_METF(_i) (0x00214120 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ #define PRT_MNG_METF_MAX_INDEX 3 #define PRT_MNG_METF_ETYPE_S 0 #define PRT_MNG_METF_ETYPE_M MAKEMASK(0xFFFF, 0) #define PRT_MNG_METF_POLARITY_S 30 #define PRT_MNG_METF_POLARITY_M BIT(30) #define PRT_MNG_MFUTP(_i) (0x00214320 + ((_i) * 32)) /* _i=0...15 */ /* Reset Source: POR */ #define PRT_MNG_MFUTP_MAX_INDEX 15 #define PRT_MNG_MFUTP_MFUTP_N_S 0 #define PRT_MNG_MFUTP_MFUTP_N_M MAKEMASK(0xFFFF, 0) #define 
PRT_MNG_MFUTP_UDP_S 16 #define PRT_MNG_MFUTP_UDP_M BIT(16) #define PRT_MNG_MFUTP_TCP_S 17 #define PRT_MNG_MFUTP_TCP_M BIT(17) #define PRT_MNG_MFUTP_SOURCE_DESTINATION_S 18 #define PRT_MNG_MFUTP_SOURCE_DESTINATION_M BIT(18) #define PRT_MNG_MIPAF4(_i) (0x002141A0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ #define PRT_MNG_MIPAF4_MAX_INDEX 3 #define PRT_MNG_MIPAF4_MIPAF_S 0 #define PRT_MNG_MIPAF4_MIPAF_M MAKEMASK(0xFFFFFFFF, 0) #define PRT_MNG_MIPAF6(_i) (0x00214520 + ((_i) * 32)) /* _i=0...15 */ /* Reset Source: POR */ #define PRT_MNG_MIPAF6_MAX_INDEX 15 #define PRT_MNG_MIPAF6_MIPAF_S 0 #define PRT_MNG_MIPAF6_MIPAF_M MAKEMASK(0xFFFFFFFF, 0) #define PRT_MNG_MMAH(_i) (0x00214220 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ #define PRT_MNG_MMAH_MAX_INDEX 3 #define PRT_MNG_MMAH_MMAH_S 0 #define PRT_MNG_MMAH_MMAH_M MAKEMASK(0xFFFF, 0) #define PRT_MNG_MMAL(_i) (0x002142A0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ #define PRT_MNG_MMAL_MAX_INDEX 3 #define PRT_MNG_MMAL_MMAL_S 0 #define PRT_MNG_MMAL_MMAL_M MAKEMASK(0xFFFFFFFF, 0) #define PRT_MNG_MNGONLY 0x00214740 /* Reset Source: POR */ #define PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_S 0 #define PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_M MAKEMASK(0xFF, 0) #define PRT_MNG_MSFM 0x00214760 /* Reset Source: POR */ #define PRT_MNG_MSFM_PORT_26F_UDP_S 0 #define PRT_MNG_MSFM_PORT_26F_UDP_M BIT(0) #define PRT_MNG_MSFM_PORT_26F_TCP_S 1 #define PRT_MNG_MSFM_PORT_26F_TCP_M BIT(1) #define PRT_MNG_MSFM_PORT_298_UDP_S 2 #define PRT_MNG_MSFM_PORT_298_UDP_M BIT(2) #define PRT_MNG_MSFM_PORT_298_TCP_S 3 #define PRT_MNG_MSFM_PORT_298_TCP_M BIT(3) #define PRT_MNG_MSFM_IPV6_0_MASK_S 4 #define PRT_MNG_MSFM_IPV6_0_MASK_M BIT(4) #define PRT_MNG_MSFM_IPV6_1_MASK_S 5 #define PRT_MNG_MSFM_IPV6_1_MASK_M BIT(5) #define PRT_MNG_MSFM_IPV6_2_MASK_S 6 #define PRT_MNG_MSFM_IPV6_2_MASK_M BIT(6) #define PRT_MNG_MSFM_IPV6_3_MASK_S 7 #define PRT_MNG_MSFM_IPV6_3_MASK_M BIT(7) #define MSIX_PBA_PAGE(_i) (0x02E08000 + ((_i) * 4)) 
/* _i=0...63 */ /* Reset Source: FLR */ #define MSIX_PBA_PAGE_MAX_INDEX 63 #define MSIX_PBA_PAGE_PENBIT_S 0 #define MSIX_PBA_PAGE_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_PBA1(_i) (0x00008000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: FLR */ #define MSIX_PBA1_MAX_INDEX 63 #define MSIX_PBA1_PENBIT_S 0 #define MSIX_PBA1_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TADD_PAGE(_i) (0x02E00000 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TADD_PAGE_MAX_INDEX 2047 #define MSIX_TADD_PAGE_MSIXTADD10_S 0 #define MSIX_TADD_PAGE_MSIXTADD10_M MAKEMASK(0x3, 0) #define MSIX_TADD_PAGE_MSIXTADD_S 2 #define MSIX_TADD_PAGE_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) #define MSIX_TADD1(_i) (0x00000000 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TADD1_MAX_INDEX 2047 #define MSIX_TADD1_MSIXTADD10_S 0 #define MSIX_TADD1_MSIXTADD10_M MAKEMASK(0x3, 0) #define MSIX_TADD1_MSIXTADD_S 2 #define MSIX_TADD1_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) #define MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TMSG_MAX_INDEX 2047 #define MSIX_TMSG_MSIXTMSG_S 0 #define MSIX_TMSG_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TMSG_PAGE(_i) (0x02E00008 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TMSG_PAGE_MAX_INDEX 2047 #define MSIX_TMSG_PAGE_MSIXTMSG_S 0 #define MSIX_TMSG_PAGE_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TUADD_PAGE(_i) (0x02E00004 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TUADD_PAGE_MAX_INDEX 2047 #define MSIX_TUADD_PAGE_MSIXTUADD_S 0 #define MSIX_TUADD_PAGE_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TUADD1(_i) (0x00000004 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TUADD1_MAX_INDEX 2047 #define MSIX_TUADD1_MSIXTUADD_S 0 #define MSIX_TUADD1_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TVCTRL_PAGE(_i) (0x02E0000C + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TVCTRL_PAGE_MAX_INDEX 
2047 #define MSIX_TVCTRL_PAGE_MASK_S 0 #define MSIX_TVCTRL_PAGE_MASK_M BIT(0) #define MSIX_TVCTRL1(_i) (0x0000000C + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ #define MSIX_TVCTRL1_MAX_INDEX 2047 #define MSIX_TVCTRL1_MASK_S 0 #define MSIX_TVCTRL1_MASK_M BIT(0) #define GLNVM_AL_DONE_HLP 0x000824C4 /* Reset Source: POR */ #define GLNVM_AL_DONE_HLP_HLP_CORER_S 0 #define GLNVM_AL_DONE_HLP_HLP_CORER_M BIT(0) #define GLNVM_AL_DONE_HLP_HLP_FULLR_S 1 #define GLNVM_AL_DONE_HLP_HLP_FULLR_M BIT(1) #define GLNVM_ALTIMERS 0x000B6140 /* Reset Source: POR */ #define GLNVM_ALTIMERS_PCI_ALTIMER_S 0 #define GLNVM_ALTIMERS_PCI_ALTIMER_M MAKEMASK(0xFFF, 0) #define GLNVM_ALTIMERS_GEN_ALTIMER_S 12 #define GLNVM_ALTIMERS_GEN_ALTIMER_M MAKEMASK(0xFFFFF, 12) #define GLNVM_FLA 0x000B6108 /* Reset Source: POR */ #define GLNVM_FLA_LOCKED_S 6 #define GLNVM_FLA_LOCKED_M BIT(6) #define GLNVM_GENS 0x000B6100 /* Reset Source: POR */ #define GLNVM_GENS_NVM_PRES_S 0 #define GLNVM_GENS_NVM_PRES_M BIT(0) #define GLNVM_GENS_SR_SIZE_S 5 #define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5) #define GLNVM_GENS_BANK1VAL_S 8 #define GLNVM_GENS_BANK1VAL_M BIT(8) #define GLNVM_GENS_ALT_PRST_S 23 #define GLNVM_GENS_ALT_PRST_M BIT(23) #define GLNVM_GENS_FL_AUTO_RD_S 25 #define GLNVM_GENS_FL_AUTO_RD_M BIT(25) #define GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset Source: POR */ #define GLNVM_PROTCSR_MAX_INDEX 59 #define GLNVM_PROTCSR_ADDR_BLOCK_S 0 #define GLNVM_PROTCSR_ADDR_BLOCK_M MAKEMASK(0xFFFFFF, 0) #define GLNVM_ULD 0x000B6008 /* Reset Source: POR */ #define GLNVM_ULD_PCIER_DONE_S 0 #define GLNVM_ULD_PCIER_DONE_M BIT(0) #define GLNVM_ULD_PCIER_DONE_1_S 1 #define GLNVM_ULD_PCIER_DONE_1_M BIT(1) #define GLNVM_ULD_CORER_DONE_S 3 #define GLNVM_ULD_CORER_DONE_M BIT(3) #define GLNVM_ULD_GLOBR_DONE_S 4 #define GLNVM_ULD_GLOBR_DONE_M BIT(4) #define GLNVM_ULD_POR_DONE_S 5 #define GLNVM_ULD_POR_DONE_M BIT(5) #define GLNVM_ULD_POR_DONE_1_S 8 #define GLNVM_ULD_POR_DONE_1_M BIT(8) #define 
GLNVM_ULD_PCIER_DONE_2_S 9 #define GLNVM_ULD_PCIER_DONE_2_M BIT(9) #define GLNVM_ULD_PE_DONE_S 10 #define GLNVM_ULD_PE_DONE_M BIT(10) #define GLNVM_ULD_HLP_CORE_DONE_S 11 #define GLNVM_ULD_HLP_CORE_DONE_M BIT(11) #define GLNVM_ULD_HLP_FULL_DONE_S 12 #define GLNVM_ULD_HLP_FULL_DONE_M BIT(12) #define GLNVM_ULT 0x000B6154 /* Reset Source: POR */ #define GLNVM_ULT_CONF_PCIR_AE_S 0 #define GLNVM_ULT_CONF_PCIR_AE_M BIT(0) #define GLNVM_ULT_CONF_PCIRTL_AE_S 1 #define GLNVM_ULT_CONF_PCIRTL_AE_M BIT(1) #define GLNVM_ULT_RESERVED_1_S 2 #define GLNVM_ULT_RESERVED_1_M BIT(2) #define GLNVM_ULT_CONF_CORE_AE_S 3 #define GLNVM_ULT_CONF_CORE_AE_M BIT(3) #define GLNVM_ULT_CONF_GLOBAL_AE_S 4 #define GLNVM_ULT_CONF_GLOBAL_AE_M BIT(4) #define GLNVM_ULT_CONF_POR_AE_S 5 #define GLNVM_ULT_CONF_POR_AE_M BIT(5) #define GLNVM_ULT_RESERVED_2_S 6 #define GLNVM_ULT_RESERVED_2_M BIT(6) #define GLNVM_ULT_RESERVED_3_S 7 #define GLNVM_ULT_RESERVED_3_M BIT(7) #define GLNVM_ULT_RESERVED_5_S 8 #define GLNVM_ULT_RESERVED_5_M BIT(8) #define GLNVM_ULT_CONF_PCIALT_AE_S 9 #define GLNVM_ULT_CONF_PCIALT_AE_M BIT(9) #define GLNVM_ULT_CONF_PE_AE_S 10 #define GLNVM_ULT_CONF_PE_AE_M BIT(10) #define GLNVM_ULT_RESERVED_4_S 11 #define GLNVM_ULT_RESERVED_4_M MAKEMASK(0x1FFFFF, 11) #define GL_COTF_MARKER_STATUS 0x00200200 /* Reset Source: CORER */ #define GL_COTF_MARKER_STATUS_MRKR_BUSY_S 0 #define GL_COTF_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xFF, 0) #define GL_COTF_MARKER_TRIG_RCU_PRS(_i) (0x002001D4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GL_COTF_MARKER_TRIG_RCU_PRS_MAX_INDEX 7 #define GL_COTF_MARKER_TRIG_RCU_PRS_SET_RST_S 0 #define GL_COTF_MARKER_TRIG_RCU_PRS_SET_RST_M BIT(0) #define GL_PRS_MARKER_ERROR 0x00200204 /* Reset Source: CORER */ #define GL_PRS_MARKER_ERROR_XLR_CFG_ERR_S 0 #define GL_PRS_MARKER_ERROR_XLR_CFG_ERR_M BIT(0) #define GL_PRS_MARKER_ERROR_QH_CFG_ERR_S 1 #define GL_PRS_MARKER_ERROR_QH_CFG_ERR_M BIT(1) #define GL_PRS_MARKER_ERROR_COTF_CFG_ERR_S 2 #define 
GL_PRS_MARKER_ERROR_COTF_CFG_ERR_M BIT(2) #define GL_PRS_RX_PIPE_INIT0(_i) (0x0020000C + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */ #define GL_PRS_RX_PIPE_INIT0_MAX_INDEX 6 #define GL_PRS_RX_PIPE_INIT0_GPCSR_INIT_S 0 #define GL_PRS_RX_PIPE_INIT0_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) #define GL_PRS_RX_PIPE_INIT1 0x00200028 /* Reset Source: CORER */ #define GL_PRS_RX_PIPE_INIT1_GPCSR_INIT_S 0 #define GL_PRS_RX_PIPE_INIT1_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) #define GL_PRS_RX_PIPE_INIT2 0x0020002C /* Reset Source: CORER */ #define GL_PRS_RX_PIPE_INIT2_GPCSR_INIT_S 0 #define GL_PRS_RX_PIPE_INIT2_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) #define GL_PRS_RX_SIZE_CTRL 0x00200004 /* Reset Source: CORER */ #define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_S 0 #define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_M MAKEMASK(0x3FF, 0) #define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_EN_S 15 #define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_EN_M BIT(15) #define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_S 16 #define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_M MAKEMASK(0x3FF, 16) #define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_EN_S 31 #define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_EN_M BIT(31) #define GL_PRS_TX_PIPE_INIT0(_i) (0x00202018 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */ #define GL_PRS_TX_PIPE_INIT0_MAX_INDEX 6 #define GL_PRS_TX_PIPE_INIT0_GPCSR_INIT_S 0 #define GL_PRS_TX_PIPE_INIT0_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) #define GL_PRS_TX_PIPE_INIT1 0x00202034 /* Reset Source: CORER */ #define GL_PRS_TX_PIPE_INIT1_GPCSR_INIT_S 0 #define GL_PRS_TX_PIPE_INIT1_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) #define GL_PRS_TX_PIPE_INIT2 0x00202038 /* Reset Source: CORER */ #define GL_PRS_TX_PIPE_INIT2_GPCSR_INIT_S 0 #define GL_PRS_TX_PIPE_INIT2_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) #define GL_PRS_TX_SIZE_CTRL 0x00202014 /* Reset Source: CORER */ #define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_S 0 #define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_M MAKEMASK(0x3FF, 0) #define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_EN_S 15 #define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_EN_M BIT(15) #define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_S 16 #define 
GL_PRS_TX_SIZE_CTRL_MAX_SIZE_M MAKEMASK(0x3FF, 16) #define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_EN_S 31 #define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_EN_M BIT(31) #define GL_QH_MARKER_STATUS 0x002001FC /* Reset Source: CORER */ #define GL_QH_MARKER_STATUS_MRKR_BUSY_S 0 #define GL_QH_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xF, 0) #define GL_QH_MARKER_TRIG_RCU_PRS(_i) (0x002001C4 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GL_QH_MARKER_TRIG_RCU_PRS_MAX_INDEX 3 #define GL_QH_MARKER_TRIG_RCU_PRS_QPID_S 0 #define GL_QH_MARKER_TRIG_RCU_PRS_QPID_M MAKEMASK(0x3FFFF, 0) #define GL_QH_MARKER_TRIG_RCU_PRS_PE_TAG_S 18 #define GL_QH_MARKER_TRIG_RCU_PRS_PE_TAG_M MAKEMASK(0xFF, 18) #define GL_QH_MARKER_TRIG_RCU_PRS_PORT_NUM_S 26 #define GL_QH_MARKER_TRIG_RCU_PRS_PORT_NUM_M MAKEMASK(0x7, 26) #define GL_QH_MARKER_TRIG_RCU_PRS_SET_RST_S 31 #define GL_QH_MARKER_TRIG_RCU_PRS_SET_RST_M BIT(31) #define GL_RPRS_ANA_CSR_CTRL 0x00200708 /* Reset Source: CORER */ #define GL_RPRS_ANA_CSR_CTRL_SELECT_EN_S 0 #define GL_RPRS_ANA_CSR_CTRL_SELECT_EN_M BIT(0) #define GL_RPRS_ANA_CSR_CTRL_SELECTED_ANA_S 1 #define GL_RPRS_ANA_CSR_CTRL_SELECTED_ANA_M BIT(1) #define GL_TPRS_ANA_CSR_CTRL 0x00202100 /* Reset Source: CORER */ #define GL_TPRS_ANA_CSR_CTRL_SELECT_EN_S 0 #define GL_TPRS_ANA_CSR_CTRL_SELECT_EN_M BIT(0) #define GL_TPRS_ANA_CSR_CTRL_SELECTED_ANA_S 1 #define GL_TPRS_ANA_CSR_CTRL_SELECTED_ANA_M BIT(1) #define GL_TPRS_MNG_PM_THR 0x00202004 /* Reset Source: CORER */ #define GL_TPRS_MNG_PM_THR_MNG_PM_THR_S 0 #define GL_TPRS_MNG_PM_THR_MNG_PM_THR_M MAKEMASK(0x3FFF, 0) #define GL_TPRS_PM_CNT(_i) (0x00202008 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GL_TPRS_PM_CNT_MAX_INDEX 1 #define GL_TPRS_PM_CNT_GL_PRS_PM_CNT_S 0 #define GL_TPRS_PM_CNT_GL_PRS_PM_CNT_M MAKEMASK(0x3FFF, 0) #define GL_TPRS_PM_THR 0x00202000 /* Reset Source: CORER */ #define GL_TPRS_PM_THR_PM_THR_S 0 #define GL_TPRS_PM_THR_PM_THR_M MAKEMASK(0x3FFF, 0) #define GL_XLR_MARKER_LOG_RCU_PRS(_i) (0x00200208 + ((_i) * 4)) /* 
_i=0...63 */ /* Reset Source: CORER */ #define GL_XLR_MARKER_LOG_RCU_PRS_MAX_INDEX 63 #define GL_XLR_MARKER_LOG_RCU_PRS_XLR_TRIG_S 0 #define GL_XLR_MARKER_LOG_RCU_PRS_XLR_TRIG_M MAKEMASK(0xFFFFFFFF, 0) #define GL_XLR_MARKER_STATUS(_i) (0x002001F4 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GL_XLR_MARKER_STATUS_MAX_INDEX 1 #define GL_XLR_MARKER_STATUS_MRKR_BUSY_S 0 #define GL_XLR_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xFFFFFFFF, 0) #define GL_XLR_MARKER_TRIG_PE 0x005008C0 /* Reset Source: CORER */ #define GL_XLR_MARKER_TRIG_PE_VM_VF_NUM_S 0 #define GL_XLR_MARKER_TRIG_PE_VM_VF_NUM_M MAKEMASK(0x3FF, 0) #define GL_XLR_MARKER_TRIG_PE_VM_VF_TYPE_S 10 #define GL_XLR_MARKER_TRIG_PE_VM_VF_TYPE_M MAKEMASK(0x3, 10) #define GL_XLR_MARKER_TRIG_PE_PF_NUM_S 12 #define GL_XLR_MARKER_TRIG_PE_PF_NUM_M MAKEMASK(0x7, 12) #define GL_XLR_MARKER_TRIG_PE_PORT_NUM_S 16 #define GL_XLR_MARKER_TRIG_PE_PORT_NUM_M MAKEMASK(0x7, 16) #define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */ #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0 #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M MAKEMASK(0x3FF, 0) #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10 #define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M MAKEMASK(0x3, 10) #define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12 #define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M MAKEMASK(0x7, 12) #define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16 #define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_M MAKEMASK(0x7, 16) #define GL_CLKGATE_EVENTS 0x0009DE70 /* Reset Source: PERST */ #define GL_CLKGATE_EVENTS_PRIMARY_CLKGATE_EVENTS_S 0 #define GL_CLKGATE_EVENTS_PRIMARY_CLKGATE_EVENTS_M MAKEMASK(0xFFFF, 0) #define GL_CLKGATE_EVENTS_SIDEBAND_CLKGATE_EVENTS_S 16 #define GL_CLKGATE_EVENTS_SIDEBAND_CLKGATE_EVENTS_M MAKEMASK(0xFFFF, 16) #define GLPCI_BYTCTH_NP_C 0x000BFDA8 /* Reset Source: PCIR */ #define GLPCI_BYTCTH_NP_C_PCI_COUNT_BW_BCT_S 0 #define GLPCI_BYTCTH_NP_C_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_BYTCTH_P 0x0009E970 /* Reset 
Source: PCIR */ #define GLPCI_BYTCTH_P_PCI_COUNT_BW_BCT_S 0 #define GLPCI_BYTCTH_P_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_BYTCTL_NP_C 0x000BFDAC /* Reset Source: PCIR */ #define GLPCI_BYTCTL_NP_C_PCI_COUNT_BW_BCT_S 0 #define GLPCI_BYTCTL_NP_C_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_BYTCTL_P 0x0009E994 /* Reset Source: PCIR */ #define GLPCI_BYTCTL_P_PCI_COUNT_BW_BCT_S 0 #define GLPCI_BYTCTL_P_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_CAPCTRL 0x0009DE88 /* Reset Source: PCIR */ #define GLPCI_CAPCTRL_VPD_EN_S 0 #define GLPCI_CAPCTRL_VPD_EN_M BIT(0) #define GLPCI_CAPSUP 0x0009DE8C /* Reset Source: PCIR */ #define GLPCI_CAPSUP_PCIE_VER_S 0 #define GLPCI_CAPSUP_PCIE_VER_M BIT(0) #define E800_GLPCI_CAPSUP_RESERVED_2_S 1 #define E800_GLPCI_CAPSUP_RESERVED_2_M BIT(1) #define GLPCI_CAPSUP_LTR_EN_S 2 #define GLPCI_CAPSUP_LTR_EN_M BIT(2) #define GLPCI_CAPSUP_TPH_EN_S 3 #define GLPCI_CAPSUP_TPH_EN_M BIT(3) #define GLPCI_CAPSUP_ARI_EN_S 4 #define GLPCI_CAPSUP_ARI_EN_M BIT(4) #define GLPCI_CAPSUP_IOV_EN_S 5 #define GLPCI_CAPSUP_IOV_EN_M BIT(5) #define GLPCI_CAPSUP_ACS_EN_S 6 #define GLPCI_CAPSUP_ACS_EN_M BIT(6) #define GLPCI_CAPSUP_SEC_EN_S 7 #define GLPCI_CAPSUP_SEC_EN_M BIT(7) #define GLPCI_CAPSUP_PASID_EN_S 8 #define GLPCI_CAPSUP_PASID_EN_M BIT(8) #define GLPCI_CAPSUP_DLFE_EN_S 9 #define GLPCI_CAPSUP_DLFE_EN_M BIT(9) #define GLPCI_CAPSUP_GEN4_EXT_EN_S 10 #define GLPCI_CAPSUP_GEN4_EXT_EN_M BIT(10) #define GLPCI_CAPSUP_GEN4_MARG_EN_S 11 #define GLPCI_CAPSUP_GEN4_MARG_EN_M BIT(11) #define GLPCI_CAPSUP_ECRC_GEN_EN_S 16 #define GLPCI_CAPSUP_ECRC_GEN_EN_M BIT(16) #define GLPCI_CAPSUP_ECRC_CHK_EN_S 17 #define GLPCI_CAPSUP_ECRC_CHK_EN_M BIT(17) #define GLPCI_CAPSUP_IDO_EN_S 18 #define GLPCI_CAPSUP_IDO_EN_M BIT(18) #define GLPCI_CAPSUP_MSI_MASK_S 19 #define GLPCI_CAPSUP_MSI_MASK_M BIT(19) #define GLPCI_CAPSUP_CSR_CONF_EN_S 20 #define GLPCI_CAPSUP_CSR_CONF_EN_M BIT(20) #define GLPCI_CAPSUP_WAKUP_EN_S 21 #define GLPCI_CAPSUP_WAKUP_EN_M 
BIT(21) #define GLPCI_CAPSUP_LOAD_SUBSYS_ID_S 30 #define GLPCI_CAPSUP_LOAD_SUBSYS_ID_M BIT(30) #define GLPCI_CAPSUP_LOAD_DEV_ID_S 31 #define GLPCI_CAPSUP_LOAD_DEV_ID_M BIT(31) #define GLPCI_CNF 0x0009DEA0 /* Reset Source: POR */ #define GLPCI_CNF_FLEX10_S 1 #define GLPCI_CNF_FLEX10_M BIT(1) #define GLPCI_CNF_WAKE_PIN_EN_S 2 #define GLPCI_CNF_WAKE_PIN_EN_M BIT(2) #define GLPCI_CNF_MSIX_ECC_BLOCK_DISABLE_S 3 #define GLPCI_CNF_MSIX_ECC_BLOCK_DISABLE_M BIT(3) #define GLPCI_CNF2 0x000BE004 /* Reset Source: PCIR */ #define GLPCI_CNF2_RO_DIS_S 0 #define GLPCI_CNF2_RO_DIS_M BIT(0) #define GLPCI_CNF2_CACHELINE_SIZE_S 1 #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) #define GLPCI_DREVID 0x0009E9AC /* Reset Source: PCIR */ #define GLPCI_DREVID_DEFAULT_REVID_S 0 #define GLPCI_DREVID_DEFAULT_REVID_M MAKEMASK(0xFF, 0) #define GLPCI_GSCL_1_NP_C 0x000BFDA4 /* Reset Source: PCIR */ #define GLPCI_GSCL_1_NP_C_RT_MODE_S 8 #define GLPCI_GSCL_1_NP_C_RT_MODE_M BIT(8) #define GLPCI_GSCL_1_NP_C_RT_EVENT_S 9 #define GLPCI_GSCL_1_NP_C_RT_EVENT_M MAKEMASK(0x1F, 9) #define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EN_S 14 #define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EN_M BIT(14) #define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EV_S 15 #define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EV_M MAKEMASK(0x1F, 15) #define GLPCI_GSCL_1_NP_C_GIO_COUNT_RESET_S 29 #define GLPCI_GSCL_1_NP_C_GIO_COUNT_RESET_M BIT(29) #define GLPCI_GSCL_1_NP_C_GIO_COUNT_STOP_S 30 #define GLPCI_GSCL_1_NP_C_GIO_COUNT_STOP_M BIT(30) #define GLPCI_GSCL_1_NP_C_GIO_COUNT_START_S 31 #define GLPCI_GSCL_1_NP_C_GIO_COUNT_START_M BIT(31) #define GLPCI_GSCL_1_P 0x0009E9B4 /* Reset Source: PCIR */ #define GLPCI_GSCL_1_P_GIO_COUNT_EN_0_S 0 #define GLPCI_GSCL_1_P_GIO_COUNT_EN_0_M BIT(0) #define GLPCI_GSCL_1_P_GIO_COUNT_EN_1_S 1 #define GLPCI_GSCL_1_P_GIO_COUNT_EN_1_M BIT(1) #define GLPCI_GSCL_1_P_GIO_COUNT_EN_2_S 2 #define GLPCI_GSCL_1_P_GIO_COUNT_EN_2_M BIT(2) #define GLPCI_GSCL_1_P_GIO_COUNT_EN_3_S 3 #define GLPCI_GSCL_1_P_GIO_COUNT_EN_3_M BIT(3) #define 
GLPCI_GSCL_1_P_LBC_ENABLE_0_S 4 #define GLPCI_GSCL_1_P_LBC_ENABLE_0_M BIT(4) #define GLPCI_GSCL_1_P_LBC_ENABLE_1_S 5 #define GLPCI_GSCL_1_P_LBC_ENABLE_1_M BIT(5) #define GLPCI_GSCL_1_P_LBC_ENABLE_2_S 6 #define GLPCI_GSCL_1_P_LBC_ENABLE_2_M BIT(6) #define GLPCI_GSCL_1_P_LBC_ENABLE_3_S 7 #define GLPCI_GSCL_1_P_LBC_ENABLE_3_M BIT(7) #define GLPCI_GSCL_1_P_PCI_COUNT_BW_EN_S 14 #define GLPCI_GSCL_1_P_PCI_COUNT_BW_EN_M BIT(14) #define GLPCI_GSCL_1_P_GIO_64_BIT_EN_S 28 #define GLPCI_GSCL_1_P_GIO_64_BIT_EN_M BIT(28) #define GLPCI_GSCL_1_P_GIO_COUNT_RESET_S 29 #define GLPCI_GSCL_1_P_GIO_COUNT_RESET_M BIT(29) #define GLPCI_GSCL_1_P_GIO_COUNT_STOP_S 30 #define GLPCI_GSCL_1_P_GIO_COUNT_STOP_M BIT(30) #define GLPCI_GSCL_1_P_GIO_COUNT_START_S 31 #define GLPCI_GSCL_1_P_GIO_COUNT_START_M BIT(31) #define GLPCI_GSCL_2 0x0009E998 /* Reset Source: PCIR */ #define GLPCI_GSCL_2_GIO_EVENT_NUM_0_S 0 #define GLPCI_GSCL_2_GIO_EVENT_NUM_0_M MAKEMASK(0xFF, 0) #define GLPCI_GSCL_2_GIO_EVENT_NUM_1_S 8 #define GLPCI_GSCL_2_GIO_EVENT_NUM_1_M MAKEMASK(0xFF, 8) #define GLPCI_GSCL_2_GIO_EVENT_NUM_2_S 16 #define GLPCI_GSCL_2_GIO_EVENT_NUM_2_M MAKEMASK(0xFF, 16) #define GLPCI_GSCL_2_GIO_EVENT_NUM_3_S 24 #define GLPCI_GSCL_2_GIO_EVENT_NUM_3_M MAKEMASK(0xFF, 24) #define GLPCI_GSCL_5_8(_i) (0x0009E954 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: PCIR */ #define GLPCI_GSCL_5_8_MAX_INDEX 3 #define GLPCI_GSCL_5_8_LBC_THRESHOLD_N_S 0 #define GLPCI_GSCL_5_8_LBC_THRESHOLD_N_M MAKEMASK(0xFFFF, 0) #define GLPCI_GSCL_5_8_LBC_TIMER_N_S 16 #define GLPCI_GSCL_5_8_LBC_TIMER_N_M MAKEMASK(0xFFFF, 16) #define GLPCI_GSCN_0_3(_i) (0x0009E99C + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: PCIR */ #define GLPCI_GSCN_0_3_MAX_INDEX 3 #define GLPCI_GSCN_0_3_EVENT_COUNTER_S 0 #define GLPCI_GSCN_0_3_EVENT_COUNTER_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_LATCT_NP_C 0x000BFDA0 /* Reset Source: PCIR */ #define GLPCI_LATCT_NP_C_PCI_LATENCY_COUNT_S 0 #define GLPCI_LATCT_NP_C_PCI_LATENCY_COUNT_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLPCI_LBARCTRL 0x0009DE74 /* Reset Source: POR */ #define GLPCI_LBARCTRL_PREFBAR_S 0 #define GLPCI_LBARCTRL_PREFBAR_M BIT(0) #define GLPCI_LBARCTRL_BAR32_S 1 #define GLPCI_LBARCTRL_BAR32_M BIT(1) #define GLPCI_LBARCTRL_PAGES_SPACE_EN_PF_S 2 #define GLPCI_LBARCTRL_PAGES_SPACE_EN_PF_M BIT(2) #define GLPCI_LBARCTRL_FLASH_EXPOSE_S 3 #define GLPCI_LBARCTRL_FLASH_EXPOSE_M BIT(3) #define GLPCI_LBARCTRL_PE_DB_SIZE_S 4 #define GLPCI_LBARCTRL_PE_DB_SIZE_M MAKEMASK(0x3, 4) #define GLPCI_LBARCTRL_PAGES_SPACE_EN_VF_S 9 #define GLPCI_LBARCTRL_PAGES_SPACE_EN_VF_M BIT(9) #define GLPCI_LBARCTRL_EXROM_SIZE_S 11 #define GLPCI_LBARCTRL_EXROM_SIZE_M MAKEMASK(0x7, 11) #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_S 14 #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_M MAKEMASK(0x3, 14) #define GLPCI_LINKCAP 0x0009DE90 /* Reset Source: PCIR */ #define GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_S 0 #define GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_M MAKEMASK(0x3F, 0) #define GLPCI_LINKCAP_MAX_LINK_WIDTH_S 9 #define GLPCI_LINKCAP_MAX_LINK_WIDTH_M MAKEMASK(0xF, 9) #define GLPCI_NPQ_CFG 0x000BFD80 /* Reset Source: PCIR */ #define GLPCI_NPQ_CFG_EXTEND_TO_S 0 #define GLPCI_NPQ_CFG_EXTEND_TO_M BIT(0) #define GLPCI_NPQ_CFG_SMALL_TO_S 1 #define GLPCI_NPQ_CFG_SMALL_TO_M BIT(1) #define GLPCI_NPQ_CFG_WEIGHT_AVG_S 2 #define GLPCI_NPQ_CFG_WEIGHT_AVG_M MAKEMASK(0xF, 2) #define GLPCI_NPQ_CFG_NPQ_SPARE_S 6 #define GLPCI_NPQ_CFG_NPQ_SPARE_M MAKEMASK(0x3FF, 6) #define GLPCI_NPQ_CFG_NPQ_ERR_STAT_S 16 #define GLPCI_NPQ_CFG_NPQ_ERR_STAT_M MAKEMASK(0xF, 16) #define GLPCI_PKTCT_NP_C 0x000BFD9C /* Reset Source: PCIR */ #define GLPCI_PKTCT_NP_C_PCI_COUNT_BW_PCT_S 0 #define GLPCI_PKTCT_NP_C_PCI_COUNT_BW_PCT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_PKTCT_P 0x0009E9B0 /* Reset Source: PCIR */ #define GLPCI_PKTCT_P_PCI_COUNT_BW_PCT_S 0 #define GLPCI_PKTCT_P_PCI_COUNT_BW_PCT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_PMSUP 0x0009DE94 /* Reset Source: PCIR */ #define GLPCI_PMSUP_RESERVED_0_S 0 #define GLPCI_PMSUP_RESERVED_0_M MAKEMASK(0x3, 0) #define 
GLPCI_PMSUP_RESERVED_1_S 2 #define GLPCI_PMSUP_RESERVED_1_M MAKEMASK(0x7, 2) #define GLPCI_PMSUP_RESERVED_2_S 5 #define GLPCI_PMSUP_RESERVED_2_M MAKEMASK(0x7, 5) #define GLPCI_PMSUP_L0S_ACC_LAT_S 8 #define GLPCI_PMSUP_L0S_ACC_LAT_M MAKEMASK(0x7, 8) #define GLPCI_PMSUP_L1_ACC_LAT_S 11 #define GLPCI_PMSUP_L1_ACC_LAT_M MAKEMASK(0x7, 11) #define GLPCI_PMSUP_RESERVED_3_S 14 #define GLPCI_PMSUP_RESERVED_3_M BIT(14) #define GLPCI_PMSUP_OBFF_SUP_S 15 #define GLPCI_PMSUP_OBFF_SUP_M MAKEMASK(0x3, 15) #define GLPCI_PUSH_PE_IF_TO_STATUS 0x0009DF44 /* Reset Source: PCIR */ #define GLPCI_PUSH_PE_IF_TO_STATUS_GLPCI_PUSH_PE_IF_TO_STATUS_S 0 #define GLPCI_PUSH_PE_IF_TO_STATUS_GLPCI_PUSH_PE_IF_TO_STATUS_M BIT(0) #define GLPCI_PWRDATA 0x0009DE7C /* Reset Source: PCIR */ #define GLPCI_PWRDATA_D0_POWER_S 0 #define GLPCI_PWRDATA_D0_POWER_M MAKEMASK(0xFF, 0) #define GLPCI_PWRDATA_COMM_POWER_S 8 #define GLPCI_PWRDATA_COMM_POWER_M MAKEMASK(0xFF, 8) #define GLPCI_PWRDATA_D3_POWER_S 16 #define GLPCI_PWRDATA_D3_POWER_M MAKEMASK(0xFF, 16) #define GLPCI_PWRDATA_DATA_SCALE_S 24 #define GLPCI_PWRDATA_DATA_SCALE_M MAKEMASK(0x3, 24) #define GLPCI_REVID 0x0009DE98 /* Reset Source: PCIR */ #define GLPCI_REVID_NVM_REVID_S 0 #define GLPCI_REVID_NVM_REVID_M MAKEMASK(0xFF, 0) #define GLPCI_SERH 0x0009DE84 /* Reset Source: PCIR */ #define GLPCI_SERH_SER_NUM_H_S 0 #define GLPCI_SERH_SER_NUM_H_M MAKEMASK(0xFFFF, 0) #define GLPCI_SERL 0x0009DE80 /* Reset Source: PCIR */ #define GLPCI_SERL_SER_NUM_L_S 0 #define GLPCI_SERL_SER_NUM_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPCI_SUBVENID 0x0009DEE8 /* Reset Source: PCIR */ #define GLPCI_SUBVENID_SUB_VEN_ID_S 0 #define GLPCI_SUBVENID_SUB_VEN_ID_M MAKEMASK(0xFFFF, 0) #define GLPCI_UPADD 0x000BE0D4 /* Reset Source: PCIR */ #define GLPCI_UPADD_ADDRESS_S 1 #define GLPCI_UPADD_ADDRESS_M MAKEMASK(0x7FFFFFFF, 1) #define GLPCI_VENDORID 0x0009DEC8 /* Reset Source: PCIR */ #define GLPCI_VENDORID_VENDORID_S 0 #define GLPCI_VENDORID_VENDORID_M MAKEMASK(0xFFFF, 0) #define 
GLPCI_VFSUP 0x0009DE9C /* Reset Source: PCIR */ #define GLPCI_VFSUP_VF_PREFETCH_S 0 #define GLPCI_VFSUP_VF_PREFETCH_M BIT(0) #define GLPCI_VFSUP_VR_BAR_TYPE_S 1 #define GLPCI_VFSUP_VR_BAR_TYPE_M BIT(1) #define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90 /* Reset Source: PCIR */ #define GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_S 0 #define GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_M MAKEMASK(0xFFFF, 0) #define PF_FUNC_RID 0x0009E880 /* Reset Source: PCIR */ #define PF_FUNC_RID_FUNCTION_NUMBER_S 0 #define PF_FUNC_RID_FUNCTION_NUMBER_M MAKEMASK(0x7, 0) #define PF_FUNC_RID_DEVICE_NUMBER_S 3 #define PF_FUNC_RID_DEVICE_NUMBER_M MAKEMASK(0x1F, 3) #define PF_FUNC_RID_BUS_NUMBER_S 8 #define PF_FUNC_RID_BUS_NUMBER_M MAKEMASK(0xFF, 8) #define PF_PCI_CIAA 0x0009E580 /* Reset Source: FLR */ #define PF_PCI_CIAA_ADDRESS_S 0 #define PF_PCI_CIAA_ADDRESS_M MAKEMASK(0xFFF, 0) #define PF_PCI_CIAA_VF_NUM_S 12 #define PF_PCI_CIAA_VF_NUM_M MAKEMASK(0xFF, 12) #define PF_PCI_CIAD 0x0009E500 /* Reset Source: FLR */ #define PF_PCI_CIAD_DATA_S 0 #define PF_PCI_CIAD_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define PFPCI_CLASS 0x0009DB00 /* Reset Source: PCIR */ #define PFPCI_CLASS_STORAGE_CLASS_S 0 #define PFPCI_CLASS_STORAGE_CLASS_M BIT(0) #define PFPCI_CLASS_PF_IS_LAN_S 2 #define PFPCI_CLASS_PF_IS_LAN_M BIT(2) #define PFPCI_CNF 0x0009DF00 /* Reset Source: PCIR */ #define PFPCI_CNF_MSI_EN_S 2 #define PFPCI_CNF_MSI_EN_M BIT(2) #define PFPCI_CNF_EXROM_DIS_S 3 #define PFPCI_CNF_EXROM_DIS_M BIT(3) #define PFPCI_CNF_IO_BAR_S 4 #define PFPCI_CNF_IO_BAR_M BIT(4) #define PFPCI_CNF_INT_PIN_S 5 #define PFPCI_CNF_INT_PIN_M MAKEMASK(0x3, 5) #define PFPCI_DEVID 0x0009DE00 /* Reset Source: PCIR */ #define PFPCI_DEVID_PF_DEV_ID_S 0 #define PFPCI_DEVID_PF_DEV_ID_M MAKEMASK(0xFFFF, 0) #define PFPCI_DEVID_VF_DEV_ID_S 16 #define PFPCI_DEVID_VF_DEV_ID_M MAKEMASK(0xFFFF, 16) #define PFPCI_FACTPS 0x0009E900 /* Reset Source: FLR */ #define PFPCI_FACTPS_FUNC_POWER_STATE_S 0 #define PFPCI_FACTPS_FUNC_POWER_STATE_M MAKEMASK(0x3, 0) #define 
PFPCI_FACTPS_FUNC_AUX_EN_S 3 #define PFPCI_FACTPS_FUNC_AUX_EN_M BIT(3) #define PFPCI_FUNC 0x0009D980 /* Reset Source: POR */ #define PFPCI_FUNC_FUNC_DIS_S 0 #define PFPCI_FUNC_FUNC_DIS_M BIT(0) #define PFPCI_FUNC_ALLOW_FUNC_DIS_S 1 #define PFPCI_FUNC_ALLOW_FUNC_DIS_M BIT(1) #define PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_S 2 #define PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_M BIT(2) #define PFPCI_PF_FLUSH_DONE 0x0009E400 /* Reset Source: PCIR */ #define PFPCI_PF_FLUSH_DONE_FLUSH_DONE_S 0 #define PFPCI_PF_FLUSH_DONE_FLUSH_DONE_M BIT(0) #define PFPCI_PM 0x0009DA80 /* Reset Source: POR */ #define PFPCI_PM_PME_EN_S 0 #define PFPCI_PM_PME_EN_M BIT(0) #define PFPCI_STATUS1 0x0009DA00 /* Reset Source: POR */ #define PFPCI_STATUS1_FUNC_VALID_S 0 #define PFPCI_STATUS1_FUNC_VALID_M BIT(0) #define PFPCI_SUBSYSID 0x0009D880 /* Reset Source: PCIR */ #define PFPCI_SUBSYSID_PF_SUBSYS_ID_S 0 #define PFPCI_SUBSYSID_PF_SUBSYS_ID_M MAKEMASK(0xFFFF, 0) #define PFPCI_SUBSYSID_VF_SUBSYS_ID_S 16 #define PFPCI_SUBSYSID_VF_SUBSYS_ID_M MAKEMASK(0xFFFF, 16) #define PFPCI_VF_FLUSH_DONE(_VF) (0x0009E000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PCIR */ #define PFPCI_VF_FLUSH_DONE_MAX_INDEX 255 #define PFPCI_VF_FLUSH_DONE_FLUSH_DONE_S 0 #define PFPCI_VF_FLUSH_DONE_FLUSH_DONE_M BIT(0) #define PFPCI_VM_FLUSH_DONE 0x0009E480 /* Reset Source: PCIR */ #define PFPCI_VM_FLUSH_DONE_FLUSH_DONE_S 0 #define PFPCI_VM_FLUSH_DONE_FLUSH_DONE_M BIT(0) #define PFPCI_VMINDEX 0x0009E600 /* Reset Source: PCIR */ #define PFPCI_VMINDEX_VMINDEX_S 0 #define PFPCI_VMINDEX_VMINDEX_M MAKEMASK(0x3FF, 0) #define PFPCI_VMPEND 0x0009E800 /* Reset Source: PCIR */ #define PFPCI_VMPEND_PENDING_S 0 #define PFPCI_VMPEND_PENDING_M BIT(0) #define PQ_FIFO_STATUS 0x0009DF40 /* Reset Source: PCIR */ #define PQ_FIFO_STATUS_PQ_FIFO_COUNT_S 0 #define PQ_FIFO_STATUS_PQ_FIFO_COUNT_M MAKEMASK(0x7FFFFFFF, 0) #define PQ_FIFO_STATUS_PQ_FIFO_EMPTY_S 31 #define PQ_FIFO_STATUS_PQ_FIFO_EMPTY_M BIT(31) #define GLPE_CPUSTATUS0 0x0050BA5C /* Reset Source: CORER 
*/ #define GLPE_CPUSTATUS0_PECPUSTATUS0_S 0 #define GLPE_CPUSTATUS0_PECPUSTATUS0_M MAKEMASK(0xFFFFFFFF, 0) #define GLPE_CPUSTATUS1 0x0050BA60 /* Reset Source: CORER */ #define GLPE_CPUSTATUS1_PECPUSTATUS1_S 0 #define GLPE_CPUSTATUS1_PECPUSTATUS1_M MAKEMASK(0xFFFFFFFF, 0) #define GLPE_CPUSTATUS2 0x0050BA64 /* Reset Source: CORER */ #define GLPE_CPUSTATUS2_PECPUSTATUS2_S 0 #define GLPE_CPUSTATUS2_PECPUSTATUS2_M MAKEMASK(0xFFFFFFFF, 0) #define GLPE_MDQ_BASE(_i) (0x00536000 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLPE_MDQ_BASE_MAX_INDEX 511 #define GLPE_MDQ_BASE_MDOC_INDEX_S 0 #define GLPE_MDQ_BASE_MDOC_INDEX_M MAKEMASK(0xFFFFFFF, 0) #define GLPE_MDQ_PTR(_i) (0x00537000 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLPE_MDQ_PTR_MAX_INDEX 511 #define GLPE_MDQ_PTR_MDQ_HEAD_S 0 #define GLPE_MDQ_PTR_MDQ_HEAD_M MAKEMASK(0x3FFF, 0) #define GLPE_MDQ_PTR_MDQ_TAIL_S 16 #define GLPE_MDQ_PTR_MDQ_TAIL_M MAKEMASK(0x3FFF, 16) #define GLPE_MDQ_SIZE(_i) (0x00536800 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLPE_MDQ_SIZE_MAX_INDEX 511 #define GLPE_MDQ_SIZE_MDQ_SIZE_S 0 #define GLPE_MDQ_SIZE_MDQ_SIZE_M MAKEMASK(0x3FFF, 0) #define GLPE_PEPM_CTRL 0x0050C000 /* Reset Source: PERST */ #define GLPE_PEPM_CTRL_PEPM_ENABLE_S 0 #define GLPE_PEPM_CTRL_PEPM_ENABLE_M BIT(0) #define GLPE_PEPM_CTRL_PEPM_HALT_S 8 #define GLPE_PEPM_CTRL_PEPM_HALT_M BIT(8) #define GLPE_PEPM_CTRL_PEPM_PUSH_MARGIN_S 16 #define GLPE_PEPM_CTRL_PEPM_PUSH_MARGIN_M MAKEMASK(0xFF, 16) #define GLPE_PEPM_DEALLOC 0x0050C004 /* Reset Source: PERST */ #define GLPE_PEPM_DEALLOC_MDQ_CREDITS_S 0 #define GLPE_PEPM_DEALLOC_MDQ_CREDITS_M MAKEMASK(0x3FFF, 0) #define GLPE_PEPM_DEALLOC_PSQ_CREDITS_S 14 #define GLPE_PEPM_DEALLOC_PSQ_CREDITS_M MAKEMASK(0x1F, 14) #define GLPE_PEPM_DEALLOC_PQID_S 19 #define GLPE_PEPM_DEALLOC_PQID_M MAKEMASK(0x1FF, 19) #define GLPE_PEPM_DEALLOC_PORT_S 28 #define GLPE_PEPM_DEALLOC_PORT_M MAKEMASK(0x7, 28) #define GLPE_PEPM_DEALLOC_DEALLOC_RDY_S 31 
#define GLPE_PEPM_DEALLOC_DEALLOC_RDY_M BIT(31) #define GLPE_PEPM_PSQ_COUNT 0x0050C020 /* Reset Source: PERST */ #define GLPE_PEPM_PSQ_COUNT_PEPM_PSQ_COUNT_S 0 #define GLPE_PEPM_PSQ_COUNT_PEPM_PSQ_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PEPM_THRESH(_i) (0x0050C840 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: PERST */ #define GLPE_PEPM_THRESH_MAX_INDEX 511 #define GLPE_PEPM_THRESH_PEPM_PSQ_THRESH_S 0 #define GLPE_PEPM_THRESH_PEPM_PSQ_THRESH_M MAKEMASK(0x1F, 0) #define GLPE_PEPM_THRESH_PEPM_MDQ_THRESH_S 16 #define GLPE_PEPM_THRESH_PEPM_MDQ_THRESH_M MAKEMASK(0x3FFF, 16) #define GLPE_PFAEQEDROPCNT(_i) (0x00503240 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFAEQEDROPCNT_MAX_INDEX 7 #define GLPE_PFAEQEDROPCNT_AEQEDROPCNT_S 0 #define GLPE_PFAEQEDROPCNT_AEQEDROPCNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFCEQEDROPCNT(_i) (0x00503220 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFCEQEDROPCNT_MAX_INDEX 7 #define GLPE_PFCEQEDROPCNT_CEQEDROPCNT_S 0 #define GLPE_PFCEQEDROPCNT_CEQEDROPCNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFCQEDROPCNT(_i) (0x00503200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFCQEDROPCNT_MAX_INDEX 7 #define GLPE_PFCQEDROPCNT_CQEDROPCNT_S 0 #define GLPE_PFCQEDROPCNT_CQEDROPCNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFFLMOOISCALLOCERR(_i) (0x0050B960 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFFLMOOISCALLOCERR_MAX_INDEX 7 #define GLPE_PFFLMOOISCALLOCERR_ERROR_COUNT_S 0 #define GLPE_PFFLMOOISCALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFFLMQ1ALLOCERR(_i) (0x0050B920 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFFLMQ1ALLOCERR_MAX_INDEX 7 #define GLPE_PFFLMQ1ALLOCERR_ERROR_COUNT_S 0 #define GLPE_PFFLMQ1ALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFFLMRRFALLOCERR(_i) (0x0050B940 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFFLMRRFALLOCERR_MAX_INDEX 7 #define GLPE_PFFLMRRFALLOCERR_ERROR_COUNT_S 0 
#define GLPE_PFFLMRRFALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFFLMXMITALLOCERR(_i) (0x0050B900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFFLMXMITALLOCERR_MAX_INDEX 7 #define GLPE_PFFLMXMITALLOCERR_ERROR_COUNT_S 0 #define GLPE_PFFLMXMITALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_PFTCPNOW50USCNT(_i) (0x0050B8C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPE_PFTCPNOW50USCNT_MAX_INDEX 7 #define GLPE_PFTCPNOW50USCNT_CNT_S 0 #define GLPE_PFTCPNOW50USCNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPE_PUSH_PEPM 0x0053241C /* Reset Source: CORER */ #define GLPE_PUSH_PEPM_MDQ_CREDITS_S 0 #define GLPE_PUSH_PEPM_MDQ_CREDITS_M MAKEMASK(0xFF, 0) #define GLPE_VFAEQEDROPCNT(_i) (0x00503100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFAEQEDROPCNT_MAX_INDEX 31 #define GLPE_VFAEQEDROPCNT_AEQEDROPCNT_S 0 #define GLPE_VFAEQEDROPCNT_AEQEDROPCNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFCEQEDROPCNT(_i) (0x00503080 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFCEQEDROPCNT_MAX_INDEX 31 #define GLPE_VFCEQEDROPCNT_CEQEDROPCNT_S 0 #define GLPE_VFCEQEDROPCNT_CEQEDROPCNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFCQEDROPCNT(_i) (0x00503000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFCQEDROPCNT_MAX_INDEX 31 #define GLPE_VFCQEDROPCNT_CQEDROPCNT_S 0 #define GLPE_VFCQEDROPCNT_CQEDROPCNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFFLMOOISCALLOCERR(_i) (0x0050B580 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFFLMOOISCALLOCERR_MAX_INDEX 31 #define GLPE_VFFLMOOISCALLOCERR_ERROR_COUNT_S 0 #define GLPE_VFFLMOOISCALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFFLMQ1ALLOCERR(_i) (0x0050B480 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 #define GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_S 0 #define GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFFLMRRFALLOCERR(_i) (0x0050B500 + 
((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFFLMRRFALLOCERR_MAX_INDEX 31 #define GLPE_VFFLMRRFALLOCERR_ERROR_COUNT_S 0 #define GLPE_VFFLMRRFALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFFLMXMITALLOCERR(_i) (0x0050B400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 #define GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_S 0 #define GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) #define GLPE_VFTCPNOW50USCNT(_i) (0x0050B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: PE_CORER */ #define GLPE_VFTCPNOW50USCNT_MAX_INDEX 31 #define GLPE_VFTCPNOW50USCNT_CNT_S 0 #define GLPE_VFTCPNOW50USCNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define PFPE_AEQALLOC 0x00502D00 /* Reset Source: PFR */ #define PFPE_AEQALLOC_AECOUNT_S 0 #define PFPE_AEQALLOC_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0) #define PFPE_CCQPHIGH 0x0050A100 /* Reset Source: PFR */ #define PFPE_CCQPHIGH_PECCQPHIGH_S 0 #define PFPE_CCQPHIGH_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define PFPE_CCQPLOW 0x0050A080 /* Reset Source: PFR */ #define PFPE_CCQPLOW_PECCQPLOW_S 0 #define PFPE_CCQPLOW_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0) #define PFPE_CCQPSTATUS 0x0050A000 /* Reset Source: PFR */ #define PFPE_CCQPSTATUS_CCQP_DONE_S 0 #define PFPE_CCQPSTATUS_CCQP_DONE_M BIT(0) #define PFPE_CCQPSTATUS_HMC_PROFILE_S 4 #define PFPE_CCQPSTATUS_HMC_PROFILE_M MAKEMASK(0x7, 4) #define PFPE_CCQPSTATUS_RDMA_EN_VFS_S 16 #define PFPE_CCQPSTATUS_RDMA_EN_VFS_M MAKEMASK(0x3F, 16) #define PFPE_CCQPSTATUS_CCQP_ERR_S 31 #define PFPE_CCQPSTATUS_CCQP_ERR_M BIT(31) #define PFPE_CQACK 0x00502C80 /* Reset Source: PFR */ #define PFPE_CQACK_PECQID_S 0 #define PFPE_CQACK_PECQID_M MAKEMASK(0x7FFFF, 0) #define PFPE_CQARM 0x00502C00 /* Reset Source: PFR */ #define PFPE_CQARM_PECQID_S 0 #define PFPE_CQARM_PECQID_M MAKEMASK(0x7FFFF, 0) #define PFPE_CQPDB 0x00500800 /* Reset Source: PFR */ #define PFPE_CQPDB_WQHEAD_S 0 #define PFPE_CQPDB_WQHEAD_M MAKEMASK(0x7FF, 0) #define PFPE_CQPERRCODES 
0x0050A200 /* Reset Source: PFR */ #define PFPE_CQPERRCODES_CQP_MINOR_CODE_S 0 #define PFPE_CQPERRCODES_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0) #define PFPE_CQPERRCODES_CQP_MAJOR_CODE_S 16 #define PFPE_CQPERRCODES_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16) #define PFPE_CQPTAIL 0x00500880 /* Reset Source: PFR */ #define PFPE_CQPTAIL_WQTAIL_S 0 #define PFPE_CQPTAIL_WQTAIL_M MAKEMASK(0x7FF, 0) #define PFPE_CQPTAIL_CQP_OP_ERR_S 31 #define PFPE_CQPTAIL_CQP_OP_ERR_M BIT(31) #define PFPE_IPCONFIG0 0x0050A180 /* Reset Source: PFR */ #define PFPE_IPCONFIG0_PEIPID_S 0 #define PFPE_IPCONFIG0_PEIPID_M MAKEMASK(0xFFFF, 0) #define PFPE_IPCONFIG0_USEENTIREIDRANGE_S 16 #define PFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16) #define PFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17 #define PFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17) #define PFPE_MRTEIDXMASK 0x0050A300 /* Reset Source: PFR */ #define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0 #define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0) #define E800_PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */ #define E800_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0 #define E800_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0) #define PFPE_TCPNOWTIMER 0x0050A280 /* Reset Source: PFR */ #define PFPE_TCPNOWTIMER_TCP_NOW_S 0 #define PFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0) #define PFPE_WQEALLOC 0x00504400 /* Reset Source: PFR */ #define PFPE_WQEALLOC_PEQPID_S 0 #define PFPE_WQEALLOC_PEQPID_M MAKEMASK(0x3FFFF, 0) #define PFPE_WQEALLOC_WQE_DESC_INDEX_S 20 #define PFPE_WQEALLOC_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20) #define PRT_PEPM_COUNT(_i) (0x0050C040 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: PERST */ #define PRT_PEPM_COUNT_MAX_INDEX 511 #define PRT_PEPM_COUNT_PEPM_PSQ_COUNT_S 0 #define PRT_PEPM_COUNT_PEPM_PSQ_COUNT_M MAKEMASK(0x1F, 0) #define PRT_PEPM_COUNT_PEPM_MDQ_COUNT_S 16 #define PRT_PEPM_COUNT_PEPM_MDQ_COUNT_M MAKEMASK(0x3FFF, 16) #define VFPE_AEQALLOC(_VF) (0x00502800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset 
Source: PFR */ #define VFPE_AEQALLOC_MAX_INDEX 255 #define VFPE_AEQALLOC_AECOUNT_S 0 #define VFPE_AEQALLOC_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0) #define VFPE_CCQPHIGH(_VF) (0x00508800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CCQPHIGH_MAX_INDEX 255 #define VFPE_CCQPHIGH_PECCQPHIGH_S 0 #define VFPE_CCQPHIGH_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0) #define VFPE_CCQPLOW(_VF) (0x00508400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CCQPLOW_MAX_INDEX 255 #define VFPE_CCQPLOW_PECCQPLOW_S 0 #define VFPE_CCQPLOW_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0) #define VFPE_CCQPSTATUS(_VF) (0x00508000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CCQPSTATUS_MAX_INDEX 255 #define VFPE_CCQPSTATUS_CCQP_DONE_S 0 #define VFPE_CCQPSTATUS_CCQP_DONE_M BIT(0) #define VFPE_CCQPSTATUS_HMC_PROFILE_S 4 #define VFPE_CCQPSTATUS_HMC_PROFILE_M MAKEMASK(0x7, 4) #define VFPE_CCQPSTATUS_RDMA_EN_VFS_S 16 #define VFPE_CCQPSTATUS_RDMA_EN_VFS_M MAKEMASK(0x3F, 16) #define VFPE_CCQPSTATUS_CCQP_ERR_S 31 #define VFPE_CCQPSTATUS_CCQP_ERR_M BIT(31) #define VFPE_CQACK(_VF) (0x00502400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CQACK_MAX_INDEX 255 #define VFPE_CQACK_PECQID_S 0 #define VFPE_CQACK_PECQID_M MAKEMASK(0x7FFFF, 0) #define VFPE_CQARM(_VF) (0x00502000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CQARM_MAX_INDEX 255 #define VFPE_CQARM_PECQID_S 0 #define VFPE_CQARM_PECQID_M MAKEMASK(0x7FFFF, 0) #define VFPE_CQPDB(_VF) (0x00500000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CQPDB_MAX_INDEX 255 #define VFPE_CQPDB_WQHEAD_S 0 #define VFPE_CQPDB_WQHEAD_M MAKEMASK(0x7FF, 0) #define VFPE_CQPERRCODES(_VF) (0x00509000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CQPERRCODES_MAX_INDEX 255 #define VFPE_CQPERRCODES_CQP_MINOR_CODE_S 0 #define VFPE_CQPERRCODES_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0) #define VFPE_CQPERRCODES_CQP_MAJOR_CODE_S 16 #define 
VFPE_CQPERRCODES_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16) #define VFPE_CQPTAIL(_VF) (0x00500400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_CQPTAIL_MAX_INDEX 255 #define VFPE_CQPTAIL_WQTAIL_S 0 #define VFPE_CQPTAIL_WQTAIL_M MAKEMASK(0x7FF, 0) #define VFPE_CQPTAIL_CQP_OP_ERR_S 31 #define VFPE_CQPTAIL_CQP_OP_ERR_M BIT(31) #define VFPE_IPCONFIG0(_VF) (0x00508C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_IPCONFIG0_MAX_INDEX 255 #define VFPE_IPCONFIG0_PEIPID_S 0 #define VFPE_IPCONFIG0_PEIPID_M MAKEMASK(0xFFFF, 0) #define VFPE_IPCONFIG0_USEENTIREIDRANGE_S 16 #define VFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16) #define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17 #define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17) #define E800_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define E800_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255 #define E800_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0 #define E800_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0) #define VFPE_TCPNOWTIMER(_VF) (0x00509400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_TCPNOWTIMER_MAX_INDEX 255 #define VFPE_TCPNOWTIMER_TCP_NOW_S 0 #define VFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0) #define VFPE_WQEALLOC(_VF) (0x00504000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define VFPE_WQEALLOC_MAX_INDEX 255 #define VFPE_WQEALLOC_PEQPID_S 0 #define VFPE_WQEALLOC_PEQPID_M MAKEMASK(0x3FFFF, 0) #define VFPE_WQEALLOC_WQE_DESC_INDEX_S 20 #define VFPE_WQEALLOC_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20) #define GLPES_PFIP4RXDISCARD(_i) (0x00541400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXDISCARD_MAX_INDEX 127 #define GLPES_PFIP4RXDISCARD_IP4RXDISCARD_S 0 #define GLPES_PFIP4RXDISCARD_IP4RXDISCARD_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4RXFRAGSHI(_i) (0x00541C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define 
GLPES_PFIP4RXFRAGSHI_MAX_INDEX 127 #define GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_S 0 #define GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4RXFRAGSLO(_i) (0x00541C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXFRAGSLO_MAX_INDEX 127 #define GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_S 0 #define GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4RXMCOCTSHI(_i) (0x00542404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 127 #define GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_S 0 #define GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4RXMCOCTSLO(_i) (0x00542400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 127 #define GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_S 0 #define GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4RXMCPKTSHI(_i) (0x00542C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 127 #define GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_S 0 #define GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4RXMCPKTSLO(_i) (0x00542C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 127 #define GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_S 0 #define GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4RXOCTSHI(_i) (0x00540404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXOCTSHI_MAX_INDEX 127 #define GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_S 0 #define GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4RXOCTSLO(_i) (0x00540400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXOCTSLO_MAX_INDEX 127 #define GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_S 0 #define GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4RXPKTSHI(_i) 
(0x00540C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXPKTSHI_MAX_INDEX 127 #define GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_S 0 #define GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4RXPKTSLO(_i) (0x00540C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXPKTSLO_MAX_INDEX 127 #define GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_S 0 #define GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4RXTRUNC(_i) (0x00541800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4RXTRUNC_MAX_INDEX 127 #define GLPES_PFIP4RXTRUNC_IP4RXTRUNC_S 0 #define GLPES_PFIP4RXTRUNC_IP4RXTRUNC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4TXFRAGSHI(_i) (0x00547404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXFRAGSHI_MAX_INDEX 127 #define GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_S 0 #define GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4TXFRAGSLO(_i) (0x00547400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXFRAGSLO_MAX_INDEX 127 #define GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_S 0 #define GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4TXMCOCTSHI(_i) (0x00547C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 127 #define GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_S 0 #define GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4TXMCOCTSLO(_i) (0x00547C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 127 #define GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_S 0 #define GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4TXMCPKTSHI(_i) (0x00548404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 127 #define GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_S 0 #define GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_M 
MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4TXMCPKTSLO(_i) (0x00548400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 127 #define GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_S 0 #define GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4TXNOROUTE(_i) (0x0054B400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXNOROUTE_MAX_INDEX 127 #define GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_S 0 #define GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_M MAKEMASK(0xFFFFFF, 0) #define GLPES_PFIP4TXOCTSHI(_i) (0x00546404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXOCTSHI_MAX_INDEX 127 #define GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_S 0 #define GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4TXOCTSLO(_i) (0x00546400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXOCTSLO_MAX_INDEX 127 #define GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_S 0 #define GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP4TXPKTSHI(_i) (0x00546C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXPKTSHI_MAX_INDEX 127 #define GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_S 0 #define GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP4TXPKTSLO(_i) (0x00546C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP4TXPKTSLO_MAX_INDEX 127 #define GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_S 0 #define GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXDISCARD(_i) (0x00544400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXDISCARD_MAX_INDEX 127 #define GLPES_PFIP6RXDISCARD_IP6RXDISCARD_S 0 #define GLPES_PFIP6RXDISCARD_IP6RXDISCARD_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXFRAGSHI(_i) (0x00544C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXFRAGSHI_MAX_INDEX 127 #define GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_S 0 
#define GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6RXFRAGSLO(_i) (0x00544C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXFRAGSLO_MAX_INDEX 127 #define GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_S 0 #define GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXMCOCTSHI(_i) (0x00545404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 127 #define GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_S 0 #define GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6RXMCOCTSLO(_i) (0x00545400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 127 #define GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_S 0 #define GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXMCPKTSHI(_i) (0x00545C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 127 #define GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_S 0 #define GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6RXMCPKTSLO(_i) (0x00545C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 127 #define GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_S 0 #define GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXOCTSHI(_i) (0x00543404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXOCTSHI_MAX_INDEX 127 #define GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_S 0 #define GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6RXOCTSLO(_i) (0x00543400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXOCTSLO_MAX_INDEX 127 #define GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_S 0 #define GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXPKTSHI(_i) (0x00543C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define 
GLPES_PFIP6RXPKTSHI_MAX_INDEX 127 #define GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_S 0 #define GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6RXPKTSLO(_i) (0x00543C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXPKTSLO_MAX_INDEX 127 #define GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_S 0 #define GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6RXTRUNC(_i) (0x00544800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6RXTRUNC_MAX_INDEX 127 #define GLPES_PFIP6RXTRUNC_IP6RXTRUNC_S 0 #define GLPES_PFIP6RXTRUNC_IP6RXTRUNC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6TXFRAGSHI(_i) (0x00549C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXFRAGSHI_MAX_INDEX 127 #define GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_S 0 #define GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6TXFRAGSLO(_i) (0x00549C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXFRAGSLO_MAX_INDEX 127 #define GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_S 0 #define GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6TXMCOCTSHI(_i) (0x0054A404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 127 #define GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_S 0 #define GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6TXMCOCTSLO(_i) (0x0054A400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 127 #define GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_S 0 #define GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6TXMCPKTSHI(_i) (0x0054AC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 127 #define GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_S 0 #define GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6TXMCPKTSLO(_i) (0x0054AC00 + ((_i) * 
8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 127 #define GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_S 0 #define GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6TXNOROUTE(_i) (0x0054B800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXNOROUTE_MAX_INDEX 127 #define GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_S 0 #define GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_M MAKEMASK(0xFFFFFF, 0) #define GLPES_PFIP6TXOCTSHI(_i) (0x00548C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXOCTSHI_MAX_INDEX 127 #define GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_S 0 #define GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6TXOCTSLO(_i) (0x00548C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXOCTSLO_MAX_INDEX 127 #define GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_S 0 #define GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFIP6TXPKTSHI(_i) (0x00549404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXPKTSHI_MAX_INDEX 127 #define GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_S 0 #define GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFIP6TXPKTSLO(_i) (0x00549400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFIP6TXPKTSLO_MAX_INDEX 127 #define GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_S 0 #define GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMARXRDSHI(_i) (0x0054EC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMARXRDSHI_MAX_INDEX 127 #define GLPES_PFRDMARXRDSHI_RDMARXRDSHI_S 0 #define GLPES_PFRDMARXRDSHI_RDMARXRDSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMARXRDSLO(_i) (0x0054EC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMARXRDSLO_MAX_INDEX 127 #define GLPES_PFRDMARXRDSLO_RDMARXRDSLO_S 0 #define GLPES_PFRDMARXRDSLO_RDMARXRDSLO_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLPES_PFRDMARXSNDSHI(_i) (0x0054F404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMARXSNDSHI_MAX_INDEX 127 #define GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_S 0 #define GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMARXSNDSLO(_i) (0x0054F400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMARXSNDSLO_MAX_INDEX 127 #define GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_S 0 #define GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMARXWRSHI(_i) (0x0054E404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMARXWRSHI_MAX_INDEX 127 #define GLPES_PFRDMARXWRSHI_RDMARXWRSHI_S 0 #define GLPES_PFRDMARXWRSHI_RDMARXWRSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMARXWRSLO(_i) (0x0054E400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMARXWRSLO_MAX_INDEX 127 #define GLPES_PFRDMARXWRSLO_RDMARXWRSLO_S 0 #define GLPES_PFRDMARXWRSLO_RDMARXWRSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMATXRDSHI(_i) (0x00550404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMATXRDSHI_MAX_INDEX 127 #define GLPES_PFRDMATXRDSHI_RDMARXRDSHI_S 0 #define GLPES_PFRDMATXRDSHI_RDMARXRDSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMATXRDSLO(_i) (0x00550400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMATXRDSLO_MAX_INDEX 127 #define GLPES_PFRDMATXRDSLO_RDMARXRDSLO_S 0 #define GLPES_PFRDMATXRDSLO_RDMARXRDSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMATXSNDSHI(_i) (0x00550C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMATXSNDSHI_MAX_INDEX 127 #define GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_S 0 #define GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMATXSNDSLO(_i) (0x00550C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMATXSNDSLO_MAX_INDEX 127 #define GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_S 0 #define GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_M 
MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMATXWRSHI(_i) (0x0054FC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMATXWRSHI_MAX_INDEX 127 #define GLPES_PFRDMATXWRSHI_RDMARXWRSHI_S 0 #define GLPES_PFRDMATXWRSHI_RDMARXWRSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMATXWRSLO(_i) (0x0054FC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMATXWRSLO_MAX_INDEX 127 #define GLPES_PFRDMATXWRSLO_RDMARXWRSLO_S 0 #define GLPES_PFRDMATXWRSLO_RDMARXWRSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMAVBNDHI(_i) (0x00551404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMAVBNDHI_MAX_INDEX 127 #define GLPES_PFRDMAVBNDHI_RDMAVBNDHI_S 0 #define GLPES_PFRDMAVBNDHI_RDMAVBNDHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMAVBNDLO(_i) (0x00551400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMAVBNDLO_MAX_INDEX 127 #define GLPES_PFRDMAVBNDLO_RDMAVBNDLO_S 0 #define GLPES_PFRDMAVBNDLO_RDMAVBNDLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRDMAVINVHI(_i) (0x00551C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMAVINVHI_MAX_INDEX 127 #define GLPES_PFRDMAVINVHI_RDMAVINVHI_S 0 #define GLPES_PFRDMAVINVHI_RDMAVINVHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFRDMAVINVLO(_i) (0x00551C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRDMAVINVLO_MAX_INDEX 127 #define GLPES_PFRDMAVINVLO_RDMAVINVLO_S 0 #define GLPES_PFRDMAVINVLO_RDMAVINVLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFRXVLANERR(_i) (0x00540000 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFRXVLANERR_MAX_INDEX 127 #define GLPES_PFRXVLANERR_RXVLANERR_S 0 #define GLPES_PFRXVLANERR_RXVLANERR_M MAKEMASK(0xFFFFFF, 0) #define GLPES_PFTCPRTXSEG(_i) (0x00552400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPRTXSEG_MAX_INDEX 127 #define GLPES_PFTCPRTXSEG_TCPRTXSEG_S 0 #define GLPES_PFTCPRTXSEG_TCPRTXSEG_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLPES_PFTCPRXOPTERR(_i) (0x0054C400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPRXOPTERR_MAX_INDEX 127 #define GLPES_PFTCPRXOPTERR_TCPRXOPTERR_S 0 #define GLPES_PFTCPRXOPTERR_TCPRXOPTERR_M MAKEMASK(0xFFFFFF, 0) #define GLPES_PFTCPRXPROTOERR(_i) (0x0054C800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPRXPROTOERR_MAX_INDEX 127 #define GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_S 0 #define GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_M MAKEMASK(0xFFFFFF, 0) #define GLPES_PFTCPRXSEGSHI(_i) (0x0054BC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPRXSEGSHI_MAX_INDEX 127 #define GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_S 0 #define GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFTCPRXSEGSLO(_i) (0x0054BC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPRXSEGSLO_MAX_INDEX 127 #define GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_S 0 #define GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFTCPTXSEGHI(_i) (0x0054CC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPTXSEGHI_MAX_INDEX 127 #define GLPES_PFTCPTXSEGHI_TCPTXSEGHI_S 0 #define GLPES_PFTCPTXSEGHI_TCPTXSEGHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFTCPTXSEGLO(_i) (0x0054CC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFTCPTXSEGLO_MAX_INDEX 127 #define GLPES_PFTCPTXSEGLO_TCPTXSEGLO_S 0 #define GLPES_PFTCPTXSEGLO_TCPTXSEGLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_PFUDPRXPKTSHI(_i) (0x0054D404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFUDPRXPKTSHI_MAX_INDEX 127 #define GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_S 0 #define GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFUDPRXPKTSLO(_i) (0x0054D400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFUDPRXPKTSLO_MAX_INDEX 127 #define GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_S 0 #define GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) 
#define GLPES_PFUDPTXPKTSHI(_i) (0x0054DC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFUDPTXPKTSHI_MAX_INDEX 127 #define GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_S 0 #define GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_M MAKEMASK(0xFFFF, 0) #define GLPES_PFUDPTXPKTSLO(_i) (0x0054DC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLPES_PFUDPTXPKTSLO_MAX_INDEX 127 #define GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_S 0 #define GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_RDMARXMULTFPDUSHI 0x0055E00C /* Reset Source: CORER */ #define GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_S 0 #define GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_RDMARXMULTFPDUSLO 0x0055E008 /* Reset Source: CORER */ #define GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_S 0 #define GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_RDMARXOOODDPHI 0x0055E014 /* Reset Source: CORER */ #define GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_S 0 #define GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_RDMARXOOODDPLO 0x0055E010 /* Reset Source: CORER */ #define GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_S 0 #define GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_RDMARXOOONOMARK 0x0055E004 /* Reset Source: CORER */ #define GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_S 0 #define GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_RDMARXUNALIGN 0x0055E000 /* Reset Source: CORER */ #define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0 #define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */ #define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0 #define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */ #define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0 #define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M 
MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */ #define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0 #define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPRXONEHOLELO 0x0055E020 /* Reset Source: CORER */ #define GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_S 0 #define GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPRXPUREACKHI 0x0055E01C /* Reset Source: CORER */ #define GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_S 0 #define GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPRXPUREACKSLO 0x0055E018 /* Reset Source: CORER */ #define GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_S 0 #define GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPRXTHREEHOLEHI 0x0055E034 /* Reset Source: CORER */ #define GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_S 0 #define GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPRXTHREEHOLELO 0x0055E030 /* Reset Source: CORER */ #define GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_S 0 #define GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPRXTWOHOLEHI 0x0055E02C /* Reset Source: CORER */ #define GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_S 0 #define GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPRXTWOHOLELO 0x0055E028 /* Reset Source: CORER */ #define GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_S 0 #define GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPTXRETRANSFASTHI 0x0055E044 /* Reset Source: CORER */ #define GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_S 0 #define GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPTXRETRANSFASTLO 0x0055E040 /* Reset Source: CORER */ #define GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_S 0 #define GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPTXTOUTSFASTHI 0x0055E04C /* Reset Source: CORER */ #define 
GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_S 0 #define GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPTXTOUTSFASTLO 0x0055E048 /* Reset Source: CORER */ #define GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_S 0 #define GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_M MAKEMASK(0xFFFFFFFF, 0) #define GLPES_TCPTXTOUTSHI 0x0055E054 /* Reset Source: CORER */ #define GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_S 0 #define GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_M MAKEMASK(0xFFFFFF, 0) #define GLPES_TCPTXTOUTSLO 0x0055E050 /* Reset Source: CORER */ #define GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_S 0 #define GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define GL_PWR_MODE_CTL 0x000B820C /* Reset Source: POR */ #define GL_PWR_MODE_CTL_SWITCH_PWR_MODE_EN_S 0 #define GL_PWR_MODE_CTL_SWITCH_PWR_MODE_EN_M BIT(0) #define GL_PWR_MODE_CTL_NIC_PWR_MODE_EN_S 1 #define GL_PWR_MODE_CTL_NIC_PWR_MODE_EN_M BIT(1) #define GL_PWR_MODE_CTL_S5_PWR_MODE_EN_S 2 #define GL_PWR_MODE_CTL_S5_PWR_MODE_EN_M BIT(2) #define GL_PWR_MODE_CTL_CAR_MAX_SW_CONFIG_S 3 #define GL_PWR_MODE_CTL_CAR_MAX_SW_CONFIG_M MAKEMASK(0x3, 3) #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M MAKEMASK(0x3, 30) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT 0x000B825C /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0 #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3 #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6 #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9 #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12 #define 
GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15 #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15) #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_S5_S 18 #define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT 0x000B8218 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15) #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_S5_S 18 #define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT 0x000B8260 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0 #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3 #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6 #define 
GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9 #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12 #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15 #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15) #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_S5_S 18 #define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK 0x000B8200 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK 0x000B81F0 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define 
GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM 0x000B81FC /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL 0x000B81F8 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA 0x000B8208 /* Reset Source: POR */ #define 
GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK 0x000B81F4 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK 0x000B8244 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_10G_L_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_10G_L_M 
MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK 0x000B8220 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_10G_L_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM 0x000B8240 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_10G_L_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL 0x000B823C /* Reset Source: POR */ #define 
GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_10G_L_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA 0x000B8248 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_10G_L_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK 0x000B8238 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_10G_L_S 6 #define 
GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK 0x000B8230 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_10G_M_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK 0x000B821C /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_10G_M_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM 
0x000B822C /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_10G_M_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL 0x000B8228 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_10G_M_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA 0x000B8234 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_10G_M_S 6 #define 
GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK 0x000B8224 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_10G_M_S 6 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S5_H_CTRL 0x000B81EC /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_50G_H_S 0 #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_25G_H_S 3 #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_10G_H_S 6 #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_4G_H_S 9 #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_A50G_H_S 12 #define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) #define GL_PWR_MODE_DIVIDE_S5_L_CTRL 0x000B824C /* Reset Source: POR */ #define 
GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_50G_L_S 0 #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_25G_L_S 3 #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_10G_L_S 6 #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_4G_L_S 9 #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_A50G_L_S 12 #define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) #define GL_PWR_MODE_DIVIDE_S5_M_CTRL 0x000B8250 /* Reset Source: POR */ #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_50G_M_S 0 #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_25G_M_S 3 #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_10G_M_S 6 #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_4G_M_S 9 #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_A50G_M_S 12 #define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) #define GL_S5_PWR_MODE_EXIT_CTL 0x000B8270 /* Reset Source: POR */ #define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_AUTO_EXIT_S 0 #define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_AUTO_EXIT_M BIT(0) #define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_FW_EXIT_S 1 #define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_FW_EXIT_M BIT(1) #define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_PRST_FLOWS_ON_CORER_S 3 #define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_PRST_FLOWS_ON_CORER_M BIT(3) #define GLGEN_PME_TO 0x000B81BC /* Reset Source: POR */ #define GLGEN_PME_TO_PME_TO_FOR_PE_S 0 #define GLGEN_PME_TO_PME_TO_FOR_PE_M BIT(0) #define 
PRTPM_EEE_STAT 0x001E4320 /* Reset Source: GLOBR */ #define PRTPM_EEE_STAT_EEE_NEG_S 29 #define PRTPM_EEE_STAT_EEE_NEG_M BIT(29) #define PRTPM_EEE_STAT_RX_LPI_STATUS_S 30 #define PRTPM_EEE_STAT_RX_LPI_STATUS_M BIT(30) #define PRTPM_EEE_STAT_TX_LPI_STATUS_S 31 #define PRTPM_EEE_STAT_TX_LPI_STATUS_M BIT(31) #define PRTPM_EEEC 0x001E4380 /* Reset Source: GLOBR */ #define PRTPM_EEEC_TW_WAKE_MIN_S 16 #define PRTPM_EEEC_TW_WAKE_MIN_M MAKEMASK(0x3F, 16) #define PRTPM_EEEC_TX_LU_LPI_DLY_S 24 #define PRTPM_EEEC_TX_LU_LPI_DLY_M MAKEMASK(0x3, 24) #define PRTPM_EEEC_TEEE_DLY_S 26 #define PRTPM_EEEC_TEEE_DLY_M MAKEMASK(0x3F, 26) #define PRTPM_EEEFWD 0x001E4400 /* Reset Source: GLOBR */ #define PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_S 31 #define PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_M BIT(31) #define PRTPM_EEER 0x001E4360 /* Reset Source: GLOBR */ #define PRTPM_EEER_TW_SYSTEM_S 0 #define PRTPM_EEER_TW_SYSTEM_M MAKEMASK(0xFFFF, 0) #define PRTPM_EEER_TX_LPI_EN_S 16 #define PRTPM_EEER_TX_LPI_EN_M BIT(16) #define PRTPM_EEETXC 0x001E43E0 /* Reset Source: GLOBR */ #define PRTPM_EEETXC_TW_PHY_S 0 #define PRTPM_EEETXC_TW_PHY_M MAKEMASK(0xFFFF, 0) #define PRTPM_RLPIC 0x001E43A0 /* Reset Source: GLOBR */ #define PRTPM_RLPIC_ERLPIC_S 0 #define PRTPM_RLPIC_ERLPIC_M MAKEMASK(0xFFFFFFFF, 0) #define PRTPM_TLPIC 0x001E43C0 /* Reset Source: GLOBR */ #define PRTPM_TLPIC_ETLPIC_S 0 #define PRTPM_TLPIC_ETLPIC_M MAKEMASK(0xFFFFFFFF, 0) #define GLRPB_DHW(_i) (0x000AC000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLRPB_DHW_MAX_INDEX 15 #define GLRPB_DHW_DHW_TCN_S 0 #define GLRPB_DHW_DHW_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GLRPB_DHW_DHW_TCN_M : E800_GLRPB_DHW_DHW_TCN_M) #define E800_GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_DHW_DHW_TCN_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_DLW(_i) (0x000AC044 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLRPB_DLW_MAX_INDEX 15 #define GLRPB_DLW_DLW_TCN_S 0 #define GLRPB_DLW_DLW_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DLW_DLW_TCN_M : E800_GLRPB_DLW_DLW_TCN_M) #define E800_GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_DLW_DLW_TCN_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_DPS(_i) (0x000AC084 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLRPB_DPS_MAX_INDEX 15 #define GLRPB_DPS_DPS_TCN_S 0 #define GLRPB_DPS_DPS_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DPS_DPS_TCN_M : E800_GLRPB_DPS_DPS_TCN_M) #define E800_GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_DPS_DPS_TCN_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_DSI_EN 0x000AC324 /* Reset Source: CORER */ #define GLRPB_DSI_EN_DSI_EN_S 0 #define GLRPB_DSI_EN_DSI_EN_M BIT(0) #define GLRPB_DSI_EN_DSI_L2_MAC_ERR_DROP_EN_S 1 #define GLRPB_DSI_EN_DSI_L2_MAC_ERR_DROP_EN_M BIT(1) #define GLRPB_SHW(_i) (0x000AC120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRPB_SHW_MAX_INDEX 7 #define GLRPB_SHW_SHW_S 0 #define GLRPB_SHW_SHW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SHW_SHW_M : E800_GLRPB_SHW_SHW_M) #define E800_GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_SHW_SHW_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_SLW(_i) (0x000AC140 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRPB_SLW_MAX_INDEX 7 #define GLRPB_SLW_SLW_S 0 #define GLRPB_SLW_SLW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GLRPB_SLW_SLW_M : E800_GLRPB_SLW_SLW_M) #define E800_GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_SLW_SLW_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_SPS(_i) (0x000AC0C4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRPB_SPS_MAX_INDEX 7 #define GLRPB_SPS_SPS_TCN_S 0 #define GLRPB_SPS_SPS_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SPS_SPS_TCN_M : E800_GLRPB_SPS_SPS_TCN_M) #define E800_GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_SPS_SPS_TCN_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_TC_CFG(_i) (0x000AC2A4 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLRPB_TC_CFG_MAX_INDEX 31 #define GLRPB_TC_CFG_D_POOL_S 0 #define GLRPB_TC_CFG_D_POOL_M MAKEMASK(0xFFFF, 0) #define GLRPB_TC_CFG_S_POOL_S 16 #define GLRPB_TC_CFG_S_POOL_M MAKEMASK(0xFFFF, 16) #define GLRPB_TCHW(_i) (0x000AC330 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLRPB_TCHW_MAX_INDEX 31 #define GLRPB_TCHW_TCHW_S 0 #define GLRPB_TCHW_TCHW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_TCHW_TCHW_M : E800_GLRPB_TCHW_TCHW_M) #define E800_GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_TCHW_TCHW_M MAKEMASK(0x3FFFFF, 0) #define GLRPB_TCLW(_i) (0x000AC3B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLRPB_TCLW_MAX_INDEX 31 #define GLRPB_TCLW_TCLW_S 0 #define GLRPB_TCLW_TCLW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GLRPB_TCLW_TCLW_M : E800_GLRPB_TCLW_TCLW_M) #define E800_GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0) #define E830_GLRPB_TCLW_TCLW_M MAKEMASK(0x3FFFFF, 0) #define GLQF_APBVT(_i) (0x00450000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define GLQF_APBVT_MAX_INDEX 2047 #define GLQF_APBVT_APBVT_S 0 #define GLQF_APBVT_APBVT_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_FD_CLSN_0 0x00460028 /* Reset Source: CORER */ #define GLQF_FD_CLSN_0_HITSBCNT_S 0 #define GLQF_FD_CLSN_0_HITSBCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_FD_CLSN1 0x00460030 /* Reset Source: CORER */ #define GLQF_FD_CLSN1_HITLBCNT_S 0 #define GLQF_FD_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_FD_CNT 0x00460018 /* Reset Source: CORER */ #define GLQF_FD_CNT_FD_GCNT_S 0 #define GLQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_CNT_FD_GCNT_M : E800_GLQF_FD_CNT_FD_GCNT_M) #define E800_GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0) #define E830_GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0) #define GLQF_FD_CNT_FD_BCNT_S 16 #define GLQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_CNT_FD_BCNT_M : E800_GLQF_FD_CNT_FD_BCNT_M) #define E800_GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16) #define E830_GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16) #define GLQF_FD_CTL 0x00460000 /* Reset Source: CORER */ #define GLQF_FD_CTL_FDLONG_S 0 #define GLQF_FD_CTL_FDLONG_M MAKEMASK(0xF, 0) #define GLQF_FD_CTL_HASH_REPORT_S 4 #define GLQF_FD_CTL_HASH_REPORT_M BIT(4) #define GLQF_FD_CTL_FLT_ADDR_REPORT_S 5 #define GLQF_FD_CTL_FLT_ADDR_REPORT_M BIT(5) #define GLQF_FD_SIZE 0x00460010 /* Reset Source: CORER */ #define GLQF_FD_SIZE_FD_GSIZE_S 0 #define GLQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_GLQF_FD_SIZE_FD_GSIZE_M : E800_GLQF_FD_SIZE_FD_GSIZE_M) #define E800_GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0) #define E830_GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0) #define GLQF_FD_SIZE_FD_BSIZE_S 16 #define GLQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_SIZE_FD_BSIZE_M : E800_GLQF_FD_SIZE_FD_BSIZE_M) #define E800_GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16) #define E830_GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16) #define GLQF_FDCNT_0 0x00460020 /* Reset Source: CORER */ #define GLQF_FDCNT_0_BUCKETCNT_S 0 #define GLQF_FDCNT_0_BUCKETCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FDCNT_0_BUCKETCNT_M : E800_GLQF_FDCNT_0_BUCKETCNT_M) #define E800_GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0) #define E830_GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0xFFFF, 0) #define GLQF_FDCNT_0_CNT_NOT_VLD_S 31 #define GLQF_FDCNT_0_CNT_NOT_VLD_M BIT(31) #define GLQF_FDEVICTENA(_i) (0x00452000 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLQF_FDEVICTENA_MAX_INDEX 3 #define GLQF_FDEVICTENA_FDEVICTENA_S 0 #define GLQF_FDEVICTENA_FDEVICTENA_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ #define GLQF_FDINSET_MAX_INDEX 127 #define GLQF_FDINSET_FV_WORD_INDX0_S 0 #define GLQF_FDINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) #define GLQF_FDINSET_FV_WORD_VAL0_S 7 #define GLQF_FDINSET_FV_WORD_VAL0_M BIT(7) #define GLQF_FDINSET_FV_WORD_INDX1_S 8 #define GLQF_FDINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) #define GLQF_FDINSET_FV_WORD_VAL1_S 15 #define GLQF_FDINSET_FV_WORD_VAL1_M BIT(15) #define GLQF_FDINSET_FV_WORD_INDX2_S 16 #define GLQF_FDINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) #define GLQF_FDINSET_FV_WORD_VAL2_S 23 #define GLQF_FDINSET_FV_WORD_VAL2_M BIT(23) #define GLQF_FDINSET_FV_WORD_INDX3_S 24 #define GLQF_FDINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) #define GLQF_FDINSET_FV_WORD_VAL3_S 31 #define GLQF_FDINSET_FV_WORD_VAL3_M BIT(31) #define 
GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLQF_FDMASK_MAX_INDEX 31 #define GLQF_FDMASK_MSK_INDEX_S 0 #define GLQF_FDMASK_MSK_INDEX_M MAKEMASK(0x1F, 0) #define GLQF_FDMASK_MASK_S 16 #define GLQF_FDMASK_MASK_M MAKEMASK(0xFFFF, 16) #define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLQF_FDMASK_SEL_MAX_INDEX 127 #define GLQF_FDMASK_SEL_MASK_SEL_S 0 #define GLQF_FDMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ #define GLQF_FDSWAP_MAX_INDEX 127 #define GLQF_FDSWAP_FV_WORD_INDX0_S 0 #define GLQF_FDSWAP_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) #define GLQF_FDSWAP_FV_WORD_VAL0_S 7 #define GLQF_FDSWAP_FV_WORD_VAL0_M BIT(7) #define GLQF_FDSWAP_FV_WORD_INDX1_S 8 #define GLQF_FDSWAP_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) #define GLQF_FDSWAP_FV_WORD_VAL1_S 15 #define GLQF_FDSWAP_FV_WORD_VAL1_M BIT(15) #define GLQF_FDSWAP_FV_WORD_INDX2_S 16 #define GLQF_FDSWAP_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) #define GLQF_FDSWAP_FV_WORD_VAL2_S 23 #define GLQF_FDSWAP_FV_WORD_VAL2_M BIT(23) #define GLQF_FDSWAP_FV_WORD_INDX3_S 24 #define GLQF_FDSWAP_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) #define GLQF_FDSWAP_FV_WORD_VAL3_S 31 #define GLQF_FDSWAP_FV_WORD_VAL3_M BIT(31) #define GLQF_HINSET(_i, _j) (0x0040E000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ #define GLQF_HINSET_MAX_INDEX 127 #define GLQF_HINSET_FV_WORD_INDX0_S 0 #define GLQF_HINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) #define GLQF_HINSET_FV_WORD_VAL0_S 7 #define GLQF_HINSET_FV_WORD_VAL0_M BIT(7) #define GLQF_HINSET_FV_WORD_INDX1_S 8 #define GLQF_HINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) #define GLQF_HINSET_FV_WORD_VAL1_S 15 #define GLQF_HINSET_FV_WORD_VAL1_M BIT(15) #define GLQF_HINSET_FV_WORD_INDX2_S 16 #define GLQF_HINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) #define GLQF_HINSET_FV_WORD_VAL2_S 23 #define 
GLQF_HINSET_FV_WORD_VAL2_M BIT(23) #define GLQF_HINSET_FV_WORD_INDX3_S 24 #define GLQF_HINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) #define GLQF_HINSET_FV_WORD_VAL3_S 31 #define GLQF_HINSET_FV_WORD_VAL3_M BIT(31) #define GLQF_HKEY(_i) (0x00456000 + ((_i) * 4)) /* _i=0...12 */ /* Reset Source: CORER */ #define GLQF_HKEY_MAX_INDEX 12 #define GLQF_HKEY_KEY_0_S 0 #define GLQF_HKEY_KEY_0_M MAKEMASK(0xFF, 0) #define GLQF_HKEY_KEY_1_S 8 #define GLQF_HKEY_KEY_1_M MAKEMASK(0xFF, 8) #define GLQF_HKEY_KEY_2_S 16 #define GLQF_HKEY_KEY_2_M MAKEMASK(0xFF, 16) #define GLQF_HKEY_KEY_3_S 24 #define GLQF_HKEY_KEY_3_M MAKEMASK(0xFF, 24) #define GLQF_HLUT(_i, _j) (0x00438000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...15 */ /* Reset Source: CORER */ #define GLQF_HLUT_MAX_INDEX 127 #define GLQF_HLUT_LUT0_S 0 #define GLQF_HLUT_LUT0_M MAKEMASK(0x3F, 0) #define GLQF_HLUT_LUT1_S 8 #define GLQF_HLUT_LUT1_M MAKEMASK(0x3F, 8) #define GLQF_HLUT_LUT2_S 16 #define GLQF_HLUT_LUT2_M MAKEMASK(0x3F, 16) #define GLQF_HLUT_LUT3_S 24 #define GLQF_HLUT_LUT3_M MAKEMASK(0x3F, 24) #define GLQF_HLUT_SIZE(_i) (0x00455400 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLQF_HLUT_SIZE_MAX_INDEX 15 #define GLQF_HLUT_SIZE_HSIZE_S 0 #define GLQF_HLUT_SIZE_HSIZE_M BIT(0) #define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLQF_HMASK_MAX_INDEX 31 #define GLQF_HMASK_MSK_INDEX_S 0 #define GLQF_HMASK_MSK_INDEX_M MAKEMASK(0x1F, 0) #define GLQF_HMASK_MASK_S 16 #define GLQF_HMASK_MASK_M MAKEMASK(0xFFFF, 16) #define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define GLQF_HMASK_SEL_MAX_INDEX 127 #define GLQF_HMASK_SEL_MASK_SEL_S 0 #define GLQF_HMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ #define GLQF_HSYMM_MAX_INDEX 127 #define GLQF_HSYMM_FV_SYMM_INDX0_S 0 #define GLQF_HSYMM_FV_SYMM_INDX0_M 
MAKEMASK(0x1F, 0) #define GLQF_HSYMM_SYMM0_ENA_S 7 #define GLQF_HSYMM_SYMM0_ENA_M BIT(7) #define GLQF_HSYMM_FV_SYMM_INDX1_S 8 #define GLQF_HSYMM_FV_SYMM_INDX1_M MAKEMASK(0x1F, 8) #define GLQF_HSYMM_SYMM1_ENA_S 15 #define GLQF_HSYMM_SYMM1_ENA_M BIT(15) #define GLQF_HSYMM_FV_SYMM_INDX2_S 16 #define GLQF_HSYMM_FV_SYMM_INDX2_M MAKEMASK(0x1F, 16) #define GLQF_HSYMM_SYMM2_ENA_S 23 #define GLQF_HSYMM_SYMM2_ENA_M BIT(23) #define GLQF_HSYMM_FV_SYMM_INDX3_S 24 #define GLQF_HSYMM_FV_SYMM_INDX3_M MAKEMASK(0x1F, 24) #define GLQF_HSYMM_SYMM3_ENA_S 31 #define GLQF_HSYMM_SYMM3_ENA_M BIT(31) #define GLQF_PE_APBVT_CNT 0x00455500 /* Reset Source: CORER */ #define GLQF_PE_APBVT_CNT_APBVT_LAN_S 0 #define GLQF_PE_APBVT_CNT_APBVT_LAN_M MAKEMASK(0xFFFFFFFF, 0) #define GLQF_PE_CMD 0x00471080 /* Reset Source: CORER */ #define GLQF_PE_CMD_ADDREM_STS_S 0 #define GLQF_PE_CMD_ADDREM_STS_M MAKEMASK(0xFFFFFF, 0) #define GLQF_PE_CMD_ADDREM_ID_S 28 #define GLQF_PE_CMD_ADDREM_ID_M MAKEMASK(0xF, 28) #define GLQF_PE_CTL 0x004710C0 /* Reset Source: CORER */ #define GLQF_PE_CTL_PELONG_S 0 #define GLQF_PE_CTL_PELONG_M MAKEMASK(0xF, 0) #define GLQF_PE_CTL2(_i) (0x00455200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLQF_PE_CTL2_MAX_INDEX 31 #define GLQF_PE_CTL2_TO_QH_S 0 #define GLQF_PE_CTL2_TO_QH_M MAKEMASK(0x3, 0) #define GLQF_PE_CTL2_APBVT_ENA_S 2 #define GLQF_PE_CTL2_APBVT_ENA_M BIT(2) #define GLQF_PE_FVE 0x0020E514 /* Reset Source: CORER */ #define GLQF_PE_FVE_W_ENA_S 0 #define GLQF_PE_FVE_W_ENA_M MAKEMASK(0xFFFFFF, 0) #define GLQF_PE_OSR_STS 0x00471040 /* Reset Source: CORER */ #define GLQF_PE_OSR_STS_QH_SRCH_MAXOSR_S 0 #define GLQF_PE_OSR_STS_QH_SRCH_MAXOSR_M MAKEMASK(0x3FF, 0) #define GLQF_PE_OSR_STS_QH_CMD_MAXOSR_S 16 #define GLQF_PE_OSR_STS_QH_CMD_MAXOSR_M MAKEMASK(0x3FF, 16) #define GLQF_PEINSET(_i, _j) (0x00415000 + ((_i) * 4 + (_j) * 128)) /* _i=0...31, _j=0...5 */ /* Reset Source: CORER */ #define GLQF_PEINSET_MAX_INDEX 31 #define GLQF_PEINSET_FV_WORD_INDX0_S 0 #define 
GLQF_PEINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) #define GLQF_PEINSET_FV_WORD_VAL0_S 7 #define GLQF_PEINSET_FV_WORD_VAL0_M BIT(7) #define GLQF_PEINSET_FV_WORD_INDX1_S 8 #define GLQF_PEINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) #define GLQF_PEINSET_FV_WORD_VAL1_S 15 #define GLQF_PEINSET_FV_WORD_VAL1_M BIT(15) #define GLQF_PEINSET_FV_WORD_INDX2_S 16 #define GLQF_PEINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) #define GLQF_PEINSET_FV_WORD_VAL2_S 23 #define GLQF_PEINSET_FV_WORD_VAL2_M BIT(23) #define GLQF_PEINSET_FV_WORD_INDX3_S 24 #define GLQF_PEINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) #define GLQF_PEINSET_FV_WORD_VAL3_S 31 #define GLQF_PEINSET_FV_WORD_VAL3_M BIT(31) #define GLQF_PEMASK(_i) (0x00415400 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLQF_PEMASK_MAX_INDEX 15 #define GLQF_PEMASK_MSK_INDEX_S 0 #define GLQF_PEMASK_MSK_INDEX_M MAKEMASK(0x1F, 0) #define GLQF_PEMASK_MASK_S 16 #define GLQF_PEMASK_MASK_M MAKEMASK(0xFFFF, 16) #define GLQF_PEMASK_SEL(_i) (0x00415500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLQF_PEMASK_SEL_MAX_INDEX 31 #define GLQF_PEMASK_SEL_MASK_SEL_S 0 #define GLQF_PEMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFF, 0) #define GLQF_PETABLE_CLR(_i) (0x000AA078 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLQF_PETABLE_CLR_MAX_INDEX 1 #define GLQF_PETABLE_CLR_VM_VF_NUM_S 0 #define GLQF_PETABLE_CLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0) #define GLQF_PETABLE_CLR_VM_VF_TYPE_S 10 #define GLQF_PETABLE_CLR_VM_VF_TYPE_M MAKEMASK(0x3, 10) #define GLQF_PETABLE_CLR_PF_NUM_S 12 #define GLQF_PETABLE_CLR_PF_NUM_M MAKEMASK(0x7, 12) #define GLQF_PETABLE_CLR_PE_BUSY_S 16 #define GLQF_PETABLE_CLR_PE_BUSY_M BIT(16) #define GLQF_PETABLE_CLR_PE_CLEAR_S 17 #define GLQF_PETABLE_CLR_PE_CLEAR_M BIT(17) #define GLQF_PROF2TC(_i, _j) (0x0044D000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...3 */ /* Reset Source: CORER */ #define GLQF_PROF2TC_MAX_INDEX 127 #define GLQF_PROF2TC_OVERRIDE_ENA_0_S 0 #define GLQF_PROF2TC_OVERRIDE_ENA_0_M BIT(0) 
#define GLQF_PROF2TC_REGION_0_S 1 #define GLQF_PROF2TC_REGION_0_M MAKEMASK(0x7, 1) #define GLQF_PROF2TC_OVERRIDE_ENA_1_S 4 #define GLQF_PROF2TC_OVERRIDE_ENA_1_M BIT(4) #define GLQF_PROF2TC_REGION_1_S 5 #define GLQF_PROF2TC_REGION_1_M MAKEMASK(0x7, 5) #define GLQF_PROF2TC_OVERRIDE_ENA_2_S 8 #define GLQF_PROF2TC_OVERRIDE_ENA_2_M BIT(8) #define GLQF_PROF2TC_REGION_2_S 9 #define GLQF_PROF2TC_REGION_2_M MAKEMASK(0x7, 9) #define GLQF_PROF2TC_OVERRIDE_ENA_3_S 12 #define GLQF_PROF2TC_OVERRIDE_ENA_3_M BIT(12) #define GLQF_PROF2TC_REGION_3_S 13 #define GLQF_PROF2TC_REGION_3_M MAKEMASK(0x7, 13) #define GLQF_PROF2TC_OVERRIDE_ENA_4_S 16 #define GLQF_PROF2TC_OVERRIDE_ENA_4_M BIT(16) #define GLQF_PROF2TC_REGION_4_S 17 #define GLQF_PROF2TC_REGION_4_M MAKEMASK(0x7, 17) #define GLQF_PROF2TC_OVERRIDE_ENA_5_S 20 #define GLQF_PROF2TC_OVERRIDE_ENA_5_M BIT(20) #define GLQF_PROF2TC_REGION_5_S 21 #define GLQF_PROF2TC_REGION_5_M MAKEMASK(0x7, 21) #define GLQF_PROF2TC_OVERRIDE_ENA_6_S 24 #define GLQF_PROF2TC_OVERRIDE_ENA_6_M BIT(24) #define GLQF_PROF2TC_REGION_6_S 25 #define GLQF_PROF2TC_REGION_6_M MAKEMASK(0x7, 25) #define GLQF_PROF2TC_OVERRIDE_ENA_7_S 28 #define GLQF_PROF2TC_OVERRIDE_ENA_7_M BIT(28) #define GLQF_PROF2TC_REGION_7_S 29 #define GLQF_PROF2TC_REGION_7_M MAKEMASK(0x7, 29) #define PFQF_FD_CNT 0x00460180 /* Reset Source: CORER */ #define PFQF_FD_CNT_FD_GCNT_S 0 #define PFQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_CNT_FD_GCNT_M : E800_PFQF_FD_CNT_FD_GCNT_M) #define E800_PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0) #define E830_PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0) #define PFQF_FD_CNT_FD_BCNT_S 16 #define PFQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PFQF_FD_CNT_FD_BCNT_M : E800_PFQF_FD_CNT_FD_BCNT_M) #define E800_PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16) #define E830_PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16) #define PFQF_FD_ENA 0x0043A000 /* Reset Source: CORER */ #define PFQF_FD_ENA_FD_ENA_S 0 #define PFQF_FD_ENA_FD_ENA_M BIT(0) #define PFQF_FD_SIZE 0x00460100 /* Reset Source: CORER */ #define PFQF_FD_SIZE_FD_GSIZE_S 0 #define PFQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SIZE_FD_GSIZE_M : E800_PFQF_FD_SIZE_FD_GSIZE_M) #define E800_PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0) #define E830_PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0) #define PFQF_FD_SIZE_FD_BSIZE_S 16 #define PFQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SIZE_FD_BSIZE_M : E800_PFQF_FD_SIZE_FD_BSIZE_M) #define E800_PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16) #define E830_PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16) #define PFQF_FD_SUBTRACT 0x00460200 /* Reset Source: CORER */ #define PFQF_FD_SUBTRACT_FD_GCNT_S 0 #define PFQF_FD_SUBTRACT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SUBTRACT_FD_GCNT_M : E800_PFQF_FD_SUBTRACT_FD_GCNT_M) #define E800_PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0) #define E830_PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0xFFFF, 0) #define PFQF_FD_SUBTRACT_FD_BCNT_S 16 #define PFQF_FD_SUBTRACT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PFQF_FD_SUBTRACT_FD_BCNT_M : E800_PFQF_FD_SUBTRACT_FD_BCNT_M) #define E800_PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16) #define E830_PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0xFFFF, 16) #define PFQF_HLUT(_i) (0x00430000 + ((_i) * 64)) /* _i=0...511 */ /* Reset Source: CORER */ #define PFQF_HLUT_MAX_INDEX 511 #define PFQF_HLUT_LUT0_S 0 #define PFQF_HLUT_LUT0_M MAKEMASK(0xFF, 0) #define PFQF_HLUT_LUT1_S 8 #define PFQF_HLUT_LUT1_M MAKEMASK(0xFF, 8) #define PFQF_HLUT_LUT2_S 16 #define PFQF_HLUT_LUT2_M MAKEMASK(0xFF, 16) #define PFQF_HLUT_LUT3_S 24 #define PFQF_HLUT_LUT3_M MAKEMASK(0xFF, 24) #define PFQF_HLUT_SIZE 0x00455480 /* Reset Source: CORER */ #define PFQF_HLUT_SIZE_HSIZE_S 0 #define PFQF_HLUT_SIZE_HSIZE_M MAKEMASK(0x3, 0) #define PFQF_PE_CLSN0 0x00470480 /* Reset Source: CORER */ #define PFQF_PE_CLSN0_HITSBCNT_S 0 #define PFQF_PE_CLSN0_HITSBCNT_M MAKEMASK(0xFFFFFFFF, 0) #define PFQF_PE_CLSN1 0x00470500 /* Reset Source: CORER */ #define PFQF_PE_CLSN1_HITLBCNT_S 0 #define PFQF_PE_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0) #define PFQF_PE_CTL1 0x00470000 /* Reset Source: CORER */ #define PFQF_PE_CTL1_PEHSIZE_S 0 #define PFQF_PE_CTL1_PEHSIZE_M MAKEMASK(0xF, 0) #define PFQF_PE_CTL2 0x00470040 /* Reset Source: CORER */ #define PFQF_PE_CTL2_PEDSIZE_S 0 #define PFQF_PE_CTL2_PEDSIZE_M MAKEMASK(0xF, 0) #define PFQF_PE_FILTERING_ENA 0x0043A080 /* Reset Source: CORER */ #define PFQF_PE_FILTERING_ENA_PE_ENA_S 0 #define PFQF_PE_FILTERING_ENA_PE_ENA_M BIT(0) #define PFQF_PE_FLHD 0x00470100 /* Reset Source: CORER */ #define PFQF_PE_FLHD_FLHD_S 0 #define PFQF_PE_FLHD_FLHD_M MAKEMASK(0xFFFFFF, 0) #define PFQF_PE_ST_CTL 0x00470400 /* Reset Source: CORER */ #define PFQF_PE_ST_CTL_PF_CNT_EN_S 0 #define PFQF_PE_ST_CTL_PF_CNT_EN_M BIT(0) #define PFQF_PE_ST_CTL_VFS_CNT_EN_S 1 #define PFQF_PE_ST_CTL_VFS_CNT_EN_M BIT(1) #define PFQF_PE_ST_CTL_VF_CNT_EN_S 2 #define PFQF_PE_ST_CTL_VF_CNT_EN_M BIT(2) #define PFQF_PE_ST_CTL_VF_NUM_S 16 #define PFQF_PE_ST_CTL_VF_NUM_M MAKEMASK(0xFF, 16) 
#define PFQF_PE_TC_CTL 0x00452080 /* Reset Source: CORER */ #define PFQF_PE_TC_CTL_TC_EN_PF_S 0 #define PFQF_PE_TC_CTL_TC_EN_PF_M MAKEMASK(0xFF, 0) #define PFQF_PE_TC_CTL_TC_EN_VF_S 16 #define PFQF_PE_TC_CTL_TC_EN_VF_M MAKEMASK(0xFF, 16) #define PFQF_PECNT_0 0x00470200 /* Reset Source: CORER */ #define PFQF_PECNT_0_BUCKETCNT_S 0 #define PFQF_PECNT_0_BUCKETCNT_M MAKEMASK(0x3FFFF, 0) #define PFQF_PECNT_1 0x00470300 /* Reset Source: CORER */ #define PFQF_PECNT_1_FLTCNT_S 0 #define PFQF_PECNT_1_FLTCNT_M MAKEMASK(0x3FFFF, 0) #define VPQF_PE_CTL1(_VF) (0x00474000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPQF_PE_CTL1_MAX_INDEX 255 #define VPQF_PE_CTL1_PEHSIZE_S 0 #define VPQF_PE_CTL1_PEHSIZE_M MAKEMASK(0xF, 0) #define VPQF_PE_CTL2(_VF) (0x00474800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPQF_PE_CTL2_MAX_INDEX 255 #define VPQF_PE_CTL2_PEDSIZE_S 0 #define VPQF_PE_CTL2_PEDSIZE_M MAKEMASK(0xF, 0) #define VPQF_PE_FILTERING_ENA(_VF) (0x00455800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPQF_PE_FILTERING_ENA_MAX_INDEX 255 #define VPQF_PE_FILTERING_ENA_PE_ENA_S 0 #define VPQF_PE_FILTERING_ENA_PE_ENA_M BIT(0) #define VPQF_PE_FLHD(_VF) (0x00472000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPQF_PE_FLHD_MAX_INDEX 255 #define VPQF_PE_FLHD_FLHD_S 0 #define VPQF_PE_FLHD_FLHD_M MAKEMASK(0xFFFFFF, 0) #define VPQF_PECNT_0(_VF) (0x00472800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPQF_PECNT_0_MAX_INDEX 255 #define VPQF_PECNT_0_BUCKETCNT_S 0 #define VPQF_PECNT_0_BUCKETCNT_M MAKEMASK(0x3FFFF, 0) #define VPQF_PECNT_1(_VF) (0x00473000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define VPQF_PECNT_1_MAX_INDEX 255 #define VPQF_PECNT_1_FLTCNT_S 0 #define VPQF_PECNT_1_FLTCNT_M MAKEMASK(0x3FFFF, 0) #define GLDCB_RMPMC 0x001223C8 /* Reset Source: CORER */ #define GLDCB_RMPMC_RSPM_S 0 #define GLDCB_RMPMC_RSPM_M MAKEMASK(0x3F, 0) #define GLDCB_RMPMC_MIQ_NODROP_MODE_S 
6 #define GLDCB_RMPMC_MIQ_NODROP_MODE_M MAKEMASK(0x1F, 6) #define GLDCB_RMPMC_RPM_DIS_S 31 #define GLDCB_RMPMC_RPM_DIS_M BIT(31) #define GLDCB_RMPMS 0x001223CC /* Reset Source: CORER */ #define GLDCB_RMPMS_RMPM_S 0 #define GLDCB_RMPMS_RMPM_M MAKEMASK(0xFFFF, 0) #define GLDCB_RPCC 0x00122260 /* Reset Source: CORER */ #define GLDCB_RPCC_EN_S 0 #define GLDCB_RPCC_EN_M BIT(0) #define GLDCB_RPCC_SCL_FACT_S 4 #define GLDCB_RPCC_SCL_FACT_M MAKEMASK(0x1F, 4) #define GLDCB_RPCC_THRSH_S 16 #define GLDCB_RPCC_THRSH_M MAKEMASK(0xFFF, 16) #define GLDCB_RSPMC 0x001223C4 /* Reset Source: CORER */ #define GLDCB_RSPMC_RSPM_S 0 #define GLDCB_RSPMC_RSPM_M MAKEMASK(0xFF, 0) #define GLDCB_RSPMC_RPM_MODE_S 8 #define GLDCB_RSPMC_RPM_MODE_M MAKEMASK(0x3, 8) #define GLDCB_RSPMC_PRR_MAX_EXP_S 10 #define GLDCB_RSPMC_PRR_MAX_EXP_M MAKEMASK(0xF, 10) #define GLDCB_RSPMC_PFCTIMER_S 14 #define GLDCB_RSPMC_PFCTIMER_M MAKEMASK(0x3FFF, 14) #define GLDCB_RSPMC_RPM_DIS_S 31 #define GLDCB_RSPMC_RPM_DIS_M BIT(31) #define GLDCB_RSPMS 0x001223C0 /* Reset Source: CORER */ #define GLDCB_RSPMS_RSPM_S 0 #define GLDCB_RSPMS_RSPM_M MAKEMASK(0x3FFFF, 0) #define GLDCB_RTCTI 0x001223D0 /* Reset Source: CORER */ #define GLDCB_RTCTI_PFCTIMEOUT_TC_S 0 #define GLDCB_RTCTI_PFCTIMEOUT_TC_M MAKEMASK(0xFFFFFFFF, 0) #define GLDCB_RTCTQ(_i) (0x001222C0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLDCB_RTCTQ_MAX_INDEX 31 #define GLDCB_RTCTQ_RXQNUM_S 0 #define GLDCB_RTCTQ_RXQNUM_M MAKEMASK(0x7FF, 0) #define GLDCB_RTCTQ_IS_PF_Q_S 16 #define GLDCB_RTCTQ_IS_PF_Q_M BIT(16) #define GLDCB_RTCTS(_i) (0x00122340 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLDCB_RTCTS_MAX_INDEX 31 #define GLDCB_RTCTS_PFCTIMER_S 0 #define GLDCB_RTCTS_PFCTIMER_M MAKEMASK(0x3FFF, 0) #define GLRCB_CFG_COTF_CNT(_i) (0x001223D4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRCB_CFG_COTF_CNT_MAX_INDEX 7 #define GLRCB_CFG_COTF_CNT_MRKR_COTF_CNT_S 0 #define GLRCB_CFG_COTF_CNT_MRKR_COTF_CNT_M 
MAKEMASK(0x3F, 0) #define GLRCB_CFG_COTF_ST 0x001223F4 /* Reset Source: CORER */ #define GLRCB_CFG_COTF_ST_MRKR_COTF_ST_S 0 #define GLRCB_CFG_COTF_ST_MRKR_COTF_ST_M MAKEMASK(0xFF, 0) #define GLRPRS_PMCFG_DHW(_i) (0x00200388 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_DHW_MAX_INDEX 15 #define GLRPRS_PMCFG_DHW_DHW_S 0 #define GLRPRS_PMCFG_DHW_DHW_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_DLW(_i) (0x002003C8 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_DLW_MAX_INDEX 15 #define GLRPRS_PMCFG_DLW_DLW_S 0 #define GLRPRS_PMCFG_DLW_DLW_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_DPS(_i) (0x00200308 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_DPS_MAX_INDEX 15 #define GLRPRS_PMCFG_DPS_DPS_S 0 #define GLRPRS_PMCFG_DPS_DPS_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_SHW(_i) (0x00200448 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_SHW_MAX_INDEX 7 #define GLRPRS_PMCFG_SHW_SHW_S 0 #define GLRPRS_PMCFG_SHW_SHW_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_SLW(_i) (0x00200468 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_SLW_MAX_INDEX 7 #define GLRPRS_PMCFG_SLW_SLW_S 0 #define GLRPRS_PMCFG_SLW_SLW_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_SPS(_i) (0x00200408 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_SPS_MAX_INDEX 7 #define GLRPRS_PMCFG_SPS_SPS_S 0 #define GLRPRS_PMCFG_SPS_SPS_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_TC_CFG(_i) (0x00200488 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_TC_CFG_MAX_INDEX 31 #define GLRPRS_PMCFG_TC_CFG_D_POOL_S 0 #define GLRPRS_PMCFG_TC_CFG_D_POOL_M MAKEMASK(0xF, 0) #define GLRPRS_PMCFG_TC_CFG_S_POOL_S 16 #define GLRPRS_PMCFG_TC_CFG_S_POOL_M MAKEMASK(0x7, 16) #define GLRPRS_PMCFG_TCHW(_i) (0x00200588 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_TCHW_MAX_INDEX 31 #define GLRPRS_PMCFG_TCHW_TCHW_S 0 
#define GLRPRS_PMCFG_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0) #define GLRPRS_PMCFG_TCLW(_i) (0x00200608 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLRPRS_PMCFG_TCLW_MAX_INDEX 31 #define GLRPRS_PMCFG_TCLW_TCLW_S 0 #define GLRPRS_PMCFG_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0) #define GLSWT_PMCFG_TC_CFG(_i) (0x00204900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSWT_PMCFG_TC_CFG_MAX_INDEX 31 #define GLSWT_PMCFG_TC_CFG_D_POOL_S 0 #define GLSWT_PMCFG_TC_CFG_D_POOL_M MAKEMASK(0xF, 0) #define GLSWT_PMCFG_TC_CFG_S_POOL_S 16 #define GLSWT_PMCFG_TC_CFG_S_POOL_M MAKEMASK(0x7, 16) #define PRTDCB_RLANPMS 0x00122280 /* Reset Source: CORER */ #define PRTDCB_RLANPMS_LANRPPM_S 0 #define PRTDCB_RLANPMS_LANRPPM_M MAKEMASK(0x3FFFF, 0) #define PRTDCB_RPPMC 0x00122240 /* Reset Source: CORER */ #define PRTDCB_RPPMC_LANRPPM_S 0 #define PRTDCB_RPPMC_LANRPPM_M MAKEMASK(0xFF, 0) #define PRTDCB_RPPMC_RDMARPPM_S 8 #define PRTDCB_RPPMC_RDMARPPM_M MAKEMASK(0xFF, 8) #define PRTDCB_RRDMAPMS 0x00122120 /* Reset Source: CORER */ #define PRTDCB_RRDMAPMS_RDMARPPM_S 0 #define PRTDCB_RRDMAPMS_RDMARPPM_M MAKEMASK(0x3FFFF, 0) #define GL_STAT_SWR_BPCH(_i) (0x00347804 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_BPCH_MAX_INDEX 127 #define GL_STAT_SWR_BPCH_VLBPCH_S 0 #define GL_STAT_SWR_BPCH_VLBPCH_M MAKEMASK(0xFF, 0) #define GL_STAT_SWR_BPCL(_i) (0x00347800 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_BPCL_MAX_INDEX 127 #define GL_STAT_SWR_BPCL_VLBPCL_S 0 #define GL_STAT_SWR_BPCL_VLBPCL_M MAKEMASK(0xFFFFFFFF, 0) #define GL_STAT_SWR_GORCH(_i) (0x00342004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_GORCH_MAX_INDEX 127 #define GL_STAT_SWR_GORCH_VLBCH_S 0 #define GL_STAT_SWR_GORCH_VLBCH_M MAKEMASK(0xFF, 0) #define GL_STAT_SWR_GORCL(_i) (0x00342000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_GORCL_MAX_INDEX 127 #define GL_STAT_SWR_GORCL_VLBCL_S 0 #define 
GL_STAT_SWR_GORCL_VLBCL_M MAKEMASK(0xFFFFFFFF, 0) #define GL_STAT_SWR_GOTCH(_i) (0x00304004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_GOTCH_MAX_INDEX 127 #define GL_STAT_SWR_GOTCH_VLBCH_S 0 #define GL_STAT_SWR_GOTCH_VLBCH_M MAKEMASK(0xFF, 0) #define GL_STAT_SWR_GOTCL(_i) (0x00304000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_GOTCL_MAX_INDEX 127 #define GL_STAT_SWR_GOTCL_VLBCL_S 0 #define GL_STAT_SWR_GOTCL_VLBCL_M MAKEMASK(0xFFFFFFFF, 0) #define GL_STAT_SWR_MPCH(_i) (0x00347404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_MPCH_MAX_INDEX 127 #define GL_STAT_SWR_MPCH_VLMPCH_S 0 #define GL_STAT_SWR_MPCH_VLMPCH_M MAKEMASK(0xFF, 0) #define GL_STAT_SWR_MPCL(_i) (0x00347400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_MPCL_MAX_INDEX 127 #define GL_STAT_SWR_MPCL_VLMPCL_S 0 #define GL_STAT_SWR_MPCL_VLMPCL_M MAKEMASK(0xFFFFFFFF, 0) #define GL_STAT_SWR_UPCH(_i) (0x00347004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_UPCH_MAX_INDEX 127 #define GL_STAT_SWR_UPCH_VLUPCH_S 0 #define GL_STAT_SWR_UPCH_VLUPCH_M MAKEMASK(0xFF, 0) #define GL_STAT_SWR_UPCL(_i) (0x00347000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define GL_STAT_SWR_UPCL_MAX_INDEX 127 #define GL_STAT_SWR_UPCL_VLUPCL_S 0 #define GL_STAT_SWR_UPCL_VLUPCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_AORCL(_i) (0x003812C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_AORCL_MAX_INDEX 7 #define GLPRT_AORCL_AORCL_S 0 #define GLPRT_AORCL_AORCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_BPRCH_MAX_INDEX 7 #define E800_GLPRT_BPRCH_UPRCH_S 0 #define E800_GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_BPRCL_MAX_INDEX 7 #define E800_GLPRT_BPRCL_UPRCH_S 0 
#define E800_GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_BPTCH_MAX_INDEX 7 #define E800_GLPRT_BPTCH_UPRCH_S 0 #define E800_GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0) #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_BPTCL_MAX_INDEX 7 #define E800_GLPRT_BPTCL_UPRCH_S 0 #define E800_GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_CRCERRS_MAX_INDEX 7 #define GLPRT_CRCERRS_CRCERRS_S 0 #define GLPRT_CRCERRS_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_CRCERRS_H(_i) (0x00380104 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_CRCERRS_H_MAX_INDEX 7 #define GLPRT_CRCERRS_H_CRCERRS_S 0 #define GLPRT_CRCERRS_H_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_GORCH_MAX_INDEX 7 #define GLPRT_GORCH_GORCH_S 0 #define GLPRT_GORCH_GORCH_M MAKEMASK(0xFF, 0) #define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_GORCL_MAX_INDEX 7 #define GLPRT_GORCL_GORCL_S 0 #define GLPRT_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_GOTCH_MAX_INDEX 7 #define GLPRT_GOTCH_GOTCH_S 0 #define GLPRT_GOTCH_GOTCH_M MAKEMASK(0xFF, 0) #define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_GOTCL_MAX_INDEX 7 #define GLPRT_GOTCL_GOTCL_S 0 #define GLPRT_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_ILLERRC_MAX_INDEX 7 #define GLPRT_ILLERRC_ILLERRC_S 0 #define GLPRT_ILLERRC_ILLERRC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_ILLERRC_H(_i) (0x003801C4 + ((_i) * 
8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_ILLERRC_H_MAX_INDEX 7 #define GLPRT_ILLERRC_H_ILLERRC_S 0 #define GLPRT_ILLERRC_H_ILLERRC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXOFFRXC_MAX_INDEX 7 #define GLPRT_LXOFFRXC_LXOFFRXCNT_S 0 #define GLPRT_LXOFFRXC_LXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXOFFRXC_H(_i) (0x003802C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXOFFRXC_H_MAX_INDEX 7 #define GLPRT_LXOFFRXC_H_LXOFFRXCNT_S 0 #define GLPRT_LXOFFRXC_H_LXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXOFFTXC_MAX_INDEX 7 #define GLPRT_LXOFFTXC_LXOFFTXC_S 0 #define GLPRT_LXOFFTXC_LXOFFTXC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXOFFTXC_H(_i) (0x00381184 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXOFFTXC_H_MAX_INDEX 7 #define GLPRT_LXOFFTXC_H_LXOFFTXC_S 0 #define GLPRT_LXOFFTXC_H_LXOFFTXC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXONRXC_MAX_INDEX 7 #define GLPRT_LXONRXC_LXONRXCNT_S 0 #define GLPRT_LXONRXC_LXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXONRXC_H(_i) (0x00380284 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXONRXC_H_MAX_INDEX 7 #define GLPRT_LXONRXC_H_LXONRXCNT_S 0 #define GLPRT_LXONRXC_H_LXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXONTXC_MAX_INDEX 7 #define GLPRT_LXONTXC_LXONTXC_S 0 #define GLPRT_LXONTXC_LXONTXC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_LXONTXC_H(_i) (0x00381144 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_LXONTXC_H_MAX_INDEX 7 #define GLPRT_LXONTXC_H_LXONTXC_S 0 #define GLPRT_LXONTXC_H_LXONTXC_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MLFC_MAX_INDEX 7 #define GLPRT_MLFC_MLFC_S 0 #define GLPRT_MLFC_MLFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_MLFC_H(_i) (0x00380044 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MLFC_H_MAX_INDEX 7 #define GLPRT_MLFC_H_MLFC_S 0 #define GLPRT_MLFC_H_MLFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MPRCH_MAX_INDEX 7 #define GLPRT_MPRCH_MPRCH_S 0 #define GLPRT_MPRCH_MPRCH_M MAKEMASK(0xFF, 0) #define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MPRCL_MAX_INDEX 7 #define GLPRT_MPRCL_MPRCL_S 0 #define GLPRT_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MPTCH_MAX_INDEX 7 #define GLPRT_MPTCH_MPTCH_S 0 #define GLPRT_MPTCH_MPTCH_M MAKEMASK(0xFF, 0) #define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MPTCL_MAX_INDEX 7 #define GLPRT_MPTCL_MPTCL_S 0 #define GLPRT_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MRFC_MAX_INDEX 7 #define GLPRT_MRFC_MRFC_S 0 #define GLPRT_MRFC_MRFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_MRFC_H(_i) (0x00380084 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_MRFC_H_MAX_INDEX 7 #define GLPRT_MRFC_H_MRFC_S 0 #define GLPRT_MRFC_H_MRFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC1023H_MAX_INDEX 7 #define GLPRT_PRC1023H_PRC1023H_S 0 #define GLPRT_PRC1023H_PRC1023H_M MAKEMASK(0xFF, 0) #define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC1023L_MAX_INDEX 7 #define GLPRT_PRC1023L_PRC1023L_S 0 
#define GLPRT_PRC1023L_PRC1023L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC127H_MAX_INDEX 7 #define GLPRT_PRC127H_PRC127H_S 0 #define GLPRT_PRC127H_PRC127H_M MAKEMASK(0xFF, 0) #define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC127L_MAX_INDEX 7 #define GLPRT_PRC127L_PRC127L_S 0 #define GLPRT_PRC127L_PRC127L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC1522H_MAX_INDEX 7 #define GLPRT_PRC1522H_PRC1522H_S 0 #define GLPRT_PRC1522H_PRC1522H_M MAKEMASK(0xFF, 0) #define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC1522L_MAX_INDEX 7 #define GLPRT_PRC1522L_PRC1522L_S 0 #define GLPRT_PRC1522L_PRC1522L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC255H_MAX_INDEX 7 #define GLPRT_PRC255H_PRTPRC255H_S 0 #define GLPRT_PRC255H_PRTPRC255H_M MAKEMASK(0xFF, 0) #define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC255L_MAX_INDEX 7 #define GLPRT_PRC255L_PRC255L_S 0 #define GLPRT_PRC255L_PRC255L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC511H_MAX_INDEX 7 #define GLPRT_PRC511H_PRC511H_S 0 #define GLPRT_PRC511H_PRC511H_M MAKEMASK(0xFF, 0) #define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC511L_MAX_INDEX 7 #define GLPRT_PRC511L_PRC511L_S 0 #define GLPRT_PRC511L_PRC511L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC64H_MAX_INDEX 7 #define GLPRT_PRC64H_PRC64H_S 0 #define GLPRT_PRC64H_PRC64H_M MAKEMASK(0xFF, 0) 
#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC64L_MAX_INDEX 7 #define GLPRT_PRC64L_PRC64L_S 0 #define GLPRT_PRC64L_PRC64L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC9522H_MAX_INDEX 7 #define GLPRT_PRC9522H_PRC1522H_S 0 #define GLPRT_PRC9522H_PRC1522H_M MAKEMASK(0xFF, 0) #define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PRC9522L_MAX_INDEX 7 #define GLPRT_PRC9522L_PRC1522L_S 0 #define GLPRT_PRC9522L_PRC1522L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC1023H_MAX_INDEX 7 #define GLPRT_PTC1023H_PTC1023H_S 0 #define GLPRT_PTC1023H_PTC1023H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC1023L_MAX_INDEX 7 #define GLPRT_PTC1023L_PTC1023L_S 0 #define GLPRT_PTC1023L_PTC1023L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC127H_MAX_INDEX 7 #define GLPRT_PTC127H_PTC127H_S 0 #define GLPRT_PTC127H_PTC127H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC127L_MAX_INDEX 7 #define GLPRT_PTC127L_PTC127L_S 0 #define GLPRT_PTC127L_PTC127L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC1522H_MAX_INDEX 7 #define GLPRT_PTC1522H_PTC1522H_S 0 #define GLPRT_PTC1522H_PTC1522H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC1522L_MAX_INDEX 7 #define GLPRT_PTC1522L_PTC1522L_S 0 #define GLPRT_PTC1522L_PTC1522L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC255H(_i) (0x00380C04 + 
((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC255H_MAX_INDEX 7 #define GLPRT_PTC255H_PTC255H_S 0 #define GLPRT_PTC255H_PTC255H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC255L_MAX_INDEX 7 #define GLPRT_PTC255L_PTC255L_S 0 #define GLPRT_PTC255L_PTC255L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC511H_MAX_INDEX 7 #define GLPRT_PTC511H_PTC511H_S 0 #define GLPRT_PTC511H_PTC511H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC511L_MAX_INDEX 7 #define GLPRT_PTC511L_PTC511L_S 0 #define GLPRT_PTC511L_PTC511L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC64H_MAX_INDEX 7 #define GLPRT_PTC64H_PTC64H_S 0 #define GLPRT_PTC64H_PTC64H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC64L_MAX_INDEX 7 #define GLPRT_PTC64L_PTC64L_S 0 #define GLPRT_PTC64L_PTC64L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC9522H_MAX_INDEX 7 #define GLPRT_PTC9522H_PTC9522H_S 0 #define GLPRT_PTC9522H_PTC9522H_M MAKEMASK(0xFF, 0) #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_PTC9522L_MAX_INDEX 7 #define GLPRT_PTC9522L_PTC9522L_S 0 #define GLPRT_PTC9522L_PTC9522L_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXOFFRXC_MAX_INDEX 7 #define GLPRT_PXOFFRXC_PRPXOFFRXCNT_S 0 #define GLPRT_PXOFFRXC_PRPXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXOFFRXC_H(_i, _j) (0x00380504 + ((_i) * 8 + (_j) * 64)) 
/* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXOFFRXC_H_MAX_INDEX 7 #define GLPRT_PXOFFRXC_H_PRPXOFFRXCNT_S 0 #define GLPRT_PXOFFRXC_H_PRPXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXOFFTXC_MAX_INDEX 7 #define GLPRT_PXOFFTXC_PRPXOFFTXCNT_S 0 #define GLPRT_PXOFFTXC_PRPXOFFTXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXOFFTXC_H(_i, _j) (0x00380F44 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXOFFTXC_H_MAX_INDEX 7 #define GLPRT_PXOFFTXC_H_PRPXOFFTXCNT_S 0 #define GLPRT_PXOFFTXC_H_PRPXOFFTXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXONRXC_MAX_INDEX 7 #define GLPRT_PXONRXC_PRPXONRXCNT_S 0 #define GLPRT_PXONRXC_PRPXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXONRXC_H(_i, _j) (0x00380304 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXONRXC_H_MAX_INDEX 7 #define GLPRT_PXONRXC_H_PRPXONRXCNT_S 0 #define GLPRT_PXONRXC_H_PRPXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXONTXC_MAX_INDEX 7 #define GLPRT_PXONTXC_PRPXONTXC_S 0 #define GLPRT_PXONTXC_PRPXONTXC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_PXONTXC_H(_i, _j) (0x00380D44 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_PXONTXC_H_MAX_INDEX 7 #define GLPRT_PXONTXC_H_PRPXONTXC_S 0 #define GLPRT_PXONTXC_H_PRPXONTXC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RFC_MAX_INDEX 7 #define GLPRT_RFC_RFC_S 0 #define GLPRT_RFC_RFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RFC_H(_i) (0x00380AC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset 
Source: CORER */ #define GLPRT_RFC_H_MAX_INDEX 7 #define GLPRT_RFC_H_RFC_S 0 #define GLPRT_RFC_H_RFC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RJC_MAX_INDEX 7 #define GLPRT_RJC_RJC_S 0 #define GLPRT_RJC_RJC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RJC_H(_i) (0x00380B04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RJC_H_MAX_INDEX 7 #define GLPRT_RJC_H_RJC_S 0 #define GLPRT_RJC_H_RJC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RLEC_MAX_INDEX 7 #define GLPRT_RLEC_RLEC_S 0 #define GLPRT_RLEC_RLEC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RLEC_H(_i) (0x00380144 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RLEC_H_MAX_INDEX 7 #define GLPRT_RLEC_H_RLEC_S 0 #define GLPRT_RLEC_H_RLEC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_ROC_MAX_INDEX 7 #define GLPRT_ROC_ROC_S 0 #define GLPRT_ROC_ROC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_ROC_H(_i) (0x00380244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_ROC_H_MAX_INDEX 7 #define GLPRT_ROC_H_ROC_S 0 #define GLPRT_ROC_H_ROC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RUC_MAX_INDEX 7 #define GLPRT_RUC_RUC_S 0 #define GLPRT_RUC_RUC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RUC_H(_i) (0x00380204 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_RUC_H_MAX_INDEX 7 #define GLPRT_RUC_H_RUC_S 0 #define GLPRT_RUC_H_RUC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_RXON2OFFCNT_MAX_INDEX 7 #define GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_S 0 #define GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLPRT_RXON2OFFCNT_H(_i, _j) (0x00380704 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ #define GLPRT_RXON2OFFCNT_H_MAX_INDEX 7 #define GLPRT_RXON2OFFCNT_H_PRRXON2OFFCNT_S 0 #define GLPRT_RXON2OFFCNT_H_PRRXON2OFFCNT_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_STDC(_i) (0x00340000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_STDC_MAX_INDEX 7 #define GLPRT_STDC_STDC_S 0 #define GLPRT_STDC_STDC_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_TDOLD_MAX_INDEX 7 #define GLPRT_TDOLD_GLPRT_TDOLD_S 0 #define GLPRT_TDOLD_GLPRT_TDOLD_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_TDOLD_H(_i) (0x00381284 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_TDOLD_H_MAX_INDEX 7 #define GLPRT_TDOLD_H_GLPRT_TDOLD_S 0 #define GLPRT_TDOLD_H_GLPRT_TDOLD_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_UPRCH_MAX_INDEX 7 #define GLPRT_UPRCH_UPRCH_S 0 #define GLPRT_UPRCH_UPRCH_M MAKEMASK(0xFF, 0) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_UPRCL_MAX_INDEX 7 #define GLPRT_UPRCL_UPRCL_S 0 #define GLPRT_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_UPTCH_MAX_INDEX 7 #define GLPRT_UPTCH_UPTCH_S 0 #define GLPRT_UPTCH_UPTCH_M MAKEMASK(0xFF, 0) #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ #define GLPRT_UPTCL_MAX_INDEX 7 #define E800_GLPRT_UPTCL_VUPTCH_S 0 #define E800_GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0) #define GLSTAT_ACL_CNT_0_H(_i) (0x00388004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_0_H_MAX_INDEX 511 #define GLSTAT_ACL_CNT_0_H_CNT_MSB_S 0 #define GLSTAT_ACL_CNT_0_H_CNT_MSB_M MAKEMASK(0xFF, 0) #define GLSTAT_ACL_CNT_0_L(_i) 
(0x00388000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_0_L_MAX_INDEX 511 #define GLSTAT_ACL_CNT_0_L_CNT_LSB_S 0 #define GLSTAT_ACL_CNT_0_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) #define GLSTAT_ACL_CNT_1_H(_i) (0x00389004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_1_H_MAX_INDEX 511 #define GLSTAT_ACL_CNT_1_H_CNT_MSB_S 0 #define GLSTAT_ACL_CNT_1_H_CNT_MSB_M MAKEMASK(0xFF, 0) #define GLSTAT_ACL_CNT_1_L(_i) (0x00389000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_1_L_MAX_INDEX 511 #define GLSTAT_ACL_CNT_1_L_CNT_LSB_S 0 #define GLSTAT_ACL_CNT_1_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) #define GLSTAT_ACL_CNT_2_H(_i) (0x0038A004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_2_H_MAX_INDEX 511 #define GLSTAT_ACL_CNT_2_H_CNT_MSB_S 0 #define GLSTAT_ACL_CNT_2_H_CNT_MSB_M MAKEMASK(0xFF, 0) #define GLSTAT_ACL_CNT_2_L(_i) (0x0038A000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_2_L_MAX_INDEX 511 #define GLSTAT_ACL_CNT_2_L_CNT_LSB_S 0 #define GLSTAT_ACL_CNT_2_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) #define GLSTAT_ACL_CNT_3_H(_i) (0x0038B004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_3_H_MAX_INDEX 511 #define GLSTAT_ACL_CNT_3_H_CNT_MSB_S 0 #define GLSTAT_ACL_CNT_3_H_CNT_MSB_M MAKEMASK(0xFF, 0) #define GLSTAT_ACL_CNT_3_L(_i) (0x0038B000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ #define GLSTAT_ACL_CNT_3_L_MAX_INDEX 511 #define GLSTAT_ACL_CNT_3_L_CNT_LSB_S 0 #define GLSTAT_ACL_CNT_3_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) #define GLSTAT_FD_CNT0H(_i) (0x003A0004 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ #define GLSTAT_FD_CNT0H_MAX_INDEX 4095 #define GLSTAT_FD_CNT0H_FD0_CNT_H_S 0 #define GLSTAT_FD_CNT0H_FD0_CNT_H_M MAKEMASK(0xFF, 0) #define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ #define GLSTAT_FD_CNT0L_MAX_INDEX 
4095 #define GLSTAT_FD_CNT0L_FD0_CNT_L_S 0 #define GLSTAT_FD_CNT0L_FD0_CNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLSTAT_FD_CNT1H(_i) (0x003A8004 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ #define GLSTAT_FD_CNT1H_MAX_INDEX 4095 #define GLSTAT_FD_CNT1H_FD0_CNT_H_S 0 #define GLSTAT_FD_CNT1H_FD0_CNT_H_M MAKEMASK(0xFF, 0) #define GLSTAT_FD_CNT1L(_i) (0x003A8000 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ #define GLSTAT_FD_CNT1L_MAX_INDEX 4095 #define GLSTAT_FD_CNT1L_FD0_CNT_L_S 0 #define GLSTAT_FD_CNT1L_FD0_CNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_BPRCH(_i) (0x00346204 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_BPRCH_MAX_INDEX 31 #define GLSW_BPRCH_BPRCH_S 0 #define GLSW_BPRCH_BPRCH_M MAKEMASK(0xFF, 0) #define GLSW_BPRCL(_i) (0x00346200 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_BPRCL_MAX_INDEX 31 #define GLSW_BPRCL_BPRCL_S 0 #define GLSW_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_BPTCH(_i) (0x00310204 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_BPTCH_MAX_INDEX 31 #define GLSW_BPTCH_BPTCH_S 0 #define GLSW_BPTCH_BPTCH_M MAKEMASK(0xFF, 0) #define GLSW_BPTCL(_i) (0x00310200 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_BPTCL_MAX_INDEX 31 #define GLSW_BPTCL_BPTCL_S 0 #define GLSW_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_GORCH(_i) (0x00341004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_GORCH_MAX_INDEX 31 #define GLSW_GORCH_GORCH_S 0 #define GLSW_GORCH_GORCH_M MAKEMASK(0xFF, 0) #define GLSW_GORCL(_i) (0x00341000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_GORCL_MAX_INDEX 31 #define GLSW_GORCL_GORCL_S 0 #define GLSW_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_GOTCH(_i) (0x00302004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_GOTCH_MAX_INDEX 31 #define GLSW_GOTCH_GOTCH_S 0 #define GLSW_GOTCH_GOTCH_M MAKEMASK(0xFF, 0) #define GLSW_GOTCL(_i) (0x00302000 
+ ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_GOTCL_MAX_INDEX 31 #define GLSW_GOTCL_GOTCL_S 0 #define GLSW_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_MPRCH(_i) (0x00346104 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_MPRCH_MAX_INDEX 31 #define GLSW_MPRCH_MPRCH_S 0 #define GLSW_MPRCH_MPRCH_M MAKEMASK(0xFF, 0) #define GLSW_MPRCL(_i) (0x00346100 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_MPRCL_MAX_INDEX 31 #define GLSW_MPRCL_MPRCL_S 0 #define GLSW_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_MPTCH(_i) (0x00310104 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_MPTCH_MAX_INDEX 31 #define GLSW_MPTCH_MPTCH_S 0 #define GLSW_MPTCH_MPTCH_M MAKEMASK(0xFF, 0) #define GLSW_MPTCL(_i) (0x00310100 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_MPTCL_MAX_INDEX 31 #define GLSW_MPTCL_MPTCL_S 0 #define GLSW_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_UPRCH(_i) (0x00346004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_UPRCH_MAX_INDEX 31 #define GLSW_UPRCH_UPRCH_S 0 #define GLSW_UPRCH_UPRCH_M MAKEMASK(0xFF, 0) #define GLSW_UPRCL(_i) (0x00346000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_UPRCL_MAX_INDEX 31 #define GLSW_UPRCL_UPRCL_S 0 #define GLSW_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSW_UPTCH(_i) (0x00310004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_UPTCH_MAX_INDEX 31 #define GLSW_UPTCH_UPTCH_S 0 #define GLSW_UPTCH_UPTCH_M MAKEMASK(0xFF, 0) #define GLSW_UPTCL(_i) (0x00310000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ #define GLSW_UPTCL_MAX_INDEX 31 #define GLSW_UPTCL_UPTCL_S 0 #define GLSW_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLSWID_RUPP(_i) (0x00345000 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define GLSWID_RUPP_MAX_INDEX 255 #define GLSWID_RUPP_RUPP_S 0 #define GLSWID_RUPP_RUPP_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_BPRCH(_i) 
(0x003B6004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_BPRCH_MAX_INDEX 767 #define GLV_BPRCH_BPRCH_S 0 #define GLV_BPRCH_BPRCH_M MAKEMASK(0xFF, 0) #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_BPRCL_MAX_INDEX 767 #define GLV_BPRCL_BPRCL_S 0 #define GLV_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_BPTCH_MAX_INDEX 767 #define GLV_BPTCH_BPTCH_S 0 #define GLV_BPTCH_BPTCH_M MAKEMASK(0xFF, 0) #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_BPTCL_MAX_INDEX 767 #define GLV_BPTCL_BPTCL_S 0 #define GLV_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_GORCH_MAX_INDEX 767 #define GLV_GORCH_GORCH_S 0 #define GLV_GORCH_GORCH_M MAKEMASK(0xFF, 0) #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_GORCL_MAX_INDEX 767 #define GLV_GORCL_GORCL_S 0 #define GLV_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_GOTCH_MAX_INDEX 767 #define GLV_GOTCH_GOTCH_S 0 #define GLV_GOTCH_GOTCH_M MAKEMASK(0xFF, 0) #define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_GOTCL_MAX_INDEX 767 #define GLV_GOTCL_GOTCL_S 0 #define GLV_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_MPRCH_MAX_INDEX 767 #define GLV_MPRCH_MPRCH_S 0 #define GLV_MPRCH_MPRCH_M MAKEMASK(0xFF, 0) #define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_MPRCL_MAX_INDEX 767 #define GLV_MPRCL_MPRCL_S 0 #define GLV_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_MPTCH(_i) (0x0030C004 + ((_i) 
* 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_MPTCH_MAX_INDEX 767 #define GLV_MPTCH_MPTCH_S 0 #define GLV_MPTCH_MPTCH_M MAKEMASK(0xFF, 0) #define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_MPTCL_MAX_INDEX 767 #define GLV_MPTCL_MPTCL_S 0 #define GLV_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_RDPC_MAX_INDEX 767 #define GLV_RDPC_RDPC_S 0 #define GLV_RDPC_RDPC_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_REPC(_i) (0x00295804 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_REPC_MAX_INDEX 767 #define GLV_REPC_NO_DESC_CNT_S 0 #define GLV_REPC_NO_DESC_CNT_M MAKEMASK(0xFFFF, 0) #define GLV_REPC_ERROR_CNT_S 16 #define GLV_REPC_ERROR_CNT_M MAKEMASK(0xFFFF, 16) #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_TEPC_MAX_INDEX 767 #define GLV_TEPC_TEPC_S 0 #define GLV_TEPC_TEPC_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_UPRCH_MAX_INDEX 767 #define GLV_UPRCH_UPRCH_S 0 #define GLV_UPRCH_UPRCH_M MAKEMASK(0xFF, 0) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_UPRCL_MAX_INDEX 767 #define GLV_UPRCL_UPRCL_S 0 #define GLV_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_UPTCH_MAX_INDEX 767 #define GLV_UPTCH_GLVUPTCH_S 0 #define GLV_UPTCH_GLVUPTCH_M MAKEMASK(0xFF, 0) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ #define GLV_UPTCL_MAX_INDEX 767 #define GLV_UPTCL_UPTCL_S 0 #define GLV_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLVEBUP_RBCH(_i, _j) (0x00343004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_RBCH_MAX_INDEX 7 #define 
GLVEBUP_RBCH_UPBCH_S 0 #define GLVEBUP_RBCH_UPBCH_M MAKEMASK(0xFF, 0) #define GLVEBUP_RBCL(_i, _j) (0x00343000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_RBCL_MAX_INDEX 7 #define GLVEBUP_RBCL_UPBCL_S 0 #define GLVEBUP_RBCL_UPBCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLVEBUP_RPCH(_i, _j) (0x00344004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_RPCH_MAX_INDEX 7 #define GLVEBUP_RPCH_UPPCH_S 0 #define GLVEBUP_RPCH_UPPCH_M MAKEMASK(0xFF, 0) #define GLVEBUP_RPCL(_i, _j) (0x00344000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_RPCL_MAX_INDEX 7 #define GLVEBUP_RPCL_UPPCL_S 0 #define GLVEBUP_RPCL_UPPCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLVEBUP_TBCH(_i, _j) (0x00306004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_TBCH_MAX_INDEX 7 #define GLVEBUP_TBCH_UPBCH_S 0 #define GLVEBUP_TBCH_UPBCH_M MAKEMASK(0xFF, 0) #define GLVEBUP_TBCL(_i, _j) (0x00306000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_TBCL_MAX_INDEX 7 #define GLVEBUP_TBCL_UPBCL_S 0 #define GLVEBUP_TBCL_UPBCL_M MAKEMASK(0xFFFFFFFF, 0) #define GLVEBUP_TPCH(_i, _j) (0x00308004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_TPCH_MAX_INDEX 7 #define GLVEBUP_TPCH_UPPCH_S 0 #define GLVEBUP_TPCH_UPPCH_M MAKEMASK(0xFF, 0) #define GLVEBUP_TPCL(_i, _j) (0x00308000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ #define GLVEBUP_TPCL_MAX_INDEX 7 #define GLVEBUP_TPCL_UPPCL_S 0 #define GLVEBUP_TPCL_UPPCL_M MAKEMASK(0xFFFFFFFF, 0) #define PRTRPB_LDPC 0x000AC280 /* Reset Source: CORER */ #define PRTRPB_LDPC_CRCERRS_S 0 #define PRTRPB_LDPC_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0) #define PRTRPB_RDPC 0x000AC260 /* Reset Source: CORER */ #define PRTRPB_RDPC_CRCERRS_S 0 #define PRTRPB_RDPC_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0) #define 
PRTTPB_STAT_TC_BYTES_SENTL(_i) (0x00098200 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define PRTTPB_STAT_TC_BYTES_SENTL_MAX_INDEX 63 #define PRTTPB_STAT_TC_BYTES_SENTL_TCCNT_S 0 #define PRTTPB_STAT_TC_BYTES_SENTL_TCCNT_M MAKEMASK(0xFFFFFFFF, 0) #define TPB_PRTTPB_STAT_PKT_SENT(_i) (0x00099470 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define TPB_PRTTPB_STAT_PKT_SENT_MAX_INDEX 7 #define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0 #define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M MAKEMASK(0xFFFFFFFF, 0) #define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63 #define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0 #define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M MAKEMASK(0xFFFFFFFF, 0) #define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */ #define EMP_SWT_PRUNIND_OPCODE_S 0 #define EMP_SWT_PRUNIND_OPCODE_M MAKEMASK(0xF, 0) #define EMP_SWT_PRUNIND_LIST_INDEX_NUM_S 4 #define EMP_SWT_PRUNIND_LIST_INDEX_NUM_M MAKEMASK(0x3FF, 4) #define EMP_SWT_PRUNIND_VSI_NUM_S 16 #define EMP_SWT_PRUNIND_VSI_NUM_M MAKEMASK(0x3FF, 16) #define EMP_SWT_PRUNIND_BIT_VALUE_S 31 #define EMP_SWT_PRUNIND_BIT_VALUE_M BIT(31) #define EMP_SWT_REPIND 0x0020401C /* Reset Source: CORER */ #define EMP_SWT_REPIND_OPCODE_S 0 #define EMP_SWT_REPIND_OPCODE_M MAKEMASK(0xF, 0) #define EMP_SWT_REPIND_LIST_INDEX_NUMBER_S 4 #define EMP_SWT_REPIND_LIST_INDEX_NUMBER_M MAKEMASK(0x3FF, 4) #define EMP_SWT_REPIND_VSI_NUM_S 16 #define EMP_SWT_REPIND_VSI_NUM_M MAKEMASK(0x3FF, 16) #define EMP_SWT_REPIND_BIT_VALUE_S 31 #define EMP_SWT_REPIND_BIT_VALUE_M BIT(31) #define GL_OVERRIDEC 0x002040A4 /* Reset Source: CORER */ #define GL_OVERRIDEC_OVERRIDE_ATTEMPTC_S 0 #define GL_OVERRIDEC_OVERRIDE_ATTEMPTC_M MAKEMASK(0xFFFF, 0) #define GL_OVERRIDEC_LAST_VSI_S 16 #define GL_OVERRIDEC_LAST_VSI_M MAKEMASK(0x3FF, 16) #define GL_PLG_AVG_CALC_CFG 0x0020A5AC /* Reset Source: CORER */ #define GL_PLG_AVG_CALC_CFG_CYCLE_LEN_S 0 
#define GL_PLG_AVG_CALC_CFG_CYCLE_LEN_M MAKEMASK(0x7FFFFFFF, 0) #define GL_PLG_AVG_CALC_CFG_MODE_S 31 #define GL_PLG_AVG_CALC_CFG_MODE_M BIT(31) #define GL_PLG_AVG_CALC_ST 0x0020A5B0 /* Reset Source: CORER */ #define GL_PLG_AVG_CALC_ST_IN_DATA_S 0 #define GL_PLG_AVG_CALC_ST_IN_DATA_M MAKEMASK(0x7FFF, 0) #define GL_PLG_AVG_CALC_ST_OUT_DATA_S 16 #define GL_PLG_AVG_CALC_ST_OUT_DATA_M MAKEMASK(0x7FFF, 16) #define GL_PLG_AVG_CALC_ST_VALID_S 31 #define GL_PLG_AVG_CALC_ST_VALID_M BIT(31) #define GL_PRE_CFG_CMD 0x00214090 /* Reset Source: CORER */ #define GL_PRE_CFG_CMD_ADDR_S 0 #define GL_PRE_CFG_CMD_ADDR_M MAKEMASK(0x1FFF, 0) #define GL_PRE_CFG_CMD_TBLIDX_S 16 #define GL_PRE_CFG_CMD_TBLIDX_M MAKEMASK(0x7, 16) #define GL_PRE_CFG_CMD_CMD_S 29 #define GL_PRE_CFG_CMD_CMD_M BIT(29) #define GL_PRE_CFG_CMD_DONE_S 31 #define GL_PRE_CFG_CMD_DONE_M BIT(31) #define GL_PRE_CFG_DATA(_i) (0x00214074 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */ #define GL_PRE_CFG_DATA_MAX_INDEX 6 #define GL_PRE_CFG_DATA_GL_PRE_RCP_DATA_S 0 #define GL_PRE_CFG_DATA_GL_PRE_RCP_DATA_M MAKEMASK(0xFFFFFFFF, 0) #define GL_SWT_FUNCFILT 0x001D2698 /* Reset Source: CORER */ #define GL_SWT_FUNCFILT_FUNCFILT_S 0 #define GL_SWT_FUNCFILT_FUNCFILT_M BIT(0) #define GL_SWT_FW_STS(_i) (0x00216000 + ((_i) * 4)) /* _i=0...5 */ /* Reset Source: CORER */ #define GL_SWT_FW_STS_MAX_INDEX 5 #define GL_SWT_FW_STS_GL_SWT_FW_STS_S 0 #define GL_SWT_FW_STS_GL_SWT_FW_STS_M MAKEMASK(0xFFFFFFFF, 0) #define GL_SWT_LAT_DOUBLE 0x00204004 /* Reset Source: CORER */ #define GL_SWT_LAT_DOUBLE_BASE_S 0 #define GL_SWT_LAT_DOUBLE_BASE_M MAKEMASK(0x7FF, 0) #define GL_SWT_LAT_DOUBLE_SIZE_S 16 #define GL_SWT_LAT_DOUBLE_SIZE_M MAKEMASK(0x7FF, 16) #define GL_SWT_LAT_QUAD 0x00204008 /* Reset Source: CORER */ #define GL_SWT_LAT_QUAD_BASE_S 0 #define GL_SWT_LAT_QUAD_BASE_M MAKEMASK(0x7FF, 0) #define GL_SWT_LAT_QUAD_SIZE_S 16 #define GL_SWT_LAT_QUAD_SIZE_M MAKEMASK(0x7FF, 16) #define GL_SWT_LAT_SINGLE 0x00204000 /* Reset Source: CORER */ 
#define GL_SWT_LAT_SINGLE_BASE_S 0 #define GL_SWT_LAT_SINGLE_BASE_M MAKEMASK(0x7FF, 0) #define GL_SWT_LAT_SINGLE_SIZE_S 16 #define GL_SWT_LAT_SINGLE_SIZE_M MAKEMASK(0x7FF, 16) #define GL_SWT_MD_PRI 0x002040AC /* Reset Source: CORER */ #define GL_SWT_MD_PRI_VSI_PRI_S 0 #define GL_SWT_MD_PRI_VSI_PRI_M MAKEMASK(0x7, 0) #define GL_SWT_MD_PRI_LB_PRI_S 4 #define GL_SWT_MD_PRI_LB_PRI_M MAKEMASK(0x7, 4) #define GL_SWT_MD_PRI_LAN_EN_PRI_S 8 #define GL_SWT_MD_PRI_LAN_EN_PRI_M MAKEMASK(0x7, 8) #define GL_SWT_MD_PRI_QH_PRI_S 12 #define GL_SWT_MD_PRI_QH_PRI_M MAKEMASK(0x7, 12) #define GL_SWT_MD_PRI_QL_PRI_S 16 #define GL_SWT_MD_PRI_QL_PRI_M MAKEMASK(0x7, 16) #define GL_SWT_MIRTARVSI(_i) (0x00204500 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define GL_SWT_MIRTARVSI_MAX_INDEX 63 #define GL_SWT_MIRTARVSI_VFVMNUMBER_S 0 #define GL_SWT_MIRTARVSI_VFVMNUMBER_M MAKEMASK(0x3FF, 0) #define GL_SWT_MIRTARVSI_FUNCTIONTYPE_S 10 #define GL_SWT_MIRTARVSI_FUNCTIONTYPE_M MAKEMASK(0x3, 10) #define GL_SWT_MIRTARVSI_PFNUMBER_S 12 #define GL_SWT_MIRTARVSI_PFNUMBER_M MAKEMASK(0x7, 12) #define GL_SWT_MIRTARVSI_TARGETVSI_S 20 #define GL_SWT_MIRTARVSI_TARGETVSI_M MAKEMASK(0x3FF, 20) #define GL_SWT_MIRTARVSI_RULEENABLE_S 31 #define GL_SWT_MIRTARVSI_RULEENABLE_M BIT(31) #define GL_SWT_SWIDFVIDX 0x00214114 /* Reset Source: CORER */ #define GL_SWT_SWIDFVIDX_SWIDFVIDX_S 0 #define GL_SWT_SWIDFVIDX_SWIDFVIDX_M MAKEMASK(0x3F, 0) #define GL_SWT_SWIDFVIDX_PORT_TYPE_S 31 #define GL_SWT_SWIDFVIDX_PORT_TYPE_M BIT(31) #define GL_VP_SWITCHID(_i) (0x00214094 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define GL_VP_SWITCHID_MAX_INDEX 31 #define GL_VP_SWITCHID_SWITCHID_S 0 #define GL_VP_SWITCHID_SWITCHID_M MAKEMASK(0xFF, 0) #define GLSWID_STAT_BLOCK(_i) (0x0020A1A4 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ #define GLSWID_STAT_BLOCK_MAX_INDEX 255 #define GLSWID_STAT_BLOCK_VEBID_S 0 #define GLSWID_STAT_BLOCK_VEBID_M MAKEMASK(0x1F, 0) #define GLSWID_STAT_BLOCK_VEBID_VALID_S 31 #define 
GLSWID_STAT_BLOCK_VEBID_VALID_M BIT(31) #define GLSWT_ACT_RESP_0 0x0020A5A4 /* Reset Source: CORER */ #define GLSWT_ACT_RESP_0_GLSWT_ACT_RESP_S 0 #define GLSWT_ACT_RESP_0_GLSWT_ACT_RESP_M MAKEMASK(0xFFFFFFFF, 0) #define GLSWT_ACT_RESP_1 0x0020A5A8 /* Reset Source: CORER */ #define GLSWT_ACT_RESP_1_GLSWT_ACT_RESP_S 0 #define GLSWT_ACT_RESP_1_GLSWT_ACT_RESP_M MAKEMASK(0xFFFFFFFF, 0) #define GLSWT_ARB_MODE 0x0020A674 /* Reset Source: CORER */ #define GLSWT_ARB_MODE_FLU_PRI_SHM_S 0 #define GLSWT_ARB_MODE_FLU_PRI_SHM_M BIT(0) #define GLSWT_ARB_MODE_TX_RX_FWD_PRI_S 1 #define GLSWT_ARB_MODE_TX_RX_FWD_PRI_M BIT(1) #define PRT_SBPVSI 0x00204120 /* Reset Source: CORER */ #define PRT_SBPVSI_BAD_FRAMES_VSI_S 0 #define PRT_SBPVSI_BAD_FRAMES_VSI_M MAKEMASK(0x3FF, 0) #define PRT_SBPVSI_SBP_S 31 #define PRT_SBPVSI_SBP_M BIT(31) #define PRT_SCSTS 0x00204140 /* Reset Source: CORER */ #define PRT_SCSTS_BSCA_S 0 #define PRT_SCSTS_BSCA_M BIT(0) #define PRT_SCSTS_BSCAP_S 1 #define PRT_SCSTS_BSCAP_M BIT(1) #define PRT_SCSTS_MSCA_S 2 #define PRT_SCSTS_MSCA_M BIT(2) #define PRT_SCSTS_MSCAP_S 3 #define PRT_SCSTS_MSCAP_M BIT(3) #define PRT_SWT_BSCCNT 0x00204160 /* Reset Source: CORER */ #define PRT_SWT_BSCCNT_CCOUNT_S 0 #define PRT_SWT_BSCCNT_CCOUNT_M MAKEMASK(0x1FFFFFF, 0) #define PRT_SWT_BSCTRH 0x00204180 /* Reset Source: CORER */ #define PRT_SWT_BSCTRH_UTRESH_S 0 #define PRT_SWT_BSCTRH_UTRESH_M MAKEMASK(0x7FFFF, 0) #define PRT_SWT_MIREG 0x002042A0 /* Reset Source: CORER */ #define PRT_SWT_MIREG_MIRRULE_S 0 #define PRT_SWT_MIREG_MIRRULE_M MAKEMASK(0x3F, 0) #define PRT_SWT_MIREG_MIRENA_S 7 #define PRT_SWT_MIREG_MIRENA_M BIT(7) #define PRT_SWT_MIRIG 0x00204280 /* Reset Source: CORER */ #define PRT_SWT_MIRIG_MIRRULE_S 0 #define PRT_SWT_MIRIG_MIRRULE_M MAKEMASK(0x3F, 0) #define PRT_SWT_MIRIG_MIRENA_S 7 #define PRT_SWT_MIRIG_MIRENA_M BIT(7) #define PRT_SWT_MSCCNT 0x00204100 /* Reset Source: CORER */ #define PRT_SWT_MSCCNT_CCOUNT_S 0 #define PRT_SWT_MSCCNT_CCOUNT_M MAKEMASK(0x1FFFFFF, 0) #define 
PRT_SWT_MSCTRH 0x002041C0 /* Reset Source: CORER */ #define PRT_SWT_MSCTRH_UTRESH_S 0 #define PRT_SWT_MSCTRH_UTRESH_M MAKEMASK(0x7FFFF, 0) #define PRT_SWT_SCBI 0x002041E0 /* Reset Source: CORER */ #define PRT_SWT_SCBI_BI_S 0 #define PRT_SWT_SCBI_BI_M MAKEMASK(0x1FFFFFF, 0) #define PRT_SWT_SCCRL 0x00204200 /* Reset Source: CORER */ #define PRT_SWT_SCCRL_MDIPW_S 0 #define PRT_SWT_SCCRL_MDIPW_M BIT(0) #define PRT_SWT_SCCRL_MDICW_S 1 #define PRT_SWT_SCCRL_MDICW_M BIT(1) #define PRT_SWT_SCCRL_BDIPW_S 2 #define PRT_SWT_SCCRL_BDIPW_M BIT(2) #define PRT_SWT_SCCRL_BDICW_S 3 #define PRT_SWT_SCCRL_BDICW_M BIT(3) #define PRT_SWT_SCCRL_INTERVAL_S 8 #define PRT_SWT_SCCRL_INTERVAL_M MAKEMASK(0xFFFFF, 8) #define PRT_TCTUPR(_i) (0x00040840 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define PRT_TCTUPR_MAX_INDEX 31 #define PRT_TCTUPR_UP0_S 0 #define PRT_TCTUPR_UP0_M MAKEMASK(0x7, 0) #define PRT_TCTUPR_UP1_S 4 #define PRT_TCTUPR_UP1_M MAKEMASK(0x7, 4) #define PRT_TCTUPR_UP2_S 8 #define PRT_TCTUPR_UP2_M MAKEMASK(0x7, 8) #define PRT_TCTUPR_UP3_S 12 #define PRT_TCTUPR_UP3_M MAKEMASK(0x7, 12) #define PRT_TCTUPR_UP4_S 16 #define PRT_TCTUPR_UP4_M MAKEMASK(0x7, 16) #define PRT_TCTUPR_UP5_S 20 #define PRT_TCTUPR_UP5_M MAKEMASK(0x7, 20) #define PRT_TCTUPR_UP6_S 24 #define PRT_TCTUPR_UP6_M MAKEMASK(0x7, 24) #define PRT_TCTUPR_UP7_S 28 #define PRT_TCTUPR_UP7_M MAKEMASK(0x7, 28) #define GLHH_ART_CTL 0x000A41D4 /* Reset Source: POR */ #define GLHH_ART_CTL_ACTIVE_S 0 #define GLHH_ART_CTL_ACTIVE_M BIT(0) #define GLHH_ART_CTL_TIME_OUT1_S 1 #define GLHH_ART_CTL_TIME_OUT1_M BIT(1) #define GLHH_ART_CTL_TIME_OUT2_S 2 #define GLHH_ART_CTL_TIME_OUT2_M BIT(2) #define GLHH_ART_CTL_RESET_HH_S 31 #define GLHH_ART_CTL_RESET_HH_M BIT(31) #define GLHH_ART_DATA 0x000A41E0 /* Reset Source: POR */ #define GLHH_ART_DATA_AGENT_TYPE_S 0 #define GLHH_ART_DATA_AGENT_TYPE_M MAKEMASK(0x7, 0) #define GLHH_ART_DATA_SYNC_TYPE_S 3 #define GLHH_ART_DATA_SYNC_TYPE_M BIT(3) #define GLHH_ART_DATA_MAX_DELAY_S 4 #define 
GLHH_ART_DATA_MAX_DELAY_M MAKEMASK(0xF, 4) #define GLHH_ART_DATA_TIME_BASE_S 8 #define GLHH_ART_DATA_TIME_BASE_M MAKEMASK(0xF, 8) #define GLHH_ART_DATA_RSV_DATA_S 12 #define GLHH_ART_DATA_RSV_DATA_M MAKEMASK(0xFFFFF, 12) #define GLHH_ART_TIME_H 0x000A41D8 /* Reset Source: POR */ #define GLHH_ART_TIME_H_ART_TIME_H_S 0 #define GLHH_ART_TIME_H_ART_TIME_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLHH_ART_TIME_L 0x000A41DC /* Reset Source: POR */ #define GLHH_ART_TIME_L_ART_TIME_L_S 0 #define GLHH_ART_TIME_L_ART_TIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_IN_0_MAX_INDEX 1 #define GLTSYN_AUX_IN_0_EVNTLVL_S 0 #define GLTSYN_AUX_IN_0_EVNTLVL_M MAKEMASK(0x3, 0) #define GLTSYN_AUX_IN_0_INT_ENA_S 4 #define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4) #define GLTSYN_AUX_IN_1(_i) (0x000889E0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_IN_1_MAX_INDEX 1 #define GLTSYN_AUX_IN_1_EVNTLVL_S 0 #define GLTSYN_AUX_IN_1_EVNTLVL_M MAKEMASK(0x3, 0) #define GLTSYN_AUX_IN_1_INT_ENA_S 4 #define GLTSYN_AUX_IN_1_INT_ENA_M BIT(4) #define GLTSYN_AUX_IN_2(_i) (0x000889E8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_IN_2_MAX_INDEX 1 #define GLTSYN_AUX_IN_2_EVNTLVL_S 0 #define GLTSYN_AUX_IN_2_EVNTLVL_M MAKEMASK(0x3, 0) #define GLTSYN_AUX_IN_2_INT_ENA_S 4 #define GLTSYN_AUX_IN_2_INT_ENA_M BIT(4) #define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_OUT_0_MAX_INDEX 1 #define GLTSYN_AUX_OUT_0_OUT_ENA_S 0 #define GLTSYN_AUX_OUT_0_OUT_ENA_M BIT(0) #define GLTSYN_AUX_OUT_0_OUTMOD_S 1 #define GLTSYN_AUX_OUT_0_OUTMOD_M MAKEMASK(0x3, 1) #define GLTSYN_AUX_OUT_0_OUTLVL_S 3 #define GLTSYN_AUX_OUT_0_OUTLVL_M BIT(3) #define GLTSYN_AUX_OUT_0_INT_ENA_S 4 #define GLTSYN_AUX_OUT_0_INT_ENA_M BIT(4) #define GLTSYN_AUX_OUT_0_PULSEW_S 8 #define GLTSYN_AUX_OUT_0_PULSEW_M MAKEMASK(0xF, 8) #define GLTSYN_AUX_OUT_1(_i) 
(0x000889A0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_OUT_1_MAX_INDEX 1 #define GLTSYN_AUX_OUT_1_OUT_ENA_S 0 #define GLTSYN_AUX_OUT_1_OUT_ENA_M BIT(0) #define GLTSYN_AUX_OUT_1_OUTMOD_S 1 #define GLTSYN_AUX_OUT_1_OUTMOD_M MAKEMASK(0x3, 1) #define GLTSYN_AUX_OUT_1_OUTLVL_S 3 #define GLTSYN_AUX_OUT_1_OUTLVL_M BIT(3) #define GLTSYN_AUX_OUT_1_INT_ENA_S 4 #define GLTSYN_AUX_OUT_1_INT_ENA_M BIT(4) #define GLTSYN_AUX_OUT_1_PULSEW_S 8 #define GLTSYN_AUX_OUT_1_PULSEW_M MAKEMASK(0xF, 8) #define GLTSYN_AUX_OUT_2(_i) (0x000889A8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_OUT_2_MAX_INDEX 1 #define GLTSYN_AUX_OUT_2_OUT_ENA_S 0 #define GLTSYN_AUX_OUT_2_OUT_ENA_M BIT(0) #define GLTSYN_AUX_OUT_2_OUTMOD_S 1 #define GLTSYN_AUX_OUT_2_OUTMOD_M MAKEMASK(0x3, 1) #define GLTSYN_AUX_OUT_2_OUTLVL_S 3 #define GLTSYN_AUX_OUT_2_OUTLVL_M BIT(3) #define GLTSYN_AUX_OUT_2_INT_ENA_S 4 #define GLTSYN_AUX_OUT_2_INT_ENA_M BIT(4) #define GLTSYN_AUX_OUT_2_PULSEW_S 8 #define GLTSYN_AUX_OUT_2_PULSEW_M MAKEMASK(0xF, 8) #define GLTSYN_AUX_OUT_3(_i) (0x000889B0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_AUX_OUT_3_MAX_INDEX 1 #define GLTSYN_AUX_OUT_3_OUT_ENA_S 0 #define GLTSYN_AUX_OUT_3_OUT_ENA_M BIT(0) #define GLTSYN_AUX_OUT_3_OUTMOD_S 1 #define GLTSYN_AUX_OUT_3_OUTMOD_M MAKEMASK(0x3, 1) #define GLTSYN_AUX_OUT_3_OUTLVL_S 3 #define GLTSYN_AUX_OUT_3_OUTLVL_M BIT(3) #define GLTSYN_AUX_OUT_3_INT_ENA_S 4 #define GLTSYN_AUX_OUT_3_INT_ENA_M BIT(4) #define GLTSYN_AUX_OUT_3_PULSEW_S 8 #define GLTSYN_AUX_OUT_3_PULSEW_M MAKEMASK(0xF, 8) #define GLTSYN_CLKO_0(_i) (0x000889B8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_CLKO_0_MAX_INDEX 1 #define GLTSYN_CLKO_0_TSYNCLKO_S 0 #define GLTSYN_CLKO_0_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_CLKO_1(_i) (0x000889C0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_CLKO_1_MAX_INDEX 1 #define GLTSYN_CLKO_1_TSYNCLKO_S 0 #define 
GLTSYN_CLKO_1_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_CLKO_2(_i) (0x000889C8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_CLKO_2_MAX_INDEX 1 #define GLTSYN_CLKO_2_TSYNCLKO_S 0 #define GLTSYN_CLKO_2_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_CLKO_3(_i) (0x000889D0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_CLKO_3_MAX_INDEX 1 #define GLTSYN_CLKO_3_TSYNCLKO_S 0 #define GLTSYN_CLKO_3_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_CMD 0x00088810 /* Reset Source: CORER */ #define GLTSYN_CMD_CMD_S 0 #define GLTSYN_CMD_CMD_M MAKEMASK(0xFF, 0) #define GLTSYN_CMD_SEL_MASTER_S 8 #define GLTSYN_CMD_SEL_MASTER_M BIT(8) #define GLTSYN_CMD_SYNC 0x00088814 /* Reset Source: CORER */ #define GLTSYN_CMD_SYNC_SYNC_S 0 #define GLTSYN_CMD_SYNC_SYNC_M MAKEMASK(0x3, 0) #define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_ENA_MAX_INDEX 1 #define GLTSYN_ENA_TSYN_ENA_S 0 #define GLTSYN_ENA_TSYN_ENA_M BIT(0) #define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_EVNT_H_0_MAX_INDEX 1 #define GLTSYN_EVNT_H_0_TSYNEVNT_H_S 0 #define GLTSYN_EVNT_H_0_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_EVNT_H_1(_i) (0x00088980 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_EVNT_H_1_MAX_INDEX 1 #define GLTSYN_EVNT_H_1_TSYNEVNT_H_S 0 #define GLTSYN_EVNT_H_1_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_EVNT_H_2(_i) (0x00088990 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_EVNT_H_2_MAX_INDEX 1 #define GLTSYN_EVNT_H_2_TSYNEVNT_H_S 0 #define GLTSYN_EVNT_H_2_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_EVNT_L_0_MAX_INDEX 1 #define GLTSYN_EVNT_L_0_TSYNEVNT_L_S 0 #define GLTSYN_EVNT_L_0_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_EVNT_L_1(_i) (0x00088978 + ((_i) * 4)) /* 
_i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_EVNT_L_1_MAX_INDEX 1 #define GLTSYN_EVNT_L_1_TSYNEVNT_L_S 0 #define GLTSYN_EVNT_L_1_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_EVNT_L_2(_i) (0x00088988 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_EVNT_L_2_MAX_INDEX 1 #define GLTSYN_EVNT_L_2_TSYNEVNT_L_S 0 #define GLTSYN_EVNT_L_2_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_HHTIME_H_MAX_INDEX 1 #define GLTSYN_HHTIME_H_TSYNEVNT_H_S 0 #define GLTSYN_HHTIME_H_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_HHTIME_L_MAX_INDEX 1 #define GLTSYN_HHTIME_L_TSYNEVNT_L_S 0 #define GLTSYN_HHTIME_L_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_INCVAL_H_MAX_INDEX 1 #define GLTSYN_INCVAL_H_INCVAL_H_S 0 #define GLTSYN_INCVAL_H_INCVAL_H_M MAKEMASK(0xFF, 0) #define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_INCVAL_L_MAX_INDEX 1 #define GLTSYN_INCVAL_L_INCVAL_L_S 0 #define GLTSYN_INCVAL_L_INCVAL_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_SHADJ_H_MAX_INDEX 1 #define GLTSYN_SHADJ_H_ADJUST_H_S 0 #define GLTSYN_SHADJ_H_ADJUST_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_SHADJ_L_MAX_INDEX 1 #define GLTSYN_SHADJ_L_ADJUST_L_S 0 #define GLTSYN_SHADJ_L_ADJUST_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_SHTIME_0_MAX_INDEX 1 #define GLTSYN_SHTIME_0_TSYNTIME_0_S 0 #define GLTSYN_SHTIME_0_TSYNTIME_0_M MAKEMASK(0xFFFFFFFF, 0) #define 
GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_SHTIME_H_MAX_INDEX 1 #define GLTSYN_SHTIME_H_TSYNTIME_H_S 0 #define GLTSYN_SHTIME_H_TSYNTIME_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_SHTIME_L_MAX_INDEX 1 #define GLTSYN_SHTIME_L_TSYNTIME_L_S 0 #define GLTSYN_SHTIME_L_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_STAT_MAX_INDEX 1 #define GLTSYN_STAT_EVENT0_S 0 #define GLTSYN_STAT_EVENT0_M BIT(0) #define GLTSYN_STAT_EVENT1_S 1 #define GLTSYN_STAT_EVENT1_M BIT(1) #define GLTSYN_STAT_EVENT2_S 2 #define GLTSYN_STAT_EVENT2_M BIT(2) #define GLTSYN_STAT_TGT0_S 4 #define GLTSYN_STAT_TGT0_M BIT(4) #define GLTSYN_STAT_TGT1_S 5 #define GLTSYN_STAT_TGT1_M BIT(5) #define GLTSYN_STAT_TGT2_S 6 #define GLTSYN_STAT_TGT2_M BIT(6) #define GLTSYN_STAT_TGT3_S 7 #define GLTSYN_STAT_TGT3_M BIT(7) #define GLTSYN_SYNC_DLAY 0x00088818 /* Reset Source: CORER */ #define GLTSYN_SYNC_DLAY_SYNC_DELAY_S 0 #define GLTSYN_SYNC_DLAY_SYNC_DELAY_M MAKEMASK(0x1F, 0) #define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_H_0_MAX_INDEX 1 #define GLTSYN_TGT_H_0_TSYNTGTT_H_S 0 #define GLTSYN_TGT_H_0_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_H_1(_i) (0x00088940 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_H_1_MAX_INDEX 1 #define GLTSYN_TGT_H_1_TSYNTGTT_H_S 0 #define GLTSYN_TGT_H_1_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_H_2(_i) (0x00088950 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_H_2_MAX_INDEX 1 #define GLTSYN_TGT_H_2_TSYNTGTT_H_S 0 #define GLTSYN_TGT_H_2_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_H_3(_i) (0x00088960 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_H_3_MAX_INDEX 1 
#define GLTSYN_TGT_H_3_TSYNTGTT_H_S 0 #define GLTSYN_TGT_H_3_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_L_0_MAX_INDEX 1 #define GLTSYN_TGT_L_0_TSYNTGTT_L_S 0 #define GLTSYN_TGT_L_0_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_L_1(_i) (0x00088938 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_L_1_MAX_INDEX 1 #define GLTSYN_TGT_L_1_TSYNTGTT_L_S 0 #define GLTSYN_TGT_L_1_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_L_2(_i) (0x00088948 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_L_2_MAX_INDEX 1 #define GLTSYN_TGT_L_2_TSYNTGTT_L_S 0 #define GLTSYN_TGT_L_2_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TGT_L_3(_i) (0x00088958 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TGT_L_3_MAX_INDEX 1 #define GLTSYN_TGT_L_3_TSYNTGTT_L_S 0 #define GLTSYN_TGT_L_3_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TIME_0_MAX_INDEX 1 #define GLTSYN_TIME_0_TSYNTIME_0_S 0 #define GLTSYN_TIME_0_TSYNTIME_0_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TIME_H_MAX_INDEX 1 #define GLTSYN_TIME_H_TSYNTIME_H_S 0 #define GLTSYN_TIME_H_TSYNTIME_H_M MAKEMASK(0xFFFFFFFF, 0) #define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define GLTSYN_TIME_L_MAX_INDEX 1 #define GLTSYN_TIME_L_TSYNTIME_L_S 0 #define GLTSYN_TIME_L_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define PFHH_SEM 0x000A4200 /* Reset Source: PFR */ #define PFHH_SEM_BUSY_S 0 #define PFHH_SEM_BUSY_M BIT(0) #define PFHH_SEM_PF_OWNER_S 4 #define PFHH_SEM_PF_OWNER_M MAKEMASK(0x7, 4) #define PFTSYN_SEM 0x00088880 /* Reset Source: PFR */ #define PFTSYN_SEM_BUSY_S 0 #define PFTSYN_SEM_BUSY_M BIT(0) #define 
PFTSYN_SEM_PF_OWNER_S 4 #define PFTSYN_SEM_PF_OWNER_M MAKEMASK(0x7, 4) #define GLPE_TSCD_FLR(_i) (0x0051E24C + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ #define GLPE_TSCD_FLR_MAX_INDEX 3 #define GLPE_TSCD_FLR_DRAIN_VCTR_ID_S 0 #define GLPE_TSCD_FLR_DRAIN_VCTR_ID_M MAKEMASK(0x3, 0) #define GLPE_TSCD_FLR_PORT_S 2 #define GLPE_TSCD_FLR_PORT_M MAKEMASK(0x7, 2) #define GLPE_TSCD_FLR_PF_NUM_S 5 #define GLPE_TSCD_FLR_PF_NUM_M MAKEMASK(0x7, 5) #define GLPE_TSCD_FLR_VM_VF_TYPE_S 8 #define GLPE_TSCD_FLR_VM_VF_TYPE_M MAKEMASK(0x3, 8) #define GLPE_TSCD_FLR_VM_VF_NUM_S 16 #define GLPE_TSCD_FLR_VM_VF_NUM_M MAKEMASK(0x3FF, 16) #define GLPE_TSCD_FLR_VLD_S 31 #define GLPE_TSCD_FLR_VLD_M BIT(31) #define GLPE_TSCD_PEPM 0x0051E228 /* Reset Source: CORER */ #define GLPE_TSCD_PEPM_MDQ_CREDITS_S 0 #define GLPE_TSCD_PEPM_MDQ_CREDITS_M MAKEMASK(0xFF, 0) #define PF_VIRT_VSTATUS 0x0009E680 /* Reset Source: PFR */ #define PF_VIRT_VSTATUS_NUM_VFS_S 0 #define PF_VIRT_VSTATUS_NUM_VFS_M MAKEMASK(0xFF, 0) #define PF_VIRT_VSTATUS_TOTAL_VFS_S 8 #define PF_VIRT_VSTATUS_TOTAL_VFS_M MAKEMASK(0xFF, 8) #define PF_VIRT_VSTATUS_IOV_ACTIVE_S 16 #define PF_VIRT_VSTATUS_IOV_ACTIVE_M BIT(16) #define PF_VT_PFALLOC 0x001D2480 /* Reset Source: CORER */ #define PF_VT_PFALLOC_FIRSTVF_S 0 #define PF_VT_PFALLOC_FIRSTVF_M MAKEMASK(0xFF, 0) #define PF_VT_PFALLOC_LASTVF_S 8 #define PF_VT_PFALLOC_LASTVF_M MAKEMASK(0xFF, 8) #define PF_VT_PFALLOC_VALID_S 31 #define PF_VT_PFALLOC_VALID_M BIT(31) #define PF_VT_PFALLOC_HIF 0x0009DD80 /* Reset Source: PCIR */ #define PF_VT_PFALLOC_HIF_FIRSTVF_S 0 #define PF_VT_PFALLOC_HIF_FIRSTVF_M MAKEMASK(0xFF, 0) #define PF_VT_PFALLOC_HIF_LASTVF_S 8 #define PF_VT_PFALLOC_HIF_LASTVF_M MAKEMASK(0xFF, 8) #define PF_VT_PFALLOC_HIF_VALID_S 31 #define PF_VT_PFALLOC_HIF_VALID_M BIT(31) #define PF_VT_PFALLOC_PCIE 0x000BE080 /* Reset Source: PCIR */ #define PF_VT_PFALLOC_PCIE_FIRSTVF_S 0 #define PF_VT_PFALLOC_PCIE_FIRSTVF_M MAKEMASK(0xFF, 0) #define PF_VT_PFALLOC_PCIE_LASTVF_S 8 #define 
PF_VT_PFALLOC_PCIE_LASTVF_M MAKEMASK(0xFF, 8) #define PF_VT_PFALLOC_PCIE_VALID_S 31 #define PF_VT_PFALLOC_PCIE_VALID_M BIT(31) #define VSI_L2TAGSTXVALID(_VSI) (0x00046000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_L2TAGSTXVALID_MAX_INDEX 767 #define VSI_L2TAGSTXVALID_L2TAG1INSERTID_S 0 #define VSI_L2TAGSTXVALID_L2TAG1INSERTID_M MAKEMASK(0x7, 0) #define VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_S 3 #define VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_M BIT(3) #define VSI_L2TAGSTXVALID_L2TAG2INSERTID_S 4 #define VSI_L2TAGSTXVALID_L2TAG2INSERTID_M MAKEMASK(0x7, 4) #define VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_S 7 #define VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_M BIT(7) #define VSI_L2TAGSTXVALID_TIR0INSERTID_S 16 #define VSI_L2TAGSTXVALID_TIR0INSERTID_M MAKEMASK(0x7, 16) #define VSI_L2TAGSTXVALID_TIR0_INSERT_S 19 #define VSI_L2TAGSTXVALID_TIR0_INSERT_M BIT(19) #define VSI_L2TAGSTXVALID_TIR1INSERTID_S 20 #define VSI_L2TAGSTXVALID_TIR1INSERTID_M MAKEMASK(0x7, 20) #define VSI_L2TAGSTXVALID_TIR1_INSERT_S 23 #define VSI_L2TAGSTXVALID_TIR1_INSERT_M BIT(23) #define VSI_L2TAGSTXVALID_TIR2INSERTID_S 24 #define VSI_L2TAGSTXVALID_TIR2INSERTID_M MAKEMASK(0x7, 24) #define VSI_L2TAGSTXVALID_TIR2_INSERT_S 27 #define VSI_L2TAGSTXVALID_TIR2_INSERT_M BIT(27) #define VSI_PASID(_VSI) (0x0009C000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ #define VSI_PASID_MAX_INDEX 767 #define VSI_PASID_PASID_S 0 #define VSI_PASID_PASID_M MAKEMASK(0xFFFFF, 0) #define VSI_PASID_EN_S 31 #define VSI_PASID_EN_M BIT(31) #define VSI_RUPR(_VSI) (0x00050000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_RUPR_MAX_INDEX 767 #define VSI_RUPR_UP0_S 0 #define VSI_RUPR_UP0_M MAKEMASK(0x7, 0) #define VSI_RUPR_UP1_S 3 #define VSI_RUPR_UP1_M MAKEMASK(0x7, 3) #define VSI_RUPR_UP2_S 6 #define VSI_RUPR_UP2_M MAKEMASK(0x7, 6) #define VSI_RUPR_UP3_S 9 #define VSI_RUPR_UP3_M MAKEMASK(0x7, 9) #define VSI_RUPR_UP4_S 12 #define VSI_RUPR_UP4_M MAKEMASK(0x7, 12) #define 
/*
 * Auto-generated hardware register offset / field definitions (Intel ice NIC).
 * Each register has an address macro (optionally indexed), a _MAX_INDEX bound
 * for register arrays, and per-field shift (_S) / mask (_M) macros built from
 * the project-provided BIT()/MAKEMASK() helpers.
 *
 * NOTE(review): this chunk arrived with its newlines stripped, fusing many
 * `#define` directives onto single physical lines — invalid C preprocessor
 * input, since a directive must begin on its own line. The definitions below
 * are restored one per line with their original tokens unchanged. The first
 * and last definitions are split at the chunk boundaries: the leading
 * `#define` keyword of VSI_RUPR_UP5_S was cut into the previous chunk and is
 * reconstructed here (verify against the neighboring chunk when merging), and
 * the final E830_GLTXTIME_TS_CFG_STORAGE_MODE_M definition continues in the
 * next chunk.
 */
/* VSI user-priority remap and switch/tagging registers (VSI_*) */
#define VSI_RUPR_UP5_S 15
#define VSI_RUPR_UP5_M MAKEMASK(0x7, 15)
#define VSI_RUPR_UP6_S 18
#define VSI_RUPR_UP6_M MAKEMASK(0x7, 18)
#define VSI_RUPR_UP7_S 21
#define VSI_RUPR_UP7_M MAKEMASK(0x7, 21)
#define VSI_RXSWCTRL(_VSI) (0x00205000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_RXSWCTRL_MAX_INDEX 767
#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_S 8
#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_M BIT(8)
#define VSI_RXSWCTRL_PRUNEENABLE_S 9
#define VSI_RXSWCTRL_PRUNEENABLE_M MAKEMASK(0xF, 9)
#define VSI_RXSWCTRL_SRCPRUNEENABLE_S 13
#define VSI_RXSWCTRL_SRCPRUNEENABLE_M BIT(13)
#define VSI_SRCSWCTRL(_VSI) (0x00209000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_SRCSWCTRL_MAX_INDEX 767
#define VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_S 0
#define VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_M BIT(0)
#define VSI_SRCSWCTRL_ALLOWLOOPBACK_S 1
#define VSI_SRCSWCTRL_ALLOWLOOPBACK_M BIT(1)
#define VSI_SRCSWCTRL_LANENABLE_S 2
#define VSI_SRCSWCTRL_LANENABLE_M BIT(2)
#define VSI_SRCSWCTRL_MACAS_S 3
#define VSI_SRCSWCTRL_MACAS_M BIT(3)
#define VSI_SRCSWCTRL_PRUNEENABLE_S 4
#define VSI_SRCSWCTRL_PRUNEENABLE_M MAKEMASK(0xF, 4)
#define VSI_SWITCHID(_VSI) (0x00215000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_SWITCHID_MAX_INDEX 767
#define VSI_SWITCHID_SWITCHID_S 0
#define VSI_SWITCHID_SWITCHID_M MAKEMASK(0xFF, 0)
#define VSI_SWT_MIREG(_VSI) (0x00207000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_SWT_MIREG_MAX_INDEX 767
#define VSI_SWT_MIREG_MIRRULE_S 0
#define VSI_SWT_MIREG_MIRRULE_M MAKEMASK(0x3F, 0)
#define VSI_SWT_MIREG_MIRENA_S 7
#define VSI_SWT_MIREG_MIRENA_M BIT(7)
#define VSI_SWT_MIRIG(_VSI) (0x00208000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_SWT_MIRIG_MAX_INDEX 767
#define VSI_SWT_MIRIG_MIRRULE_S 0
#define VSI_SWT_MIRIG_MIRRULE_M MAKEMASK(0x3F, 0)
#define VSI_SWT_MIRIG_MIRENA_S 7
#define VSI_SWT_MIRIG_MIRENA_M BIT(7)
#define VSI_TAIR(_VSI) (0x00044000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
#define VSI_TAIR_MAX_INDEX 767
#define VSI_TAIR_PORT_TAG_ID_S 0
#define VSI_TAIR_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0)
#define VSI_TAR(_VSI) (0x00045000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TAR_MAX_INDEX 767
#define VSI_TAR_ACCEPTTAGGED_S 0
#define VSI_TAR_ACCEPTTAGGED_M MAKEMASK(0x3FF, 0)
#define VSI_TAR_ACCEPTUNTAGGED_S 16
#define VSI_TAR_ACCEPTUNTAGGED_M MAKEMASK(0x3FF, 16)
#define VSI_TIR_0(_VSI) (0x00041000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TIR_0_MAX_INDEX 767
#define VSI_TIR_0_PORT_TAG_ID_S 0
#define VSI_TIR_0_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0)
#define VSI_TIR_1(_VSI) (0x00042000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TIR_1_MAX_INDEX 767
#define VSI_TIR_1_PORT_TAG_ID_S 0
#define VSI_TIR_1_PORT_TAG_ID_M MAKEMASK(0xFFFFFFFF, 0)
#define VSI_TIR_2(_VSI) (0x00043000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TIR_2_MAX_INDEX 767
#define VSI_TIR_2_PORT_TAG_ID_S 0
#define VSI_TIR_2_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0)
#define VSI_TSR(_VSI) (0x00051000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TSR_MAX_INDEX 767
#define VSI_TSR_STRIPTAG_S 0
#define VSI_TSR_STRIPTAG_M MAKEMASK(0x3FF, 0)
#define VSI_TSR_SHOWTAG_S 10
#define VSI_TSR_SHOWTAG_M MAKEMASK(0x3FF, 10)
#define VSI_TSR_SHOWPRIONLY_S 20
#define VSI_TSR_SHOWPRIONLY_M MAKEMASK(0x3FF, 20)
#define VSI_TUPIOM(_VSI) (0x00048000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TUPIOM_MAX_INDEX 767
#define VSI_TUPIOM_UP0_S 0
#define VSI_TUPIOM_UP0_M MAKEMASK(0x7, 0)
#define VSI_TUPIOM_UP1_S 3
#define VSI_TUPIOM_UP1_M MAKEMASK(0x7, 3)
#define VSI_TUPIOM_UP2_S 6
#define VSI_TUPIOM_UP2_M MAKEMASK(0x7, 6)
#define VSI_TUPIOM_UP3_S 9
#define VSI_TUPIOM_UP3_M MAKEMASK(0x7, 9)
#define VSI_TUPIOM_UP4_S 12
#define VSI_TUPIOM_UP4_M MAKEMASK(0x7, 12)
#define VSI_TUPIOM_UP5_S 15
#define VSI_TUPIOM_UP5_M MAKEMASK(0x7, 15)
#define VSI_TUPIOM_UP6_S 18
#define VSI_TUPIOM_UP6_M MAKEMASK(0x7, 18)
#define VSI_TUPIOM_UP7_S 21
#define VSI_TUPIOM_UP7_M MAKEMASK(0x7, 21)
#define VSI_TUPR(_VSI) (0x00047000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_TUPR_MAX_INDEX 767
#define VSI_TUPR_UP0_S 0
#define VSI_TUPR_UP0_M MAKEMASK(0x7, 0)
#define VSI_TUPR_UP1_S 3
#define VSI_TUPR_UP1_M MAKEMASK(0x7, 3)
#define VSI_TUPR_UP2_S 6
#define VSI_TUPR_UP2_M MAKEMASK(0x7, 6)
#define VSI_TUPR_UP3_S 9
#define VSI_TUPR_UP3_M MAKEMASK(0x7, 9)
#define VSI_TUPR_UP4_S 12
#define VSI_TUPR_UP4_M MAKEMASK(0x7, 12)
#define VSI_TUPR_UP5_S 15
#define VSI_TUPR_UP5_M MAKEMASK(0x7, 15)
#define VSI_TUPR_UP6_S 18
#define VSI_TUPR_UP6_M MAKEMASK(0x7, 18)
#define VSI_TUPR_UP7_S 21
#define VSI_TUPR_UP7_M MAKEMASK(0x7, 21)
#define VSI_VSI2F(_VSI) (0x001D0000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
#define VSI_VSI2F_MAX_INDEX 767
#define VSI_VSI2F_VFVMNUMBER_S 0
#define VSI_VSI2F_VFVMNUMBER_M MAKEMASK(0x3FF, 0)
#define VSI_VSI2F_FUNCTIONTYPE_S 10
#define VSI_VSI2F_FUNCTIONTYPE_M MAKEMASK(0x3, 10)
#define VSI_VSI2F_PFNUMBER_S 12
#define VSI_VSI2F_PFNUMBER_M MAKEMASK(0x7, 12)
#define VSI_VSI2F_BUFFERNUMBER_S 16
#define VSI_VSI2F_BUFFERNUMBER_M MAKEMASK(0x7, 16)
#define VSI_VSI2F_VSI_NUMBER_S 20
#define VSI_VSI2F_VSI_NUMBER_M MAKEMASK(0x3FF, 20)
#define VSI_VSI2F_VSI_ENABLE_S 31
#define VSI_VSI2F_VSI_ENABLE_M BIT(31)
/* VSI queue filtering: flow director, RSS hash key/LUT, TC regions (VSIQF_*).
 * The *_BY_MAC(hw) selectors pick the E800 or E830 variant at runtime.
 */
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
#define VSIQF_FD_CNT_MAX_INDEX 767
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_CNT_FD_GCNT_M : E800_VSIQF_FD_CNT_FD_GCNT_M)
#define E800_VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0)
#define E830_VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define VSIQF_FD_CNT_FD_BCNT_S 16
#define VSIQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_CNT_FD_BCNT_M : E800_VSIQF_FD_CNT_FD_BCNT_M)
#define E800_VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16)
#define E830_VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define VSIQF_FD_CTL1(_VSI) (0x00411000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_CTL1_MAX_INDEX 767
#define VSIQF_FD_CTL1_FLT_ENA_S 0
#define VSIQF_FD_CTL1_FLT_ENA_M BIT(0)
#define VSIQF_FD_CTL1_CFG_ENA_S 1
#define VSIQF_FD_CTL1_CFG_ENA_M BIT(1)
#define VSIQF_FD_CTL1_EVICT_ENA_S 2
#define VSIQF_FD_CTL1_EVICT_ENA_M BIT(2)
#define VSIQF_FD_DFLT(_VSI) (0x00457000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_DFLT_MAX_INDEX 767
#define VSIQF_FD_DFLT_DEFLT_QINDX_S 0
#define VSIQF_FD_DFLT_DEFLT_QINDX_M MAKEMASK(0x7FF, 0)
#define VSIQF_FD_DFLT_DEFLT_TOQUEUE_S 12
#define VSIQF_FD_DFLT_DEFLT_TOQUEUE_M MAKEMASK(0x7, 12)
#define VSIQF_FD_DFLT_COMP_QINDX_S 16
#define VSIQF_FD_DFLT_COMP_QINDX_M MAKEMASK(0x7FF, 16)
#define VSIQF_FD_DFLT_DEFLT_QINDX_PRIO_S 28
#define VSIQF_FD_DFLT_DEFLT_QINDX_PRIO_M MAKEMASK(0x7, 28)
#define VSIQF_FD_DFLT_DEFLT_DROP_S 31
#define VSIQF_FD_DFLT_DEFLT_DROP_M BIT(31)
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_SIZE_MAX_INDEX 767
#define VSIQF_FD_SIZE_FD_GSIZE_S 0
#define VSIQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_SIZE_FD_GSIZE_M : E800_VSIQF_FD_SIZE_FD_GSIZE_M)
#define E800_VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0)
#define E830_VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define VSIQF_FD_SIZE_FD_BSIZE_S 16
#define VSIQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_SIZE_FD_BSIZE_M : E800_VSIQF_FD_SIZE_FD_BSIZE_M)
#define E800_VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16)
#define E830_VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define VSIQF_HASH_CTL(_VSI) (0x0040D000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_HASH_CTL_MAX_INDEX 767
#define VSIQF_HASH_CTL_HASH_LUT_SEL_S 0
#define VSIQF_HASH_CTL_HASH_LUT_SEL_M MAKEMASK(0x3, 0)
#define VSIQF_HASH_CTL_GLOB_LUT_S 2
#define VSIQF_HASH_CTL_GLOB_LUT_M MAKEMASK(0xF, 2)
#define VSIQF_HASH_CTL_HASH_SCHEME_S 6
#define VSIQF_HASH_CTL_HASH_SCHEME_M MAKEMASK(0x3, 6)
#define VSIQF_HASH_CTL_TC_OVER_SEL_S 8
#define VSIQF_HASH_CTL_TC_OVER_SEL_M MAKEMASK(0x1F, 8)
#define VSIQF_HASH_CTL_TC_OVER_ENA_S 15
#define VSIQF_HASH_CTL_TC_OVER_ENA_M BIT(15)
#define VSIQF_HKEY(_i, _VSI) (0x00400000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...12, _VSI=0...767 */ /* Reset Source: PFR */
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HKEY_KEY_0_S 0
#define VSIQF_HKEY_KEY_0_M MAKEMASK(0xFF, 0)
#define VSIQF_HKEY_KEY_1_S 8
#define VSIQF_HKEY_KEY_1_M MAKEMASK(0xFF, 8)
#define VSIQF_HKEY_KEY_2_S 16
#define VSIQF_HKEY_KEY_2_M MAKEMASK(0xFF, 16)
#define VSIQF_HKEY_KEY_3_S 24
#define VSIQF_HKEY_KEY_3_M MAKEMASK(0xFF, 24)
#define VSIQF_HLUT(_i, _VSI) (0x00420000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...15, _VSI=0...767 */ /* Reset Source: PFR */
#define VSIQF_HLUT_MAX_INDEX 15
#define VSIQF_HLUT_LUT0_S 0
#define VSIQF_HLUT_LUT0_M MAKEMASK(0xF, 0)
#define VSIQF_HLUT_LUT1_S 8
#define VSIQF_HLUT_LUT1_M MAKEMASK(0xF, 8)
#define VSIQF_HLUT_LUT2_S 16
#define VSIQF_HLUT_LUT2_M MAKEMASK(0xF, 16)
#define VSIQF_HLUT_LUT3_S 24
#define VSIQF_HLUT_LUT3_M MAKEMASK(0xF, 24)
#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_PE_CTL1_MAX_INDEX 767
#define VSIQF_PE_CTL1_PE_FLTENA_S 0
#define VSIQF_PE_CTL1_PE_FLTENA_M BIT(0)
#define VSIQF_TC_REGION(_i, _VSI) (0x00448000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...3, _VSI=0...767 */ /* Reset Source: CORER */
#define VSIQF_TC_REGION_MAX_INDEX 3
#define VSIQF_TC_REGION_TC_BASE0_S 0
#define VSIQF_TC_REGION_TC_BASE0_M MAKEMASK(0x7FF, 0)
#define VSIQF_TC_REGION_TC_SIZE0_S 11
#define VSIQF_TC_REGION_TC_SIZE0_M MAKEMASK(0xF, 11)
#define VSIQF_TC_REGION_TC_BASE1_S 16
#define VSIQF_TC_REGION_TC_BASE1_M MAKEMASK(0x7FF, 16)
#define VSIQF_TC_REGION_TC_SIZE1_S 27
#define VSIQF_TC_REGION_TC_SIZE1_M MAKEMASK(0xF, 27)
/* Power management / wake-up registers (GLPM_*, PFPM_*, PRTPM_*) */
#define GLPM_WUMC 0x0009DEE4 /* Reset Source: POR */
#define GLPM_WUMC_MNG_WU_PF_S 16
#define GLPM_WUMC_MNG_WU_PF_M MAKEMASK(0xFF, 16)
#define PFPM_APM 0x000B8080 /* Reset Source: POR */
#define PFPM_APM_APME_S 0
#define PFPM_APM_APME_M BIT(0)
#define PFPM_WUC 0x0009DC80 /* Reset Source: POR */
#define PFPM_WUC_EN_APM_D0_S 5
#define PFPM_WUC_EN_APM_D0_M BIT(5)
#define PFPM_WUFC 0x0009DC00 /* Reset Source: POR */
#define PFPM_WUFC_LNKC_S 0
#define PFPM_WUFC_LNKC_M BIT(0)
#define PFPM_WUFC_MAG_S 1
#define PFPM_WUFC_MAG_M BIT(1)
#define PFPM_WUFC_MNG_S 3
#define PFPM_WUFC_MNG_M BIT(3)
#define PFPM_WUFC_FLX0_ACT_S 4
#define PFPM_WUFC_FLX0_ACT_M BIT(4)
#define PFPM_WUFC_FLX1_ACT_S 5
#define PFPM_WUFC_FLX1_ACT_M BIT(5)
#define PFPM_WUFC_FLX2_ACT_S 6
#define PFPM_WUFC_FLX2_ACT_M BIT(6)
#define PFPM_WUFC_FLX3_ACT_S 7
#define PFPM_WUFC_FLX3_ACT_M BIT(7)
#define PFPM_WUFC_FLX4_ACT_S 8
#define PFPM_WUFC_FLX4_ACT_M BIT(8)
#define PFPM_WUFC_FLX5_ACT_S 9
#define PFPM_WUFC_FLX5_ACT_M BIT(9)
#define PFPM_WUFC_FLX6_ACT_S 10
#define PFPM_WUFC_FLX6_ACT_M BIT(10)
#define PFPM_WUFC_FLX7_ACT_S 11
#define PFPM_WUFC_FLX7_ACT_M BIT(11)
#define PFPM_WUFC_FLX0_S 16
#define PFPM_WUFC_FLX0_M BIT(16)
#define PFPM_WUFC_FLX1_S 17
#define PFPM_WUFC_FLX1_M BIT(17)
#define PFPM_WUFC_FLX2_S 18
#define PFPM_WUFC_FLX2_M BIT(18)
#define PFPM_WUFC_FLX3_S 19
#define PFPM_WUFC_FLX3_M BIT(19)
#define PFPM_WUFC_FLX4_S 20
#define PFPM_WUFC_FLX4_M BIT(20)
#define PFPM_WUFC_FLX5_S 21
#define PFPM_WUFC_FLX5_M BIT(21)
#define PFPM_WUFC_FLX6_S 22
#define PFPM_WUFC_FLX6_M BIT(22)
#define PFPM_WUFC_FLX7_S 23
#define PFPM_WUFC_FLX7_M BIT(23)
#define PFPM_WUFC_FW_RST_WK_S 31
#define PFPM_WUFC_FW_RST_WK_M BIT(31)
#define PFPM_WUS 0x0009DB80 /* Reset Source: POR */
#define PFPM_WUS_LNKC_S 0
#define PFPM_WUS_LNKC_M BIT(0)
#define PFPM_WUS_MAG_S 1
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_PME_STATUS_S 2
#define PFPM_WUS_PME_STATUS_M BIT(2)
#define PFPM_WUS_MNG_S 3
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FLX0_S 16
#define PFPM_WUS_FLX0_M BIT(16)
#define PFPM_WUS_FLX1_S 17
#define PFPM_WUS_FLX1_M BIT(17)
#define PFPM_WUS_FLX2_S 18
#define PFPM_WUS_FLX2_M BIT(18)
#define PFPM_WUS_FLX3_S 19
#define PFPM_WUS_FLX3_M BIT(19)
#define PFPM_WUS_FLX4_S 20
#define PFPM_WUS_FLX4_M BIT(20)
#define PFPM_WUS_FLX5_S 21
#define PFPM_WUS_FLX5_M BIT(21)
#define PFPM_WUS_FLX6_S 22
#define PFPM_WUS_FLX6_M BIT(22)
#define PFPM_WUS_FLX7_S 23
#define PFPM_WUS_FLX7_M BIT(23)
#define PFPM_WUS_FW_RST_WK_S 31
#define PFPM_WUS_FW_RST_WK_M BIT(31)
#define PRTPM_SAH_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTPM_SAH(_i) : E800_PRTPM_SAH(_i))
#define E800_PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define E830_PRTPM_SAH(_i) (0x001E2380 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define PRTPM_SAH_MAX_INDEX 3
#define PRTPM_SAH_PFPM_SAH_S 0
#define PRTPM_SAH_PFPM_SAH_M MAKEMASK(0xFFFF, 0)
#define PRTPM_SAH_PF_NUM_S 26
#define PRTPM_SAH_PF_NUM_M MAKEMASK(0xF, 26)
#define PRTPM_SAH_MC_MAG_EN_S 30
#define PRTPM_SAH_MC_MAG_EN_M BIT(30)
#define PRTPM_SAH_AV_S 31
#define PRTPM_SAH_AV_M BIT(31)
#define PRTPM_SAL_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTPM_SAL(_i) : E800_PRTPM_SAL(_i))
#define E800_PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define E830_PRTPM_SAL(_i) (0x001E2300 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define PRTPM_SAL_MAX_INDEX 3
#define PRTPM_SAL_PFPM_SAL_S 0
#define PRTPM_SAL_PFPM_SAL_M MAKEMASK(0xFFFFFFFF, 0)
/* Protocol engine and timesync registers (GLPE_*, VFPE_*, GLTSYN_*) */
#define GLPE_CQM_FUNC_INVALIDATE 0x00503300 /* Reset Source: CORER */
#define GLPE_CQM_FUNC_INVALIDATE_PF_NUM_S 0
#define GLPE_CQM_FUNC_INVALIDATE_PF_NUM_M MAKEMASK(0x7, 0)
#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_NUM_S 3
#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_NUM_M MAKEMASK(0x3FF, 3)
#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_S 13
#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_M MAKEMASK(0x3, 13)
#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_S 31
#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_M BIT(31)
#define VFPE_MRTEIDXMASK_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFPE_MRTEIDXMASK : E800_VFPE_MRTEIDXMASK)
#define E800_VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */
#define E830_VFPE_MRTEIDXMASK(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
#define GLTSYN_HH_DLAY 0x0008881C /* Reset Source: CORER */
#define GLTSYN_HH_DLAY_SYNC_DELAY_S 0
#define GLTSYN_HH_DLAY_SYNC_DELAY_M MAKEMASK(0xF, 0)
/* VF mailbox admin queues and VF interrupt control (VF_MBX_*, VFINT_*) */
#define VF_MBX_ARQBAH1 0x00006000 /* Reset Source: CORER */
#define VF_MBX_ARQBAH1_ARQBAH_S 0
#define VF_MBX_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_ARQBAL1 0x00006C00 /* Reset Source: CORER */
#define VF_MBX_ARQBAL1_ARQBAL_LSB_S 0
#define VF_MBX_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define VF_MBX_ARQBAL1_ARQBAL_S 6
#define VF_MBX_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_ARQH1 0x00007400 /* Reset Source: CORER */
#define VF_MBX_ARQH1_ARQH_S 0
#define VF_MBX_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_ARQLEN1 0x00008000 /* Reset Source: PFR */
#define VF_MBX_ARQLEN1_ARQLEN_S 0
#define VF_MBX_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_ARQLEN1_ARQVFE_S 28
#define VF_MBX_ARQLEN1_ARQVFE_M BIT(28)
#define VF_MBX_ARQLEN1_ARQOVFL_S 29
#define VF_MBX_ARQLEN1_ARQOVFL_M BIT(29)
#define VF_MBX_ARQLEN1_ARQCRIT_S 30
#define VF_MBX_ARQLEN1_ARQCRIT_M BIT(30)
#define VF_MBX_ARQLEN1_ARQENABLE_S 31
#define VF_MBX_ARQLEN1_ARQENABLE_M BIT(31)
#define VF_MBX_ARQT1 0x00007000 /* Reset Source: CORER */
#define VF_MBX_ARQT1_ARQT_S 0
#define VF_MBX_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
#define VF_MBX_ATQBAH1 0x00007800 /* Reset Source: CORER */
#define VF_MBX_ATQBAH1_ATQBAH_S 0
#define VF_MBX_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_ATQBAL1 0x00007C00 /* Reset Source: CORER */
#define VF_MBX_ATQBAL1_ATQBAL_S 6
#define VF_MBX_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_ATQH1 0x00006400 /* Reset Source: CORER */
#define VF_MBX_ATQH1_ATQH_S 0
#define VF_MBX_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_ATQLEN1 0x00006800 /* Reset Source: PFR */
#define VF_MBX_ATQLEN1_ATQLEN_S 0
#define VF_MBX_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_ATQLEN1_ATQVFE_S 28
#define VF_MBX_ATQLEN1_ATQVFE_M BIT(28)
#define VF_MBX_ATQLEN1_ATQOVFL_S 29
#define VF_MBX_ATQLEN1_ATQOVFL_M BIT(29)
#define VF_MBX_ATQLEN1_ATQCRIT_S 30
#define VF_MBX_ATQLEN1_ATQCRIT_M BIT(30)
#define VF_MBX_ATQLEN1_ATQENABLE_S 31
#define VF_MBX_ATQLEN1_ATQENABLE_M BIT(31)
#define VF_MBX_ATQT1 0x00008400 /* Reset Source: CORER */
#define VF_MBX_ATQT1_ATQT_S 0
#define VF_MBX_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
#define PFPCI_VF_FLUSH_DONE1 0x0000E400 /* Reset Source: PCIR */
#define PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_S 0
#define PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_M BIT(0)
#define VFGEN_RSTAT1 0x00008800 /* Reset Source: VFR */
#define VFGEN_RSTAT1_VFR_STATE_S 0
#define VFGEN_RSTAT1_VFR_STATE_M MAKEMASK(0x3, 0)
#define VFINT_DYN_CTL0 0x00005C00 /* Reset Source: CORER */
#define VFINT_DYN_CTL0_INTENA_S 0
#define VFINT_DYN_CTL0_INTENA_M BIT(0)
#define VFINT_DYN_CTL0_CLEARPBA_S 1
#define VFINT_DYN_CTL0_CLEARPBA_M BIT(1)
#define VFINT_DYN_CTL0_SWINT_TRIG_S 2
#define VFINT_DYN_CTL0_SWINT_TRIG_M BIT(2)
#define VFINT_DYN_CTL0_ITR_INDX_S 3
#define VFINT_DYN_CTL0_ITR_INDX_M MAKEMASK(0x3, 3)
#define VFINT_DYN_CTL0_INTERVAL_S 5
#define VFINT_DYN_CTL0_INTERVAL_M MAKEMASK(0xFFF, 5)
#define VFINT_DYN_CTL0_SW_ITR_INDX_ENA_S 24
#define VFINT_DYN_CTL0_SW_ITR_INDX_ENA_M BIT(24)
#define VFINT_DYN_CTL0_SW_ITR_INDX_S 25
#define VFINT_DYN_CTL0_SW_ITR_INDX_M MAKEMASK(0x3, 25)
#define VFINT_DYN_CTL0_WB_ON_ITR_S 30
#define VFINT_DYN_CTL0_WB_ON_ITR_M BIT(30)
#define VFINT_DYN_CTL0_INTENA_MSK_S 31
#define VFINT_DYN_CTL0_INTENA_MSK_M BIT(31)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
#define VFINT_DYN_CTLN_MAX_INDEX 63
#define VFINT_DYN_CTLN_INTENA_S 0
#define VFINT_DYN_CTLN_INTENA_M BIT(0)
#define VFINT_DYN_CTLN_CLEARPBA_S 1
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define VFINT_DYN_CTLN_SWINT_TRIG_S 2
#define VFINT_DYN_CTLN_SWINT_TRIG_M BIT(2)
#define VFINT_DYN_CTLN_ITR_INDX_S 3
#define VFINT_DYN_CTLN_ITR_INDX_M MAKEMASK(0x3, 3)
#define VFINT_DYN_CTLN_INTERVAL_S 5
#define VFINT_DYN_CTLN_INTERVAL_M MAKEMASK(0xFFF, 5)
#define VFINT_DYN_CTLN_SW_ITR_INDX_ENA_S 24
#define VFINT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(24)
#define VFINT_DYN_CTLN_SW_ITR_INDX_S 25
#define VFINT_DYN_CTLN_SW_ITR_INDX_M MAKEMASK(0x3, 25)
#define VFINT_DYN_CTLN_WB_ON_ITR_S 30
#define VFINT_DYN_CTLN_WB_ON_ITR_M BIT(30)
#define VFINT_DYN_CTLN_INTENA_MSK_S 31
#define VFINT_DYN_CTLN_INTENA_MSK_M BIT(31)
#define VFINT_ITR0(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define VFINT_ITR0_MAX_INDEX 2
#define VFINT_ITR0_INTERVAL_S 0
#define VFINT_ITR0_INTERVAL_M MAKEMASK(0xFFF, 0)
#define VFINT_ITRN_BY_MAC(hw, _i, _j) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFINT_ITRN(_i, _j) : E800_VFINT_ITRN(_i, _j))
#define E800_VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */
#define E830_VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 64)) /* _i=0...15, _j=0...2 */ /* Reset Source: CORER */
#define VFINT_ITRN_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFINT_ITRN_MAX_INDEX : E800_VFINT_ITRN_MAX_INDEX)
#define E800_VFINT_ITRN_MAX_INDEX 2
#define E830_VFINT_ITRN_MAX_INDEX 15
#define VFINT_ITRN_INTERVAL_S 0
#define VFINT_ITRN_INTERVAL_M MAKEMASK(0xFFF, 0)
#define QRX_TAIL1(_QRX) (0x00002000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
#define QRX_TAIL1_MAX_INDEX 255
#define QRX_TAIL1_TAIL_S 0
#define QRX_TAIL1_TAIL_M MAKEMASK(0x1FFF, 0)
#define QTX_TAIL(_DBQM) (0x00000000 + ((_DBQM) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
#define QTX_TAIL_MAX_INDEX 255
#define QTX_TAIL_QTX_COMM_DBELL_S 0
#define QTX_TAIL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_CPM_ARQBAH1 0x0000F060 /* Reset Source: CORER */
#define VF_MBX_CPM_ARQBAH1_ARQBAH_S 0
#define VF_MBX_CPM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_CPM_ARQBAL1 0x0000F050 /* Reset Source: CORER */
#define VF_MBX_CPM_ARQBAL1_ARQBAL_LSB_S 0
#define VF_MBX_CPM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define VF_MBX_CPM_ARQBAL1_ARQBAL_S 6
#define VF_MBX_CPM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_CPM_ARQH1 0x0000F080 /* Reset Source: CORER */
#define VF_MBX_CPM_ARQH1_ARQH_S 0
#define VF_MBX_CPM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_CPM_ARQLEN1 0x0000F070 /* Reset Source: PFR */
#define VF_MBX_CPM_ARQLEN1_ARQLEN_S 0
#define VF_MBX_CPM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_CPM_ARQLEN1_ARQVFE_S 28
#define VF_MBX_CPM_ARQLEN1_ARQVFE_M BIT(28)
#define VF_MBX_CPM_ARQLEN1_ARQOVFL_S 29
#define VF_MBX_CPM_ARQLEN1_ARQOVFL_M BIT(29)
#define VF_MBX_CPM_ARQLEN1_ARQCRIT_S 30
#define VF_MBX_CPM_ARQLEN1_ARQCRIT_M BIT(30)
#define VF_MBX_CPM_ARQLEN1_ARQENABLE_S 31
#define VF_MBX_CPM_ARQLEN1_ARQENABLE_M BIT(31)
#define VF_MBX_CPM_ARQT1 0x0000F090 /* Reset Source: CORER */
#define VF_MBX_CPM_ARQT1_ARQT_S 0
#define VF_MBX_CPM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
#define VF_MBX_CPM_ATQBAH1 0x0000F010 /* Reset Source: CORER */
#define VF_MBX_CPM_ATQBAH1_ATQBAH_S 0
#define VF_MBX_CPM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_CPM_ATQBAL1 0x0000F000 /* Reset Source: CORER */
#define VF_MBX_CPM_ATQBAL1_ATQBAL_S 6
#define VF_MBX_CPM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_CPM_ATQH1 0x0000F030 /* Reset Source: CORER */
#define VF_MBX_CPM_ATQH1_ATQH_S 0
#define VF_MBX_CPM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_CPM_ATQLEN1 0x0000F020 /* Reset Source: PFR */
#define VF_MBX_CPM_ATQLEN1_ATQLEN_S 0
#define VF_MBX_CPM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_CPM_ATQLEN1_ATQVFE_S 28
#define VF_MBX_CPM_ATQLEN1_ATQVFE_M BIT(28)
#define VF_MBX_CPM_ATQLEN1_ATQOVFL_S 29
#define VF_MBX_CPM_ATQLEN1_ATQOVFL_M BIT(29)
#define VF_MBX_CPM_ATQLEN1_ATQCRIT_S 30
#define VF_MBX_CPM_ATQLEN1_ATQCRIT_M BIT(30)
#define VF_MBX_CPM_ATQLEN1_ATQENABLE_S 31
#define VF_MBX_CPM_ATQLEN1_ATQENABLE_M BIT(31)
#define VF_MBX_CPM_ATQT1 0x0000F040 /* Reset Source: CORER */
#define VF_MBX_CPM_ATQT1_ATQT_S 0
#define VF_MBX_CPM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
#define VF_MBX_HLP_ARQBAH1 0x00020060 /* Reset Source: CORER */
#define VF_MBX_HLP_ARQBAH1_ARQBAH_S 0
#define VF_MBX_HLP_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_HLP_ARQBAL1 0x00020050 /* Reset Source: CORER */
#define VF_MBX_HLP_ARQBAL1_ARQBAL_LSB_S 0
#define VF_MBX_HLP_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define VF_MBX_HLP_ARQBAL1_ARQBAL_S 6
#define VF_MBX_HLP_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_HLP_ARQH1 0x00020080 /* Reset Source: CORER */
#define VF_MBX_HLP_ARQH1_ARQH_S 0
#define VF_MBX_HLP_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_HLP_ARQLEN1 0x00020070 /* Reset Source: PFR */
#define VF_MBX_HLP_ARQLEN1_ARQLEN_S 0
#define VF_MBX_HLP_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_HLP_ARQLEN1_ARQVFE_S 28
#define VF_MBX_HLP_ARQLEN1_ARQVFE_M BIT(28)
#define VF_MBX_HLP_ARQLEN1_ARQOVFL_S 29
#define VF_MBX_HLP_ARQLEN1_ARQOVFL_M BIT(29)
#define VF_MBX_HLP_ARQLEN1_ARQCRIT_S 30
#define VF_MBX_HLP_ARQLEN1_ARQCRIT_M BIT(30)
#define VF_MBX_HLP_ARQLEN1_ARQENABLE_S 31
#define VF_MBX_HLP_ARQLEN1_ARQENABLE_M BIT(31)
#define VF_MBX_HLP_ARQT1 0x00020090 /* Reset Source: CORER */
#define VF_MBX_HLP_ARQT1_ARQT_S 0
#define VF_MBX_HLP_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
#define VF_MBX_HLP_ATQBAH1 0x00020010 /* Reset Source: CORER */
#define VF_MBX_HLP_ATQBAH1_ATQBAH_S 0
#define VF_MBX_HLP_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_HLP_ATQBAL1 0x00020000 /* Reset Source: CORER */
#define VF_MBX_HLP_ATQBAL1_ATQBAL_S 6
#define VF_MBX_HLP_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_HLP_ATQH1 0x00020030 /* Reset Source: CORER */
#define VF_MBX_HLP_ATQH1_ATQH_S 0
#define VF_MBX_HLP_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_HLP_ATQLEN1 0x00020020 /* Reset Source: PFR */
#define VF_MBX_HLP_ATQLEN1_ATQLEN_S 0
#define VF_MBX_HLP_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_HLP_ATQLEN1_ATQVFE_S 28
#define VF_MBX_HLP_ATQLEN1_ATQVFE_M BIT(28)
#define VF_MBX_HLP_ATQLEN1_ATQOVFL_S 29
#define VF_MBX_HLP_ATQLEN1_ATQOVFL_M BIT(29)
#define VF_MBX_HLP_ATQLEN1_ATQCRIT_S 30
#define VF_MBX_HLP_ATQLEN1_ATQCRIT_M BIT(30)
#define VF_MBX_HLP_ATQLEN1_ATQENABLE_S 31
#define VF_MBX_HLP_ATQLEN1_ATQENABLE_M BIT(31)
#define VF_MBX_HLP_ATQT1 0x00020040 /* Reset Source: CORER */
#define VF_MBX_HLP_ATQT1_ATQT_S 0
#define VF_MBX_HLP_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
#define VF_MBX_PSM_ARQBAH1 0x00021060 /* Reset Source: CORER */
#define VF_MBX_PSM_ARQBAH1_ARQBAH_S 0
#define VF_MBX_PSM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_PSM_ARQBAL1 0x00021050 /* Reset Source: CORER */
#define VF_MBX_PSM_ARQBAL1_ARQBAL_LSB_S 0
#define VF_MBX_PSM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define VF_MBX_PSM_ARQBAL1_ARQBAL_S 6
#define VF_MBX_PSM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_PSM_ARQH1 0x00021080 /* Reset Source: CORER */
#define VF_MBX_PSM_ARQH1_ARQH_S 0
#define VF_MBX_PSM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_PSM_ARQLEN1 0x00021070 /* Reset Source: PFR */
#define VF_MBX_PSM_ARQLEN1_ARQLEN_S 0
#define VF_MBX_PSM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_PSM_ARQLEN1_ARQVFE_S 28
#define VF_MBX_PSM_ARQLEN1_ARQVFE_M BIT(28)
#define VF_MBX_PSM_ARQLEN1_ARQOVFL_S 29
#define VF_MBX_PSM_ARQLEN1_ARQOVFL_M BIT(29)
#define VF_MBX_PSM_ARQLEN1_ARQCRIT_S 30
#define VF_MBX_PSM_ARQLEN1_ARQCRIT_M BIT(30)
#define VF_MBX_PSM_ARQLEN1_ARQENABLE_S 31
#define VF_MBX_PSM_ARQLEN1_ARQENABLE_M BIT(31)
#define VF_MBX_PSM_ARQT1 0x00021090 /* Reset Source: CORER */
#define VF_MBX_PSM_ARQT1_ARQT_S 0
#define VF_MBX_PSM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
#define VF_MBX_PSM_ATQBAH1 0x00021010 /* Reset Source: CORER */
#define VF_MBX_PSM_ATQBAH1_ATQBAH_S 0
#define VF_MBX_PSM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_MBX_PSM_ATQBAL1 0x00021000 /* Reset Source: CORER */
#define VF_MBX_PSM_ATQBAL1_ATQBAL_S 6
#define VF_MBX_PSM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_MBX_PSM_ATQH1 0x00021030 /* Reset Source: CORER */
#define VF_MBX_PSM_ATQH1_ATQH_S 0
#define VF_MBX_PSM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
#define VF_MBX_PSM_ATQLEN1 0x00021020 /* Reset Source: PFR */
#define VF_MBX_PSM_ATQLEN1_ATQLEN_S 0
#define VF_MBX_PSM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
#define VF_MBX_PSM_ATQLEN1_ATQVFE_S 28
#define VF_MBX_PSM_ATQLEN1_ATQVFE_M BIT(28)
#define VF_MBX_PSM_ATQLEN1_ATQOVFL_S 29
#define VF_MBX_PSM_ATQLEN1_ATQOVFL_M BIT(29)
#define VF_MBX_PSM_ATQLEN1_ATQCRIT_S 30
#define VF_MBX_PSM_ATQLEN1_ATQCRIT_M BIT(30)
#define VF_MBX_PSM_ATQLEN1_ATQENABLE_S 31
#define VF_MBX_PSM_ATQLEN1_ATQENABLE_M BIT(31)
#define VF_MBX_PSM_ATQT1 0x00021040 /* Reset Source: CORER */
#define VF_MBX_PSM_ATQT1_ATQT_S 0
#define VF_MBX_PSM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
#define VF_SB_CPM_ARQBAH1 0x0000F160 /* Reset Source: CORER */
#define VF_SB_CPM_ARQBAH1_ARQBAH_S 0
#define VF_SB_CPM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_SB_CPM_ARQBAL1 0x0000F150 /* Reset Source: CORER */
#define VF_SB_CPM_ARQBAL1_ARQBAL_LSB_S 0
#define VF_SB_CPM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define VF_SB_CPM_ARQBAL1_ARQBAL_S 6
#define VF_SB_CPM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_SB_CPM_ARQH1 0x0000F180 /* Reset Source: CORER */
#define VF_SB_CPM_ARQH1_ARQH_S 0
#define VF_SB_CPM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
#define VF_SB_CPM_ARQLEN1 0x0000F170 /* Reset Source: PFR */
#define VF_SB_CPM_ARQLEN1_ARQLEN_S 0
#define VF_SB_CPM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
#define VF_SB_CPM_ARQLEN1_ARQVFE_S 28
#define VF_SB_CPM_ARQLEN1_ARQVFE_M BIT(28)
#define VF_SB_CPM_ARQLEN1_ARQOVFL_S 29
#define VF_SB_CPM_ARQLEN1_ARQOVFL_M BIT(29)
#define VF_SB_CPM_ARQLEN1_ARQCRIT_S 30
#define VF_SB_CPM_ARQLEN1_ARQCRIT_M BIT(30)
#define VF_SB_CPM_ARQLEN1_ARQENABLE_S 31
#define VF_SB_CPM_ARQLEN1_ARQENABLE_M BIT(31)
#define VF_SB_CPM_ARQT1 0x0000F190 /* Reset Source: CORER */
#define VF_SB_CPM_ARQT1_ARQT_S 0
#define VF_SB_CPM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
#define VF_SB_CPM_ATQBAH1 0x0000F110 /* Reset Source: CORER */
#define VF_SB_CPM_ATQBAH1_ATQBAH_S 0
#define VF_SB_CPM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VF_SB_CPM_ATQBAL1 0x0000F100 /* Reset Source: CORER */
#define VF_SB_CPM_ATQBAL1_ATQBAL_S 6
#define VF_SB_CPM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VF_SB_CPM_ATQH1 0x0000F130 /* Reset Source: CORER */
#define VF_SB_CPM_ATQH1_ATQH_S 0
#define VF_SB_CPM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
#define VF_SB_CPM_ATQLEN1 0x0000F120 /* Reset Source: PFR */
#define VF_SB_CPM_ATQLEN1_ATQLEN_S 0
#define VF_SB_CPM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
#define VF_SB_CPM_ATQLEN1_ATQVFE_S 28
#define VF_SB_CPM_ATQLEN1_ATQVFE_M BIT(28)
#define VF_SB_CPM_ATQLEN1_ATQOVFL_S 29
#define VF_SB_CPM_ATQLEN1_ATQOVFL_M BIT(29)
#define VF_SB_CPM_ATQLEN1_ATQCRIT_S 30
#define VF_SB_CPM_ATQLEN1_ATQCRIT_M BIT(30)
#define VF_SB_CPM_ATQLEN1_ATQENABLE_S 31
#define VF_SB_CPM_ATQLEN1_ATQENABLE_M BIT(31)
#define VF_SB_CPM_ATQT1 0x0000F140 /* Reset Source: CORER */
#define VF_SB_CPM_ATQT1_ATQT_S 0
#define VF_SB_CPM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
#define VFINT_DYN_CTL(_i) (0x00023000 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
#define VFINT_DYN_CTL_MAX_INDEX 7
#define VFINT_DYN_CTL_INTENA_S 0
#define VFINT_DYN_CTL_INTENA_M BIT(0)
#define VFINT_DYN_CTL_CLEARPBA_S 1
#define VFINT_DYN_CTL_CLEARPBA_M BIT(1)
#define VFINT_DYN_CTL_SWINT_TRIG_S 2
#define VFINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define VFINT_DYN_CTL_ITR_INDX_S 3
#define VFINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3)
#define VFINT_DYN_CTL_INTERVAL_S 5
#define VFINT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5)
#define VFINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
#define VFINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
#define VFINT_DYN_CTL_SW_ITR_INDX_S 25
#define VFINT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25)
#define VFINT_DYN_CTL_WB_ON_ITR_S 30
#define VFINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define VFINT_DYN_CTL_INTENA_MSK_S 31
#define VFINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define VFINT_ITR_0(_i) (0x00023004 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
#define VFINT_ITR_0_MAX_INDEX 7
#define VFINT_ITR_0_INTERVAL_S 0
#define VFINT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0)
#define VFINT_ITR_1(_i) (0x00023008 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
#define VFINT_ITR_1_MAX_INDEX 7
#define VFINT_ITR_1_INTERVAL_S 0
#define VFINT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0)
#define VFINT_ITR_2(_i) (0x0002300C + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
#define VFINT_ITR_2_MAX_INDEX 7
#define VFINT_ITR_2_INTERVAL_S 0
#define VFINT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0)
#define VFQRX_TAIL(_QRX) (0x0002E000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
#define VFQRX_TAIL_MAX_INDEX 255
#define VFQRX_TAIL_TAIL_S 0
#define VFQRX_TAIL_TAIL_M MAKEMASK(0x1FFF, 0)
#define VFQTX_COMM_DBELL(_DBQM) (0x00030000 + ((_DBQM) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
#define VFQTX_COMM_DBELL_MAX_INDEX 255
#define VFQTX_COMM_DBELL_QTX_COMM_DBELL_S 0
#define VFQTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
#define VFQTX_COMM_DBLQ_DBELL(_DBLQ) (0x00022000 + ((_DBLQ) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
#define VFQTX_COMM_DBLQ_DBELL_MAX_INDEX 3
#define VFQTX_COMM_DBLQ_DBELL_TAIL_S 0
#define VFQTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0)
#define MSIX_TMSG1(_i) (0x00000008 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */
#define MSIX_TMSG1_MAX_INDEX 64
#define MSIX_TMSG1_MSIXTMSG_S 0
#define MSIX_TMSG1_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0)
/* VF protocol engine registers (VFPE_*) */
#define VFPE_AEQALLOC1 0x0000A400 /* Reset Source: VFR */
#define VFPE_AEQALLOC1_AECOUNT_S 0
#define VFPE_AEQALLOC1_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0)
#define VFPE_CCQPHIGH1 0x00009800 /* Reset Source: VFR */
#define VFPE_CCQPHIGH1_PECCQPHIGH_S 0
#define VFPE_CCQPHIGH1_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define VFPE_CCQPLOW1 0x0000AC00 /* Reset Source: VFR */
#define VFPE_CCQPLOW1_PECCQPLOW_S 0
#define VFPE_CCQPLOW1_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0)
#define VFPE_CCQPSTATUS1 0x0000B800 /* Reset Source: VFR */
#define VFPE_CCQPSTATUS1_CCQP_DONE_S 0
#define VFPE_CCQPSTATUS1_CCQP_DONE_M BIT(0)
#define VFPE_CCQPSTATUS1_HMC_PROFILE_S 4
#define VFPE_CCQPSTATUS1_HMC_PROFILE_M MAKEMASK(0x7, 4)
#define VFPE_CCQPSTATUS1_RDMA_EN_VFS_S 16
#define VFPE_CCQPSTATUS1_RDMA_EN_VFS_M MAKEMASK(0x3F, 16)
#define VFPE_CCQPSTATUS1_CCQP_ERR_S 31
#define VFPE_CCQPSTATUS1_CCQP_ERR_M BIT(31)
#define VFPE_CQACK1 0x0000B000 /* Reset Source: VFR */
#define VFPE_CQACK1_PECQID_S 0
#define VFPE_CQACK1_PECQID_M MAKEMASK(0x7FFFF, 0)
#define VFPE_CQARM1 0x0000B400 /* Reset Source: VFR */
#define VFPE_CQARM1_PECQID_S 0
#define VFPE_CQARM1_PECQID_M MAKEMASK(0x7FFFF, 0)
#define VFPE_CQPDB1 0x0000BC00 /* Reset Source: VFR */
#define VFPE_CQPDB1_WQHEAD_S 0
#define VFPE_CQPDB1_WQHEAD_M MAKEMASK(0x7FF, 0)
#define VFPE_CQPERRCODES1 0x00009C00 /* Reset Source: VFR */
#define VFPE_CQPERRCODES1_CQP_MINOR_CODE_S 0
#define VFPE_CQPERRCODES1_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0)
#define VFPE_CQPERRCODES1_CQP_MAJOR_CODE_S 16
#define VFPE_CQPERRCODES1_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16)
#define VFPE_CQPTAIL1 0x0000A000 /* Reset Source: VFR */
#define VFPE_CQPTAIL1_WQTAIL_S 0
#define VFPE_CQPTAIL1_WQTAIL_M MAKEMASK(0x7FF, 0)
#define VFPE_CQPTAIL1_CQP_OP_ERR_S 31
#define VFPE_CQPTAIL1_CQP_OP_ERR_M BIT(31)
#define VFPE_IPCONFIG01 0x00008C00 /* Reset Source: VFR */
#define VFPE_IPCONFIG01_PEIPID_S 0
#define VFPE_IPCONFIG01_PEIPID_M MAKEMASK(0xFFFF, 0)
#define VFPE_IPCONFIG01_USEENTIREIDRANGE_S 16
#define VFPE_IPCONFIG01_USEENTIREIDRANGE_M BIT(16)
#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_S 17
#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_M BIT(17)
#define E800_VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
#define E800_VFPE_MRTEIDXMASK1_MAX_INDEX 255
#define E800_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0
#define E800_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
#define E800_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */
#define E800_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0
#define E800_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define VFPE_TCPNOWTIMER1 0x0000A800 /* Reset Source: VFR */
#define VFPE_TCPNOWTIMER1_TCP_NOW_S 0
#define VFPE_TCPNOWTIMER1_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
#define VFPE_WQEALLOC1 0x0000C000 /* Reset Source: VFR */
#define VFPE_WQEALLOC1_PEQPID_S 0
#define VFPE_WQEALLOC1_PEQPID_M MAKEMASK(0x3FFFF, 0)
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
/* E830-only registers: RX queue context access, doorbell WRR, TX-time (E830_*) */
#define E830_GL_QRX_CONTEXT_CTL 0x00296640 /* Reset Source: CORER */
#define E830_GL_QRX_CONTEXT_CTL_QUEUE_ID_S 0
#define E830_GL_QRX_CONTEXT_CTL_QUEUE_ID_M MAKEMASK(0xFFF, 0)
#define E830_GL_QRX_CONTEXT_CTL_CMD_S 16
#define E830_GL_QRX_CONTEXT_CTL_CMD_M MAKEMASK(0x7, 16)
#define E830_GL_QRX_CONTEXT_CTL_CMD_EXEC_S 19
#define E830_GL_QRX_CONTEXT_CTL_CMD_EXEC_M BIT(19)
#define E830_GL_QRX_CONTEXT_DATA(_i) (0x00296620 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define E830_GL_QRX_CONTEXT_DATA_MAX_INDEX 7
#define E830_GL_QRX_CONTEXT_DATA_DATA_S 0
#define E830_GL_QRX_CONTEXT_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define E830_GL_QRX_CONTEXT_STAT 0x00296644 /* Reset Source: CORER */
#define E830_GL_QRX_CONTEXT_STAT_CMD_IN_PROG_S 0
#define E830_GL_QRX_CONTEXT_STAT_CMD_IN_PROG_M BIT(0)
#define E830_GL_RCB_INTERNAL(_i) (0x00122600 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
#define E830_GL_RCB_INTERNAL_MAX_INDEX 63
#define E830_GL_RCB_INTERNAL_INTERNAL_S 0
#define E830_GL_RCB_INTERNAL_INTERNAL_M MAKEMASK(0xFFFFFFFF, 0)
#define E830_GL_RLAN_INTERNAL(_i) (0x00296700 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
#define E830_GL_RLAN_INTERNAL_MAX_INDEX 63
#define E830_GL_RLAN_INTERNAL_INTERNAL_S 0
#define E830_GL_RLAN_INTERNAL_INTERNAL_M MAKEMASK(0xFFFFFFFF, 0)
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS 0x002D30F0 /* Reset Source: CORER */
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_DBLQ_S 0
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_DBLQ_M MAKEMASK(0xFF, 0)
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_FDBL_S 8
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_FDBL_M MAKEMASK(0xFF, 8)
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_TXT_S 16
#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_TXT_M MAKEMASK(0xFF, 16)
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS 0x002D30F4 /* Reset Source: CORER */
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_DBLQ_S 0
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_DBLQ_M MAKEMASK(0x3F, 0)
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_FDBL_S 6
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_FDBL_M MAKEMASK(0x3F, 6)
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_TXT_S 12
#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_TXT_M MAKEMASK(0x3F, 12)
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS 0x002D30F8 /* Reset Source: CORER */
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_DBLQ_FDBL_S 0
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_DBLQ_FDBL_M MAKEMASK(0xFF, 0)
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_TXT_S 8
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_TXT_M MAKEMASK(0xFF, 8)
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS 0x002D30FC /* Reset Source: CORER */
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_DBLQ_FDBL_S 0
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_DBLQ_FDBL_M MAKEMASK(0x3F, 0)
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_TXT_S 6
#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_TXT_M MAKEMASK(0x3F, 6)
#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8)) /* _i=0...16383 */ /* Reset Source: CORER */
#define E830_GLQTX_TXTIME_DBELL_LSB_MAX_INDEX 16383
#define E830_GLQTX_TXTIME_DBELL_LSB_QTX_TXTIME_DBELL_S 0
#define E830_GLQTX_TXTIME_DBELL_LSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
#define E830_GLQTX_TXTIME_DBELL_MSB(_DBQM) (0x002E0004 + ((_DBQM) * 8)) /* _i=0...16383 */ /* Reset Source: CORER */
#define E830_GLQTX_TXTIME_DBELL_MSB_MAX_INDEX 16383
#define E830_GLQTX_TXTIME_DBELL_MSB_QTX_TXTIME_DBELL_S 0
#define E830_GLQTX_TXTIME_DBELL_MSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
#define E830_GLTCLAN_CQ_CNTX2_SRC_VSI_S 18
#define E830_GLTCLAN_CQ_CNTX2_SRC_VSI_M MAKEMASK(0x3FF, 18)
#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS 0x002D320C /* Reset Source: CORER */
#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_DBL_S 0
#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_DBL_M MAKEMASK(0xFF, 0)
#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_COMP_S 8
#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_COMP_M MAKEMASK(0xFF, 8)
#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS 0x002D3210 /* Reset Source: CORER */
#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_DBL_S 0
#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_DBL_M MAKEMASK(0x3F, 0)
#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_COMP_S 6
#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_COMP_M MAKEMASK(0x3F, 6)
#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64)) /* _i=0...15, _j=0...15 */ /* Reset Source: CORER */
#define E830_GLTXTIME_FETCH_PROFILE_MAX_INDEX 15
#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_S 0
#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M MAKEMASK(0x1FF, 0)
#define E830_GLTXTIME_FETCH_PROFILE_FETCH_FIFO_TRESH_S 9
#define E830_GLTXTIME_FETCH_PROFILE_FETCH_FIFO_TRESH_M MAKEMASK(0x7F, 9)
#define E830_GLTXTIME_OUTST_REQ_CNTL 0x002D3214 /* Reset Source: CORER */
#define E830_GLTXTIME_OUTST_REQ_CNTL_THRESHOLD_S 0
#define E830_GLTXTIME_OUTST_REQ_CNTL_THRESHOLD_M MAKEMASK(0x3FF, 0)
#define E830_GLTXTIME_OUTST_REQ_CNTL_SNAPSHOT_S 10
#define E830_GLTXTIME_OUTST_REQ_CNTL_SNAPSHOT_M MAKEMASK(0x3FF, 10)
#define E830_GLTXTIME_QTX_CNTX_CTL 0x002D3204 /* Reset Source: CORER */
#define E830_GLTXTIME_QTX_CNTX_CTL_QUEUE_ID_S 0
#define E830_GLTXTIME_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x7FF, 0)
#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_S 16
#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16)
#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_EXEC_S 19
#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
#define E830_GLTXTIME_QTX_CNTX_DATA(_i) (0x002D3104 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */
#define E830_GLTXTIME_QTX_CNTX_DATA_MAX_INDEX 6
#define E830_GLTXTIME_QTX_CNTX_DATA_DATA_S 0
#define E830_GLTXTIME_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define E830_GLTXTIME_QTX_CNTX_STAT 0x002D3208 /* Reset Source: CORER */
#define E830_GLTXTIME_QTX_CNTX_STAT_CMD_IN_PROG_S 0
#define E830_GLTXTIME_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0)
#define E830_GLTXTIME_TS_CFG 0x002D3100 /* Reset Source: CORER */
#define E830_GLTXTIME_TS_CFG_TXTIME_ENABLE_S 0
#define E830_GLTXTIME_TS_CFG_TXTIME_ENABLE_M BIT(0)
#define E830_GLTXTIME_TS_CFG_STORAGE_MODE_S 2
#define E830_GLTXTIME_TS_CFG_STORAGE_MODE_M MAKEMASK(0x7,
2) #define E830_GLTXTIME_TS_CFG_PIPE_LATENCY_STATIC_S 5 #define E830_GLTXTIME_TS_CFG_PIPE_LATENCY_STATIC_M MAKEMASK(0x1FFF, 5) #define E830_MBX_PF_DEC_ERR 0x00234100 /* Reset Source: CORER */ #define E830_MBX_PF_DEC_ERR_DEC_ERR_S 0 #define E830_MBX_PF_DEC_ERR_DEC_ERR_M BIT(0) #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 /* Reset Source: CORER */ #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH_TRESH_S 0 #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH_TRESH_M MAKEMASK(0x3FF, 0) #define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_MBX_VF_DEC_TRIG_MAX_INDEX 255 #define E830_MBX_VF_DEC_TRIG_DEC_S 0 #define E830_MBX_VF_DEC_TRIG_DEC_M MAKEMASK(0x3FF, 0) #define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MAX_INDEX 255 #define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MSGS_S 0 #define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MSGS_M MAKEMASK(0x3FF, 0) #define E830_GLRCB_AG_ARBITER_CONFIG 0x00122500 /* Reset Source: CORER */ #define E830_GLRCB_AG_ARBITER_CONFIG_CREDIT_MAX_S 0 #define E830_GLRCB_AG_ARBITER_CONFIG_CREDIT_MAX_M MAKEMASK(0xFFFFF, 0) #define E830_GLRCB_AG_DCB_ARBITER_CONFIG 0x00122518 /* Reset Source: CORER */ #define E830_GLRCB_AG_DCB_ARBITER_CONFIG_CREDIT_MAX_S 0 #define E830_GLRCB_AG_DCB_ARBITER_CONFIG_CREDIT_MAX_M MAKEMASK(0x7F, 0) #define E830_GLRCB_AG_DCB_ARBITER_CONFIG_STRICT_WRR_S 7 #define E830_GLRCB_AG_DCB_ARBITER_CONFIG_STRICT_WRR_M BIT(7) #define E830_GLRCB_AG_DCB_NODE_CONFIG(_i) (0x00122510 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GLRCB_AG_DCB_NODE_CONFIG_MAX_INDEX 1 #define E830_GLRCB_AG_DCB_NODE_CONFIG_BWSHARE_S 0 #define E830_GLRCB_AG_DCB_NODE_CONFIG_BWSHARE_M MAKEMASK(0xF, 0) #define E830_GLRCB_AG_DCB_NODE_STATE(_i) (0x00122508 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GLRCB_AG_DCB_NODE_STATE_MAX_INDEX 1 #define 
E830_GLRCB_AG_DCB_NODE_STATE_CREDITS_S 0 #define E830_GLRCB_AG_DCB_NODE_STATE_CREDITS_M MAKEMASK(0xFF, 0) #define E830_GLRCB_AG_NODE_CONFIG(_i) (0x001224E0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GLRCB_AG_NODE_CONFIG_MAX_INDEX 7 #define E830_GLRCB_AG_NODE_CONFIG_BWSHARE_S 0 #define E830_GLRCB_AG_NODE_CONFIG_BWSHARE_M MAKEMASK(0x7F, 0) #define E830_GLRCB_AG_NODE_STATE(_i) (0x001224C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GLRCB_AG_NODE_STATE_MAX_INDEX 7 #define E830_GLRCB_AG_NODE_STATE_CREDITS_S 0 #define E830_GLRCB_AG_NODE_STATE_CREDITS_M MAKEMASK(0xFFFFF, 0) #define E830_PRT_AG_PORT_FC_MAP 0x00122520 /* Reset Source: CORER */ #define E830_PRT_AG_PORT_FC_MAP_AG_BITMAP_S 0 #define E830_PRT_AG_PORT_FC_MAP_AG_BITMAP_M MAKEMASK(0xFF, 0) #define E830_GL_FW_LOGS_CTL 0x000827F8 /* Reset Source: POR */ #define E830_GL_FW_LOGS_CTL_PAGE_SELECT_S 0 #define E830_GL_FW_LOGS_CTL_PAGE_SELECT_M MAKEMASK(0x3FF, 0) #define E830_GL_FW_LOGS_STS 0x000827FC /* Reset Source: POR */ #define E830_GL_FW_LOGS_STS_MAX_PAGE_S 0 #define E830_GL_FW_LOGS_STS_MAX_PAGE_M MAKEMASK(0x3FF, 0) #define E830_GL_FW_LOGS_STS_FW_LOGS_ENA_S 31 #define E830_GL_FW_LOGS_STS_FW_LOGS_ENA_M BIT(31) #define E830_GL_RTCTL 0x000827F0 /* Reset Source: POR */ #define E830_GL_RTCTL_RTCTL_S 0 #define E830_GL_RTCTL_RTCTL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_RTCTM 0x000827F4 /* Reset Source: POR */ #define E830_GL_RTCTM_RTCTM_S 0 #define E830_GL_RTCTM_RTCTM_M MAKEMASK(0xFFFF, 0) #define E830_GLGEN_RTRIG_EMPR_WO_GLOBR_S 3 #define E830_GLGEN_RTRIG_EMPR_WO_GLOBR_M BIT(3) #define E830_GLPE_TSCD_NUM_PQS 0x0051E2FC /* Reset Source: CORER */ #define E830_GLPE_TSCD_NUM_PQS_NUM_PQS_S 0 #define E830_GLPE_TSCD_NUM_PQS_NUM_PQS_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTPB_100G_RPB_FC_THRESH2 0x0009972C /* Reset Source: CORER */ #define E830_GLTPB_100G_RPB_FC_THRESH2_PORT4_FC_THRESH_S 0 #define E830_GLTPB_100G_RPB_FC_THRESH2_PORT4_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define 
E830_GLTPB_100G_RPB_FC_THRESH2_PORT5_FC_THRESH_S 16 #define E830_GLTPB_100G_RPB_FC_THRESH2_PORT5_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_GLTPB_100G_RPB_FC_THRESH3 0x00099730 /* Reset Source: CORER */ #define E830_GLTPB_100G_RPB_FC_THRESH3_PORT6_FC_THRESH_S 0 #define E830_GLTPB_100G_RPB_FC_THRESH3_PORT6_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_GLTPB_100G_RPB_FC_THRESH3_PORT7_FC_THRESH_S 16 #define E830_GLTPB_100G_RPB_FC_THRESH3_PORT7_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PORT_TIMER_SEL(_i) (0x00088BE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_PORT_TIMER_SEL_MAX_INDEX 7 #define E830_PORT_TIMER_SEL_TIMER_SEL_S 0 #define E830_PORT_TIMER_SEL_TIMER_SEL_M BIT(0) #define E830_GL_RDPU_CNTRL_CHECKSUM_COMPLETE_INV_S 22 #define E830_GL_RDPU_CNTRL_CHECKSUM_COMPLETE_INV_M BIT(22) #define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT 0x001E2280 /* Reset Source: GLOBR */ #define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_S 0 #define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) /* _i=0...63 */ /* Reset Source: GLOBR */ #define E830_PRTTSYN_TXTIME_H_MAX_INDEX 63 #define E830_PRTTSYN_TXTIME_H_TX_TIMESTAMP_HIGH_S 0 #define E830_PRTTSYN_TXTIME_H_TX_TIMESTAMP_HIGH_M MAKEMASK(0xFF, 0) #define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) /* _i=0...63 */ /* Reset Source: GLOBR */ #define E830_PRTTSYN_TXTIME_L_MAX_INDEX 63 #define E830_PRTTSYN_TXTIME_L_TX_VALID_S 0 #define E830_PRTTSYN_TXTIME_L_TX_VALID_M BIT(0) #define E830_PRTTSYN_TXTIME_L_TX_TIMESTAMP_LOW_S 1 #define E830_PRTTSYN_TXTIME_L_TX_TIMESTAMP_LOW_M MAKEMASK(0x7FFFFFFF, 1) #define E830_GL_MDCK_TDAT_TCLAN_TSYN 0x000FD200 /* Reset Source: CORER */ #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_FROM_Q_NOT_ALLOWED_S 0 #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_FROM_Q_NOT_ALLOWED_M BIT(0) #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_RANGE_VIOLATION_S 
1 #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_RANGE_VIOLATION_M BIT(1) #define E830_GL_MDET_RX_FIFO 0x00296840 /* Reset Source: CORER */ #define E830_GL_MDET_RX_FIFO_FUNC_NUM_S 0 #define E830_GL_MDET_RX_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0) #define E830_GL_MDET_RX_FIFO_PF_NUM_S 10 #define E830_GL_MDET_RX_FIFO_PF_NUM_M MAKEMASK(0x7, 10) #define E830_GL_MDET_RX_FIFO_FUNC_TYPE_S 13 #define E830_GL_MDET_RX_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13) #define E830_GL_MDET_RX_FIFO_MAL_TYPE_S 15 #define E830_GL_MDET_RX_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15) #define E830_GL_MDET_RX_FIFO_FIFO_FULL_S 20 #define E830_GL_MDET_RX_FIFO_FIFO_FULL_M BIT(20) #define E830_GL_MDET_RX_FIFO_VALID_S 21 #define E830_GL_MDET_RX_FIFO_VALID_M BIT(21) #define E830_GL_MDET_RX_FIFO_EVENT_CNT_S 24 #define E830_GL_MDET_RX_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24) #define E830_GL_MDET_RX_PF_CNT(_i) (0x00296800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_RX_PF_CNT_MAX_INDEX 7 #define E830_GL_MDET_RX_PF_CNT_CNT_S 0 #define E830_GL_MDET_RX_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_RX_VF(_i) (0x00296820 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_RX_VF_MAX_INDEX 7 #define E830_GL_MDET_RX_VF_VF_MAL_EVENT_S 0 #define E830_GL_MDET_RX_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_TX_PQM_FIFO 0x002D4B00 /* Reset Source: CORER */ #define E830_GL_MDET_TX_PQM_FIFO_FUNC_NUM_S 0 #define E830_GL_MDET_TX_PQM_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0) #define E830_GL_MDET_TX_PQM_FIFO_PF_NUM_S 10 #define E830_GL_MDET_TX_PQM_FIFO_PF_NUM_M MAKEMASK(0x7, 10) #define E830_GL_MDET_TX_PQM_FIFO_FUNC_TYPE_S 13 #define E830_GL_MDET_TX_PQM_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13) #define E830_GL_MDET_TX_PQM_FIFO_MAL_TYPE_S 15 #define E830_GL_MDET_TX_PQM_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15) #define E830_GL_MDET_TX_PQM_FIFO_FIFO_FULL_S 20 #define E830_GL_MDET_TX_PQM_FIFO_FIFO_FULL_M BIT(20) #define E830_GL_MDET_TX_PQM_FIFO_VALID_S 21 #define 
E830_GL_MDET_TX_PQM_FIFO_VALID_M BIT(21) #define E830_GL_MDET_TX_PQM_FIFO_EVENT_CNT_S 24 #define E830_GL_MDET_TX_PQM_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24) #define E830_GL_MDET_TX_PQM_PF_CNT(_i) (0x002D4AC0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_TX_PQM_PF_CNT_MAX_INDEX 7 #define E830_GL_MDET_TX_PQM_PF_CNT_CNT_S 0 #define E830_GL_MDET_TX_PQM_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_TX_PQM_VF(_i) (0x002D4AE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_TX_PQM_VF_MAX_INDEX 7 #define E830_GL_MDET_TX_PQM_VF_VF_MAL_EVENT_S 0 #define E830_GL_MDET_TX_PQM_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_TX_TCLAN_FIFO 0x000FCFD0 /* Reset Source: CORER */ #define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_NUM_S 0 #define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0) #define E830_GL_MDET_TX_TCLAN_FIFO_PF_NUM_S 10 #define E830_GL_MDET_TX_TCLAN_FIFO_PF_NUM_M MAKEMASK(0x7, 10) #define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_TYPE_S 13 #define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13) #define E830_GL_MDET_TX_TCLAN_FIFO_MAL_TYPE_S 15 #define E830_GL_MDET_TX_TCLAN_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15) #define E830_GL_MDET_TX_TCLAN_FIFO_FIFO_FULL_S 20 #define E830_GL_MDET_TX_TCLAN_FIFO_FIFO_FULL_M BIT(20) #define E830_GL_MDET_TX_TCLAN_FIFO_VALID_S 21 #define E830_GL_MDET_TX_TCLAN_FIFO_VALID_M BIT(21) #define E830_GL_MDET_TX_TCLAN_FIFO_EVENT_CNT_S 24 #define E830_GL_MDET_TX_TCLAN_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24) #define E830_GL_MDET_TX_TCLAN_PF_CNT(_i) (0x000FCF90 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_TX_TCLAN_PF_CNT_MAX_INDEX 7 #define E830_GL_MDET_TX_TCLAN_PF_CNT_CNT_S 0 #define E830_GL_MDET_TX_TCLAN_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_TX_TCLAN_VF(_i) (0x000FCFB0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_TX_TCLAN_VF_MAX_INDEX 7 #define E830_GL_MDET_TX_TCLAN_VF_VF_MAL_EVENT_S 0 #define 
E830_GL_MDET_TX_TCLAN_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_TX_TDPU_FIFO 0x00049D80 /* Reset Source: CORER */ #define E830_GL_MDET_TX_TDPU_FIFO_FUNC_NUM_S 0 #define E830_GL_MDET_TX_TDPU_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0) #define E830_GL_MDET_TX_TDPU_FIFO_PF_NUM_S 10 #define E830_GL_MDET_TX_TDPU_FIFO_PF_NUM_M MAKEMASK(0x7, 10) #define E830_GL_MDET_TX_TDPU_FIFO_FUNC_TYPE_S 13 #define E830_GL_MDET_TX_TDPU_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13) #define E830_GL_MDET_TX_TDPU_FIFO_MAL_TYPE_S 15 #define E830_GL_MDET_TX_TDPU_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15) #define E830_GL_MDET_TX_TDPU_FIFO_FIFO_FULL_S 20 #define E830_GL_MDET_TX_TDPU_FIFO_FIFO_FULL_M BIT(20) #define E830_GL_MDET_TX_TDPU_FIFO_VALID_S 21 #define E830_GL_MDET_TX_TDPU_FIFO_VALID_M BIT(21) #define E830_GL_MDET_TX_TDPU_FIFO_EVENT_CNT_S 24 #define E830_GL_MDET_TX_TDPU_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24) #define E830_GL_MDET_TX_TDPU_PF_CNT(_i) (0x00049D40 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_TX_TDPU_PF_CNT_MAX_INDEX 7 #define E830_GL_MDET_TX_TDPU_PF_CNT_CNT_S 0 #define E830_GL_MDET_TX_TDPU_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_TX_TDPU_VF(_i) (0x00049D60 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_TX_TDPU_VF_MAX_INDEX 7 #define E830_GL_MDET_TX_TDPU_VF_VF_MAL_EVENT_S 0 #define E830_GL_MDET_TX_TDPU_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MNG_ECDSA_PUBKEY_HIGH(_i) (0x00083400 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */ #define E830_GL_MNG_ECDSA_PUBKEY_HIGH_MAX_INDEX 11 #define E830_GL_MNG_ECDSA_PUBKEY_HIGH_GL_MNG_ECDSA_PUBKEY_S 0 #define E830_GL_MNG_ECDSA_PUBKEY_HIGH_GL_MNG_ECDSA_PUBKEY_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MNG_ECDSA_PUBKEY_LOW(_i) (0x00083300 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */ #define E830_GL_MNG_ECDSA_PUBKEY_LOW_MAX_INDEX 11 #define E830_GL_MNG_ECDSA_PUBKEY_LOW_GL_MNG_ECDSA_PUBKEY_S 0 #define 
E830_GL_MNG_ECDSA_PUBKEY_LOW_GL_MNG_ECDSA_PUBKEY_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_PPRS_RX_SIZE_CTRL_0(_i) (0x00084900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_INDEX 1 #define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_HEADER_SIZE_S 16 #define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16) #define E830_GL_PPRS_RX_SIZE_CTRL_1(_i) (0x00085900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_INDEX 1 #define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_HEADER_SIZE_S 16 #define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16) #define E830_GL_PPRS_RX_SIZE_CTRL_2(_i) (0x00086900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_INDEX 1 #define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_HEADER_SIZE_S 16 #define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16) #define E830_GL_PPRS_RX_SIZE_CTRL_3(_i) (0x00087900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_INDEX 1 #define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_HEADER_SIZE_S 16 #define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP 0x00200740 /* Reset Source: CORER */ #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_S 0 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_M MAKEMASK(0xFF, 0) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_S 8 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_M MAKEMASK(0xFF, 8) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_S 16 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_M MAKEMASK(0xFF, 16) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_S 24 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_M MAKEMASK(0xFF, 24) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP 0x00200744 /* Reset Source: CORER */ #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_S 0 #define 
E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_M MAKEMASK(0xFF, 0) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_S 8 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_M MAKEMASK(0xFF, 8) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_S 16 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_M MAKEMASK(0xFF, 16) #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_S 24 #define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_M MAKEMASK(0xFF, 24) #define E830_GL_RPRS_PROT_ID_MAP(_i) (0x00200800 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_GL_RPRS_PROT_ID_MAP_MAX_INDEX 255 #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID0_S 0 #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID0_M MAKEMASK(0xFF, 0) #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID1_S 8 #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID1_M MAKEMASK(0xFF, 8) #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID2_S 16 #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID2_M MAKEMASK(0xFF, 16) #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID3_S 24 #define E830_GL_RPRS_PROT_ID_MAP_PROT_ID3_M MAKEMASK(0xFF, 24) #define E830_GL_RPRS_PROT_ID_MAP_PRFL(_i) (0x00201000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define E830_GL_RPRS_PROT_ID_MAP_PRFL_MAX_INDEX 63 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_S 0 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_M MAKEMASK(0x3, 0) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_S 2 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_M MAKEMASK(0x3, 2) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_S 4 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_M MAKEMASK(0x3, 4) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_S 6 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_M MAKEMASK(0x3, 6) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_S 8 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_M MAKEMASK(0x3, 8) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_S 10 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_M 
MAKEMASK(0x3, 10) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_S 12 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_M MAKEMASK(0x3, 12) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_S 14 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_M MAKEMASK(0x3, 14) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_S 16 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_M MAKEMASK(0x3, 16) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_S 18 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_M MAKEMASK(0x3, 18) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_S 20 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_M MAKEMASK(0x3, 20) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_S 22 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_M MAKEMASK(0x3, 22) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_S 24 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_M MAKEMASK(0x3, 24) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_S 26 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_M MAKEMASK(0x3, 26) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_S 28 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_M MAKEMASK(0x3, 28) #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_S 30 #define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_M MAKEMASK(0x3, 30) #define E830_GL_RPRS_VALIDATE_CHECKS_CTL 0x00200748 /* Reset Source: CORER */ #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_S 0 #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_M BIT(0) #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_S 1 #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_M BIT(1) #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_S 2 #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_M BIT(2) #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_S 3 #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_M BIT(3) #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_S 4 #define 
E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_M BIT(4) #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_S 5 #define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_M BIT(5) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP 0x00203A04 /* Reset Source: CORER */ #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_S 0 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_M MAKEMASK(0xFF, 0) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_S 8 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_M MAKEMASK(0xFF, 8) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_S 16 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_M MAKEMASK(0xFF, 16) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_S 24 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_M MAKEMASK(0xFF, 24) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP 0x00203A08 /* Reset Source: CORER */ #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_S 0 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_M MAKEMASK(0xFF, 0) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_S 8 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_M MAKEMASK(0xFF, 8) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_S 16 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_M MAKEMASK(0xFF, 16) #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_S 24 #define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_M MAKEMASK(0xFF, 24) #define E830_GL_TPRS_PROT_ID_MAP(_i) (0x00202200 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_GL_TPRS_PROT_ID_MAP_MAX_INDEX 255 #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID0_S 0 #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID0_M MAKEMASK(0xFF, 0) #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID1_S 8 #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID1_M MAKEMASK(0xFF, 8) #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID2_S 16 #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID2_M MAKEMASK(0xFF, 16) #define 
E830_GL_TPRS_PROT_ID_MAP_PROT_ID3_S 24 #define E830_GL_TPRS_PROT_ID_MAP_PROT_ID3_M MAKEMASK(0xFF, 24) #define E830_GL_TPRS_PROT_ID_MAP_PRFL(_i) (0x00202A00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define E830_GL_TPRS_PROT_ID_MAP_PRFL_MAX_INDEX 63 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_S 0 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_M MAKEMASK(0x3, 0) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_S 2 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_M MAKEMASK(0x3, 2) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_S 4 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_M MAKEMASK(0x3, 4) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_S 6 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_M MAKEMASK(0x3, 6) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_S 8 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_M MAKEMASK(0x3, 8) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_S 10 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_M MAKEMASK(0x3, 10) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_S 12 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_M MAKEMASK(0x3, 12) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_S 14 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_M MAKEMASK(0x3, 14) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_S 16 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_M MAKEMASK(0x3, 16) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_S 18 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_M MAKEMASK(0x3, 18) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_S 20 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_M MAKEMASK(0x3, 20) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_S 22 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_M MAKEMASK(0x3, 22) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_S 24 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_M MAKEMASK(0x3, 24) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_S 26 #define 
E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_M MAKEMASK(0x3, 26) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_S 28 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_M MAKEMASK(0x3, 28) #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_S 30 #define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_M MAKEMASK(0x3, 30) #define E830_GL_TPRS_VALIDATE_CHECKS_CTL 0x00203A00 /* Reset Source: CORER */ #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_S 0 #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_M BIT(0) #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_S 1 #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_M BIT(1) #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_S 2 #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_M BIT(2) #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_S 3 #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_M BIT(3) #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_S 4 #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_M BIT(4) #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_S 5 #define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_M BIT(5) #define E830_PRT_TDPU_TX_SIZE_CTRL 0x00049D20 /* Reset Source: CORER */ #define E830_PRT_TDPU_TX_SIZE_CTRL_MAX_HEADER_SIZE_S 16 #define E830_PRT_TDPU_TX_SIZE_CTRL_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16) #define E830_PRT_TPB_RX_LB_SIZE_CTRL 0x00099740 /* Reset Source: CORER */ #define E830_PRT_TPB_RX_LB_SIZE_CTRL_MAX_HEADER_SIZE_S 16 #define E830_PRT_TPB_RX_LB_SIZE_CTRL_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16) #define E830_GLQTX_TXTIME_DBELL_LSB_PAGE(_DBQM) (0x04000008 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */ #define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_MAX_INDEX 16383 #define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_QTX_TXTIME_DBELL_S 0 #define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define 
E830_GLQTX_TXTIME_DBELL_MSB_PAGE(_DBQM) (0x0400000C + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */ #define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_MAX_INDEX 16383 #define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_QTX_TXTIME_DBELL_S 0 #define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PF0INT_OICR_PSM_PAGE_PTM_COMP_S 8 #define E830_PF0INT_OICR_PSM_PAGE_PTM_COMP_M BIT(8) #define E830_PF0INT_OICR_PSM_PAGE_PQM_DBL_TO_S 9 #define E830_PF0INT_OICR_PSM_PAGE_PQM_DBL_TO_M BIT(9) #define E830_PF0INT_OICR_PSM_PAGE_RSV5_S 10 #define E830_PF0INT_OICR_PSM_PAGE_RSV5_M BIT(10) #define E830_GL_HIBA(_i) (0x00081000 + ((_i) * 4)) /* _i=0...1023 */ /* Reset Source: EMPR */ #define E830_GL_HIBA_MAX_INDEX 1023 #define E830_GL_HIBA_GL_HIBA_S 0 #define E830_GL_HIBA_GL_HIBA_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_HICR 0x00082040 /* Reset Source: EMPR */ #define E830_GL_HICR_C_S 1 #define E830_GL_HICR_C_M BIT(1) #define E830_GL_HICR_SV_S 2 #define E830_GL_HICR_SV_M BIT(2) #define E830_GL_HICR_EV_S 3 #define E830_GL_HICR_EV_M BIT(3) #define E830_GL_HICR_EN 0x00082044 /* Reset Source: EMPR */ #define E830_GL_HICR_EN_EN_S 0 #define E830_GL_HICR_EN_EN_M BIT(0) #define E830_GL_HIDA(_i) (0x00082000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: EMPR */ #define E830_GL_HIDA_MAX_INDEX 15 #define E830_GL_HIDA_GL_HIDB_S 0 #define E830_GL_HIDA_GL_HIDB_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLFLXP_RXDID_FLX_WRD_0_SPARE_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_0_SPARE_M MAKEMASK(0xF, 18) #define E830_GLFLXP_RXDID_FLX_WRD_1_SPARE_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_1_SPARE_M MAKEMASK(0xF, 18) #define E830_GLFLXP_RXDID_FLX_WRD_2_SPARE_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_2_SPARE_M MAKEMASK(0xF, 18) #define E830_GLFLXP_RXDID_FLX_WRD_3_SPARE_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_3_SPARE_M MAKEMASK(0xF, 18) #define E830_GLFLXP_RXDID_FLX_WRD_4_SPARE_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_4_SPARE_M MAKEMASK(0xF, 18) #define 
E830_GLFLXP_RXDID_FLX_WRD_5_SPARE_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_5_SPARE_M MAKEMASK(0xF, 18) #define E830_GLFLXP_RXDID_FLX_WRD_6(_i) (0x0045CE00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define E830_GLFLXP_RXDID_FLX_WRD_6_MAX_INDEX 63 #define E830_GLFLXP_RXDID_FLX_WRD_6_PROT_MDID_S 0 #define E830_GLFLXP_RXDID_FLX_WRD_6_PROT_MDID_M MAKEMASK(0xFF, 0) #define E830_GLFLXP_RXDID_FLX_WRD_6_EXTRACTION_OFFSET_S 8 #define E830_GLFLXP_RXDID_FLX_WRD_6_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define E830_GLFLXP_RXDID_FLX_WRD_6_L2TAG_OVRD_EN_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_6_L2TAG_OVRD_EN_M BIT(18) #define E830_GLFLXP_RXDID_FLX_WRD_6_SPARE_S 19 #define E830_GLFLXP_RXDID_FLX_WRD_6_SPARE_M MAKEMASK(0x7, 19) #define E830_GLFLXP_RXDID_FLX_WRD_6_RXDID_OPCODE_S 30 #define E830_GLFLXP_RXDID_FLX_WRD_6_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define E830_GLFLXP_RXDID_FLX_WRD_7(_i) (0x0045CF00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define E830_GLFLXP_RXDID_FLX_WRD_7_MAX_INDEX 63 #define E830_GLFLXP_RXDID_FLX_WRD_7_PROT_MDID_S 0 #define E830_GLFLXP_RXDID_FLX_WRD_7_PROT_MDID_M MAKEMASK(0xFF, 0) #define E830_GLFLXP_RXDID_FLX_WRD_7_EXTRACTION_OFFSET_S 8 #define E830_GLFLXP_RXDID_FLX_WRD_7_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) #define E830_GLFLXP_RXDID_FLX_WRD_7_L2TAG_OVRD_EN_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_7_L2TAG_OVRD_EN_M BIT(18) #define E830_GLFLXP_RXDID_FLX_WRD_7_SPARE_S 19 #define E830_GLFLXP_RXDID_FLX_WRD_7_SPARE_M MAKEMASK(0x7, 19) #define E830_GLFLXP_RXDID_FLX_WRD_7_RXDID_OPCODE_S 30 #define E830_GLFLXP_RXDID_FLX_WRD_7_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define E830_GLFLXP_RXDID_FLX_WRD_8(_i) (0x0045D500 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ #define E830_GLFLXP_RXDID_FLX_WRD_8_MAX_INDEX 63 #define E830_GLFLXP_RXDID_FLX_WRD_8_PROT_MDID_S 0 #define E830_GLFLXP_RXDID_FLX_WRD_8_PROT_MDID_M MAKEMASK(0xFF, 0) #define E830_GLFLXP_RXDID_FLX_WRD_8_EXTRACTION_OFFSET_S 8 #define E830_GLFLXP_RXDID_FLX_WRD_8_EXTRACTION_OFFSET_M 
MAKEMASK(0x3FF, 8) #define E830_GLFLXP_RXDID_FLX_WRD_8_L2TAG_OVRD_EN_S 18 #define E830_GLFLXP_RXDID_FLX_WRD_8_L2TAG_OVRD_EN_M BIT(18) #define E830_GLFLXP_RXDID_FLX_WRD_8_SPARE_S 19 #define E830_GLFLXP_RXDID_FLX_WRD_8_SPARE_M MAKEMASK(0x7, 19) #define E830_GLFLXP_RXDID_FLX_WRD_8_RXDID_OPCODE_S 30 #define E830_GLFLXP_RXDID_FLX_WRD_8_RXDID_OPCODE_M MAKEMASK(0x3, 30) #define E830_GL_FW_LOGS(_i) (0x00082800 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: POR */ #define E830_GL_FW_LOGS_MAX_INDEX 255 #define E830_GL_FW_LOGS_GL_FW_LOGS_S 0 #define E830_GL_FW_LOGS_GL_FW_LOGS_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_FWSTS_FWABS_S 10 #define E830_GL_FWSTS_FWABS_M MAKEMASK(0x3, 10) #define E830_GL_FWSTS_FW_FAILOVER_TRIG_S 12 #define E830_GL_FWSTS_FW_FAILOVER_TRIG_M BIT(12) #define E830_GLGEN_RSTAT_EMPR_WO_GLOBR_CNT_S 19 #define E830_GLGEN_RSTAT_EMPR_WO_GLOBR_CNT_M MAKEMASK(0x3, 19) #define E830_GLGEN_RSTAT_EMPR_TYPE_S 21 #define E830_GLGEN_RSTAT_EMPR_TYPE_M BIT(21) #define E830_GLPCI_PLATFORM_INFO 0x0009DDC4 /* Reset Source: POR */ #define E830_GLPCI_PLATFORM_INFO_PLATFORM_TYPE_S 0 #define E830_GLPCI_PLATFORM_INFO_PLATFORM_TYPE_M MAKEMASK(0xFF, 0) #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_FROM_Q_NOT_ALLOWED_S 21 #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_FROM_Q_NOT_ALLOWED_M BIT(21) #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_RANGE_VIOLATION_S 22 #define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_RANGE_VIOLATION_M BIT(22) #define E830_GL_MDCK_TDAT_TCLAN_DESC_TYPE_ACL_DTYPE_NOT_ALLOWED_S 23 #define E830_GL_MDCK_TDAT_TCLAN_DESC_TYPE_ACL_DTYPE_NOT_ALLOWED_M BIT(23) #define E830_GL_TPB_LOCAL_TOPO 0x000996F4 /* Reset Source: CORER */ #define E830_GL_TPB_LOCAL_TOPO_ALLOW_TOPO_OVERRIDE_S 0 #define E830_GL_TPB_LOCAL_TOPO_ALLOW_TOPO_OVERRIDE_M BIT(0) #define E830_GL_TPB_LOCAL_TOPO_TOPO_VAL_S 1 #define E830_GL_TPB_LOCAL_TOPO_TOPO_VAL_M MAKEMASK(0x3, 1) #define E830_GL_TPB_PM_RESET 0x000996F0 /* Reset Source: CORER */ #define E830_GL_TPB_PM_RESET_MAC_PM_RESET_S 0 #define 
E830_GL_TPB_PM_RESET_MAC_PM_RESET_M BIT(0) #define E830_GL_TPB_PM_RESET_RPB_PM_RESET_S 1 #define E830_GL_TPB_PM_RESET_RPB_PM_RESET_M BIT(1) #define E830_GLTPB_100G_MAC_FC_THRESH1 0x00099724 /* Reset Source: CORER */ #define E830_GLTPB_100G_MAC_FC_THRESH1_PORT2_FC_THRESH_S 0 #define E830_GLTPB_100G_MAC_FC_THRESH1_PORT2_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_GLTPB_100G_MAC_FC_THRESH1_PORT3_FC_THRESH_S 16 #define E830_GLTPB_100G_MAC_FC_THRESH1_PORT3_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_GLTPB_100G_RPB_FC_THRESH0 0x0009963C /* Reset Source: CORER */ #define E830_GLTPB_100G_RPB_FC_THRESH0_PORT0_FC_THRESH_S 0 #define E830_GLTPB_100G_RPB_FC_THRESH0_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_GLTPB_100G_RPB_FC_THRESH0_PORT1_FC_THRESH_S 16 #define E830_GLTPB_100G_RPB_FC_THRESH0_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_GLTPB_100G_RPB_FC_THRESH1 0x00099728 /* Reset Source: CORER */ #define E830_GLTPB_100G_RPB_FC_THRESH1_PORT2_FC_THRESH_S 0 #define E830_GLTPB_100G_RPB_FC_THRESH1_PORT2_FC_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_GLTPB_100G_RPB_FC_THRESH1_PORT3_FC_THRESH_S 16 #define E830_GLTPB_100G_RPB_FC_THRESH1_PORT3_FC_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_GL_UFUSE_SOC_MAX_PORT_SPEED_S 12 #define E830_GL_UFUSE_SOC_MAX_PORT_SPEED_M MAKEMASK(0xFFFF, 12) #define E830_PF0INT_OICR_PSM_PTM_COMP_S 8 #define E830_PF0INT_OICR_PSM_PTM_COMP_M BIT(8) #define E830_PF0INT_OICR_PSM_PQM_DBL_TO_S 9 #define E830_PF0INT_OICR_PSM_PQM_DBL_TO_M BIT(9) #define E830_PF0INT_OICR_PSM_RSV5_S 10 #define E830_PF0INT_OICR_PSM_RSV5_M BIT(10) #define E830_PFINT_OICR_PTM_COMP_S 8 #define E830_PFINT_OICR_PTM_COMP_M BIT(8) #define E830_PFINT_OICR_PQM_DBL_TO_S 9 #define E830_PFINT_OICR_PQM_DBL_TO_M BIT(9) #define E830_PFINT_OICR_RSV5_S 10 #define E830_PFINT_OICR_RSV5_M BIT(10) #define E830_QRX_CTRL_IDE_S 27 #define E830_QRX_CTRL_IDE_M BIT(27) #define E830_PRTMAC_200G_CL01_PAUSE_QUANTA 0x001E3854 /* Reset Source: GLOBR */ #define 
E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_S 0 #define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_S 16 #define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL01_QUANTA_THRESH 0x001E3864 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_S 0 #define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_S 16 #define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL23_PAUSE_QUANTA 0x001E3858 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_S 0 #define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_S 16 #define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL23_QUANTA_THRESH 0x001E3868 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_S 0 #define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_S 16 #define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL45_PAUSE_QUANTA 0x001E385C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_S 0 #define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_S 16 #define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL45_QUANTA_THRESH 0x001E386C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_S 0 #define 
E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_S 16 #define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL67_PAUSE_QUANTA 0x001E3860 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_S 0 #define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_S 16 #define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_CL67_QUANTA_THRESH 0x001E3870 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_S 0 #define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_S 16 #define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_COMMAND_CONFIG 0x001E3808 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ENA_S 0 #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ENA_M BIT(0) #define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ENA_S 1 #define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ENA_M BIT(1) #define E830_PRTMAC_200G_COMMAND_CONFIG_PROMIS_EN_S 4 #define E830_PRTMAC_200G_COMMAND_CONFIG_PROMIS_EN_M BIT(4) #define E830_PRTMAC_200G_COMMAND_CONFIG_PAD_EN_S 5 #define E830_PRTMAC_200G_COMMAND_CONFIG_PAD_EN_M BIT(5) #define E830_PRTMAC_200G_COMMAND_CONFIG_CRC_FWD_S 6 #define E830_PRTMAC_200G_COMMAND_CONFIG_CRC_FWD_M BIT(6) #define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_FWD_S 7 #define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_FWD_M BIT(7) #define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_IGNORE_S 8 #define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_IGNORE_M BIT(8) #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ADDR_INS_S 9 #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ADDR_INS_M BIT(9) #define 
E830_PRTMAC_200G_COMMAND_CONFIG_LOOPBACK_EN_S 10 #define E830_PRTMAC_200G_COMMAND_CONFIG_LOOPBACK_EN_M BIT(10) #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_PAD_EN_S 11 #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_PAD_EN_M BIT(11) #define E830_PRTMAC_200G_COMMAND_CONFIG_SW_RESET_S 12 #define E830_PRTMAC_200G_COMMAND_CONFIG_SW_RESET_M BIT(12) #define E830_PRTMAC_200G_COMMAND_CONFIG_CNTL_FRM_ENA_S 13 #define E830_PRTMAC_200G_COMMAND_CONFIG_CNTL_FRM_ENA_M BIT(13) #define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ERR_DISC_S 14 #define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ERR_DISC_M BIT(14) #define E830_PRTMAC_200G_COMMAND_CONFIG_PHY_TXENA_S 15 #define E830_PRTMAC_200G_COMMAND_CONFIG_PHY_TXENA_M BIT(15) #define E830_PRTMAC_200G_COMMAND_CONFIG_SEND_IDLE_S 16 #define E830_PRTMAC_200G_COMMAND_CONFIG_SEND_IDLE_M BIT(16) #define E830_PRTMAC_200G_COMMAND_CONFIG_NO_LGTH_CHECK_S 17 #define E830_PRTMAC_200G_COMMAND_CONFIG_NO_LGTH_CHECK_M BIT(17) #define E830_PRTMAC_200G_COMMAND_CONFIG_PFC_MODE_S 19 #define E830_PRTMAC_200G_COMMAND_CONFIG_PFC_MODE_M BIT(19) #define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_PFC_COMP_S 20 #define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_PFC_COMP_M BIT(20) #define E830_PRTMAC_200G_COMMAND_CONFIG_RX_SFD_ANY_S 21 #define E830_PRTMAC_200G_COMMAND_CONFIG_RX_SFD_ANY_M BIT(21) #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FLUSH_S 22 #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FLUSH_M BIT(22) #define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_TX_STOP_S 25 #define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_TX_STOP_M BIT(25) #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FIFO_RESET_S 26 #define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FIFO_RESET_M BIT(26) #define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_HDL_DIS_S 27 #define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_HDL_DIS_M BIT(27) #define E830_PRTMAC_200G_COMMAND_CONFIG_INV_LOOP_S 31 #define E830_PRTMAC_200G_COMMAND_CONFIG_INV_LOOP_M BIT(31) #define E830_PRTMAC_200G_CRC_INV_M 0x001E384C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_CRC_INV_MASK_CRC_INV_MASK_S 
0 #define E830_PRTMAC_200G_CRC_INV_MASK_CRC_INV_MASK_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_200G_FRM_LENGTH 0x001E3814 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_FRM_LENGTH_FRM_LENGTH_S 0 #define E830_PRTMAC_200G_FRM_LENGTH_FRM_LENGTH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_FRM_LENGTH_TX_MTU_S 16 #define E830_PRTMAC_200G_FRM_LENGTH_TX_MTU_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_HASHTABLE_LOAD 0x001E382C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_HASHTABLE_LOAD_HASH_TABLE_ADDR_S 0 #define E830_PRTMAC_200G_HASHTABLE_LOAD_HASH_TABLE_ADDR_M MAKEMASK(0x3F, 0) #define E830_PRTMAC_200G_HASHTABLE_LOAD_MCAST_EN_S 8 #define E830_PRTMAC_200G_HASHTABLE_LOAD_MCAST_EN_M BIT(8) #define E830_PRTMAC_200G_MAC_ADDR_0 0x001E380C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_MAC_ADDR_0_MAC_ADDR_0_S 0 #define E830_PRTMAC_200G_MAC_ADDR_0_MAC_ADDR_0_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_200G_MAC_ADDR_1 0x001E3810 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_MAC_ADDR_1_MAC_ADDR_1_S 0 #define E830_PRTMAC_200G_MAC_ADDR_1_MAC_ADDR_1_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_MDIO_CFG_STATUS 0x001E3830 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_BUSY_S 0 #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_BUSY_M BIT(0) #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_RD_ERR_S 1 #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_RD_ERR_M BIT(1) #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_HOLD_TIME_S 2 #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_HOLD_TIME_M MAKEMASK(0x7, 2) #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_S 5 #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_M BIT(5) #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLS_45_EN_S 6 #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLS_45_EN_M BIT(6) #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_S 7 #define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_M MAKEMASK(0x1FF, 7) #define E830_PRTMAC_200G_MDIO_COMMAND 0x001E3834 /* 
Reset Source: GLOBR */ #define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_COMMAND_S 0 #define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_COMMAND_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_MDIO_COMMAND_RESERVED_2_S 16 #define E830_PRTMAC_200G_MDIO_COMMAND_RESERVED_2_M MAKEMASK(0x7FFF, 16) #define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_BUSY_S 31 #define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_BUSY_M BIT(31) #define E830_PRTMAC_200G_MDIO_DATA 0x001E3838 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_MDIO_DATA_MDIO_DATA_S 0 #define E830_PRTMAC_200G_MDIO_DATA_MDIO_DATA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_MDIO_DATA_RESERVED_2_S 16 #define E830_PRTMAC_200G_MDIO_DATA_RESERVED_2_M MAKEMASK(0x7FFF, 16) #define E830_PRTMAC_200G_MDIO_DATA_MDIO_BUSY_S 31 #define E830_PRTMAC_200G_MDIO_DATA_MDIO_BUSY_M BIT(31) #define E830_PRTMAC_200G_MDIO_REGADDR 0x001E383C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_MDIO_REGADDR_MDIO_REGADDR_S 0 #define E830_PRTMAC_200G_MDIO_REGADDR_MDIO_REGADDR_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_200G_REVISION 0x001E3800 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_REVISION_CORE_REVISION_S 0 #define E830_PRTMAC_200G_REVISION_CORE_REVISION_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_200G_REVISION_CORE_VERSION_S 8 #define E830_PRTMAC_200G_REVISION_CORE_VERSION_M MAKEMASK(0xFF, 8) #define E830_PRTMAC_200G_REVISION_CUSTOMER_VERSION_S 16 #define E830_PRTMAC_200G_REVISION_CUSTOMER_VERSION_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_RX_PAUSE_STATUS 0x001E3874 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_RX_PAUSE_STATUS_RX_PAUSE_STATUS_S 0 #define E830_PRTMAC_200G_RX_PAUSE_STATUS_RX_PAUSE_STATUS_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_200G_SCRATCH 0x001E3804 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_SCRATCH_SCRATCH_S 0 #define E830_PRTMAC_200G_SCRATCH_SCRATCH_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_200G_STATUS 0x001E3840 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_STATUS_RX_LOC_FAULT_S 0 #define 
E830_PRTMAC_200G_STATUS_RX_LOC_FAULT_M BIT(0) #define E830_PRTMAC_200G_STATUS_RX_REM_FAULT_S 1 #define E830_PRTMAC_200G_STATUS_RX_REM_FAULT_M BIT(1) #define E830_PRTMAC_200G_STATUS_PHY_LOS_S 2 #define E830_PRTMAC_200G_STATUS_PHY_LOS_M BIT(2) #define E830_PRTMAC_200G_STATUS_TS_AVAIL_S 3 #define E830_PRTMAC_200G_STATUS_TS_AVAIL_M BIT(3) #define E830_PRTMAC_200G_STATUS_RESERVED_5_S 4 #define E830_PRTMAC_200G_STATUS_RESERVED_5_M BIT(4) #define E830_PRTMAC_200G_STATUS_TX_EMPTY_S 5 #define E830_PRTMAC_200G_STATUS_TX_EMPTY_M BIT(5) #define E830_PRTMAC_200G_STATUS_RX_EMPTY_S 6 #define E830_PRTMAC_200G_STATUS_RX_EMPTY_M BIT(6) #define E830_PRTMAC_200G_STATUS_RESERVED1_S 7 #define E830_PRTMAC_200G_STATUS_RESERVED1_M BIT(7) #define E830_PRTMAC_200G_STATUS_TX_ISIDLE_S 8 #define E830_PRTMAC_200G_STATUS_TX_ISIDLE_M BIT(8) #define E830_PRTMAC_200G_STATUS_RESERVED2_S 9 #define E830_PRTMAC_200G_STATUS_RESERVED2_M MAKEMASK(0x7FFFFF, 9) #define E830_PRTMAC_200G_TS_TIMESTAMP 0x001E387C /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_TS_TIMESTAMP_TS_TIMESTAMP_S 0 #define E830_PRTMAC_200G_TS_TIMESTAMP_TS_TIMESTAMP_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_200G_TX_FIFO_SECTIONS 0x001E3820 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_S 0 #define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_S 16 #define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_200G_TX_IPG_LENGTH 0x001E3844 /* Reset Source: GLOBR */ #define E830_PRTMAC_200G_TX_IPG_LENGTH_AVG_IPG_LEN_S 0 #define E830_PRTMAC_200G_TX_IPG_LENGTH_AVG_IPG_LEN_M MAKEMASK(0x7F, 0) #define E830_PRTMAC_200G_TX_IPG_LENGTH_IPG_COMP_12_0_S 19 #define E830_PRTMAC_200G_TX_IPG_LENGTH_IPG_COMP_12_0_M MAKEMASK(0x1FFF, 19) #define E830_PRTMAC_200G_XIF_MODE 0x001E3880 /* Reset Source: GLOBR */ #define 
E830_PRTMAC_200G_XIF_MODE_RESERVED_1_S 0 #define E830_PRTMAC_200G_XIF_MODE_RESERVED_1_M MAKEMASK(0x1F, 0) #define E830_PRTMAC_200G_XIF_MODE_ONE_STEP_ENA_S 5 #define E830_PRTMAC_200G_XIF_MODE_ONE_STEP_ENA_M BIT(5) #define E830_PRTMAC_200G_XIF_MODE_PFC_PULSE_MODE_S 17 #define E830_PRTMAC_200G_XIF_MODE_PFC_PULSE_MODE_M BIT(17) #define E830_PRTMAC_200G_XIF_MODE_PFC_LP_MODE_S 18 #define E830_PRTMAC_200G_XIF_MODE_PFC_LP_MODE_M BIT(18) #define E830_PRTMAC_200G_XIF_MODE_PFC_LP_16PRI_S 19 #define E830_PRTMAC_200G_XIF_MODE_PFC_LP_16PRI_M BIT(19) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0 0x001E3C00 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0_APPROVED_SW_ADDR_MAC_100G_0_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0_APPROVED_SW_ADDR_MAC_100G_0_M MAKEMASK(0x3F, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1 0x001E3C20 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1_APPROVED_SW_ADDR_MAC_100G_1_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1_APPROVED_SW_ADDR_MAC_100G_1_M MAKEMASK(0x3F, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2 0x001E3C40 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2_APPROVED_SW_ADDR_MAC_100G_2_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2_APPROVED_SW_ADDR_MAC_100G_2_M MAKEMASK(0x3F, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3 0x001E3C60 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3_APPROVED_SW_ADDR_MAC_100G_3_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3_APPROVED_SW_ADDR_MAC_100G_3_M MAKEMASK(0x3F, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0 0x001E3C80 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0_APPROVED_SW_ADDR_MAC_200G_0_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0_APPROVED_SW_ADDR_MAC_200G_0_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1 0x001E3CA0 /* Reset Source: GLOBR */ #define 
E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1_APPROVED_SW_ADDR_MAC_200G_1_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1_APPROVED_SW_ADDR_MAC_200G_1_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2 0x001E3CC0 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2_APPROVED_SW_ADDR_MAC_200G_2_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2_APPROVED_SW_ADDR_MAC_200G_2_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3 0x001E3CE0 /* Reset Source: GLOBR */ #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3_APPROVED_SW_ADDR_MAC_200G_3_S 0 #define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3_APPROVED_SW_ADDR_MAC_200G_3_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_CF_GEN_STATUS 0x001E33C0 /* Reset Source: GLOBR */ #define E830_PRTMAC_CF_GEN_STATUS_CF_GEN_SENT_S 0 #define E830_PRTMAC_CF_GEN_STATUS_CF_GEN_SENT_M BIT(0) #define E830_PRTMAC_CL01_PAUSE_QUANTA 0x001E32A0 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_S 0 #define E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_S 16 #define E830_PRTMAC_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL01_QUANTA_THRESH 0x001E3320 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_S 0 #define E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_S 16 #define E830_PRTMAC_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL23_PAUSE_QUANTA 0x001E32C0 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_S 0 #define E830_PRTMAC_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_S 16 #define E830_PRTMAC_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL23_QUANTA_THRESH 0x001E3340 /* Reset Source: GLOBR */ 
#define E830_PRTMAC_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_S 0 #define E830_PRTMAC_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_S 16 #define E830_PRTMAC_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL45_PAUSE_QUANTA 0x001E32E0 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_S 0 #define E830_PRTMAC_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_S 16 #define E830_PRTMAC_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL45_QUANTA_THRESH 0x001E3360 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_S 0 #define E830_PRTMAC_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_S 16 #define E830_PRTMAC_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL67_PAUSE_QUANTA 0x001E3300 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_S 0 #define E830_PRTMAC_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_S 16 #define E830_PRTMAC_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_CL67_QUANTA_THRESH 0x001E3380 /* Reset Source: GLOBR */ #define E830_PRTMAC_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_S 0 #define E830_PRTMAC_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_S 16 #define E830_PRTMAC_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_COMMAND_CONFIG 0x001E3040 /* Reset Source: GLOBR */ #define E830_PRTMAC_COMMAND_CONFIG_TX_ENA_S 0 #define E830_PRTMAC_COMMAND_CONFIG_TX_ENA_M BIT(0) #define E830_PRTMAC_COMMAND_CONFIG_RX_ENA_S 1 #define E830_PRTMAC_COMMAND_CONFIG_RX_ENA_M BIT(1) #define 
E830_PRTMAC_COMMAND_CONFIG_RESERVED1_S 3 #define E830_PRTMAC_COMMAND_CONFIG_RESERVED1_M BIT(3) #define E830_PRTMAC_COMMAND_CONFIG_PROMIS_EN_S 4 #define E830_PRTMAC_COMMAND_CONFIG_PROMIS_EN_M BIT(4) #define E830_PRTMAC_COMMAND_CONFIG_RESERVED2_S 5 #define E830_PRTMAC_COMMAND_CONFIG_RESERVED2_M BIT(5) #define E830_PRTMAC_COMMAND_CONFIG_CRC_FWD_S 6 #define E830_PRTMAC_COMMAND_CONFIG_CRC_FWD_M BIT(6) #define E830_PRTMAC_COMMAND_CONFIG_PAUSE_FWD_S 7 #define E830_PRTMAC_COMMAND_CONFIG_PAUSE_FWD_M BIT(7) #define E830_PRTMAC_COMMAND_CONFIG_PAUSE_IGNORE_S 8 #define E830_PRTMAC_COMMAND_CONFIG_PAUSE_IGNORE_M BIT(8) #define E830_PRTMAC_COMMAND_CONFIG_TX_ADDR_INS_S 9 #define E830_PRTMAC_COMMAND_CONFIG_TX_ADDR_INS_M BIT(9) #define E830_PRTMAC_COMMAND_CONFIG_LOOP_ENA_S 10 #define E830_PRTMAC_COMMAND_CONFIG_LOOP_ENA_M BIT(10) #define E830_PRTMAC_COMMAND_CONFIG_TX_PAD_EN_S 11 #define E830_PRTMAC_COMMAND_CONFIG_TX_PAD_EN_M BIT(11) #define E830_PRTMAC_COMMAND_CONFIG_SW_RESET_S 12 #define E830_PRTMAC_COMMAND_CONFIG_SW_RESET_M BIT(12) #define E830_PRTMAC_COMMAND_CONFIG_CNTL_FRM_ENA_S 13 #define E830_PRTMAC_COMMAND_CONFIG_CNTL_FRM_ENA_M BIT(13) #define E830_PRTMAC_COMMAND_CONFIG_RESERVED3_S 14 #define E830_PRTMAC_COMMAND_CONFIG_RESERVED3_M BIT(14) #define E830_PRTMAC_COMMAND_CONFIG_PHY_TXENA_S 15 #define E830_PRTMAC_COMMAND_CONFIG_PHY_TXENA_M BIT(15) #define E830_PRTMAC_COMMAND_CONFIG_FORCE_SEND__S 16 #define E830_PRTMAC_COMMAND_CONFIG_FORCE_SEND__M BIT(16) #define E830_PRTMAC_COMMAND_CONFIG_RESERVED4_S 17 #define E830_PRTMAC_COMMAND_CONFIG_RESERVED4_M BIT(17) #define E830_PRTMAC_COMMAND_CONFIG_RESERVED5_S 18 #define E830_PRTMAC_COMMAND_CONFIG_RESERVED5_M BIT(18) #define E830_PRTMAC_COMMAND_CONFIG_PFC_MODE_S 19 #define E830_PRTMAC_COMMAND_CONFIG_PFC_MODE_M BIT(19) #define E830_PRTMAC_COMMAND_CONFIG_PAUSE_PFC_COMP_S 20 #define E830_PRTMAC_COMMAND_CONFIG_PAUSE_PFC_COMP_M BIT(20) #define E830_PRTMAC_COMMAND_CONFIG_RX_SFD_ANY_S 21 #define E830_PRTMAC_COMMAND_CONFIG_RX_SFD_ANY_M BIT(21) 
#define E830_PRTMAC_COMMAND_CONFIG_TX_FLUSH_S 22 #define E830_PRTMAC_COMMAND_CONFIG_TX_FLUSH_M BIT(22) #define E830_PRTMAC_COMMAND_CONFIG_TX_LOWP_ENA_S 23 #define E830_PRTMAC_COMMAND_CONFIG_TX_LOWP_ENA_M BIT(23) #define E830_PRTMAC_COMMAND_CONFIG_REG_LOWP_RXEMPTY_S 24 #define E830_PRTMAC_COMMAND_CONFIG_REG_LOWP_RXEMPTY_M BIT(24) #define E830_PRTMAC_COMMAND_CONFIG_FLT_TX_STOP_S 25 #define E830_PRTMAC_COMMAND_CONFIG_FLT_TX_STOP_M BIT(25) #define E830_PRTMAC_COMMAND_CONFIG_TX_FIFO_RESET_S 26 #define E830_PRTMAC_COMMAND_CONFIG_TX_FIFO_RESET_M BIT(26) #define E830_PRTMAC_COMMAND_CONFIG_FLT_HDL_DIS_S 27 #define E830_PRTMAC_COMMAND_CONFIG_FLT_HDL_DIS_M BIT(27) #define E830_PRTMAC_COMMAND_CONFIG_TX_PAUSE_DIS_S 28 #define E830_PRTMAC_COMMAND_CONFIG_TX_PAUSE_DIS_M BIT(28) #define E830_PRTMAC_COMMAND_CONFIG_RX_PAUSE_DIS_S 29 #define E830_PRTMAC_COMMAND_CONFIG_RX_PAUSE_DIS_M BIT(29) #define E830_PRTMAC_COMMAND_CONFIG_SHORT_PREAM_S 30 #define E830_PRTMAC_COMMAND_CONFIG_SHORT_PREAM_M BIT(30) #define E830_PRTMAC_COMMAND_CONFIG_NO_PREAM_S 31 #define E830_PRTMAC_COMMAND_CONFIG_NO_PREAM_M BIT(31) #define E830_PRTMAC_CRC_INV_M 0x001E3260 /* Reset Source: GLOBR */ #define E830_PRTMAC_CRC_INV_MASK_CRC_INV_MASK_S 0 #define E830_PRTMAC_CRC_INV_MASK_CRC_INV_MASK_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_CRC_MODE 0x001E3240 /* Reset Source: GLOBR */ #define E830_PRTMAC_CRC_MODE_DISABLE_RX_CRC_CHECKING_S 16 #define E830_PRTMAC_CRC_MODE_DISABLE_RX_CRC_CHECKING_M BIT(16) #define E830_PRTMAC_CRC_MODE_ONE_BYTE_CRC_S 18 #define E830_PRTMAC_CRC_MODE_ONE_BYTE_CRC_M BIT(18) #define E830_PRTMAC_CRC_MODE_TWO_BYTES_CRC_S 19 #define E830_PRTMAC_CRC_MODE_TWO_BYTES_CRC_M BIT(19) #define E830_PRTMAC_CRC_MODE_ZERO_BYTE_CRC_S 20 #define E830_PRTMAC_CRC_MODE_ZERO_BYTE_CRC_M BIT(20) #define E830_PRTMAC_CSR_TIMEOUT_CFG 0x001E3D00 /* Reset Source: GLOBR */ #define E830_PRTMAC_CSR_TIMEOUT_CFG_CSR_TIMEOUT_EN_S 0 #define E830_PRTMAC_CSR_TIMEOUT_CFG_CSR_TIMEOUT_EN_M BIT(0) #define E830_PRTMAC_CTL_RX_CFG 
0x001E2160 /* Reset Source: GLOBR */ #define E830_PRTMAC_CTL_RX_CFG_SUB_CRC_STAT_S 0 #define E830_PRTMAC_CTL_RX_CFG_SUB_CRC_STAT_M BIT(0) #define E830_PRTMAC_CTL_RX_CFG_FRM_DROP_FOR_STAT_MODE_S 1 #define E830_PRTMAC_CTL_RX_CFG_FRM_DROP_FOR_STAT_MODE_M MAKEMASK(0x3, 1) #define E830_PRTMAC_CTL_RX_CFG_MAC_PAC_AFULL_TRSH_S 3 #define E830_PRTMAC_CTL_RX_CFG_MAC_PAC_AFULL_TRSH_M MAKEMASK(0x7, 3) #define E830_PRTMAC_CTL_RX_PAUSE_ENABLE 0x001E2180 /* Reset Source: GLOBR */ #define E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S 0 #define E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0) #define E830_PRTMAC_CTL_TX_PAUSE_ENABLE 0x001E21A0 /* Reset Source: GLOBR */ #define E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S 0 #define E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0) #define E830_PRTMAC_FRM_LENGTH 0x001E30A0 /* Reset Source: GLOBR */ #define E830_PRTMAC_FRM_LENGTH_FRM_LENGTH_S 0 #define E830_PRTMAC_FRM_LENGTH_FRM_LENGTH_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_FRM_LENGTH_TX_MTU_S 16 #define E830_PRTMAC_FRM_LENGTH_TX_MTU_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_MAC_ADDR_0 0x001E3060 /* Reset Source: GLOBR */ #define E830_PRTMAC_MAC_ADDR_0_MAC_ADDR_0_S 0 #define E830_PRTMAC_MAC_ADDR_0_MAC_ADDR_0_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_MAC_ADDR_1 0x001E3080 /* Reset Source: GLOBR */ #define E830_PRTMAC_MAC_ADDR_1_MAC_ADDR_1_S 0 #define E830_PRTMAC_MAC_ADDR_1_MAC_ADDR_1_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_MDIO_CFG_STATUS 0x001E3180 /* Reset Source: GLOBR */ #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_BUSY_S 0 #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_BUSY_M BIT(0) #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_RD_ERR_S 1 #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_RD_ERR_M BIT(1) #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_HOLD_TIME_S 2 #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_HOLD_TIME_M MAKEMASK(0x7, 2) #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_S 5 #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_M BIT(5) 
#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLS_45_EN_S 6 #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLS_45_EN_M BIT(6) #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_S 7 #define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_M MAKEMASK(0x1FF, 7) #define E830_PRTMAC_MDIO_COMMAND 0x001E31A0 /* Reset Source: GLOBR */ #define E830_PRTMAC_MDIO_COMMAND_MDIO_COMMAND_S 0 #define E830_PRTMAC_MDIO_COMMAND_MDIO_COMMAND_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_MDIO_COMMAND_RESERVED_2_S 16 #define E830_PRTMAC_MDIO_COMMAND_RESERVED_2_M MAKEMASK(0x7FFF, 16) #define E830_PRTMAC_MDIO_COMMAND_MDIO_BUSY_S 31 #define E830_PRTMAC_MDIO_COMMAND_MDIO_BUSY_M BIT(31) #define E830_PRTMAC_MDIO_DATA 0x001E31C0 /* Reset Source: GLOBR */ #define E830_PRTMAC_MDIO_DATA_MDIO_DATA_S 0 #define E830_PRTMAC_MDIO_DATA_MDIO_DATA_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_MDIO_DATA_RESERVED_2_S 16 #define E830_PRTMAC_MDIO_DATA_RESERVED_2_M MAKEMASK(0x7FFF, 16) #define E830_PRTMAC_MDIO_DATA_MDIO_BUSY_S 31 #define E830_PRTMAC_MDIO_DATA_MDIO_BUSY_M BIT(31) #define E830_PRTMAC_MDIO_REGADDR 0x001E31E0 /* Reset Source: GLOBR */ #define E830_PRTMAC_MDIO_REGADDR_MDIO_REGADDR_S 0 #define E830_PRTMAC_MDIO_REGADDR_MDIO_REGADDR_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_REVISION 0x001E3000 /* Reset Source: GLOBR */ #define E830_PRTMAC_REVISION_CORE_REVISION_S 0 #define E830_PRTMAC_REVISION_CORE_REVISION_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_REVISION_CORE_VERSION_S 8 #define E830_PRTMAC_REVISION_CORE_VERSION_M MAKEMASK(0xFF, 8) #define E830_PRTMAC_REVISION_CUSTOMER_VERSION_S 16 #define E830_PRTMAC_REVISION_CUSTOMER_VERSION_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT 0x001E24C0 /* Reset Source: GLOBR */ #define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT_RX_OFLOW_PKT_DRP_BSOP_CNT_S 0 #define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT_RX_OFLOW_PKT_DRP_BSOP_CNT_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_RX_PAUSE_STATUS 0x001E33A0 /* Reset Source: GLOBR */ #define 
E830_PRTMAC_RX_PAUSE_STATUS_RX_PAUSE_STATUS_S 0 #define E830_PRTMAC_RX_PAUSE_STATUS_RX_PAUSE_STATUS_M MAKEMASK(0xFF, 0) #define E830_PRTMAC_RX_PKT_DRP_CNT_RX_OFLOW_PKT_DRP_CNT_S 12 #define E830_PRTMAC_RX_PKT_DRP_CNT_RX_OFLOW_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 12) #define E830_PRTMAC_SCRATCH 0x001E3020 /* Reset Source: GLOBR */ #define E830_PRTMAC_SCRATCH_SCRATCH_S 0 #define E830_PRTMAC_SCRATCH_SCRATCH_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_STATUS 0x001E3200 /* Reset Source: GLOBR */ #define E830_PRTMAC_STATUS_RX_LOC_FAULT_S 0 #define E830_PRTMAC_STATUS_RX_LOC_FAULT_M BIT(0) #define E830_PRTMAC_STATUS_RX_REM_FAULT_S 1 #define E830_PRTMAC_STATUS_RX_REM_FAULT_M BIT(1) #define E830_PRTMAC_STATUS_PHY_LOS_S 2 #define E830_PRTMAC_STATUS_PHY_LOS_M BIT(2) #define E830_PRTMAC_STATUS_TS_AVAIL_S 3 #define E830_PRTMAC_STATUS_TS_AVAIL_M BIT(3) #define E830_PRTMAC_STATUS_RX_LOWP_S 4 #define E830_PRTMAC_STATUS_RX_LOWP_M BIT(4) #define E830_PRTMAC_STATUS_TX_EMPTY_S 5 #define E830_PRTMAC_STATUS_TX_EMPTY_M BIT(5) #define E830_PRTMAC_STATUS_RX_EMPTY_S 6 #define E830_PRTMAC_STATUS_RX_EMPTY_M BIT(6) #define E830_PRTMAC_STATUS_RX_LINT_FAULT_S 7 #define E830_PRTMAC_STATUS_RX_LINT_FAULT_M BIT(7) #define E830_PRTMAC_STATUS_TX_ISIDLE_S 8 #define E830_PRTMAC_STATUS_TX_ISIDLE_M BIT(8) #define E830_PRTMAC_STATUS_RESERVED_10_S 9 #define E830_PRTMAC_STATUS_RESERVED_10_M MAKEMASK(0x7FFFFF, 9) #define E830_PRTMAC_STATUS_SPARE 0x001E2740 /* Reset Source: GLOBR */ #define E830_PRTMAC_STATUS_SPARE_DFD_STATUS_SPARE_S 0 #define E830_PRTMAC_STATUS_SPARE_DFD_STATUS_SPARE_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_TS_RX_PCS_LATENCY 0x001E2220 /* Reset Source: GLOBR */ #define E830_PRTMAC_TS_RX_PCS_LATENCY_TS_RX_PCS_LATENCY_S 0 #define E830_PRTMAC_TS_RX_PCS_LATENCY_TS_RX_PCS_LATENCY_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_TS_TIMESTAMP 0x001E33E0 /* Reset Source: GLOBR */ #define E830_PRTMAC_TS_TIMESTAMP_TS_TIMESTAMP_S 0 #define E830_PRTMAC_TS_TIMESTAMP_TS_TIMESTAMP_M MAKEMASK(0xFFFFFFFF, 0) #define 
E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 /* Reset Source: GLOBR */ #define E830_PRTMAC_TS_TX_MEM_VALID_H_TIMESTAMP_TX_VALID_ARR_H_S 0 #define E830_PRTMAC_TS_TX_MEM_VALID_H_TIMESTAMP_TX_VALID_ARR_H_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 /* Reset Source: GLOBR */ #define E830_PRTMAC_TS_TX_MEM_VALID_L_TIMESTAMP_TX_VALID_ARR_L_S 0 #define E830_PRTMAC_TS_TX_MEM_VALID_L_TIMESTAMP_TX_VALID_ARR_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PRTMAC_TS_TX_PCS_LATENCY 0x001E2200 /* Reset Source: GLOBR */ #define E830_PRTMAC_TS_TX_PCS_LATENCY_TS_TX_PCS_LATENCY_S 0 #define E830_PRTMAC_TS_TX_PCS_LATENCY_TS_TX_PCS_LATENCY_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_TX_FIFO_SECTIONS 0x001E3100 /* Reset Source: GLOBR */ #define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_S 0 #define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_S 16 #define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_TX_IPG_LENGTH 0x001E3220 /* Reset Source: GLOBR */ #define E830_PRTMAC_TX_IPG_LENGTH_AVG_IPG_LEN_S 0 #define E830_PRTMAC_TX_IPG_LENGTH_AVG_IPG_LEN_M MAKEMASK(0x3F, 0) #define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_23_16_S 8 #define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_23_16_M MAKEMASK(0xFF, 8) #define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_15_0_S 16 #define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_15_0_M MAKEMASK(0xFFFF, 16) #define E830_PRTMAC_USER_TX_PAUSE_CNT 0x001E2760 /* Reset Source: GLOBR */ #define E830_PRTMAC_USER_TX_PAUSE_CNT_USER_TX_PAUSE_CNT_S 0 #define E830_PRTMAC_USER_TX_PAUSE_CNT_USER_TX_PAUSE_CNT_M MAKEMASK(0xFFFF, 0) #define E830_PRTMAC_XIF_MODE 0x001E3400 /* Reset Source: GLOBR */ #define E830_PRTMAC_XIF_MODE_XGMII_ENA_S 0 #define E830_PRTMAC_XIF_MODE_XGMII_ENA_M BIT(0) #define E830_PRTMAC_XIF_MODE_RESERVED_2_S 1 #define E830_PRTMAC_XIF_MODE_RESERVED_2_M MAKEMASK(0x7, 1) #define 
E830_PRTMAC_XIF_MODE_PAUSETIMERX8_S 4 #define E830_PRTMAC_XIF_MODE_PAUSETIMERX8_M BIT(4) #define E830_PRTMAC_XIF_MODE_ONE_STEP_ENA_S 5 #define E830_PRTMAC_XIF_MODE_ONE_STEP_ENA_M BIT(5) #define E830_PRTMAC_XIF_MODE_RX_PAUSE_BYPASS_S 6 #define E830_PRTMAC_XIF_MODE_RX_PAUSE_BYPASS_M BIT(6) #define E830_PRTMAC_XIF_MODE_RESERVED1_S 7 #define E830_PRTMAC_XIF_MODE_RESERVED1_M BIT(7) #define E830_PRTMAC_XIF_MODE_TX_MAC_RS_ERR_S 8 #define E830_PRTMAC_XIF_MODE_TX_MAC_RS_ERR_M BIT(8) #define E830_PRTMAC_XIF_MODE_TS_DELTA_MODE_S 9 #define E830_PRTMAC_XIF_MODE_TS_DELTA_MODE_M BIT(9) #define E830_PRTMAC_XIF_MODE_TS_DELAY_MODE_S 10 #define E830_PRTMAC_XIF_MODE_TS_DELAY_MODE_M BIT(10) #define E830_PRTMAC_XIF_MODE_TS_BINARY_MODE_S 11 #define E830_PRTMAC_XIF_MODE_TS_BINARY_MODE_M BIT(11) #define E830_PRTMAC_XIF_MODE_TS_UPD64_MODE_S 12 #define E830_PRTMAC_XIF_MODE_TS_UPD64_MODE_M BIT(12) #define E830_PRTMAC_XIF_MODE_RESERVED2_S 13 #define E830_PRTMAC_XIF_MODE_RESERVED2_M MAKEMASK(0x7, 13) #define E830_PRTMAC_XIF_MODE_RX_CNT_MODE_S 16 #define E830_PRTMAC_XIF_MODE_RX_CNT_MODE_M BIT(16) #define E830_PRTMAC_XIF_MODE_PFC_PULSE_MODE_S 17 #define E830_PRTMAC_XIF_MODE_PFC_PULSE_MODE_M BIT(17) #define E830_PRTMAC_XIF_MODE_PFC_LP_MODE_S 18 #define E830_PRTMAC_XIF_MODE_PFC_LP_MODE_M BIT(18) #define E830_PRTMAC_XIF_MODE_PFC_LP_16PRI_S 19 #define E830_PRTMAC_XIF_MODE_PFC_LP_16PRI_M BIT(19) #define E830_PRTMAC_XIF_MODE_TS_SFD_ENA_S 20 #define E830_PRTMAC_XIF_MODE_TS_SFD_ENA_M BIT(20) #define E830_PRTMAC_XIF_MODE_RESERVED3_S 21 #define E830_PRTMAC_XIF_MODE_RESERVED3_M MAKEMASK(0x7FF, 21) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF 0x001E2700 /* Reset Source: GLOBR */ #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF0_S 0 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF0_M MAKEMASK(0xF, 0) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF1_S 4 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF1_M MAKEMASK(0xF, 4) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF2_S 8 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF2_M MAKEMASK(0xF, 8) #define 
E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF3_S 12 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF3_M MAKEMASK(0xF, 12) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF4_S 16 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF4_M MAKEMASK(0xF, 16) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF5_S 20 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF5_M MAKEMASK(0xF, 20) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF6_S 24 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF6_M MAKEMASK(0xF, 24) #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF7_S 28 #define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF7_M MAKEMASK(0xF, 28) #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SW_ABOVE_HW_TAIL_S 28 #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SW_ABOVE_HW_TAIL_M BIT(28) #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SAME_TAIL_S 29 #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SAME_TAIL_M BIT(29) #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_TAIL_GE_QLEN_S 30 #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_TAIL_GE_QLEN_M BIT(30) #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_UR_S 31 #define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_UR_M BIT(31) #define E830_GL_MDET_HIF_UR_FIFO 0x00096844 /* Reset Source: CORER */ #define E830_GL_MDET_HIF_UR_FIFO_FUNC_NUM_S 0 #define E830_GL_MDET_HIF_UR_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0) #define E830_GL_MDET_HIF_UR_FIFO_PF_NUM_S 10 #define E830_GL_MDET_HIF_UR_FIFO_PF_NUM_M MAKEMASK(0x7, 10) #define E830_GL_MDET_HIF_UR_FIFO_FUNC_TYPE_S 13 #define E830_GL_MDET_HIF_UR_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13) #define E830_GL_MDET_HIF_UR_FIFO_MAL_TYPE_S 15 #define E830_GL_MDET_HIF_UR_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15) #define E830_GL_MDET_HIF_UR_FIFO_FIFO_FULL_S 20 #define E830_GL_MDET_HIF_UR_FIFO_FIFO_FULL_M BIT(20) #define E830_GL_MDET_HIF_UR_FIFO_VALID_S 21 #define E830_GL_MDET_HIF_UR_FIFO_VALID_M BIT(21) #define E830_GL_MDET_HIF_UR_FIFO_EVENT_CNT_S 24 #define E830_GL_MDET_HIF_UR_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24) #define E830_GL_MDET_HIF_UR_PF_CNT(_i) (0x00096804 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_HIF_UR_PF_CNT_MAX_INDEX 7 #define 
E830_GL_MDET_HIF_UR_PF_CNT_CNT_S 0 #define E830_GL_MDET_HIF_UR_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GL_MDET_HIF_UR_VF(_i) (0x00096824 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GL_MDET_HIF_UR_VF_MAX_INDEX 7 #define E830_GL_MDET_HIF_UR_VF_VF_MAL_EVENT_S 0 #define E830_GL_MDET_HIF_UR_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PF_MDET_HIF_UR 0x00096880 /* Reset Source: CORER */ #define E830_PF_MDET_HIF_UR_VALID_S 0 #define E830_PF_MDET_HIF_UR_VALID_M BIT(0) #define E830_VM_MDET_TX_TCLAN(_i) (0x000FC348 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define E830_VM_MDET_TX_TCLAN_MAX_INDEX 767 #define E830_VM_MDET_TX_TCLAN_VALID_S 0 #define E830_VM_MDET_TX_TCLAN_VALID_M BIT(0) #define E830_VP_MDET_HIF_UR(_VF) (0x00096C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_VP_MDET_HIF_UR_MAX_INDEX 255 #define E830_VP_MDET_HIF_UR_VALID_S 0 #define E830_VP_MDET_HIF_UR_VALID_M BIT(0) #define E830_GLNVM_FLA_GLOBAL_LOCKED_S 7 #define E830_GLNVM_FLA_GLOBAL_LOCKED_M BIT(7) #define E830_DMA_AGENT_AT0 0x000BE268 /* Reset Source: PCIR */ #define E830_DMA_AGENT_AT0_RLAN_PASID_SELECTED_S 0 #define E830_DMA_AGENT_AT0_RLAN_PASID_SELECTED_M MAKEMASK(0x3, 0) #define E830_DMA_AGENT_AT0_TCLAN_PASID_SELECTED_S 2 #define E830_DMA_AGENT_AT0_TCLAN_PASID_SELECTED_M MAKEMASK(0x3, 2) #define E830_DMA_AGENT_AT0_PQM_DBL_PASID_SELECTED_S 4 #define E830_DMA_AGENT_AT0_PQM_DBL_PASID_SELECTED_M MAKEMASK(0x3, 4) #define E830_DMA_AGENT_AT0_PQM_DESC_PASID_SELECTED_S 6 #define E830_DMA_AGENT_AT0_PQM_DESC_PASID_SELECTED_M MAKEMASK(0x3, 6) #define E830_DMA_AGENT_AT0_PQM_TS_DESC_PASID_SELECTED_S 8 #define E830_DMA_AGENT_AT0_PQM_TS_DESC_PASID_SELECTED_M MAKEMASK(0x3, 8) #define E830_DMA_AGENT_AT0_RDPU_PASID_SELECTED_S 10 #define E830_DMA_AGENT_AT0_RDPU_PASID_SELECTED_M MAKEMASK(0x3, 10) #define E830_DMA_AGENT_AT0_TDPU_PASID_SELECTED_S 12 #define E830_DMA_AGENT_AT0_TDPU_PASID_SELECTED_M MAKEMASK(0x3, 12) #define 
E830_DMA_AGENT_AT0_MBX_PASID_SELECTED_S 14 #define E830_DMA_AGENT_AT0_MBX_PASID_SELECTED_M MAKEMASK(0x3, 14) #define E830_DMA_AGENT_AT0_MNG_PASID_SELECTED_S 16 #define E830_DMA_AGENT_AT0_MNG_PASID_SELECTED_M MAKEMASK(0x3, 16) #define E830_DMA_AGENT_AT0_TEP_PMAT_PASID_SELECTED_S 18 #define E830_DMA_AGENT_AT0_TEP_PMAT_PASID_SELECTED_M MAKEMASK(0x3, 18) #define E830_DMA_AGENT_AT0_RX_PE_PASID_SELECTED_S 20 #define E830_DMA_AGENT_AT0_RX_PE_PASID_SELECTED_M MAKEMASK(0x3, 20) #define E830_DMA_AGENT_AT0_TX_PE_PASID_SELECTED_S 22 #define E830_DMA_AGENT_AT0_TX_PE_PASID_SELECTED_M MAKEMASK(0x3, 22) #define E830_DMA_AGENT_AT0_PEPMAT_PASID_SELECTED_S 24 #define E830_DMA_AGENT_AT0_PEPMAT_PASID_SELECTED_M MAKEMASK(0x3, 24) #define E830_DMA_AGENT_AT0_FPMAT_PASID_SELECTED_S 26 #define E830_DMA_AGENT_AT0_FPMAT_PASID_SELECTED_M MAKEMASK(0x3, 26) #define E830_DMA_AGENT_AT1 0x000BE26C /* Reset Source: PCIR */ #define E830_DMA_AGENT_AT1_RLAN_PASID_SELECTED_S 0 #define E830_DMA_AGENT_AT1_RLAN_PASID_SELECTED_M MAKEMASK(0x3, 0) #define E830_DMA_AGENT_AT1_TCLAN_PASID_SELECTED_S 2 #define E830_DMA_AGENT_AT1_TCLAN_PASID_SELECTED_M MAKEMASK(0x3, 2) #define E830_DMA_AGENT_AT1_PQM_DBL_PASID_SELECTED_S 4 #define E830_DMA_AGENT_AT1_PQM_DBL_PASID_SELECTED_M MAKEMASK(0x3, 4) #define E830_DMA_AGENT_AT1_PQM_DESC_PASID_SELECTED_S 6 #define E830_DMA_AGENT_AT1_PQM_DESC_PASID_SELECTED_M MAKEMASK(0x3, 6) #define E830_DMA_AGENT_AT1_PQM_TS_DESC_PASID_SELECTED_S 8 #define E830_DMA_AGENT_AT1_PQM_TS_DESC_PASID_SELECTED_M MAKEMASK(0x3, 8) #define E830_DMA_AGENT_AT1_RDPU_PASID_SELECTED_S 10 #define E830_DMA_AGENT_AT1_RDPU_PASID_SELECTED_M MAKEMASK(0x3, 10) #define E830_DMA_AGENT_AT1_TDPU_PASID_SELECTED_S 12 #define E830_DMA_AGENT_AT1_TDPU_PASID_SELECTED_M MAKEMASK(0x3, 12) #define E830_DMA_AGENT_AT1_MBX_PASID_SELECTED_S 14 #define E830_DMA_AGENT_AT1_MBX_PASID_SELECTED_M MAKEMASK(0x3, 14) #define E830_DMA_AGENT_AT1_MNG_PASID_SELECTED_S 16 #define E830_DMA_AGENT_AT1_MNG_PASID_SELECTED_M MAKEMASK(0x3, 16) #define 
E830_DMA_AGENT_AT1_TEP_PMAT_PASID_SELECTED_S 18 #define E830_DMA_AGENT_AT1_TEP_PMAT_PASID_SELECTED_M MAKEMASK(0x3, 18) #define E830_DMA_AGENT_AT1_RX_PE_PASID_SELECTED_S 20 #define E830_DMA_AGENT_AT1_RX_PE_PASID_SELECTED_M MAKEMASK(0x3, 20) #define E830_DMA_AGENT_AT1_TX_PE_PASID_SELECTED_S 22 #define E830_DMA_AGENT_AT1_TX_PE_PASID_SELECTED_M MAKEMASK(0x3, 22) #define E830_DMA_AGENT_AT1_PEPMAT_PASID_SELECTED_S 24 #define E830_DMA_AGENT_AT1_PEPMAT_PASID_SELECTED_M MAKEMASK(0x3, 24) #define E830_DMA_AGENT_AT1_FPMAT_PASID_SELECTED_S 26 #define E830_DMA_AGENT_AT1_FPMAT_PASID_SELECTED_M MAKEMASK(0x3, 26) #define E830_GLPCI_CAPSUP_DOE_EN_S 1 #define E830_GLPCI_CAPSUP_DOE_EN_M BIT(1) #define E830_GLPCI_CAPSUP_GEN5_EXT_EN_S 12 #define E830_GLPCI_CAPSUP_GEN5_EXT_EN_M BIT(12) #define E830_GLPCI_CAPSUP_PTM_EN_S 13 #define E830_GLPCI_CAPSUP_PTM_EN_M BIT(13) #define E830_GLPCI_CAPSUP_SNPS_RAS_EN_S 14 #define E830_GLPCI_CAPSUP_SNPS_RAS_EN_M BIT(14) #define E830_GLPCI_CAPSUP_SIOV_EN_S 15 #define E830_GLPCI_CAPSUP_SIOV_EN_M BIT(15) #define E830_GLPCI_CAPSUP_PTM_VSEC_EN_S 22 #define E830_GLPCI_CAPSUP_PTM_VSEC_EN_M BIT(22) #define E830_GLPCI_CAPSUP_SNPS_RAS_PROT_EN_S 23 #define E830_GLPCI_CAPSUP_SNPS_RAS_PROT_EN_M BIT(23) #define E830_GLPCI_DOE_BUSY_STATUS 0x0009DF70 /* Reset Source: PCIR */ #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_REQ_S 0 #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_REQ_M BIT(0) #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_EMPR_S 1 #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_EMPR_M BIT(1) #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_PCIER_S 2 #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_PCIER_M BIT(2) #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FLR_S 3 #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FLR_M BIT(3) #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_CFG_ABORT_S 4 #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_CFG_ABORT_M BIT(4) #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FW_S 5 #define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FW_M BIT(5) #define E830_GLPCI_DOE_CFG 0x0009DF54 /* Reset Source: PCIR */ #define 
E830_GLPCI_DOE_CFG_ENABLE_S 0 #define E830_GLPCI_DOE_CFG_ENABLE_M BIT(0) #define E830_GLPCI_DOE_CFG_ITR_SUPPORT_S 1 #define E830_GLPCI_DOE_CFG_ITR_SUPPORT_M BIT(1) #define E830_GLPCI_DOE_CFG_POISON_CFGWR_PIOSF_EP_BIT_S 2 #define E830_GLPCI_DOE_CFG_POISON_CFGWR_PIOSF_EP_BIT_M BIT(2) #define E830_GLPCI_DOE_CFG_POISON_CFGWR_SBIOSF_AER_MSG_S 3 #define E830_GLPCI_DOE_CFG_POISON_CFGWR_SBIOSF_AER_MSG_M BIT(3) #define E830_GLPCI_DOE_CFG_MSIX_VECTOR_S 8 #define E830_GLPCI_DOE_CFG_MSIX_VECTOR_M MAKEMASK(0x7FF, 8) #define E830_GLPCI_DOE_CTRL 0x0009DF60 /* Reset Source: PCIR */ #define E830_GLPCI_DOE_CTRL_BUSY_FW_SET_S 0 #define E830_GLPCI_DOE_CTRL_BUSY_FW_SET_M BIT(0) #define E830_GLPCI_DOE_CTRL_DOE_CFG_ERR_SET_S 1 #define E830_GLPCI_DOE_CTRL_DOE_CFG_ERR_SET_M BIT(1) #define E830_GLPCI_DOE_DBG 0x0009DF6C /* Reset Source: PCIR */ #define E830_GLPCI_DOE_DBG_CFG_BUSY_S 0 #define E830_GLPCI_DOE_DBG_CFG_BUSY_M BIT(0) #define E830_GLPCI_DOE_DBG_CFG_DATA_OBJECT_READY_S 1 #define E830_GLPCI_DOE_DBG_CFG_DATA_OBJECT_READY_M BIT(1) #define E830_GLPCI_DOE_DBG_CFG_ERROR_S 2 #define E830_GLPCI_DOE_DBG_CFG_ERROR_M BIT(2) #define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_ENABLE_S 3 #define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_ENABLE_M BIT(3) #define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_STATUS_S 4 #define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_STATUS_M BIT(4) #define E830_GLPCI_DOE_DBG_REQ_BUF_SW_WR_PTR_S 8 #define E830_GLPCI_DOE_DBG_REQ_BUF_SW_WR_PTR_M MAKEMASK(0x1FF, 8) #define E830_GLPCI_DOE_DBG_RESP_BUF_SW_RD_PTR_S 20 #define E830_GLPCI_DOE_DBG_RESP_BUF_SW_RD_PTR_M MAKEMASK(0x1FF, 20) #define E830_GLPCI_DOE_ERR_EN 0x0009DF64 /* Reset Source: PCIR */ #define E830_GLPCI_DOE_ERR_EN_RD_REQ_BUF_ECC_ERR_EN_S 0 #define E830_GLPCI_DOE_ERR_EN_RD_REQ_BUF_ECC_ERR_EN_M BIT(0) #define E830_GLPCI_DOE_ERR_EN_RD_RESP_BUF_ECC_ERR_EN_S 1 #define E830_GLPCI_DOE_ERR_EN_RD_RESP_BUF_ECC_ERR_EN_M BIT(1) #define E830_GLPCI_DOE_ERR_EN_SW_WR_CFG_POISONED_EN_S 2 #define E830_GLPCI_DOE_ERR_EN_SW_WR_CFG_POISONED_EN_M BIT(2) #define 
E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_EN_S 3 #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_EN_M BIT(3) #define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_REQ_EN_S 4 #define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_REQ_EN_M BIT(4) #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_EN_S 5 #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_EN_M BIT(5) #define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_FW_EN_S 6 #define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_FW_EN_M BIT(6) #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_OVERFLOW_EN_S 7 #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_OVERFLOW_EN_M BIT(7) #define E830_GLPCI_DOE_ERR_EN_SW_GO_REQ_BUF_EMPTY_EN_S 8 #define E830_GLPCI_DOE_ERR_EN_SW_GO_REQ_BUF_EMPTY_EN_M BIT(8) #define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_READY_LOW_EN_S 9 #define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_READY_LOW_EN_M BIT(9) #define E830_GLPCI_DOE_ERR_EN_SW_REQ_DURING_MNG_RST_EN_S 10 #define E830_GLPCI_DOE_ERR_EN_SW_REQ_DURING_MNG_RST_EN_M BIT(10) #define E830_GLPCI_DOE_ERR_EN_FW_SET_ERROR_EN_S 11 #define E830_GLPCI_DOE_ERR_EN_FW_SET_ERROR_EN_M BIT(11) #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_EN_S 12 #define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_EN_M BIT(12) #define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_ABORT_EN_S 13 #define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_ABORT_EN_M BIT(13) #define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_EN_S 14 #define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_EN_M BIT(14) #define E830_GLPCI_DOE_ERR_STATUS 0x0009DF68 /* Reset Source: PCIR */ #define E830_GLPCI_DOE_ERR_STATUS_RD_REQ_BUF_ECC_ERR_S 0 #define E830_GLPCI_DOE_ERR_STATUS_RD_REQ_BUF_ECC_ERR_M BIT(0) #define E830_GLPCI_DOE_ERR_STATUS_RD_RESP_BUF_ECC_ERR_S 1 #define E830_GLPCI_DOE_ERR_STATUS_RD_RESP_BUF_ECC_ERR_M BIT(1) #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_CFG_POISONED_S 2 #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_CFG_POISONED_M BIT(2) #define 
E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_S 3 #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_M BIT(3) #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_REQ_S 4 #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_REQ_M BIT(4) #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_S 5 #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_M BIT(5) #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_FW_S 6 #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_FW_M BIT(6) #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_OVERFLOW_S 7 #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_OVERFLOW_M BIT(7) #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_REQ_BUF_EMPTY_S 8 #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_REQ_BUF_EMPTY_M BIT(8) #define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_READY_LOW_S 9 #define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_READY_LOW_M BIT(9) #define E830_GLPCI_DOE_ERR_STATUS_SW_REQ_DURING_MNG_RST_S 10 #define E830_GLPCI_DOE_ERR_STATUS_SW_REQ_DURING_MNG_RST_M BIT(10) #define E830_GLPCI_DOE_ERR_STATUS_FW_SET_ERROR_S 11 #define E830_GLPCI_DOE_ERR_STATUS_FW_SET_ERROR_M BIT(11) #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_S 12 #define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_M BIT(12) #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_ABORT_S 13 #define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_ABORT_M BIT(13) #define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_S 14 #define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_M BIT(14) #define E830_GLPCI_DOE_ERR_STATUS_CFG_ERR_IDX_S 24 #define E830_GLPCI_DOE_ERR_STATUS_CFG_ERR_IDX_M MAKEMASK(0x1F, 24) #define E830_GLPCI_DOE_REQ_MSG_NUM_DWS 0x0009DF58 /* Reset Source: PCIR */ #define E830_GLPCI_DOE_REQ_MSG_NUM_DWS_GLPCI_DOE_REQ_MSG_NUM_DWS_S 0 #define E830_GLPCI_DOE_REQ_MSG_NUM_DWS_GLPCI_DOE_REQ_MSG_NUM_DWS_M MAKEMASK(0x1FF, 0) #define E830_GLPCI_DOE_RESP 0x0009DF5C /* Reset Source: PCIR */ #define 
E830_GLPCI_DOE_RESP_MSG_NUM_DWS_S 0 #define E830_GLPCI_DOE_RESP_MSG_NUM_DWS_M MAKEMASK(0x1FF, 0) #define E830_GLPCI_DOE_RESP_READY_SET_S 16 #define E830_GLPCI_DOE_RESP_READY_SET_M BIT(16) #define E830_GLPCI_ERR_DBG 0x0009DF84 /* Reset Source: PCIR */ #define E830_GLPCI_ERR_DBG_ERR_MIFO_FULL_DROP_CTR_S 0 #define E830_GLPCI_ERR_DBG_ERR_MIFO_FULL_DROP_CTR_M MAKEMASK(0x3, 0) #define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_SM_S 2 #define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_SM_M BIT(2) #define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_FIFO_NUM_ENTRIES_S 3 #define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_FIFO_NUM_ENTRIES_M MAKEMASK(0x7, 3) #define E830_GLPCI_ERR_DBG_ERR_MIFO_NUM_ENTRIES_S 6 #define E830_GLPCI_ERR_DBG_ERR_MIFO_NUM_ENTRIES_M MAKEMASK(0xF, 6) #define E830_GLPCI_NPQ_CFG_HIGH_TO_S 20 #define E830_GLPCI_NPQ_CFG_HIGH_TO_M BIT(20) #define E830_GLPCI_NPQ_CFG_INC_150MS_TO_S 21 #define E830_GLPCI_NPQ_CFG_INC_150MS_TO_M BIT(21) #define E830_GLPCI_PUSH_PQM_CTRL 0x0009DF74 /* Reset Source: POR */ #define E830_GLPCI_PUSH_PQM_CTRL_PF_LEGACY_RANGE_EN_S 0 #define E830_GLPCI_PUSH_PQM_CTRL_PF_LEGACY_RANGE_EN_M BIT(0) #define E830_GLPCI_PUSH_PQM_CTRL_PF_TXTIME_RANGE_EN_S 1 #define E830_GLPCI_PUSH_PQM_CTRL_PF_TXTIME_RANGE_EN_M BIT(1) #define E830_GLPCI_PUSH_PQM_CTRL_PF_4K_RANGE_EN_S 2 #define E830_GLPCI_PUSH_PQM_CTRL_PF_4K_RANGE_EN_M BIT(2) #define E830_GLPCI_PUSH_PQM_CTRL_VF_LEGACY_RANGE_EN_S 3 #define E830_GLPCI_PUSH_PQM_CTRL_VF_LEGACY_RANGE_EN_M BIT(3) #define E830_GLPCI_PUSH_PQM_CTRL_VF_TXTIME_RANGE_EN_S 4 #define E830_GLPCI_PUSH_PQM_CTRL_VF_TXTIME_RANGE_EN_M BIT(4) #define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_VAL_S 8 #define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_VAL_M MAKEMASK(0xF, 8) #define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_DIS_S 12 #define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_DIS_M BIT(12) #define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_2DWS_ONE_CHUNK_EN_S 16 #define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_2DWS_ONE_CHUNK_EN_M BIT(16) #define 
E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_1DW_ON_XLR_S 17 #define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_1DW_ON_XLR_M BIT(17) #define E830_GLPCI_PUSH_PQM_DBG 0x0009DF7C /* Reset Source: PCIR */ #define E830_GLPCI_PUSH_PQM_DBG_EVENTS_CTR_S 0 #define E830_GLPCI_PUSH_PQM_DBG_EVENTS_CTR_M MAKEMASK(0xFF, 0) #define E830_GLPCI_PUSH_PQM_DBG_DROP_CTR_S 8 #define E830_GLPCI_PUSH_PQM_DBG_DROP_CTR_M MAKEMASK(0xFF, 8) #define E830_GLPCI_PUSH_PQM_DBG_ASYNC_FIFO_USED_SPACE_S 16 #define E830_GLPCI_PUSH_PQM_DBG_ASYNC_FIFO_USED_SPACE_M MAKEMASK(0xF, 16) #define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_USED_SPACE_S 20 #define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_USED_SPACE_M MAKEMASK(0x1F, 20) #define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_PUSH_WHEN_FULL_ERR_S 25 #define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_PUSH_WHEN_FULL_ERR_M BIT(25) #define E830_GLPCI_PUSH_PQM_IF_TO_STATUS 0x0009DF78 /* Reset Source: PCIR */ #define E830_GLPCI_PUSH_PQM_IF_TO_STATUS_GLPCI_PUSH_PQM_IF_TO_STATUS_S 0 #define E830_GLPCI_PUSH_PQM_IF_TO_STATUS_GLPCI_PUSH_PQM_IF_TO_STATUS_M BIT(0) #define E830_GLPCI_RDPU_CMD_DBG 0x000BE264 /* Reset Source: PCIR */ #define E830_GLPCI_RDPU_CMD_DBG_RDPU0_CMD_POP_CNT_S 0 #define E830_GLPCI_RDPU_CMD_DBG_RDPU0_CMD_POP_CNT_M MAKEMASK(0xFF, 0) #define E830_GLPCI_RDPU_CMD_DBG_RDPU1_CMD_POP_CNT_S 8 #define E830_GLPCI_RDPU_CMD_DBG_RDPU1_CMD_POP_CNT_M MAKEMASK(0xFF, 8) #define E830_GLPCI_RDPU_CMD_DBG_RDPU2_CMD_POP_CNT_S 16 #define E830_GLPCI_RDPU_CMD_DBG_RDPU2_CMD_POP_CNT_M MAKEMASK(0xFF, 16) #define E830_GLPCI_RDPU_CMD_DBG_RDPU3_CMD_POP_CNT_S 24 #define E830_GLPCI_RDPU_CMD_DBG_RDPU3_CMD_POP_CNT_M MAKEMASK(0xFF, 24) #define E830_GLPCI_RDPU_CMD_FIFO_DBG0 0x000BE25C /* Reset Source: PCIR */ #define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU0_CMD_NUM_ENTRIES_S 0 #define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU0_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 0) #define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU1_CMD_NUM_ENTRIES_S 16 #define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU1_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 16) #define 
E830_GLPCI_RDPU_CMD_FIFO_DBG1 0x000BE260 /* Reset Source: PCIR */ #define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU2_CMD_NUM_ENTRIES_S 0 #define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU2_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 0) #define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU3_CMD_NUM_ENTRIES_S 16 #define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU3_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 16) #define E830_GLPCI_RDPU_TAG 0x000BE258 /* Reset Source: PCIR */ #define E830_GLPCI_RDPU_TAG_OVERRIDE_DELAY_S 0 #define E830_GLPCI_RDPU_TAG_OVERRIDE_DELAY_M MAKEMASK(0xFF, 0) #define E830_GLPCI_RDPU_TAG_EXPECTED_TAG_S 8 #define E830_GLPCI_RDPU_TAG_EXPECTED_TAG_M MAKEMASK(0x3FF, 8) #define E830_GLPCI_SB_AER_MSG_OUT 0x0009DF80 /* Reset Source: PCIR */ #define E830_GLPCI_SB_AER_MSG_OUT_EN_S 0 #define E830_GLPCI_SB_AER_MSG_OUT_EN_M BIT(0) #define E830_GLPCI_SB_AER_MSG_OUT_ANF_SET_EN_S 1 #define E830_GLPCI_SB_AER_MSG_OUT_ANF_SET_EN_M BIT(1) #define E830_PF_FUNC_RID_HOST_S 16 #define E830_PF_FUNC_RID_HOST_M MAKEMASK(0x3, 16) #define E830_GLPES_PFRXNPECNMARKEDPKTSHI(_i) (0x00553004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define E830_GLPES_PFRXNPECNMARKEDPKTSHI_MAX_INDEX 127 #define E830_GLPES_PFRXNPECNMARKEDPKTSHI_RXNPECNMARKEDPKTSHI_S 0 #define E830_GLPES_PFRXNPECNMARKEDPKTSHI_RXNPECNMARKEDPKTSHI_M MAKEMASK(0xFFFFFF, 0) #define E830_GLPES_PFRXNPECNMARKEDPKTSLO(_i) (0x00553000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ #define E830_GLPES_PFRXNPECNMARKEDPKTSLO_MAX_INDEX 127 #define E830_GLPES_PFRXNPECNMARKEDPKTSLO_RXNPECNMARKEDPKTSLO_S 0 #define E830_GLPES_PFRXNPECNMARKEDPKTSLO_RXNPECNMARKEDPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLPES_PFRXRPCNPHANDLED(_i) (0x00552C00 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define E830_GLPES_PFRXRPCNPHANDLED_MAX_INDEX 127 #define E830_GLPES_PFRXRPCNPHANDLED_RXRPCNPHANDLED_S 0 #define E830_GLPES_PFRXRPCNPHANDLED_RXRPCNPHANDLED_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLPES_PFRXRPCNPIGNORED(_i) (0x00552800 + ((_i) * 4)) /* _i=0...127 */ 
/* Reset Source: CORER */ #define E830_GLPES_PFRXRPCNPIGNORED_MAX_INDEX 127 #define E830_GLPES_PFRXRPCNPIGNORED_RXRPCNPIGNORED_S 0 #define E830_GLPES_PFRXRPCNPIGNORED_RXRPCNPIGNORED_M MAKEMASK(0xFFFFFF, 0) #define E830_GLPES_PFTXNPCNPSENT(_i) (0x00553800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ #define E830_GLPES_PFTXNPCNPSENT_MAX_INDEX 127 #define E830_GLPES_PFTXNPCNPSENT_TXNPCNPSENT_S 0 #define E830_GLPES_PFTXNPCNPSENT_TXNPCNPSENT_M MAKEMASK(0xFFFFFF, 0) #define E830_GLQF_FLAT_HLUT(_i) (0x004C0000 + ((_i) * 4)) /* _i=0...8191 */ /* Reset Source: CORER */ #define E830_GLQF_FLAT_HLUT_MAX_INDEX 8191 #define E830_GLQF_FLAT_HLUT_LUT0_S 0 #define E830_GLQF_FLAT_HLUT_LUT0_M MAKEMASK(0xFF, 0) #define E830_GLQF_FLAT_HLUT_LUT1_S 8 #define E830_GLQF_FLAT_HLUT_LUT1_M MAKEMASK(0xFF, 8) #define E830_GLQF_FLAT_HLUT_LUT2_S 16 #define E830_GLQF_FLAT_HLUT_LUT2_M MAKEMASK(0xFF, 16) #define E830_GLQF_FLAT_HLUT_LUT3_S 24 #define E830_GLQF_FLAT_HLUT_LUT3_M MAKEMASK(0xFF, 24) #define E830_GLQF_QGRP_CNTX(_i) (0x00490000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define E830_GLQF_QGRP_CNTX_MAX_INDEX 2047 #define E830_GLQF_QGRP_CNTX_QG_LUT_BASE_S 0 #define E830_GLQF_QGRP_CNTX_QG_LUT_BASE_M MAKEMASK(0x7FFF, 0) #define E830_GLQF_QGRP_CNTX_QG_LUT_SIZE_S 16 #define E830_GLQF_QGRP_CNTX_QG_LUT_SIZE_M MAKEMASK(0xF, 16) #define E830_GLQF_QGRP_CNTX_VSI_S 20 #define E830_GLQF_QGRP_CNTX_VSI_M MAKEMASK(0x3FF, 20) #define E830_GLQF_QGRP_PF_OWNER(_i) (0x00484000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ #define E830_GLQF_QGRP_PF_OWNER_MAX_INDEX 2047 #define E830_GLQF_QGRP_PF_OWNER_OWNER_PF_S 0 #define E830_GLQF_QGRP_PF_OWNER_OWNER_PF_M MAKEMASK(0x7, 0) #define E830_PFQF_LUT_ALLOC 0x0048E000 /* Reset Source: CORER */ #define E830_PFQF_LUT_ALLOC_LUT_BASE_S 0 #define E830_PFQF_LUT_ALLOC_LUT_BASE_M MAKEMASK(0x7FFF, 0) #define E830_PFQF_LUT_ALLOC_LUT_SIZE_S 16 #define E830_PFQF_LUT_ALLOC_LUT_SIZE_M MAKEMASK(0xF, 16) #define E830_VSIQF_DEF_QGRP(_VSI) 
(0x00486000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ #define E830_VSIQF_DEF_QGRP_MAX_INDEX 767 #define E830_VSIQF_DEF_QGRP_DEF_QGRP_S 0 #define E830_VSIQF_DEF_QGRP_DEF_QGRP_M MAKEMASK(0x7FF, 0) #define E830_GLPRT_BPRCH_BPRCH_S 0 #define E830_GLPRT_BPRCH_BPRCH_M MAKEMASK(0xFF, 0) #define E830_GLPRT_BPRCL_BPRCL_S 0 #define E830_GLPRT_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLPRT_BPTCH_BPTCH_S 0 #define E830_GLPRT_BPTCH_BPTCH_M MAKEMASK(0xFF, 0) #define E830_GLPRT_BPTCL_BPTCL_S 0 #define E830_GLPRT_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLPRT_UPTCL_UPTCL_S 0 #define E830_GLPRT_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLPTM_ART_CTL 0x00088B50 /* Reset Source: POR */ #define E830_GLPTM_ART_CTL_ACTIVE_S 0 #define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0) #define E830_GLPTM_ART_CTL_TIME_OUT_S 1 #define E830_GLPTM_ART_CTL_TIME_OUT_M BIT(1) #define E830_GLPTM_ART_CTL_PTM_READY_S 2 #define E830_GLPTM_ART_CTL_PTM_READY_M BIT(2) #define E830_GLPTM_ART_CTL_PTM_AUTO_S 3 #define E830_GLPTM_ART_CTL_PTM_AUTO_M BIT(3) #define E830_GLPTM_ART_CTL_PTM_AUTO_LATCH_S 4 #define E830_GLPTM_ART_CTL_PTM_AUTO_LATCH_M BIT(4) #define E830_GLPTM_ART_CTL_LATCH_PTP_T1_S 5 #define E830_GLPTM_ART_CTL_LATCH_PTP_T1_M BIT(5) #define E830_GLPTM_ART_CTL_AUTO_POURSE_S 6 #define E830_GLPTM_ART_CTL_AUTO_POURSE_M BIT(6) #define E830_GLPTM_ART_TIME_H 0x00088B54 /* Reset Source: POR */ #define E830_GLPTM_ART_TIME_H_ART_TIME_H_S 0 #define E830_GLPTM_ART_TIME_H_ART_TIME_H_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLPTM_ART_TIME_L 0x00088B58 /* Reset Source: POR */ #define E830_GLPTM_ART_TIME_L_ART_TIME_L_S 0 #define E830_GLPTM_ART_TIME_L_ART_TIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GLTSYN_PTMTIME_H_MAX_INDEX 1 #define E830_GLTSYN_PTMTIME_H_TSYNEVNT_H_S 0 #define E830_GLTSYN_PTMTIME_H_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + 
((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ #define E830_GLTSYN_PTMTIME_L_MAX_INDEX 1 #define E830_GLTSYN_PTMTIME_L_TSYNEVNT_L_S 0 #define E830_GLTSYN_PTMTIME_L_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_H_0_AL 0x0008A004 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_H_0_AL_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_H_0_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_H_1_AL 0x0008B004 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_H_1_AL_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_H_1_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_L_0_AL 0x0008A000 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_L_0_AL_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_L_0_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_L_1_AL 0x0008B000 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_L_1_AL_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_L_1_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_PFPTM_SEM 0x00088B00 /* Reset Source: PFR */ #define E830_PFPTM_SEM_BUSY_S 0 #define E830_PFPTM_SEM_BUSY_M BIT(0) #define E830_PFPTM_SEM_PF_OWNER_S 4 #define E830_PFPTM_SEM_PF_OWNER_M MAKEMASK(0x7, 4) #define E830_VSI_PASID_1(_VSI) (0x00094000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ #define E830_VSI_PASID_1_MAX_INDEX 767 #define E830_VSI_PASID_1_PASID_S 0 #define E830_VSI_PASID_1_PASID_M MAKEMASK(0xFFFFF, 0) #define E830_VSI_PASID_1_EN_S 31 #define E830_VSI_PASID_1_EN_M BIT(31) #define E830_VSI_PASID_2(_VSI) (0x00095000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ #define E830_VSI_PASID_2_MAX_INDEX 767 #define E830_VSI_PASID_2_PASID_S 0 #define E830_VSI_PASID_2_PASID_M MAKEMASK(0xFFFFF, 0) #define E830_VSI_PASID_2_EN_S 31 #define E830_VSI_PASID_2_EN_M BIT(31) #define E830_GLPE_CQM_FUNC_INVALIDATE_PMF_ID_S 15 #define E830_GLPE_CQM_FUNC_INVALIDATE_PMF_ID_M MAKEMASK(0x3F, 15) #define E830_GLPE_CQM_FUNC_INVALIDATE_INVALIDATE_TYPE_S 29 #define E830_GLPE_CQM_FUNC_INVALIDATE_INVALIDATE_TYPE_M 
MAKEMASK(0x3, 29) #define E830_VFPE_MRTEIDXMASK_MAX_INDEX 255 #define E830_VSIQF_QGRP_CFG(_VSI) (0x00492000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ #define E830_VSIQF_QGRP_CFG_MAX_INDEX 767 #define E830_VSIQF_QGRP_CFG_VSI_QGRP_ENABLE_S 0 #define E830_VSIQF_QGRP_CFG_VSI_QGRP_ENABLE_M BIT(0) #define E830_VSIQF_QGRP_CFG_VSI_QGRP_GEN_INDEX_S 1 #define E830_VSIQF_QGRP_CFG_VSI_QGRP_GEN_INDEX_M MAKEMASK(0x7, 1) #define E830_GLDCB_RTC_BLOCKED 0x0012274C /* Reset Source: CORER */ #define E830_GLDCB_RTC_BLOCKED_BLOCKED_S 0 #define E830_GLDCB_RTC_BLOCKED_BLOCKED_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLDCB_RTCID 0x00122900 /* Reset Source: CORER */ #define E830_GLDCB_RTCID_IMM_DROP_TC_S 0 #define E830_GLDCB_RTCID_IMM_DROP_TC_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLDCB_RTCTI_CDS_SET 0x00122748 /* Reset Source: CORER */ #define E830_GLDCB_RTCTI_CDS_SET_CDS_SET_S 0 #define E830_GLDCB_RTCTI_CDS_SET_CDS_SET_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLDCB_RTCTQ_PD(_i) (0x00122700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GLDCB_RTCTQ_PD_MAX_INDEX 7 #define E830_GLDCB_RTCTQ_PD_RXQNUM_S 0 #define E830_GLDCB_RTCTQ_PD_RXQNUM_M MAKEMASK(0x7FF, 0) #define E830_GLDCB_RTCTQ_PD_IS_PF_Q_S 16 #define E830_GLDCB_RTCTQ_PD_IS_PF_Q_M BIT(16) #define E830_GLDCB_RTCTQ_SET 0x00122750 /* Reset Source: CORER */ #define E830_GLDCB_RTCTQ_SET_RTCTQ_VALID_S 0 #define E830_GLDCB_RTCTQ_SET_RTCTQ_VALID_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLDCB_RTCTQ_STICKY_EN 0x00122754 /* Reset Source: CORER */ #define E830_GLDCB_RTCTQ_STICKY_EN_EN_S 0 #define E830_GLDCB_RTCTQ_STICKY_EN_EN_M BIT(0) #define E830_GLDCB_RTCTS_PD(_i) (0x00122720 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ #define E830_GLDCB_RTCTS_PD_MAX_INDEX 7 #define E830_GLDCB_RTCTS_PD_PFCTIMER_S 0 #define E830_GLDCB_RTCTS_PD_PFCTIMER_M MAKEMASK(0x3FFF, 0) #define E830_GLRPB_TC_TOTAL_PC(_i) (0x000ACD00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ #define E830_GLRPB_TC_TOTAL_PC_MAX_INDEX 31 
#define E830_GLRPB_TC_TOTAL_PC_BYTE_CNT_S 0 #define E830_GLRPB_TC_TOTAL_PC_BYTE_CNT_M MAKEMASK(0xFFFFFFFF, 0) #define E830_VFINT_ITRN_64(_i, _j) (0x00002C00 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...2 */ /* Reset Source: CORER */ #define E830_VFINT_ITRN_64_MAX_INDEX 63 #define E830_VFINT_ITRN_64_INTERVAL_S 0 #define E830_VFINT_ITRN_64_INTERVAL_M MAKEMASK(0xFFF, 0) #define E830_GLQTX_TXTIME_DBELL_LSB1(_DBQM) (0x0000D000 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_GLQTX_TXTIME_DBELL_LSB1_MAX_INDEX 255 #define E830_GLQTX_TXTIME_DBELL_LSB1_QTX_TXTIME_DBELL_S 0 #define E830_GLQTX_TXTIME_DBELL_LSB1_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLQTX_TXTIME_DBELL_MSB1(_DBQM) (0x0000D004 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_GLQTX_TXTIME_DBELL_MSB1_MAX_INDEX 255 #define E830_GLQTX_TXTIME_DBELL_MSB1_QTX_TXTIME_DBELL_S 0 #define E830_GLQTX_TXTIME_DBELL_MSB1_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLQTX_TXTIME_LARGE_DBELL_LSB(_DBQM) (0x00040000 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_MAX_INDEX 255 #define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_QTX_TXTIME_DBELL_S 0 #define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLQTX_TXTIME_LARGE_DBELL_MSB(_DBQM) (0x00040004 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */ #define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_MAX_INDEX 255 #define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_QTX_TXTIME_DBELL_S 0 #define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_H_0_AL1 0x00003004 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_H_0_AL1_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_H_0_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_H_1_AL1 0x0000300C /* Reset Source: CORER */ #define E830_GLTSYN_TIME_H_1_AL1_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_H_1_AL1_TSYNTIME_L_M 
MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_L_0_AL1 0x00003000 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_L_0_AL1_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_L_0_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_GLTSYN_TIME_L_1_AL1 0x00003008 /* Reset Source: CORER */ #define E830_GLTSYN_TIME_L_1_AL1_TSYNTIME_L_S 0 #define E830_GLTSYN_TIME_L_1_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) #define E830_VSI_VSI2F_LEM(_VSI) (0x006100A0 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ #define E830_VSI_VSI2F_LEM_MAX_INDEX 767 #define E830_VSI_VSI2F_LEM_VFVMNUMBER_S 0 #define E830_VSI_VSI2F_LEM_VFVMNUMBER_M MAKEMASK(0x3FF, 0) #define E830_VSI_VSI2F_LEM_FUNCTIONTYPE_S 10 #define E830_VSI_VSI2F_LEM_FUNCTIONTYPE_M MAKEMASK(0x3, 10) #define E830_VSI_VSI2F_LEM_PFNUMBER_S 12 #define E830_VSI_VSI2F_LEM_PFNUMBER_M MAKEMASK(0x7, 12) #define E830_VSI_VSI2F_LEM_BUFFERNUMBER_S 16 #define E830_VSI_VSI2F_LEM_BUFFERNUMBER_M MAKEMASK(0x7, 16) #define E830_VSI_VSI2F_LEM_VSI_NUMBER_S 20 #define E830_VSI_VSI2F_LEM_VSI_NUMBER_M MAKEMASK(0x3FF, 20) #define E830_VSI_VSI2F_LEM_VSI_ENABLE_S 31 #define E830_VSI_VSI2F_LEM_VSI_ENABLE_M BIT(31) #endif /* !_ICE_HW_AUTOGEN_H_ */ diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c index ef55df061f3c..d2d13cd2db60 100644 --- a/sys/dev/ice/ice_lib.c +++ b/sys/dev/ice/ice_lib.c @@ -1,12102 +1,12064 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. 
Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file ice_lib.c * @brief Generic device setup and sysctl functions * * Library of generic device functions not specific to the networking stack. * * This includes hardware initialization functions, as well as handlers for * many of the device sysctls used to probe driver status or tune specific * behaviors. */ #include "ice_lib.h" #include "ice_iflib.h" #include #include #include #include #include #include #include /** * @var M_ICE * @brief main ice driver allocation type * * malloc(9) allocation type used by the majority of memory allocations in the * ice driver. 
*/ MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations"); /* * Helper function prototypes */ static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size); static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx); static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type); static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx); static int ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q); static int ice_setup_rx_ctx(struct ice_rx_queue *rxq); static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg); static void ice_free_fltr_list(struct ice_list_head *list); static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, const u8 *addr, enum ice_sw_fwd_act_type action); static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, struct ice_ctl_q_info *cq); static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e); static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, struct ice_rq_event_info *event); static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf); static void ice_update_port_oversize(struct ice_softc *sc, u64 rx_errors); static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info); static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors); static void ice_add_debug_tunables(struct ice_softc *sc); static void ice_add_debug_sysctls(struct ice_softc *sc); static void ice_vsi_set_rss_params(struct ice_vsi *vsi); static void ice_get_default_rss_key(u8 *seed); static int ice_set_rss_key(struct ice_vsi *vsi); static int ice_set_rss_lut(struct ice_vsi *vsi); static void ice_set_rss_flow_flds(struct ice_vsi *vsi); static void ice_clean_vsi_rss_cfg(struct ice_vsi 
*vsi); static const char *ice_aq_speed_to_str(struct ice_port_info *pi); static const char *ice_requested_fec_mode(struct ice_port_info *pi); static const char *ice_negotiated_fec_mode(struct ice_port_info *pi); static const char *ice_autoneg_mode(struct ice_port_info *pi); static const char *ice_flowcontrol_mode(struct ice_port_info *pi); static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw); static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status); static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc); static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed); static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width); static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi); static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi, struct sysctl_ctx_list *ctx, struct sysctl_oid *parent); static void ice_add_sysctls_mac_pfc_one_stat(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent_list, u64* pfc_stat_location, const char *node_name, const char *descr); static void ice_add_sysctls_mac_pfc_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_hw_port_stats *stats); static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, enum ice_vsi_type type, int idx, bool dynamic); static void ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event); static void ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event); static int ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, u16 ethertype, u16 direction, enum ice_sw_fwd_act_type action); static void ice_del_rx_lldp_filter(struct ice_softc *sc); static u16 ice_aq_phy_types_to_link_speeds(u64 phy_type_low, u64 phy_type_high); struct ice_phy_data; static int ice_intersect_phy_types_and_speeds(struct ice_softc *sc, struct ice_phy_data *phy_data); static int ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, struct 
ice_aqc_set_phy_cfg_data *cfg); static int ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg); static void ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg); static void ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv); static void ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, u64 *phy_type_high); static u16 ice_apply_supported_speed_filter(u16 report_speeds, u8 mod_type); static void ice_handle_health_status_event(struct ice_softc *sc, struct ice_rq_event_info *event); static void ice_print_health_status_string(device_t dev, struct ice_aqc_health_status_elem *elem); static void ice_debug_print_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event); static bool ice_check_ets_bw(u8 *table); static u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); static bool ice_dcb_needs_reconfig(struct ice_softc *sc, struct ice_dcbx_cfg *old_cfg, struct ice_dcbx_cfg *new_cfg); static void ice_dcb_recfg(struct ice_softc *sc); static u8 ice_dcb_tc_contig(u8 tc_map); static int ice_ets_str_to_tbl(const char *str, u8 *table, u8 limit); static int ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map); static void ice_sbuf_print_ets_cfg(struct sbuf *sbuf, const char *name, struct ice_dcb_ets_cfg *ets); static void ice_stop_pf_vsi(struct ice_softc *sc); static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt); static int ice_config_pfc(struct ice_softc *sc, u8 new_mode); void ice_add_dscp2tc_map_sysctls(struct ice_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *ctx_list); static void ice_set_default_local_mib_settings(struct ice_softc *sc); static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg); static void ice_start_dcbx_agent(struct ice_softc *sc); static u16 ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 cluster_id); static void 
ice_fw_debug_dump_print_clusters(struct ice_softc *sc, struct sbuf *sbuf); static void ice_remove_vsi_mirroring(struct ice_vsi *vsi); static int ice_get_tx_rx_equalizations(struct ice_hw *hw, u8 serdes_num, struct ice_serdes_equalization *ptr); static int ice_fec_counter_read(struct ice_hw *hw, u32 receiver_id, u32 reg_offset, u16 *output); static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, struct ice_fec_stats_to_sysctl *fec_stats); static bool ice_is_serdes_muxed(struct ice_hw *hw); static int ice_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed); static int ice_update_port_topology(u8 lport, struct ice_port_topology *port_topology, bool is_muxed); static int ice_get_port_topology(struct ice_hw *hw, u8 lport, struct ice_port_topology *port_topology); static int ice_module_init(void); static int ice_module_exit(void); /* * package version comparison functions */ static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name); static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver); /* * dynamic sysctl handlers */ static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS); static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS); static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS); static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS); static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS); static int 
ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS); static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high); static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS); static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode); static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS); static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS); static int ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_phy_stats(SYSCTL_HANDLER_ARGS); static int ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS); static int ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS); static int ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS); static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS); static int ice_sysctl_set_link_active(SYSCTL_HANDLER_ARGS); static int ice_sysctl_debug_set_link(SYSCTL_HANDLER_ARGS); static int ice_sysctl_temperature(SYSCTL_HANDLER_ARGS); static int 
ice_sysctl_create_mirror_interface(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_destroy_mirror_interface(SYSCTL_HANDLER_ARGS);

/**
 * ice_map_bar - Map PCIe BAR memory
 * @dev: the PCIe device
 * @bar: the BAR info structure
 * @bar_num: PCIe BAR number
 *
 * Maps the specified PCIe BAR. Stores the mapping data in struct
 * ice_bar_info.
 */
int
ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num)
{
	/* Refuse to map twice; a non-NULL resource means the BAR is live */
	if (bar->res != NULL) {
		device_printf(dev, "PCI BAR%d already mapped\n", bar_num);
		return (EDOOFUS);
	}

	bar->rid = PCIR_BAR(bar_num);
	bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid,
	    RF_ACTIVE);
	if (bar->res == NULL) {
		device_printf(dev, "PCI BAR%d mapping failed\n", bar_num);
		return (ENXIO);
	}

	/* Record the bus-space accessors for register access later */
	bar->tag = rman_get_bustag(bar->res);
	bar->handle = rman_get_bushandle(bar->res);
	bar->size = rman_get_size(bar->res);

	return (0);
}

/**
 * ice_free_bar - Free PCIe BAR memory
 * @dev: the PCIe device
 * @bar: the BAR info structure
 *
 * Frees the specified PCIe BAR, releasing its resources.
 */
void
ice_free_bar(device_t dev, struct ice_bar_info *bar)
{
	if (bar->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res);
	/* Mark the BAR as unmapped so a later ice_map_bar() succeeds */
	bar->res = NULL;
}

/**
 * ice_set_ctrlq_len - Configure ctrlq lengths for a device
 * @hw: the device hardware structure
 *
 * Configures the control queues for the given device, setting up the
 * specified lengths, prior to initializing hardware.
*/ void ice_set_ctrlq_len(struct ice_hw *hw) { hw->adminq.num_rq_entries = ICE_AQ_LEN; hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->sbq.num_rq_entries = ICE_SBQ_LEN; hw->sbq.num_sq_entries = ICE_SBQ_LEN; hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; } /** * ice_get_next_vsi - Get the next available VSI slot * @all_vsi: the VSI list * @size: the size of the VSI list * * Returns the index to the first available VSI slot. Will return size (one * past the last index) if there are no slots available. */ static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size) { int i; for (i = 0; i < size; i++) { if (all_vsi[i] == NULL) return i; } return size; } /** * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs * @sc: the device private softc structure * @vsi: the VSI to setup * @type: the VSI type of the new VSI * @idx: the index in the all_vsi array to use * @dynamic: whether this VSI memory was dynamically allocated * * Perform setup for a VSI that is common to both dynamically allocated VSIs * and the static PF VSI which is embedded in the softc structure. 
*/ static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, enum ice_vsi_type type, int idx, bool dynamic) { /* Store important values in VSI struct */ vsi->type = type; vsi->sc = sc; vsi->idx = idx; sc->all_vsi[idx] = vsi; vsi->dynamic = dynamic; /* Set default mirroring rule information */ vsi->rule_mir_ingress = ICE_INVAL_MIRROR_RULE_ID; vsi->rule_mir_egress = ICE_INVAL_MIRROR_RULE_ID; /* Setup the VSI tunables now */ ice_add_vsi_tunables(vsi, sc->vsi_sysctls); } /** * ice_alloc_vsi - Allocate a dynamic VSI * @sc: device softc structure * @type: VSI type * * Allocates a new dynamic VSI structure and inserts it into the VSI list. */ struct ice_vsi * ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type) { struct ice_vsi *vsi; int idx; /* Find an open index for a new VSI to be allocated. If the returned * index is >= the num_available_vsi then it means no slot is * available. */ idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi); if (idx >= sc->num_available_vsi) { device_printf(sc->dev, "No available VSI slots\n"); return NULL; } vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_NOWAIT | M_ZERO); if (!vsi) { device_printf(sc->dev, "Unable to allocate VSI memory\n"); return NULL; } ice_setup_vsi_common(sc, vsi, type, idx, true); return vsi; } /** * ice_setup_pf_vsi - Setup the PF VSI * @sc: the device private softc * * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of * the softc memory, instead of being dynamically allocated at creation. */ void ice_setup_pf_vsi(struct ice_softc *sc) { ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false); } /** * ice_alloc_vsi_qmap * @vsi: VSI structure * @max_tx_queues: Number of transmit queues to identify * @max_rx_queues: Number of receive queues to identify * * Allocates a max_[t|r]x_queues array of words for the VSI where each * word contains the index of the queue it represents. 
In here, all * words are initialized to an index of ICE_INVALID_RES_IDX, indicating * all queues for this VSI are not yet assigned an index and thus, * not ready for use. * */ void ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues, const int max_rx_queues) { int i; MPASS(max_tx_queues > 0); MPASS(max_rx_queues > 0); /* Allocate Tx queue mapping memory */ vsi->tx_qmap = malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK); /* Allocate Rx queue mapping memory */ vsi->rx_qmap = malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK); /* Mark every queue map as invalid to start with */ for (i = 0; i < max_tx_queues; i++) { vsi->tx_qmap[i] = ICE_INVALID_RES_IDX; } for (i = 0; i < max_rx_queues; i++) { vsi->rx_qmap[i] = ICE_INVALID_RES_IDX; } } /** * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI * @vsi: the VSI private structure * * Frees the PF qmaps associated with the given VSI. Generally this will be * called by ice_release_vsi, but may need to be called during attach cleanup, * depending on when the qmaps were allocated. */ void ice_free_vsi_qmaps(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; if (vsi->tx_qmap) { ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues); free(vsi->tx_qmap, M_ICE); vsi->tx_qmap = NULL; } if (vsi->rx_qmap) { ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap, vsi->num_rx_queues); free(vsi->rx_qmap, M_ICE); vsi->rx_qmap = NULL; } } /** * ice_set_default_vsi_ctx - Setup default VSI context parameters * @ctx: the VSI context to initialize * * Initialize and prepare a default VSI context for configuring a new VSI. 
*/ static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx) { u32 table = 0; memset(&ctx->info, 0, sizeof(ctx->info)); /* VSI will be allocated from shared pool */ ctx->alloc_from_pool = true; /* Enable source pruning by default */ ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; /* Traffic from VSI can be sent to LAN */ ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; /* Allow all packets untagged/tagged */ ctx->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >> ICE_AQ_VSI_INNER_VLAN_TX_MODE_S); /* Show VLAN/UP from packets in Rx descriptors */ ctx->info.inner_vlan_flags |= ((ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH & ICE_AQ_VSI_INNER_VLAN_EMODE_M) >> ICE_AQ_VSI_INNER_VLAN_EMODE_S); /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); table |= ICE_UP_TABLE_TRANSLATE(1, 1); table |= ICE_UP_TABLE_TRANSLATE(2, 2); table |= ICE_UP_TABLE_TRANSLATE(3, 3); table |= ICE_UP_TABLE_TRANSLATE(4, 4); table |= ICE_UP_TABLE_TRANSLATE(5, 5); table |= ICE_UP_TABLE_TRANSLATE(6, 6); table |= ICE_UP_TABLE_TRANSLATE(7, 7); ctx->info.ingress_table = CPU_TO_LE32(table); ctx->info.egress_table = CPU_TO_LE32(table); /* Have 1:1 UP mapping for outer to inner UP table */ ctx->info.outer_up_table = CPU_TO_LE32(table); /* No Outer tag support, so outer_vlan_flags remains zero */ } /** * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS * @ctx: the VSI context to configure * @type: the VSI type * * Configures the VSI context for RSS, based on the VSI type. 
*/ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type) { u8 lut_type, hash_type; switch (type) { case ICE_VSI_PF: lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; case ICE_VSI_VF: case ICE_VSI_VMDQ2: lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; default: /* Other VSI types do not support RSS */ return; } ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & ICE_AQ_VSI_Q_OPT_RSS_HASH_M)); } /** * ice_setup_vsi_qmap - Setup the queue mapping for a VSI * @vsi: the VSI to configure * @ctx: the VSI context to configure * * Configures the context for the given VSI, setting up how the firmware * should map the queues for this VSI. * * @pre vsi->qmap_type is set to a valid type */ static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) { int pow = 0; u16 qmap; MPASS(vsi->rx_qmap != NULL); switch (vsi->qmap_type) { case ICE_RESMGR_ALLOC_CONTIGUOUS: ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG); ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]); ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues); break; case ICE_RESMGR_ALLOC_SCATTERED: ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_NONCONTIG); for (int i = 0; i < vsi->num_rx_queues; i++) ctx->info.q_mapping[i] = CPU_TO_LE16(vsi->rx_qmap[i]); break; default: return (EOPNOTSUPP); } /* Calculate the next power-of-2 of number of queues */ if (vsi->num_rx_queues) pow = flsl(vsi->num_rx_queues - 1); /* Assign all the queues to traffic class zero */ qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M; ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap); /* Fill out default driver TC queue info for VSI */ vsi->tc_info[0].qoffset = 0; vsi->tc_info[0].qcount_rx = vsi->num_rx_queues; vsi->tc_info[0].qcount_tx = vsi->num_tx_queues; for (int i = 1; i < ICE_MAX_TRAFFIC_CLASS; i++) { 
vsi->tc_info[i].qoffset = 0; vsi->tc_info[i].qcount_rx = 1; vsi->tc_info[i].qcount_tx = 1; } vsi->tc_map = 0x1; return 0; } /** * ice_setup_vsi_mirroring -- Setup a VSI for mirroring PF VSI traffic * @vsi: VSI to setup * * @pre vsi->mirror_src_vsi is set to the SW VSI num that traffic is to be * mirrored from * * Returns 0 on success, EINVAL on failure. */ int ice_setup_vsi_mirroring(struct ice_vsi *vsi) { struct ice_mir_rule_buf rule = { }; struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; u16 rule_id, dest_vsi; u16 count = 1; rule.vsi_idx = ice_get_hw_vsi_num(hw, vsi->mirror_src_vsi); rule.add = true; dest_vsi = ice_get_hw_vsi_num(hw, vsi->idx); rule_id = ICE_INVAL_MIRROR_RULE_ID; status = ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_INGRESS, dest_vsi, count, &rule, NULL, &rule_id); if (status) { device_printf(dev, "Could not add INGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n", rule.vsi_idx, dest_vsi, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EINVAL); } vsi->rule_mir_ingress = rule_id; rule_id = ICE_INVAL_MIRROR_RULE_ID; status = ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_EGRESS, dest_vsi, count, &rule, NULL, &rule_id); if (status) { device_printf(dev, "Could not add EGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n", rule.vsi_idx, dest_vsi, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EINVAL); } vsi->rule_mir_egress = rule_id; return (0); } /** * ice_remove_vsi_mirroring -- Teardown any VSI mirroring rules * @vsi: VSI to remove mirror rules from */ static void ice_remove_vsi_mirroring(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int status = 0; bool keep_alloc = false; if (vsi->rule_mir_ingress != ICE_INVAL_MIRROR_RULE_ID) status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_ingress, keep_alloc, NULL); if (status) device_printf(vsi->sc->dev, "Could not remove mirror VSI ingress rule, err %s aq_err %s\n", 
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); status = 0; if (vsi->rule_mir_egress != ICE_INVAL_MIRROR_RULE_ID) status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_egress, keep_alloc, NULL); if (status) device_printf(vsi->sc->dev, "Could not remove mirror VSI egress rule, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } /** * ice_initialize_vsi - Initialize a VSI for use * @vsi: the vsi to initialize * * Initialize a VSI over the adminq and prepare it for operation. * * @pre vsi->num_tx_queues is set * @pre vsi->num_rx_queues is set */ int ice_initialize_vsi(struct ice_vsi *vsi) { struct ice_vsi_ctx ctx = { 0 }; struct ice_hw *hw = &vsi->sc->hw; u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; int status; int err; /* For now, we only have code supporting PF VSIs */ switch (vsi->type) { case ICE_VSI_PF: ctx.flags = ICE_AQ_VSI_TYPE_PF; break; case ICE_VSI_VMDQ2: ctx.flags = ICE_AQ_VSI_TYPE_VMDQ2; break; default: return (ENODEV); } ice_set_default_vsi_ctx(&ctx); ice_set_rss_vsi_ctx(&ctx, vsi->type); /* XXX: VSIs of other types may need different port info? 
*/ ctx.info.sw_id = hw->port_info->sw_id; /* Set some RSS parameters based on the VSI type */ ice_vsi_set_rss_params(vsi); /* Initialize the Rx queue mapping for this VSI */ err = ice_setup_vsi_qmap(vsi, &ctx); if (err) { return err; } /* (Re-)add VSI to HW VSI handle list */ status = ice_add_vsi(hw, vsi->idx, &ctx, NULL); if (status != 0) { device_printf(vsi->sc->dev, "Add VSI AQ call failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } vsi->info = ctx.info; /* Initialize VSI with just 1 TC to start */ max_txqs[0] = vsi->num_tx_queues; status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, ICE_DFLT_TRAFFIC_CLASS, max_txqs); if (status) { device_printf(vsi->sc->dev, "Failed VSI lan queue config, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); ice_deinit_vsi(vsi); return (ENODEV); } /* Reset VSI stats */ ice_reset_vsi_stats(vsi); return 0; } /** * ice_deinit_vsi - Tell firmware to release resources for a VSI * @vsi: the VSI to release * * Helper function which requests the firmware to release the hardware * resources associated with a given VSI. */ void ice_deinit_vsi(struct ice_vsi *vsi) { struct ice_vsi_ctx ctx = { 0 }; struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; int status; /* Assert that the VSI pointer matches in the list */ MPASS(vsi == sc->all_vsi[vsi->idx]); ctx.info = vsi->info; status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx); if (status) { /* * This should only fail if the VSI handle is invalid, or if * any of the nodes have leaf nodes which are still in use. 
*/ device_printf(sc->dev, "Unable to remove scheduler nodes for VSI %d, err %s\n", vsi->idx, ice_status_str(status)); } /* Tell firmware to release the VSI resources */ status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL); if (status != 0) { device_printf(sc->dev, "Free VSI %u AQ call failed, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } } /** * ice_release_vsi - Release resources associated with a VSI * @vsi: the VSI to release * * Release software and firmware resources associated with a VSI. Release the * queue managers associated with this VSI. Also free the VSI structure memory * if the VSI was allocated dynamically using ice_alloc_vsi(). */ void ice_release_vsi(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; int idx = vsi->idx; /* Assert that the VSI pointer matches in the list */ MPASS(vsi == sc->all_vsi[idx]); /* Cleanup RSS configuration */ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) ice_clean_vsi_rss_cfg(vsi); ice_del_vsi_sysctl_ctx(vsi); /* Remove the configured mirror rule, if it exists */ ice_remove_vsi_mirroring(vsi); /* * If we unload the driver after a reset fails, we do not need to do * this step. */ if (!ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) ice_deinit_vsi(vsi); ice_free_vsi_qmaps(vsi); if (vsi->dynamic) { free(sc->all_vsi[idx], M_ICE); } sc->all_vsi[idx] = NULL; } /** * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate * @pi: port info data * * Returns the baudrate value for the current link speed of a given port. 
*/ uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi) { switch (pi->phy.link_info.link_speed) { case ICE_AQ_LINK_SPEED_200GB: return IF_Gbps(200); case ICE_AQ_LINK_SPEED_100GB: return IF_Gbps(100); case ICE_AQ_LINK_SPEED_50GB: return IF_Gbps(50); case ICE_AQ_LINK_SPEED_40GB: return IF_Gbps(40); case ICE_AQ_LINK_SPEED_25GB: return IF_Gbps(25); case ICE_AQ_LINK_SPEED_10GB: return IF_Gbps(10); case ICE_AQ_LINK_SPEED_5GB: return IF_Gbps(5); case ICE_AQ_LINK_SPEED_2500MB: return IF_Mbps(2500); case ICE_AQ_LINK_SPEED_1000MB: return IF_Mbps(1000); case ICE_AQ_LINK_SPEED_100MB: return IF_Mbps(100); case ICE_AQ_LINK_SPEED_10MB: return IF_Mbps(10); case ICE_AQ_LINK_SPEED_UNKNOWN: default: /* return 0 if we don't know the link speed */ return 0; } } /** * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation * @pi: port info data * * Returns the string representation of the current link speed for a given * port. */ static const char * ice_aq_speed_to_str(struct ice_port_info *pi) { switch (pi->phy.link_info.link_speed) { case ICE_AQ_LINK_SPEED_200GB: return "200 Gbps"; case ICE_AQ_LINK_SPEED_100GB: return "100 Gbps"; case ICE_AQ_LINK_SPEED_50GB: return "50 Gbps"; case ICE_AQ_LINK_SPEED_40GB: return "40 Gbps"; case ICE_AQ_LINK_SPEED_25GB: return "25 Gbps"; case ICE_AQ_LINK_SPEED_20GB: return "20 Gbps"; case ICE_AQ_LINK_SPEED_10GB: return "10 Gbps"; case ICE_AQ_LINK_SPEED_5GB: return "5 Gbps"; case ICE_AQ_LINK_SPEED_2500MB: return "2.5 Gbps"; case ICE_AQ_LINK_SPEED_1000MB: return "1 Gbps"; case ICE_AQ_LINK_SPEED_100MB: return "100 Mbps"; case ICE_AQ_LINK_SPEED_10MB: return "10 Mbps"; case ICE_AQ_LINK_SPEED_UNKNOWN: default: return "Unknown speed"; } } /** * ice_get_phy_type_low - Get media associated with phy_type_low * @phy_type_low: the low 64bits of phy_type from the AdminQ * * Given the lower 64bits of the phy_type from the hardware, return the * ifm_active bit associated. Return IFM_UNKNOWN when phy_type_low is unknown. 
 * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should
 * be called. If phy_type_low is zero, call ice_get_phy_type_high.
 */
int
ice_get_phy_type_low(uint64_t phy_type_low)
{
	/* Pure one-to-one mapping from hardware PHY type bits to ifmedia
	 * subtype values; no state is read or written.
	 */
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
		return IFM_100_TX;
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		return IFM_100_SGMII;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
		return IFM_1000_T;
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
		return IFM_1000_SX;
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
		return IFM_1000_LX;
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
		return IFM_1000_KX;
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		return IFM_1000_SGMII;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
		return IFM_2500_T;
	case ICE_PHY_TYPE_LOW_2500BASE_X:
		return IFM_2500_X;
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		return IFM_2500_KX;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
		return IFM_5000_T;
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		return IFM_5000_KR;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
		return IFM_10G_T;
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		return IFM_10G_TWINAX;
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
		return IFM_10G_SR;
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
		return IFM_10G_LR;
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		return IFM_10G_KR;
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		return IFM_10G_AOC;
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		return IFM_10G_SFI;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
		return IFM_25G_T;
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
		return IFM_25G_CR;
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		return IFM_25G_CR_S;
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		return IFM_25G_CR1;
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
		return IFM_25G_SR;
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
		return IFM_25G_LR;
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
		return IFM_25G_KR;
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		return IFM_25G_KR_S;
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		return IFM_25G_KR1;
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		return IFM_25G_AOC;
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		return IFM_25G_AUI;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		return IFM_40G_CR4;
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		return IFM_40G_SR4;
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		return IFM_40G_LR4;
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		return IFM_40G_KR4;
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		return IFM_40G_XLAUI_AC;
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		return IFM_40G_XLAUI;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		return IFM_50G_CR2;
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		return IFM_50G_SR2;
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		return IFM_50G_LR2;
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		return IFM_50G_KR2;
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		return IFM_50G_LAUI2_AC;
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
		return IFM_50G_LAUI2;
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		return IFM_50G_AUI2_AC;
	case ICE_PHY_TYPE_LOW_50G_AUI2:
		return IFM_50G_AUI2;
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
		return IFM_50G_CP;
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
		return IFM_50G_SR;
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
		return IFM_50G_FR;
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
		return IFM_50G_LR;
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		return IFM_50G_KR_PAM4;
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		return IFM_50G_AUI1_AC;
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		return IFM_50G_AUI1;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		return IFM_100G_CR4;
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		return IFM_100G_SR4;
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		return IFM_100G_LR4;
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		return IFM_100G_KR4;
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		return IFM_100G_CAUI4_AC;
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
		return IFM_100G_CAUI4;
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
		return IFM_100G_AUI4_AC;
	case ICE_PHY_TYPE_LOW_100G_AUI4:
		return IFM_100G_AUI4;
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		return IFM_100G_CR_PAM4;
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
		return IFM_100G_KR_PAM4;
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
		return IFM_100G_CP2;
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		return IFM_100G_SR2;
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		return IFM_100G_DR;
	default:
		return IFM_UNKNOWN;
	}
}

/**
 * ice_get_phy_type_high - Get media associated with phy_type_high
 * @phy_type_high: the upper 64bits of phy_type from the AdminQ
 *
 * Given the
 * upper 64bits of the phy_type from the hardware, return the
 * ifm_active bit associated. Return IFM_UNKNOWN on an unknown value. Note
 * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be
 * called. If phy_type_high is zero, call ice_get_phy_type_low.
 */
int
ice_get_phy_type_high(uint64_t phy_type_high)
{
	/* One-to-one mapping of the upper PHY type bits (100G PAM4/CAUI2 and
	 * 200G variants) to ifmedia subtypes; stateless.
	 */
	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
		return IFM_100G_KR2_PAM4;
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		return IFM_100G_CAUI2_AC;
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
		return IFM_100G_CAUI2;
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
		return IFM_100G_AUI2_AC;
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		return IFM_100G_AUI2;
	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
		return IFM_200G_CR4_PAM4;
	case ICE_PHY_TYPE_HIGH_200G_SR4:
		return IFM_200G_SR4;
	case ICE_PHY_TYPE_HIGH_200G_FR4:
		return IFM_200G_FR4;
	case ICE_PHY_TYPE_HIGH_200G_LR4:
		return IFM_200G_LR4;
	case ICE_PHY_TYPE_HIGH_200G_DR4:
		return IFM_200G_DR4;
	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
		return IFM_200G_KR4_PAM4;
	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
		return IFM_200G_AUI4_AC;
	case ICE_PHY_TYPE_HIGH_200G_AUI4:
		return IFM_200G_AUI4;
	case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
		return IFM_200G_AUI8_AC;
	case ICE_PHY_TYPE_HIGH_200G_AUI8:
		return IFM_200G_AUI8;
	default:
		return IFM_UNKNOWN;
	}
}

/**
 * ice_phy_types_to_max_rate - Returns port's max supported baudrate
 * @pi: port info struct
 *
 * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP_MEDIA parameter needs
 * to have been called before this function for it to work.
 */
static uint64_t
ice_phy_types_to_max_rate(struct ice_port_info *pi)
{
	uint64_t phy_low = pi->phy.phy_type_low;
	uint64_t phy_high = pi->phy.phy_type_high;
	uint64_t max_rate = 0;
	int bit;
	/*
	 * These are based on the indices used in the BIT() macros for
	 * ICE_PHY_TYPE_LOW_*
	 *
	 * NOTE: the order of entries is load-bearing: entry N is the rate of
	 * PHY type bit N, so this table must stay in sync with the
	 * ICE_PHY_TYPE_LOW_*/ICE_PHY_TYPE_HIGH_* bit definitions.
	 */
	static const uint64_t phy_rates[] = {
	    /* 100M: 100BASE_TX, 100M_SGMII */
	    IF_Mbps(100), IF_Mbps(100),
	    /* 1G: T, SX, LX, KX, SGMII */
	    IF_Gbps(1ULL), IF_Gbps(1ULL), IF_Gbps(1ULL),
	    IF_Gbps(1ULL), IF_Gbps(1ULL),
	    /* 2.5G: T, X, KX */
	    IF_Mbps(2500ULL), IF_Mbps(2500ULL), IF_Mbps(2500ULL),
	    /* 5G: T, KR */
	    IF_Gbps(5ULL), IF_Gbps(5ULL),
	    /* 10G: 7 PHY types */
	    IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL),
	    IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    /* 25G: 11 PHY types */
	    IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL),
	    IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL),
	    IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL),
	    IF_Gbps(25ULL), IF_Gbps(25ULL),
	    /* 40G: 6 PHY types */
	    IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(40ULL),
	    IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(40ULL),
	    /* 50G: 15 PHY types */
	    IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL),
	    IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL),
	    IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL),
	    IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL),
	    IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL),
	    /* 100G (low word): 13 PHY types */
	    IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL),
	    IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL),
	    IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL),
	    IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    /* These rates are for ICE_PHY_TYPE_HIGH_* */
	    IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL),
	    IF_Gbps(100ULL), IF_Gbps(100ULL),
	    IF_Gbps(200ULL), IF_Gbps(200ULL), IF_Gbps(200ULL),
	    IF_Gbps(200ULL), IF_Gbps(200ULL), IF_Gbps(200ULL),
	    IF_Gbps(200ULL), IF_Gbps(200ULL), IF_Gbps(200ULL),
	    IF_Gbps(200ULL),
	};

	/* High-word bits index the table at offset 64; the range check
	 * guards against new hardware bits the table does not yet cover.
	 */
	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_high, 64)
		if ((bit + 64) < (int)ARRAY_SIZE(phy_rates))
			max_rate = uqmax(max_rate, phy_rates[(bit + 64)]);

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_low, 64)
		max_rate = uqmax(max_rate, phy_rates[bit]);

	return (max_rate);
}

/* The if_media type is split over the original 5 bit media
 * variant field,
 * along with extended types using up extra bits in the options section.
 * We want to convert this split number into a bitmap index, so we reverse the
 * calculation of IFM_X here.
 */
#define IFM_IDX(x) (((x) & IFM_TMASK) | \
		    (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT))

/**
 * ice_add_media_types - Add supported media types to the media structure
 * @sc: ice private softc structure
 * @media: ifmedia structure to setup
 *
 * Looks up the supported phy types, and initializes the various media types
 * available.
 *
 * @pre this function must be protected from being called while another thread
 * is accessing the ifmedia types.
 */
int
ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_port_info *pi = sc->hw.port_info;
	int status;
	uint64_t phy_low, phy_high;
	int bit;

	ASSERT_CFG_LOCKED(sc);

	/* the maximum possible media type index is 511. We probably don't
	 * need most of this space, but this ensures future compatibility when
	 * additional media types are used.
	 */
	/* NOTE(review): a 511-bit bitmap covers indices 0..510, so index 511
	 * would formally be out of declared range; the chunked u64 storage
	 * rounds up to 512 bits, so no memory is overrun — but declaring 512
	 * would be cleaner. TODO confirm ice_declare_bitmap() semantics.
	 */
	ice_declare_bitmap(already_added, 511);

	/* Remove all previous media types */
	ifmedia_removeall(media);

	/* Query the currently active PHY configuration from firmware */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     &pcaps, NULL);
	if (status) {
		device_printf(sc->dev,
		    "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(sc->hw.adminq.sq_last_status));
		return (status);
	}
	phy_low = le64toh(pcaps.phy_type_low);
	phy_high = le64toh(pcaps.phy_type_high);

	/* make sure the added bitmap is zero'd */
	memset(already_added, 0, sizeof(already_added));

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_low, 64) {
		uint64_t type = BIT_ULL(bit);
		int ostype;

		/* get the OS media type */
		ostype = ice_get_phy_type_low(type);

		/* don't bother adding the unknown type */
		if (ostype == IFM_UNKNOWN)
			continue;

		/* only add each media type to the list once */
		if (ice_is_bit_set(already_added, IFM_IDX(ostype)))
			continue;

		ifmedia_add(media, IFM_ETHER | ostype, 0, NULL);
		ice_set_bit(IFM_IDX(ostype), already_added);
	}

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_high, 64) {
		uint64_t type = BIT_ULL(bit);
		int ostype;

		/* get the OS media type */
		ostype = ice_get_phy_type_high(type);

		/* don't bother adding the unknown type */
		if (ostype == IFM_UNKNOWN)
			continue;

		/* only add each media type to the list once */
		if (ice_is_bit_set(already_added, IFM_IDX(ostype)))
			continue;

		ifmedia_add(media, IFM_ETHER | ostype, 0, NULL);
		ice_set_bit(IFM_IDX(ostype), already_added);
	}

	/* Use autoselect media by default */
	ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/**
 * ice_configure_rxq_interrupt - Configure HW Rx queue for an MSI-X interrupt
 * @hw: ice hw structure
 * @rxqid: Rx queue index in PF space
 * @vector: MSI-X vector index in PF/VF space
 * @itr_idx: ITR index to use for interrupt
 *
 * @remark ice_flush() may need to be called after this
 */
void
ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8
itr_idx) { u32 val; MPASS(itr_idx <= ICE_ITR_NONE); val = (QINT_RQCTL_CAUSE_ENA_M | (itr_idx << QINT_RQCTL_ITR_INDX_S) | (vector << QINT_RQCTL_MSIX_INDX_S)); wr32(hw, QINT_RQCTL(rxqid), val); } /** * ice_configure_all_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts * @vsi: the VSI to configure * * Called when setting up MSI-X interrupts to configure the Rx hardware queues. */ void ice_configure_all_rxq_interrupts(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; for (i = 0; i < vsi->num_rx_queues; i++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; ice_configure_rxq_interrupt(hw, vsi->rx_qmap[rxq->me], rxq->irqv->me, ICE_RX_ITR); ice_debug(hw, ICE_DBG_INIT, "RXQ(%d) intr enable: me %d rxqid %d vector %d\n", i, rxq->me, vsi->rx_qmap[rxq->me], rxq->irqv->me); } ice_flush(hw); } /** * ice_configure_txq_interrupt - Configure HW Tx queue for an MSI-X interrupt * @hw: ice hw structure * @txqid: Tx queue index in PF space * @vector: MSI-X vector index in PF/VF space * @itr_idx: ITR index to use for interrupt * * @remark ice_flush() may need to be called after this */ void ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx) { u32 val; MPASS(itr_idx <= ICE_ITR_NONE); val = (QINT_TQCTL_CAUSE_ENA_M | (itr_idx << QINT_TQCTL_ITR_INDX_S) | (vector << QINT_TQCTL_MSIX_INDX_S)); wr32(hw, QINT_TQCTL(txqid), val); } /** * ice_configure_all_txq_interrupts - Configure HW Tx queues for MSI-X interrupts * @vsi: the VSI to configure * * Called when setting up MSI-X interrupts to configure the Tx hardware queues. 
 */
void
ice_configure_all_txq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];

		ice_configure_txq_interrupt(hw, vsi->tx_qmap[txq->me],
		    txq->irqv->me, ICE_TX_ITR);
	}

	ice_flush(hw);
}

/**
 * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause
 * @vsi: the VSI to configure
 *
 * Unset the CAUSE_ENA flag of the RQCTL register for each queue, then trigger
 * a software interrupt on that cause. This is required as part of the Rx
 * queue disable logic to dissociate the Rx queue from the interrupt.
 *
 * Note: this function must be called prior to disabling Rx queues with
 * ice_control_all_rx_queues, otherwise the Rx queue may not be disabled properly.
 */
void
ice_flush_rxq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		u32 reg, val;

		/* Clear the CAUSE_ENA flag */
		reg = vsi->rx_qmap[rxq->me];
		val = rd32(hw, QINT_RQCTL(reg));
		val &= ~QINT_RQCTL_CAUSE_ENA_M;
		wr32(hw, QINT_RQCTL(reg), val);

		/* flush so the disable lands before the software trigger */
		ice_flush(hw);

		/* Trigger a software interrupt to complete interrupt
		 * dissociation.
		 */
		wr32(hw, GLINT_DYN_CTL(rxq->irqv->me),
		    GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
	}
}

/**
 * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause
 * @vsi: the VSI to configure
 *
 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger
 * a software interrupt on that cause. This is required as part of the Tx
 * queue disable logic to dissociate the Tx queue from the interrupt.
 *
 * Note: this function must be called prior to ice_vsi_disable_tx, otherwise
 * the Tx queue disable may not complete properly.
 */
void
ice_flush_txq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		u32 reg, val;

		/* Clear the CAUSE_ENA flag */
		reg = vsi->tx_qmap[txq->me];
		val = rd32(hw, QINT_TQCTL(reg));
		val &= ~QINT_TQCTL_CAUSE_ENA_M;
		wr32(hw, QINT_TQCTL(reg), val);

		/* flush so the disable lands before the software trigger */
		ice_flush(hw);

		/* Trigger a software interrupt to complete interrupt
		 * dissociation.
		 */
		wr32(hw, GLINT_DYN_CTL(txq->irqv->me),
		    GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
	}
}

/**
 * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI
 * @vsi: the VSI to configure
 *
 * Program the hardware ITR registers with the settings for this VSI.
 */
void
ice_configure_rx_itr(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	/* TODO: Handle per-queue/per-vector ITR? */
	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];

		wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me),
		    ice_itr_to_reg(hw, vsi->rx_itr));
	}

	ice_flush(hw);
}

/**
 * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI
 * @vsi: the VSI to configure
 *
 * Program the hardware ITR registers with the settings for this VSI.
 */
void
ice_configure_tx_itr(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	/* TODO: Handle per-queue/per-vector ITR?
	 */
	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];

		wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me),
		    ice_itr_to_reg(hw, vsi->tx_itr));
	}

	ice_flush(hw);
}

/**
 * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue
 * @txq: the Tx queue to configure
 * @tlan_ctx: the Tx LAN queue context structure to initialize
 * @pf_q: real queue number
 *
 * Returns 0 on success, or ENODEV if the VSI type has no Tx queue mapping.
 */
static int
ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = txq->vsi;
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;

	tlan_ctx->port_num = hw->port_info->lport;

	/* number of descriptors in the queue */
	tlan_ctx->qlen = txq->desc_count;

	/* set the transmit queue base address, defined in 128 byte units */
	tlan_ctx->base = txq->tx_paddr >> 7;

	tlan_ctx->pf_num = hw->pf_id;

	/* Map the VSI type to the hardware's VM/VF queue ownership type */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VMDQ2:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return (ENODEV);
	}

	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Enable TSO */
	tlan_ctx->tso_ena = 1;
	tlan_ctx->internal_usage_flag = 1;

	tlan_ctx->tso_qnum = pf_q;

	/*
	 * Stick with the older legacy Tx queue interface, instead of the new
	 * advanced queue interface.
	 */
	tlan_ctx->legacy_int = 1;

	/* Descriptor WB mode */
	tlan_ctx->wb_mode = 0;

	return (0);
}

/**
 * ice_cfg_vsi_for_tx - Configure the hardware for Tx
 * @vsi: the VSI to configure
 *
 * Configure the device Tx queues through firmware AdminQ commands. After
 * this, Tx queues will be ready for transmit.
 */
int
ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
{
	struct ice_aqc_add_tx_qgrp *qg;
	struct ice_hw *hw = &vsi->sc->hw;
	device_t dev = vsi->sc->dev;
	int status;
	int i;
	int err = 0;
	u16 qg_size, pf_q;

	/* One queue group descriptor holding a single queue; it is reused
	 * for each queue in the loop below.
	 */
	qg_size = ice_struct_size(qg, txqs, 1);
	qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE,
	    M_NOWAIT|M_ZERO);
	if (!qg)
		return (ENOMEM);

	qg->num_txqs = 1;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tlan_ctx tlan_ctx = { 0 };
		struct ice_tx_queue *txq = &vsi->tx_queues[i];

		pf_q = vsi->tx_qmap[txq->me];
		qg->txqs[0].txq_id = htole16(pf_q);

		err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q);
		if (err)
			goto free_txqg;

		/* Pack the context structure into the AQ buffer layout */
		ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

		status = ice_ena_vsi_txq(hw->port_info, vsi->idx, txq->tc,
		    txq->q_handle, 1, qg, qg_size, NULL);
		if (status) {
			device_printf(dev,
			    "Failed to set LAN Tx queue %d (TC %d, handle %d) context, err %s aq_err %s\n",
			    i, txq->tc, txq->q_handle,
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			err = ENODEV;
			goto free_txqg;
		}

		/* Keep track of the Tx queue TEID */
		if (pf_q == le16toh(qg->txqs[0].txq_id))
			txq->q_teid = le32toh(qg->txqs[0].q_teid);
	}

free_txqg:
	free(qg, M_ICE);

	return (err);
}

/**
 * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue
 * @rxq: the receive queue to program
 *
 * Setup an Rx queue context structure and program it into the hardware
 * registers. This is a necessary step for enabling the Rx queue.
 *
 * @pre the VSI associated with this queue must have initialized mbuf_sz
 */
static int
ice_setup_rx_ctx(struct ice_rx_queue *rxq)
{
	struct ice_rlan_ctx rlan_ctx = {0};
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	int status;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	u32 regval;
	u16 pf_q;

	pf_q = vsi->rx_qmap[rxq->me];

	/* set the receive queue base address, defined in 128 byte units */
	rlan_ctx.base = rxq->rx_paddr >> 7;

	rlan_ctx.qlen = rxq->desc_count;

	rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to the
	 * host memory.
	 */
	rlan_ctx.crcstrip = 1;

	rlan_ctx.l2tsel = 1;

	/* don't do header splitting */
	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* strip VLAN from inner headers */
	rlan_ctx.showiv = 1;

	/* cap rxmax by the largest chain the queue can actually receive */
	rlan_ctx.rxmax = min(vsi->max_frame_size,
			     ICE_MAX_RX_SEGS * vsi->mbuf_sz);

	rlan_ctx.lrxqthresh = 1;

	/* Select the flexible Rx descriptor profile; the VF path programs
	 * this register itself.
	 */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M;
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			QRXFLXP_CNTXT_RXDID_IDX_M;

		regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M;
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (status) {
		device_printf(sc->dev,
		    "Failed to set LAN Rx queue context, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	wr32(hw, rxq->tail, 0);

	return 0;
}

/**
 * ice_cfg_vsi_for_rx - Configure the hardware for Rx
 * @vsi: the VSI to configure
 *
 * Prepare an Rx context descriptor and configure the device to receive
 * traffic.
 *
 * @pre the VSI must have initialized mbuf_sz
 */
int
ice_cfg_vsi_for_rx(struct ice_vsi *vsi)
{
	int i, err;

	for (i = 0; i < vsi->num_rx_queues; i++) {
		MPASS(vsi->mbuf_sz > 0);
		/* Stop at the first queue that fails to program */
		err = ice_setup_rx_ctx(&vsi->rx_queues[i]);
		if (err)
			return err;
	}

	return (0);
}

/**
 * ice_is_rxq_ready - Check if an Rx queue is ready
 * @hw: ice hw structure
 * @pf_q: absolute PF queue index to check
 * @reg: on successful return, contains qrx_ctrl contents
 *
 * Reads the QRX_CTRL register and verifies if the queue is in a consistent
 * state. That is, QENA_REQ matches QENA_STAT. Used to check before making
 * a request to change the queue, as well as to verify the request has
 * finished. The queue should change status within a few microseconds, so we
 * use a small delay while polling the register.
 *
 * Returns an error code if the queue does not update after a few retries.
 */
static int
ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg)
{
	u32 qrx_ctrl, qena_req, qena_stat;
	int i;

	for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
		qrx_ctrl = rd32(hw, QRX_CTRL(pf_q));
		qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1;
		qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1;

		/* if the request and status bits equal, then the queue is
		 * fully disabled or enabled.
		 */
		if (qena_req == qena_stat) {
			*reg = qrx_ctrl;
			return (0);
		}

		/* wait a few microseconds before we check again */
		DELAY(10);
	}

	return (ETIMEDOUT);
}

/**
 * ice_control_rx_queue - Configure hardware to start or stop an Rx queue
 * @vsi: VSI containing queue to enable/disable
 * @qidx: Queue index in VSI space
 * @enable: true to enable queue, false to disable
 *
 * Control the Rx queue through the QRX_CTRL register, enabling or disabling
 * it. Wait for the appropriate time to ensure that the queue has actually
 * reached the expected state.
*/ int ice_control_rx_queue(struct ice_vsi *vsi, u16 qidx, bool enable) { struct ice_hw *hw = &vsi->sc->hw; device_t dev = vsi->sc->dev; u32 qrx_ctrl = 0; int err; struct ice_rx_queue *rxq = &vsi->rx_queues[qidx]; int pf_q = vsi->rx_qmap[rxq->me]; err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); if (err) { device_printf(dev, "Rx queue %d is not ready\n", pf_q); return err; } /* Skip if the queue is already in correct state */ if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) return (0); if (enable) qrx_ctrl |= QRX_CTRL_QENA_REQ_M; else qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M; wr32(hw, QRX_CTRL(pf_q), qrx_ctrl); /* wait for the queue to finalize the request */ err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); if (err) { device_printf(dev, "Rx queue %d %sable timeout\n", pf_q, (enable ? "en" : "dis")); return err; } /* this should never happen */ if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) { device_printf(dev, "Rx queue %d invalid state\n", pf_q); return (EDOOFUS); } return (0); } /** * ice_control_all_rx_queues - Configure hardware to start or stop the Rx queues * @vsi: VSI to enable/disable queues * @enable: true to enable queues, false to disable * * Control the Rx queues through the QRX_CTRL register, enabling or disabling * them. Wait for the appropriate time to ensure that the queues have actually * reached the expected state. */ int ice_control_all_rx_queues(struct ice_vsi *vsi, bool enable) { int i, err; /* TODO: amortize waits by changing all queues up front and then * checking their status afterwards. This will become more necessary * when we have a large number of queues. 
*/ for (i = 0; i < vsi->num_rx_queues; i++) { err = ice_control_rx_queue(vsi, i, enable); if (err) break; } return (0); } /** * ice_add_mac_to_list - Add MAC filter to a MAC filter list * @vsi: the VSI to forward to * @list: list which contains MAC filter entries * @addr: the MAC address to be added * @action: filter action to perform on match * * Adds a MAC address filter to the list which will be forwarded to firmware * to add a series of MAC address filters. * * Returns 0 on success, and an error code on failure. * */ static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, const u8 *addr, enum ice_sw_fwd_act_type action) { struct ice_fltr_list_entry *entry; entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); if (!entry) return (ENOMEM); entry->fltr_info.flag = ICE_FLTR_TX; entry->fltr_info.src_id = ICE_SRC_ID_VSI; entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC; entry->fltr_info.fltr_act = action; entry->fltr_info.vsi_handle = vsi->idx; bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN); LIST_ADD(&entry->list_entry, list); return 0; } /** * ice_free_fltr_list - Free memory associated with a MAC address list * @list: the list to free * * Free the memory of each entry associated with the list. */ static void ice_free_fltr_list(struct ice_list_head *list) { struct ice_fltr_list_entry *e, *tmp; LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) { LIST_DEL(&e->list_entry); free(e, M_ICE); } } /** * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI * @vsi: the VSI to add the filter for * @addr: MAC address to add a filter for * * Add a MAC address filter for a given VSI. This is a wrapper around * ice_add_mac to simplify the interface. First, it only accepts a single * address, so we don't have to mess around with the list setup in other * functions. 
 * Second, it ignores the ICE_ERR_ALREADY_EXISTS error, so that
 * callers don't need to worry about attempting to add the same filter twice.
 */
int
ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
{
	struct ice_list_head mac_addr_list;
	struct ice_hw *hw = &vsi->sc->hw;
	device_t dev = vsi->sc->dev;
	int status;
	int err = 0;

	INIT_LIST_HEAD(&mac_addr_list);

	err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI);
	if (err)
		goto free_mac_list;

	status = ice_add_mac(hw, &mac_addr_list);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		; /* Don't complain if we try to add a filter that already exists */
	} else if (status) {
		device_printf(dev,
		    "Failed to add a filter for MAC %6D, err %s aq_err %s\n",
		    addr, ":", ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		err = (EIO);
	}

free_mac_list:
	/* The list entries are only needed for the duration of the AQ call */
	ice_free_fltr_list(&mac_addr_list);
	return err;
}

/**
 * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs
 * @sc: device softc structure
 *
 * Program the default unicast and broadcast filters for the PF VSI.
 */
int
ice_cfg_pf_default_mac_filters(struct ice_softc *sc)
{
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_hw *hw = &sc->hw;
	int err;

	/* Add the LAN MAC address */
	err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr);
	if (err)
		return err;

	/* Add the broadcast address */
	err = ice_add_vsi_mac_filter(vsi, broadcastaddr);
	if (err)
		return err;

	return (0);
}

/**
 * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI
 * @vsi: the VSI to add the filter for
 * @addr: MAC address to remove a filter for
 *
 * Remove a MAC address filter from a given VSI. This is a wrapper around
 * ice_remove_mac to simplify the interface. First, it only accepts a single
 * address, so we don't have to mess around with the list setup in other
 * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that
 * callers don't need to worry about attempting to remove filters which
 * haven't yet been added.
*/ int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) { struct ice_list_head mac_addr_list; struct ice_hw *hw = &vsi->sc->hw; device_t dev = vsi->sc->dev; int status; int err = 0; INIT_LIST_HEAD(&mac_addr_list); err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); if (err) goto free_mac_list; status = ice_remove_mac(hw, &mac_addr_list); if (status == ICE_ERR_DOES_NOT_EXIST) { ; /* Don't complain if we try to remove a filter that doesn't exist */ } else if (status) { device_printf(dev, "Failed to remove a filter for MAC %6D, err %s aq_err %s\n", addr, ":", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); } free_mac_list: ice_free_fltr_list(&mac_addr_list); return err; } /** * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs * @sc: device softc structure * * Remove the default unicast and broadcast filters from the PF VSI. */ int ice_rm_pf_default_mac_filters(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; int err; /* Remove the LAN MAC address */ err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); if (err) return err; /* Remove the broadcast address */ err = ice_remove_vsi_mac_filter(vsi, broadcastaddr); if (err) return (EIO); return (0); } /** * ice_check_ctrlq_errors - Check for and report controlq errors * @sc: device private structure * @qname: name of the controlq * @cq: the controlq to check * * Check and report controlq errors. Currently all we do is report them to the * kernel message log, but we might want to improve this in the future, such * as to keep track of statistics. */ static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, struct ice_ctl_q_info *cq) { struct ice_hw *hw = &sc->hw; u32 val; /* Check for error indications. Note that all the controlqs use the * same register layout, so we use the PF_FW_AxQLEN defines only. 
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			device_printf(sc->dev,
			    "%s Receive Queue VF Error detected\n", qname);
		if (val & PF_FW_ARQLEN_ARQOVFL_M)
			device_printf(sc->dev,
			    "%s Receive Queue Overflow Error detected\n",
			    qname);
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			device_printf(sc->dev,
			    "%s Receive Queue Critical Error detected\n",
			    qname);
		/* Error bits are write-1-to-clear semantics per this code;
		 * clear them so the next check reports fresh errors.
		 */
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			device_printf(sc->dev,
			    "%s Send Queue VF Error detected\n", qname);
		if (val & PF_FW_ATQLEN_ATQOVFL_M)
			device_printf(sc->dev,
			    "%s Send Queue Overflow Error detected\n", qname);
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			device_printf(sc->dev,
			    "%s Send Queue Critical Error detected\n", qname);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		wr32(hw, cq->sq.len, val);
	}
}

/**
 * ice_process_link_event - Process a link event indication from firmware
 * @sc: device softc structure
 * @e: the received event data
 *
 * Gets the current link status from hardware, and may print a message if an
 * unqualified is detected.
 */
static void
ice_process_link_event(struct ice_softc *sc,
		       struct ice_rq_event_info __invariant_only *e)
{
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;

	/* Sanity check that the data length isn't too small */
	MPASS(le16toh(e->desc.datalen) >= ICE_GET_LINK_STATUS_DATALEN_V1);

	/*
	 * Even though the adapter gets link status information inside the
	 * event, it needs to send a Get Link Status AQ command in order
	 * to re-enable link events.
	 */
	pi->phy.get_link_info = true;
	ice_get_link_status(pi, &sc->link_up);

	/* Warn about cabling/topology problems reported by firmware */
	if (pi->phy.link_info.topo_media_conflict &
	    (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT |
	     ICE_AQ_LINK_TOPO_CORRUPT))
		device_printf(dev,
		    "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n");

	/* Media present but link down: explain why link could not start */
	if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) {
		if (!(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE))
			device_printf(dev,
			    "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		if (pi->phy.link_info.link_cfg_err &
		    ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED)
			device_printf(dev,
			    "The module's power requirements exceed the device's power supply. Cannot start link.\n");
		if (pi->phy.link_info.link_cfg_err &
		    ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT)
			device_printf(dev,
			    "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
	}

	/* No media: stop autoneg restarts until media is detected again */
	if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) {
			status = ice_aq_set_link_restart_an(pi, false, NULL);
			/* EMODE is expected when the port is in a mode that
			 * disallows this command; don't log it as an error.
			 */
			if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
				device_printf(dev,
				    "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
				    __func__, ice_status_str(status),
				    ice_aq_str(hw->adminq.sq_last_status));
		}
	}
	/* ICE_STATE_NO_MEDIA is cleared when polling task detects media */

	/* Indicate that link status must be reported again */
	ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);

	/* OS link info is updated elsewhere */
}

/**
 * ice_process_ctrlq_event - Respond to a controlq event
 * @sc: device private structure
 * @qname: the name for this controlq
 * @event: the event to process
 *
 * Perform actions in response to various controlq event notifications.
*/
static void
ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
			struct ice_rq_event_info *event)
{
	u16 opcode;

	opcode = le16toh(event->desc.opcode);

	/* Dispatch on the AQ opcode carried in the event descriptor */
	switch (opcode) {
	case ice_aqc_opc_get_link_status:
		ice_process_link_event(sc, event);
		break;
	case ice_aqc_opc_fw_logs_event:
		ice_handle_fw_log_event(sc, &event->desc, event->msg_buf);
		break;
	case ice_aqc_opc_lldp_set_mib_change:
		ice_handle_mib_change_event(sc, event);
		break;
	case ice_aqc_opc_event_lan_overflow:
		ice_handle_lan_overflow_event(sc, event);
		break;
	case ice_aqc_opc_get_health_status:
		ice_handle_health_status_event(sc, event);
		break;
	default:
		/* Unknown events are logged and dropped, not an error */
		device_printf(sc->dev,
		    "%s Receive Queue unhandled event 0x%04x ignored\n",
		    qname, opcode);
	}
}

/**
 * ice_process_ctrlq - helper function to process controlq rings
 * @sc: device private structure
 * @q_type: specific control queue type
 * @pending: return parameter to track remaining events
 *
 * Process controlq events for a given control queue type. Returns zero on
 * success, and an error code on failure. If successful, pending is the number
 * of remaining events left in the queue.
 */
int
ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
{
	struct ice_rq_event_info event = { { 0 } };
	struct ice_hw *hw = &sc->hw;
	struct ice_ctl_q_info *cq;
	int status;
	const char *qname;
	int loop = 0;

	/* Select the queue structure and a human-readable name for messages */
	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qname = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qname = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qname = "Mailbox";
		break;
	default:
		device_printf(sc->dev,
		    "Unknown control queue type 0x%x\n",
		    q_type);
		return 0;
	}

	ice_check_ctrlq_errors(sc, qname, cq);

	/*
	 * Control queue processing happens during the admin task which may be
	 * holding a non-sleepable lock, so we *must* use M_NOWAIT here.
	 */
	event.buf_len = cq->rq_buf_size;
	event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT);
	if (!event.msg_buf) {
		device_printf(sc->dev,
		    "Unable to allocate memory for %s Receive Queue event\n",
		    qname);
		return (ENOMEM);
	}

	/* Drain events, bounded by ICE_CTRLQ_WORK_LIMIT per invocation */
	do {
		status = ice_clean_rq_elem(hw, cq, &event, pending);
		if (status == ICE_ERR_AQ_NO_WORK)
			break;
		if (status) {
			device_printf(sc->dev, "%s Receive Queue event error %s\n",
			    qname, ice_status_str(status));
			free(event.msg_buf, M_ICE);
			return (EIO);
		}
		/* XXX should we separate this handler by controlq type? */
		ice_process_ctrlq_event(sc, qname, &event);
	} while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT));

	free(event.msg_buf, M_ICE);

	return 0;
}

/**
 * pkg_ver_empty - Check if a package version is empty
 * @pkg_ver: the package version to check
 * @pkg_name: the package name to check
 *
 * Checks if the package version structure is empty. We consider a package
 * version as empty if none of the versions are non-zero and the name string
 * is null as well.
 *
 * This is used to check if the package version was initialized by the driver,
 * as we do not expect an actual DDP package file to have a zero'd version and
 * name.
 *
 * @returns true if the package version and name are empty, or false
 * otherwise.
 */
static bool
pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name)
{
	return (pkg_name[0] == '\0' &&
		pkg_ver->major == 0 &&
		pkg_ver->minor == 0 &&
		pkg_ver->update == 0 &&
		pkg_ver->draft == 0);
}

/**
 * pkg_ver_compatible - Check if the package version is compatible
 * @pkg_ver: the package version to check
 *
 * Compares the package version number to the driver's expected major/minor
 * version. Returns an integer indicating whether the version is older, newer,
 * or compatible with the driver.
 *
 * @returns 0 if the package version is compatible, -1 if the package version
 * is older, and 1 if the package version is newer than the driver version.
*/
static int
pkg_ver_compatible(struct ice_pkg_ver *pkg_ver)
{
	/* Only major.minor are compared; update/draft do not affect support */
	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ)
		return (1); /* newer */
	else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
		 (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
		return (1); /* newer */
	else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
		 (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR))
		return (0); /* compatible */
	else
		return (-1); /* older */
}

/**
 * ice_os_pkg_version_str - Format OS package version info into a sbuf
 * @hw: device hw structure
 * @buf: string buffer to store name/version string
 *
 * Formats the name and version of the OS DDP package as found in the ice_ddp
 * module into a string.
 *
 * @remark This will almost always be the same as the active package, but
 * could be different in some cases. Use ice_active_pkg_version_str to get the
 * version of the active DDP package.
 */
static void
ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf)
{
	char name_buf[ICE_PKG_NAME_SIZE];

	/* If the OS DDP package info is empty, use "None" */
	if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) {
		sbuf_printf(buf, "None");
		return;
	}

	/*
	 * This should already be null-terminated, but since this is a raw
	 * value from an external source, strlcpy() into a new buffer to
	 * make sure.
	 */
	bzero(name_buf, sizeof(name_buf));
	strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE);

	sbuf_printf(buf, "%s version %u.%u.%u.%u",
	    name_buf,
	    hw->pkg_ver.major,
	    hw->pkg_ver.minor,
	    hw->pkg_ver.update,
	    hw->pkg_ver.draft);
}

/**
 * ice_active_pkg_version_str - Format active package version info into a sbuf
 * @hw: device hw structure
 * @buf: string buffer to store name/version string
 *
 * Formats the name and version of the active DDP package info into a string
 * buffer for use.
 */
static void
ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf)
{
	char name_buf[ICE_PKG_NAME_SIZE];

	/* If the active DDP package info is empty, use "None" */
	if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) {
		sbuf_printf(buf, "None");
		return;
	}

	/*
	 * This should already be null-terminated, but since this is a raw
	 * value from an external source, strlcpy() into a new buffer to
	 * make sure.
	 */
	bzero(name_buf, sizeof(name_buf));
	strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE);

	sbuf_printf(buf, "%s version %u.%u.%u.%u",
	    name_buf,
	    hw->active_pkg_ver.major,
	    hw->active_pkg_ver.minor,
	    hw->active_pkg_ver.update,
	    hw->active_pkg_ver.draft);

	/* Only append the track id when one has been recorded */
	if (hw->active_track_id != 0)
		sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id);
}

/**
 * ice_nvm_version_str - Format the NVM version information into a sbuf
 * @hw: device hw structure
 * @buf: string buffer to store version string
 *
 * Formats the NVM information including firmware version, API version, NVM
 * version, the EETRACK id, and OEM specific version information into a string
 * buffer.
 */
static void
ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf)
{
	struct ice_nvm_info *nvm = &hw->flash.nvm;
	struct ice_orom_info *orom = &hw->flash.orom;
	struct ice_netlist_info *netlist = &hw->flash.netlist;

	/* Note that the netlist versions are stored in packed Binary Coded
	 * Decimal format. The use of '%x' will correctly display these as
	 * decimal numbers. This works because every 4 bits will be displayed
	 * as a hexadecimal digit, and the BCD format will only use the values
	 * 0-9.
*/
	sbuf_printf(buf,
	    "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u",
	    hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
	    hw->api_maj_ver, hw->api_min_ver,
	    nvm->major, nvm->minor, nvm->eetrack,
	    netlist->major, netlist->minor,
	    netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
	    netlist->cust_ver, netlist->hash,
	    orom->major, orom->build, orom->patch);
}

/**
 * ice_print_nvm_version - Print the NVM info to the kernel message log
 * @sc: the device softc structure
 *
 * Format and print an NVM version string using ice_nvm_version_str().
 */
void
ice_print_nvm_version(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *sbuf;

	/* Build the version string in an auto-sized sbuf, print, release */
	sbuf = sbuf_new_auto();
	ice_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ice_update_port_oversize - Update port oversize stats
 * @sc: device private structure
 * @rx_errors: VSI error drops
 *
 * Add ERROR_CNT from GLV_REPC VSI register and rx_oversize stats counter
 */
static void
ice_update_port_oversize(struct ice_softc *sc, u64 rx_errors)
{
	struct ice_hw_port_stats *cur_ps;

	cur_ps = &sc->stats.cur;
	sc->soft_stats.rx_roc_error = rx_errors + cur_ps->rx_oversize;
}

/**
 * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 *
 * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with
 * the updated values.
 */
void
ice_update_vsi_hw_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->sc->hw;
	u16 vsi_num;

	if (!ice_is_vsi_valid(hw, vsi->idx))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */
	prev_es = &vsi->hw_stats.prev;
	cur_es = &vsi->hw_stats.cur;

	/* Local helper macros to cut down repetition; #undef'd below */
#define ICE_VSI_STAT40(name, location) \
	ice_stat_update40(hw, name ## L(vsi_num), \
			  vsi->hw_stats.offsets_loaded, \
			  &prev_es->location, &cur_es->location)

#define ICE_VSI_STAT32(name, location) \
	ice_stat_update32(hw, name(vsi_num), \
			  vsi->hw_stats.offsets_loaded, \
			  &prev_es->location, &cur_es->location)

	ICE_VSI_STAT40(GLV_GORC, rx_bytes);
	ICE_VSI_STAT40(GLV_UPRC, rx_unicast);
	ICE_VSI_STAT40(GLV_MPRC, rx_multicast);
	ICE_VSI_STAT40(GLV_BPRC, rx_broadcast);
	ICE_VSI_STAT32(GLV_RDPC, rx_discards);
	ICE_VSI_STAT40(GLV_GOTC, tx_bytes);
	ICE_VSI_STAT40(GLV_UPTC, tx_unicast);
	ICE_VSI_STAT40(GLV_MPTC, tx_multicast);
	ICE_VSI_STAT40(GLV_BPTC, tx_broadcast);
	ICE_VSI_STAT32(GLV_TEPC, tx_errors);

	ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded,
			     cur_es);
	ice_update_port_oversize(vsi->sc, cur_es->rx_errors);

#undef ICE_VSI_STAT40
#undef ICE_VSI_STAT32

	vsi->hw_stats.offsets_loaded = true;
}

/**
 * ice_reset_vsi_stats - Reset VSI statistics counters
 * @vsi: VSI structure
 *
 * Resets the software tracking counters for the VSI statistics, and indicate
 * that the offsets haven't been loaded. This is intended to be called
 * post-reset so that VSI statistics count from zero again.
 */
void
ice_reset_vsi_stats(struct ice_vsi *vsi)
{
	/* Reset HW stats */
	memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev));
	memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur));
	vsi->hw_stats.offsets_loaded = false;
}

/**
 * ice_update_pf_stats - Update port stats counters
 * @sc: device private softc structure
 *
 * Reads hardware statistics registers and updates the software tracking
 * structure with new values.
*/
void
ice_update_pf_stats(struct ice_softc *sc)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &sc->hw;
	u8 lport;

	MPASS(hw->port_info);

	prev_ps = &sc->stats.prev;
	cur_ps = &sc->stats.cur;
	lport = hw->port_info->lport;

	/* Local helper macros to cut down repetition; #undef'd below */
#define ICE_PF_STAT_PFC(name, location, index) \
	ice_stat_update40(hw, name(lport, index), \
			  sc->stats.offsets_loaded, \
			  &prev_ps->location[index], &cur_ps->location[index])

#define ICE_PF_STAT40(name, location) \
	ice_stat_update40(hw, name ## L(lport), \
			  sc->stats.offsets_loaded, \
			  &prev_ps->location, &cur_ps->location)

#define ICE_PF_STAT32(name, location) \
	ice_stat_update32(hw, name(lport), \
			  sc->stats.offsets_loaded, \
			  &prev_ps->location, &cur_ps->location)

	ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes);
	ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast);
	ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast);
	ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast);
	ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes);
	ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast);
	ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast);
	ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast);
	/* This stat register doesn't have an lport */
	ice_stat_update32(hw, PRTRPB_RDPC,
			  sc->stats.offsets_loaded,
			  &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards);

	ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down);
	ICE_PF_STAT40(GLPRT_PRC64, rx_size_64);
	ICE_PF_STAT40(GLPRT_PRC127, rx_size_127);
	ICE_PF_STAT40(GLPRT_PRC255, rx_size_255);
	ICE_PF_STAT40(GLPRT_PRC511, rx_size_511);
	ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023);
	ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522);
	ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big);
	ICE_PF_STAT40(GLPRT_PTC64, tx_size_64);
	ICE_PF_STAT40(GLPRT_PTC127, tx_size_127);
	ICE_PF_STAT40(GLPRT_PTC255, tx_size_255);
	ICE_PF_STAT40(GLPRT_PTC511, tx_size_511);
	ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023);
	ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522);
	ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big);

	/* Update Priority Flow Control Stats */
	for (int i = 0; i <= GLPRT_PXOFFRXC_MAX_INDEX; i++) {
		ICE_PF_STAT_PFC(GLPRT_PXONRXC, priority_xon_rx, i);
		ICE_PF_STAT_PFC(GLPRT_PXOFFRXC, priority_xoff_rx, i);
		ICE_PF_STAT_PFC(GLPRT_PXONTXC, priority_xon_tx, i);
		ICE_PF_STAT_PFC(GLPRT_PXOFFTXC, priority_xoff_tx, i);
		ICE_PF_STAT_PFC(GLPRT_RXON2OFFCNT, priority_xon_2_xoff, i);
	}

	ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx);
	ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx);
	ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx);
	ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx);

	ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors);
	ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes);
	ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults);
	ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults);
	ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors);
	ICE_PF_STAT32(GLPRT_RUC, rx_undersize);
	ICE_PF_STAT32(GLPRT_RFC, rx_fragments);
	ICE_PF_STAT32(GLPRT_ROC, rx_oversize);
	ICE_PF_STAT32(GLPRT_RJC, rx_jabber);

#undef ICE_PF_STAT40
#undef ICE_PF_STAT32
#undef ICE_PF_STAT_PFC

	sc->stats.offsets_loaded = true;
}

/**
 * ice_reset_pf_stats - Reset port stats counters
 * @sc: Device private softc structure
 *
 * Reset software tracking values for statistics to zero, and indicate that
 * offsets haven't been loaded. Intended to be called after a device reset so
 * that statistics count from zero again.
 */
void
ice_reset_pf_stats(struct ice_softc *sc)
{
	memset(&sc->stats.prev, 0, sizeof(sc->stats.prev));
	memset(&sc->stats.cur, 0, sizeof(sc->stats.cur));
	sc->stats.offsets_loaded = false;
}

/**
 * ice_sysctl_show_fw - sysctl callback to show firmware information
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Callback for the fw_version sysctl, to display the current firmware
 * information found at hardware init time.
*/
static int
ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	struct sbuf *sbuf;

	UNREFERENCED_PARAMETER(oidp);
	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* sbuf_new_for_sysctl copies the data to the request on finish */
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	ice_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_sysctl_pba_number - sysctl callback to show PBA number
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Callback for the pba_number sysctl, used to read the Product Board Assembly
 * number for this device.
 */
static int
ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u8 pba_string[32] = "";
	int status;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* The PBA string is read from the device NVM */
	status = ice_read_pba_string(hw, pba_string, sizeof(pba_string));
	if (status) {
		device_printf(dev,
		    "%s: failed to read PBA string from NVM; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req);
}

/**
 * ice_sysctl_pkg_version - sysctl to show the active package version info
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Callback for the pkg_version sysctl, to display the active DDP package name
 * and version information.
 */
static int
ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	struct sbuf *sbuf;

	UNREFERENCED_PARAMETER(oidp);
	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	ice_active_pkg_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_sysctl_os_pkg_version - sysctl to show the OS package version info
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Callback for the pkg_version sysctl, to display the OS DDP package name and
 * version info found in the ice_ddp module.
 */
static int
ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	struct sbuf *sbuf;

	UNREFERENCED_PARAMETER(oidp);
	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	ice_os_pkg_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_sysctl_current_speed - sysctl callback to show current link speed
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Callback for the current_speed sysctl, to display the string representing
 * the current link speed.
*/
static int
ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	struct sbuf *sbuf;

	UNREFERENCED_PARAMETER(oidp);
	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* 10 bytes is enough for the longest speed string */
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req);
	sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info));
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * @var phy_link_speeds
 * @brief PHY link speed conversion array
 *
 * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into
 * link speeds used by the link speed sysctls.
 *
 * @remark these are based on the indices used in the BIT() macros for the
 * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions.
 */
static const uint16_t phy_link_speeds[] = {
    /* ICE_PHY_TYPE_LOW_* bit indices 0..63 */
    ICE_AQ_LINK_SPEED_100MB,
    ICE_AQ_LINK_SPEED_100MB,
    ICE_AQ_LINK_SPEED_1000MB,
    ICE_AQ_LINK_SPEED_1000MB,
    ICE_AQ_LINK_SPEED_1000MB,
    ICE_AQ_LINK_SPEED_1000MB,
    ICE_AQ_LINK_SPEED_1000MB,
    ICE_AQ_LINK_SPEED_2500MB,
    ICE_AQ_LINK_SPEED_2500MB,
    ICE_AQ_LINK_SPEED_2500MB,
    ICE_AQ_LINK_SPEED_5GB,
    ICE_AQ_LINK_SPEED_5GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_10GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_25GB,
    ICE_AQ_LINK_SPEED_40GB,
    ICE_AQ_LINK_SPEED_40GB,
    ICE_AQ_LINK_SPEED_40GB,
    ICE_AQ_LINK_SPEED_40GB,
    ICE_AQ_LINK_SPEED_40GB,
    ICE_AQ_LINK_SPEED_40GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_50GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    /* These rates are for ICE_PHY_TYPE_HIGH_* */
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_100GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
    ICE_AQ_LINK_SPEED_200GB,
};

/* Help text for the advertise_speed sysctl handler */
#define ICE_SYSCTL_HELP_ADVERTISE_SPEED \
"\nControl advertised link speed." \
"\nFlags:" \
"\n\t   0x0 - Auto" \
"\n\t   0x1 - 10 Mb" \
"\n\t   0x2 - 100 Mb" \
"\n\t   0x4 - 1G" \
"\n\t   0x8 - 2.5G" \
"\n\t  0x10 - 5G" \
"\n\t  0x20 - 10G" \
"\n\t  0x40 - 20G" \
"\n\t  0x80 - 25G" \
"\n\t 0x100 - 40G" \
"\n\t 0x200 - 50G" \
"\n\t 0x400 - 100G" \
"\n\t 0x800 - 200G" \
"\n\t0x8000 - Unknown" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
/*
 * Groups of ICE_PHY_TYPE_LOW_*/ICEE_PHY_TYPE_HIGH_* bits by link speed; used
 * to translate ICE_AQ_LINK_SPEED_* flags into PHY type masks for the Set PHY
 * Config command.
 */
#define ICE_PHYS_100MB			\
    (ICE_PHY_TYPE_LOW_100BASE_TX |	\
     ICE_PHY_TYPE_LOW_100M_SGMII)
#define ICE_PHYS_1000MB			\
    (ICE_PHY_TYPE_LOW_1000BASE_T |	\
     ICE_PHY_TYPE_LOW_1000BASE_SX |	\
     ICE_PHY_TYPE_LOW_1000BASE_LX |	\
     ICE_PHY_TYPE_LOW_1000BASE_KX |	\
     ICE_PHY_TYPE_LOW_1G_SGMII)
#define ICE_PHYS_2500MB			\
    (ICE_PHY_TYPE_LOW_2500BASE_T |	\
     ICE_PHY_TYPE_LOW_2500BASE_X |	\
     ICE_PHY_TYPE_LOW_2500BASE_KX)
#define ICE_PHYS_5GB			\
    (ICE_PHY_TYPE_LOW_5GBASE_T |	\
     ICE_PHY_TYPE_LOW_5GBASE_KR)
#define ICE_PHYS_10GB			\
    (ICE_PHY_TYPE_LOW_10GBASE_T |	\
     ICE_PHY_TYPE_LOW_10G_SFI_DA |	\
     ICE_PHY_TYPE_LOW_10GBASE_SR |	\
     ICE_PHY_TYPE_LOW_10GBASE_LR |	\
     ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 |	\
     ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC |	\
     ICE_PHY_TYPE_LOW_10G_SFI_C2C)
#define ICE_PHYS_25GB			\
    (ICE_PHY_TYPE_LOW_25GBASE_T |	\
     ICE_PHY_TYPE_LOW_25GBASE_CR |	\
     ICE_PHY_TYPE_LOW_25GBASE_CR_S |	\
     ICE_PHY_TYPE_LOW_25GBASE_CR1 |	\
     ICE_PHY_TYPE_LOW_25GBASE_SR |	\
     ICE_PHY_TYPE_LOW_25GBASE_LR |	\
     ICE_PHY_TYPE_LOW_25GBASE_KR |	\
     ICE_PHY_TYPE_LOW_25GBASE_KR_S |	\
     ICE_PHY_TYPE_LOW_25GBASE_KR1 |	\
     ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC |	\
     ICE_PHY_TYPE_LOW_25G_AUI_C2C)
#define ICE_PHYS_40GB			\
    (ICE_PHY_TYPE_LOW_40GBASE_CR4 |	\
     ICE_PHY_TYPE_LOW_40GBASE_SR4 |	\
     ICE_PHY_TYPE_LOW_40GBASE_LR4 |	\
     ICE_PHY_TYPE_LOW_40GBASE_KR4 |	\
     ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \
     ICE_PHY_TYPE_LOW_40G_XLAUI)
#define ICE_PHYS_50GB			\
    (ICE_PHY_TYPE_LOW_50GBASE_CR2 |	\
     ICE_PHY_TYPE_LOW_50GBASE_SR2 |	\
     ICE_PHY_TYPE_LOW_50GBASE_LR2 |	\
     ICE_PHY_TYPE_LOW_50GBASE_KR2 |	\
     ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \
     ICE_PHY_TYPE_LOW_50G_LAUI2 |	\
     ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \
     ICE_PHY_TYPE_LOW_50G_AUI2 |	\
     ICE_PHY_TYPE_LOW_50GBASE_CP |	\
     ICE_PHY_TYPE_LOW_50GBASE_SR |	\
     ICE_PHY_TYPE_LOW_50GBASE_FR |	\
     ICE_PHY_TYPE_LOW_50GBASE_LR |	\
     ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 |	\
     ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \
     ICE_PHY_TYPE_LOW_50G_AUI1)
#define ICE_PHYS_100GB_LOW		\
    (ICE_PHY_TYPE_LOW_100GBASE_CR4 |	\
     ICE_PHY_TYPE_LOW_100GBASE_SR4 |	\
     ICE_PHY_TYPE_LOW_100GBASE_LR4 |	\
     ICE_PHY_TYPE_LOW_100GBASE_KR4 |	\
     ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
     ICE_PHY_TYPE_LOW_100G_CAUI4 |	\
     ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
     ICE_PHY_TYPE_LOW_100G_AUI4 |	\
     ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
     ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
     ICE_PHY_TYPE_LOW_100GBASE_CP2 |	\
     ICE_PHY_TYPE_LOW_100GBASE_SR2 |	\
     ICE_PHY_TYPE_LOW_100GBASE_DR)
#define ICE_PHYS_100GB_HIGH		\
    (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
     ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
     ICE_PHY_TYPE_HIGH_100G_CAUI2 |	\
     ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
     ICE_PHY_TYPE_HIGH_100G_AUI2)
#define ICE_PHYS_200GB			\
    (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 |	\
     ICE_PHY_TYPE_HIGH_200G_SR4 |	\
     ICE_PHY_TYPE_HIGH_200G_FR4 |	\
     ICE_PHY_TYPE_HIGH_200G_LR4 |	\
     ICE_PHY_TYPE_HIGH_200G_DR4 |	\
     ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 |	\
     ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
     ICE_PHY_TYPE_HIGH_200G_AUI4 |	\
     ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC | \
     ICE_PHY_TYPE_HIGH_200G_AUI8)

/**
 * ice_aq_phy_types_to_link_speeds - Convert the PHY Types to speeds
 * @phy_type_low: lower 64-bit PHY Type bitmask
 * @phy_type_high: upper 64-bit PHY Type bitmask
 *
 * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into
 * link speed flags. If phy_type_high has an unknown PHY type, then the return
 * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well.
*/
static u16
ice_aq_phy_types_to_link_speeds(u64 phy_type_low, u64 phy_type_high)
{
	u16 sysctl_speeds = 0;
	int bit;

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_type_low, 64)
		sysctl_speeds |= phy_link_speeds[bit];

	/* High PHY types index into the table at offset 64 */
	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_type_high, 64) {
		if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds))
			sysctl_speeds |= phy_link_speeds[bit + 64];
		else
			sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN;
	}

	return (sysctl_speeds);
}

/**
 * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags
 * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags
 * @phy_type_low: output parameter for lower AQ PHY flags
 * @phy_type_high: output parameter for higher AQ PHY flags
 *
 * Converts the given link speed flags into AQ PHY type flag sets appropriate
 * for use in a Set PHY Config command.
 */
static void
ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
				  u64 *phy_type_high)
{
	*phy_type_low = 0, *phy_type_high = 0;

	if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB)
		*phy_type_low |= ICE_PHYS_100MB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB)
		*phy_type_low |= ICE_PHYS_1000MB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB)
		*phy_type_low |= ICE_PHYS_2500MB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB)
		*phy_type_low |= ICE_PHYS_5GB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB)
		*phy_type_low |= ICE_PHYS_10GB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB)
		*phy_type_low |= ICE_PHYS_25GB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB)
		*phy_type_low |= ICE_PHYS_40GB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB)
		*phy_type_low |= ICE_PHYS_50GB;
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) {
		/* 100G PHY types are split across the low and high quads */
		*phy_type_low |= ICE_PHYS_100GB_LOW;
		*phy_type_high |= ICE_PHYS_100GB_HIGH;
	}
	if (sysctl_speeds & ICE_AQ_LINK_SPEED_200GB)
		*phy_type_high |= ICE_PHYS_200GB;
}

/**
 * @struct ice_phy_data
 * @brief PHY caps and link speeds
 *
 * Buffer providing report mode and user speeds;
 * returning intersection of PHY types and speeds.
 */
struct ice_phy_data {
	u64 phy_low_orig;	/* PHY low quad from report */
	u64 phy_high_orig;	/* PHY high quad from report */
	u64 phy_low_intr;	/* PHY low quad intersection with user speeds */
	u64 phy_high_intr;	/* PHY high quad intersection with user speeds */
	u16 user_speeds_orig;	/* Input from caller - See ICE_AQ_LINK_SPEED_* */
	u16 user_speeds_intr;	/* Intersect with report speeds */
	u8 report_mode;		/* See ICE_AQC_REPORT_* */
};

/**
 * ice_intersect_phy_types_and_speeds - Return intersection of link speeds
 * @sc: device private structure
 * @phy_data: device PHY data
 *
 * On read: Displays the currently supported speeds
 * On write: Sets the device's supported speeds
 * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED
 */
static int
ice_intersect_phy_types_and_speeds(struct ice_softc *sc,
				   struct ice_phy_data *phy_data)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	const char *report_types[5] = { "w/o MEDIA",
					"w/MEDIA",
					"ACTIVE",
					"EDOOFUS", /* Not used */
					"DFLT" };
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi = hw->port_info;
	int status;
	u16 report_speeds, temp_speeds;
	u8 report_type;
	bool apply_speed_filter = false;

	/* Map the AQ report mode onto an index into report_types[] */
	switch (phy_data->report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
	case ICE_AQC_REPORT_ACTIVE_CFG:
	case ICE_AQC_REPORT_DFLT_CFG:
		report_type = phy_data->report_mode >> 1;
		break;
	default:
		device_printf(sc->dev,
		    "%s: phy_data.report_mode \"%u\" doesn't exist\n",
		    __func__, phy_data->report_mode);
		return (EINVAL);
	}

	/* 0 is treated as "Auto"; the driver will handle selecting the
	 * correct speeds. Including, in some cases, applying an override
	 * if provided.
*/
	if (phy_data->user_speeds_orig == 0)
		phy_data->user_speeds_orig = USHRT_MAX;
	else if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE))
		apply_speed_filter = true;

	status = ice_aq_get_phy_caps(pi, false, phy_data->report_mode,
	    &pcaps, NULL);
	if (status) {
		device_printf(sc->dev,
		    "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
		    __func__, report_types[report_type],
		    ice_status_str(status),
		    ice_aq_str(sc->hw.adminq.sq_last_status));
		return (EIO);
	}

	phy_data->phy_low_orig = le64toh(pcaps.phy_type_low);
	phy_data->phy_high_orig = le64toh(pcaps.phy_type_high);
	report_speeds = ice_aq_phy_types_to_link_speeds(phy_data->phy_low_orig,
	    phy_data->phy_high_orig);
	if (apply_speed_filter) {
		/* Restrict reported speeds to what the module supports */
		temp_speeds = ice_apply_supported_speed_filter(report_speeds,
		    pcaps.module_type[0]);
		if ((phy_data->user_speeds_orig & temp_speeds) == 0) {
			device_printf(sc->dev,
			    "User-specified speeds (\"0x%04X\") not supported\n",
			    phy_data->user_speeds_orig);
			return (EINVAL);
		}
		report_speeds = temp_speeds;
	}
	ice_sysctl_speeds_to_aq_phy_types(phy_data->user_speeds_orig,
	    &phy_data->phy_low_intr, &phy_data->phy_high_intr);
	phy_data->user_speeds_intr = phy_data->user_speeds_orig &
	    report_speeds;
	phy_data->phy_low_intr &= phy_data->phy_low_orig;
	phy_data->phy_high_intr &= phy_data->phy_high_orig;

	return (0);
}

/**
 * ice_sysctl_advertise_speed - Display/change link speeds supported by port
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the currently supported speeds
 * On write: Sets the device's supported speeds
 * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED
 */
static int
ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_phy_data phy_data = { 0 };
	device_t dev = sc->dev;
	u16 sysctl_speeds;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Get the current speeds from the adapter's "active" configuration. */
	phy_data.report_mode = ICE_AQC_REPORT_ACTIVE_CFG;
	ret = ice_intersect_phy_types_and_speeds(sc, &phy_data);
	if (ret) {
		/* Error message already printed within function */
		return (ret);
	}

	sysctl_speeds = phy_data.user_speeds_intr;

	ret = sysctl_handle_16(oidp, &sysctl_speeds, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	if (sysctl_speeds > ICE_SYSCTL_SPEEDS_VALID_RANGE) {
		device_printf(dev,
		    "%s: \"%u\" is outside of the range of acceptable values.\n",
		    __func__, sysctl_speeds);
		return (EINVAL);
	}

	/* Cache the user's requested speeds for later link ups */
	pi->phy.curr_user_speed_req = sysctl_speeds;

	if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
	    !sc->link_up)
		return 0;

	/* Apply settings requested by user */
	return ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS);
}

/* Help text for the fec sysctl handler */
#define ICE_SYSCTL_HELP_FEC_CONFIG			\
"\nDisplay or set the port's requested FEC mode."	\
"\n\tauto - " ICE_FEC_STRING_AUTO			\
"\n\tfc - " ICE_FEC_STRING_BASER			\
"\n\trs - " ICE_FEC_STRING_RS				\
"\n\tnone - " ICE_FEC_STRING_NONE			\
"\nEither of the left or right strings above can be used to set the requested mode."

/**
 * ice_sysctl_fec_config - Display/change the configured FEC mode
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the configured FEC mode
 * On write: Sets the device's FEC mode to the input string, if it's valid.
* Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG */ static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_port_info *pi = sc->hw.port_info; enum ice_fec_mode new_mode; device_t dev = sc->dev; char req_fec[32]; int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); bzero(req_fec, sizeof(req_fec)); strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); ret = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); if ((ret) || (req->newptr == NULL)) return (ret); if (strcmp(req_fec, "auto") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { if (sc->allow_no_fec_mod_in_auto) new_mode = ICE_FEC_DIS_AUTO; else new_mode = ICE_FEC_AUTO; } else if (strcmp(req_fec, "fc") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { new_mode = ICE_FEC_BASER; } else if (strcmp(req_fec, "rs") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) { new_mode = ICE_FEC_RS; } else if (strcmp(req_fec, "none") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) { new_mode = ICE_FEC_NONE; } else { device_printf(dev, "%s: \"%s\" is not a valid FEC mode\n", __func__, req_fec); return (EINVAL); } /* Cache user FEC mode for later link ups */ pi->phy.curr_user_fec_req = new_mode; if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && !sc->link_up) return 0; /* Apply settings requested by user */ return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC); } /** * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the negotiated FEC mode, in a string */ static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; char neg_fec[32]; int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Copy 
const string into a buffer to drop const qualifier */
	bzero(neg_fec, sizeof(neg_fec));
	strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info),
	    sizeof(neg_fec));

	ret = sysctl_handle_string(oidp, neg_fec, 0, req);
	/* This sysctl is read-only; reject any attempt to write it */
	if (req->newptr != NULL)
		return (EPERM);

	return (ret);
}

#define ICE_SYSCTL_HELP_FC_CONFIG \
"\nDisplay or set the port's advertised flow control mode.\n" \
"\t0 - " ICE_FC_STRING_NONE \
"\n\t1 - " ICE_FC_STRING_RX \
"\n\t2 - " ICE_FC_STRING_TX \
"\n\t3 - " ICE_FC_STRING_FULL \
"\nEither the numbers or the strings above can be used to set the advertised mode."

/**
 * ice_sysctl_fc_config - Display/change the advertised flow control mode
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the configured flow control mode
 * On write: Sets the device's flow control mode to the input, if it's valid.
 * Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG
 */
static int
ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	enum ice_fc_mode old_mode, new_mode;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	int ret, fc_num;
	bool mode_set = false;
	struct sbuf buf;
	char *fc_str_end;
	char fc_str[32];

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     &pcaps, NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	/* Convert HW response format to SW enum value */
	if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
	    (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE))
		old_mode = ICE_FC_FULL;
	else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		old_mode = ICE_FC_TX_PAUSE;
	else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		old_mode = ICE_FC_RX_PAUSE;
	else
		old_mode = ICE_FC_NONE;

	/* Create "old" string for output */
	bzero(fc_str, sizeof(fc_str));
	sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req);
	sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode));
	sbuf_finish(&buf);
	sbuf_delete(&buf);

	ret = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* Try to parse input as a string, first */
	if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) {
		new_mode = ICE_FC_FULL;
		mode_set = true;
	}
	else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) {
		new_mode = ICE_FC_TX_PAUSE;
		mode_set = true;
	}
	else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) {
		new_mode = ICE_FC_RX_PAUSE;
		mode_set = true;
	}
	else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) {
		new_mode = ICE_FC_NONE;
		mode_set = true;
	}

	/*
	 * Then check if it's an integer, for compatibility with the method
	 * used in older drivers.
	 */
	if (!mode_set) {
		fc_num = strtol(fc_str, &fc_str_end, 0);
		/* strtol leaves end == start when no digits were consumed */
		if (fc_str_end == fc_str)
			fc_num = -1;
		switch (fc_num) {
		case 3:
			new_mode = ICE_FC_FULL;
			break;
		case 2:
			new_mode = ICE_FC_TX_PAUSE;
			break;
		case 1:
			new_mode = ICE_FC_RX_PAUSE;
			break;
		case 0:
			new_mode = ICE_FC_NONE;
			break;
		default:
			device_printf(dev,
			    "%s: \"%s\" is not a valid flow control mode\n",
			    __func__, fc_str);
			return (EINVAL);
		}
	}

	/* Save flow control mode from user */
	pi->phy.curr_user_fc_req = new_mode;

	/* Turn off Priority Flow Control when Link Flow Control is enabled */
	if ((hw->port_info->qos_cfg.is_sw_lldp) &&
	    (hw->port_info->qos_cfg.local_dcbx_cfg.pfc.pfcena != 0) &&
	    (new_mode != ICE_FC_NONE)) {
		ret = ice_config_pfc(sc, 0x0);
		if (ret)
			return (ret);
	}

	/* Defer applying the setting until link up, unless link is forced
	 * active while the interface is down */
	if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
	    !sc->link_up)
		return 0;

	/* Apply settings requested by user */
	return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC);
}

/**
 * ice_sysctl_negotiated_fc - Display currently negotiated FC mode
 * @oidp: sysctl oid structure
 * @arg1: pointer to
private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the currently negotiated flow control settings.
 *
 * If link is not established, this will report ICE_FC_NONE, as no flow
 * control is negotiated while link is down.
 */
static int
ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_port_info *pi = sc->hw.port_info;
	const char *negotiated_fc;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	negotiated_fc = ice_flowcontrol_mode(pi);

	/* __DECONST: sysctl_handle_string takes a non-const buffer, but a
	 * read-only request never writes through it */
	return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req);
}

/**
 * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type
 *
 * Private handler for phy_type_high and phy_type_low sysctls.
 */
static int
__ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	uint64_t types;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     &pcaps, NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	if (is_phy_type_high)
		types = pcaps.phy_type_high;
	else
		types = pcaps.phy_type_low;

	ret = sysctl_handle_64(oidp, &types, sizeof(types), req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg);

	/* Mask the user's request against what the port actually supports */
	if (is_phy_type_high)
		cfg.phy_type_high = types & hw->port_info->phy.phy_type_high;
	else
		cfg.phy_type_low = types & hw->port_info->phy.phy_type_low;
	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	return (0);
}

/**
 * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the currently supported lower PHY types
 * On write: Sets the device's supported low PHY types
 */
static int
ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS)
{
	return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false);
}

/**
 * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the currently supported higher PHY types
 * On write: Sets the device's supported high PHY types
 */
static int
ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS)
{
	return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true);
}

/**
 * ice_sysctl_phy_caps - Display response from Get PHY abililties
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 * @report_mode: the mode to report
 *
 * On read: Display the response from Get PHY abillities with the given report
 * mode.
 */
static int
ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi = hw->port_info;
	device_t dev = sc->dev;
	int status;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	/* Raw firmware response data; restrict to privileged contexts */
	ret = priv_check(curthread, PRIV_DRIVER);
	if (ret)
		return (ret);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	ret = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req);
	/* Read-only sysctl; reject writes */
	if (req->newptr != NULL)
		return (EPERM);

	return (ret);
}

/**
 * ice_sysctl_phy_sw_caps - Display response from Get PHY abililties
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from Get PHY abillities reporting the last
 * software configuration.
 */
static int
ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS)
{
	return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
				   ICE_AQC_REPORT_ACTIVE_CFG);
}

/**
 * ice_sysctl_phy_nvm_caps - Display response from Get PHY abililties
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from Get PHY abillities reporting the NVM
 * configuration.
 */
static int
ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS)
{
	return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
				   ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA);
}

/**
 * ice_sysctl_phy_topo_caps - Display response from Get PHY abililties
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from Get PHY abillities reporting the
 * topology configuration.
 */
static int
ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS)
{
	return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
				   ICE_AQC_REPORT_TOPO_CAP_MEDIA);
}

/**
 * ice_sysctl_phy_link_status - Display response from Get Link Status
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from firmware for the Get Link Status
 * request.
 */
static int
ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi = hw->port_info;
	struct ice_aqc_get_link_status *resp;
	struct ice_aq_desc desc;
	device_t dev = sc->dev;
	int status;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	/*
	 * Ensure that only contexts with driver privilege are allowed to
	 * access this information
	 */
	ret = priv_check(curthread, PRIV_DRIVER);
	if (ret)
		return (ret);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Build and send the Get Link Status AQ command directly so the raw
	 * firmware response buffer can be exported */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	resp = &desc.params.get_link_status;
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	ret = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req);
	/* Read-only sysctl; reject writes */
	if (req->newptr != NULL)
		return (EPERM);

	return (ret);
}

/**
 * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status
 * @oidp: sysctl oid structure
 * @arg1: pointer to private softc structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays current persistent LLDP status.
 */
static int
ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	struct sbuf *sbuf;
	u32 lldp_state;

	UNREFERENCED_PARAMETER(arg2);
	UNREFERENCED_PARAMETER(oidp);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	status = ice_get_cur_lldp_persist_status(hw, &lldp_state);
	if (status) {
		device_printf(dev,
		    "Could not acquire current LLDP persistence status, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	/* Emit the human-readable state string via an sbuf-backed sysctl */
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state));
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status
 * @oidp: sysctl oid structure
 * @arg1: pointer to private softc structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays default persistent LLDP status.
 */
static int
ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	struct sbuf *sbuf;
	u32 lldp_state;

	UNREFERENCED_PARAMETER(arg2);
	UNREFERENCED_PARAMETER(oidp);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	status = ice_get_dflt_lldp_persist_status(hw, &lldp_state);
	if (status) {
		device_printf(dev,
		    "Could not acquire default LLDP persistence status, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	/* Emit the human-readable state string via an sbuf-backed sysctl */
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state));
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_dscp_is_mapped - Check for non-zero DSCP to TC mappings
 * @dcbcfg: Configuration struct to check for mappings in
 *
 * @return true if there exists a non-zero DSCP to TC mapping
 * inside the input DCB configuration struct.
*/ static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg) { for (int i = 0; i < ICE_DSCP_NUM_VAL; i++) if (dcbcfg->dscp_map[i] != 0) return (true); return (false); } #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ "\nDisplay or change FW LLDP agent state:" \ "\n\t0 - disabled" \ "\n\t1 - enabled" /** * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status * @oidp: sysctl oid structure * @arg1: pointer to private softc structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays whether the FW LLDP agent is running * On write: Persistently enables or disables the FW LLDP agent */ static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_dcbx_cfg *local_dcbx_cfg; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; int ret; u32 old_state; u8 fw_lldp_enabled; bool retried_start_lldp = false; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_get_cur_lldp_persist_status(hw, &old_state); if (status) { device_printf(dev, "Could not acquire current LLDP persistence status, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) { status = ice_get_dflt_lldp_persist_status(hw, &old_state); if (status) { device_printf(dev, "Could not acquire default LLDP persistence status, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } if (old_state == 0) fw_lldp_enabled = false; else fw_lldp_enabled = true; ret = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); if ((ret) || (req->newptr == NULL)) return (ret); if (old_state == 0 && fw_lldp_enabled == false) return (0); if (old_state != 0 && fw_lldp_enabled == true) return (0); /* Block transition to FW LLDP if DSCP mode is enabled */ local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg; if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) || 
ice_dscp_is_mapped(local_dcbx_cfg)) { device_printf(dev, "Cannot enable FW-LLDP agent while DSCP QoS is active.\n"); return (EOPNOTSUPP); } if (fw_lldp_enabled == false) { status = ice_aq_stop_lldp(hw, true, true, NULL); /* EPERM is returned if the LLDP agent is already shutdown */ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) { device_printf(dev, "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } ice_aq_set_dcb_parameters(hw, true, NULL); hw->port_info->qos_cfg.is_sw_lldp = true; ice_add_rx_lldp_filter(sc); } else { ice_del_rx_lldp_filter(sc); retry_start_lldp: status = ice_aq_start_lldp(hw, true, NULL); if (status) { switch (hw->adminq.sq_last_status) { /* EEXIST is returned if the LLDP agent is already started */ case ICE_AQ_RC_EEXIST: break; case ICE_AQ_RC_EAGAIN: /* Retry command after a 2 second wait */ if (retried_start_lldp == false) { retried_start_lldp = true; pause("slldp", ICE_START_LLDP_RETRY_WAIT); goto retry_start_lldp; } /* Fallthrough */ default: device_printf(dev, "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } ice_start_dcbx_agent(sc); /* Init DCB needs to be done during enabling LLDP to properly * propagate the configuration. */ status = ice_init_dcb(hw, true); if (status) { device_printf(dev, "%s: ice_init_dcb failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; } } return (ret); } #define ICE_SYSCTL_HELP_ETS_MIN_RATE \ "\nIn FW DCB mode (fw_lldp_agent=1), displays the current ETS bandwidth table." \ "\nIn SW DCB mode, displays and allows setting the table." \ "\nInput must be in the format e.g. 
30,10,10,10,10,10,10,10" \ "\nWhere the bandwidth total must add up to 100" /** * ice_sysctl_ets_min_rate - Report/configure ETS bandwidth * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Returns the current ETS TC bandwidth table * cached by the driver. * * In SW DCB mode this sysctl also accepts a value that will * be sent to the firmware for configuration. */ static int ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_dcbx_cfg *local_dcbx_cfg; struct ice_port_info *pi; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; struct sbuf *sbuf; int ret; /* Store input rates from user */ char ets_user_buf[128] = ""; u8 new_ets_table[ICE_MAX_TRAFFIC_CLASS] = {}; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); if (req->oldptr == NULL && req->newptr == NULL) { ret = SYSCTL_OUT(req, 0, 128); return (ret); } pi = hw->port_info; local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; sbuf = sbuf_new(NULL, ets_user_buf, 128, SBUF_FIXEDLEN | SBUF_INCLUDENUL); /* Format ETS BW data for output */ for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { sbuf_printf(sbuf, "%d", local_dcbx_cfg->etscfg.tcbwtable[i]); if (i != ICE_MAX_TRAFFIC_CLASS - 1) sbuf_printf(sbuf, ","); } sbuf_finish(sbuf); sbuf_delete(sbuf); /* Read in the new ETS values */ ret = sysctl_handle_string(oidp, ets_user_buf, sizeof(ets_user_buf), req); if ((ret) || (req->newptr == NULL)) return (ret); /* Don't allow setting changes in FW DCB mode */ if (!hw->port_info->qos_cfg.is_sw_lldp) return (EPERM); ret = ice_ets_str_to_tbl(ets_user_buf, new_ets_table, 100); if (ret) { device_printf(dev, "%s: Could not parse input BW table: %s\n", __func__, ets_user_buf); return (ret); } if (!ice_check_ets_bw(new_ets_table)) { device_printf(dev, "%s: Bandwidth sum does not equal 100: %s\n", __func__, ets_user_buf); return (EINVAL); } 
memcpy(local_dcbx_cfg->etscfg.tcbwtable, new_ets_table, sizeof(new_ets_table)); /* If BW > 0, then set TSA entry to 2 */ for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { if (new_ets_table[i] > 0) local_dcbx_cfg->etscfg.tsatable[i] = 2; else local_dcbx_cfg->etscfg.tsatable[i] = 0; } local_dcbx_cfg->etscfg.willing = 0; local_dcbx_cfg->etsrec = local_dcbx_cfg->etscfg; local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING; status = ice_set_dcb_cfg(pi); if (status) { device_printf(dev, "%s: Failed to set DCB config; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } ice_do_dcb_reconfig(sc, false); return (0); } #define ICE_SYSCTL_HELP_UP2TC_MAP \ "\nIn FW DCB mode (fw_lldp_agent=1), displays the current ETS priority assignment table." \ "\nIn SW DCB mode, displays and allows setting the table." \ "\nInput must be in this format: 0,1,2,3,4,5,6,7" \ "\nWhere the 1st number is the TC for UP0, 2nd number is the TC for UP1, etc" /** * ice_sysctl_up2tc_map - Report or configure UP2TC mapping * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * In FW DCB mode, returns the current ETS prio table / * UP2TC mapping from the local MIB. * * In SW DCB mode this sysctl also accepts a value that will * be sent to the firmware for configuration. 
 */
static int
ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_port_info *pi;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	struct sbuf *sbuf;
	int ret;

	/* Store input rates from user */
	char up2tc_user_buf[128] = "";
	/* This array is indexed by UP, not TC */
	u8 new_up2tc[ICE_MAX_TRAFFIC_CLASS] = {};

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Sizing-only request: report the maximum output length */
	if (req->oldptr == NULL && req->newptr == NULL) {
		ret = SYSCTL_OUT(req, 0, 128);
		return (ret);
	}

	pi = hw->port_info;
	local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;

	sbuf = sbuf_new(NULL, up2tc_user_buf, 128, SBUF_FIXEDLEN | SBUF_INCLUDENUL);

	/* Format ETS Priority Mapping Table for output */
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		sbuf_printf(sbuf, "%d", local_dcbx_cfg->etscfg.prio_table[i]);
		if (i != ICE_MAX_TRAFFIC_CLASS - 1)
			sbuf_printf(sbuf, ",");
	}
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	/* Read in the new ETS priority mapping */
	ret = sysctl_handle_string(oidp, up2tc_user_buf, sizeof(up2tc_user_buf), req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* Don't allow setting changes in FW DCB mode */
	if (!hw->port_info->qos_cfg.is_sw_lldp)
		return (EPERM);

	ret = ice_ets_str_to_tbl(up2tc_user_buf, new_up2tc,
	    ICE_MAX_TRAFFIC_CLASS - 1);
	if (ret) {
		device_printf(dev,
		    "%s: Could not parse input priority assignment table: %s\n",
		    __func__, up2tc_user_buf);
		return (ret);
	}

	/* Prepare updated ETS CFG/REC TLVs */
	memcpy(local_dcbx_cfg->etscfg.prio_table, new_up2tc,
	    sizeof(new_up2tc));
	memcpy(local_dcbx_cfg->etsrec.prio_table, new_up2tc,
	    sizeof(new_up2tc));

	status = ice_set_dcb_cfg(pi);
	if (status) {
		device_printf(dev,
		    "%s: Failed to set DCB config; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	ice_do_dcb_reconfig(sc, false);

	return (0);
}

/**
 * ice_config_pfc - helper function to set PFC config in FW
 * @sc:
device private structure
 * @new_mode: bit flags indicating PFC status for TCs
 *
 * @pre must be in SW DCB mode
 *
 * Configures the driver's local PFC TLV and sends it to the
 * FW for configuration, then reconfigures the driver/VSI
 * for DCB if needed.
 */
static int
ice_config_pfc(struct ice_softc *sc, u8 new_mode)
{
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi;
	device_t dev = sc->dev;
	int status;

	pi = hw->port_info;
	local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;

	/* Prepare updated PFC TLV */
	local_dcbx_cfg->pfc.pfcena = new_mode;
	local_dcbx_cfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
	local_dcbx_cfg->pfc.willing = 0;
	local_dcbx_cfg->pfc.mbc = 0;

	/* Warn if PFC is being disabled with RoCE v2 in use */
	if (new_mode == 0 && sc->rdma_entry.attached)
		device_printf(dev,
		    "WARNING: Recommended that Priority Flow Control is enabled when RoCEv2 is in use\n");

	status = ice_set_dcb_cfg(pi);
	if (status) {
		device_printf(dev,
		    "%s: Failed to set DCB config; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	ice_do_dcb_reconfig(sc, false);

	return (0);
}

#define ICE_SYSCTL_HELP_PFC_CONFIG \
"\nIn FW DCB mode (fw_lldp_agent=1), displays the current Priority Flow Control configuration" \
"\nIn SW DCB mode, displays and allows setting the configuration" \
"\nInput/Output is in this format: 0xff" \
"\nWhere bit position # enables/disables PFC for that Traffic Class #"

/**
 * ice_sysctl_pfc_config - Report or configure enabled PFC TCs
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * In FW DCB mode, returns a bitmap containing the current TCs
 * that have PFC enabled on them.
 *
 * In SW DCB mode this sysctl also accepts a value that will
 * be sent to the firmware for configuration.
 */
static int
ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_port_info *pi;
	struct ice_hw *hw = &sc->hw;
	int ret;

	/* Store input flags from user */
	u8 user_pfc;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Sizing-only request: report the output size */
	if (req->oldptr == NULL && req->newptr == NULL) {
		ret = SYSCTL_OUT(req, 0, sizeof(u8));
		return (ret);
	}

	pi = hw->port_info;
	local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;

	/* Format current PFC enable setting for output */
	user_pfc = local_dcbx_cfg->pfc.pfcena;

	/* Read in the new PFC config */
	ret = sysctl_handle_8(oidp, &user_pfc, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* Don't allow setting changes in FW DCB mode */
	if (!hw->port_info->qos_cfg.is_sw_lldp)
		return (EPERM);

	/* If LFC is active and PFC is going to be turned on, turn LFC off */
	if (user_pfc != 0 && pi->phy.curr_user_fc_req != ICE_FC_NONE) {
		pi->phy.curr_user_fc_req = ICE_FC_NONE;
		if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
		    sc->link_up) {
			ret = ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC);
			if (ret)
				return (ret);
		}
	}

	return ice_config_pfc(sc, user_pfc);
}

#define ICE_SYSCTL_HELP_PFC_MODE \
"\nDisplay and set the current QoS mode for the firmware" \
"\n\t0: VLAN UP mode" \
"\n\t1: DSCP mode"

/**
 * ice_sysctl_pfc_mode
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Gets and sets whether the port is in DSCP or VLAN PCP-based
 * PFC mode. This is also used to set whether DSCP or VLAN PCP
 * -based settings are configured for DCB.
 */
static int
ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_port_info *pi;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	u8 user_pfc_mode, aq_pfc_mode;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Sizing-only request: report the output size */
	if (req->oldptr == NULL && req->newptr == NULL) {
		ret = SYSCTL_OUT(req, 0, sizeof(u8));
		return (ret);
	}

	pi = hw->port_info;
	local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;

	user_pfc_mode = local_dcbx_cfg->pfc_mode;

	/* Read in the new mode */
	ret = sysctl_handle_8(oidp, &user_pfc_mode, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* Don't allow setting changes in FW DCB mode */
	if (!hw->port_info->qos_cfg.is_sw_lldp)
		return (EPERM);

	/* Currently, there are only two modes */
	switch (user_pfc_mode) {
	case 0:
		aq_pfc_mode = ICE_AQC_PFC_VLAN_BASED_PFC;
		break;
	case 1:
		aq_pfc_mode = ICE_AQC_PFC_DSCP_BASED_PFC;
		break;
	default:
		device_printf(dev,
		    "%s: Valid input range is 0-1 (input %d)\n",
		    __func__, user_pfc_mode);
		return (EINVAL);
	}

	status = ice_aq_set_pfc_mode(hw, aq_pfc_mode, NULL);
	if (status == ICE_ERR_NOT_SUPPORTED) {
		device_printf(dev,
		    "%s: Failed to set PFC mode; DCB not supported\n",
		    __func__);
		return (ENODEV);
	}
	if (status) {
		device_printf(dev,
		    "%s: Failed to set PFC mode; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	/* Reset settings to default when mode is changed */
	ice_set_default_local_mib_settings(sc);
	/* Cache current settings and reconfigure */
	local_dcbx_cfg->pfc_mode = user_pfc_mode;
	ice_do_dcb_reconfig(sc, false);

	return (0);
}

#define ICE_SYSCTL_HELP_SET_LINK_ACTIVE \
"\nKeep link active after setting interface down:" \
"\n\t0 - disable" \
"\n\t1 - enable"

/**
 * ice_sysctl_set_link_active
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Set the
link_active_on_if_down sysctl flag. */ static int ice_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; bool mode; int ret; UNREFERENCED_PARAMETER(arg2); mode = ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN); ret = sysctl_handle_bool(oidp, &mode, 0, req); if ((ret) || (req->newptr == NULL)) return (ret); if (mode) ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN); else ice_clear_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN); return (0); } /** * ice_sysctl_debug_set_link * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Set link up/down in debug session. */ static int ice_sysctl_debug_set_link(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; bool mode; int ret; UNREFERENCED_PARAMETER(arg2); ret = sysctl_handle_bool(oidp, &mode, 0, req); if ((ret) || (req->newptr == NULL)) return (ret); ice_set_link(sc, mode != 0); return (0); } /** * ice_add_device_sysctls - add device specific dynamic sysctls * @sc: device private structure * * Add per-device dynamic sysctls which show device configuration or enable * configuring device functionality. For tunable values which can be set prior * to load, see ice_add_device_tunables. * * This function depends on the sysctl layout setup by ice_add_device_tunables, * and likely should be called near the end of the attach process. 
 */
void
ice_add_device_sysctls(struct ice_softc *sc)
{
	struct sysctl_oid *hw_node;
	device_t dev = sc->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, ice_sysctl_show_fw, "A", "Firmware version");

	/* PBA number is only available on some devices */
	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_HAS_PBA)) {
		SYSCTL_ADD_PROC(ctx, ctx_list,
		    OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
		    ice_sysctl_pba_number, "A", "Product Board Assembly Number");
	}
	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_TEMP_SENSOR)) {
		SYSCTL_ADD_PROC(ctx, ctx_list,
		    OID_AUTO, "temp", CTLTYPE_S8 | CTLFLAG_RD,
		    sc, 0, ice_sysctl_temperature, "CU",
		    "Device temperature in degrees Celcius (C)");
	}

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW,
	    sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW,
	    sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW,
	    sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN,
	    sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "ets_min_rate", CTLTYPE_STRING | CTLFLAG_RW,
	    sc, 0, ice_sysctl_ets_min_rate, "A", ICE_SYSCTL_HELP_ETS_MIN_RATE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "up2tc_map", CTLTYPE_STRING | CTLFLAG_RW,
	    sc, 0, ice_sysctl_up2tc_map, "A", ICE_SYSCTL_HELP_UP2TC_MAP);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "pfc", CTLTYPE_U8 | CTLFLAG_RW,
	    sc, 0, ice_sysctl_pfc_config, "CU", ICE_SYSCTL_HELP_PFC_CONFIG);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "pfc_mode", CTLTYPE_U8 | CTLFLAG_RWTUN,
	    sc, 0, ice_sysctl_pfc_mode, "CU", ICE_SYSCTL_HELP_PFC_MODE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "allow_no_fec_modules_in_auto",
	    CTLTYPE_U8 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
	    sc, 0, ice_sysctl_allow_no_fec_mod_in_auto, "CU",
	    "Allow \"No FEC\" mode in FEC auto-negotiation");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "link_active_on_if_down", CTLTYPE_U8 | CTLFLAG_RWTUN,
	    sc, 0, ice_sysctl_set_link_active, "CU", ICE_SYSCTL_HELP_SET_LINK_ACTIVE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "create_mirror_interface", CTLTYPE_STRING | CTLFLAG_RW,
	    sc, 0, ice_sysctl_create_mirror_interface, "A", "");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "destroy_mirror_interface", CTLTYPE_STRING | CTLFLAG_RW,
	    sc, 0, ice_sysctl_destroy_mirror_interface, "A", "");

	ice_add_dscp2tc_map_sysctls(sc, ctx, ctx_list);

	/* Differentiate software and hardware statistics, by keeping hw stats
	 * in their own node. This isn't in ice_add_device_tunables, because
	 * we won't have any CTLFLAG_TUN sysctls under this node.
	 */
	hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD,
				  NULL, "Port Hardware Statistics");

	ice_add_sysctls_mac_stats(ctx, hw_node, sc);

	/* Add the main PF VSI stats now. Other VSIs will add their own stats
	 * during creation
	 */
	ice_add_vsi_sysctls(&sc->pf_vsi);

	/* Add sysctls related to debugging the device driver. This includes
	 * sysctls which display additional internal driver state for use in
	 * understanding what is happening within the driver.
	 */
	ice_add_debug_sysctls(sc);
}

/**
 * @enum hmc_error_type
 * @brief enumeration of HMC errors
 *
 * Enumeration defining the possible HMC errors that might occur.
 */
enum hmc_error_type {
	HMC_ERR_PMF_INVALID = 0,
	HMC_ERR_VF_IDX_INVALID = 1,
	HMC_ERR_VF_PARENT_PF_INVALID = 2,
	/* 3 is reserved */
	HMC_ERR_INDEX_TOO_BIG = 4,
	HMC_ERR_ADDRESS_TOO_LARGE = 5,
	HMC_ERR_SEGMENT_DESC_INVALID = 6,
	HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7,
	HMC_ERR_PAGE_DESC_INVALID = 8,
	HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9,
	/* 10 is reserved */
	HMC_ERR_INVALID_OBJECT_TYPE = 11,
	/* 12 is reserved */
};

/**
 * ice_log_hmc_error - Log an HMC error message
 * @hw: device hw structure
 * @dev: the device to pass to device_printf()
 *
 * Log a message when an HMC error interrupt is triggered.
 */
void
ice_log_hmc_error(struct ice_hw *hw, device_t dev)
{
	u32 info, data;
	u8 index, errtype, objtype;
	bool isvf;

	info = rd32(hw, PFHMC_ERRORINFO);
	data = rd32(hw, PFHMC_ERRORDATA);

	/* Decode the error info register fields */
	index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M);
	errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >>
		       PFHMC_ERRORINFO_HMC_ERROR_TYPE_S);
	objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >>
		       PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S);
	isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M;

	device_printf(dev, "%s HMC Error detected on PMF index %d:\n",
		      isvf ? "VF" : "PF", index);

	device_printf(dev, "error type %d, object type %d, data 0x%08x\n",
		      errtype, objtype, data);

	switch (errtype) {
	case HMC_ERR_PMF_INVALID:
		device_printf(dev,
		    "Private Memory Function is not valid\n");
		break;
	case HMC_ERR_VF_IDX_INVALID:
		device_printf(dev,
		    "Invalid Private Memory Function index for PE enabled VF\n");
		break;
	case HMC_ERR_VF_PARENT_PF_INVALID:
		device_printf(dev,
		    "Invalid parent PF for PE enabled VF\n");
		break;
	case HMC_ERR_INDEX_TOO_BIG:
		device_printf(dev,
		    "Object index too big\n");
		break;
	case HMC_ERR_ADDRESS_TOO_LARGE:
		device_printf(dev,
		    "Address extends beyond segment descriptor limit\n");
		break;
	case HMC_ERR_SEGMENT_DESC_INVALID:
		device_printf(dev,
		    "Segment descriptor is invalid\n");
		break;
	case HMC_ERR_SEGMENT_DESC_TOO_SMALL:
		device_printf(dev,
		    "Segment descriptor is too small\n");
		break;
	case HMC_ERR_PAGE_DESC_INVALID:
		device_printf(dev,
		    "Page descriptor is invalid\n");
		break;
	case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION:
		device_printf(dev,
		    "Unsupported Request completion received from PCIe\n");
		break;
	case HMC_ERR_INVALID_OBJECT_TYPE:
		device_printf(dev,
		    "Invalid object type\n");
		break;
	default:
		device_printf(dev, "Unknown HMC error\n");
	}

	/* Clear the error indication */
	wr32(hw, PFHMC_ERRORINFO, 0);
}

/**
 * @struct ice_sysctl_info
 * @brief sysctl information
 *
 * Structure used to simplify the process of defining the many similar
 * statistics sysctls.
 */
struct ice_sysctl_info {
	u64 *stat;		/* pointer to the statistic value to export */
	const char *name;	/* sysctl node name */
	const char *description;	/* sysctl description string */
};

/**
 * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics
 * @ctx: sysctl ctx to use
 * @parent: the parent node to add sysctls under
 * @stats: the ethernet stats structure to source values from
 *
 * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI.
 * Will add them under the parent node specified.
 *
 * Note that tx_errors is only meaningful for VSIs and not the global MAC/PF
 * statistics, so it is not included here.
Similarly, rx_discards has different * descriptions for VSIs and MAC/PF stats, so it is also not included here. */ void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_eth_stats *stats) { const struct ice_sysctl_info ctls[] = { /* Rx Stats */ { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }, { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" }, { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" }, { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" }, /* Tx Stats */ { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" }, { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" }, { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" }, { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" }, /* End */ { 0, 0, 0 } }; struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); const struct ice_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name, CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, entry->description); entry++; } } /** * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: Tx CSO stat to read * @req: sysctl request pointer * * On read: Sums the per-queue Tx CSO stat and displays it. 
 */
static int
ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS)
{
	struct ice_vsi *vsi = (struct ice_vsi *)arg1;
	enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2;
	u64 stat = 0;
	int i;

	if (ice_driver_is_detaching(vsi->sc))
		return (ESHUTDOWN);

	/* Check that the type is valid; EDOOFUS indicates a programming
	 * error (a handler registered with a bad arg2).
	 */
	if (type >= ICE_CSO_STAT_TX_COUNT)
		return (EDOOFUS);

	/* Sum the stat for each of the Tx queues */
	for (i = 0; i < vsi->num_tx_queues; i++)
		stat += vsi->tx_queues[i].stats.cso[type];

	return sysctl_handle_64(oidp, NULL, stat, req);
}

/**
 * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: Rx CSO stat to read
 * @req: sysctl request pointer
 *
 * On read: Sums the per-queue Rx CSO stat and displays it.
 */
static int
ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS)
{
	struct ice_vsi *vsi = (struct ice_vsi *)arg1;
	enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2;
	u64 stat = 0;
	int i;

	if (ice_driver_is_detaching(vsi->sc))
		return (ESHUTDOWN);

	/* Check that the type is valid */
	if (type >= ICE_CSO_STAT_RX_COUNT)
		return (EDOOFUS);

	/* Sum the stat for each of the Rx queues */
	for (i = 0; i < vsi->num_rx_queues; i++)
		stat += vsi->rx_queues[i].stats.cso[type];

	return sysctl_handle_64(oidp, NULL, stat, req);
}

/**
 * ice_sysctl_rx_errors_stat - Display aggregate of Rx errors
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Sums current values of Rx error statistics and
 * displays it.
 */
static int
ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS)
{
	struct ice_vsi *vsi = (struct ice_vsi *)arg1;
	struct ice_hw_port_stats *hs = &vsi->sc->stats.cur;
	u64 stat = 0;
	int i, type;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(vsi->sc))
		return (ESHUTDOWN);

	/* Aggregate the port-level malformed-frame counters */
	stat += hs->rx_undersize;
	stat += hs->rx_fragments;
	stat += hs->rx_oversize;
	stat += hs->rx_jabber;
	stat += hs->crc_errors;
	stat += hs->illegal_bytes;

	/* Checksum error stats */
	for (i = 0; i < vsi->num_rx_queues; i++)
		for (type = ICE_CSO_STAT_RX_IP4_ERR;
		     type < ICE_CSO_STAT_RX_COUNT;
		     type++)
			stat += vsi->rx_queues[i].stats.cso[type];

	return sysctl_handle_64(oidp, NULL, stat, req);
}

/**
 * @struct ice_rx_cso_stat_info
 * @brief sysctl information for an Rx checksum offload statistic
 *
 * Structure used to simplify the process of defining the checksum offload
 * statistics.
 */
struct ice_rx_cso_stat_info {
	enum ice_rx_cso_stat	type;
	const char		*name;
	const char		*description;
};

/**
 * @struct ice_tx_cso_stat_info
 * @brief sysctl information for a Tx checksum offload statistic
 *
 * Structure used to simplify the process of defining the checksum offload
 * statistics.
 */
struct ice_tx_cso_stat_info {
	enum ice_tx_cso_stat	type;
	const char		*name;
	const char		*description;
};

/**
 * ice_add_sysctls_sw_stats - Add sysctls for software statistics
 * @vsi: pointer to the VSI to add sysctls for
 * @ctx: sysctl ctx to use
 * @parent: the parent node to add sysctls under
 *
 * Add statistics sysctls for software tracked statistics of a VSI.
 *
 * Currently this only adds checksum offload statistics, but more counters may
 * be added in the future.
 */
static void
ice_add_sysctls_sw_stats(struct ice_vsi *vsi,
			 struct sysctl_ctx_list *ctx,
			 struct sysctl_oid *parent)
{
	struct sysctl_oid *cso_node;
	struct sysctl_oid_list *cso_list;

	/* Tx CSO Stats; terminated by a sentinel with NULL name/description */
	const struct ice_tx_cso_stat_info tx_ctls[] = {
		{ ICE_CSO_STAT_TX_TCP, "tx_tcp",
		    "Transmit TCP Packets marked for HW checksum" },
		{ ICE_CSO_STAT_TX_UDP, "tx_udp",
		    "Transmit UDP Packets marked for HW checksum" },
		{ ICE_CSO_STAT_TX_SCTP, "tx_sctp",
		    "Transmit SCTP Packets marked for HW checksum" },
		{ ICE_CSO_STAT_TX_IP4, "tx_ip4",
		    "Transmit IPv4 Packets marked for HW checksum" },
		{ ICE_CSO_STAT_TX_IP6, "tx_ip6",
		    "Transmit IPv6 Packets marked for HW checksum" },
		{ ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err",
		    "Transmit packets that driver failed to set L3 HW CSO bits for" },
		{ ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err",
		    "Transmit packets that driver failed to set L4 HW CSO bits for" },
		/* End */
		{ ICE_CSO_STAT_TX_COUNT, 0, 0 }
	};

	/* Rx CSO Stats */
	const struct ice_rx_cso_stat_info rx_ctls[] = {
		{ ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err",
		    "Received packets with invalid IPv4 checksum indicated by HW" },
		{ ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err",
		    "Received IPv6 packets with extension headers" },
		{ ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err",
		    "Received packets with an unexpected invalid L3 checksum indicated by HW" },
		{ ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err",
		    "Received packets with invalid TCP checksum indicated by HW" },
		{ ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err",
		    "Received packets with invalid UDP checksum indicated by HW" },
		{ ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err",
		    "Received packets with invalid SCTP checksum indicated by HW" },
		{ ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err",
		    "Received packets with an unexpected invalid L4 checksum indicated by HW" },
		/* End */
		{ ICE_CSO_STAT_RX_COUNT, 0, 0 }
	};

	struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent);

	/* Add a node for statistics tracked by software. */
	cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD,
				   NULL, "Checksum offload Statistics");
	cso_list = SYSCTL_CHILDREN(cso_node);

	const struct ice_tx_cso_stat_info *tx_entry = tx_ctls;
	while (tx_entry->name && tx_entry->description) {
		SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
		    vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU",
		    tx_entry->description);
		tx_entry++;
	}

	const struct ice_rx_cso_stat_info *rx_entry = rx_ctls;
	while (rx_entry->name && rx_entry->description) {
		SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
		    vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU",
		    rx_entry->description);
		rx_entry++;
	}
}

/**
 * ice_add_vsi_sysctls - Add sysctls for a VSI
 * @vsi: pointer to VSI structure
 *
 * Add various sysctls for a given VSI.
 */
void
ice_add_vsi_sysctls(struct ice_vsi *vsi)
{
	struct sysctl_ctx_list *ctx = &vsi->ctx;
	struct sysctl_oid *hw_node, *sw_node;
	struct sysctl_oid_list *vsi_list, *hw_list;

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	/* Keep hw stats in their own node. */
	hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD,
				  NULL, "VSI Hardware Statistics");
	hw_list = SYSCTL_CHILDREN(hw_node);

	/* Add the ethernet statistics for this VSI */
	ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur);

	SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards",
		CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards,
		0, "Discarded Rx Packets (see rx_errors or rx_no_desc)");

	SYSCTL_ADD_PROC(ctx, hw_list, OID_AUTO, "rx_errors",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    vsi, 0, ice_sysctl_rx_errors_stat, "QU",
	    "Aggregate of all Rx errors");

	SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc",
	    CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc,
	    0, "Rx Packets Discarded Due To Lack Of Descriptors");

	SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors",
	    CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors,
	    0, "Tx Packets Discarded Due To Error");

	/* Add a node for statistics tracked by software. */
	sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD,
				  NULL, "VSI Software Statistics");

	ice_add_sysctls_sw_stats(vsi, ctx, sw_node);
}

/**
 * ice_add_sysctls_mac_pfc_one_stat - Add sysctl node for a PFC statistic
 * @ctx: sysctl ctx to use
 * @parent_list: parent sysctl list to add sysctls under
 * @pfc_stat_location: address of statistic for sysctl to display
 * @node_name: Name for statistic node
 * @descr: Description used for nodes added in this function
 *
 * A helper function for ice_add_sysctls_mac_pfc_stats that adds a node
 * for a stat and leaves for each traffic class for that stat.
 */
static void
ice_add_sysctls_mac_pfc_one_stat(struct sysctl_ctx_list *ctx,
				 struct sysctl_oid_list *parent_list,
				 u64* pfc_stat_location,
				 const char *node_name,
				 const char *descr)
{
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *node;
	struct sbuf *namebuf, *descbuf;

	node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, node_name, CTLFLAG_RD,
			       NULL, descr);
	node_list = SYSCTL_CHILDREN(node);

	/* sbufs are reused across iterations; cleared each pass */
	namebuf = sbuf_new_auto();
	descbuf = sbuf_new_auto();
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		sbuf_clear(namebuf);
		sbuf_clear(descbuf);

		sbuf_printf(namebuf, "%d", i);
		sbuf_printf(descbuf, "%s for TC %d", descr, i);

		sbuf_finish(namebuf);
		sbuf_finish(descbuf);

		SYSCTL_ADD_U64(ctx, node_list, OID_AUTO,
		    sbuf_data(namebuf), CTLFLAG_RD | CTLFLAG_STATS,
		    &pfc_stat_location[i], 0, sbuf_data(descbuf));
	}

	sbuf_delete(namebuf);
	sbuf_delete(descbuf);
}

/**
 * ice_add_sysctls_mac_pfc_stats - Add sysctls for MAC PFC statistics
 * @ctx: the sysctl ctx to use
 * @parent: parent node to add the sysctls under
 * @stats: the hw ports stat structure to pull values from
 *
 * Add global Priority Flow Control MAC statistics sysctls. These are
 * structured as a node with the PFC statistic, where there are eight
 * nodes for each traffic class.
 */
static void
ice_add_sysctls_mac_pfc_stats(struct sysctl_ctx_list *ctx,
			      struct sysctl_oid *parent,
			      struct ice_hw_port_stats *stats)
{
	struct sysctl_oid_list *parent_list;

	parent_list = SYSCTL_CHILDREN(parent);

	/* One node per PFC counter; each node gets a leaf per TC */
	ice_add_sysctls_mac_pfc_one_stat(ctx, parent_list, stats->priority_xon_rx,
	    "p_xon_recvd", "PFC XON received");
	ice_add_sysctls_mac_pfc_one_stat(ctx, parent_list, stats->priority_xoff_rx,
	    "p_xoff_recvd", "PFC XOFF received");
	ice_add_sysctls_mac_pfc_one_stat(ctx, parent_list, stats->priority_xon_tx,
	    "p_xon_txd", "PFC XON transmitted");
	ice_add_sysctls_mac_pfc_one_stat(ctx, parent_list, stats->priority_xoff_tx,
	    "p_xoff_txd", "PFC XOFF transmitted");
	ice_add_sysctls_mac_pfc_one_stat(ctx, parent_list, stats->priority_xon_2_xoff,
	    "p_xon2xoff", "PFC XON to XOFF transitions");
}

/**
 * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics
 * @ctx: the sysctl ctx to use
 * @parent: parent node to add the sysctls under
 * @sc: device private structure
 *
 * Add global MAC statistics sysctls.
 */
void
ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
			  struct sysctl_oid *parent,
			  struct ice_softc *sc)
{
	struct sysctl_oid *mac_node;
	struct sysctl_oid_list *parent_list, *mac_list;
	struct ice_hw_port_stats *stats = &sc->stats.cur;

	parent_list = SYSCTL_CHILDREN(parent);

	mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD,
				   NULL, "Mac Hardware Statistics");
	mac_list = SYSCTL_CHILDREN(mac_node);

	/* Add the ethernet statistics common to VSI and MAC */
	ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth);

	/* Add PFC stats that add per-TC counters */
	ice_add_sysctls_mac_pfc_stats(ctx, mac_node, stats);

	/* Sentinel-terminated table; the loop below stops at stat == 0 */
	const struct ice_sysctl_info ctls[] = {
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->eth.rx_discards, "rx_discards",
		    "Discarded Rx Packets by Port (shortage of storage space)"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		{&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* Other */
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		/* End */
		{ 0, 0, 0 }
	};

	const struct ice_sysctl_info *entry = ctls;
	while (entry->stat != 0) {
		SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name,
		    CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
		    entry->description);
		entry++;
	}

	/* Port oversize packet stats */
	SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, "rx_oversized",
	    CTLFLAG_RD | CTLFLAG_STATS, &sc->soft_stats.rx_roc_error, 0,
	    "Oversized packets received");
}

/**
 * ice_configure_misc_interrupts - enable 'other' interrupt causes
 * @sc: pointer to device private softc
 *
 * Enable various "other" interrupt causes, and associate them to interrupt 0,
 * which is our administrative interrupt.
 */
void
ice_configure_misc_interrupts(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	u32 val;

	/* Read the OICR register to clear it */
	rd32(hw, PFINT_OICR);

	/* Enable useful "other" interrupt causes */
	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* Note that since we're using MSI-X index 0, and ITR index 0, we do
	 * not explicitly program them when writing to the PFINT_*_CTL
	 * registers. Nevertheless, these writes are associating the
	 * interrupts with the ITR 0 vector
	 */

	/* Associate the OICR interrupt with ITR 0, and enable it */
	wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M);

	/* Associate the Mailbox interrupt with ITR 0, and enable it */
	wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M);

	/* Associate the SB Queue interrupt with ITR 0, and enable it */
	wr32(hw, PFINT_SB_CTL, PFINT_SB_CTL_CAUSE_ENA_M);

	/* Associate the AdminQ interrupt with ITR 0, and enable it */
	wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M);
}

/**
 * ice_filter_is_mcast - Check if info is a multicast filter
 * @vsi: vsi structure addresses are targeted towards
 * @info: filter info
 *
 * @returns true if the provided info is a multicast filter, and false
 * otherwise.
 */
static bool
ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info)
{
	const u8 *addr = info->l_data.mac.mac_addr;

	/*
	 * Check if this info matches a multicast filter added by
	 * ice_add_mac_to_list
	 */
	if ((info->flag == ICE_FLTR_TX) &&
	    (info->src_id == ICE_SRC_ID_VSI) &&
	    (info->lkup_type == ICE_SW_LKUP_MAC) &&
	    (info->vsi_handle == vsi->idx) &&
	    ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr))
		return true;

	return false;
}

/**
 * @struct ice_mcast_sync_data
 * @brief data used by ice_sync_one_mcast_filter function
 *
 * Structure used to store data needed for processing by the
 * ice_sync_one_mcast_filter. This structure contains a linked list of filters
 * to be added, an error indication, and a pointer to the device softc.
 */
struct ice_mcast_sync_data {
	struct ice_list_head add_list;
	struct ice_softc *sc;
	int err;
};

/**
 * ice_sync_one_mcast_filter - Check if we need to program the filter
 * @p: void pointer to algorithm data
 * @sdl: link level socket address
 * @count: unused count value
 *
 * Called by if_foreach_llmaddr to operate on each filter in the ifp filter
 * list. For the given address, search our internal list to see if we have
 * found the filter. If not, add it to our list of filters that need to be
 * programmed.
 *
 * @returns (1) if we've actually setup the filter to be added
 */
static u_int
ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl,
			  u_int __unused count)
{
	struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p;
	struct ice_softc *sc = data->sc;
	struct ice_hw *hw = &sc->hw;
	struct ice_switch_info *sw = hw->switch_info;
	const u8 *sdl_addr = (const u8 *)LLADDR(sdl);
	struct ice_fltr_mgmt_list_entry *itr;
	struct ice_list_head *rules;
	int err;

	rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/*
	 * If a previous filter already indicated an error, there is no need
	 * for us to finish processing the rest of the filters.
	 */
	if (data->err)
		return (0);

	/* See if this filter has already been programmed */
	LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *info = &itr->fltr_info;
		const u8 *addr = info->l_data.mac.mac_addr;

		/* Only check multicast filters */
		if (!ice_filter_is_mcast(&sc->pf_vsi, info))
			continue;

		/*
		 * If this filter matches, mark the internal filter as
		 * "found", and exit.
		 */
		if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) {
			itr->marker = ICE_FLTR_FOUND;
			return (1);
		}
	}

	/*
	 * If we failed to locate the filter in our internal list, we need to
	 * place it into our add list.
	 */
	err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr,
				  ICE_FWD_TO_VSI);
	if (err) {
		device_printf(sc->dev,
			      "Failed to place MAC %6D onto add list, err %s\n",
			      sdl_addr, ":", ice_err_str(err));
		data->err = err;

		return (0);
	}

	return (1);
}

/**
 * ice_sync_multicast_filters - Synchronize OS and internal filter list
 * @sc: device private structure
 *
 * Called in response to SIOCDELMULTI to synchronize the operating system
 * multicast address list with the internal list of filters programmed to
 * firmware.
 *
 * Works in one phase to find added and deleted filters using a marker bit on
 * the internal list.
 *
 * First, a loop over the internal list clears the marker bit. Second, for
 * each filter in the ifp list is checked. If we find it in the internal list,
 * the marker bit is set. Otherwise, the filter is added to the add list.
 * Third, a loop over the internal list determines if any filters have not
 * been found. Each of these is added to the delete list. Finally, the add and
 * delete lists are programmed to firmware to update the filters.
 *
 * @returns zero on success or an integer error code on failure.
 */
int
ice_sync_multicast_filters(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	struct ice_mcast_sync_data data = {};
	struct ice_list_head *rules, remove_list;
	int status;
	int err = 0;

	INIT_LIST_HEAD(&data.add_list);
	INIT_LIST_HEAD(&remove_list);
	data.sc = sc;
	data.err = 0;

	rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Acquire the lock for the entire duration */
	ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);

	/* (1) Reset the marker state for all filters */
	LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry)
		itr->marker = ICE_FLTR_NOT_FOUND;

	/* (2) determine which filters need to be added and removed */
	if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data);
	if (data.err) {
		/* ice_sync_one_mcast_filter already prints an error */
		err = data.err;
		ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);
		goto free_filter_lists;
	}

	LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *info = &itr->fltr_info;
		const u8 *addr = info->l_data.mac.mac_addr;

		/* Only check multicast filters */
		if (!ice_filter_is_mcast(&sc->pf_vsi, info))
			continue;

		/*
		 * If the filter is not marked as found, then it must no
		 * longer be in the ifp address list, so we need to remove it.
		 */
		if (itr->marker == ICE_FLTR_NOT_FOUND) {
			err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list,
						  addr, ICE_FWD_TO_VSI);
			if (err) {
				device_printf(sc->dev,
					      "Failed to place MAC %6D onto remove list, err %s\n",
					      addr, ":", ice_err_str(err));
				ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);
				goto free_filter_lists;
			}
		}
	}

	ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);

	status = ice_add_mac(hw, &data.add_list);
	if (status) {
		device_printf(sc->dev,
			      "Could not add new MAC filters, err %s aq_err %s\n",
			      ice_status_str(status),
			      ice_aq_str(hw->adminq.sq_last_status));
		err = (EIO);
		goto free_filter_lists;
	}

	status = ice_remove_mac(hw, &remove_list);
	if (status) {
		device_printf(sc->dev,
			      "Could not remove old MAC filters, err %s aq_err %s\n",
			      ice_status_str(status),
			      ice_aq_str(hw->adminq.sq_last_status));
		err = (EIO);
		goto free_filter_lists;
	}

free_filter_lists:
	ice_free_fltr_list(&data.add_list);
	ice_free_fltr_list(&remove_list);

	return (err);
}

/**
 * ice_add_vlan_hw_filters - Add multiple VLAN filters for a given VSI
 * @vsi: The VSI to add the filter for
 * @vid: array of VLAN ids to add
 * @length: length of vid array
 *
 * Programs HW filters so that the given VSI will receive the specified VLANs.
 */
int
ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
	struct ice_hw *hw = &vsi->sc->hw;
	struct ice_list_head vlan_list;
	struct ice_fltr_list_entry *vlan_entries;
	int status;

	MPASS(length > 0);

	INIT_LIST_HEAD(&vlan_list);

	/* One list entry per VLAN id; freed before return in all paths */
	vlan_entries = (struct ice_fltr_list_entry *)
	    malloc(sizeof(*vlan_entries) * length, M_ICE, M_NOWAIT | M_ZERO);
	if (!vlan_entries)
		return (ICE_ERR_NO_MEMORY);

	for (u16 i = 0; i < length; i++) {
		vlan_entries[i].fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
		vlan_entries[i].fltr_info.fltr_act = ICE_FWD_TO_VSI;
		vlan_entries[i].fltr_info.flag = ICE_FLTR_TX;
		vlan_entries[i].fltr_info.src_id = ICE_SRC_ID_VSI;
		vlan_entries[i].fltr_info.vsi_handle = vsi->idx;
		vlan_entries[i].fltr_info.l_data.vlan.vlan_id = vid[i];

		LIST_ADD(&vlan_entries[i].list_entry, &vlan_list);
	}

	status = ice_add_vlan(hw, &vlan_list);
	if (!status)
		goto done;

	/* On failure, report the per-entry status for each VLAN */
	device_printf(vsi->sc->dev, "Failed to add VLAN filters:\n");
	for (u16 i = 0; i < length; i++) {
		device_printf(vsi->sc->dev,
		    "- vlan %d, status %d\n",
		    vlan_entries[i].fltr_info.l_data.vlan.vlan_id,
		    vlan_entries[i].status);
	}
done:
	free(vlan_entries, M_ICE);

	return (status);
}

/**
 * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI
 * @vsi: The VSI to add the filter for
 * @vid: VLAN to add
 *
 * Programs a HW filter so that the given VSI will receive the specified VLAN.
 */
int
ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
	return ice_add_vlan_hw_filters(vsi, &vid, 1);
}

/**
 * ice_remove_vlan_hw_filters - Remove multiple VLAN filters for a given VSI
 * @vsi: The VSI to remove the filters from
 * @vid: array of VLAN ids to remove
 * @length: length of vid array
 *
 * Removes previously programmed HW filters for the specified VSI.
 */
int
ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
	struct ice_hw *hw = &vsi->sc->hw;
	struct ice_list_head vlan_list;
	struct ice_fltr_list_entry *vlan_entries;
	int status;

	MPASS(length > 0);

	INIT_LIST_HEAD(&vlan_list);

	/* One list entry per VLAN id; freed before return in all paths */
	vlan_entries = (struct ice_fltr_list_entry *)
	    malloc(sizeof(*vlan_entries) * length, M_ICE, M_NOWAIT | M_ZERO);
	if (!vlan_entries)
		return (ICE_ERR_NO_MEMORY);

	for (u16 i = 0; i < length; i++) {
		vlan_entries[i].fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
		vlan_entries[i].fltr_info.fltr_act = ICE_FWD_TO_VSI;
		vlan_entries[i].fltr_info.flag = ICE_FLTR_TX;
		vlan_entries[i].fltr_info.src_id = ICE_SRC_ID_VSI;
		vlan_entries[i].fltr_info.vsi_handle = vsi->idx;
		vlan_entries[i].fltr_info.l_data.vlan.vlan_id = vid[i];

		LIST_ADD(&vlan_entries[i].list_entry, &vlan_list);
	}

	status = ice_remove_vlan(hw, &vlan_list);
	if (!status)
		goto done;

	/* On failure, report the per-entry status for each VLAN */
	device_printf(vsi->sc->dev, "Failed to remove VLAN filters:\n");
	for (u16 i = 0; i < length; i++) {
		device_printf(vsi->sc->dev,
		    "- vlan %d, status %d\n",
		    vlan_entries[i].fltr_info.l_data.vlan.vlan_id,
		    vlan_entries[i].status);
	}
done:
	free(vlan_entries, M_ICE);

	return (status);
}

/**
 * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI
 * @vsi: The VSI to remove the filter from
 * @vid: VLAN to remove
 *
 * Removes a previously programmed HW filter for the specified VSI.
 */
int
ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
	return ice_remove_vlan_hw_filters(vsi, &vid, 1);
}

#define ICE_SYSCTL_HELP_RX_ITR			\
"\nControl Rx interrupt throttle rate."		\
"\n\t0-8160 - sets interrupt rate in usecs"	\
"\n\t -1 - reset the Rx itr to default"

/**
 * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the current Rx ITR value
 * on write: Sets the Rx ITR value, reconfiguring device if it is up
 */
static int
ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ice_vsi *vsi = (struct ice_vsi *)arg1;
	struct ice_softc *sc = vsi->sc;
	int increment, ret;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	ret = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* Allow configuring a negative value to reset to the default */
	if (vsi->rx_itr < 0)
		vsi->rx_itr = ICE_DFLT_RX_ITR;

	if (vsi->rx_itr > ICE_ITR_MAX)
		vsi->rx_itr = ICE_ITR_MAX;

	/* Assume 2usec increment if it hasn't been loaded yet */
	increment = sc->hw.itr_gran ? : 2;

	/* We need to round the value to the hardware's ITR granularity */
	vsi->rx_itr = (vsi->rx_itr / increment ) * increment;

	/* If the driver has finished initializing, then we need to reprogram
	 * the ITR registers now. Otherwise, they will be programmed during
	 * driver initialization.
	 */
	if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
		ice_configure_rx_itr(vsi);

	return (0);
}

#define ICE_SYSCTL_HELP_TX_ITR			\
"\nControl Tx interrupt throttle rate."		\
"\n\t0-8160 - sets interrupt rate in usecs"	\
"\n\t -1 - reset the Tx itr to default"

/**
 * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the current Tx ITR value
 * on write: Sets the Tx ITR value, reconfiguring device if it is up
 */
static int
ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ice_vsi *vsi = (struct ice_vsi *)arg1;
	struct ice_softc *sc = vsi->sc;
	int increment, ret;

	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	ret = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* Allow configuring a negative value to reset to the default */
	if (vsi->tx_itr < 0)
		vsi->tx_itr = ICE_DFLT_TX_ITR;

	if (vsi->tx_itr > ICE_ITR_MAX)
		vsi->tx_itr = ICE_ITR_MAX;

	/* Assume 2usec increment if it hasn't been loaded yet */
	increment = sc->hw.itr_gran ? : 2;

	/* We need to round the value to the hardware's ITR granularity */
	vsi->tx_itr = (vsi->tx_itr / increment ) * increment;

	/* If the driver has finished initializing, then we need to reprogram
	 * the ITR registers now. Otherwise, they will be programmed during
	 * driver initialization.
	 */
	if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
		ice_configure_tx_itr(vsi);

	return (0);
}

/**
 * ice_add_vsi_tunables - Add tunables and nodes for a VSI
 * @vsi: pointer to VSI structure
 * @parent: parent node to add the tunables under
 *
 * Create a sysctl context for the VSI, so that sysctls for the VSI can be
 * dynamically removed upon VSI removal.
 *
 * Add various tunables and set up the basic node structure for the VSI. Must
 * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as
 * possible after the VSI memory is initialized.
* * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that * their values can be read from loader.conf prior to their first use in the * driver. */ void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent) { struct sysctl_oid_list *vsi_list; char vsi_name[32], vsi_desc[32]; struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); /* Initialize the sysctl context for this VSI */ sysctl_ctx_init(&vsi->ctx); /* Add a node to collect this VSI's statistics together */ snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx); snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx); vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name, CTLFLAG_RD, NULL, vsi_desc); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); vsi->rx_itr = ICE_DFLT_TX_ITR; SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr", CTLTYPE_S16 | CTLFLAG_RWTUN, vsi, 0, ice_sysctl_rx_itr, "S", ICE_SYSCTL_HELP_RX_ITR); vsi->tx_itr = ICE_DFLT_TX_ITR; SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr", CTLTYPE_S16 | CTLFLAG_RWTUN, vsi, 0, ice_sysctl_tx_itr, "S", ICE_SYSCTL_HELP_TX_ITR); } /** * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI * @vsi: the VSI to remove contexts for * * Free the context for the VSI sysctls. This includes the main context, as * well as the per-queue sysctls. */ void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi) { device_t dev = vsi->sc->dev; int err; if (vsi->vsi_node) { err = sysctl_ctx_free(&vsi->ctx); if (err) device_printf(dev, "failed to free VSI %d sysctl context, err %s\n", vsi->idx, ice_err_str(err)); vsi->vsi_node = NULL; } } /** * ice_add_dscp2tc_map_sysctls - Add sysctl tree for DSCP to TC mapping * @sc: pointer to device private softc * @ctx: the sysctl ctx to use * @ctx_list: list of sysctl children for device (to add sysctl tree to) * * Add a sysctl tree for individual dscp2tc_map sysctls. 
Each child of this * node can map 8 DSCPs to TC values; there are 8 of these in turn for a total * of 64 DSCP to TC map values that the user can configure. */ void ice_add_dscp2tc_map_sysctls(struct ice_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *ctx_list) { struct sysctl_oid_list *node_list; struct sysctl_oid *node; struct sbuf *namebuf, *descbuf; int first_dscp_val, last_dscp_val; node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "dscp2tc_map", CTLFLAG_RD, NULL, "Map of DSCP values to DCB TCs"); node_list = SYSCTL_CHILDREN(node); namebuf = sbuf_new_auto(); descbuf = sbuf_new_auto(); for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { sbuf_clear(namebuf); sbuf_clear(descbuf); first_dscp_val = i * 8; last_dscp_val = first_dscp_val + 7; sbuf_printf(namebuf, "%d-%d", first_dscp_val, last_dscp_val); sbuf_printf(descbuf, "Map DSCP values %d to %d to TCs", first_dscp_val, last_dscp_val); sbuf_finish(namebuf); sbuf_finish(descbuf); SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, sbuf_data(namebuf), CTLTYPE_STRING | CTLFLAG_RW, sc, i, ice_sysctl_dscp2tc_map, "A", sbuf_data(descbuf)); } sbuf_delete(namebuf); sbuf_delete(descbuf); } /** * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes * @sc: device private structure * * Add per-device dynamic tunable sysctls, and setup the general sysctl trees * for re-use by ice_add_device_sysctls. * * In order for the sysctl fields to be initialized before use, this function * should be called as early as possible during attach activities. * * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized * here in this function, rather than later in ice_add_device_sysctls. * * To make things easier, this function is also expected to setup the various * sysctl nodes in addition to tunables so that other sysctls which can't be * initialized early can hook into the same nodes. 
*/ void ice_add_device_tunables(struct ice_softc *sc) { device_t dev = sc->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); sc->enable_health_events = ice_enable_health_events; SYSCTL_ADD_BOOL(ctx, ctx_list, OID_AUTO, "enable_health_events", CTLFLAG_RDTUN, &sc->enable_health_events, 0, "Enable FW health event reporting for this PF"); /* Add a node to track VSI sysctls. Keep track of the node in the * softc so that we can hook other sysctls into it later. This * includes both the VSI statistics, as well as potentially dynamic * VSIs in the future. */ sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi", CTLFLAG_RD, NULL, "VSI Configuration and Statistics"); /* Add debug tunables */ ice_add_debug_tunables(sc); } /** * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "mac_filters" sysctl to dump the programmed MAC filters. 
*/ static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; sbuf_printf(sbuf, "MAC Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d", fi->l_data.mac.mac_addr, ":", fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters. 
*/ static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; sbuf_printf(sbuf, "VLAN Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", fi->l_data.vlan.vlan_id, fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype * filters. 
*/ static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules; sbuf_printf(sbuf, "Ethertype Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", fi->l_data.ethertype_mac.ethertype, fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "ethertype_mac_filters" sysctl to dump the programmed * Ethertype/MAC filters. 
*/ static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules; sbuf_printf(sbuf, "Ethertype/MAC Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", fi->l_data.ethertype_mac.ethertype, fi->l_data.ethertype_mac.mac_addr, ":", fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_state_flags - Dump device driver state flags * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "state" sysctl to display currently set driver state flags. 
*/ static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct sbuf *sbuf; u32 copied_state; unsigned int i; bool at_least_one = false; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Make a copy of the state to ensure we display coherent values */ copied_state = atomic_load_acq_32(&sc->state); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); /* Add the string for each set state to the sbuf */ for (i = 0; i < 32; i++) { if (copied_state & BIT(i)) { const char *str = ice_state_to_str((enum ice_state)i); at_least_one = true; if (str) sbuf_printf(sbuf, "\n%s", str); else sbuf_printf(sbuf, "\nBIT(%u)", i); } } if (!at_least_one) sbuf_printf(sbuf, "Nothing set"); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } #define ICE_SYSCTL_DEBUG_MASK_HELP \ "\nSelect debug statements to print to kernel message log" \ "\nFlags:" \ "\n\t 0x1 - Function Tracing" \ "\n\t 0x2 - Driver Initialization" \ "\n\t 0x4 - Release" \ "\n\t 0x8 - FW Logging" \ "\n\t 0x10 - Link" \ "\n\t 0x20 - PHY" \ "\n\t 0x40 - Queue Context" \ "\n\t 0x80 - NVM" \ "\n\t 0x100 - LAN" \ "\n\t 0x200 - Flow" \ "\n\t 0x400 - DCB" \ "\n\t 0x800 - Diagnostics" \ "\n\t 0x1000 - Flow Director" \ "\n\t 0x2000 - Switch" \ "\n\t 0x4000 - Scheduler" \ "\n\t 0x8000 - RDMA" \ "\n\t 0x10000 - DDP Package" \ "\n\t 0x20000 - Resources" \ "\n\t 0x40000 - ACL" \ "\n\t 0x80000 - PTP" \ "\n\t 0x100000 - Admin Queue messages" \ "\n\t 0x200000 - Admin Queue descriptors" \ "\n\t 0x400000 - Admin Queue descriptor buffers" \ "\n\t 0x800000 - Admin Queue commands" \ "\n\t 0x1000000 - Parser" \ "\n\t ..." \ "\n\t 0x80000000 - (Reserved for user)" \ "\n\t" \ "\nUse \"sysctl -x\" to view flags properly." /** * ice_add_debug_tunables - Add tunables helpful for debugging the device driver * @sc: device private structure * * Add sysctl tunable values related to debugging the device driver. 
For now,
 * this means a tunable to set the debug mask early during driver load.
 *
 * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so
 * that in normal kernel builds, these will all be hidden, but on a debug
 * kernel they will be more easily visible.
 */
static void
ice_add_debug_tunables(struct ice_softc *sc)
{
	struct sysctl_oid_list *debug_list;
	device_t dev = sc->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug",
					    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
					    NULL, "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);

	SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask",
		       ICE_CTLFLAG_DEBUG | CTLFLAG_RWTUN,
		       &sc->hw.debug_mask, 0,
		       ICE_SYSCTL_DEBUG_MASK_HELP);

	/* Load the default value from the global sysctl first */
	sc->enable_tx_fc_filter = ice_enable_tx_fc_filter;

	SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter",
			ICE_CTLFLAG_DEBUG | CTLFLAG_RDTUN,
			&sc->enable_tx_fc_filter, 0,
			"Drop Ethertype 0x8808 control frames originating from software on this PF");

	sc->tx_balance_en = ice_tx_balance_en;
	SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "tx_balance",
			ICE_CTLFLAG_DEBUG | CTLFLAG_RWTUN,
			&sc->tx_balance_en, 0,
			"Enable 5-layer scheduler topology");

	/* Load the default value from the global sysctl first */
	sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter;

	SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter",
			ICE_CTLFLAG_DEBUG | CTLFLAG_RDTUN,
			&sc->enable_tx_lldp_filter, 0,
			"Drop Ethertype 0x88cc LLDP frames originating from software on this PF");

	ice_add_fw_logging_tunables(sc, sc->debug_sysctls);
}

#define ICE_SYSCTL_HELP_REQUEST_RESET		\
"\nRequest the driver to initiate a reset."	\
"\n\tpfr - Initiate a PF reset"			\
"\n\tcorer - Initiate a CORE reset"		\
"\n\tglobr - Initiate a GLOBAL reset"

/**
 * @var rl_sysctl_ticks
 * @brief timestamp for latest reset request sysctl call
 *
 * Helps rate-limit the call to the sysctl which resets the device
 */
int rl_sysctl_ticks = 0;

/**
 * ice_sysctl_request_reset - Request that the driver initiate a reset
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Callback for "request_reset" sysctl to request that the driver initiate
 * a reset. Expects to be passed one of the following strings
 *
 * "pfr" - Initiate a PF reset
 * "corer" - Initiate a CORE reset
 * "globr" - Initiate a Global reset
 */
static int
ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	int status;
	enum ice_reset_req reset_type = ICE_RESET_INVAL;
	const char *reset_message;
	int ret;

	/* Buffer to store the requested reset string. Must contain enough
	 * space to store the largest expected reset string, which currently
	 * means 6 bytes of space.
	 */
	char reset[6] = "";

	UNREFERENCED_PARAMETER(arg2);

	/* Only privileged users may trigger a device reset */
	ret = priv_check(curthread, PRIV_DRIVER);
	if (ret)
		return (ret);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Read in the requested reset type. */
	ret = sysctl_handle_string(oidp, reset, sizeof(reset), req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	if (strcmp(reset, "pfr") == 0) {
		reset_message = "Requesting a PF reset";
		reset_type = ICE_RESET_PFR;
	} else if (strcmp(reset, "corer") == 0) {
		reset_message = "Initiating a CORE reset";
		reset_type = ICE_RESET_CORER;
	} else if (strcmp(reset, "globr") == 0) {
		reset_message = "Initiating a GLOBAL reset";
		reset_type = ICE_RESET_GLOBR;
	} else if (strcmp(reset, "empr") == 0) {
		device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n");
		return (EOPNOTSUPP);
	}

	if (reset_type == ICE_RESET_INVAL) {
		device_printf(sc->dev, "%s is not a valid reset request\n", reset);
		return (EINVAL);
	}

	/*
	 * Rate-limit the frequency at which this function is called.
	 * Assuming this is called successfully once, typically,
	 * everything should be handled within the allotted time frame.
	 * However, in the odd setup situations, we've also put in
	 * guards for when the reset has finished, but we're in the
	 * process of rebuilding. And instead of queueing an intent,
	 * simply error out and let the caller retry, if so desired.
	 */
	if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) {
		device_printf(sc->dev,
		    "Call frequency too high. Operation aborted.\n");
		return (EBUSY);
	}
	rl_sysctl_ticks = ticks;

	if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) {
		device_printf(sc->dev, "Device rebuilding. Operation aborted.\n");
		return (EBUSY);
	}

	if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) {
		device_printf(sc->dev, "Device in reset. Operation aborted.\n");
		return (EBUSY);
	}

	device_printf(sc->dev, "%s\n", reset_message);

	/* Initiate the PF reset during the admin status task */
	if (reset_type == ICE_RESET_PFR) {
		ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
		return (0);
	}

	/*
	 * Other types of resets including CORE and GLOBAL resets trigger an
	 * interrupt on all PFs. Initiate the reset now. Preparation and
	 * rebuild logic will be handled by the admin status task.
	 */
	status = ice_reset(hw, reset_type);

	/*
	 * Resets can take a long time and we still don't want another call
	 * to this function before we settle down.
	 */
	rl_sysctl_ticks = ticks;

	if (status) {
		device_printf(sc->dev,
		    "failed to initiate device reset, err %s\n",
		    ice_status_str(status));
		ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
		return (EFAULT);
	}

	return (0);
}

#define ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID (0xFFFFFF)

#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \
"\nSelect clusters to dump with \"dump\" sysctl" \
"\nFlags:" \
"\n\t 0 - All clusters (default)" \
"\n\t 0x1 - Switch" \
"\n\t 0x2 - ACL" \
"\n\t 0x4 - Tx Scheduler" \
"\n\t 0x8 - Profile Configuration" \
"\n\t 0x20 - Link" \
"\n\t 0x80 - DCB" \
"\n\t 0x100 - L2P" \
"\n\t 0x400000 - Manageability Transactions (excluding E830)" \
"\n" \
"\nUse \"sysctl -x\" to view flags properly."

/**
 * ice_sysctl_fw_debug_dump_cluster_setting - Set which clusters to dump
 * from FW when FW debug dump occurs
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 */
static int
ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	device_t dev = sc->dev;
	u32 clusters;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	ret = priv_check(curthread, PRIV_DRIVER);
	if (ret)
		return (ret);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	clusters = sc->fw_debug_dump_cluster_mask;

	ret = sysctl_handle_32(oidp, &clusters, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);

	/* The set of valid cluster bits differs per device family */
	u32 valid_cluster_mask;
	if (ice_is_e830(&sc->hw))
		valid_cluster_mask = ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830;
	else
		valid_cluster_mask = ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810;

	if (clusters & ~(valid_cluster_mask)) {
		device_printf(dev,
		    "%s: ERROR: Incorrect settings requested\n",
		    __func__);
		/* Poison the mask so a later dump request is rejected */
		sc->fw_debug_dump_cluster_mask = ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID;
		return (EINVAL);
	}

	sc->fw_debug_dump_cluster_mask = clusters;

	return (0);
}

/* Upper bound on AQ round-trips per cluster, to guard against FW looping */
#define ICE_FW_DUMP_AQ_COUNT_LIMIT (10000)

/**
 * ice_fw_debug_dump_print_cluster - Print formatted cluster data from FW
 * @sc: the device softc
 * @sbuf: initialized sbuf to print data to
 * @cluster_id: FW cluster ID to print data from
 *
 * Reads debug data from the specified cluster id in the FW and prints it to
 * the input sbuf. This function issues multiple AQ commands to the FW in
 * order to get all of the data in the cluster.
 *
 * @remark Only intended to be used by the sysctl handler
 * ice_sysctl_fw_debug_dump_do_dump
 */
static u16
ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 cluster_id)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u16 data_buf_size = ICE_AQ_MAX_BUF_LEN;
	const u8 reserved_buf[8] = {};
	int status;
	int counter = 0;
	u8 *data_buf;

	/* Input parameters / loop variables */
	u16 table_id = 0;
	u32 offset = 0;

	/* Output from the Get Internal Data AQ command */
	u16 ret_buf_size = 0;
	u16 ret_next_cluster = 0;
	u16 ret_next_table = 0;
	u32 ret_next_index = 0;

	/* Other setup */
	data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO);
	if (!data_buf)
		return ret_next_cluster;

	ice_debug(hw, ICE_DBG_DIAG, "%s: dumping cluster id %d\n", __func__,
	    cluster_id);

	for (;;) {
		/* Do not trust the FW behavior to be completely correct */
		if (counter++ >= ICE_FW_DUMP_AQ_COUNT_LIMIT) {
			device_printf(dev,
			    "%s: Exceeded counter limit for cluster %d\n",
			    __func__, cluster_id);
			break;
		}

		ice_debug(hw, ICE_DBG_DIAG, "---\n");
		ice_debug(hw, ICE_DBG_DIAG,
		    "table_id 0x%04x offset 0x%08x buf_size %d\n",
		    table_id, offset, data_buf_size);

		status = ice_aq_get_internal_data(hw, cluster_id, table_id,
		    offset, data_buf, data_buf_size, &ret_buf_size,
		    &ret_next_cluster, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev,
			    "%s: ice_aq_get_internal_data in cluster %d: err %s aq_err %s\n",
			    __func__, cluster_id, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			break;
		}

		ice_debug(hw, ICE_DBG_DIAG,
		    "ret_table_id 0x%04x ret_offset 0x%08x ret_buf_size %d\n",
		    ret_next_table, ret_next_index, ret_buf_size);

		/* Each record: cluster id, table id, length, offset, 8
		 * reserved bytes, then the raw table data -- all binary.
		 */
		/* Print cluster id */
		u32 print_cluster_id = (u32)cluster_id;
		sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id));
		/* Print table id */
		u32 print_table_id = (u32)table_id;
		sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id));
		/* Print table length */
		u32 print_table_length = (u32)ret_buf_size;
		sbuf_bcat(sbuf, &print_table_length, sizeof(print_table_length));
		/* Print current offset */
		u32 print_curr_offset = offset;
		sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset));
		/* Print reserved bytes */
		sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf));
		/* Print data */
		sbuf_bcat(sbuf, data_buf, ret_buf_size);

		/* Adjust loop variables */
		memset(data_buf, 0, data_buf_size);
		bool same_table_next = (table_id == ret_next_table);
		/* 0xff/0xffff are FW sentinels for "no next table"; which
		 * ones apply depends on the NEXT_CLUSTER_ID feature.
		 */
		bool last_table_next;
		if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_NEXT_CLUSTER_ID))
			last_table_next = (ret_next_table == 0xffff);
		else
			last_table_next =
			    (ret_next_table == 0xff || ret_next_table == 0xffff);
		bool last_offset_next = (ret_next_index == 0xffffffff || ret_next_index == 0);

		if ((!same_table_next && !last_offset_next) ||
		    (same_table_next && last_table_next)) {
			device_printf(dev,
			    "%s: Unexpected conditions for same_table_next(%d) last_table_next(%d) last_offset_next(%d), ending cluster (%d)\n",
			    __func__, same_table_next, last_table_next, last_offset_next, cluster_id);
			break;
		}

		if (!same_table_next && !last_table_next && last_offset_next) {
			/* We've hit the end of the table */
			table_id = ret_next_table;
			offset = 0;
		} else if (!same_table_next && last_table_next && last_offset_next) {
			/* We've hit the end of the cluster */
			break;
		} else if (same_table_next && !last_table_next && last_offset_next) {
			/* NOTE(review): cluster 0x1 appears to require
			 * manually stepping through tables 0..39 -- confirm
			 * against the FW debug dump spec.
			 */
			if (cluster_id == 0x1 && table_id < 39)
				table_id += 1;
			else
				break;
		} else { /* if (same_table_next && !last_table_next && !last_offset_next) */
			/* More data left in the table */
			offset = ret_next_index;
		}
	}

	free(data_buf, M_ICE);

	return ret_next_cluster;
}

/**
 * ice_fw_debug_dump_print_clusters - Print data from FW clusters to sbuf
 * @sc: the device softc
 * @sbuf: initialized sbuf to print data to
 *
 * Handles dumping all of the clusters to dump to the indicated sbuf. The
 * clusters do dump are determined by the value in the
 * fw_debug_dump_cluster_mask field in the sc argument.
 *
 * @remark Only intended to be used by the sysctl handler
 * ice_sysctl_fw_debug_dump_do_dump
 */
static void
ice_fw_debug_dump_print_clusters(struct ice_softc *sc, struct sbuf *sbuf)
{
	u16 next_cluster_id, max_cluster_id, start_cluster_id;
	u32 cluster_mask = sc->fw_debug_dump_cluster_mask;
	struct ice_hw *hw = &sc->hw;
	int bit;

	ice_debug(hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);

	/* Cluster ID ranges differ between the E810 and E830 families */
	if (ice_is_e830(hw)) {
		max_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E830;
		start_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E830;
	} else {
		max_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E810;
		start_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E810;
	}

	if (cluster_mask != 0) {
		/* Dump exactly the clusters the user selected */
		for_each_set_bit(bit, &cluster_mask,
		    sizeof(cluster_mask) * BITS_PER_BYTE) {
			ice_fw_debug_dump_print_cluster(sc, sbuf,
			    bit + start_cluster_id);
		}
	} else {
		/* Mask of 0: walk the FW's next-cluster chain instead */
		next_cluster_id = start_cluster_id;

		/* We don't support QUEUE_MNG and FULL_CSR_SPACE */
		do {
			next_cluster_id = ice_fw_debug_dump_print_cluster(sc, sbuf,
			    next_cluster_id);
		} while ((next_cluster_id != 0) && (next_cluster_id < max_cluster_id));
	}
}

#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \
"\nWrite 1 to output a FW debug dump containing the clusters specified by the" \
"\n\"clusters\" sysctl." \
"\n" \
"\nThe \"-b\" flag must be used in order to dump this data as binary data because" \
"\nthis data is opaque and not a string."
/* Rough per-cluster text-size estimates used only for the sysctl
 * "how big could the output be" probe below.
 */
#define ICE_FW_DUMP_BASE_TEXT_SIZE	(1024 * 1024)
#define ICE_FW_DUMP_ALL_TEXT_SIZE	(10 * 1024 * 1024)
#define ICE_FW_DUMP_CLUST0_TEXT_SIZE	(2 * 1024 * 1024)
#define ICE_FW_DUMP_CLUST1_TEXT_SIZE	(128 * 1024)
#define ICE_FW_DUMP_CLUST2_TEXT_SIZE	(2 * 1024 * 1024)

/**
 * ice_sysctl_fw_debug_dump_do_dump - Dump data from FW to sysctl output
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Sysctl handler for the debug.dump.dump sysctl. Prints out a specially-
 * formatted dump of some debug FW data intended to be processed by a special
 * Intel tool. Prints out the cluster data specified by the "clusters"
 * sysctl.
 *
 * @remark The actual AQ calls and printing are handled by a helper
 * function above.
 */
static int
ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	device_t dev = sc->dev;
	struct sbuf *sbuf;
	int ret;

	UNREFERENCED_PARAMETER(arg2);

	ret = priv_check(curthread, PRIV_DRIVER);
	if (ret)
		return (ret);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* If the user hasn't written "1" to this sysctl yet: */
	if (!ice_test_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP)) {
		/* Avoid output on the first set of reads to this sysctl in
		 * order to prevent a null byte from being written to the
		 * end result when called via sysctl(8).
		 */
		if (req->oldptr == NULL && req->newptr == NULL) {
			ret = SYSCTL_OUT(req, 0, 0);
			return (ret);
		}

		char input_buf[2] = "";
		ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req);
		if ((ret) || (req->newptr == NULL))
			return (ret);

		/* If we get '1', then indicate we'll do a dump in the next
		 * sysctl read call.
		 */
		if (input_buf[0] == '1') {
			if (sc->fw_debug_dump_cluster_mask ==
			    ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID) {
				device_printf(dev,
				    "%s: Debug Dump failed because an invalid cluster was specified.\n",
				    __func__);
				return (EINVAL);
			}

			ice_set_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP);
			return (0);
		}

		return (EINVAL);
	}

	/* --- FW debug dump state is set --- */

	/* Caller just wants the upper bound for size */
	if (req->oldptr == NULL && req->newptr == NULL) {
		size_t est_output_len = ICE_FW_DUMP_BASE_TEXT_SIZE;
		if (sc->fw_debug_dump_cluster_mask == 0)
			est_output_len += ICE_FW_DUMP_ALL_TEXT_SIZE;
		else {
			if (sc->fw_debug_dump_cluster_mask & 0x1)
				est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE;
			if (sc->fw_debug_dump_cluster_mask & 0x2)
				est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE;
			if (sc->fw_debug_dump_cluster_mask & 0x4)
				est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE;
		}

		ret = SYSCTL_OUT(req, 0, est_output_len);
		return (ret);
	}

	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);

	ice_fw_debug_dump_print_clusters(sc, sbuf);

	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	/* One-shot: the user must write "1" again before the next dump */
	ice_clear_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP);

	/* ret is still 0 here (from priv_check) on this success path */
	return (ret);
}

/**
 * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver
 * @sc: device private structure
 *
 * Add sysctls related to debugging the device driver. Generally these should
 * simply be sysctls which dump internal driver state, to aid in understanding
 * what the driver is doing.
 */
static void
ice_add_debug_sysctls(struct ice_softc *sc)
{
	struct sysctl_oid *sw_node, *dump_node;
	struct sysctl_oid_list *debug_list, *sw_list, *dump_list;
	device_t dev = sc->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);

	debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_WR, sc, 0,
	    ice_sysctl_request_reset, "A",
	    ICE_SYSCTL_HELP_REQUEST_RESET);

	/* Reset / MDD event counters */
	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
	    &sc->soft_stats.pfr_count, 0,
	    "# of PF resets handled");

	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
	    &sc->soft_stats.corer_count, 0,
	    "# of CORE resets handled");

	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
	    &sc->soft_stats.globr_count, 0,
	    "# of Global resets handled");

	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
	    &sc->soft_stats.empr_count, 0,
	    "# of EMP resets handled");

	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
	    &sc->soft_stats.tx_mdd_count, 0,
	    "# of Tx MDD events detected");

	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
	    &sc->soft_stats.rx_mdd_count, 0,
	    "# of Rx MDD events detected");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "state",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_dump_state_flags, "A",
	    "Driver State Flags");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "set_link",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RW, sc, 0,
	    ice_sysctl_debug_set_link, "CU", "Set link");

	/* PHY configuration / capability queries */
	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_type_low",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U64 | CTLFLAG_RW, sc, 0,
	    ice_sysctl_phy_type_low, "QU",
	    "PHY type Low from Get PHY Caps/Set PHY Cfg");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_type_high",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U64 | CTLFLAG_RW, sc, 0,
	    ice_sysctl_phy_type_high, "QU",
	    "PHY type High from Get PHY Caps/Set PHY Cfg");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_sw_caps",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0,
	    ice_sysctl_phy_sw_caps, "",
	    "Get PHY Capabilities (Software configuration)");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_nvm_caps",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0,
	    ice_sysctl_phy_nvm_caps, "",
	    "Get PHY Capabilities (NVM configuration)");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_topo_caps",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0,
	    ice_sysctl_phy_topo_caps, "",
	    "Get PHY Capabilities (Topology configuration)");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_link_status",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0,
	    ice_sysctl_phy_link_status, "",
	    "Get PHY Link Status");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_diag_data",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_read_i2c_diag_data, "A",
	    "Dump selected diagnostic data from FW");

	SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD, &sc->hw.fw_build, 0,
	    "FW Build ID");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_os_pkg_version, "A",
	    "DDP package name and version found in ice_ddp");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "cur_lldp_persist_status",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_fw_cur_lldp_persist_status, "A",
	    "Current LLDP persistent status");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "dflt_lldp_persist_status",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_fw_dflt_lldp_persist_status, "A",
	    "Default LLDP persistent status");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "negotiated_fc",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_negotiated_fc, "A",
	    "Current Negotiated Flow Control mode");

	/* PHY statistics are only registered when the feature is enabled;
	 * this condition continues on the next source chunk.
	 */
	if (ice_is_bit_set(sc->feat_en,
	    ICE_FEATURE_PHY_STATISTICS)) {
		SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_statistics",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
		    ice_sysctl_dump_phy_stats, "A",
		    "Dumps PHY statistics from firmware");
	}

	/* DCBX MIB / VSI / ETS dumps */
	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "local_dcbx_cfg",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, ICE_AQ_LLDP_MIB_LOCAL,
	    ice_sysctl_dump_dcbx_cfg, "A",
	    "Dumps Local MIB information from firmware");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "remote_dcbx_cfg",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, ICE_AQ_LLDP_MIB_REMOTE,
	    ice_sysctl_dump_dcbx_cfg, "A",
	    "Dumps Remote MIB information from firmware");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "pf_vsi_cfg",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_dump_vsi_cfg, "A",
	    "Dumps Selected PF VSI parameters from firmware");

	SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "query_port_ets",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_query_port_ets, "A",
	    "Prints selected output from Query Port ETS AQ command");

	SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "rx_length_errors",
	    CTLFLAG_RD | CTLFLAG_STATS, &sc->stats.cur.rx_len_errors, 0,
	    "Receive Length Errors (SNAP packets)");

	/* Sub-tree for switch filter dumps */
	sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
	    "Switch Configuration");
	sw_list = SYSCTL_CHILDREN(sw_node);

	SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "mac_filters",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_dump_mac_filters, "A",
	    "MAC Filters");

	SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "vlan_filters",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_dump_vlan_filters, "A",
	    "VLAN Filters");

	SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "ethertype_filters",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_dump_ethertype_filters, "A",
	    "Ethertype Filters");

	SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "ethertype_mac_filters",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ice_sysctl_dump_ethertype_mac_filters, "A",
	    "Ethertype/MAC Filters");

	/* Sub-tree for the FW debug dump sysctls */
	dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump",
	    ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
	    "Internal FW Dump");
	dump_list = SYSCTL_CHILDREN(dump_node);

	SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
	    ice_sysctl_fw_debug_dump_cluster_setting, "SU",
	    ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING);

	SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, ice_sysctl_fw_debug_dump_do_dump, "",
	    ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP);
}

/**
 * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI
 * @vsi: the VSI to disable
 *
 * Disables the Tx queues associated with this VSI. Essentially the opposite
 * of ice_cfg_vsi_for_tx.
 */
int
ice_vsi_disable_tx(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	int status;
	u32 *q_teids;
	u16 *q_ids, *q_handles;
	size_t q_teids_size, q_ids_size, q_handles_size;
	int tc, j, buf_idx, err = 0;

	/* The scratch buffers are indexed per-queue; bail out for queue
	 * counts beyond what this path supports.
	 */
	if (vsi->num_tx_queues > 255)
		return (ENOSYS);

	q_teids_size = sizeof(*q_teids) * vsi->num_tx_queues;
	q_teids = (u32 *)malloc(q_teids_size, M_ICE, M_NOWAIT|M_ZERO);
	if (!q_teids)
		return (ENOMEM);

	q_ids_size = sizeof(*q_ids) * vsi->num_tx_queues;
	q_ids = (u16 *)malloc(q_ids_size, M_ICE, M_NOWAIT|M_ZERO);
	if (!q_ids) {
		err = (ENOMEM);
		goto free_q_teids;
	}

	q_handles_size = sizeof(*q_handles) * vsi->num_tx_queues;
	q_handles = (u16 *)malloc(q_handles_size, M_ICE, M_NOWAIT|M_ZERO);
	if (!q_handles) {
		err = (ENOMEM);
		goto free_q_ids;
	}

	ice_for_each_traffic_class(tc) {
		struct ice_tc_info *tc_info = &vsi->tc_info[tc];
		u16 start_idx, end_idx;

		/* Skip rest of disabled TCs once the first
		 * disabled TC is found */
		if (!(vsi->tc_map & BIT(tc)))
			break;

		/* Fill out TX queue information for this TC */
		start_idx = tc_info->qoffset;
		end_idx = start_idx + tc_info->qcount_tx;
		buf_idx = 0;
		for (j = start_idx; j < end_idx; j++) {
			struct ice_tx_queue *txq = &vsi->tx_queues[j];

			q_ids[buf_idx] = vsi->tx_qmap[j];
			q_handles[buf_idx] = txq->q_handle;
			q_teids[buf_idx] = txq->q_teid;
			buf_idx++;
		}

		status = ice_dis_vsi_txq(hw->port_info, vsi->idx, tc, buf_idx,
		    q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL);
		if (status == ICE_ERR_DOES_NOT_EXIST) {
			; /* Queues have already been disabled, no need to report this as an error */
		} else if (status == ICE_ERR_RESET_ONGOING) {
			device_printf(sc->dev,
			    "Reset in progress. LAN Tx queues already disabled\n");
			break;
		} else if (status) {
			device_printf(sc->dev,
			    "Failed to disable LAN Tx queues: err %s aq_err %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			err = (ENODEV);
			break;
		}

		/* Clear buffers */
		memset(q_teids, 0, q_teids_size);
		memset(q_ids, 0, q_ids_size);
		memset(q_handles, 0, q_handles_size);
	}

	/* Success path falls through every label so all three scratch
	 * buffers are always released.
	 */
/* free_q_handles: */
	free(q_handles, M_ICE);
free_q_ids:
	free(q_ids, M_ICE);
free_q_teids:
	free(q_teids, M_ICE);

	return err;
}

/**
 * ice_vsi_set_rss_params - Set the RSS parameters for the VSI
 * @vsi: the VSI to configure
 *
 * Sets the RSS table size and lookup table type for the VSI based on its
 * VSI type.
 */
static void
ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;
	struct ice_hw_common_caps *cap;

	cap = &sc->hw.func_caps.common_cap;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* The PF VSI inherits RSS instance of the PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_lut_type = ICE_LUT_PF;
		break;
	case ICE_VSI_VF:
	case ICE_VSI_VMDQ2:
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	default:
		device_printf(sc->dev,
		    "VSI %d: RSS not supported for VSI type %d\n",
		    vsi->idx, vsi->type);
		break;
	}
}

/**
 * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls
 * @vsi: The VSI to add the context for
 *
 * Creates a sysctl context for storing txq sysctls. Additionally creates
 * a node rooted at the given VSI's main sysctl node. This context will be
 * used to store per-txq sysctls which may need to be released during the
 * driver's lifetime.
 */
void
ice_vsi_add_txqs_ctx(struct ice_vsi *vsi)
{
	struct sysctl_oid_list *vsi_list;

	sysctl_ctx_init(&vsi->txqs_ctx);

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs",
	    CTLFLAG_RD, NULL, "Tx Queues");
}

/**
 * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls
 * @vsi: The VSI to add the context for
 *
 * Creates a sysctl context for storing rxq sysctls. Additionally creates
 * a node rooted at the given VSI's main sysctl node. This context will be
 * used to store per-rxq sysctls which may need to be released during the
 * driver's lifetime.
 */
void
ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi)
{
	struct sysctl_oid_list *vsi_list;

	sysctl_ctx_init(&vsi->rxqs_ctx);

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs",
	    CTLFLAG_RD, NULL, "Rx Queues");
}

/**
 * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI
 * @vsi: The VSI to delete from
 *
 * Frees the txq sysctl context created for storing the per-queue Tx sysctls.
 * Must be called prior to freeing the Tx queue memory, in order to avoid
 * having sysctls point at stale memory.
 */
void
ice_vsi_del_txqs_ctx(struct ice_vsi *vsi)
{
	device_t dev = vsi->sc->dev;
	int err;

	if (vsi->txqs_node) {
		err = sysctl_ctx_free(&vsi->txqs_ctx);
		if (err)
			device_printf(dev,
			    "failed to free VSI %d txqs_ctx, err %s\n",
			    vsi->idx, ice_err_str(err));
		vsi->txqs_node = NULL;
	}
}

/**
 * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI
 * @vsi: The VSI to delete from
 *
 * Frees the rxq sysctl context created for storing the per-queue Rx sysctls.
 * Must be called prior to freeing the Rx queue memory, in order to avoid
 * having sysctls point at stale memory.
 */
void
ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi)
{
	device_t dev = vsi->sc->dev;
	int err;

	if (vsi->rxqs_node) {
		err = sysctl_ctx_free(&vsi->rxqs_ctx);
		if (err)
			device_printf(dev,
			    "failed to free VSI %d rxqs_ctx, err %s\n",
			    vsi->idx, ice_err_str(err));
		vsi->rxqs_node = NULL;
	}
}

/**
 * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue
 * @txq: pointer to the Tx queue
 *
 * Add per-queue sysctls for a given Tx queue. Can't be called during
 * ice_add_vsi_sysctls, since the queue memory has not yet been setup.
 */
void
ice_add_txq_sysctls(struct ice_tx_queue *txq)
{
	struct ice_vsi *vsi = txq->vsi;
	struct sysctl_ctx_list *ctx = &vsi->txqs_ctx;
	struct sysctl_oid_list *txqs_list, *this_txq_list;
	struct sysctl_oid *txq_node;
	char txq_name[32], txq_desc[32];

	/* Null-terminated table of stat pointers and their sysctl names */
	const struct ice_sysctl_info ctls[] = {
		{ &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" },
		{ &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" },
		{ &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" },
		{ &txq->stats.tso, "tso", "TSO packets" },
		{ 0, 0, 0 }
	};

	const struct ice_sysctl_info *entry = ctls;

	txqs_list = SYSCTL_CHILDREN(vsi->txqs_node);

	snprintf(txq_name, sizeof(txq_name), "%u", txq->me);
	snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me);
	txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name,
	    CTLFLAG_RD, NULL, txq_desc);
	this_txq_list = SYSCTL_CHILDREN(txq_node);

	/* Add the Tx queue statistics */
	while (entry->stat != 0) {
		SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name,
		    CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
		    entry->description);
		entry++;
	}

	SYSCTL_ADD_U8(ctx, this_txq_list, OID_AUTO, "tc",
	    CTLFLAG_RD, &txq->tc, 0,
	    "Traffic Class that Queue belongs to");
}

/**
 * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue
 * @rxq: pointer to the Rx queue
 *
 * Add per-queue sysctls for a given Rx queue. Can't be called during
 * ice_add_vsi_sysctls, since the queue memory has not yet been setup.
 */
void
ice_add_rxq_sysctls(struct ice_rx_queue *rxq)
{
	struct ice_vsi *vsi = rxq->vsi;
	struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx;
	struct sysctl_oid_list *rxqs_list, *this_rxq_list;
	struct sysctl_oid *rxq_node;
	char rxq_name[32], rxq_desc[32];

	/* Null-terminated table of stat pointers and their sysctl names */
	const struct ice_sysctl_info ctls[] = {
		{ &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" },
		{ &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" },
		{ &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" },
		{ 0, 0, 0 }
	};

	const struct ice_sysctl_info *entry = ctls;

	rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node);

	snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me);
	snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me);
	rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name,
	    CTLFLAG_RD, NULL, rxq_desc);
	this_rxq_list = SYSCTL_CHILDREN(rxq_node);

	/* Add the Rx queue statistics */
	while (entry->stat != 0) {
		SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name,
		    CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
		    entry->description);
		entry++;
	}

	SYSCTL_ADD_U8(ctx, this_rxq_list, OID_AUTO, "tc",
	    CTLFLAG_RD, &rxq->tc, 0,
	    "Traffic Class that Queue belongs to");
}

/**
 * ice_get_default_rss_key - Obtain a default RSS key
 * @seed: storage for the RSS key data
 *
 * Copies a pre-generated RSS key into the seed memory. The seed pointer must
 * point to a block of memory that is at least 40 bytes in size.
 *
 * The key isn't randomly generated each time this function is called because
 * that makes the RSS key change every time we reconfigure RSS. This does mean
 * that we're hard coding a possibly 'well known' key. We might want to
 * investigate randomly generating this key once during the first call.
 */
static void
ice_get_default_rss_key(u8 *seed)
{
	const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = {
		0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8,
		0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97,
		0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0,
		0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5,
	};

	bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
}

/**
 * ice_set_rss_key - Configure a given VSI with the default RSS key
 * @vsi: the VSI to configure
 *
 * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key.
 * If the kernel RSS interface is not available, this will fall back to our
 * pre-generated hash seed from ice_get_default_rss_key().
 */
static int
ice_set_rss_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} };
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	int status;

	/*
	 * If the RSS kernel interface is disabled, this will return the
	 * default RSS key above.
	 */
	rss_getkey(keydata.standard_rss_key);

	status = ice_aq_set_rss_key(hw, vsi->idx, &keydata);
	if (status) {
		device_printf(sc->dev,
		    "ice_aq_set_rss_key status %s, error %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	return (0);
}

/**
 * ice_set_rss_flow_flds - Program the RSS hash flows after package init
 * @vsi: the VSI to configure
 *
 * If the package file is initialized, the default RSS flows are reset. We
 * need to reprogram the expected hash configuration. We'll use
 * rss_gethashconfig() to determine which flows to enable. If RSS kernel
 * support is not enabled, this macro will fall back to suitable defaults.
 */
static void
ice_set_rss_flow_flds(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false };
	device_t dev = sc->dev;
	int status;
	u_int rss_hash_config;

	rss_hash_config = rss_gethashconfig();

	/* One ice_add_rss_cfg() call per enabled kernel RSS hash type;
	 * failures are logged but non-fatal so remaining flows still get
	 * programmed.
	 */
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) {
		rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
		rss_cfg.hash_flds = ICE_FLOW_HASH_IPV4;
		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
		if (status)
			device_printf(dev,
			    "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n",
			    vsi->idx,
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) {
		rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
		rss_cfg.hash_flds = ICE_HASH_TCP_IPV4;
		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
		if (status)
			device_printf(dev,
			    "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n",
			    vsi->idx, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) {
		rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP;
		rss_cfg.hash_flds = ICE_HASH_UDP_IPV4;
		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
		if (status)
			device_printf(dev,
			    "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n",
			    vsi->idx, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
	if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) {
		rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6;
		rss_cfg.hash_flds = ICE_FLOW_HASH_IPV6;
		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
		if (status)
			device_printf(dev,
			    "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n",
			    vsi->idx, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) {
		rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP;
		rss_cfg.hash_flds = ICE_HASH_TCP_IPV6;
		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
		if (status)
			device_printf(dev,
			    "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n",
			    vsi->idx, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) {
		rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP;
		rss_cfg.hash_flds = ICE_HASH_UDP_IPV6;
		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
		if (status)
			device_printf(dev,
			    "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n",
			    vsi->idx, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}

	/* Warn about RSS hash types which are not supported */
	/* coverity[dead_error_condition] */
	if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) {
		device_printf(dev,
		    "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n",
		    vsi->idx);
	}
}

/**
 * ice_set_rss_lut - Program the RSS lookup table for a VSI
 * @vsi: the VSI to configure
 *
 * Programs the RSS lookup table for a given VSI. We use
 * rss_get_indirection_to_bucket which will use the indirection table provided
 * by the kernel RSS interface when available. If the kernel RSS interface is
 * not available, we will fall back to a simple round-robin fashion queue
 * assignment.
 */
static int
ice_set_rss_lut(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct ice_aq_get_set_rss_lut_params lut_params;
	int status;
	int i, err = 0;
	u8 *lut;

	lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO);
	if (!lut) {
		device_printf(dev, "Failed to allocate RSS lut memory\n");
		return (ENOMEM);
	}

	/* Populate the LUT with max no. of queues. If the RSS kernel
	 * interface is disabled, this will assign the lookup table in
	 * a simple round robin fashion
	 */
	for (i = 0; i < vsi->rss_table_size; i++) {
		/* XXX: this needs to be changed if num_rx_queues ever counts
		 * more than just the RSS queues */
		lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues;
	}

	lut_params.vsi_handle = vsi->idx;
	lut_params.lut_size = vsi->rss_table_size;
	lut_params.lut_type = vsi->rss_lut_type;
	lut_params.lut = lut;
	lut_params.global_lut_id = 0;
	status = ice_aq_set_rss_lut(hw, &lut_params);
	if (status) {
		device_printf(dev,
		    "Cannot set RSS lut, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		err = (EIO);
	}

	free(lut, M_ICE);
	return err;
}

/**
 * ice_config_rss - Configure RSS for a VSI
 * @vsi: the VSI to configure
 *
 * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for
 * a given VSI.
 */
int
ice_config_rss(struct ice_vsi *vsi)
{
	int err;

	/* Nothing to do, if RSS is not enabled */
	if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS))
		return 0;

	err = ice_set_rss_key(vsi);
	if (err)
		return err;

	ice_set_rss_flow_flds(vsi);

	return ice_set_rss_lut(vsi);
}

/**
 * ice_log_pkg_init - Log a message about status of DDP initialization
 * @sc: the device softc pointer
 * @pkg_status: the status result of ice_copy_and_init_pkg
 *
 * Called by ice_load_pkg after an attempt to download the DDP package
 * contents to the device to log an appropriate message for the system
 * administrator about download status.
 *
 * @post ice_is_init_pkg_successful function is used to determine
 * whether the download was successful and DDP package is compatible
 * with this driver. Otherwise driver will transition to Safe Mode.
 */
void
ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *active_pkg, *os_pkg;

	/* Pre-render both version strings so each case below can just
	 * reference sbuf_data().
	 */
	active_pkg = sbuf_new_auto();
	ice_active_pkg_version_str(hw, active_pkg);
	sbuf_finish(active_pkg);

	os_pkg = sbuf_new_auto();
	ice_os_pkg_version_str(hw, os_pkg);
	sbuf_finish(os_pkg);

	switch (pkg_status) {
	case ICE_DDP_PKG_SUCCESS:
		device_printf(dev,
		    "The DDP package was successfully loaded: %s.\n",
		    sbuf_data(active_pkg));
		break;
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_ALREADY_LOADED:
		device_printf(dev,
		    "DDP package already present on device: %s.\n",
		    sbuf_data(active_pkg));
		break;
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		device_printf(dev,
		    "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n",
		    sbuf_data(active_pkg), sbuf_data(os_pkg));
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
		device_printf(dev,
		    "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
		    sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
		device_printf(dev,
		    "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
		    sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
		/*
		 * This assumes that the active_pkg_ver will not be
		 * initialized if the ice_ddp package version is not
		 * supported.
		 */
		if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) {
			/* The ice_ddp version is not supported */
			if (pkg_ver_compatible(&hw->pkg_ver) > 0) {
				device_printf(dev,
				    "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n",
				    sbuf_data(os_pkg), ICE_PKG_SUPP_VER_MAJ,
				    ICE_PKG_SUPP_VER_MNR);
			} else if (pkg_ver_compatible(&hw->pkg_ver) < 0) {
				device_printf(dev,
				    "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n",
				    sbuf_data(os_pkg), ICE_PKG_SUPP_VER_MAJ,
				    ICE_PKG_SUPP_VER_MNR);
			} else {
				device_printf(dev,
				    "An unknown error occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
				    sbuf_data(os_pkg), sbuf_data(active_pkg),
				    ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
			}
		} else {
			if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) {
				device_printf(dev,
				    "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
				    sbuf_data(active_pkg),
				    ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
			} else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) {
				device_printf(dev,
				    "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
				    sbuf_data(active_pkg),
				    ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
			} else {
				device_printf(dev,
				    "An unknown error occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
				    sbuf_data(os_pkg), sbuf_data(active_pkg),
				    ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
			}
		}
		break;
	case ICE_DDP_PKG_INVALID_FILE:
		device_printf(dev,
		    "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n");
		break;
	case ICE_DDP_PKG_FW_MISMATCH:
		device_printf(dev,
		    "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
		break;
	case ICE_DDP_PKG_NO_SEC_MANIFEST:
	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
		device_printf(dev,
		    "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW:
		device_printf(dev,
		    "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_MANIFEST_INVALID:
	case ICE_DDP_PKG_BUFFER_INVALID:
		device_printf(dev,
		    "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n");
		break;
	default:
		device_printf(dev,
		    "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
		break;
	}

	sbuf_delete(active_pkg);
	sbuf_delete(os_pkg);
}

/**
 * ice_load_pkg_file - Load the DDP package file using firmware_get
 * @sc: device private softc
 *
 * Use firmware_get to load the DDP package memory and then request that
 * firmware download the package contents and program the relevant hardware
 * bits.
 *
 * This function makes a copy of the DDP package memory which is tracked in
 * the ice_hw structure. The copy will be managed and released by
 * ice_deinit_hw(). This allows the firmware reference to be immediately
 * released using firmware_put.
 */
int
ice_load_pkg_file(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum ice_ddp_state state;
	const struct firmware *pkg;
	int status = 0;
	u8 cached_layer_count;
	u8 *buf_copy;

	pkg = firmware_get("ice_ddp");
	if (!pkg) {
		device_printf(dev,
		    "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n");
		if (cold)
			device_printf(dev,
			    "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n");
		status = ICE_ERR_CFG;
		goto err_load_pkg;
	}

	/* Check for topology change */
	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_TX_BALANCE)) {
		cached_layer_count = hw->num_tx_sched_layers;
		/* ice_cfg_tx_topo() modifies the buffer in place, so hand it
		 * a private copy of the package data.
		 */
		buf_copy = (u8 *)malloc(pkg->datasize, M_ICE, M_NOWAIT);
		if (buf_copy == NULL)
			return ICE_ERR_NO_MEMORY;
		memcpy(buf_copy, pkg->data, pkg->datasize);
		status = ice_cfg_tx_topo(&sc->hw, buf_copy, pkg->datasize);
		free(buf_copy, M_ICE);
		/* Success indicates a change was made */
		if (!status) {
			/* 9 -> 5 */
			if (cached_layer_count == 9)
				device_printf(dev,
				    "Transmit balancing feature enabled\n");
			else
				device_printf(dev,
				    "Transmit balancing feature disabled\n");
			ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_en);
			return (status);
		} else if (status == ICE_ERR_CFG) {
			/* Status is ICE_ERR_CFG when DDP does not support transmit balancing */
			device_printf(dev,
			    "DDP package does not support transmit balancing feature - please update to the latest DDP package and try again\n");
		} else if (status == ICE_ERR_ALREADY_EXISTS) {
			/* Requested config already loaded */
		} else if (status == ICE_ERR_AQ_ERROR) {
			device_printf(dev,
			    "Error configuring transmit balancing: %s\n",
			    ice_status_str(status));
		}
	}

	/* Copy and download the pkg contents */
	state = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize);

	/* Release the firmware reference */
	firmware_put(pkg, FIRMWARE_UNLOAD);

	/* Check the active DDP package version and log a message */
	ice_log_pkg_init(sc, state);

	/* Place the driver into safe mode */
	if (ice_is_init_pkg_successful(state))
		return (ICE_ERR_ALREADY_EXISTS);

err_load_pkg:
	/* No usable package: force Safe Mode as the only enabled feature */
	ice_zero_bitmap(sc->feat_cap, ICE_FEATURE_COUNT);
	ice_zero_bitmap(sc->feat_en, ICE_FEATURE_COUNT);
	ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);

	return (status);
}

/**
 *
ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter
 * @vsi: the vsi to retrieve the value for
 * @counter: the counter type to retrieve
 *
 * Returns the value for a given ifnet counter. To do so, we calculate the
 * value based on the matching hardware statistics.
 */
uint64_t
ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
{
	struct ice_hw_port_stats *port_stats = &vsi->sc->stats.cur;
	struct ice_eth_stats *eth_stats = &vsi->hw_stats.cur;

	/*
	 * Per-VSI counters are preferred; for statistics that are only
	 * tracked per-port (mainly error flows), report the port-wide
	 * counters instead.
	 */
	switch (counter) {
	case IFCOUNTER_IPACKETS:
		return (eth_stats->rx_unicast + eth_stats->rx_multicast +
		    eth_stats->rx_broadcast);
	case IFCOUNTER_IERRORS:
		/* No per-VSI error counters; use the port totals */
		return (port_stats->crc_errors + port_stats->illegal_bytes +
		    port_stats->mac_local_faults +
		    port_stats->mac_remote_faults +
		    port_stats->rx_undersize + port_stats->rx_oversize +
		    port_stats->rx_fragments + port_stats->rx_jabber);
	case IFCOUNTER_OPACKETS:
		return (eth_stats->tx_unicast + eth_stats->tx_multicast +
		    eth_stats->tx_broadcast);
	case IFCOUNTER_OERRORS:
		return (eth_stats->tx_errors);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IBYTES:
		return (eth_stats->rx_bytes);
	case IFCOUNTER_OBYTES:
		return (eth_stats->tx_bytes);
	case IFCOUNTER_IMCASTS:
		return (eth_stats->rx_multicast);
	case IFCOUNTER_OMCASTS:
		return (eth_stats->tx_multicast);
	case IFCOUNTER_IQDROPS:
		return (eth_stats->rx_discards);
	case IFCOUNTER_OQDROPS:
		return (port_stats->tx_dropped_link_down);
	case IFCOUNTER_NOPROTO:
		return (eth_stats->rx_unknown_protocol);
	default:
		/* Fall back to the stack's default accounting */
		return if_get_counter_default(vsi->sc->ifp, counter);
	}
}

/**
 * ice_save_pci_info - Save PCI configuration fields in HW struct
 * @hw: the ice_hw struct to save the PCI information in
 * @dev: the device to get the PCI information from
 *
 * This should only be called once, early in the device attach
 * process.
*/ void ice_save_pci_info(struct ice_hw *hw, device_t dev) { hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->subsystem_vendor_id = pci_get_subvendor(dev); hw->subsystem_device_id = pci_get_subdevice(dev); hw->revision_id = pci_get_revid(dev); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); } /** * ice_replay_all_vsi_cfg - Replace configuration for all VSIs after reset * @sc: the device softc * * Replace the configuration for each VSI, and then cleanup replay * information. Called after a hardware reset in order to reconfigure the * active VSIs. */ int ice_replay_all_vsi_cfg(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; int status; int i; for (i = 0 ; i < sc->num_available_vsi; i++) { struct ice_vsi *vsi = sc->all_vsi[i]; if (!vsi) continue; status = ice_replay_vsi(hw, vsi->idx); if (status) { device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } /* Cleanup replay filters after successful reconfiguration */ ice_replay_post(hw); return (0); } /** * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI * @vsi: pointer to the VSI structure * * Cleanup the advanced RSS configuration for a given VSI. This is necessary * during driver removal to ensure that all RSS resources are properly * released. * * @remark this function doesn't report an error as it is expected to be * called during driver reset and unload, and there isn't much the driver can * do if freeing RSS resources fails. 
*/ static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; status = ice_rem_vsi_rss_cfg(hw, vsi->idx); if (status) device_printf(dev, "Failed to remove RSS configuration for VSI %d, err %s\n", vsi->idx, ice_status_str(status)); /* Remove this VSI from the RSS list */ ice_rem_vsi_rss_list(hw, vsi->idx); } /** * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs * @sc: the device softc pointer * * Cleanup the advanced RSS configuration for all VSIs on a given PF * interface. * * @remark This should be called while preparing for a reset, to cleanup stale * RSS configuration for all VSIs. */ void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc) { int i; /* No need to cleanup if RSS is not enabled */ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) return; for (i = 0; i < sc->num_available_vsi; i++) { struct ice_vsi *vsi = sc->all_vsi[i]; if (vsi) ice_clean_vsi_rss_cfg(vsi); } } /** * ice_requested_fec_mode - Return the requested FEC mode as a string * @pi: The port info structure * * Return a string representing the requested FEC mode. 
*/ static const char * ice_requested_fec_mode(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data pcaps = { 0 }; int status; status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status) /* Just report unknown if we can't get capabilities */ return "Unknown"; /* Check if RS-FEC has been requested first */ if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ)) return ice_fec_str(ICE_FEC_RS); /* If RS FEC has not been requested, then check BASE-R */ if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_REQ)) return ice_fec_str(ICE_FEC_BASER); return ice_fec_str(ICE_FEC_NONE); } /** * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string * @pi: The port info structure * * Return a string representing the current FEC mode. */ static const char * ice_negotiated_fec_mode(struct ice_port_info *pi) { /* First, check if RS has been requested first */ if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN | ICE_AQ_LINK_25G_RS_544_FEC_EN)) return ice_fec_str(ICE_FEC_RS); /* If RS FEC has not been requested, then check BASE-R */ if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN) return ice_fec_str(ICE_FEC_BASER); return ice_fec_str(ICE_FEC_NONE); } /** * ice_autoneg_mode - Return string indicating of autoneg completed * @pi: The port info structure * * Return "True" if autonegotiation is completed, "False" otherwise. */ static const char * ice_autoneg_mode(struct ice_port_info *pi) { if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) return "True"; else return "False"; } /** * ice_flowcontrol_mode - Return string indicating the Flow Control mode * @pi: The port info structure * * Returns the current Flow Control mode as a string. 
*/ static const char * ice_flowcontrol_mode(struct ice_port_info *pi) { return ice_fc_str(pi->fc.current_mode); } /** * ice_link_up_msg - Log a link up message with associated info * @sc: the device private softc * * Log a link up message with LOG_NOTICE message level. Include information * about the duplex, FEC mode, autonegotiation and flow control. */ void ice_link_up_msg(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; struct ifnet *ifp = sc->ifp; const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol; speed = ice_aq_speed_to_str(hw->port_info); req_fec = ice_requested_fec_mode(hw->port_info); neg_fec = ice_negotiated_fec_mode(hw->port_info); autoneg = ice_autoneg_mode(hw->port_info); flowcontrol = ice_flowcontrol_mode(hw->port_info); log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", if_name(ifp), speed, req_fec, neg_fec, autoneg, flowcontrol); } /** * ice_update_laa_mac - Update MAC address if Locally Administered * @sc: the device softc * * Update the device MAC address when a Locally Administered Address is * assigned. * * This function does *not* update the MAC filter list itself. Instead, it * should be called after ice_rm_pf_default_mac_filters, so that the previous * address filter will be removed, and before ice_cfg_pf_default_mac_filters, * so that the new address filter will be assigned. 
*/
int
ice_update_laa_mac(struct ice_softc *sc)
{
	const u8 *lladdr = (const u8 *)if_getlladdr(sc->ifp);
	struct ice_hw *hw = &sc->hw;
	int err;

	/* No update needed when the address has not changed */
	if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN))
		return (0);

	/* A unicast LAA is required; reject multicast addresses */
	if (ETHER_IS_MULTICAST(lladdr))
		return (EINVAL);

	/* Program the new address into firmware */
	err = ice_aq_manage_mac_write(hw, lladdr,
	    ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
	if (err) {
		device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n",
			      lladdr, ":", ice_status_str(err),
			      ice_aq_str(hw->adminq.sq_last_status));
		return (EFAULT);
	}

	/* Copy the address into place of the LAN address. */
	bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN);

	return (0);
}

/**
 * ice_get_and_print_bus_info - Save (PCI) bus info and print messages
 * @sc: device softc
 *
 * This will potentially print out a warning message if bus bandwidth
 * is insufficient for full-speed operation. This will not print out anything
 * for E82x devices since those are in SoCs, do not report valid PCIe info,
 * and cannot be moved to a different slot.
 *
 * This should only be called once, during the attach process, after
 * hw->port_info has been filled out with port link topology information
 * (from the Get PHY Capabilities Admin Queue command).
*/ void ice_get_and_print_bus_info(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; u16 pci_link_status; int offset; if (!ice_is_e810(hw) && !ice_is_e830(hw)) return; pci_find_cap(dev, PCIY_EXPRESS, &offset); pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2); /* Fill out hw struct with PCIE link status info */ ice_set_pci_link_status_data(hw, pci_link_status); /* Use info to print out bandwidth messages */ ice_print_bus_link_data(dev, hw); if (ice_pcie_bandwidth_check(sc)) { device_printf(dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); device_printf(dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } } /** * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to * a 64-bit baudrate. * @speed: enum value to convert * * This only goes up to PCIE Gen 5. */ static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed) { /* If the PCI-E speed is Gen1 or Gen2, then report * only 80% of bus speed to account for encoding overhead. */ switch (speed) { case ice_pcie_speed_2_5GT: return IF_Gbps(2); case ice_pcie_speed_5_0GT: return IF_Gbps(4); case ice_pcie_speed_8_0GT: return IF_Gbps(8); case ice_pcie_speed_16_0GT: return IF_Gbps(16); case ice_pcie_speed_32_0GT: return IF_Gbps(32); case ice_pcie_speed_unknown: default: return 0; } } /** * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to * a 32-bit number. 
* @width: enum value to convert
 */
static int
ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width)
{
	switch (width) {
	case ice_pcie_lnk_x1:
		return (1);
	case ice_pcie_lnk_x2:
		return (2);
	case ice_pcie_lnk_x4:
		return (4);
	case ice_pcie_lnk_x8:
		return (8);
	case ice_pcie_lnk_x12:
		return (12);
	case ice_pcie_lnk_x16:
		return (16);
	case ice_pcie_lnk_x32:
		return (32);
	case ice_pcie_lnk_width_resrv:
	case ice_pcie_lnk_width_unknown:
	default:
		/* Reserved/unknown widths contribute zero lanes */
		return (0);
	}
}

/**
 * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for
 * full-speed device operation.
 * @sc: adapter softc
 *
 * Returns 0 if sufficient; 1 if not.
 */
static uint8_t
ice_pcie_bandwidth_check(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	int num_ports, pcie_width;
	u64 pcie_speed, port_speed;

	MPASS(hw->port_info);

	/* Number of PFs sharing this PCIe link */
	num_ports = bitcount32(hw->func_caps.common_cap.valid_functions);
	port_speed = ice_phy_types_to_max_rate(hw->port_info);
	pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed);
	pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width);

	/*
	 * If 2x100 on E810 or 2x200 on E830, clamp ports to 1 -- 2nd port is
	 * intended for failover.
	 */
	if ((port_speed >= IF_Gbps(100)) &&
	    ((port_speed == IF_Gbps(100) && ice_is_e810(hw)) ||
	    (port_speed == IF_Gbps(200) && ice_is_e830(hw))))
		num_ports = 1;

	/* Insufficient when aggregate port rate exceeds the PCIe link rate */
	return !!((num_ports * port_speed) > pcie_speed * pcie_width);
}

/**
 * ice_print_bus_link_data - Print PCI-E bandwidth information
 * @dev: device to print string for
 * @hw: hw struct with PCI-e link information
 */
static void
ice_print_bus_link_data(device_t dev, struct ice_hw *hw)
{
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ice_pcie_speed_32_0GT) ? "32.0GT/s" :
	    (hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" :
	    (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" :
	    (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" :
	    (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"),
	    (hw->bus.width == ice_pcie_lnk_x32) ? "x32" :
	    (hw->bus.width == ice_pcie_lnk_x16) ? "x16" :
	    (hw->bus.width == ice_pcie_lnk_x12) ? "x12" :
	    (hw->bus.width == ice_pcie_lnk_x8) ? "x8" :
	    (hw->bus.width == ice_pcie_lnk_x4) ? "x4" :
	    (hw->bus.width == ice_pcie_lnk_x2) ? "x2" :
	    (hw->bus.width == ice_pcie_lnk_x1) ? "x1" : "Unknown");
}

/**
 * ice_set_pci_link_status_data - store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status word from PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ice_hw structure
 **/
static void
ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status)
{
	u16 reg;

	hw->bus.type = ice_bus_pci_express;

	/* Shift the negotiated link width field down to bit 0 (the field
	 * starts at bit 4 of the PCIe Link Status register).
	 */
	reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4;

	switch (reg) {
	case ice_pcie_lnk_x1:
	case ice_pcie_lnk_x2:
	case ice_pcie_lnk_x4:
	case ice_pcie_lnk_x8:
	case ice_pcie_lnk_x12:
	case ice_pcie_lnk_x16:
	case ice_pcie_lnk_x32:
		hw->bus.width = (enum ice_pcie_link_width)reg;
		break;
	default:
		hw->bus.width = ice_pcie_lnk_width_unknown;
		break;
	}

	/* The ice_pcie_bus_speed enum values are biased by 0x13 from the raw
	 * Link Status speed encoding, so adding 0x13 maps the register field
	 * directly onto the enum (validated by the switch below).
	 */
	reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13;

	switch (reg) {
	case ice_pcie_speed_2_5GT:
	case ice_pcie_speed_5_0GT:
	case ice_pcie_speed_8_0GT:
	case ice_pcie_speed_16_0GT:
	case ice_pcie_speed_32_0GT:
		hw->bus.speed = (enum ice_pcie_bus_speed)reg;
		break;
	default:
		hw->bus.speed = ice_pcie_speed_unknown;
		break;
	}
}

/**
 * ice_init_link_events - Initialize Link Status Events mask
 * @sc: the device softc
 *
 * Initialize the Link Status Events mask to disable notification of link
 * events we don't care about in software. Also request that link status
 * events be enabled.
*/ int ice_init_link_events(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; int status; u16 wanted_events; /* Set the bits for the events that we want to be notified by */ wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL); /* request that every event except the wanted events be masked */ status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL); if (status) { device_printf(sc->dev, "Failed to set link status event mask, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } /* Request link info with the LSE bit set to enable link status events */ status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL); if (status) { device_printf(sc->dev, "Failed to enable link status events, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } return (0); } #ifndef GL_MDET_TX_TCLAN /* Temporarily use this redefinition until the definition is fixed */ #define GL_MDET_TX_TCLAN E800_GL_MDET_TX_TCLAN #define PF_MDET_TX_TCLAN E800_PF_MDET_TX_TCLAN #endif /* !defined(GL_MDET_TX_TCLAN) */ /** * ice_handle_mdd_event - Handle possibly malicious events * @sc: the device softc * * Called by the admin task if an MDD detection interrupt is triggered. * Identifies possibly malicious events coming from VFs. Also triggers for * similar incorrect behavior from the PF as well. 
*/
void
ice_handle_mdd_event(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	bool mdd_detected = false, request_reinit = false;
	device_t dev = sc->dev;
	u32 reg;

	/* Only run when the admin task flagged a pending MDD event */
	if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING))
		return;

	/* Check the Tx descriptor (TCLAN) MDD detection register */
	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S;

		device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n",
			      ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num);

		/* Only clear this event if it matches this PF, that way other
		 * PFs can read the event and determine VF and queue number.
		 */
		if (pf_num == hw->pf_id)
			wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);

		mdd_detected = true;
	}

	/* Check the Tx quanta (PQM) MDD detection register */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S;

		device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n",
			      ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num);

		/* Only clear this event if it matches this PF, that way other
		 * PFs can read the event and determine VF and queue number.
		 */
		if (pf_num == hw->pf_id)
			wr32(hw, GL_MDET_TX_PQM, 0xffffffff);

		mdd_detected = true;
	}

	/* Check the Rx MDD detection register */
	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S;
		u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S;

		device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n",
			      ice_mdd_rx_str(event), queue, pf_num, vf_num);

		/* Only clear this event if it matches this PF, that way other
		 * PFs can read the event and determine VF and queue number.
		 */
		if (pf_num == hw->pf_id)
			wr32(hw, GL_MDET_RX, 0xffffffff);

		mdd_detected = true;
	}

	/* Now, confirm that this event actually affects this PF, by checking
	 * the PF registers.
	 */
	if (mdd_detected) {
		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xffff);
			sc->soft_stats.tx_mdd_count++;
			request_reinit = true;
		}

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xffff);
			sc->soft_stats.tx_mdd_count++;
			request_reinit = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xffff);
			sc->soft_stats.rx_mdd_count++;
			request_reinit = true;
		}
	}

	/* TODO: Implement logic to detect and handle events caused by VFs. */

	/* request that the upper stack re-initialize the Tx/Rx queues */
	if (request_reinit)
		ice_request_stack_reinit(sc);

	ice_flush(hw);
}

/**
 * ice_start_dcbx_agent - Start DCBX agent in FW via AQ command
 * @sc: the device softc
 *
 * @pre device is DCB capable and the FW LLDP agent has started
 *
 * Checks DCBX status and starts the DCBX agent if it is not in
 * a valid state via an AQ command.
*/ static void ice_start_dcbx_agent(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; bool dcbx_agent_status; int status; hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw); if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE && hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) { /* * Start DCBX agent, but not LLDP. The return value isn't * checked here because a more detailed dcbx agent status is * retrieved and checked in ice_init_dcb() and elsewhere. */ status = ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL); if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) device_printf(dev, "start_stop_dcbx failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } } /** * ice_init_dcb_setup - Initialize DCB settings for HW * @sc: the device softc * * This needs to be called after the fw_lldp_agent sysctl is added, since that * can update the device's LLDP agent status if a tunable value is set. * * Get and store the initial state of DCB settings on driver load. Print out * informational messages as well. */ void ice_init_dcb_setup(struct ice_softc *sc) { struct ice_dcbx_cfg *local_dcbx_cfg; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; u8 pfcmode_ret; /* Don't do anything if DCB isn't supported */ if (!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DCB)) { device_printf(dev, "%s: No DCB support\n", __func__); return; } /* Starts DCBX agent if it needs starting */ ice_start_dcbx_agent(sc); /* This sets hw->port_info->qos_cfg.is_sw_lldp */ status = ice_init_dcb(hw, true); /* If there is an error, then FW LLDP is not in a usable state */ if (status != 0 && status != ICE_ERR_NOT_READY) { /* Don't print an error message if the return code from the AQ * cmd performed in ice_init_dcb() is EPERM; that means the * FW LLDP engine is disabled, and that is a valid state. 
*/ if (!(status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) { device_printf(dev, "DCB init failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; } switch (hw->port_info->qos_cfg.dcbx_status) { case ICE_DCBX_STATUS_DIS: ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n"); break; case ICE_DCBX_STATUS_NOT_STARTED: ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n"); break; case ICE_DCBX_STATUS_MULTIPLE_PEERS: ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n"); break; default: break; } /* LLDP disabled in FW */ if (hw->port_info->qos_cfg.is_sw_lldp) { ice_add_rx_lldp_filter(sc); device_printf(dev, "Firmware LLDP agent disabled\n"); } /* Query and cache PFC mode */ status = ice_aq_query_pfc_mode(hw, &pfcmode_ret, NULL); if (status) { device_printf(dev, "PFC mode query failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg; switch (pfcmode_ret) { case ICE_AQC_PFC_VLAN_BASED_PFC: local_dcbx_cfg->pfc_mode = ICE_QOS_MODE_VLAN; break; case ICE_AQC_PFC_DSCP_BASED_PFC: local_dcbx_cfg->pfc_mode = ICE_QOS_MODE_DSCP; break; default: /* DCB is disabled, but we shouldn't get here */ break; } /* Set default SW MIB for init */ ice_set_default_local_mib_settings(sc); ice_set_bit(ICE_FEATURE_DCB, sc->feat_en); } /** * ice_dcb_get_tc_map - Scans config to get bitmap of enabled TCs * @dcbcfg: DCB configuration to examine * * Scans a TC mapping table inside dcbcfg to find traffic classes * enabled and @returns a bitmask of enabled TCs */ u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg) { u8 tc_map = 0; int i = 0; switch (dcbcfg->pfc_mode) { case ICE_QOS_MODE_VLAN: /* XXX: "i" is actually "User Priority" here, not * Traffic Class, but the max for both is 8, so it works * out here. 
*/ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) tc_map |= BIT(dcbcfg->etscfg.prio_table[i]); break; case ICE_QOS_MODE_DSCP: for (i = 0; i < ICE_DSCP_NUM_VAL; i++) tc_map |= BIT(dcbcfg->dscp_map[i]); break; default: /* Invalid Mode */ tc_map = ICE_DFLT_TRAFFIC_CLASS; break; } return (tc_map); } /** * ice_dcb_get_num_tc - Get the number of TCs from DCBX config * @dcbcfg: config to retrieve number of TCs from * * @return number of contiguous TCs found in dcbcfg's ETS Configuration * Priority Assignment Table, a value from 1 to 8. If there are * non-contiguous TCs used (e.g. assigning 1 and 3 without using 2), * then returns 0. */ static u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg) { u8 tc_map; tc_map = ice_dcb_get_tc_map(dcbcfg); return (ice_dcb_tc_contig(tc_map)); } /** * ice_debug_print_mib_change_event - helper function to log LLDP MIB change events * @sc: the device private softc * @event: event received on a control queue * * Prints out the type and contents of an LLDP MIB change event in a DCB debug message. 
*/
static void
ice_debug_print_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event)
{
	struct ice_aqc_lldp_get_mib *mib =
	    (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib;
	u8 mib_type, bridge_type, tx_status;

	/* Lookup tables indexed by the masked 2-bit descriptor fields */
	static const char* mib_type_strings[] = {
	    "Local MIB",
	    "Remote MIB",
	    "Reserved",
	    "Reserved"
	};
	static const char* bridge_type_strings[] = {
	    "Nearest Bridge",
	    "Non-TPMR Bridge",
	    "Reserved",
	    "Reserved"
	};
	static const char* tx_status_strings[] = {
	    "Port's TX active",
	    "Port's TX suspended and drained",
	    "Reserved",
	    "Port's TX suspended and drained; blocked TC pipe flushed"
	};

	/* Decode the event descriptor's type byte */
	mib_type = (mib->type & ICE_AQ_LLDP_MIB_TYPE_M) >>
	    ICE_AQ_LLDP_MIB_TYPE_S;
	bridge_type = (mib->type & ICE_AQ_LLDP_BRID_TYPE_M) >>
	    ICE_AQ_LLDP_BRID_TYPE_S;
	tx_status = (mib->type & ICE_AQ_LLDP_TX_M) >>
	    ICE_AQ_LLDP_TX_S;

	ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n",
	    mib_type_strings[mib_type],
	    bridge_type_strings[bridge_type],
	    tx_status_strings[tx_status]);

	/* Nothing else to report */
	if (!event->msg_buf)
		return;

	/* Dump the raw MIB contents as a hex array */
	ice_debug(&sc->hw, ICE_DBG_DCB, "- %s contents:\n",
	    mib_type_strings[mib_type]);
	ice_debug_array(&sc->hw, ICE_DBG_DCB, 16, 1, event->msg_buf,
	    event->msg_len);
}

/**
 * ice_dcb_needs_reconfig - Returns true if driver needs to reconfigure
 * @sc: the device private softc
 * @old_cfg: Old DCBX configuration to compare against
 * @new_cfg: New DCBX configuration to check
 *
 * @return true if something changed in new_cfg that requires the driver
 * to do some reconfiguration.
*/
static bool
ice_dcb_needs_reconfig(struct ice_softc *sc, struct ice_dcbx_cfg *old_cfg,
    struct ice_dcbx_cfg *new_cfg)
{
	struct ice_hw *hw = &sc->hw;
	bool reconfig = false;

	/* Identical configurations require no work at all */
	if (!memcmp(old_cfg, new_cfg, sizeof(*old_cfg))) {
		ice_debug(hw, ICE_DBG_DCB, "No change detected in local DCBX configuration\n");
		return (false);
	}

	/* Check if ETS config has changed */
	if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
	    sizeof(new_cfg->etscfg))) {
		/* A changed priority table requires driver reconfig */
		if (memcmp(&new_cfg->etscfg.prio_table,
		    &old_cfg->etscfg.prio_table,
		    sizeof(new_cfg->etscfg.prio_table))) {
			ice_debug(hw, ICE_DBG_DCB, "ETS UP2TC changed\n");
			reconfig = true;
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
		    &old_cfg->etscfg.tcbwtable,
		    sizeof(new_cfg->etscfg.tcbwtable))) {
			ice_debug(hw, ICE_DBG_DCB, "ETS TCBW table changed\n");
			reconfig = true;
		}

		if (memcmp(&new_cfg->etscfg.tsatable,
		    &old_cfg->etscfg.tsatable,
		    sizeof(new_cfg->etscfg.tsatable))) {
			ice_debug(hw, ICE_DBG_DCB, "ETS TSA table changed\n");
			reconfig = true;
		}
	}

	/* Check if PFC config has changed */
	if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
		ice_debug(hw, ICE_DBG_DCB, "PFC config changed\n");
		reconfig = true;
	}

	/* An APP table change is logged but does not force a reconfig */
	if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app)))
		ice_debug(hw, ICE_DBG_DCB, "APP Table changed\n");

	ice_debug(hw, ICE_DBG_DCB, "%s result: %d\n", __func__, reconfig);

	return (reconfig);
}

/**
 * ice_stop_pf_vsi - Stop queues for PF LAN VSI
 * @sc: the device private softc
 *
 * Flushes interrupts and stops the queues associated with the PF LAN VSI.
*/
static void
ice_stop_pf_vsi(struct ice_softc *sc)
{
	/* Dissociate the Tx and Rx queues from the interrupts */
	ice_flush_txq_interrupts(&sc->pf_vsi);
	ice_flush_rxq_interrupts(&sc->pf_vsi);

	/* Queues were never started if the driver did not fully initialize */
	if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
		return;

	/* Disable the Tx and Rx queues */
	ice_vsi_disable_tx(&sc->pf_vsi);
	ice_control_all_rx_queues(&sc->pf_vsi, false);
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void
ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 qcounts[ICE_MAX_TRAFFIC_CLASS] = {};
	u16 offset = 0, qmap = 0, pow = 0;
	u16 num_q_per_tc, qcount_rx, rem_queues;
	int i, j, k;

	if (vsi->num_tcs == 0) {
		/* at least TC0 should be enabled by default */
		vsi->num_tcs = 1;
		vsi->tc_map = 0x1;
	}

	/* Split the Rx queues evenly between the enabled TCs, capped at
	 * ICE_MAX_RXQS_PER_TC per class; always assign at least one queue.
	 */
	qcount_rx = vsi->num_rx_queues;
	num_q_per_tc = min(qcount_rx / vsi->num_tcs, ICE_MAX_RXQS_PER_TC);

	if (!num_q_per_tc)
		num_q_per_tc = 1;

	/* Set initial values for # of queues to use for each active TC */
	ice_for_each_traffic_class(i)
		if (i < vsi->num_tcs)
			qcounts[i] = num_q_per_tc;

	/* If any queues are unassigned, add them to TC 0 */
	rem_queues = qcount_rx % vsi->num_tcs;
	if (rem_queues > 0)
		qcounts[0] += rem_queues;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
	 * queues allocated to TC0. No:of queues is a power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue, this way, traffic for the given TC will be sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_map & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_info[i].qoffset = 0;
			vsi->tc_info[i].qcount_rx = 1;
			vsi->tc_info[i].qcount_tx = 1;

			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_info[i].qoffset = offset;
		vsi->tc_info[i].qcount_rx = qcounts[i];
		vsi->tc_info[i].qcount_tx = qcounts[i];

		/* find the (rounded up) log-2 of queue count for current TC */
		pow = fls(qcounts[i] - 1);

		/* Pack the queue offset and power-of-2 count into the 16-bit
		 * per-TC mapping word expected by firmware.
		 */
		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			ICE_AQ_VSI_TC_Q_NUM_M);
		ctxt->info.tc_mapping[i] = CPU_TO_LE16(qmap);

		/* Store traffic class and handle data in queue structures */
		for (j = offset, k = 0; j < offset + qcounts[i]; j++, k++) {
			vsi->tx_queues[j].q_handle = k;
			vsi->tx_queues[j].tc = i;

			vsi->rx_queues[j].tc = i;
		}

		offset += qcounts[i];
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
	ctxt->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
}

/**
 * ice_pf_vsi_cfg_tc - Configure PF VSI for a given TC map
 * @sc: the device private softc
 * @tc_map: traffic class bitmap
 *
 * @pre VSI queues are stopped
 *
 * @return 0 if configuration is successful
 * @return EIO if Update VSI AQ cmd fails
 * @return ENODEV if updating Tx Scheduler fails
 */
static int
ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_hw *hw = &sc->hw;
	struct ice_vsi_ctx ctx = { 0 };
	device_t dev = sc->dev;
	int status;
	u8 num_tcs = 0;
	int i = 0;

	/* Count the number of enabled Traffic Classes */
	ice_for_each_traffic_class(i)
		if (tc_map & BIT(i))
			num_tcs++;

	vsi->tc_map = tc_map;
	vsi->num_tcs = num_tcs;

	/* Set default parameters for context */
	ctx.vf_num = 0;
	ctx.info = vsi->info;

	/* Setup queue map */
	ice_vsi_setup_q_map(vsi, &ctx);

	/* Update VSI configuration in firmware (RX queues) */
	ctx.info.valid_sections = CPU_TO_LE16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(hw, vsi->idx, &ctx, NULL);
	if (status) {
		device_printf(dev,
		    "%s: Update VSI AQ call failed, err %s aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}
	vsi->info = ctx.info;

	/* Use values derived in ice_vsi_setup_q_map() */
	for (i = 0; i < num_tcs; i++)
		max_txqs[i] = vsi->tc_info[i].qcount_tx;

	if (hw->debug_mask & ICE_DBG_DCB) {
		device_printf(dev, "%s: max_txqs:", __func__);
		ice_for_each_traffic_class(i)
			printf(" %d", max_txqs[i]);
		printf("\n");
	}

	/* Update LAN Tx queue info in firmware */
	status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, vsi->tc_map,
	    max_txqs);
	if (status) {
		device_printf(dev,
		    "%s: Failed VSI lan queue config, err %s aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (ENODEV);
	}

	vsi->info.valid_sections = 0;

	return (0);
}

/**
 * ice_dcb_tc_contig - Count TCs if they're contiguous
 * @tc_map: pointer to priority table
 *
 * @return The number of traffic classes in
 * an 8-bit TC bitmap, or if there is a gap, then returns 0.
 */
static u8
ice_dcb_tc_contig(u8 tc_map)
{
	bool tc_unused = false;
	u8 ret = 0;

	/* Scan bitmask for contiguous TCs starting with TC0 */
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (tc_map & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				/* Non-contiguous TCs detected */
				return (0);
			}
		} else
			tc_unused = true;
	}

	return (ret);
}

/**
 * ice_dcb_recfg - Reconfigure VSI with new DCB settings
 * @sc: the device private softc
 *
 * @pre All VSIs have been disabled/stopped
 *
 * Reconfigures VSI settings based on local_dcbx_cfg.
 */
static void
ice_dcb_recfg(struct ice_softc *sc)
{
	struct ice_dcbx_cfg *dcbcfg =
	    &sc->hw.port_info->qos_cfg.local_dcbx_cfg;
	device_t dev = sc->dev;
	u8 tc_map = 0;
	int ret;

	tc_map = ice_dcb_get_tc_map(dcbcfg);

	/* If non-contiguous TCs are used, then configure
	 * the default TC instead. There's no support for
	 * non-contiguous TCs being used.
	 */
	if (ice_dcb_tc_contig(tc_map) == 0) {
		tc_map = ICE_DFLT_TRAFFIC_CLASS;
		ice_set_default_local_lldp_mib(sc);
	}

	/* Reconfigure VSI queues to add/remove traffic classes */
	ret = ice_pf_vsi_cfg_tc(sc, tc_map);
	if (ret)
		device_printf(dev,
		    "Failed to configure TCs for PF VSI, err %s\n",
		    ice_err_str(ret));

}

/**
 * ice_set_default_local_mib_settings - Set Local LLDP MIB to default settings
 * @sc: device softc structure
 *
 * Overwrites the driver's SW local LLDP MIB with default settings. This
 * ensures the driver has a valid MIB when it next uses the Set Local LLDP MIB
 * admin queue command.
 */
static void
ice_set_default_local_mib_settings(struct ice_softc *sc)
{
	struct ice_dcbx_cfg *dcbcfg;
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi;
	u8 maxtcs, maxtcs_ets, old_pfc_mode;

	pi = hw->port_info;

	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;

	maxtcs = hw->func_caps.common_cap.maxtc;
	/* This value is only 3 bits; 8 TCs maps to 0 */
	maxtcs_ets = maxtcs & ICE_IEEE_ETS_MAXTC_M;

	/* VLAN vs DSCP mode needs to be preserved */
	old_pfc_mode = dcbcfg->pfc_mode;

	/**
	 * Setup the default settings used by the driver for the Set Local
	 * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no
	 * PFC, TSA=2).
	 */
	memset(dcbcfg, 0, sizeof(*dcbcfg));

	dcbcfg->etscfg.willing = 1;
	dcbcfg->etscfg.tcbwtable[0] = 100;
	dcbcfg->etscfg.maxtcs = maxtcs_ets;
	dcbcfg->etscfg.tsatable[0] = 2;

	dcbcfg->etsrec = dcbcfg->etscfg;
	dcbcfg->etsrec.willing = 0;

	dcbcfg->pfc.willing = 1;
	dcbcfg->pfc.pfccap = maxtcs;

	dcbcfg->pfc_mode = old_pfc_mode;
}

/**
 * ice_do_dcb_reconfig - notify RDMA and reconfigure PF LAN VSI
 * @sc: the device private softc
 * @pending_mib: FW has a pending MIB change to execute
 *
 * @pre Determined that the DCB configuration requires a change
 *
 * Reconfigures the PF LAN VSI based on updated DCB configuration
 * found in the hw struct's/port_info's/ local dcbx configuration.
 */
void
ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
{
	struct ice_aqc_port_ets_elem port_ets = { 0 };
	struct ice_dcbx_cfg *local_dcbx_cfg;
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi;
	device_t dev = sc->dev;
	int status;

	pi = sc->hw.port_info;
	local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;

	/* Notify the RDMA client before touching QoS state */
	ice_rdma_notify_dcb_qos_change(sc);

	/* If there's a pending MIB, tell the FW to execute the MIB change
	 * now.
	 */
	if (pending_mib) {
		status = ice_lldp_execute_pending_mib(hw);
		if ((status == ICE_ERR_AQ_ERROR) &&
		    (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)) {
			device_printf(dev,
			    "Execute Pending LLDP MIB AQ call failed, no pending MIB\n");
		} else if (status) {
			device_printf(dev,
			    "Execute Pending LLDP MIB AQ call failed, err %s aq_err %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			/* This won't break traffic, but QoS will not work as expected */
		}
	}

	/* Set state when there's more than one TC */
	if (ice_dcb_get_num_tc(local_dcbx_cfg) > 1) {
		device_printf(dev, "Multiple traffic classes enabled\n");
		ice_set_state(&sc->state, ICE_STATE_MULTIPLE_TCS);
	} else {
		device_printf(dev, "Multiple traffic classes disabled\n");
		ice_clear_state(&sc->state, ICE_STATE_MULTIPLE_TCS);
	}

	/* Disable PF VSI since it's going to be reconfigured */
	ice_stop_pf_vsi(sc);

	/* Query ETS configuration and update SW Tx scheduler info */
	status = ice_query_port_ets(pi, &port_ets, sizeof(port_ets), NULL);
	if (status) {
		device_printf(dev,
		    "Query Port ETS AQ call failed, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		/* This won't break traffic, but QoS will not work as expected */
	}

	/* Change PF VSI configuration */
	ice_dcb_recfg(sc);

	/* Send new configuration to RDMA client driver */
	ice_rdma_dcb_qos_update(sc, pi);

	/* VSI queue layout may have changed; have the stack re-learn it */
	ice_request_stack_reinit(sc);
}

/**
 * ice_handle_mib_change_event - helper function to handle LLDP MIB change events
 * @sc: the device private softc
 * @event: event received on a control queue
 *
 * Checks the updated MIB it receives and possibly reconfigures the PF LAN
 * VSI depending on what has changed. This will also print out some debug
 * information about the MIB event if ICE_DBG_DCB is enabled in the debug_mask.
 */
static void
ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event)
{
	struct ice_aqc_lldp_get_mib *params =
	    (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib;
	struct ice_dcbx_cfg tmp_dcbx_cfg, *local_dcbx_cfg;
	struct ice_port_info *pi;
	device_t dev = sc->dev;
	struct ice_hw *hw = &sc->hw;
	bool needs_reconfig, mib_is_pending;
	int status;
	u8 mib_type, bridge_type;

	ASSERT_CFG_LOCKED(sc);

	ice_debug_print_mib_change_event(sc, event);

	pi = sc->hw.port_info;

	/* Decode the event descriptor fields */
	mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >>
	    ICE_AQ_LLDP_MIB_TYPE_S;
	bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >>
	    ICE_AQ_LLDP_BRID_TYPE_S;
	mib_is_pending = (params->state & ICE_AQ_LLDP_MIB_CHANGE_STATE_M) >>
	    ICE_AQ_LLDP_MIB_CHANGE_STATE_S;

	/* Ignore if event is not for Nearest Bridge */
	if (bridge_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
		return;

	/* Check MIB Type and return if event for Remote MIB update */
	if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
		/* Update the cached remote MIB and return */
		status = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
					 &pi->qos_cfg.remote_dcbx_cfg);
		if (status)
			device_printf(dev,
			    "%s: Failed to get Remote DCB config; status %s, aq_err %s\n",
			    __func__, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
		/* Not fatal if this fails */
		return;
	}

	/* Save line length by aliasing the local dcbx cfg */
	local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
	/* Save off the old configuration and clear current config */
	tmp_dcbx_cfg = *local_dcbx_cfg;
	memset(local_dcbx_cfg, 0, sizeof(*local_dcbx_cfg));

	/* Update the current local_dcbx_cfg with new data */
	if (mib_is_pending) {
		ice_get_dcb_cfg_from_mib_change(pi, event);
	} else {
		/* Get updated DCBX data from firmware */
		status = ice_get_dcb_cfg(pi);
		if (status) {
			device_printf(dev,
			    "%s: Failed to get Local DCB config; status %s, aq_err %s\n",
			    __func__, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			return;
		}
	}

	/* Check to see if DCB needs reconfiguring */
	needs_reconfig = ice_dcb_needs_reconfig(sc, &tmp_dcbx_cfg,
	    local_dcbx_cfg);

	if (!needs_reconfig && !mib_is_pending)
		return;

	/* Reconfigure -- this will also notify FW that configuration is done,
	 * if the FW MIB change is only pending instead of executed.
	 */
	ice_do_dcb_reconfig(sc, mib_is_pending);
}

/**
 * ice_send_version - Send driver version to firmware
 * @sc: the device private softc
 *
 * Send the driver version to the firmware. This must be called as early as
 * possible after ice_init_hw().
 *
 * @return 0 on success, EIO if the Send Driver Version AQ command fails.
 */
int
ice_send_version(struct ice_softc *sc)
{
	struct ice_driver_ver driver_version = {0};
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;

	driver_version.major_ver = ice_major_version;
	driver_version.minor_ver = ice_minor_version;
	driver_version.build_ver = ice_patch_version;
	driver_version.subbuild_ver = ice_rc_version;

	strlcpy((char *)driver_version.driver_string, ice_driver_version,
		sizeof(driver_version.driver_string));

	status = ice_aq_send_driver_ver(hw, &driver_version, NULL);
	if (status) {
		device_printf(dev,
		    "Unable to send driver version to firmware, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	return (0);
}

/**
 * ice_handle_lan_overflow_event - helper function to log LAN overflow events
 * @sc: device softc
 * @event: event received on a control queue
 *
 * Prints out a message when a LAN overflow event is detected on a receive
 * queue.
 */
static void
ice_handle_lan_overflow_event(struct ice_softc *sc,
			      struct ice_rq_event_info *event)
{
	struct ice_aqc_event_lan_overflow *params =
	    (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow;
	struct ice_hw *hw = &sc->hw;

	/* Log only; no recovery action is taken for overflow events */
	ice_debug(hw, ICE_DBG_DCB,
	    "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n",
	    LE32_TO_CPU(params->prtdcb_ruptq), LE32_TO_CPU(params->qtx_ctl));
}

/**
 * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list
 * @vsi: the VSI to target packets to
 * @list: the list to add the filter to
 * @ethertype: the Ethertype to filter on
 * @direction: The direction of the filter (Tx or Rx)
 * @action: the action to take
 *
 * Add an Ethertype filter to a filter list. Used to forward a series of
 * filters to the firmware for configuring the switch.
 *
 * Returns 0 on success, and an error code on failure.
 */
static int
ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
			  u16 ethertype, u16 direction,
			  enum ice_sw_fwd_act_type action)
{
	struct ice_fltr_list_entry *entry;

	MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX));

	/* M_NOWAIT: this may be called in contexts where sleeping is not
	 * safe, so allocation failure must be handled by the caller.
	 */
	entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO);
	if (!entry)
		return (ENOMEM);

	entry->fltr_info.flag = direction;
	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
	entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	entry->fltr_info.fltr_act = action;
	entry->fltr_info.vsi_handle = vsi->idx;
	entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype;

	/* Ownership of the entry transfers to the list; freed later by
	 * ice_free_fltr_list().
	 */
	LIST_ADD(&entry->list_entry, list);

	return 0;
}

#define ETHERTYPE_PAUSE_FRAMES 0x8808
#define ETHERTYPE_LLDP_FRAMES 0x88cc

/**
 * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes
 * @sc: the device private softc
 *
 * Configure the switch to drop PAUSE frames and LLDP frames transmitted from
 * the host. This prevents malicious VFs from sending these frames and being
 * able to control or configure the network.
 */
int
ice_cfg_pf_ethertype_filters(struct ice_softc *sc)
{
	struct ice_list_head ethertype_list;
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	int err = 0;

	INIT_LIST_HEAD(&ethertype_list);

	/*
	 * Note that the switch filters will ignore the VSI index for the drop
	 * action, so we only need to program drop filters once for the main
	 * VSI.
	 */

	/* Configure switch to drop all Tx pause frames coming from any VSI. */
	if (sc->enable_tx_fc_filter) {
		err = ice_add_ethertype_to_list(vsi, &ethertype_list,
						ETHERTYPE_PAUSE_FRAMES,
						ICE_FLTR_TX, ICE_DROP_PACKET);
		if (err)
			goto free_ethertype_list;
	}

	/* Configure switch to drop LLDP frames coming from any VSI */
	if (sc->enable_tx_lldp_filter) {
		err = ice_add_ethertype_to_list(vsi, &ethertype_list,
						ETHERTYPE_LLDP_FRAMES,
						ICE_FLTR_TX, ICE_DROP_PACKET);
		if (err)
			goto free_ethertype_list;
	}

	/* Program the accumulated filter list in one AQ transaction */
	status = ice_add_eth_mac(hw, &ethertype_list);
	if (status) {
		device_printf(dev,
		    "Failed to add Tx Ethertype filters, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		err = (EIO);
	}

free_ethertype_list:
	ice_free_fltr_list(&ethertype_list);
	return err;
}

/**
 * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames
 * @sc: the device private structure
 *
 * Add a switch ethertype filter which forwards the LLDP frames to the main PF
 * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to
 * be forwarded to the stack.
 */
void
ice_add_rx_lldp_filter(struct ice_softc *sc)
{
	struct ice_list_head ethertype_list;
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	int err;
	u16 vsi_num;

	/*
	 * If FW is new enough, use a direct AQ command to perform the filter
	 * addition.
	 */
	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
		vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
		status = ice_lldp_fltr_add_remove(hw, vsi_num, true);
		if (status) {
			device_printf(dev,
			    "Failed to add Rx LLDP filter, err %s aq_err %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
		} else
			/* Remember we added it so detach can remove it */
			ice_set_state(&sc->state,
			    ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER);
		return;
	}

	INIT_LIST_HEAD(&ethertype_list);

	/* Forward Rx LLDP frames to the stack */
	err = ice_add_ethertype_to_list(vsi, &ethertype_list,
					ETHERTYPE_LLDP_FRAMES,
					ICE_FLTR_RX, ICE_FWD_TO_VSI);
	if (err) {
		device_printf(dev,
		    "Failed to add Rx LLDP filter, err %s\n",
		    ice_err_str(err));
		goto free_ethertype_list;
	}

	status = ice_add_eth_mac(hw, &ethertype_list);
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		device_printf(dev,
		    "Failed to add Rx LLDP filter, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
	} else {
		/*
		 * If status == ICE_ERR_ALREADY_EXISTS, we won't treat an
		 * already existing filter as an error case.
		 */
		ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER);
	}

free_ethertype_list:
	ice_free_fltr_list(&ethertype_list);
}

/**
 * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames
 * @sc: the device private structure
 *
 * Remove the switch filter forwarding LLDP frames to the main PF VSI, called
 * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the
 * stack.
 */
static void
ice_del_rx_lldp_filter(struct ice_softc *sc)
{
	struct ice_list_head ethertype_list;
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	int err;
	u16 vsi_num;

	/*
	 * Only in the scenario where the driver added the filter during
	 * this session (while the driver was loaded) would we be able to
	 * delete this filter.
	 */
	if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER))
		return;

	/*
	 * If FW is new enough, use a direct AQ command to perform the filter
	 * removal.
	 */
	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
		vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
		status = ice_lldp_fltr_add_remove(hw, vsi_num, false);
		if (status) {
			device_printf(dev,
			    "Failed to remove Rx LLDP filter, err %s aq_err %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
		}
		return;
	}

	INIT_LIST_HEAD(&ethertype_list);

	/* Remove filter forwarding Rx LLDP frames to the stack */
	err = ice_add_ethertype_to_list(vsi, &ethertype_list,
					ETHERTYPE_LLDP_FRAMES,
					ICE_FLTR_RX, ICE_FWD_TO_VSI);
	if (err) {
		device_printf(dev,
		    "Failed to remove Rx LLDP filter, err %s\n",
		    ice_err_str(err));
		goto free_ethertype_list;
	}

	status = ice_remove_eth_mac(hw, &ethertype_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		; /* Don't complain if we try to remove a filter that doesn't exist */
	} else if (status) {
		device_printf(dev,
		    "Failed to remove Rx LLDP filter, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
	}

free_ethertype_list:
	ice_free_fltr_list(&ethertype_list);
}

/**
 * ice_init_link_configuration -- Setup link in different ways depending
 * on whether media is available or not.
 * @sc: device private structure
 *
 * Called at the end of the attach process to either set default link
 * parameters if there is media available, or force HW link down and
 * set a state bit if there is no media.
 */
void
ice_init_link_configuration(struct ice_softc *sc)
{
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;

	/* Force a fresh link status read from firmware */
	pi->phy.get_link_info = true;
	status = ice_get_link_status(pi, &sc->link_up);
	if (status) {
		device_printf(dev,
		    "%s: ice_get_link_status failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return;
	}

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
		/* Apply default link settings */
		if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)) {
			ice_set_link(sc, false);
			ice_set_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
		} else
			ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
	} else {
		/* Set link down, and poll for media available in timer. This prevents the
		 * driver from receiving spurious link-related events.
		 */
		ice_set_state(&sc->state, ICE_STATE_NO_MEDIA);
		status = ice_aq_set_link_restart_an(pi, false, NULL);
		/* EMODE means link control is disabled in this mode; not an error */
		if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
			device_printf(dev,
			    "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
			    __func__, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data
 * @sc: device private structure
 * @cfg: new PHY config data to be modified
 *
 * Applies user settings for advertised speeds to the PHY type fields in the
 * supplied PHY config struct. It uses the data from pcaps to check if the
 * saved settings are invalid and uses the pcaps data instead if they are
 * invalid.
 */
static int
ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc,
			       struct ice_aqc_set_phy_cfg_data *cfg)
{
	struct ice_phy_data phy_data = { 0 };
	struct ice_port_info *pi = sc->hw.port_info;
	u64 phy_low = 0, phy_high = 0;
	u16 link_speeds;
	int ret;

	link_speeds = pi->phy.curr_user_speed_req;

	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) {
		/* First try intersecting against the FW default config */
		memset(&phy_data, 0, sizeof(phy_data));
		phy_data.report_mode = ICE_AQC_REPORT_DFLT_CFG;
		phy_data.user_speeds_orig = link_speeds;
		ret = ice_intersect_phy_types_and_speeds(sc, &phy_data);
		if (ret != 0) {
			/* Error message already printed within function */
			return (ret);
		}
		phy_low = phy_data.phy_low_intr;
		phy_high = phy_data.phy_high_intr;

		/* No user request, or the request is satisfiable as-is */
		if (link_speeds == 0 || phy_data.user_speeds_intr)
			goto finalize_link_speed;

		if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) {
			/* Retry against topology caps ignoring media */
			memset(&phy_data, 0, sizeof(phy_data));
			phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA;
			phy_data.user_speeds_orig = link_speeds;
			ret = ice_intersect_phy_types_and_speeds(sc, &phy_data);
			if (ret != 0) {
				/* Error message already printed within function */
				return (ret);
			}
			phy_low = phy_data.phy_low_intr;
			phy_high = phy_data.phy_high_intr;

			/* Fall back to the unintersected types if nothing matched */
			if (!phy_data.user_speeds_intr) {
				phy_low = phy_data.phy_low_orig;
				phy_high = phy_data.phy_high_orig;
			}
			goto finalize_link_speed;
		}
		/* If we're here, then it means the benefits of Version 2
		 * link management aren't utilized.  We fall through to
		 * handling Strict Link Mode the same as Version 1 link
		 * management.
		 */
	}

	memset(&phy_data, 0, sizeof(phy_data));
	/* With no user request and an LDO TLV present, ignore media */
	if ((link_speeds == 0) &&
	    (sc->ldo_tlv.phy_type_low || sc->ldo_tlv.phy_type_high))
		phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA;
	else
		phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA;
	phy_data.user_speeds_orig = link_speeds;
	ret = ice_intersect_phy_types_and_speeds(sc, &phy_data);
	if (ret != 0) {
		/* Error message already printed within function */
		return (ret);
	}
	phy_low = phy_data.phy_low_intr;
	phy_high = phy_data.phy_high_intr;

	if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) {
		/* Strict mode: reject a request the media can't satisfy */
		if (phy_low == 0 && phy_high == 0) {
			device_printf(sc->dev,
			    "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
			return (EINVAL);
		}
	} else {
		if (link_speeds == 0) {
			/* Constrain to LDO TLV types when they overlap */
			if (sc->ldo_tlv.phy_type_low & phy_low ||
			    sc->ldo_tlv.phy_type_high & phy_high) {
				phy_low &= sc->ldo_tlv.phy_type_low;
				phy_high &= sc->ldo_tlv.phy_type_high;
			}
		} else if (phy_low == 0 && phy_high == 0) {
			/* Lenient mode: retry ignoring media before giving up */
			memset(&phy_data, 0, sizeof(phy_data));
			phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA;
			phy_data.user_speeds_orig = link_speeds;
			ret = ice_intersect_phy_types_and_speeds(sc, &phy_data);
			if (ret != 0) {
				/* Error message already printed within function */
				return (ret);
			}
			phy_low = phy_data.phy_low_intr;
			phy_high = phy_data.phy_high_intr;

			if (!phy_data.user_speeds_intr) {
				phy_low = phy_data.phy_low_orig;
				phy_high = phy_data.phy_high_orig;
			}
		}
	}

finalize_link_speed:
	/* Cache new user settings for speeds */
	pi->phy.curr_user_speed_req = phy_data.user_speeds_intr;
	cfg->phy_type_low = htole64(phy_low);
	cfg->phy_type_high = htole64(phy_high);

	return (ret);
}

/**
 * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data
 * @sc: device private structure
 * @cfg: new PHY config data to be modified
 *
 * Applies user setting for FEC mode to PHY config struct.
It uses the data * from pcaps to check if the saved settings are invalid and uses the pcaps * data instead if they are invalid. */ static int ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg) { struct ice_port_info *pi = sc->hw.port_info; int status; cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; status = ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); if (status) return (EIO); return (0); } /** * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data * @pi: port info struct * @cfg: new PHY config data to be modified * * Applies user setting for flow control mode to PHY config struct. There are * no invalid flow control mode settings; if there are, then this function * treats them like "ICE_FC_NONE". */ static void ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg) { cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY); switch (pi->phy.curr_user_fc_req) { case ICE_FC_FULL: cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; break; case ICE_FC_RX_PAUSE: cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; break; case ICE_FC_TX_PAUSE: cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY; break; default: /* ICE_FC_NONE */ break; } } /** * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings * @sc: device private structure * @settings: which settings to apply * * Applies user settings for advertised speeds, FEC mode, and flow * control mode to a PHY config struct; it uses the data from pcaps * to check if the saved settings are invalid and uses the pcaps * data instead if they are invalid. * * For things like sysctls where only one setting needs to be * updated, the bitmap allows the caller to specify which setting * to update. 
 */
int
ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u64 phy_low, phy_high;
	int status;
	enum ice_fec_mode dflt_fec_mode;
	u16 dflt_user_speed;

	/* NOTE(review): out-of-bounds settings are only logged here, not
	 * rejected -- execution continues; confirm this is intentional.
	 */
	if (!settings || settings > ICE_APPLY_LS_FEC_FC) {
		ice_debug(hw, ICE_DBG_LINK, "Settings out-of-bounds: %u\n",
		    settings);
	}

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				  &pcaps, NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	phy_low = le64toh(pcaps.phy_type_low);
	phy_high = le64toh(pcaps.phy_type_high);

	/* Save off initial config parameters */
	dflt_user_speed = ice_aq_phy_types_to_link_speeds(phy_low, phy_high);
	dflt_fec_mode = ice_caps_to_fec_mode(pcaps.caps,
	    pcaps.link_fec_options);

	/* Setup new PHY config */
	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);

	/* On error, restore active configuration values */
	if ((settings & ICE_APPLY_LS) &&
	    ice_apply_saved_phy_req_to_cfg(sc, &cfg)) {
		pi->phy.curr_user_speed_req = dflt_user_speed;
		cfg.phy_type_low = pcaps.phy_type_low;
		cfg.phy_type_high = pcaps.phy_type_high;
	}

	if ((settings & ICE_APPLY_FEC) &&
	    ice_apply_saved_fec_req_to_cfg(sc, &cfg)) {
		pi->phy.curr_user_fec_req = dflt_fec_mode;
	}

	if (settings & ICE_APPLY_FC) {
		/* No real error indicators for this process,
		 * so we'll just have to assume it works. */
		ice_apply_saved_fc_req_to_cfg(pi, &cfg);
	}

	/* Enable link and re-negotiate it */
	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
	if (status) {
		/* Don't indicate failure if there's no media in the port.
		 * The settings have been saved and will apply when media
		 * is inserted.
		 */
		if ((status == ICE_ERR_AQ_ERROR) &&
		    (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) {
			device_printf(dev,
			    "%s: Setting will be applied when media is inserted\n",
			    __func__);
			return (0);
		} else {
			device_printf(dev,
			    "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
			    __func__, ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			return (EIO);
		}
	}

	return (0);
}

/**
 * ice_print_ldo_tlv - Print out LDO TLV information
 * @sc: device private structure
 * @tlv: LDO TLV information from the adapter NVM
 *
 * Dump out the information in tlv to the kernel message buffer; intended for
 * debugging purposes.
 */
static void
ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv)
{
	device_t dev = sc->dev;

	device_printf(dev, "TLV: -options 0x%02x\n", tlv->options);
	device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config);
	device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options);
	device_printf(dev, " -phy_high 0x%016llx\n",
	    (unsigned long long)tlv->phy_type_high);
	device_printf(dev, " -phy_low 0x%016llx\n",
	    (unsigned long long)tlv->phy_type_low);
}

/**
 * ice_set_link_management_mode -- Strict or lenient link management
 * @sc: device private structure
 *
 * Some NVMs give the adapter the option to advertise a superset of link
 * configurations. This checks to see if that option is enabled.
 * Further, the NVM could also provide a specific set of configurations
 * to try; these are cached in the driver's private structure if they
 * are available.
 */
void
ice_set_link_management_mode(struct ice_softc *sc)
{
	struct ice_port_info *pi = sc->hw.port_info;
	device_t dev = sc->dev;
	struct ice_link_default_override_tlv tlv = { 0 };
	int status;

	/* Port must be in strict mode if FW version is below a certain
	 * version. (i.e. Don't set lenient mode features)
	 */
	if (!(ice_fw_supports_link_override(&sc->hw)))
		return;

	status = ice_get_link_default_override(&tlv, pi);
	if (status) {
		device_printf(dev,
		    "%s: ice_get_link_default_override failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(sc->hw.adminq.sq_last_status));
		return;
	}

	if (sc->hw.debug_mask & ICE_DBG_LINK)
		ice_print_ldo_tlv(sc, &tlv);

	/* Set lenient link mode */
	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) &&
	    (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)))
		ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en);

	/* FW supports reporting a default configuration */
	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_2) &&
	    ice_fw_supports_report_dflt_cfg(&sc->hw)) {
		ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_en);
		/* Knowing we're at a high enough firmware revision to
		 * support this link management configuration, we don't
		 * need to check/support earlier versions.
		 */
		return;
	}

	/* Default overrides only work if in lenient link mode */
	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_1) &&
	    ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) &&
	    (tlv.options & ICE_LINK_OVERRIDE_EN))
		ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_en);

	/* Cache the LDO TLV structure in the driver, since it
	 * won't change during the driver's lifetime.
	 */
	sc->ldo_tlv = tlv;
}

/**
 * ice_set_link -- Set up/down link on phy
 * @sc: device private structure
 * @enabled: link status to set up
 *
 * This should be called when change of link status is needed.
 */
void
ice_set_link(struct ice_softc *sc, bool enabled)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;

	if (ice_driver_is_detaching(sc))
		return;

	/* Without media there is nothing to bring up or down */
	if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
		return;

	if (enabled)
		ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
	else {
		status = ice_aq_set_link_restart_an(hw->port_info, false, NULL);
		if (status) {
			if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
				device_printf(dev,
				    "%s: Link control not enabled in current device mode\n",
				    __func__);
			else
				device_printf(dev,
				    "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
				    __func__, ice_status_str(status),
				    ice_aq_str(hw->adminq.sq_last_status));
		} else
			sc->link_up = false;
	}
}

/**
 * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults
 * @sc: device private structure
 *
 * This should be called before the tunables for these link settings
 * (e.g. advertise_speed) are added -- so that these defaults don't overwrite
 * the cached values that the sysctl handlers will write.
 *
 * This also needs to be called before ice_init_link_configuration, to ensure
 * that there are sane values that can be written if there is media available
 * in the port.
 */
void
ice_init_saved_phy_cfg(struct ice_softc *sc)
{
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	u64 phy_low, phy_high;
	u8 report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA;

	/* Newer FW can report a default configuration directly */
	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2))
		report_mode = ICE_AQC_REPORT_DFLT_CFG;
	status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL);
	if (status) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
		    __func__,
		    report_mode == ICE_AQC_REPORT_DFLT_CFG ? "DFLT" : "w/MEDIA",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return;
	}

	phy_low = le64toh(pcaps.phy_type_low);
	phy_high = le64toh(pcaps.phy_type_high);

	/* Save off initial config parameters */
	pi->phy.curr_user_speed_req =
	    ice_aq_phy_types_to_link_speeds(phy_low, phy_high);
	pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps,
	    pcaps.link_fec_options);
	pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps);
}

/**
 * ice_module_init - Driver callback to handle module load
 *
 * Callback for handling module load events. This function should initialize
 * any data structures that are used for the life of the device driver.
 */
static int
ice_module_init(void)
{
	ice_rdma_init();
	return (0);
}

/**
 * ice_module_exit - Driver callback to handle module exit
 *
 * Callback for handling module unload events. This function should release
 * any resources initialized during ice_module_init.
 *
 * If this function returns non-zero, the module will not be unloaded. It
 * should only return such a value if the module cannot be unloaded at all,
 * such as due to outstanding memory references that cannot be revoked.
 */
static int
ice_module_exit(void)
{
	ice_rdma_exit();
	return (0);
}

/**
 * ice_module_event_handler - Callback for module events
 * @mod: unused module_t parameter
 * @what: the event requested
 * @arg: unused event argument
 *
 * Callback used to handle module events from the stack. Used to allow the
 * driver to define custom behavior that should happen at module load and
 * unload.
 */
int
ice_module_event_handler(module_t __unused mod, int what, void __unused *arg)
{
	switch (what) {
	case MOD_LOAD:
		return ice_module_init();
	case MOD_UNLOAD:
		return ice_module_exit();
	default:
		/* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN?
*/ return (EOPNOTSUPP); } } /** * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request * @sc: the device private softc * @ifd: ifdrv ioctl request pointer */ int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd) { union ice_nvm_access_data *data; struct ice_nvm_access_cmd *cmd; size_t ifd_len = ifd->ifd_len, malloc_len; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; u8 *nvm_buffer; int err; /* * ifioctl forwards SIOCxDRVSPEC to iflib without performing * a privilege check. In turn, iflib forwards the ioctl to the driver * without performing a privilege check. Perform one here to ensure * that non-privileged threads cannot access this interface. */ err = priv_check(curthread, PRIV_DRIVER); if (err) return (err); if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { device_printf(dev, "%s: Driver must rebuild data structures after a reset. Operation aborted.\n", __func__); return (EBUSY); } if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n", __func__, ifd_len, sizeof(struct ice_nvm_access_cmd)); return (EINVAL); } if (ifd->ifd_data == NULL) { device_printf(dev, "%s: ifd data buffer not present.\n", __func__); return (EINVAL); } /* * If everything works correctly, ice_handle_nvm_access should not * modify data past the size of the ioctl length. However, it could * lead to memory corruption if it did. Make sure to allocate at least * enough space for the command and data regardless. This * ensures that any access to the data union will not access invalid * memory. 
*/ malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd)); nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK); if (!nvm_buffer) return (ENOMEM); /* Copy the NVM access command and data in from user space */ /* coverity[tainted_data_argument] */ err = copyin(ifd->ifd_data, nvm_buffer, ifd_len); if (err) { device_printf(dev, "%s: Copying request from user space failed, err %s\n", __func__, ice_err_str(err)); goto cleanup_free_nvm_buffer; } /* * The NVM command structure is immediately followed by data which * varies in size based on the command. */ cmd = (struct ice_nvm_access_cmd *)nvm_buffer; data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd)); /* Handle the NVM access request */ status = ice_handle_nvm_access(hw, cmd, data); if (status) ice_debug(hw, ICE_DBG_NVM, "NVM access request failed, err %s\n", ice_status_str(status)); /* Copy the possibly modified contents of the handled request out */ err = copyout(nvm_buffer, ifd->ifd_data, ifd_len); if (err) { device_printf(dev, "%s: Copying response back to user space failed, err %s\n", __func__, ice_err_str(err)); goto cleanup_free_nvm_buffer; } /* Convert private status to an error code for proper ioctl response */ switch (status) { case 0: err = (0); break; case ICE_ERR_NO_MEMORY: err = (ENOMEM); break; case ICE_ERR_OUT_OF_RANGE: err = (ENOTTY); break; case ICE_ERR_PARAM: default: err = (EINVAL); break; } cleanup_free_nvm_buffer: free(nvm_buffer, M_ICE); return err; } /** * ice_read_sff_eeprom - Read data from SFF eeprom * @sc: device softc * @dev_addr: I2C device address (typically 0xA0 or 0xA2) * @offset: offset into the eeprom * @data: pointer to data buffer to store read data in * @length: length to read; max length is 16 * * Read from the SFF eeprom in the module for this PF's port. For more details * on the contents of an SFF eeprom, refer to SFF-8724 (SFP), SFF-8636 (QSFP), * and SFF-8024 (both). 
 */
int
ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length)
{
	struct ice_hw *hw = &sc->hw;
	int ret = 0, retries = 0;
	int status;

	/* Firmware limits a single SFF read to 16 bytes */
	if (length > 16)
		return (EINVAL);

	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return (ENOSYS);

	if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
		return (ENXIO);

	/*
	 * Issue the AQ read, retrying only while FW reports EBUSY, up to
	 * ICE_I2C_MAX_RETRIES attempts. Any other AQ error terminates the
	 * loop with an appropriate errno.
	 * NOTE(review): there is no delay between EBUSY retries here.
	 */
	do {
		status = ice_aq_sff_eeprom(hw, 0, dev_addr, offset, 0, 0,
					   data, length, false, NULL);
		if (!status) {
			ret = 0;
			break;
		}
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) {
			ret = EBUSY;
			continue;
		}
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) {
			/* FW says I2C access isn't supported */
			ret = EACCES;
			break;
		}
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) {
			device_printf(sc->dev,
				      "%s: Module pointer location specified in command does not permit the required operation.\n",
				      __func__);
			ret = EPERM;
			break;
		} else {
			device_printf(sc->dev,
				      "%s: Error reading I2C data: err %s aq_err %s\n",
				      __func__, ice_status_str(status),
				      ice_aq_str(hw->adminq.sq_last_status));
			ret = EIO;
			break;
		}
	} while (retries++ < ICE_I2C_MAX_RETRIES);

	if (ret == EBUSY)
		device_printf(sc->dev,
			      "%s: Error reading I2C data after %d retries\n",
			      __func__, ICE_I2C_MAX_RETRIES);

	return (ret);
}

/**
 * ice_handle_i2c_req - Driver independent I2C request handler
 * @sc: device softc
 * @req: The I2C parameters to use
 *
 * Read from the port's I2C eeprom using the parameters from the ioctl.
 * Thin wrapper around ice_read_sff_eeprom().
 */
int
ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req)
{
	return ice_read_sff_eeprom(sc, req->dev_addr, req->offset,
				   req->data, req->len);
}

/**
 * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module
 * inserted into the port.
 *
 *             | SFP A2  | QSFP Lower Page
 * ------------|---------|----------------
 * Temperature | 96-97   | 22-23
 * Vcc         | 98-99   | 26-27
 * TX power    | 102-103 | 34-35..40-41
 * RX power    | 104-105 | 50-51..56-57
 */
static int
ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	device_t dev = sc->dev;
	struct sbuf *sbuf;
	int ret;
	u8 data[16];

	UNREFERENCED_PARAMETER(arg2);
	UNREFERENCED_PARAMETER(oidp);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* No output buffer yet: report 128 bytes so userland can size one */
	if (req->oldptr == NULL) {
		ret = SYSCTL_OUT(req, 0, 128);
		return (ret);
	}

	/* Byte 0 of the module EEPROM identifies the module type */
	ret = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1);
	if (ret)
		return (ret);

	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
	if (data[0] == 0x3) {
		/*
		 * Check for:
		 * - Internally calibrated data
		 * - Diagnostic monitoring is implemented
		 *
		 * NOTE(review): 0x60 tests bits 6 and 5 of byte 92; this
		 * rejects the module only when neither is set — confirm
		 * against SFF-8472 whether both should be required.
		 */
		ice_read_sff_eeprom(sc, 0xA0, 92, data, 1);
		if (!(data[0] & 0x60)) {
			device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]);
			return (ENODEV);
		}

		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);

		/* Return values of the follow-up reads are intentionally
		 * ignored; stale bytes would simply print as-is. */
		ice_read_sff_eeprom(sc, 0xA2, 96, data, 4);
		for (int i = 0; i < 4; i++)
			sbuf_printf(sbuf, "%02X ", data[i]);

		ice_read_sff_eeprom(sc, 0xA2, 102, data, 4);
		for (int i = 0; i < 4; i++)
			sbuf_printf(sbuf, "%02X ", data[i]);
	} else if (data[0] == 0xD || data[0] == 0x11) {
		/*
		 * QSFP+ modules are always internally calibrated, and must indicate
		 * what types of diagnostic monitoring are implemented
		 */
		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);

		ice_read_sff_eeprom(sc, 0xA0, 22, data, 2);
		for (int i = 0; i < 2; i++)
			sbuf_printf(sbuf, "%02X ", data[i]);

		ice_read_sff_eeprom(sc, 0xA0, 26, data, 2);
		for (int i = 0; i < 2; i++)
			sbuf_printf(sbuf, "%02X ", data[i]);

		ice_read_sff_eeprom(sc, 0xA0, 34, data, 2);
		for (int i = 0; i < 2; i++)
			sbuf_printf(sbuf, "%02X ", data[i]);

		ice_read_sff_eeprom(sc, 0xA0, 50, data, 2);
		for (int i = 0; i < 2; i++)
			sbuf_printf(sbuf, "%02X ", data[i]);
	} else {
		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]);
		return (ENODEV);
	}

	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_alloc_intr_tracking - Setup interrupt tracking structures
 * @sc: device softc structure
 *
 * Sets up the resource manager for keeping track of interrupt allocations,
 * and initializes the tracking maps for the PF's interrupt allocations.
 *
 * Unlike the scheme for queues, this is done in one step since both the
 * manager and the maps both have the same lifetime.
 *
 * @returns 0 on success, or an error code on failure.
 */
int
ice_alloc_intr_tracking(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int err;

	/* Sanity-check the FW-reported vector count before sizing arrays */
	if (hw->func_caps.common_cap.num_msix_vectors > ICE_MAX_MSIX_VECTORS) {
		device_printf(dev, "%s: Invalid num_msix_vectors value (%u) received from FW.\n",
			      __func__,
			      hw->func_caps.common_cap.num_msix_vectors);
		return (EINVAL);
	}

	/* Initialize the interrupt allocation manager */
	err = ice_resmgr_init_contig_only(&sc->dev_imgr,
	    hw->func_caps.common_cap.num_msix_vectors);
	if (err) {
		device_printf(dev, "Unable to initialize PF interrupt manager: %s\n",
			      ice_err_str(err));
		return (err);
	}

	/* Allocate PF interrupt mapping storage */
	if (!(sc->pf_imap =
	    (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors,
	    M_ICE, M_NOWAIT))) {
		device_printf(dev, "Unable to allocate PF imap memory\n");
		err = ENOMEM;
		goto free_imgr;
	}
	if (!(sc->rdma_imap =
	    (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors,
	    M_ICE, M_NOWAIT))) {
		device_printf(dev, "Unable to allocate RDMA imap memory\n");
		err = ENOMEM;
		free(sc->pf_imap, M_ICE);
		goto free_imgr;
	}

	/* Mark every entry unassigned until vectors are actually mapped */
	for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) {
		sc->pf_imap[i] = ICE_INVALID_RES_IDX;
		sc->rdma_imap[i] = ICE_INVALID_RES_IDX;
	}

	return (0);

free_imgr:
	ice_resmgr_destroy(&sc->dev_imgr);
	return (err);
}

/**
 * ice_free_intr_tracking - Free PF interrupt tracking structures
 * @sc: device softc structure
 *
 * Frees the interrupt resource allocation manager and the PF's owned maps.
 *
 * VF maps are released when the owning VF's are destroyed, which should always
 * happen before this function is called.
 */
void
ice_free_intr_tracking(struct ice_softc *sc)
{
	/* Release and free each map only if it was actually allocated */
	if (sc->pf_imap) {
		ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
				       sc->lan_vectors);
		free(sc->pf_imap, M_ICE);
		sc->pf_imap = NULL;
	}
	if (sc->rdma_imap) {
		ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
				       sc->lan_vectors);
		free(sc->rdma_imap, M_ICE);
		sc->rdma_imap = NULL;
	}

	ice_resmgr_destroy(&sc->dev_imgr);

	ice_resmgr_destroy(&sc->os_imgr);
}

/**
 * ice_apply_supported_speed_filter - Mask off unsupported speeds
 * @report_speeds: bit-field for the desired link speeds
 * @mod_type: type of module/sgmii connection we have
 *
 * Given a bitmap of the desired lenient mode link speeds,
 * this function will mask off the speeds that are not currently
 * supported by the device.
 */
static u16
ice_apply_supported_speed_filter(u16 report_speeds, u8 mod_type)
{
	u16 speed_mask;
	enum { IS_SGMII, IS_SFP, IS_QSFP } module;

	/*
	 * The SFF specification says 0 is unknown, so we'll
	 * treat it like we're connected through SGMII for now.
	 * This may need revisiting if a new type is supported
	 * in the future.
	 */
	switch (mod_type) {
	case 0:
		module = IS_SGMII;
		break;
	case 3:
		module = IS_SFP;
		break;
	default:
		module = IS_QSFP;
		break;
	}

	/* We won't offer anything lower than 100M for any part,
	 * but we'll need to mask off other speeds based on the
	 * device and module type.
	 *
	 * The ~(bit - 1) arithmetic keeps the named speed bit and all
	 * higher bits, assuming ICE_AQ_LINK_SPEED_* are single-bit flags
	 * ordered by speed.
	 */
	speed_mask = ~((u16)ICE_AQ_LINK_SPEED_100MB - 1);
	if ((report_speeds & ICE_AQ_LINK_SPEED_10GB) && (module == IS_SFP))
		speed_mask = ~((u16)ICE_AQ_LINK_SPEED_1000MB - 1);
	if (report_speeds & ICE_AQ_LINK_SPEED_25GB)
		speed_mask = ~((u16)ICE_AQ_LINK_SPEED_1000MB - 1);
	if (report_speeds & ICE_AQ_LINK_SPEED_50GB) {
		speed_mask = ~((u16)ICE_AQ_LINK_SPEED_1000MB - 1);
		if (module == IS_QSFP)
			speed_mask = ~((u16)ICE_AQ_LINK_SPEED_10GB - 1);
	}
	if ((report_speeds & ICE_AQ_LINK_SPEED_100GB) ||
	    (report_speeds & ICE_AQ_LINK_SPEED_200GB))
		speed_mask = ~((u16)ICE_AQ_LINK_SPEED_25GB - 1);
	return (report_speeds & speed_mask);
}

/**
 * ice_init_health_events - Enable FW health event reporting
 * @sc: device softc
 *
 * Will try to enable firmware health event reporting, but shouldn't
 * cause any grief (to the caller) if this fails.
 */
void
ice_init_health_events(struct ice_softc *sc)
{
	int status;
	u8 health_mask;

	/* Skip when the feature is not supported or disabled by the admin */
	if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) ||
	    (!sc->enable_health_events))
		return;

	health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
		      ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;

	status = ice_aq_set_health_status_config(&sc->hw, health_mask, NULL);
	if (status)
		device_printf(sc->dev,
			      "Failed to enable firmware health events, err %s aq_err %s\n",
			      ice_status_str(status),
			      ice_aq_str(sc->hw.adminq.sq_last_status));
	else
		/* Only mark the feature enabled once FW accepted the config */
		ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_en);
}

/**
 * ice_print_health_status_string - Print message for given FW health event
 * @dev: the PCIe device
 * @elem: health status element containing status code
 *
 * A rather large list of possible health status codes and their associated
 * messages.
 */
static void
ice_print_health_status_string(device_t dev,
			       struct ice_aqc_health_status_elem *elem)
{
	/* Status code arrives little-endian from firmware */
	u16 status_code = le16toh(elem->health_status_code);

	/* Each case prints a description of the event followed by one or
	 * more suggested remediation steps. Unknown codes are silently
	 * ignored (default case). */
	switch (status_code) {
	case ICE_AQC_HEALTH_STATUS_INFO_RECOVERY:
		device_printf(dev, "The device is in firmware recovery mode.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS:
		device_printf(dev, "The flash chip cannot be accessed.\n");
		device_printf(dev, "Possible Solution: If issue persists, call customer support.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH:
		device_printf(dev, "NVM authentication failed.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH:
		device_printf(dev, "Option ROM authentication failed.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH:
		device_printf(dev, "DDP package failed.\n");
		device_printf(dev, "Possible Solution: Update to latest base driver and DDP package.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT:
		device_printf(dev, "NVM image is incompatible.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT:
		device_printf(dev, "Option ROM is incompatible.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB:
		device_printf(dev, "Supplied MIB file is invalid. DCB reverted to default configuration.\n");
		device_printf(dev, "Possible Solution: Disable FW-LLDP and check DCBx system configuration.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT:
		device_printf(dev, "An unsupported module was detected.\n");
		device_printf(dev, "Possible Solution 1: Check your cable connection.\n");
		device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE:
		device_printf(dev, "Module type is not supported.\n");
		device_printf(dev, "Possible Solution: Change or replace the module or cable.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL:
		device_printf(dev, "Module is not qualified.\n");
		device_printf(dev, "Possible Solution 1: Check your cable connection.\n");
		device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n");
		device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM:
		device_printf(dev, "Device cannot communicate with the module.\n");
		device_printf(dev, "Possible Solution 1: Check your cable connection.\n");
		device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n");
		device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT:
		device_printf(dev, "Unresolved module conflict.\n");
		device_printf(dev, "Possible Solution 1: Manually set speed/duplex or use Intel(R) Ethernet Port Configuration Tool to change the port option.\n");
		device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT:
		device_printf(dev, "Module is not present.\n");
		device_printf(dev, "Possible Solution 1: Check that the module is inserted correctly.\n");
		device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED:
		device_printf(dev, "Underutilized module.\n");
		device_printf(dev, "Possible Solution 1: Change or replace the module or cable.\n");
		device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT:
		device_printf(dev, "An unsupported module was detected.\n");
		device_printf(dev, "Possible Solution 1: Check your cable connection.\n");
		device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n");
		device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG:
		device_printf(dev, "Invalid link configuration.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS:
		device_printf(dev, "Port hardware access error.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE:
		device_printf(dev, "A port is unreachable.\n");
		device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n");
		device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED:
		device_printf(dev, "Port speed is limited due to module.\n");
		device_printf(dev, "Possible Solution: Change the module or use Intel(R) Ethernet Port Configuration Tool to configure the port option to match the current module speed.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT:
		device_printf(dev, "All configured link modes were attempted but failed to establish link.\n");
		device_printf(dev, "The device will restart the process to establish link.\n");
		device_printf(dev, "Possible Solution: Check link partner connection and configuration.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED:
		device_printf(dev, "Port speed is limited by PHY capabilities.\n");
		device_printf(dev, "Possible Solution 1: Change the module to align to port option.\n");
		device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO:
		device_printf(dev, "LOM topology netlist is corrupted.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_NETLIST:
		device_printf(dev, "Unrecoverable netlist error.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT:
		device_printf(dev, "Port topology conflict.\n");
		device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n");
		device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS:
		device_printf(dev, "Unrecoverable hardware access error.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME:
		device_printf(dev, "Unrecoverable runtime error.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	case ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT:
		device_printf(dev, "Link management engine failed to initialize.\n");
		device_printf(dev, "Possible Solution: Update to the latest NVM image.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_handle_health_status_event - helper function to output health status
 * @sc: device softc structure
 * @event: event received on a control queue
 *
 * Prints out the appropriate string based on the given Health Status Event
 * code.
 */
static void
ice_handle_health_status_event(struct ice_softc *sc,
			       struct ice_rq_event_info *event)
{
	struct ice_aqc_health_status_elem *health_info;
	u16 status_count;
	int i;

	if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_HEALTH_STATUS))
		return;

	health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
	status_count = le16toh(event->desc.params.get_health_status.health_status_count);

	/* Bound-check the FW-reported element count against the actual
	 * buffer size so we never walk past the end of msg_buf. */
	if (status_count > (event->buf_len / sizeof(*health_info))) {
		device_printf(sc->dev, "Received a health status event with invalid event count\n");
		return;
	}

	for (i = 0; i < status_count; i++) {
		ice_print_health_status_string(sc->dev, health_info);
		health_info++;
	}
}

/**
 * ice_set_default_local_lldp_mib - Possibly apply local LLDP MIB to FW
 * @sc: device softc structure
 *
 * This function needs to be called after link up; it makes sure the FW has
 * certain PFC/DCB settings. In certain configurations this will re-apply a
 * default local LLDP MIB configuration; this is intended to workaround a FW
 * behavior where these settings seem to be cleared on link up.
 */
void
ice_set_default_local_lldp_mib(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi;
	device_t dev = sc->dev;
	int status;

	/* Set Local MIB can disrupt flow control settings for
	 * non-DCB-supported devices.
	 */
	if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_DCB))
		return;

	pi = hw->port_info;

	/* Don't overwrite a custom SW configuration */
	if (!pi->qos_cfg.is_sw_lldp &&
	    !ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS))
		ice_set_default_local_mib_settings(sc);

	status = ice_set_dcb_cfg(pi);

	if (status)
		device_printf(dev,
			      "Error setting Local LLDP MIB: %s aq_err %s\n",
			      ice_status_str(status),
			      ice_aq_str(hw->adminq.sq_last_status));
}

/**
 * ice_sbuf_print_ets_cfg - Helper function to print ETS cfg
 * @sbuf: string buffer to print to
 * @name: prefix string to use
 * @ets: structure to pull values from
 *
 * A helper function for ice_sysctl_dump_dcbx_cfg(), this
 * formats the ETS rec and cfg TLVs into text.
 */
static void
ice_sbuf_print_ets_cfg(struct sbuf *sbuf, const char *name,
		       struct ice_dcb_ets_cfg *ets)
{
	sbuf_printf(sbuf, "%s.willing: %u\n", name, ets->willing);
	sbuf_printf(sbuf, "%s.cbs: %u\n", name, ets->cbs);
	sbuf_printf(sbuf, "%s.maxtcs: %u\n", name, ets->maxtcs);

	/* One entry per traffic class for each per-TC table */
	sbuf_printf(sbuf, "%s.prio_table:", name);
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
		sbuf_printf(sbuf, " %d", ets->prio_table[i]);
	sbuf_printf(sbuf, "\n");

	sbuf_printf(sbuf, "%s.tcbwtable:", name);
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
		sbuf_printf(sbuf, " %d", ets->tcbwtable[i]);
	sbuf_printf(sbuf, "\n");

	sbuf_printf(sbuf, "%s.tsatable:", name);
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
		sbuf_printf(sbuf, " %d", ets->tsatable[i]);
	sbuf_printf(sbuf, "\n");
}

/**
 * ice_sysctl_dump_dcbx_cfg - Print out DCBX/DCB config info
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: AQ define for either Local or Remote MIB
 * @req: sysctl request pointer
 *
 * Prints out DCB/DCBX configuration, including the contents
 * of either the local or remote MIB, depending on the value
 * used in arg2.
 */
static int
ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg = {};
	struct ice_dcbx_cfg dcb_buf = {};
	struct ice_dcbx_cfg *dcbcfg;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *sbuf;
	int status;
	u8 maxtcs, dcbx_status, is_sw_lldp;

	UNREFERENCED_PARAMETER(oidp);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	is_sw_lldp = hw->port_info->qos_cfg.is_sw_lldp;

	/* The driver doesn't receive a Remote MIB via SW */
	if (is_sw_lldp && arg2 == ICE_AQ_LLDP_MIB_REMOTE)
		return (ENOENT);

	/* In SW LLDP mode, print the driver's cached local config; in FW
	 * LLDP mode, query the requested MIB from FW into dcb_buf instead. */
	dcbcfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
	if (!is_sw_lldp) {
		/* Collect information from the FW in FW LLDP mode */
		dcbcfg = &dcb_buf;
		status = ice_aq_get_dcb_cfg(hw, (u8)arg2,
		    ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbcfg);
		if (status && arg2 == ICE_AQ_LLDP_MIB_REMOTE &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
			device_printf(dev,
			    "Unable to query Remote MIB; port has not received one yet\n");
			return (ENOENT);
		}
		if (status) {
			device_printf(dev, "Unable to query LLDP MIB, err %s aq_err %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			return (EIO);
		}
	}

	/* A successful CEE query implies CEE mode; ENOENT implies IEEE */
	status = ice_aq_get_cee_dcb_cfg(hw, &cee_cfg, NULL);
	if (!status)
		dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		dcbcfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
	else
		device_printf(dev, "Get CEE DCB Cfg AQ cmd err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));

	maxtcs = hw->func_caps.common_cap.maxtc;
	dcbx_status = ice_get_dcbx_status(hw);

	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);

	/* Do the actual printing */
	sbuf_printf(sbuf, "\n");
	sbuf_printf(sbuf, "SW LLDP mode: %d\n", is_sw_lldp);
	sbuf_printf(sbuf, "Function caps maxtcs: %d\n", maxtcs);
	sbuf_printf(sbuf, "dcbx_status: %d\n", dcbx_status);

	sbuf_printf(sbuf, "numapps: %u\n", dcbcfg->numapps);
	sbuf_printf(sbuf, "CEE TLV status: %u\n", dcbcfg->tlv_status);
	sbuf_printf(sbuf, "pfc_mode: %s\n",
	    (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) ? "DSCP" : "VLAN");
	sbuf_printf(sbuf, "dcbx_mode: %s\n",
	    (dcbcfg->dcbx_mode == ICE_DCBX_MODE_IEEE) ? "IEEE" :
	    (dcbcfg->dcbx_mode == ICE_DCBX_MODE_CEE) ? "CEE" :
	    "Unknown");

	ice_sbuf_print_ets_cfg(sbuf, "etscfg", &dcbcfg->etscfg);
	ice_sbuf_print_ets_cfg(sbuf, "etsrec", &dcbcfg->etsrec);

	sbuf_printf(sbuf, "pfc.willing: %u\n", dcbcfg->pfc.willing);
	sbuf_printf(sbuf, "pfc.mbc: %u\n", dcbcfg->pfc.mbc);
	sbuf_printf(sbuf, "pfc.pfccap: 0x%0x\n", dcbcfg->pfc.pfccap);
	sbuf_printf(sbuf, "pfc.pfcena: 0x%0x\n", dcbcfg->pfc.pfcena);

	if (arg2 == ICE_AQ_LLDP_MIB_LOCAL) {
		/* 64-entry DSCP-to-TC map, printed as an 8x8 grid */
		sbuf_printf(sbuf, "dscp_map:\n");
		for (int i = 0; i < 8; i++) {
			for (int j = 0; j < 8; j++)
				sbuf_printf(sbuf, " %d",
				    dcbcfg->dscp_map[i * 8 + j]);
			sbuf_printf(sbuf, "\n");
		}

		sbuf_printf(sbuf, "\nLocal registers:\n");
		sbuf_printf(sbuf, "PRTDCB_GENC.NUMTC: %d\n",
		    (rd32(hw, PRTDCB_GENC) & PRTDCB_GENC_NUMTC_M)
		    >> PRTDCB_GENC_NUMTC_S);
		sbuf_printf(sbuf, "PRTDCB_TUP2TC: 0x%0x\n",
		    (rd32(hw, PRTDCB_TUP2TC)));
		sbuf_printf(sbuf, "PRTDCB_RUP2TC: 0x%0x\n",
		    (rd32(hw, PRTDCB_RUP2TC)));
		sbuf_printf(sbuf, "GLDCB_TC2PFC: 0x%0x\n",
		    (rd32(hw, GLDCB_TC2PFC)));
	}

	/* Finish */
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_sysctl_dump_vsi_cfg - print PF LAN VSI configuration
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * XXX: This could be extended to apply to arbitrary PF-owned VSIs,
 * but for simplicity, this only works on the PF's LAN VSI.
 */
static int
ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_vsi_ctx ctx = { 0 };
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *sbuf;
	int status;

	UNREFERENCED_PARAMETER(oidp);
	UNREFERENCED_PARAMETER(arg2);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	/* Get HW absolute index of a VSI */
	ctx.vsi_num = ice_get_hw_vsi_num(hw, sc->pf_vsi.idx);

	status = ice_aq_get_vsi_params(hw, &ctx, NULL);
	if (status) {
		device_printf(dev, "Get VSI AQ call failed, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);

	/* Do the actual printing */
	sbuf_printf(sbuf, "\n");

	sbuf_printf(sbuf, "VSI NUM: %d\n", ctx.vsi_num);
	sbuf_printf(sbuf, "VF NUM: %d\n", ctx.vf_num);
	sbuf_printf(sbuf, "VSIs allocated: %d\n", ctx.vsis_allocd);
	sbuf_printf(sbuf, "VSIs unallocated: %d\n", ctx.vsis_unallocated);

	sbuf_printf(sbuf, "Rx Queue Map method: %d\n",
	    LE16_TO_CPU(ctx.info.mapping_flags));
	/* The PF VSI is always contiguous, so there's no if-statement here */
	sbuf_printf(sbuf, "Rx Queue base: %d\n",
	    LE16_TO_CPU(ctx.info.q_mapping[0]));
	sbuf_printf(sbuf, "Rx Queue count: %d\n",
	    LE16_TO_CPU(ctx.info.q_mapping[1]));

	sbuf_printf(sbuf, "TC qbases :");
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		sbuf_printf(sbuf, " %4d",
		    ctx.info.tc_mapping[i] & ICE_AQ_VSI_TC_Q_OFFSET_M);
	}
	sbuf_printf(sbuf, "\n");

	sbuf_printf(sbuf, "TC qcounts :");
	for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* Queue count is encoded as a power-of-two exponent */
		sbuf_printf(sbuf, " %4d",
		    1 << (ctx.info.tc_mapping[i] >> ICE_AQ_VSI_TC_Q_NUM_S));
	}

	/* Finish */
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

/**
 * ice_get_tx_rx_equalizations -- read serdes tx rx equalization params
 * @hw: pointer to the HW struct
 * @serdes_num: represents the serdes number
 * @ptr: structure to read all serdes parameter for given serdes
 *
 * returns all serdes equalization parameter supported per serdes number
 */
static int
ice_get_tx_rx_equalizations(struct ice_hw *hw, u8 serdes_num,
			    struct ice_serdes_equalization *ptr)
{
	int err = 0;

	if (!ptr)
		return (EOPNOTSUPP);

/* Shorthand for one equalization-parameter AQ query into ptr->value.
 * NOTE(review): this macro is not #undef'd after use. */
#define ICE_GET_PHY_EQUALIZATION(equ, dir, value) \
	ice_aq_get_phy_equalization(hw, equ, dir, serdes_num, &(ptr->value))

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_PRE1,
	    ICE_AQC_OP_CODE_RX_EQU, rx_equalization_pre1);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_PRE2,
	    ICE_AQC_OP_CODE_RX_EQU, rx_equalization_pre2);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_POST1,
	    ICE_AQC_OP_CODE_RX_EQU, rx_equalization_post1);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_BFLF,
	    ICE_AQC_OP_CODE_RX_EQU, rx_equalization_bflf);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_BFHF,
	    ICE_AQC_OP_CODE_RX_EQU, rx_equalization_bfhf);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_DRATE,
	    ICE_AQC_OP_CODE_RX_EQU, rx_equalization_drate);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE1,
	    ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre1);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE2,
	    ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre2);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE3,
	    ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre3);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_ATTEN,
	    ICE_AQC_OP_CODE_TX_EQU, tx_equalization_atten);
	if (err)
		return err;

	err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_POST1,
	    ICE_AQC_OP_CODE_TX_EQU, tx_equalization_post1);
	if (err)
		return err;

	return (0);
}

/**
 * ice_fec_counter_read - reads FEC stats from PHY
 * @hw: pointer to the HW struct
 * @receiver_id: pcsquad at registerlevel
 * @reg_offset: register for the current request
 * @output: pointer to the caller-supplied buffer to return requested fec stats
 *
 * Returns fec stats from phy
 */
static int
ice_fec_counter_read(struct ice_hw *hw, u32 receiver_id, u32 reg_offset,
		     u16 *output)
{
	u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
	struct ice_sbq_msg_input msg = {};
	int err = 0;

	/* NOTE(review): msg is already zero-initialized above, so this
	 * memset is redundant but harmless. */
	memset(&msg, 0, sizeof(msg));
	msg.msg_addr_low = ICE_LO_WORD(reg_offset);
	msg.msg_addr_high = ICE_LO_DWORD(receiver_id);
	msg.opcode = ice_sbq_msg_rd;
	msg.dest_dev = rmn_0;

	err = ice_sbq_rw_reg(hw, &msg, flag);
	if (err) {
		return err;
	}

	/* Only the low 16 bits of the sideband response are meaningful here */
	*output = ICE_LO_WORD(msg.data);
	return (0);
}

/**
 * ice_get_port_fec_stats - returns fec correctable, uncorrectable stats per pcsquad, pcsport
 * @hw: pointer to the HW struct
 * @pcs_quad: pcsquad for input port
 * @pcs_port: pcsport for input port
 * @fec_stats: buffer to hold fec statistics for given port
 *
 * Returns fec stats
 */
static int
ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
		       struct ice_fec_stats_to_sysctl *fec_stats)
{
	u32 uncorr_low_reg = 0, uncorr_high_reg = 0;
	u16 uncorr_low_val = 0, uncorr_high_val = 0;
	u32 corr_low_reg = 0, corr_high_reg = 0;
	u16 corr_low_val = 0, corr_high_val = 0;
	u32 receiver_id = 0;
	int err;

	/* Pick the per-port register set for the four counter halves */
	switch (pcs_port) {
	case 0:
		corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT0;
		corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT0;
		uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT0;
		uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT0;
		break;
	case 1:
		corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT1;
		corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT1;
		uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT1;
		uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT1;
		break;
	case 2:
		corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT2;
		corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT2;
		uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT2;
		uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT2;
		break;
	case 3:
		corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT3;
		corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT3;
		uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT3;
		uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT3;
		break;
	default:
		return (EINVAL);
	}

	if (pcs_quad == 0)
		receiver_id = ICE_RS_FEC_RECEIVER_ID_PCS0; /* MTIP PCS Quad 0 -FEC */
	else if (pcs_quad == 1)
		receiver_id = ICE_RS_FEC_RECEIVER_ID_PCS1; /* MTIP PCS Quad 1 -FEC */
	else
		return (EINVAL);

	err = ice_fec_counter_read(hw, receiver_id, corr_low_reg,
				   &corr_low_val);
	if (err)
		return err;

	err = ice_fec_counter_read(hw, receiver_id, corr_high_reg,
				   &corr_high_val);
	if (err)
		return err;

	err = ice_fec_counter_read(hw, receiver_id, uncorr_low_reg,
				   &uncorr_low_val);
	if (err)
		return err;

	err = ice_fec_counter_read(hw, receiver_id, uncorr_high_reg,
				   &uncorr_high_val);
	if (err)
		return err;

	fec_stats->fec_corr_cnt_low = corr_low_val;
	fec_stats->fec_corr_cnt_high = corr_high_val;
	fec_stats->fec_uncorr_cnt_low = uncorr_low_val;
	fec_stats->fec_uncorr_cnt_high = uncorr_high_val;

	return (0);
}

/**
 * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
 * @hw: pointer to the HW struct
 *
 * Returns True : when serdes is muxed
 *         False: when serdes is not muxed
 *
 * NOTE(review): 0xB81E0/0x4 are undocumented magic register/bit values
 * here — presumably a topology strap register; confirm against the
 * hardware spec.
 */
static bool
ice_is_serdes_muxed(struct ice_hw *hw)
{
	return (rd32(hw, 0xB81E0) & 0x4);
}

/**
 * ice_get_maxspeed - Get the max speed for given lport
 * @hw: pointer to the HW struct
 * @lport: logical port for which max speed is requested
 * @max_speed: return max speed for input lport
 */
static int
ice_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	bool active_valid, pending_valid;
	u8 active_idx, pending_idx;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, lport,
	    true, &active_idx, &active_valid, &pending_idx, &pending_valid);
	if (status || active_idx >= ICE_AQC_PORT_OPT_MAX) {
		ice_debug(hw, ICE_DBG_PHY, "Port split read err: %d\n", status);
		return (EIO);
	}

	if (active_valid) {
		ice_debug(hw, ICE_DBG_PHY, "Active idx: %d\n", active_idx);
	} else {
		ice_debug(hw, ICE_DBG_PHY, "No valid Active option\n");
		return (EINVAL);
	}

	/* Report the max lane speed of the currently-active port option */
	*max_speed = options[active_idx].max_lane_speed;
	return (0);
}

/**
 * ice_update_port_topology - update port topology
 * @lport: logical port
for which physical info requested
 * @port_topology: buffer to hold port topology
 * @is_muxed: serdes is muxed in hardware
 *
 * Fills in the fixed lport -> (pcs quad, pcs port, primary serdes lane)
 * mapping. For odd lports the primary lane depends on whether the serdes
 * is muxed.
 */
static int
ice_update_port_topology(u8 lport, struct ice_port_topology *port_topology,
			 bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed == true)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed == true)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return (EINVAL);
	}

	return 0;
}

/**
 * ice_get_port_topology - returns physical topology
 * @hw: pointer to the HW struct
 * @lport: logical port for which physical info requested
 * @port_topology: buffer to hold port topology
 *
 * Returns the physical component associated with the Port like pcsquad, pcsport, serdesnumber
 */
static int
ice_get_port_topology(struct ice_hw *hw, u8 lport,
		      struct ice_port_topology *port_topology)
{
	struct ice_aqc_get_link_topo cmd;
	bool is_muxed = false;
	u8 cage_type = 0;
	u16 node_handle;
	u8 ctx = 0;
	int err;

	if (!hw || !port_topology)
		return (EINVAL);

	/* Devices at or above this ID use a simple fixed two-port mapping
	 * with a single serdes lane per port. */
	if (hw->device_id >= ICE_DEV_ID_E810_XXV_BACKPLANE) {
		port_topology->serdes_lane_count = 1;
		if (lport == 0) {
			port_topology->pcs_quad_select = 0;
			port_topology->pcs_port = 0;
			port_topology->primary_serdes_lane = 0;
		} else if (lport == 1) {
			port_topology->pcs_quad_select = 1;
			port_topology->pcs_port = 0;
			port_topology->primary_serdes_lane = 1;
		} else {
			return (EINVAL);
		}
		return (0);
	}

	/* Query the netlist for the module cage type on this port */
	memset(&cmd, 0, sizeof(cmd));
	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
	cmd.addr.topo_params.node_type_ctx = ctx;
	cmd.addr.topo_params.index = 0;
	cmd.addr.topo_params.lport_num = 0;
	cmd.addr.topo_params.lport_num_valid = 0;

	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
	if (err)
		return (EINVAL);

	is_muxed = ice_is_serdes_muxed(hw);

	err = ice_update_port_topology(lport, port_topology, is_muxed);
	if (err)
		return err;

	/* Derive the lane count from the cage type; QSFP cages also
	 * consider the active port option's max lane speed. */
	if (cage_type == 0x11 ||	/* SFP */
	    cage_type == 0x12) {	/* SFP28 */
		port_topology->serdes_lane_count = 1;
	} else if (cage_type == 0x13 ||	/* QSFP */
		   cage_type == 0x14) {	/* QSFP28 */
		u8 max_speed = 0;

		err = ice_get_maxspeed(hw, port_topology->primary_serdes_lane,
		    &max_speed);
		if (err)
			return err;

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_M)
			device_printf(ice_hw_to_dev(hw),
			    "%s: WARNING: reported max_lane_speed is N/A\n",
			    __func__);

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
			port_topology->serdes_lane_count = 4;
		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
			port_topology->serdes_lane_count = 2;
		else
			port_topology->serdes_lane_count = 1;
	} else
		return (EINVAL);

	ice_debug(hw, ICE_DBG_PHY, "%s: Port Topology (lport %d):\n",
	    __func__, lport);
	ice_debug(hw, ICE_DBG_PHY, "serdes lane count %d\n",
	    port_topology->serdes_lane_count);
	ice_debug(hw, ICE_DBG_PHY, "pcs quad select %d\n",
	    port_topology->pcs_quad_select);
	ice_debug(hw, ICE_DBG_PHY, "pcs port %d\n",
	    port_topology->pcs_port);
	ice_debug(hw, ICE_DBG_PHY, "primary serdes lane %d\n",
	    port_topology->primary_serdes_lane);

	return (0);
}

/**
 *
ice_sysctl_dump_phy_stats - print PHY stats * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer */ static int ice_sysctl_dump_phy_stats(SYSCTL_HANDLER_ARGS) { struct ice_regdump_to_sysctl ice_prv_regs_buf = {}; struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_port_topology port_topology; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi; device_t dev = sc->dev; u8 serdes_num = 0; unsigned int i; int err = 0; struct sbuf *sbuf; pi = hw->port_info; if (!pi) { device_printf(dev, "Port info structure is null\n"); return (EINVAL); } UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); UNREFERENCED_PARAMETER(req); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); if (ice_get_port_topology(hw, pi->lport, &port_topology) != 0) { device_printf(dev, "Extended register dump failed for Lport %d\n", pi->lport); return (EIO); } if (port_topology.serdes_lane_count > ICE_MAX_SERDES_LANE_COUNT) { device_printf(dev, "Extended register dump failed: Lport %d Serdes count %d\n", pi->lport, port_topology.serdes_lane_count); return (EINVAL); } sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); /* Get serdes equalization parameter for available serdes */ for (i = 0; i < port_topology.serdes_lane_count; i++) { serdes_num = port_topology.primary_serdes_lane + i; err = ice_get_tx_rx_equalizations(hw, serdes_num, &(ice_prv_regs_buf.equalization[i])); if (err) { device_printf(dev, "Serdes equalization get failed Lport %d Serdes %d Err %d\n", pi->lport,serdes_num, err); sbuf_finish(sbuf); sbuf_delete(sbuf); return (EIO); } sbuf_printf(sbuf, "\nSerdes lane: %d\n", i); sbuf_printf(sbuf, "RX PRE1 = %d\n", ice_prv_regs_buf.equalization[i].rx_equalization_pre1); sbuf_printf(sbuf, "RX PRE2 = %d\n", (s16)ice_prv_regs_buf.equalization[i].rx_equalization_pre2); sbuf_printf(sbuf, "RX POST1 = %d\n", ice_prv_regs_buf.equalization[i].rx_equalization_post1); sbuf_printf(sbuf, "RX BFLF = %d\n", 
ice_prv_regs_buf.equalization[i].rx_equalization_bflf); sbuf_printf(sbuf, "RX BFHF = %d\n", ice_prv_regs_buf.equalization[i].rx_equalization_bfhf); sbuf_printf(sbuf, "RX DRATE = %d\n", (s16)ice_prv_regs_buf.equalization[i].rx_equalization_drate); sbuf_printf(sbuf, "TX PRE1 = %d\n", ice_prv_regs_buf.equalization[i].tx_equalization_pre1); sbuf_printf(sbuf, "TX PRE2 = %d\n", ice_prv_regs_buf.equalization[i].tx_equalization_pre2); sbuf_printf(sbuf, "TX PRE3 = %d\n", ice_prv_regs_buf.equalization[i].tx_equalization_pre3); sbuf_printf(sbuf, "TX POST1 = %d\n", ice_prv_regs_buf.equalization[i].tx_equalization_post1); sbuf_printf(sbuf, "TX ATTEN = %d\n", ice_prv_regs_buf.equalization[i].tx_equalization_atten); } /* Get fec correctable , uncorrectable counter */ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select, port_topology.pcs_port, &(ice_prv_regs_buf.stats)); if (err) { device_printf(dev, "failed to get FEC stats Lport %d Err %d\n", pi->lport, err); sbuf_finish(sbuf); sbuf_delete(sbuf); return (EIO); } sbuf_printf(sbuf, "\nRS FEC Corrected codeword count = %d\n", ((u32)ice_prv_regs_buf.stats.fec_corr_cnt_high << 16) | ice_prv_regs_buf.stats.fec_corr_cnt_low); sbuf_printf(sbuf, "RS FEC Uncorrected codeword count = %d\n", ((u32)ice_prv_regs_buf.stats.fec_uncorr_cnt_high << 16) | ice_prv_regs_buf.stats.fec_uncorr_cnt_low); /* Finish */ sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_ets_str_to_tbl - Parse string into ETS table * @str: input string to parse * @table: output eight values used for ETS values * @limit: max valid value to accept for ETS values * * Parses a string and converts the eight values within * into a table that can be used in setting ETS settings * in a MIB. * * @return 0 on success, EINVAL if a parsed value is * not between 0 and limit. 
*/ static int ice_ets_str_to_tbl(const char *str, u8 *table, u8 limit) { const char *str_start = str; char *str_end; long token; for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { token = strtol(str_start, &str_end, 0); if (token < 0 || token > limit) return (EINVAL); table[i] = (u8)token; str_start = (str_end + 1); } return (0); } /** * ice_check_ets_bw - Check if ETS bw vals are valid * @table: eight values used for ETS bandwidth * * @return true if the sum of all 8 values in table * equals 100. */ static bool ice_check_ets_bw(u8 *table) { int sum = 0; for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) sum += (int)table[i]; return (sum == 100); } /** * ice_cfg_pba_num - Determine if PBA Number is retrievable * @sc: the device private softc structure * * Sets the feature flag for the existence of a PBA number * based on the success of the read command. This does not * cache the result. */ void ice_cfg_pba_num(struct ice_softc *sc) { u8 pba_string[32] = ""; if ((ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HAS_PBA)) && (ice_read_pba_string(&sc->hw, pba_string, sizeof(pba_string)) == 0)) ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_en); } /** * ice_sysctl_query_port_ets - print Port ETS Config from AQ * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer */ static int ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_aqc_port_ets_elem port_ets = { 0 }; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi; device_t dev = sc->dev; struct sbuf *sbuf; int status; int i = 0; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); pi = hw->port_info; status = ice_aq_query_port_ets(pi, &port_ets, sizeof(port_ets), NULL); if (status) { device_printf(dev, "Query Port ETS AQ call failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } sbuf = 
sbuf_new_for_sysctl(NULL, NULL, 128, req); /* Do the actual printing */ sbuf_printf(sbuf, "\n"); sbuf_printf(sbuf, "Valid TC map: 0x%x\n", port_ets.tc_valid_bits); sbuf_printf(sbuf, "TC BW %%:"); ice_for_each_traffic_class(i) { sbuf_printf(sbuf, " %3d", port_ets.tc_bw_share[i]); } sbuf_printf(sbuf, "\n"); sbuf_printf(sbuf, "EIR profile ID: %d\n", port_ets.port_eir_prof_id); sbuf_printf(sbuf, "CIR profile ID: %d\n", port_ets.port_cir_prof_id); sbuf_printf(sbuf, "TC Node prio: 0x%x\n", port_ets.tc_node_prio); sbuf_printf(sbuf, "TC Node TEIDs:\n"); ice_for_each_traffic_class(i) { sbuf_printf(sbuf, "%d: %d\n", i, port_ets.tc_node_teid[i]); } /* Finish */ sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dscp2tc_map - Map DSCP to hardware TCs * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: which eight DSCP to UP mappings to configure (0 - 7) * @req: sysctl request pointer * * Gets or sets the current DSCP to UP table cached by the driver. Since there * are 64 possible DSCP values to configure, this sysctl only configures * chunks of 8 in that space at a time. * * This sysctl is only relevant in DSCP mode, and will only function in SW DCB * mode. 
*/ static int ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_dcbx_cfg *local_dcbx_cfg; struct ice_port_info *pi; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; struct sbuf *sbuf; int ret; /* Store input rates from user */ char dscp_user_buf[128] = ""; u8 new_dscp_table_seg[ICE_MAX_TRAFFIC_CLASS] = {}; if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); if (req->oldptr == NULL && req->newptr == NULL) { ret = SYSCTL_OUT(req, 0, 128); return (ret); } pi = hw->port_info; local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; sbuf = sbuf_new(NULL, dscp_user_buf, 128, SBUF_FIXEDLEN | SBUF_INCLUDENUL); /* Format DSCP-to-UP data for output */ for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { sbuf_printf(sbuf, "%d", local_dcbx_cfg->dscp_map[arg2 * 8 + i]); if (i != ICE_MAX_TRAFFIC_CLASS - 1) sbuf_printf(sbuf, ","); } sbuf_finish(sbuf); sbuf_delete(sbuf); /* Read in the new DSCP mapping values */ ret = sysctl_handle_string(oidp, dscp_user_buf, sizeof(dscp_user_buf), req); if ((ret) || (req->newptr == NULL)) return (ret); /* Don't allow setting changes in FW DCB mode */ if (!hw->port_info->qos_cfg.is_sw_lldp) { device_printf(dev, "%s: DSCP mapping is not allowed in FW DCBX mode\n", __func__); return (EINVAL); } /* Convert 8 values in a string to a table; this is similar to what * needs to be done for ETS settings, so this function can be re-used * for that purpose. 
*/ ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg, ICE_MAX_TRAFFIC_CLASS - 1); if (ret) { device_printf(dev, "%s: Could not parse input DSCP2TC table: %s\n", __func__, dscp_user_buf); return (ret); } memcpy(&local_dcbx_cfg->dscp_map[arg2 * 8], new_dscp_table_seg, sizeof(new_dscp_table_seg)); local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING; status = ice_set_dcb_cfg(pi); if (status) { device_printf(dev, "%s: Failed to set DCB config; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } ice_do_dcb_reconfig(sc, false); return (0); } /** * ice_handle_debug_dump_ioctl - Handle a debug dump ioctl request * @sc: the device private softc * @ifd: ifdrv ioctl request pointer */ int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd) { size_t ifd_len = ifd->ifd_len; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; struct ice_debug_dump_cmd *ddc; int status; int err = 0; /* Returned arguments from the Admin Queue */ u16 ret_buf_size = 0; u16 ret_next_cluster = 0; u16 ret_next_table = 0; u32 ret_next_index = 0; /* * ifioctl forwards SIOCxDRVSPEC to iflib without performing * a privilege check. In turn, iflib forwards the ioctl to the driver * without performing a privilege check. Perform one here to ensure * that non-privileged threads cannot access this interface. */ err = priv_check(curthread, PRIV_DRIVER); if (err) return (err); if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { device_printf(dev, "%s: Driver must rebuild data structures after a reset. Operation aborted.\n", __func__); return (EBUSY); } if (ifd_len < sizeof(*ddc)) { device_printf(dev, "%s: ifdrv length is too small. 
Got %zu, but expected %zu\n", __func__, ifd_len, sizeof(*ddc)); return (EINVAL); } if (ifd->ifd_data == NULL) { device_printf(dev, "%s: ifd data buffer not present.\n", __func__); return (EINVAL); } ddc = (struct ice_debug_dump_cmd *)malloc(ifd_len, M_ICE, M_ZERO | M_NOWAIT); if (!ddc) return (ENOMEM); /* Copy the NVM access command and data in from user space */ /* coverity[tainted_data_argument] */ err = copyin(ifd->ifd_data, ddc, ifd_len); if (err) { device_printf(dev, "%s: Copying request from user space failed, err %s\n", __func__, ice_err_str(err)); goto out; } /* The data_size arg must be at least 1 for the AQ cmd to work */ if (ddc->data_size == 0) { device_printf(dev, "%s: data_size must be greater than 0\n", __func__); err = EINVAL; goto out; } /* ...and it can't be too long */ if (ddc->data_size > (ifd_len - sizeof(*ddc))) { device_printf(dev, "%s: data_size (%d) is larger than ifd_len space (%zu)?\n", __func__, ddc->data_size, ifd_len - sizeof(*ddc)); err = EINVAL; goto out; } /* Make sure any possible data buffer space is zeroed */ memset(ddc->data, 0, ifd_len - sizeof(*ddc)); status = ice_aq_get_internal_data(hw, ddc->cluster_id, ddc->table_id, ddc->offset, (u8 *)ddc->data, ddc->data_size, &ret_buf_size, &ret_next_cluster, &ret_next_table, &ret_next_index, NULL); ice_debug(hw, ICE_DBG_DIAG, "%s: ret_buf_size %d, ret_next_table %d, ret_next_index %d\n", __func__, ret_buf_size, ret_next_table, ret_next_index); if (status) { device_printf(dev, "%s: Get Internal Data AQ command failed, err %s aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); goto aq_error; } ddc->table_id = ret_next_table; ddc->offset = ret_next_index; ddc->data_size = ret_buf_size; ddc->cluster_id = ret_next_cluster; /* Copy the possibly modified contents of the handled request out */ err = copyout(ddc, ifd->ifd_data, ifd->ifd_len); if (err) { device_printf(dev, "%s: Copying response back to user space failed, err %s\n", __func__, ice_err_str(err)); 
goto out; } aq_error: /* Convert private status to an error code for proper ioctl response */ switch (status) { case 0: err = (0); break; case ICE_ERR_NO_MEMORY: err = (ENOMEM); break; case ICE_ERR_OUT_OF_RANGE: err = (ENOTTY); break; case ICE_ERR_AQ_ERROR: err = (EIO); break; case ICE_ERR_PARAM: default: err = (EINVAL); break; } out: free(ddc, M_ICE); return (err); } /** * ice_sysctl_allow_no_fec_mod_in_auto - Change Auto FEC behavior * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Allows user to let "No FEC" mode to be used in "Auto" * FEC mode during FEC negotiation. This is only supported * on newer firmware versions. */ static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; u8 user_flag; int ret; UNREFERENCED_PARAMETER(arg2); ret = priv_check(curthread, PRIV_DRIVER); if (ret) return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); user_flag = (u8)sc->allow_no_fec_mod_in_auto; ret = sysctl_handle_bool(oidp, &user_flag, 0, req); if ((ret) || (req->newptr == NULL)) return (ret); if (!ice_fw_supports_fec_dis_auto(hw)) { log(LOG_INFO, "%s: Enabling or disabling of auto configuration of modules that don't support FEC is unsupported by the current firmware\n", device_get_nameunit(dev)); return (ENODEV); } if (user_flag == (bool)sc->allow_no_fec_mod_in_auto) return (0); sc->allow_no_fec_mod_in_auto = (u8)user_flag; if (sc->allow_no_fec_mod_in_auto) log(LOG_INFO, "%s: Enabled auto configuration of No FEC modules\n", device_get_nameunit(dev)); else log(LOG_INFO, "%s: Auto configuration of No FEC modules reset to NVM defaults\n", device_get_nameunit(dev)); return (0); } -/** - * ice_print_dual_nac_info - Print NAC status/ID information - * @sc: device softc structure - * - * Prints out information about the NAC mode if the device is capable of - * being part of a system 
with multiple NACs. - * - * @pre Must be called after ice_init_hw() and ice_init_device_features() - * sometime during driver load. - */ -void -ice_print_dual_nac_info(struct ice_softc *sc) -{ - struct ice_hw *hw = &sc->hw; - device_t dev = sc->dev; - bool is_dual_nac, is_primary_nac; - u8 cpk_id; - - is_dual_nac = (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); - is_primary_nac = (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M); - cpk_id = hw->dev_caps.nac_topo.id; - - if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DUAL_NAC)) { - log(LOG_INFO, "%s: In %s NAC mode\n", - device_get_nameunit(dev), - is_dual_nac ? "Dual" : "Single"); - - if (is_dual_nac) { - ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_en); - log(LOG_INFO, - "%s: PF is configured in %s mode with IP instance ID %u\n", - device_get_nameunit(dev), - is_primary_nac ? "primary" : "secondary", - cpk_id); - } - } -} - /** * ice_sysctl_temperature - Retrieve NIC temp via AQ command * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * If ICE_DBG_DIAG is set in the debug.debug_mask sysctl, then this will print * temperature threshold information in the kernel message log, too. 
*/ static int ice_sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct ice_aqc_get_sensor_reading_resp resp; struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int status; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_aq_get_sensor_reading(hw, ICE_AQC_INT_TEMP_SENSOR, ICE_AQC_INT_TEMP_FORMAT, &resp, NULL); if (status) { device_printf(dev, "Get Sensor Reading AQ call failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } ice_debug(hw, ICE_DBG_DIAG, "%s: Warning Temp Threshold: %d\n", __func__, resp.data.s0f0.temp_warning_threshold); ice_debug(hw, ICE_DBG_DIAG, "%s: Critical Temp Threshold: %d\n", __func__, resp.data.s0f0.temp_critical_threshold); ice_debug(hw, ICE_DBG_DIAG, "%s: Fatal Temp Threshold: %d\n", __func__, resp.data.s0f0.temp_fatal_threshold); return sysctl_handle_8(oidp, &resp.data.s0f0.temp, 0, req); } /** * ice_sysctl_create_mirror_interface - Create a new ifnet that monitors * traffic from the main PF VSI */ static int ice_sysctl_create_mirror_interface(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; device_t dev = sc->dev; int ret; UNREFERENCED_PARAMETER(arg2); ret = priv_check(curthread, PRIV_DRIVER); if (ret) return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* If the user hasn't written "1" to this sysctl yet: */ if (!ice_test_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC)) { /* Avoid output on the first set of reads to this sysctl in * order to prevent a null byte from being written to the * end result when called via sysctl(8). 
*/ if (req->oldptr == NULL && req->newptr == NULL) { ret = SYSCTL_OUT(req, 0, 0); return (ret); } char input_buf[2] = ""; ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req); if ((ret) || (req->newptr == NULL)) return (ret); /* If we get '1', then indicate we'll create the interface in * the next sysctl read call. */ if (input_buf[0] == '1') { if (sc->mirr_if) { device_printf(dev, "Mirror interface %s already exists!\n", if_name(sc->mirr_if->ifp)); return (EEXIST); } ice_set_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC); return (0); } return (EINVAL); } /* --- "Do Create Mirror Interface" is set --- */ /* Caller just wants the upper bound for size */ if (req->oldptr == NULL && req->newptr == NULL) { ret = SYSCTL_OUT(req, 0, 128); return (ret); } device_printf(dev, "Creating new mirroring interface...\n"); ret = ice_create_mirror_interface(sc); if (ret) return (ret); ice_clear_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC); ret = sysctl_handle_string(oidp, __DECONST(char *, "Interface attached"), 0, req); return (ret); } /** * ice_sysctl_destroy_mirror_interface - Destroy network interface that monitors * traffic from the main PF VSI */ static int ice_sysctl_destroy_mirror_interface(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; device_t dev = sc->dev; int ret; UNREFERENCED_PARAMETER(arg2); ret = priv_check(curthread, PRIV_DRIVER); if (ret) return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* If the user hasn't written "1" to this sysctl yet: */ if (!ice_test_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC)) { /* Avoid output on the first set of reads to this sysctl in * order to prevent a null byte from being written to the * end result when called via sysctl(8). 
*/ if (req->oldptr == NULL && req->newptr == NULL) { ret = SYSCTL_OUT(req, 0, 0); return (ret); } char input_buf[2] = ""; ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req); if ((ret) || (req->newptr == NULL)) return (ret); /* If we get '1', then indicate we'll create the interface in * the next sysctl read call. */ if (input_buf[0] == '1') { if (!sc->mirr_if) { device_printf(dev, "No mirror interface exists!\n"); return (EINVAL); } ice_set_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC); return (0); } return (EINVAL); } /* --- "Do Destroy Mirror Interface" is set --- */ /* Caller just wants the upper bound for size */ if (req->oldptr == NULL && req->newptr == NULL) { ret = SYSCTL_OUT(req, 0, 128); return (ret); } device_printf(dev, "Destroying mirroring interface...\n"); ice_destroy_mirror_interface(sc); ice_clear_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC); ret = sysctl_handle_string(oidp, __DECONST(char *, "Interface destroyed"), 0, req); return (ret); } diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h index b524db61403c..afc03ebd3b51 100644 --- a/sys/dev/ice/ice_lib.h +++ b/sys/dev/ice/ice_lib.h @@ -1,1017 +1,1016 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file ice_lib.h * @brief header for generic device and sysctl functions * * Contains definitions and function declarations for the ice_lib.c file. It * does not depend on the iflib networking stack. */ #ifndef _ICE_LIB_H_ #define _ICE_LIB_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ice_dcb.h" #include "ice_type.h" #include "ice_common.h" #include "ice_flow.h" #include "ice_sched.h" #include "ice_resmgr.h" #include "ice_rdma_internal.h" #include "ice_rss.h" /* Hide debug sysctls unless INVARIANTS is enabled */ #ifdef INVARIANTS #define ICE_CTLFLAG_DEBUG 0 #else #define ICE_CTLFLAG_DEBUG CTLFLAG_SKIP #endif /** * for_each_set_bit - For loop over each set bit in a bit string * @bit: storage for the bit index * @data: address of data block to loop over * @nbits: maximum number of bits to loop over * * macro to create a for loop over a bit string, which runs the body once for * each bit that is set in the string. The bit variable will be set to the * index of each set bit in the string, with zero representing the first bit. 
*/ #define for_each_set_bit(bit, data, nbits) \ for (bit_ffs((bitstr_t *)(data), (nbits), &(bit)); \ (bit) != -1; \ bit_ffs_at((bitstr_t *)(data), (bit) + 1, (nbits), &(bit))) /** * @var broadcastaddr * @brief broadcast MAC address * * constant defining the broadcast MAC address, used for programming the * broadcast address as a MAC filter for the PF VSI. */ static const u8 broadcastaddr[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; MALLOC_DECLARE(M_ICE); extern const char ice_driver_version[]; extern const uint8_t ice_major_version; extern const uint8_t ice_minor_version; extern const uint8_t ice_patch_version; extern const uint8_t ice_rc_version; /* global sysctl indicating whether the Tx FC filter should be enabled */ extern bool ice_enable_tx_fc_filter; /* global sysctl indicating whether the Tx LLDP filter should be enabled */ extern bool ice_enable_tx_lldp_filter; /* global sysctl indicating whether FW health status events should be enabled */ extern bool ice_enable_health_events; /* global sysctl indicating whether to enable 5-layer scheduler topology */ extern bool ice_tx_balance_en; /** * @struct ice_bar_info * @brief PCI BAR mapping information * * Contains data about a PCI BAR that the driver has mapped for use. 
*/ struct ice_bar_info { struct resource *res; bus_space_tag_t tag; bus_space_handle_t handle; bus_size_t size; int rid; }; /* Alignment for queues */ #define DBA_ALIGN 128 /* Maximum TSO size is (256K)-1 */ #define ICE_TSO_SIZE ((256*1024) - 1) /* Minimum size for TSO MSS */ #define ICE_MIN_TSO_MSS 64 #define ICE_MAX_TX_SEGS 8 #define ICE_MAX_TSO_SEGS 128 #define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1) #define ICE_MAX_RX_SEGS 5 #define ICE_MAX_TSO_HDR_SEGS 3 #define ICE_MSIX_BAR 3 #define ICE_MAX_MSIX_VECTORS (GLINT_DYN_CTL_MAX_INDEX + 1) #define ICE_DEFAULT_DESC_COUNT 1024 #define ICE_MAX_DESC_COUNT 8160 #define ICE_MIN_DESC_COUNT 64 #define ICE_DESC_COUNT_INCR 32 /* List of hardware offloads we support */ #define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \ CSUM_IP6_TCP| CSUM_IP6_UDP | CSUM_IP6_SCTP | \ CSUM_IP_TSO | CSUM_IP6_TSO) /* Macros to decide what kind of hardware offload to enable */ #define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP) #define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP) #define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP) #define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO) /* List of known RX CSUM offload flags */ #define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \ CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \ CSUM_COALESCED) /* List of interface capabilities supported by ice hardware */ #define ICE_FULL_CAPS \ (IFCAP_TSO4 | IFCAP_TSO6 | \ IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \ IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \ IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | \ IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO) /* Safe mode disables support for hardware checksums and TSO */ #define ICE_SAFE_CAPS \ (ICE_FULL_CAPS & ~(IFCAP_HWCSUM | IFCAP_TSO | \ IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM)) #define ICE_CAPS(sc) \ (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? 
ICE_SAFE_CAPS : ICE_FULL_CAPS) /** * ICE_NVM_ACCESS * @brief Private ioctl command number for NVM access ioctls * * The ioctl command number used by NVM update for accessing the driver for * NVM access commands. */ #define ICE_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) /** * ICE_DEBUG_DUMP * @brief Private ioctl command number for retrieving debug dump data * * The ioctl command number used by a userspace tool for accessing the driver for * getting debug dump data from the firmware. */ #define ICE_DEBUG_DUMP \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6) #define ICE_AQ_LEN 1023 #define ICE_MBXQ_LEN 512 #define ICE_SBQ_LEN 512 #define ICE_CTRLQ_WORK_LIMIT 256 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) /* wait up to 50 microseconds for queue state change */ #define ICE_Q_WAIT_RETRY_LIMIT 5 #define ICE_UP_TABLE_TRANSLATE(val, i) \ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ ICE_AQ_VSI_UP_TABLE_UP##i##_M) /* * For now, set this to the hardware maximum. Each function gets a smaller * number assigned to it in hw->func_caps.guar_num_vsi, though there * appears to be no guarantee that is the maximum number that a function * can use. */ #define ICE_MAX_VSI_AVAILABLE 768 /* Maximum size of a single frame (for Tx and Rx) */ #define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX /* Maximum MTU size */ #define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) /* * Hardware requires that TSO packets have an segment size of at least 64 * bytes. To avoid sending bad frames to the hardware, the driver forces the * MSS for all TSO packets to have a segment size of at least 64 bytes. * * However, if the MTU is reduced below a certain size, then the resulting * larger MSS can result in transmitting segmented frames with a packet size * larger than the MTU. * * Avoid this by preventing the MTU from being lowered below this limit. 
* Alternative solutions require changing the TCP stack to disable offloading * the segmentation when the requested segment size goes below 64 bytes. */ #define ICE_MIN_MTU 112 /* * The default number of queues reserved for a VF is 4, according to the * AVF Base Mode specification. */ #define ICE_DEFAULT_VF_QUEUES 4 /* * An invalid VSI number to indicate that mirroring should be disabled. */ #define ICE_INVALID_MIRROR_VSI ((u16)-1) /* * The maximum number of RX queues allowed per TC in a VSI. */ #define ICE_MAX_RXQS_PER_TC 256 /* * There are three settings that can be updated independently or * altogether: Link speed, FEC, and Flow Control. These macros allow * the caller to specify which setting(s) to update. */ #define ICE_APPLY_LS BIT(0) #define ICE_APPLY_FEC BIT(1) #define ICE_APPLY_FC BIT(2) #define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC) #define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC) #define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC) #define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC) /* * Mask of valid flags that can be used as an input for the * advertise_speed sysctl. */ #define ICE_SYSCTL_SPEEDS_VALID_RANGE 0xFFF /** * @enum ice_dyn_idx_t * @brief Dynamic Control ITR indexes * * This enum matches hardware bits and is meant to be used by DYN_CTLN * registers and QINT registers or more generally anywhere in the manual * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * register but instead is a special value meaning "don't update" ITR0/1/2. 
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1

/* Upper bound accepted for an ITR setting */
#define ICE_ITR_MAX 8160

/* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50

/* RS FEC register values */
#define ICE_RS_FEC_REG_SHIFT 2
#define ICE_RS_FEC_RECV_ID_SHIFT 4
#define ICE_RS_FEC_CORR_LOW_REG_PORT0 (0x02 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT0 (0x03 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT0 (0x04 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT0 (0x05 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_LOW_REG_PORT1 (0x42 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT1 (0x43 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT1 (0x44 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT1 (0x45 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_LOW_REG_PORT2 (0x4A << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT2 (0x4B << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT2 (0x4C << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT2 (0x4D << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_LOW_REG_PORT3 (0x52 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT3 (0x53 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT3 (0x54 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT3 (0x55 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_RECEIVER_ID_PCS0 (0x33 << ICE_RS_FEC_RECV_ID_SHIFT)
#define ICE_RS_FEC_RECEIVER_ID_PCS1 (0x34 << ICE_RS_FEC_RECV_ID_SHIFT)

/**
 * ice_itr_to_reg - Convert an ITR setting into its register equivalent
 * @hw: The device HW structure
 * @itr_setting: the ITR setting to convert
 *
 * Based on the hardware ITR granularity, convert an ITR setting into the
 * correct value to prepare programming to the HW.
 */
static inline u16 ice_itr_to_reg(struct ice_hw *hw, u16 itr_setting)
{
	/* Integer division: the setting is truncated to a whole number of
	 * hardware granularity units.
	 */
	return itr_setting / hw->itr_gran;
}

/**
 * @enum ice_rx_dtype
 * @brief DTYPE header split options
 *
 * This enum matches the Rx context bits to define whether header split is
 * enabled or not.
 */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT = 0,
	ICE_RX_DTYPE_HEADER_SPLIT = 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

/* Strings used for displaying FEC mode
 *
 * Use ice_fec_str() to get these unless these need to be embedded in a
 * string constant.
 */
#define ICE_FEC_STRING_AUTO "Auto"
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"
#define ICE_FEC_STRING_DIS_AUTO "Auto (w/ No-FEC)"

/* Strings used for displaying Flow Control mode
 *
 * Use ice_fc_str() to get these unless these need to be embedded in a
 * string constant.
 */
#define ICE_FC_STRING_FULL "Full"
#define ICE_FC_STRING_TX "Tx"
#define ICE_FC_STRING_RX "Rx"
#define ICE_FC_STRING_NONE "None"

/*
 * The number of times the ice_handle_i2c_req function will retry reading
 * I2C data via the Admin Queue before returning EBUSY.
 */
#define ICE_I2C_MAX_RETRIES 10

/*
 * The Start LLDP Agent AQ command will fail if it's sent too soon after
 * the LLDP agent is stopped. The period between the stop and start
 * commands must currently be at least 2 seconds.
 */
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)

/*
 * Only certain clusters are valid for certain devices for the FW debug dump
 * functionality, so define masks of those here.
 */
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810 0x4001AF
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830 0x1AF

/* Forward declaration; full definition lives in the driver's main header */
struct ice_softc;

/**
 * @enum ice_rx_cso_stat
 * @brief software checksum offload statistics
 *
 * Enumeration of possible checksum offload statistics captured by software
 * during the Rx path.
 */
enum ice_rx_cso_stat {
	ICE_CSO_STAT_RX_IP4_ERR,
	ICE_CSO_STAT_RX_IP6_ERR,
	ICE_CSO_STAT_RX_L3_ERR,
	ICE_CSO_STAT_RX_TCP_ERR,
	ICE_CSO_STAT_RX_UDP_ERR,
	ICE_CSO_STAT_RX_SCTP_ERR,
	ICE_CSO_STAT_RX_L4_ERR,
	/* Must remain last: used to size the rx_stats cso[] array */
	ICE_CSO_STAT_RX_COUNT
};

/**
 * @enum ice_tx_cso_stat
 * @brief software checksum offload statistics
 *
 * Enumeration of possible checksum offload statistics captured by software
 * during the Tx path.
 */
enum ice_tx_cso_stat {
	ICE_CSO_STAT_TX_TCP,
	ICE_CSO_STAT_TX_UDP,
	ICE_CSO_STAT_TX_SCTP,
	ICE_CSO_STAT_TX_IP4,
	ICE_CSO_STAT_TX_IP6,
	ICE_CSO_STAT_TX_L3_ERR,
	ICE_CSO_STAT_TX_L4_ERR,
	/* Must remain last: used to size the tx_stats cso[] array */
	ICE_CSO_STAT_TX_COUNT
};

/**
 * @struct tx_stats
 * @brief software Tx statistics
 *
 * Contains software counted Tx statistics for a single queue
 */
struct tx_stats {
	/* Soft Stats */
	u64 tx_bytes;
	u64 tx_packets;
	u64 mss_too_small;
	u64 tso;
	u64 cso[ICE_CSO_STAT_TX_COUNT];
};

/**
 * @struct rx_stats
 * @brief software Rx statistics
 *
 * Contains software counted Rx statistics for a single queue
 */
struct rx_stats {
	/* Soft Stats */
	u64 rx_packets;
	u64 rx_bytes;
	u64 desc_errs;
	u64 cso[ICE_CSO_STAT_RX_COUNT];
};

/**
 * @struct ice_vsi_hw_stats
 * @brief hardware statistics for a VSI
 *
 * Stores statistics that are generated by hardware for a VSI.
 */
struct ice_vsi_hw_stats {
	struct ice_eth_stats prev;
	struct ice_eth_stats cur;
	bool offsets_loaded;
};

/**
 * @struct ice_pf_hw_stats
 * @brief hardware statistics for a PF
 *
 * Stores statistics that are generated by hardware for each PF.
 */
struct ice_pf_hw_stats {
	struct ice_hw_port_stats prev;
	struct ice_hw_port_stats cur;
	bool offsets_loaded;
};

/**
 * @struct ice_pf_sw_stats
 * @brief software statistics for a PF
 *
 * Contains software generated statistics relevant to a PF.
 */
struct ice_pf_sw_stats {
	/* # of reset events handled, by type */
	u32 corer_count;
	u32 globr_count;
	u32 empr_count;
	u32 pfr_count;

	/* # of detected MDD events for Tx and Rx */
	u32 tx_mdd_count;
	u32 rx_mdd_count;

	u64 rx_roc_error; /* port oversize packet stats, error_cnt \
			     from GLV_REPC VSI register + RxOversize */
};

/**
 * @struct ice_tc_info
 * @brief Traffic class information for a VSI
 *
 * Stores traffic class information used in configuring
 * a VSI.
 */
struct ice_tc_info {
	u16 qoffset;	/* Offset in VSI queue space */
	u16 qcount_tx;	/* TX queues for this Traffic Class */
	u16 qcount_rx;	/* RX queues */
};

/**
 * @struct ice_vsi
 * @brief VSI structure
 *
 * Contains data relevant to a single VSI
 */
struct ice_vsi {
	/* back pointer to the softc */
	struct ice_softc *sc;

	bool dynamic;			/* if true, dynamically allocated */

	enum ice_vsi_type type;		/* type of this VSI */
	u16 idx;			/* software index to sc->all_vsi[] */

	u16 *tx_qmap;			/* Tx VSI to PF queue mapping */
	u16 *rx_qmap;			/* Rx VSI to PF queue mapping */

	enum ice_resmgr_alloc_type qmap_type;

	struct ice_tx_queue *tx_queues;	/* Tx queue array */
	struct ice_rx_queue *rx_queues;	/* Rx queue array */
	int num_tx_queues;
	int num_rx_queues;
	int num_vectors;

	int16_t rx_itr;
	int16_t tx_itr;

	/* RSS configuration */
	u16 rss_table_size;	/* HW RSS table size */
	u8 rss_lut_type;	/* Used to configure Get/Set RSS LUT AQ call */

	int max_frame_size;
	u16 mbuf_sz;

	struct ice_aqc_vsi_props info;

	/* DCB configuration */
	u8 num_tcs;	/* Total number of enabled TCs */
	u16 tc_map;	/* bitmap of enabled Traffic Classes */
	/* Information for each traffic class */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];

	/* context for per-VSI sysctls */
	struct sysctl_ctx_list ctx;
	struct sysctl_oid *vsi_node;

	/* context for per-txq sysctls */
	struct sysctl_ctx_list txqs_ctx;
	struct sysctl_oid *txqs_node;

	/* context for per-rxq sysctls */
	struct sysctl_ctx_list rxqs_ctx;
	struct sysctl_oid *rxqs_node;

	/* VSI-level stats */
	struct ice_vsi_hw_stats hw_stats;
	/* VSI mirroring details */
	u16 mirror_src_vsi;
	u16 rule_mir_ingress;
	u16 rule_mir_egress;
};

/**
 * @struct ice_debug_dump_cmd
 * @brief arguments/return value for debug dump ioctl
 */
struct ice_debug_dump_cmd {
	u32 offset;		/* offset to read/write from table, in bytes */
	u16 cluster_id;		/* also used to get next cluster id */
	u16 table_id;
	u16 data_size;		/* size of data field, in bytes */
	u16 reserved1;
	u32 reserved2;
	/* flexible array member; payload length is given by data_size */
	u8 data[];
};

/**
 * @struct ice_serdes_equalization
 * @brief serdes equalization info
 */
struct ice_serdes_equalization {
	int rx_equalization_pre1;
	int rx_equalization_pre2;
	int rx_equalization_post1;
	int rx_equalization_bflf;
	int rx_equalization_bfhf;
	int rx_equalization_drate;
	int tx_equalization_pre1;
	int tx_equalization_pre2;
	int tx_equalization_pre3;
	int tx_equalization_atten;
	int tx_equalization_post1;
};

/**
 * @struct ice_fec_stats_to_sysctl
 * @brief FEC stats register value of port
 */
struct ice_fec_stats_to_sysctl {
	u16 fec_corr_cnt_low;
	u16 fec_corr_cnt_high;
	u16 fec_uncorr_cnt_low;
	u16 fec_uncorr_cnt_high;
};

#define ICE_MAX_SERDES_LANE_COUNT 4

/**
 * @struct ice_regdump_to_sysctl
 * @brief PHY stats of port
 */
struct ice_regdump_to_sysctl {
	/* A multilane port can have max 4 serdes */
	struct ice_serdes_equalization equalization[ICE_MAX_SERDES_LANE_COUNT];
	struct ice_fec_stats_to_sysctl stats;
};

/**
 * @struct ice_port_topology
 * @brief Port topology from lport i.e. serdes mapping, pcsquad, macport, cage
 */
struct ice_port_topology {
	u16 pcs_port;
	u16 primary_serdes_lane;
	u16 serdes_lane_count;
	u16 pcs_quad_select;
};

/**
 * @enum ice_state
 * @brief Driver state flags
 *
 * Used to indicate the status of various driver events. Intended to be
 * modified only using atomic operations, so that we can use it even in places
 * which aren't locked.
 */
enum ice_state {
	ICE_STATE_CONTROLQ_EVENT_PENDING,
	ICE_STATE_VFLR_PENDING,
	ICE_STATE_MDD_PENDING,
	ICE_STATE_RESET_OICR_RECV,
	ICE_STATE_RESET_PFR_REQ,
	ICE_STATE_PREPARED_FOR_RESET,
	ICE_STATE_SUBIF_NEEDS_REINIT,
	ICE_STATE_RESET_FAILED,
	ICE_STATE_DRIVER_INITIALIZED,
	ICE_STATE_NO_MEDIA,
	ICE_STATE_RECOVERY_MODE,
	ICE_STATE_ROLLBACK_MODE,
	ICE_STATE_LINK_STATUS_REPORTED,
	ICE_STATE_ATTACHING,
	ICE_STATE_DETACHING,
	ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
	ICE_STATE_MULTIPLE_TCS,
	ICE_STATE_DO_FW_DEBUG_DUMP,
	ICE_STATE_LINK_ACTIVE_ON_DOWN,
	ICE_STATE_FIRST_INIT_LINK,
	ICE_STATE_DO_CREATE_MIRR_INTFC,
	ICE_STATE_DO_DESTROY_MIRR_INTFC,
	/* This entry must be last */
	ICE_STATE_LAST,
};

/* Functions for setting and checking driver state. Note the functions take
 * bit positions, not bitmasks. The atomic_testandset_32 and
 * atomic_testandclear_32 operations require bit positions, while the
 * atomic_set_32 and atomic_clear_32 require bitmasks. This can easily lead to
 * programming error, so we provide wrapper functions to avoid this.
 */

/**
 * ice_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
static inline void
ice_set_state(volatile u32 *s, enum ice_state bit)
{
	/* atomic_set_32 expects a bitmask */
	atomic_set_32(s, BIT(bit));
}

/**
 * ice_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
static inline void
ice_clear_state(volatile u32 *s, enum ice_state bit)
{
	/* atomic_clear_32 expects a bitmask */
	atomic_clear_32(s, BIT(bit));
}

/**
 * ice_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
static inline u32
ice_testandset_state(volatile u32 *s, enum ice_state bit)
{
	/* atomic_testandset_32 expects a bit position */
	return atomic_testandset_32(s, bit);
}

/**
 * ice_testandclear_state - Test and clear the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, clearing the specified bit. Returns the
 * previous value of the bit.
 */
static inline u32
ice_testandclear_state(volatile u32 *s, enum ice_state bit)
{
	/* atomic_testandclear_32 expects a bit position */
	return atomic_testandclear_32(s, bit);
}

/**
 * ice_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ice_testandset_state or ice_testandclear_state.
 */
static inline u32
ice_test_state(volatile u32 *s, enum ice_state bit)
{
	/* Plain (non-atomic) read; callers only get a point-in-time answer */
	return (*s & BIT(bit)) ? true : false;
}

/**
 * @struct ice_str_buf
 * @brief static length buffer for string returning
 *
 * Structure containing a fixed size string buffer, used to implement
 * numeric->string conversion functions that may want to return non-constant
 * strings.
 *
 * This allows returning a fixed size string that is generated by a conversion
 * function, and then copied to the used location without needing to use an
 * explicit local variable passed by reference.
 */
struct ice_str_buf {
	char str[ICE_STR_BUF_LEN];
};

struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
struct ice_str_buf _ice_status_str(int status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_log_sev_str(u8 log_level);
struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event);
struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event);
struct ice_str_buf _ice_mdd_rx_str(u8 event);
struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status);

/* Convenience wrappers: return the char array inside the by-value
 * ice_str_buf so callers can use the result directly as a string.
 */
#define ice_aq_str(err)	_ice_aq_str(err).str
#define ice_status_str(err)	_ice_status_str(err).str
#define ice_err_str(err)	_ice_err_str(err).str
#define ice_fltr_flag_str(flag)	_ice_fltr_flag_str(flag).str

#define ice_mdd_tx_tclan_str(event)	_ice_mdd_tx_tclan_str(event).str
#define ice_mdd_tx_pqm_str(event)	_ice_mdd_tx_pqm_str(event).str
#define ice_mdd_rx_str(event)		_ice_mdd_rx_str(event).str

#define ice_log_sev_str(log_level)	_ice_log_sev_str(log_level).str
#define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str

/**
 * ice_enable_intr - Enable interrupts for given vector
 * @hw: the device private HW structure
 * @vector: the interrupt index in PF space
 *
 * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
 */
static inline void
ice_enable_intr(struct ice_hw *hw, int vector)
{
	u32 dyn_ctl;

	/* Use ITR_NONE so that ITR configuration is not changed. */
	dyn_ctl = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	    (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}

/**
 * ice_disable_intr - Disable interrupts for given vector
 * @hw: the device private HW structure
 * @vector: the interrupt index in PF space
 *
 * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
 */
static inline void
ice_disable_intr(struct ice_hw *hw, int vector)
{
	u32 dyn_ctl;

	/* Use ITR_NONE so that ITR configuration is not changed. */
	dyn_ctl = ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S;
	wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}

/**
 * ice_is_tx_desc_done - determine if a Tx descriptor is done
 * @txd: the Tx descriptor to check
 *
 * Returns true if hardware is done with a Tx descriptor and software is
 * capable of re-using it.
 */
static inline bool
ice_is_tx_desc_done(struct ice_tx_desc *txd)
{
	/* Extract the DTYPE field; HW writes DESC_DONE when finished */
	return (((txd->cmd_type_offset_bsz & ICE_TXD_QW1_DTYPE_M)
		 >> ICE_TXD_QW1_DTYPE_S) == ICE_TX_DESC_DTYPE_DESC_DONE);
}

/**
 * ice_get_pf_id - Get the PF id from the hardware registers
 * @hw: the ice hardware structure
 *
 * Reads the PF_FUNC_RID register and extracts the function number from it.
 * Intended to be used in cases where hw->pf_id hasn't yet been assigned by
 * ice_init_hw.
 *
 * @pre this function should be called only after PCI register access has been
 * setup, and prior to ice_init_hw. After hardware has been initialized, the
 * cached hw->pf_id value can be used.
 */
static inline u8
ice_get_pf_id(struct ice_hw *hw)
{
	return (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		    PF_FUNC_RID_FUNCTION_NUMBER_S);
}

/* Details of how to re-initialize depend on the networking stack */
void ice_request_stack_reinit(struct ice_softc *sc);

/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);

/* Details of how to setup/teardown a mirror interface */

/**
 * @brief Create an interface for mirroring
 */
int ice_create_mirror_interface(struct ice_softc *sc);

/**
 * @brief Destroy created mirroring interface
 */
void ice_destroy_mirror_interface(struct ice_softc *sc);

const char * ice_fw_module_str(enum ice_aqc_fw_logging_mod module);
void ice_add_fw_logging_tunables(struct ice_softc *sc,
				 struct sysctl_oid *parent);
void ice_handle_fw_log_event(struct ice_softc *sc, struct ice_aq_desc *desc,
			     void *buf);
int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type,
		      u16 *pending);
int ice_map_bar(device_t dev, struct ice_bar_info *bar, int
bar_num); void ice_free_bar(device_t dev, struct ice_bar_info *bar); void ice_set_ctrlq_len(struct ice_hw *hw); void ice_release_vsi(struct ice_vsi *vsi); struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type); void ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues, const int max_rx_queues); void ice_free_vsi_qmaps(struct ice_vsi *vsi); int ice_initialize_vsi(struct ice_vsi *vsi); void ice_deinit_vsi(struct ice_vsi *vsi); uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi); int ice_get_phy_type_low(uint64_t phy_type_low); int ice_get_phy_type_high(uint64_t phy_type_high); int ice_add_media_types(struct ice_softc *sc, struct ifmedia *media); void ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8 itr_idx); void ice_configure_all_rxq_interrupts(struct ice_vsi *vsi); void ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx); void ice_configure_all_txq_interrupts(struct ice_vsi *vsi); void ice_flush_rxq_interrupts(struct ice_vsi *vsi); void ice_flush_txq_interrupts(struct ice_vsi *vsi); int ice_cfg_vsi_for_tx(struct ice_vsi *vsi); int ice_cfg_vsi_for_rx(struct ice_vsi *vsi); int ice_control_rx_queue(struct ice_vsi *vsi, u16 qidx, bool enable); int ice_control_all_rx_queues(struct ice_vsi *vsi, bool enable); int ice_cfg_pf_default_mac_filters(struct ice_softc *sc); int ice_rm_pf_default_mac_filters(struct ice_softc *sc); void ice_print_nvm_version(struct ice_softc *sc); void ice_update_vsi_hw_stats(struct ice_vsi *vsi); void ice_reset_vsi_stats(struct ice_vsi *vsi); void ice_update_pf_stats(struct ice_softc *sc); void ice_reset_pf_stats(struct ice_softc *sc); void ice_add_device_sysctls(struct ice_softc *sc); void ice_log_hmc_error(struct ice_hw *hw, device_t dev); void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_eth_stats *stats); void ice_add_vsi_sysctls(struct ice_vsi *vsi); void ice_add_sysctls_mac_stats(struct sysctl_ctx_list 
*ctx, struct sysctl_oid *parent, struct ice_softc *sc); void ice_configure_misc_interrupts(struct ice_softc *sc); int ice_sync_multicast_filters(struct ice_softc *sc); int ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length); int ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid); int ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length); int ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid); void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent); void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi); void ice_add_device_tunables(struct ice_softc *sc); int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr); int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr); int ice_vsi_disable_tx(struct ice_vsi *vsi); void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi); void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi); void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi); void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi); void ice_add_txq_sysctls(struct ice_tx_queue *txq); void ice_add_rxq_sysctls(struct ice_rx_queue *rxq); int ice_config_rss(struct ice_vsi *vsi); void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc); int ice_load_pkg_file(struct ice_softc *sc); void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status); uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter); void ice_save_pci_info(struct ice_hw *hw, device_t dev); int ice_replay_all_vsi_cfg(struct ice_softc *sc); void ice_link_up_msg(struct ice_softc *sc); int ice_update_laa_mac(struct ice_softc *sc); void ice_get_and_print_bus_info(struct ice_softc *sc); const char *ice_fec_str(enum ice_fec_mode mode); const char *ice_fc_str(enum ice_fc_mode mode); const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action); const char *ice_state_to_str(enum ice_state state); int ice_init_link_events(struct ice_softc *sc); void ice_configure_rx_itr(struct ice_vsi *vsi); void ice_configure_tx_itr(struct ice_vsi *vsi); void 
ice_setup_pf_vsi(struct ice_softc *sc); void ice_handle_mdd_event(struct ice_softc *sc); void ice_init_dcb_setup(struct ice_softc *sc); int ice_send_version(struct ice_softc *sc); int ice_cfg_pf_ethertype_filters(struct ice_softc *sc); void ice_init_link_configuration(struct ice_softc *sc); void ice_init_saved_phy_cfg(struct ice_softc *sc); int ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings); void ice_set_link_management_mode(struct ice_softc *sc); int ice_module_event_handler(module_t mod, int what, void *arg); int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd); int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req); int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length); int ice_alloc_intr_tracking(struct ice_softc *sc); void ice_free_intr_tracking(struct ice_softc *sc); void ice_set_default_local_lldp_mib(struct ice_softc *sc); void ice_set_link(struct ice_softc *sc, bool enabled); void ice_add_rx_lldp_filter(struct ice_softc *sc); void ice_init_health_events(struct ice_softc *sc); void ice_cfg_pba_num(struct ice_softc *sc); int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd); u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg); -void ice_print_dual_nac_info(struct ice_softc *sc); void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib); int ice_setup_vsi_mirroring(struct ice_vsi *vsi); #endif /* _ICE_LIB_H_ */ diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c index 9a41f30386c0..ff30adfe8fa7 100644 --- a/sys/dev/ice/ice_nvm.c +++ b/sys/dev/ice/ice_nvm.c @@ -1,2204 +1,2216 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2024, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */

#include "ice_common.h"

#define GL_MNG_DEF_DEVID 0x000B611C

/**
 * ice_aq_read_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands (0x0701)
 */
int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
		void *data, bool last_command, bool read_shadow_ram,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	/* The command carries the offset in a 24-bit field (offset_low +
	 * offset_high below); reject anything larger.
	 */
	if (offset > ICE_AQC_NVM_MAX_OFFSET)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF; /* bits 23:16 of the offset */
	cmd->length = CPU_TO_LE16(length);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space.
 * This function correctly
 * breaks read requests across Shadow RAM sectors and ensures that no single
 * read request exceeds the maximum 4KB read for a single AdminQ command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 */
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4KB.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		/* last when this chunk completes the requested length */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ice_aq_read_nvm takes the length as a u16. Our read_size is
		 * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum
		 * size guarantees that it will fit within the 2 bytes.
		 */
		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, (u16)read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	*length = bytes_read;
	return status;
}

/**
 * ice_aq_update_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be written (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @command_flags: command parameters
 * @cd: pointer to command details structure or NULL
 *
 * Update the NVM using the admin queue commands (0x0703)
 */
int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
		  u16 length, void *data, bool last_command, u8 command_flags,
		  struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	/* In offset the highest byte must be zeroed. */
	if (offset & 0xFF000000)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);

	cmd->cmd_flags |= command_flags;

	/* If this is the last command in a series, set the proper flag.
 */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = CPU_TO_LE16(length);

	/* RD flag: the buffer carries data from the driver to the FW */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_aq_erase_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @cd: pointer to command details structure or NULL
 *
 * Erase the NVM sector using the admin queue commands (0x0702)
 */
int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;
	int status;
	__le16 len;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* read a length value from SR, so module_typeid is equal to 0 */
	/* calculate offset where module size is placed from bytes to words */
	/* set last command and read from SR values to true */
	status = ice_aq_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
				 true, NULL);
	if (status)
		return status;

	cmd = &desc.params.nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);

	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	/* len was read straight from flash, so it is already little-endian */
	cmd->length = len;
	cmd->offset_low = 0;
	cmd->offset_high = 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_read_nvm_cfg - read an NVM config block
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM access admin command bits
 * @field_id: field or feature ID
 * @data: buffer for result
 * @buf_size: buffer size
 * @elem_count: pointer to count of elements read by FW
 * @cd: pointer to command details structure or NULL
 *
 * Reads single or multiple feature/field ID and data (0x0704)
 */
int
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
		    u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_cfg *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd =
&desc.params.nvm_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read);

	cmd->cmd_flags = cmd_flags;
	cmd->id = CPU_TO_LE16(field_id);

	status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
	if (!status && elem_count)
		*elem_count = LE16_TO_CPU(cmd->count);

	return status;
}

/**
 * ice_aq_write_nvm_cfg - write an NVM config block
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM access admin command bits
 * @data: buffer for result
 * @buf_size: buffer size
 * @elem_count: count of elements to be written
 * @cd: pointer to command details structure or NULL
 *
 * Writes single or multiple feature/field ID and data (0x0705)
 */
int
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
		     u16 elem_count, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_cfg *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write);
	/* RD flag: the buffer carries data from the driver to the FW */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->count = CPU_TO_LE16(elem_count);
	cmd->cmd_flags = cmd_flags;

	return ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
}

/**
 * ice_check_sr_access_params - verify params for Shadow RAM R/W operations
 * @hw: pointer to the HW structure
 * @offset: offset in words from module start
 * @words: number of words to access
 */
static int
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
	if ((offset + words) > hw->flash.sr_words) {
		ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond SR lmt.\n");
		return ICE_ERR_PARAM;
	}

	if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
		/* We can access only up to 4KB (one sector), in one AQ write */
		ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n",
			  words, ICE_SR_SECTOR_SIZE_IN_WORDS);
		return ICE_ERR_PARAM;
	}

	if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
	    (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
		/* A single access cannot spread over two sectors */
		ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n");
		return ICE_ERR_PARAM;
	}

	return 0;
}

/**
 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
 */
int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
	u32 bytes = sizeof(u16);
	__le16 data_local;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Note that ice_read_flat_nvm checks if the read is past the Shadow
	 * RAM size, and ensures we don't read across a Shadow RAM sector
	 * boundary
	 */
	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
				   (_FORCE_ u8 *)&data_local, true);
	if (status)
		return status;

	*data = LE16_TO_CPU(data_local);
	return 0;
}

/**
 * ice_write_sr_aq - Writes Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 */
static int
ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
		bool last_command)
{
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_check_sr_access_params(hw, offset, words);
	if (!status)
		/* offset and words are in 16-bit words; AQ takes bytes */
		status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data,
					   last_command, 0, NULL);

	return status;
}

/**
 * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
 * taken before reading the buffer and later released.
 */
static int
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
	u32 bytes = *words * 2, i;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM
	 * sector restrictions necessary when reading from the NVM.
	 */
	status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);

	/* Report the number of words successfully read */
	*words = (u16)(bytes / 2);

	/* Byte swap the words up to the amount we actually read */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);

	return status;
}

/**
 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership.
 */
int
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Blank NVM mode has no ownership arbitration */
	if (hw->flash.blank_nvm_mode)
		return 0;

	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}

/**
 * ice_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM ownership.
 */
void ice_release_nvm(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->flash.blank_nvm_mode)
		return;

	ice_release_res(hw, ICE_NVM_RES_ID);
}

/**
 * ice_get_flash_bank_offset - Get offset into requested flash bank
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive flash bank
 * @module: the module to read from
 *
 * Based on the module, lookup the module offset from the beginning of the
 * flash.
 *
 * Returns the flash offset. Note that a value of zero is invalid and must be
 * treated as an error.
 */
static u32
ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select bank,
			  u16 module)
{
	struct ice_bank_info *banks = &hw->flash.banks;
	enum ice_flash_bank active_bank;
	bool second_bank_active;
	u32 offset, size;

	/* Pick the cached pointer/size/active-bank info for this module */
	switch (module) {
	case ICE_SR_1ST_NVM_BANK_PTR:
		offset = banks->nvm_ptr;
		size = banks->nvm_size;
		active_bank = banks->nvm_bank;
		break;
	case ICE_SR_1ST_OROM_BANK_PTR:
		offset = banks->orom_ptr;
		size = banks->orom_size;
		active_bank = banks->orom_bank;
		break;
	case ICE_SR_NETLIST_BANK_PTR:
		offset = banks->netlist_ptr;
		size = banks->netlist_size;
		active_bank = banks->netlist_bank;
		break;
	default:
		ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash module: 0x%04x\n",
			  module);
		return 0;
	}

	switch (active_bank) {
	case ICE_1ST_FLASH_BANK:
		second_bank_active = false;
		break;
	case ICE_2ND_FLASH_BANK:
		second_bank_active = true;
		break;
	default:
		ice_debug(hw, ICE_DBG_NVM, "Unexpected value for active flash bank: %u\n",
			  active_bank);
		return 0;
	}

	/* The second flash bank is stored immediately following the first
	 * bank. Based on whether the 1st or 2nd bank is active, and whether
	 * we want the active or inactive bank, calculate the desired offset.
	 */
	switch (bank) {
	case ICE_ACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? size : 0);
	case ICE_INACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? 0 : size);
	}

	ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash bank selection: %u\n",
		  bank);
	return 0;
}

/**
 * ice_read_flash_module - Read a word from one of the main NVM modules
 * @hw: pointer to the HW structure
 * @bank: which bank of the module to read
 * @module: the module to read
 * @offset: the offset into the module in bytes
 * @data: storage for the word read from the flash
 * @length: bytes of data to read
 *
 * Read data from the specified flash module. The bank parameter indicates
 * whether or not to read from the active bank or the inactive bank of that
 * module.
*
 * The word will be read using flat NVM access, and relies on the
 * hw->flash.banks data being setup by ice_determine_active_flash_banks()
 * during initialization.
 */
static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
		      u32 offset, u8 *data, u32 length)
{
	int status;
	u32 start;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Zero means the bank offset could not be resolved */
	start = ice_get_flash_bank_offset(hw, bank, module);
	if (!start) {
		ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
			  module);
		return ICE_ERR_PARAM;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	status = ice_read_flat_nvm(hw, start + offset, &length, data, false);

	ice_release_nvm(hw);

	return status;
}

/**
 * ice_read_nvm_module - Read from the active main NVM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive NVM module
 * @offset: offset into the NVM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active NVM module. This includes the CSS
 * header at the start of the NVM module.
 */
static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset,
		    u16 *data)
{
	__le16 data_local;
	int status;

	status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR,
				       offset * sizeof(u16),
				       (_FORCE_ u8 *)&data_local,
				       sizeof(u16));
	if (!status)
		*data = LE16_TO_CPU(data_local);

	return status;
}

/**
 * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @hdr_len: storage for header length in words
 *
 * Read the CSS header length from the NVM CSS header and add the Authentication
 * header size, and then convert to words.
 */
static int
ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
			u32 *hdr_len)
{
	u16 hdr_len_l, hdr_len_h;
	u32 hdr_len_dword;
	int status;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
				     &hdr_len_l);
	if (status)
		return status;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
				     &hdr_len_h);
	if (status)
		return status;

	/* CSS header length is in DWORD, so convert to words and add
	 * authentication header size
	 */
	hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
	*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;

	return 0;
}

/**
 * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive NVM module
 * @offset: offset into the Shadow RAM copy to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the copy of the Shadow RAM found in the
 * specified NVM module.
 */
static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank,
		     u32 offset, u16 *data)
{
	u32 hdr_len;
	int status;

	status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
	if (status)
		return status;

	/* Shadow RAM copy begins after the CSS header, aligned to 32 words */
	hdr_len = ROUND_UP(hdr_len, 32);

	return ice_read_nvm_module(hw, bank, hdr_len + offset, data);
}

/**
 * ice_read_orom_module - Read from the active Option ROM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive OROM module
 * @offset: offset into the OROM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active Option ROM module of the flash.
 * Note that unlike the NVM module, the CSS data is stored at the end of the
 * module instead of at the beginning.
*/ static int ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { __le16 data_local; int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16), (_FORCE_ u8 *)&data_local, sizeof(u16)); if (!status) *data = LE16_TO_CPU(data_local); return status; } /** * ice_read_netlist_module - Read data from the netlist module area * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive module * @offset: offset into the netlist to read from * @data: storage for returned word value * * Read a word from the specified netlist bank. */ static int ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { __le16 data_local; int status; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (_FORCE_ u8 *)&data_local, sizeof(u16)); if (!status) *data = LE16_TO_CPU(data_local); return status; } /** * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { status = ice_read_sr_word_aq(hw, offset, data); ice_release_nvm(hw); } return status; } +#define check_add_overflow __builtin_add_overflow + /** * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA * @hw: pointer to hardware structure * @module_tlv: pointer to module TLV to return * @module_tlv_len: pointer to module TLV length to return * @module_type: module type requested * * Finds the requested sub module TLV type from the Preserved Field * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. 
*/ int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - u16 pfa_len, pfa_ptr; - u32 next_tlv; + u16 pfa_len, pfa_ptr, next_tlv, max_tlv; int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status) { ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); return status; } status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } - /* Starting with first TLV after PFA length, iterate through the list + + if (check_add_overflow(pfa_ptr, (u16)(pfa_len - 1), &max_tlv)) { + ice_debug(hw, ICE_DBG_INIT, "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", + pfa_ptr, pfa_len); + return ICE_ERR_INVAL_SIZE; + } + + /* The Preserved Fields Area contains a sequence of TLVs which define + * its contents. The PFA length includes all of the TLVs, plus its + * initial length word itself, *and* one final word at the end of all + * of the TLVs. + * + * Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. */ next_tlv = pfa_ptr + 1; - while (next_tlv < ((u32)pfa_ptr + pfa_len)) { + while (next_tlv < max_tlv) { u16 tlv_sub_module_type; u16 tlv_len; /* Read TLV type */ status = ice_read_sr_word(hw, (u16)next_tlv, &tlv_sub_module_type); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); break; } /* Read TLV length */ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); break; } - if (tlv_len > pfa_len) { - ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n"); - return ICE_ERR_INVAL_SIZE; - } if (tlv_sub_module_type == module_type) { if (tlv_len) { *module_tlv = (u16)next_tlv; *module_tlv_len = tlv_len; return 0; } return ICE_ERR_INVAL_SIZE; } - /* Check next TLV, i.e. 
current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; + + if (check_add_overflow(next_tlv, (u16)2, &next_tlv) || + check_add_overflow(next_tlv, tlv_len, &next_tlv)) { + ice_debug(hw, ICE_DBG_INIT, "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n", + tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); + return ICE_ERR_INVAL_SIZE; + } } /* Module does not exist */ return ICE_ERR_DOES_NOT_EXIST; } /** * ice_read_pba_string - Reads part number string from NVM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the NVM * @pba_num_size: part number string buffer length * * Reads the part number string from the NVM. */ int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; u16 pba_word, pba_size; int status; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, ICE_SR_PBA_BLOCK_PTR); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n"); return status; } /* pba_size is the next word */ status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n"); return status; } if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); return ICE_ERR_INVAL_SIZE; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); return ICE_ERR_PARAM; } for (i = 0; i < pba_size; i++) { status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * 
ice_get_nvm_srev - Read the security revision from the NVM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active NVM module
 * bank.
 */
static int ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
	u16 srev_l, srev_h;
	int status;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l);
	if (status)
		return status;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_H, &srev_h);
	if (status)
		return status;

	/* Combine the two 16-bit halves into the 32-bit revision */
	*srev = srev_h << 16 | srev_l;

	return 0;
}

/**
 * ice_get_nvm_ver_info - Read NVM version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @nvm: pointer to NVM info structure
 *
 * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
 * in the NVM info structure.
 */
static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank,
		     struct ice_nvm_info *nvm)
{
	u16 eetrack_lo, eetrack_hi, ver;
	int status;

	status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER,
				      &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n");
		return status;
	}

	nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO,
				      &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_HI,
				      &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	/* Security revision failure is logged but deliberately non-fatal */
	status = ice_get_nvm_srev(hw, bank, &nvm->srev);
	if (status)
		ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM security revision.\n");

	return 0;
}

/**
 * ice_get_inactive_nvm_ver - Read NVM version from
the inactive bank
 * @hw: pointer to the HW structure
 * @nvm: storage for NVM version information
 *
 * Reads the NVM EETRACK ID, Map version, and security revision of the
 * inactive NVM bank. Used to access version data for a pending update that
 * has not yet been activated.
 */
int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
	return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}

/**
 * ice_get_orom_srev - Read the security revision from the OROM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from active or inactive flash module
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active OROM module
 * bank.
 */
static int ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
	u32 orom_size_word = hw->flash.banks.orom_size / 2;
	u16 srev_l, srev_h;
	u32 css_start;
	u32 hdr_len;
	int status;

	status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
	if (status)
		return status;

	if (orom_size_word < hdr_len) {
		ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n",
			  hw->flash.banks.orom_size);
		return ICE_ERR_CFG;
	}

	/* calculate how far into the Option ROM the CSS header starts. Note
	 * that ice_read_orom_module takes a word offset
	 */
	css_start = orom_size_word - hdr_len;
	status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L,
				      &srev_l);
	if (status)
		return status;

	status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_H,
				      &srev_h);
	if (status)
		return status;

	*srev = srev_h << 16 | srev_l;

	return 0;
}

/**
 * ice_get_orom_civd_data - Get the combo version information from Option ROM
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash module
 * @civd: storage for the Option ROM CIVD data.
 *
 * Searches through the Option ROM flash contents to locate the CIVD data for
 * the image.
 */
static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
		       struct ice_orom_civd_info *civd)
{
	struct ice_orom_civd_info civd_data_section;
	int status;
	u32 offset;
	u32 tmp;

	/* The CIVD section is located in the Option ROM aligned to 512 bytes.
	 * The first 4 bytes must contain the ASCII characters "$CIV".
	 * A simple modulo 256 sum of all of the bytes of the structure must
	 * equal 0.
	 *
	 * The exact location is unknown and varies between images but is
	 * usually somewhere in the middle of the bank. We need to scan the
	 * Option ROM bank to locate it.
	 *
	 */

	/* Scan the memory buffer to locate the CIVD data section */
	for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size;
	     offset += 512) {
		u8 sum = 0, i;

		status = ice_read_flash_module(hw, bank,
					       ICE_SR_1ST_OROM_BANK_PTR,
					       offset, (u8 *)&tmp,
					       sizeof(tmp));
		if (status) {
			ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
			return status;
		}

		/* Skip forward until we find a matching signature */
		if (memcmp("$CIV", &tmp, sizeof(tmp)) != 0)
			continue;

		ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
			  offset);

		status = ice_read_flash_module(hw, bank,
					       ICE_SR_1ST_OROM_BANK_PTR,
					       offset,
					       (u8 *)&civd_data_section,
					       sizeof(civd_data_section));
		if (status) {
			ice_debug(hw, ICE_DBG_NVM, "Unable to read CIVD data\n");
			goto exit_error;
		}

		/* Verify that the simple checksum is zero */
		for (i = 0; i < sizeof(civd_data_section); i++)
			sum += ((u8 *)&civd_data_section)[i];

		if (sum) {
			ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
				  sum);
			status = ICE_ERR_NVM;
			goto exit_error;
		}

		*civd = civd_data_section;

		return 0;
	}

	status = ICE_ERR_NVM;
	ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");

exit_error:
	return status;
}

/**
 * ice_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash module
 * @orom: pointer to Option ROM info structure
 *
 * Read Option ROM
version and security revision from the Option ROM flash
 * section.
 */
static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank,
		      struct ice_orom_info *orom)
{
	struct ice_orom_civd_info civd;
	u32 combo_ver;
	int status;

	status = ice_get_orom_civd_data(hw, bank, &civd);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to locate valid Option ROM CIVD data\n");
		return status;
	}

	combo_ver = LE32_TO_CPU(civd.combo_ver);

	/* Unpack major/build/patch fields from the combo version word */
	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
			    ICE_OROM_VER_BUILD_SHIFT);

	status = ice_get_orom_srev(hw, bank, &orom->srev);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read Option ROM security revision.\n");
		return status;
	}

	return 0;
}

/**
 * ice_get_inactive_orom_ver - Read Option ROM version from the inactive bank
 * @hw: pointer to the HW structure
 * @orom: storage for Option ROM version information
 *
 * Reads the Option ROM version and security revision data for the inactive
 * section of flash. Used to access version data for a pending update that has
 * not yet been activated.
 */
int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
	return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}

/**
 * ice_get_netlist_info
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information from the requested bank. Reads the Link
 * Topology section to find the Netlist ID block and extract the relevant
 * information into the netlist version structure.
 */
static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
		     struct ice_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	u16 *id_blk;
	int status;

	status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET,
					 &module_id);
	if (status)
		return status;

	if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
		ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
			  ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
		return ICE_ERR_NVM;
	}

	status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN,
					 &length);
	if (status)
		return status;

	/* sanity check that we have at least enough words to store the netlist ID block */
	if (length < ICE_NETLIST_ID_BLK_SIZE) {
		ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
			  ICE_NETLIST_ID_BLK_SIZE, length);
		return ICE_ERR_NVM;
	}

	status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT,
					 &node_count);
	if (status)
		return status;
	node_count &= ICE_LINK_TOPO_NODE_COUNT_M;

	id_blk = (u16 *)ice_calloc(hw, ICE_NETLIST_ID_BLK_SIZE,
				   sizeof(*id_blk));
	if (!id_blk)
		return ICE_ERR_NO_MEMORY;

	/* Read out the entire Netlist ID Block at once. */
	status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
				       ICE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
				       (u8 *)id_blk,
				       ICE_NETLIST_ID_BLK_SIZE * sizeof(u16));
	if (status)
		goto exit_error;

	/* Byte-swap each word of the ID block in place */
	for (i = 0; i < ICE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = LE16_TO_CPU(((_FORCE_ __le16 *)id_blk)[i]);

	netlist->major = id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[ICE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[ICE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[ICE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[ICE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

exit_error:
	ice_free(hw, id_blk);

	return status;
}

/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information
 */
int ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
	return ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, netlist);
}

/**
 * ice_get_inactive_netlist_ver
 * @hw: pointer to the HW struct
 * @netlist: pointer to netlist version info structure
 *
 * Read the netlist version data from the inactive netlist bank. Used to
 * extract version data of a pending flash update in order to display the
 * version data.
 */
int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
	return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}

/**
 * ice_discover_flash_size - Discover the available flash size
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller.
Use bisection to determine the accessible size
 * of flash memory.
 */
static int ice_discover_flash_size(struct ice_hw *hw)
{
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	/* Binary search: a read past the end fails with EINVAL, a read
	 * within the flash succeeds.
	 */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = 0;
			max_size = offset;
		} else if (!status) {
			ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n",
		  max_size);

	hw->flash.flash_size = max_size;

err_read_flat_nvm:
	ice_release_nvm(hw);

	return status;
}

/**
 * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word
 * @hw: pointer to the HW structure
 * @offset: the word offset of the Shadow RAM word to read
 * @pointer: pointer value read from Shadow RAM
 *
 * Read the given Shadow RAM word, and convert it to a pointer value specified
 * in bytes. This function assumes the specified offset is a valid pointer
 * word.
 *
 * Each pointer word specifies whether it is stored in word size or 4KB
 * sector size by using the highest bit. The reported pointer value will be in
 * bytes, intended for flat NVM reads.
 */
static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
	int status;
	u16 value;

	status = ice_read_sr_word(hw, offset, &value);
	if (status)
		return status;

	/* Determine if the pointer is in 4KB or word units */
	if (value & ICE_SR_NVM_PTR_4KB_UNITS)
		*pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
	else
		*pointer = value * 2;

	return 0;
}

/**
 * ice_read_sr_area_size - Read an area size from a Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: the word offset of the Shadow RAM to read
 * @size: size value read from the Shadow RAM
 *
 * Read the given Shadow RAM word, and convert it to an area size value
 * specified in bytes. This function assumes the specified offset is a valid
 * area size word.
 *
 * Each area size word is specified in 4KB sector units. This function reports
 * the size in bytes, intended for flat NVM reads.
 */
static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
	int status;
	u16 value;

	status = ice_read_sr_word(hw, offset, &value);
	if (status)
		return status;

	/* Area sizes are always specified in 4KB units */
	*size = value * 4 * 1024;

	return 0;
}

/**
 * ice_determine_active_flash_banks - Discover active bank for each module
 * @hw: pointer to the HW struct
 *
 * Read the Shadow RAM control word and determine which banks are active for
 * the NVM, OROM, and Netlist modules. Also read and calculate the associated
 * pointer and size. These values are then cached into the ice_flash_info
 * structure for later use in order to calculate the correct offset to read
 * from the active module.
 */
static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
	struct ice_bank_info *banks = &hw->flash.banks;
	u16 ctrl_word;
	int status;

	status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n");
		return status;
	}

	/* Check that the control word indicates validity */
	if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S !=
	    ICE_SR_CTRL_WORD_VALID) {
		ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
		return ICE_ERR_CFG;
	}

	/* Each control-word bit selects 1st vs 2nd bank for one module */
	if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
		banks->nvm_bank = ICE_1ST_FLASH_BANK;
	else
		banks->nvm_bank = ICE_2ND_FLASH_BANK;

	if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK))
		banks->orom_bank = ICE_1ST_FLASH_BANK;
	else
		banks->orom_bank = ICE_2ND_FLASH_BANK;

	if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK))
		banks->netlist_bank = ICE_1ST_FLASH_BANK;
	else
		banks->netlist_bank = ICE_2ND_FLASH_BANK;

	status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n");
		return status;
	}

	status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n");
		return status;
	}

	status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n");
		return status;
	}

	status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n");
		return status;
	}

	status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n");
		return status;
	}

	status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n");
		return status;
	}

	return 0;
}

/**
 * ice_init_nvm - initializes NVM setting
 * @hw: pointer to the HW struct
 *
 * This function reads and populates NVM settings such as Shadow RAM size,
 * max_timeout, and blank_nvm_mode
 */
int ice_init_nvm(struct ice_hw *hw)
{
	struct ice_flash_info *flash = &hw->flash;
	u32 fla, gens_stat;
	u8 sr_size;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* Switching to words (sr_size contains power of 2) */
	flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
		flash->blank_nvm_mode = false;
	} else {
		/* Blank programming mode */
		flash->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
		return status;
	}

	status = ice_determine_active_flash_banks(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n");
		return status;
	}

	status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
		return status;
	}

	/* Option ROM / netlist version failures are logged but non-fatal */
	status = ice_get_orom_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->orom);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");

	/* read the netlist version information */
	status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return 0;
}

/**
 * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
 * @hw: pointer to the HW
structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq
 * method. The buf read is preceded by the NVM ownership take
 * and followed by the release.
 */
int ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
	int status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_read_sr_buf_aq(hw, offset, words, data);
		ice_release_nvm(hw);
	}

	return status;
}

/**
 * __ice_write_sr_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the ice_write_sr_aq method.
 * NVM ownership have to be acquired and released (on ARQ completion event
 * reception) by caller. To commit SR to NVM update checksum function
 * should be called.
 */
int __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
{
	__le16 data_local = CPU_TO_LE16(*data);

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Value 0x00 below means that we treat SR as a flat mem */
	return ice_write_sr_aq(hw, offset, 1, &data_local, false);
}

/**
 * __ice_write_sr_buf - Writes Shadow RAM buf
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller. To commit SR to NVM update
 * checksum function should be called.
 */
int __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
{
	__le16 *data_local;
	int status;
	void *vmem;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Copy the caller's words into a scratch buffer, converting each to
	 * little-endian for the hardware.
	 */
	vmem = ice_calloc(hw, words, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data_local = (_FORCE_ __le16 *)vmem;

	for (i = 0; i < words; i++)
		data_local[i] = CPU_TO_LE16(data[i]);

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	status = ice_write_sr_aq(hw, offset, words, data_local, false);

	ice_free(hw, vmem);

	return status;
}

/**
 * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 */
static int ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module;
	int status = 0;
	void *vmem;
	u16 *data;
	u16 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Scratch buffer holding one Shadow RAM sector at a time */
	vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data = (u16 *)vmem;

	/* read pointer to VPD area */
	status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* read pointer to PCIe Alt Auto-load module */
	status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR,
				     &pcie_alt_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->flash.sr_words; i++) {
		/* Read SR page: refill the sector buffer every
		 * ICE_SR_SECTOR_SIZE_IN_WORDS words.
		 */
		if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;

			status = ice_read_sr_buf_aq(hw, i, &words, data);
			if (status)
				goto ice_calc_sr_checksum_exit;
		}

		/* Skip Checksum word */
		if (i == ICE_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if (i >= (u32)vpd_module &&
		    i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS))
			continue;
		/* Skip PCIe ALT module (convert byte size to word count) */
		if (i >= (u32)pcie_alt_module &&
		    i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS))
			continue;

		checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Checksum is chosen so that all covered words plus the checksum
	 * word sum to ICE_SR_SW_CHECKSUM_BASE (mod 2^16).
	 */
	*checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local;

ice_calc_sr_checksum_exit:
	ice_free(hw, vmem);
	return status;
}

/**
 * ice_update_sr_checksum - Updates the Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
*/ int ice_update_sr_checksum(struct ice_hw *hw) { __le16 le_sum; u16 checksum; int status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_calc_sr_checksum(hw, &checksum); if (!status) { le_sum = CPU_TO_LE16(checksum); status = ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1, &le_sum, true); } return status; } /** * ice_validate_sr_checksum - Validate Shadow RAM SW checksum * @hw: pointer to hardware structure * @checksum: calculated checksum * * Performs checksum calculation and validates the Shadow RAM SW checksum. * If the caller does not need checksum, the value can be NULL. */ int ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum) { u16 checksum_local; u16 checksum_sr; int status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { status = ice_calc_sr_checksum(hw, &checksum_local); ice_release_nvm(hw); if (status) return status; } else { return status; } ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr); /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (checksum_local != checksum_sr) status = ICE_ERR_NVM_CHECKSUM; /* If the user cares, return the calculated checksum */ if (checksum) *checksum = checksum_local; return status; } /** * ice_nvm_validate_checksum * @hw: pointer to the HW struct * * Verify NVM PFA checksum validity (0x0706) */ int ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; cmd = &desc.params.nvm_checksum; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); ice_release_nvm(hw); if (!status) if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) status = ICE_ERR_NVM_CHECKSUM; return status; } /** * ice_nvm_recalculate_checksum * @hw: pointer to the HW struct * * Recalculate NVM 
 PFA checksum (0x0706)
 */
int ice_nvm_recalculate_checksum(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	int status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	cmd = &desc.params.nvm_checksum;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_RECALC;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	ice_release_nvm(hw);

	return status;
}

/**
 * ice_nvm_write_activate
 * @hw: pointer to the HW struct
 * @cmd_flags: flags for write activate command
 * @response_flags: response indicators from firmware
 *
 * Update the control word with the required banks' validity bits
 * and dumps the Shadow RAM to flash (0x0707)
 *
 * cmd_flags controls which banks to activate, the preservation level to use
 * when activating the NVM bank, and whether an EMP reset is required for
 * activation.
 *
 * Note that the 16bit cmd_flags value is split between two separate 1 byte
 * flag values in the descriptor.
 *
 * On successful return of the firmware command, the response_flags variable
 * is updated with the flags reported by firmware indicating certain status,
 * such as whether EMP reset is enabled.
 */
int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
	struct ice_aqc_nvm *cmd;
	struct ice_aq_desc desc;
	int err;

	cmd = &desc.params.nvm;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);

	/* Low byte goes into cmd_flags, high byte into offset_high (see the
	 * note above about the 16-bit value being split in the descriptor).
	 */
	cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
	cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);

	err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!err && response_flags)
		*response_flags = cmd->cmd_flags;

	return err;
}

/**
 * ice_get_nvm_minsrevs - Get the Minimum Security Revision values from flash
 * @hw: pointer to the HW struct
 * @minsrevs: structure to store NVM and OROM minsrev values
 *
 * Read the Minimum Security Revision TLV and extract the revision values from
 * the flash image into a readable structure for processing.
*/ int ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) { struct ice_aqc_nvm_minsrev data; int status; u16 valid; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; status = ice_aq_read_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, true, false, NULL); ice_release_nvm(hw); if (status) return status; valid = LE16_TO_CPU(data.validity); /* Extract NVM minimum security revision */ if (valid & ICE_AQC_NVM_MINSREV_NVM_VALID) { u16 minsrev_l, minsrev_h; minsrev_l = LE16_TO_CPU(data.nvm_minsrev_l); minsrev_h = LE16_TO_CPU(data.nvm_minsrev_h); minsrevs->nvm = minsrev_h << 16 | minsrev_l; minsrevs->nvm_valid = true; } /* Extract the OROM minimum security revision */ if (valid & ICE_AQC_NVM_MINSREV_OROM_VALID) { u16 minsrev_l, minsrev_h; minsrev_l = LE16_TO_CPU(data.orom_minsrev_l); minsrev_h = LE16_TO_CPU(data.orom_minsrev_h); minsrevs->orom = minsrev_h << 16 | minsrev_l; minsrevs->orom_valid = true; } return 0; } /** * ice_update_nvm_minsrevs - Update minimum security revision TLV data in flash * @hw: pointer to the HW struct * @minsrevs: minimum security revision information * * Update the NVM or Option ROM minimum security revision fields in the PFA * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid * fields to determine what update is being requested. If the valid bit is not * set for that module, then the associated minsrev will be left as is. 
 */
int ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
	struct ice_aqc_nvm_minsrev data;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Reject a request that updates neither module */
	if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
		ice_debug(hw, ICE_DBG_NVM, "At least one of NVM and OROM MinSrev must be valid");
		return ICE_ERR_PARAM;
	}

	status = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (status)
		return status;

	/* Get current data: read-modify-write so untouched fields persist */
	status = ice_aq_read_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0,
				 sizeof(data), &data, true, false, NULL);
	if (status)
		goto exit_release_res;

	if (minsrevs->nvm_valid) {
		data.nvm_minsrev_l = CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
		data.nvm_minsrev_h = CPU_TO_LE16(minsrevs->nvm >> 16);
		data.validity |= CPU_TO_LE16(ICE_AQC_NVM_MINSREV_NVM_VALID);
	}

	if (minsrevs->orom_valid) {
		data.orom_minsrev_l = CPU_TO_LE16(minsrevs->orom & 0xFFFF);
		data.orom_minsrev_h = CPU_TO_LE16(minsrevs->orom >> 16);
		data.validity |= CPU_TO_LE16(ICE_AQC_NVM_MINSREV_OROM_VALID);
	}

	/* Update flash data */
	status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0,
				   sizeof(data), &data, false,
				   ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
	if (status)
		goto exit_release_res;

	/* Dump the Shadow RAM to the flash */
	status = ice_nvm_write_activate(hw, 0, NULL);

exit_release_res:
	ice_release_nvm(hw);

	return status;
}

/**
 * ice_nvm_access_get_features - Return the NVM access features structure
 * @cmd: NVM access command to process
 * @data: storage for the driver NVM features
 *
 * Fill in the data section of the NVM access request with a copy of the NVM
 * features structure.
 */
int ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data)
{
	/* The provided data_size must be at least as large as our NVM
	 * features structure. A larger size should not be treated as an
	 * error, to allow future extensions to the features structure to
	 * work on older drivers.
	 */
	if (cmd->data_size < sizeof(struct ice_nvm_features))
		return ICE_ERR_NO_MEMORY;

	/* Initialize the data buffer to zeros */
	ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);

	/* Fill in the features data */
	data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER;
	data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER;
	data->drv_features.size = sizeof(struct ice_nvm_features);
	data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;

	return 0;
}

/**
 * ice_nvm_access_get_module - Helper function to read module value
 * @cmd: NVM access command structure
 *
 * Reads the module value out of the NVM access config field.
 */
u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd)
{
	/* Pure bitfield extraction; no side effects */
	return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S);
}

/**
 * ice_nvm_access_get_flags - Helper function to read flags value
 * @cmd: NVM access command structure
 *
 * Reads the flags value out of the NVM access config field.
 */
u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd)
{
	return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S);
}

/**
 * ice_nvm_access_get_adapter - Helper function to read adapter info
 * @cmd: NVM access command structure
 *
 * Read the adapter info value out of the NVM access config field.
 */
u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
{
	return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >>
		ICE_NVM_CFG_ADAPTER_INFO_S);
}

/**
 * ice_validate_nvm_rw_reg - Check than an NVM access request is valid
 * @cmd: NVM access command structure
 *
 * Validates that an NVM access structure is request to read or write a valid
 * register offset. First validates that the module and flags are correct, and
 * then ensures that the register offset is one of the accepted registers.
 */
static int ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
{
	u32 module, flags, offset;
	u16 i;

	module = ice_nvm_access_get_module(cmd);
	flags = ice_nvm_access_get_flags(cmd);
	offset = cmd->offset;

	/* Make sure the module and flags indicate a read/write request */
	if (module != ICE_NVM_REG_RW_MODULE ||
	    flags != ICE_NVM_REG_RW_FLAGS ||
	    cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval))
		return ICE_ERR_PARAM;

	/* Allow-list of individually accessible registers */
	switch (offset) {
	case GL_HICR:
	case GL_HICR_EN: /* Note, this register is read only */
	case GL_FWSTS:
	case GL_MNG_FWSM:
	case GLGEN_CSR_DEBUG_C:
	case GLGEN_RSTAT:
	case GLPCI_LBARCTRL:
	case GL_MNG_DEF_DEVID:
	case GLNVM_GENS:
	case GLNVM_FLA:
	case PF_FUNC_RID:
		return 0;
	default:
		break;
	}

	/* The GL_HIDA and GL_HIBA register arrays are also accessible */
	for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
		if (offset == (u32)GL_HIDA(i))
			return 0;

	for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
		if (offset == (u32)GL_HIBA(i))
			return 0;

	/* All other register offsets are not valid */
	return ICE_ERR_OUT_OF_RANGE;
}

/**
 * ice_nvm_access_read - Handle an NVM read request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command to process
 * @data: storage for the register value read
 *
 * Process an NVM access request to read a register.
 */
int ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data)
{
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Always initialize the output data, even on failure */
	ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);

	/* Make sure this is a valid read/write access request */
	status = ice_validate_nvm_rw_reg(cmd);
	if (status)
		return status;

	ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n", cmd->offset);

	/* Read the register and store the contents in the data field */
	data->regval = rd32(hw, cmd->offset);

	return 0;
}

/**
 * ice_nvm_access_write - Handle an NVM write request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command to process
 * @data: NVM access data to write
 *
 * Process an NVM access request to write a register.
 */
int ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data)
{
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Make sure this is a valid read/write access request */
	status = ice_validate_nvm_rw_reg(cmd);
	if (status)
		return status;

	/* Reject requests to write to read-only registers; the HICR enable
	 * register offset differs on E830 hardware.
	 */
	if (hw->mac_type == ICE_MAC_E830) {
		if (cmd->offset == E830_GL_HICR_EN)
			return ICE_ERR_OUT_OF_RANGE;
	} else {
		if (cmd->offset == GL_HICR_EN)
			return ICE_ERR_OUT_OF_RANGE;
	}

	if (cmd->offset == GLGEN_RSTAT)
		return ICE_ERR_OUT_OF_RANGE;

	ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
		  cmd->offset, data->regval);

	/* Write the data field to the specified register */
	wr32(hw, cmd->offset, data->regval);

	return 0;
}

/**
 * ice_handle_nvm_access - Handle an NVM access request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command info
 * @data: pointer to read or return data
 *
 * Process an NVM access request. Read the command structure information and
 * determine if it is valid. If not, report an error indicating the command
 * was invalid.
 *
 * For valid commands, perform the necessary function, copying the data into
 * the provided data buffer.
 */
int ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data)
{
	u32 module, flags, adapter_info;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Extended flags are currently reserved and must be zero */
	if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0)
		return ICE_ERR_PARAM;

	/* Adapter info must match the HW device ID */
	adapter_info = ice_nvm_access_get_adapter(cmd);
	if (adapter_info != hw->device_id)
		return ICE_ERR_PARAM;

	switch (cmd->command) {
	case ICE_NVM_CMD_READ:
		module = ice_nvm_access_get_module(cmd);
		flags = ice_nvm_access_get_flags(cmd);

		/* Getting the driver's NVM features structure shares the same
		 * command type as reading a register. Read the config field
		 * to determine if this is a request to get features.
		 */
		if (module == ICE_NVM_GET_FEATURES_MODULE &&
		    flags == ICE_NVM_GET_FEATURES_FLAGS &&
		    cmd->offset == 0)
			return ice_nvm_access_get_features(cmd, data);
		else
			return ice_nvm_access_read(hw, cmd, data);
	case ICE_NVM_CMD_WRITE:
		return ice_nvm_access_write(hw, cmd, data);
	default:
		return ICE_ERR_PARAM;
	}
}

/**
 * ice_nvm_sanitize_operate - Clear the user data
 * @hw: pointer to the HW struct
 *
 * Clear user data from NVM using AQ command (0x070C).
 *
 * Return: the exit code of the operation.
 */
s32 ice_nvm_sanitize_operate(struct ice_hw *hw)
{
	s32 status;
	u8 values;

	u8 cmd_flags = ICE_AQ_NVM_SANITIZE_REQ_OPERATE |
		       ICE_AQ_NVM_SANITIZE_OPERATE_SUBJECT_CLEAR;

	status = ice_nvm_sanitize(hw, cmd_flags, &values);
	if (status)
		return status;

	/* Fail when neither the host nor the BMC reported a completed clean,
	 * or when a reported completed clean was not also successful.
	 */
	if ((!(values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
	     !(values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
	    ((values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
	     !(values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
	    ((values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
	     !(values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_nvm_sanitize - Sanitize NVM
 * @hw: pointer to the HW struct
 * @cmd_flags: flag to the ACI command
 * @values: values returned from the command
 *
 * Sanitize NVM using AQ command (0x070C).
 *
 * Return: the exit code of the operation.
 */
s32 ice_nvm_sanitize(struct ice_hw *hw, u8 cmd_flags, u8 *values)
{
	struct ice_aqc_nvm_sanitization *cmd;
	struct ice_aq_desc desc;
	s32 status;

	cmd = &desc.params.sanitization;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_sanitization);
	cmd->cmd_flags = cmd_flags;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	/* Hand back the raw status bits when the caller asked for them */
	if (values)
		*values = cmd->values;

	return status;
}
diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c
index 65690ea7e429..0fb7faecb2d7 100644
--- a/sys/dev/ice/if_ice_iflib.c
+++ b/sys/dev/ice/if_ice_iflib.c
@@ -1,4432 +1,4433 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2024, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file if_ice_iflib.c * @brief iflib driver implementation * * Contains the main entry point for the iflib driver implementation. It * implements the various ifdi driver methods, and sets up the module and * driver values to load an iflib driver. 
*/ #include "ice_iflib.h" #include "ice_drv_info.h" #include "ice_switch.h" #include "ice_sched.h" #include #include #include #include #include /* * Device method prototypes */ static void *ice_register(device_t); static int ice_if_attach_pre(if_ctx_t); static int ice_attach_pre_recovery_mode(struct ice_softc *sc); static int ice_if_attach_post(if_ctx_t); static void ice_attach_post_recovery_mode(struct ice_softc *sc); static int ice_if_detach(if_ctx_t); static int ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static int ice_if_msix_intr_assign(if_ctx_t ctx, int msix); static void ice_if_queues_free(if_ctx_t ctx); static int ice_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ice_if_intr_enable(if_ctx_t ctx); static void ice_if_intr_disable(if_ctx_t ctx); static int ice_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); static int ice_if_promisc_set(if_ctx_t ctx, int flags); static void ice_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); static int ice_if_media_change(if_ctx_t ctx); static void ice_if_init(if_ctx_t ctx); static void ice_if_timer(if_ctx_t ctx, uint16_t qid); static void ice_if_update_admin_status(if_ctx_t ctx); static void ice_if_multi_set(if_ctx_t ctx); static void ice_if_vlan_register(if_ctx_t ctx, u16 vtag); static void ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag); static void ice_if_stop(if_ctx_t ctx); static uint64_t ice_if_get_counter(if_ctx_t ctx, ift_counter counter); static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); static int ice_if_suspend(if_ctx_t ctx); static int ice_if_resume(if_ctx_t ctx); static bool ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event); static int ice_setup_mirror_vsi(struct 
ice_mirr_if *mif); static int ice_wire_mirror_intrs(struct ice_mirr_if *mif); static void ice_free_irqvs_subif(struct ice_mirr_if *mif); static void *ice_subif_register(device_t); static void ice_subif_setup_scctx(struct ice_mirr_if *mif); static int ice_subif_rebuild(struct ice_softc *sc); static int ice_subif_rebuild_vsi_qmap(struct ice_softc *sc); /* Iflib API */ static int ice_subif_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int ice_subif_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static int ice_subif_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int ice_subif_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); static void ice_subif_if_intr_enable(if_ctx_t ctx); static int ice_subif_if_msix_intr_assign(if_ctx_t ctx, int msix); static void ice_subif_if_init(if_ctx_t ctx); static void ice_subif_if_stop(if_ctx_t ctx); static void ice_subif_if_queues_free(if_ctx_t ctx); static int ice_subif_if_attach_pre(if_ctx_t); static int ice_subif_if_attach_post(if_ctx_t); static void ice_subif_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); static int ice_subif_if_promisc_set(if_ctx_t ctx, int flags); static int ice_msix_que(void *arg); static int ice_msix_admin(void *arg); /* * Helper function prototypes */ static int ice_pci_mapping(struct ice_softc *sc); static void ice_free_pci_mapping(struct ice_softc *sc); static void ice_update_link_status(struct ice_softc *sc, bool update_media); static void ice_init_device_features(struct ice_softc *sc); static void ice_init_tx_tracking(struct ice_vsi *vsi); static void ice_handle_reset_event(struct ice_softc *sc); static void ice_handle_pf_reset_request(struct ice_softc *sc); static void ice_prepare_for_reset(struct ice_softc *sc); static int ice_rebuild_pf_vsi_qmap(struct ice_softc *sc); static void ice_rebuild(struct ice_softc *sc); static void ice_rebuild_recovery_mode(struct ice_softc *sc); 
static void ice_free_irqvs(struct ice_softc *sc);
static void ice_update_rx_mbuf_sz(struct ice_softc *sc);
static void ice_poll_for_media_avail(struct ice_softc *sc);
static void ice_setup_scctx(struct ice_softc *sc);
static int  ice_allocate_msix(struct ice_softc *sc);
static void ice_admin_timer(void *arg);
static void ice_transition_recovery_mode(struct ice_softc *sc);
static void ice_transition_safe_mode(struct ice_softc *sc);
static void ice_set_default_promisc_mask(ice_bitmap_t *promisc_mask);

/*
 * Device Interface Declaration
 */

/**
 * @var ice_methods
 * @brief ice driver method entry points
 *
 * List of device methods implementing the generic device interface used by
 * the device stack to interact with the ice driver. Since this is an iflib
 * driver, most of the methods point to the generic iflib implementation.
 */
static device_method_t ice_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ice_register),
	DEVMETHOD(device_probe,    iflib_device_probe_vendor),
	DEVMETHOD(device_attach,   iflib_device_attach),
	DEVMETHOD(device_detach,   iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend,  iflib_device_suspend),
	DEVMETHOD(device_resume,   iflib_device_resume),
	DEVMETHOD_END
};

/**
 * @var ice_iflib_methods
 * @brief iflib method entry points
 *
 * List of device methods used by the iflib stack to interact with this
 * driver. These are the real main entry points used to interact with this
 * driver.
 */
static device_method_t ice_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ice_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ice_if_attach_post),
	DEVMETHOD(ifdi_detach, ice_if_detach),
	DEVMETHOD(ifdi_tx_queues_alloc, ice_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ice_if_rx_queues_alloc),
	DEVMETHOD(ifdi_msix_intr_assign, ice_if_msix_intr_assign),
	DEVMETHOD(ifdi_queues_free, ice_if_queues_free),
	DEVMETHOD(ifdi_mtu_set, ice_if_mtu_set),
	DEVMETHOD(ifdi_intr_enable, ice_if_intr_enable),
	DEVMETHOD(ifdi_intr_disable, ice_if_intr_disable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ice_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ice_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_promisc_set, ice_if_promisc_set),
	DEVMETHOD(ifdi_media_status, ice_if_media_status),
	DEVMETHOD(ifdi_media_change, ice_if_media_change),
	DEVMETHOD(ifdi_init, ice_if_init),
	DEVMETHOD(ifdi_stop, ice_if_stop),
	DEVMETHOD(ifdi_timer, ice_if_timer),
	DEVMETHOD(ifdi_update_admin_status, ice_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ice_if_multi_set),
	DEVMETHOD(ifdi_vlan_register, ice_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ice_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ice_if_get_counter),
	DEVMETHOD(ifdi_priv_ioctl, ice_if_priv_ioctl),
	DEVMETHOD(ifdi_i2c_req, ice_if_i2c_req),
	DEVMETHOD(ifdi_suspend, ice_if_suspend),
	DEVMETHOD(ifdi_resume, ice_if_resume),
	DEVMETHOD(ifdi_needs_restart, ice_if_needs_restart),
	DEVMETHOD_END
};

/**
 * @var ice_driver
 * @brief driver structure for the generic device stack
 *
 * driver_t definition used to setup the generic device methods.
 */
static driver_t ice_driver = {
	.name = "ice",
	.methods = ice_methods,
	.size = sizeof(struct ice_softc),
};

/**
 * @var ice_iflib_driver
 * @brief driver structure for the iflib stack
 *
 * driver_t definition used to setup the iflib device methods.
 */
static driver_t ice_iflib_driver = {
	.name = "ice",
	.methods = ice_iflib_methods,
	.size = sizeof(struct ice_softc),
};

extern struct if_txrx ice_txrx;
extern struct if_txrx ice_recovery_txrx;

/**
 * @var ice_sctx
 * @brief ice driver shared context
 *
 * Structure defining shared values (context) that is used by all instances of
 * the device. Primarily used to setup details about how the iflib stack
 * should treat this driver. Also defines the default, minimum, and maximum
 * number of descriptors in each ring.
 */
static struct if_shared_ctx ice_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,

	.isc_tx_maxsize = ICE_MAX_FRAME_SIZE,
	/* We could technically set this as high as ICE_MAX_DMA_SEG_SIZE, but
	 * that doesn't make sense since that would be larger than the maximum
	 * size of a single packet.
	 */
	.isc_tx_maxsegsize = ICE_MAX_FRAME_SIZE,

	/* XXX: This is only used by iflib to ensure that
	 * scctx->isc_tx_tso_size_max + the VLAN header is a valid size.
	 */
	.isc_tso_maxsize = ICE_TSO_SIZE + sizeof(struct ether_vlan_header),
	/* XXX: This is used by iflib to set the number of segments in the TSO
	 * DMA tag. However, scctx->isc_tx_tso_segsize_max is used to set the
	 * related ifnet parameter.
	 */
	.isc_tso_maxsegsize = ICE_MAX_DMA_SEG_SIZE,

	.isc_rx_maxsize = ICE_MAX_FRAME_SIZE,
	.isc_rx_nsegments = ICE_MAX_RX_SEGS,
	.isc_rx_maxsegsize = ICE_MAX_FRAME_SIZE,

	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ice_vendor_info_array,
	.isc_driver_version = __DECONST(char *, ice_driver_version),
	.isc_driver = &ice_iflib_driver,

	/*
	 * IFLIB_NEED_SCRATCH ensures that mbufs have scratch space available
	 * for hardware checksum offload
	 *
	 * IFLIB_TSO_INIT_IP ensures that the TSO packets have zeroed out the
	 * IP sum field, required by our hardware to calculate valid TSO
	 * checksums.
	 *
	 * IFLIB_ADMIN_ALWAYS_RUN ensures that the administrative task runs
	 * even when the interface is down.
	 *
	 * IFLIB_SKIP_MSIX allows the driver to handle allocating MSI-X
	 * vectors manually instead of relying on iflib code to do this.
	 */
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP |
		IFLIB_ADMIN_ALWAYS_RUN | IFLIB_SKIP_MSIX,

	.isc_nrxd_min = {ICE_MIN_DESC_COUNT},
	.isc_ntxd_min = {ICE_MIN_DESC_COUNT},
	.isc_nrxd_max = {ICE_IFLIB_MAX_DESC_COUNT},
	.isc_ntxd_max = {ICE_IFLIB_MAX_DESC_COUNT},
	.isc_nrxd_default = {ICE_DEFAULT_DESC_COUNT},
	.isc_ntxd_default = {ICE_DEFAULT_DESC_COUNT},
};

DRIVER_MODULE(ice, pci, ice_driver, ice_module_event_handler, NULL);

MODULE_VERSION(ice, 1);
MODULE_DEPEND(ice, pci, 1, 1, 1);
MODULE_DEPEND(ice, ether, 1, 1, 1);
MODULE_DEPEND(ice, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, ice, ice_vendor_info_array);

/* Static driver-wide sysctls */
#include "ice_iflib_sysctls.h"

/**
 * ice_pci_mapping - Map PCI BAR memory
 * @sc: device private softc
 *
 * Map PCI BAR 0 for device operation.
 *
 * Return: 0 on success, or the error from ice_map_bar().
 */
static int
ice_pci_mapping(struct ice_softc *sc)
{
	int rc;

	/* Map BAR0 */
	rc = ice_map_bar(sc->dev, &sc->bar0, 0);
	if (rc)
		return rc;

	return 0;
}

/**
 * ice_free_pci_mapping - Release PCI BAR memory
 * @sc: device private softc
 *
 * Release PCI BARs which were previously mapped by ice_pci_mapping().
 */
static void
ice_free_pci_mapping(struct ice_softc *sc)
{
	/* Free BAR0 */
	ice_free_bar(sc->dev, &sc->bar0);
}

/*
 * Device methods
 */

/**
 * ice_register - register device method callback
 * @dev: the device being registered
 *
 * Returns a pointer to the shared context structure, which is used by iflib.
 */
static void *
ice_register(device_t dev __unused)
{
	return &ice_sctx;
} /* ice_register */

/**
 * ice_setup_scctx - Setup the iflib softc context structure
 * @sc: the device private structure
 *
 * Setup the parameters in if_softc_ctx_t structure used by the iflib stack
 * when loading.
 */
static void
ice_setup_scctx(struct ice_softc *sc)
{
	if_softc_ctx_t scctx = sc->scctx;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	bool safe_mode, recovery_mode;

	safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE);
	recovery_mode = ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE);

	/*
	 * If the driver loads in Safe mode or Recovery mode, limit iflib to
	 * a single queue pair.
	 */
	if (safe_mode || recovery_mode) {
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
		scctx->isc_ntxqsets_max = 1;
		scctx->isc_nrxqsets_max = 1;
	} else {
		/*
		 * iflib initially sets the isc_ntxqsets and isc_nrxqsets to
		 * the values of the override sysctls. Cache these initial
		 * values so that the driver can be aware of what the iflib
		 * sysctl value is when setting up MSI-X vectors.
		 */
		sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets;
		sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets;

		/* 0 means "no override": fall back to the RSS table size */
		if (scctx->isc_ntxqsets == 0)
			scctx->isc_ntxqsets = hw->func_caps.common_cap.rss_table_size;
		if (scctx->isc_nrxqsets == 0)
			scctx->isc_nrxqsets = hw->func_caps.common_cap.rss_table_size;

		scctx->isc_ntxqsets_max = hw->func_caps.common_cap.num_txq;
		scctx->isc_nrxqsets_max = hw->func_caps.common_cap.num_rxq;

		/*
		 * Sanity check that the iflib sysctl values are within the
		 * maximum supported range.
		 */
		if (sc->ifc_sysctl_ntxqs > scctx->isc_ntxqsets_max)
			sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets_max;
		if (sc->ifc_sysctl_nrxqs > scctx->isc_nrxqsets_max)
			sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets_max;
	}

	/* Descriptor ring byte sizes, rounded up to the DBA alignment */
	scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
	    * sizeof(struct ice_tx_desc), DBA_ALIGN);
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union ice_32b_rx_flex_desc), DBA_ALIGN);

	scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;

	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size;

	/*
	 * If the driver loads in recovery mode, disable Tx/Rx functionality
	 */
	if (recovery_mode)
		scctx->isc_txrx = &ice_recovery_txrx;
	else
		scctx->isc_txrx = &ice_txrx;

	/*
	 * If the driver loads in Safe mode or Recovery mode, disable
	 * advanced features including hardware offloads.
	 */
	if (safe_mode || recovery_mode) {
		scctx->isc_capenable = ICE_SAFE_CAPS;
		scctx->isc_tx_csum_flags = 0;
	} else {
		scctx->isc_capenable = ICE_FULL_CAPS;
		scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
	}

	scctx->isc_capabilities = scctx->isc_capenable;
} /* ice_setup_scctx */

/**
 * ice_if_attach_pre - Early device attach logic
 * @ctx: the iflib context structure
 *
 * Called by iflib during the attach process. Earliest main driver entry
 * point which performs necessary hardware and driver initialization. Called
 * before the Tx and Rx queues are allocated.
*/ static int ice_if_attach_pre(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); enum ice_fw_modes fw_mode; int status; if_softc_ctx_t scctx; struct ice_hw *hw; device_t dev; int err; device_printf(iflib_get_dev(ctx), "Loading the iflib ice driver\n"); ice_set_state(&sc->state, ICE_STATE_ATTACHING); sc->ctx = ctx; sc->media = iflib_get_media(ctx); sc->sctx = iflib_get_sctx(ctx); sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx); sc->ifp = iflib_get_ifp(ctx); dev = sc->dev = iflib_get_dev(ctx); scctx = sc->scctx = iflib_get_softc_ctx(ctx); hw = &sc->hw; hw->back = sc; snprintf(sc->admin_mtx_name, sizeof(sc->admin_mtx_name), "%s:admin", device_get_nameunit(dev)); mtx_init(&sc->admin_mtx, sc->admin_mtx_name, NULL, MTX_DEF); callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0); ASSERT_CTX_LOCKED(sc); if (ice_pci_mapping(sc)) { err = (ENXIO); goto destroy_admin_timer; } /* Save off the PCI information */ ice_save_pci_info(hw, dev); /* create tunables as early as possible */ ice_add_device_tunables(sc); /* Setup ControlQ lengths */ ice_set_ctrlq_len(hw); reinit_hw: fw_mode = ice_get_fw_mode(hw); if (fw_mode == ICE_FW_MODE_REC) { device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); err = ice_attach_pre_recovery_mode(sc); if (err) goto free_pci_mapping; return (0); } /* Initialize the hw data structure */ status = ice_init_hw(hw); if (status) { if (status == ICE_ERR_FW_API_VER) { /* Enter recovery mode, so that the driver remains * loaded. This way, if the system administrator * cannot update the driver, they may still attempt to * downgrade the NVM. 
*/ err = ice_attach_pre_recovery_mode(sc); if (err) goto free_pci_mapping; return (0); } else { err = EIO; device_printf(dev, "Unable to initialize hw, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } goto free_pci_mapping; } ice_init_device_features(sc); - ice_print_dual_nac_info(sc); - /* Keep flag set by default */ ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN); /* Notify firmware of the device driver version */ err = ice_send_version(sc); if (err) goto deinit_hw; /* * Success indicates a change was made that requires a reinitialization * of the hardware */ err = ice_load_pkg_file(sc); if (!err) { ice_deinit_hw(hw); goto reinit_hw; } err = ice_init_link_events(sc); if (err) { device_printf(dev, "ice_init_link_events failed: %s\n", ice_err_str(err)); goto deinit_hw; } /* Initialize VLAN mode in FW; if dual VLAN mode is supported by the package * and firmware, this will force them to use single VLAN mode. */ status = ice_set_vlan_mode(hw); if (status) { err = EIO; device_printf(dev, "Unable to initialize VLAN mode, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); goto deinit_hw; } ice_print_nvm_version(sc); /* Setup the MAC address */ iflib_set_mac(ctx, hw->port_info->mac.lan_addr); /* Setup the iflib softc context structure */ ice_setup_scctx(sc); /* Initialize the Tx queue manager */ err = ice_resmgr_init(&sc->tx_qmgr, hw->func_caps.common_cap.num_txq); if (err) { device_printf(dev, "Unable to initialize Tx queue manager: %s\n", ice_err_str(err)); goto deinit_hw; } /* Initialize the Rx queue manager */ err = ice_resmgr_init(&sc->rx_qmgr, hw->func_caps.common_cap.num_rxq); if (err) { device_printf(dev, "Unable to initialize Rx queue manager: %s\n", ice_err_str(err)); goto free_tx_qmgr; } /* Initialize the PF device interrupt resource manager */ err = ice_alloc_intr_tracking(sc); if (err) /* Errors are already printed */ goto free_rx_qmgr; /* Determine maximum number of VSIs we'll 
prepare for */ sc->num_available_vsi = min(ICE_MAX_VSI_AVAILABLE, hw->func_caps.guar_num_vsi); if (!sc->num_available_vsi) { err = EIO; device_printf(dev, "No VSIs allocated to host\n"); goto free_intr_tracking; } /* Allocate storage for the VSI pointers */ sc->all_vsi = (struct ice_vsi **) malloc(sizeof(struct ice_vsi *) * sc->num_available_vsi, M_ICE, M_WAITOK | M_ZERO); if (!sc->all_vsi) { err = ENOMEM; device_printf(dev, "Unable to allocate VSI array\n"); goto free_intr_tracking; } /* * Prepare the statically allocated primary PF VSI in the softc * structure. Other VSIs will be dynamically allocated as needed. */ ice_setup_pf_vsi(sc); ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max, scctx->isc_nrxqsets_max); /* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */ err = ice_allocate_msix(sc); if (err) goto free_main_vsi; return 0; free_main_vsi: /* ice_release_vsi will free the queue maps if they were allocated */ ice_release_vsi(&sc->pf_vsi); free(sc->all_vsi, M_ICE); sc->all_vsi = NULL; free_intr_tracking: ice_free_intr_tracking(sc); free_rx_qmgr: ice_resmgr_destroy(&sc->rx_qmgr); free_tx_qmgr: ice_resmgr_destroy(&sc->tx_qmgr); deinit_hw: ice_deinit_hw(hw); free_pci_mapping: ice_free_pci_mapping(sc); destroy_admin_timer: mtx_lock(&sc->admin_mtx); callout_stop(&sc->admin_timer); mtx_unlock(&sc->admin_mtx); mtx_destroy(&sc->admin_mtx); return err; } /* ice_if_attach_pre */ /** * ice_attach_pre_recovery_mode - Limited driver attach_pre for FW recovery * @sc: the device private softc * * Loads the device driver in limited Firmware Recovery mode, intended to * allow users to update the firmware to attempt to recover the device. * * @remark We may enter recovery mode in case either (a) the firmware is * detected to be in an invalid state and must be re-programmed, or (b) the * driver detects that the loaded firmware has a non-compatible API version * that the driver cannot operate with. 
*/ static int ice_attach_pre_recovery_mode(struct ice_softc *sc) { ice_set_state(&sc->state, ICE_STATE_RECOVERY_MODE); /* Setup the iflib softc context */ ice_setup_scctx(sc); /* Setup the PF VSI back pointer */ sc->pf_vsi.sc = sc; /* * We still need to allocate MSI-X vectors since we need one vector to * run the administrative admin interrupt */ return ice_allocate_msix(sc); } /** * ice_update_link_status - notify OS of link state change * @sc: device private softc structure * @update_media: true if we should update media even if link didn't change * * Called to notify iflib core of link status changes. Should be called once * during attach_post, and whenever link status changes during runtime. * * This call only updates the currently supported media types if the link * status changed, or if update_media is set to true. */ static void ice_update_link_status(struct ice_softc *sc, bool update_media) { struct ice_hw *hw = &sc->hw; int status; /* Never report link up when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; /* Report link status to iflib only once each time it changes */ if (!ice_testandset_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED)) { if (sc->link_up) { /* link is up */ uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info); if (!(hw->port_info->phy.link_info_old.link_info & ICE_AQ_LINK_UP)) ice_set_default_local_lldp_mib(sc); iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate); ice_rdma_link_change(sc, LINK_STATE_UP, baudrate); ice_link_up_msg(sc); } else { /* link is down */ iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0); ice_rdma_link_change(sc, LINK_STATE_DOWN, 0); } update_media = true; } /* Update the supported media types */ if (update_media && !ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { status = ice_add_media_types(sc, sc->media); if (status) device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n", ice_status_str(status), 
ice_aq_str(hw->adminq.sq_last_status)); } } /** * ice_if_attach_post - Late device attach logic * @ctx: the iflib context structure * * Called by iflib to finish up attaching the device. Performs any attach * logic which must wait until after the Tx and Rx queues have been * allocated. */ static int ice_if_attach_post(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); int status; int err; ASSERT_CTX_LOCKED(sc); /* We don't yet support loading if MSI-X is not supported */ if (sc->scctx->isc_intr != IFLIB_INTR_MSIX) { device_printf(sc->dev, "The ice driver does not support loading without MSI-X\n"); return (ENOTSUP); } /* The ifnet structure hasn't yet been initialized when the attach_pre * handler is called, so wait until attach_post to setup the * isc_max_frame_size. */ sc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; /* * If we are in recovery mode, only perform a limited subset of * initialization to support NVM recovery. 
*/ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) { ice_attach_post_recovery_mode(sc); return (0); } sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size; err = ice_initialize_vsi(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to initialize Main VSI: %s\n", ice_err_str(err)); return err; } /* Enable FW health event reporting */ ice_init_health_events(sc); /* Configure the main PF VSI for RSS */ err = ice_config_rss(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to configure RSS for the main VSI, err %s\n", ice_err_str(err)); return err; } /* Configure switch to drop transmitted LLDP and PAUSE frames */ err = ice_cfg_pf_ethertype_filters(sc); if (err) return err; ice_get_and_print_bus_info(sc); ice_set_link_management_mode(sc); ice_init_saved_phy_cfg(sc); ice_cfg_pba_num(sc); /* Set a default value for PFC mode on attach since the FW state is unknown * before sysctl tunables are executed and it can't be queried. This fixes an * issue when loading the driver with the FW LLDP agent enabled but the FW * was previously in DSCP PFC mode. 
*/ status = ice_aq_set_pfc_mode(&sc->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL); if (status) device_printf(sc->dev, "Setting pfc mode failed, status %s\n", ice_status_str(status)); ice_add_device_sysctls(sc); /* Get DCBX/LLDP state and start DCBX agent */ ice_init_dcb_setup(sc); /* Setup link configuration parameters */ ice_init_link_configuration(sc); ice_update_link_status(sc, true); /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); err = ice_rdma_pf_attach(sc); if (err) return (err); /* Start the admin timer */ mtx_lock(&sc->admin_mtx); callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc); mtx_unlock(&sc->admin_mtx); if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && !ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK); ice_clear_state(&sc->state, ICE_STATE_ATTACHING); return 0; } /* ice_if_attach_post */ /** * ice_attach_post_recovery_mode - Limited driver attach_post for FW recovery * @sc: the device private softc * * Performs minimal work to prepare the driver to recover an NVM in case the * firmware is in recovery mode. */ static void ice_attach_post_recovery_mode(struct ice_softc *sc) { /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Start the admin timer */ mtx_lock(&sc->admin_mtx); callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc); mtx_unlock(&sc->admin_mtx); ice_clear_state(&sc->state, ICE_STATE_ATTACHING); } /** * ice_free_irqvs - Free IRQ vector memory * @sc: the device private softc structure * * Free IRQ vector memory allocated during ice_if_msix_intr_assign. 
*/ static void ice_free_irqvs(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; if_ctx_t ctx = sc->ctx; int i; /* If the irqvs array is NULL, then there are no vectors to free */ if (sc->irqvs == NULL) return; /* Free the IRQ vectors */ for (i = 0; i < sc->num_irq_vectors; i++) iflib_irq_free(ctx, &sc->irqvs[i].irq); /* Clear the irqv pointers */ for (i = 0; i < vsi->num_rx_queues; i++) vsi->rx_queues[i].irqv = NULL; for (i = 0; i < vsi->num_tx_queues; i++) vsi->tx_queues[i].irqv = NULL; /* Release the vector array memory */ free(sc->irqvs, M_ICE); sc->irqvs = NULL; sc->num_irq_vectors = 0; } /** * ice_if_detach - Device driver detach logic * @ctx: iflib context structure * * Perform device shutdown logic to detach the device driver. * * Note that there is no guarantee of the ordering of ice_if_queues_free() and * ice_if_detach(). It is possible for the functions to be called in either * order, and they must not assume to have a strict ordering. */ static int ice_if_detach(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; int status; int i; ASSERT_CTX_LOCKED(sc); /* Indicate that we're detaching */ ice_set_state(&sc->state, ICE_STATE_DETACHING); /* Stop the admin timer */ mtx_lock(&sc->admin_mtx); callout_stop(&sc->admin_timer); mtx_unlock(&sc->admin_mtx); mtx_destroy(&sc->admin_mtx); /* Remove additional interfaces if they exist */ if (sc->mirr_if) ice_destroy_mirror_interface(sc); ice_rdma_pf_detach(sc); /* Free allocated media types */ ifmedia_removeall(sc->media); /* Free the Tx and Rx sysctl contexts, and assign NULL to the node * pointers. 
Note, the calls here and those in ice_if_queues_free() * are *BOTH* necessary, as we cannot guarantee which path will be * run first */ ice_vsi_del_txqs_ctx(vsi); ice_vsi_del_rxqs_ctx(vsi); /* Release MSI-X resources */ ice_free_irqvs(sc); for (i = 0; i < sc->num_available_vsi; i++) { if (sc->all_vsi[i]) ice_release_vsi(sc->all_vsi[i]); } if (sc->all_vsi) { free(sc->all_vsi, M_ICE); sc->all_vsi = NULL; } /* Release MSI-X memory */ pci_release_msi(sc->dev); if (sc->msix_table != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->msix_table), sc->msix_table); sc->msix_table = NULL; } ice_free_intr_tracking(sc); /* Destroy the queue managers */ ice_resmgr_destroy(&sc->tx_qmgr); ice_resmgr_destroy(&sc->rx_qmgr); if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) ice_deinit_hw(&sc->hw); IFLIB_CTX_UNLOCK(sc); status = ice_reset(&sc->hw, ICE_RESET_PFR); IFLIB_CTX_LOCK(sc); if (status) { device_printf(sc->dev, "device PF reset failed, err %s\n", ice_status_str(status)); } ice_free_pci_mapping(sc); return 0; } /* ice_if_detach */ /** * ice_if_tx_queues_alloc - Allocate Tx queue memory * @ctx: iflib context structure * @vaddrs: virtual addresses for the queue memory * @paddrs: physical addresses for the queue memory * @ntxqs: the number of Tx queues per set (should always be 1) * @ntxqsets: the number of Tx queue sets to allocate * * Called by iflib to allocate Tx queues for the device. Allocates driver * memory to track each queue, the status arrays used for descriptor * status reporting, and Tx queue sysctls. 
*/ static int ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int __invariant_only ntxqs, int ntxqsets) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_tx_queue *txq; int err, i, j; MPASS(ntxqs == 1); MPASS(sc->scctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT); ASSERT_CTX_LOCKED(sc); /* Do not bother allocating queues if we're in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); /* Allocate queue structure memory */ if (!(vsi->tx_queues = (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_NOWAIT | M_ZERO))) { device_printf(sc->dev, "Unable to allocate Tx queue memory\n"); return (ENOMEM); } /* Allocate report status arrays */ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (!(txq->tx_rsq = (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) { device_printf(sc->dev, "Unable to allocate tx_rsq memory\n"); err = ENOMEM; goto free_tx_queues; } /* Initialize report status array */ for (j = 0; j < sc->scctx->isc_ntxd[0]; j++) txq->tx_rsq[j] = QIDX_INVALID; } /* Assign queues from PF space to the main VSI */ err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, ntxqsets); if (err) { device_printf(sc->dev, "Unable to assign PF queues: %s\n", ice_err_str(err)); goto free_tx_queues; } vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS; /* Add Tx queue sysctls context */ ice_vsi_add_txqs_ctx(vsi); for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { /* q_handle == me when only one TC */ txq->me = txq->q_handle = i; txq->vsi = vsi; /* store the queue size for easier access */ txq->desc_count = sc->scctx->isc_ntxd[0]; /* get the virtual and physical address of the hardware queues */ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); txq->tx_base = (struct ice_tx_desc *)vaddrs[i]; txq->tx_paddr = paddrs[i]; ice_add_txq_sysctls(txq); } vsi->num_tx_queues = ntxqsets; return (0); 
free_tx_queues: for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (txq->tx_rsq != NULL) { free(txq->tx_rsq, M_ICE); txq->tx_rsq = NULL; } } free(vsi->tx_queues, M_ICE); vsi->tx_queues = NULL; return err; } /** * ice_if_rx_queues_alloc - Allocate Rx queue memory * @ctx: iflib context structure * @vaddrs: virtual addresses for the queue memory * @paddrs: physical addresses for the queue memory * @nrxqs: number of Rx queues per set (should always be 1) * @nrxqsets: number of Rx queue sets to allocate * * Called by iflib to allocate Rx queues for the device. Allocates driver * memory to track each queue, as well as sets up the Rx queue sysctls. */ static int ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int __invariant_only nrxqs, int nrxqsets) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_rx_queue *rxq; int err, i; MPASS(nrxqs == 1); MPASS(sc->scctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT); ASSERT_CTX_LOCKED(sc); /* Do not bother allocating queues if we're in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); /* Allocate queue structure memory */ if (!(vsi->rx_queues = (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_NOWAIT | M_ZERO))) { device_printf(sc->dev, "Unable to allocate Rx queue memory\n"); return (ENOMEM); } /* Assign queues from PF space to the main VSI */ err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, nrxqsets); if (err) { device_printf(sc->dev, "Unable to assign PF queues: %s\n", ice_err_str(err)); goto free_rx_queues; } vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS; /* Add Rx queue sysctls context */ ice_vsi_add_rxqs_ctx(vsi); for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) { rxq->me = i; rxq->vsi = vsi; /* store the queue size for easier access */ rxq->desc_count = sc->scctx->isc_nrxd[0]; /* get the virtual and physical address of the hardware queues */ 
rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i]; rxq->rx_paddr = paddrs[i]; ice_add_rxq_sysctls(rxq); } vsi->num_rx_queues = nrxqsets; return (0); free_rx_queues: free(vsi->rx_queues, M_ICE); vsi->rx_queues = NULL; return err; } /** * ice_if_queues_free - Free queue memory * @ctx: the iflib context structure * * Free queue memory allocated by ice_if_tx_queues_alloc() and * ice_if_rx_queues_alloc(). * * There is no guarantee that ice_if_queues_free() and ice_if_detach() will be * called in the same order. It's possible for ice_if_queues_free() to be * called prior to ice_if_detach(), and vice versa. * * For this reason, the main VSI is a static member of the ice_softc, which is * not free'd until after iflib finishes calling both of these functions. * * Thus, care must be taken in how we manage the memory being freed by this * function, and in what tasks it can and must perform. */ static void ice_if_queues_free(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_tx_queue *txq; int i; /* Free the Tx and Rx sysctl contexts, and assign NULL to the node * pointers. 
Note, the calls here and those in ice_if_detach() * are *BOTH* necessary, as we cannot guarantee which path will be * run first */ ice_vsi_del_txqs_ctx(vsi); ice_vsi_del_rxqs_ctx(vsi); /* Release MSI-X IRQ vectors, if not yet released in ice_if_detach */ ice_free_irqvs(sc); if (vsi->tx_queues != NULL) { /* free the tx_rsq arrays */ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { if (txq->tx_rsq != NULL) { free(txq->tx_rsq, M_ICE); txq->tx_rsq = NULL; } } free(vsi->tx_queues, M_ICE); vsi->tx_queues = NULL; vsi->num_tx_queues = 0; } if (vsi->rx_queues != NULL) { free(vsi->rx_queues, M_ICE); vsi->rx_queues = NULL; vsi->num_rx_queues = 0; } } /** * ice_msix_que - Fast interrupt handler for MSI-X receive queues * @arg: The Rx queue memory * * Interrupt filter function for iflib MSI-X interrupts. Called by iflib when * an MSI-X interrupt for a given queue is triggered. Currently this just asks * iflib to schedule the main Rx thread. */ static int ice_msix_que(void *arg) { struct ice_rx_queue __unused *rxq = (struct ice_rx_queue *)arg; /* TODO: dynamic ITR algorithm?? */ return (FILTER_SCHEDULE_THREAD); } /** * ice_msix_admin - Fast interrupt handler for MSI-X admin interrupt * @arg: pointer to device softc memory * * Called by iflib when an administrative interrupt occurs. Should perform any * fast logic for handling the interrupt cause, and then indicate whether the * admin task needs to be queued. */ static int ice_msix_admin(void *arg) { struct ice_softc *sc = (struct ice_softc *)arg; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; u32 oicr; /* There is no safe way to modify the enabled miscellaneous causes of * the OICR vector at runtime, as doing so would be prone to race * conditions. Reading PFINT_OICR will unmask the associated interrupt * causes and allow future interrupts to occur. 
The admin interrupt * vector will not be re-enabled until after we exit this function, * but any delayed tasks must be resilient against possible "late * arrival" interrupts that occur while we're already handling the * task. This is done by using state bits and serializing these * delayed tasks via the admin status task function. */ oicr = rd32(hw, PFINT_OICR); /* Processing multiple controlq interrupts on a single vector does not * provide an indication of which controlq triggered the interrupt. * We might try reading the INTEVENT bit of the respective PFINT_*_CTL * registers. However, the INTEVENT bit is not guaranteed to be set as * it gets automatically cleared when the hardware acknowledges the * interrupt. * * This means we don't really have a good indication of whether or * which controlq triggered this interrupt. We'll just notify the * admin task that it should check all the controlqs. */ ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING); if (oicr & PFINT_OICR_VFLR_M) { ice_set_state(&sc->state, ICE_STATE_VFLR_PENDING); } if (oicr & PFINT_OICR_MAL_DETECT_M) { ice_set_state(&sc->state, ICE_STATE_MDD_PENDING); } if (oicr & PFINT_OICR_GRST_M) { u32 reset; reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> GLGEN_RSTAT_RESET_TYPE_S; if (reset == ICE_RESET_CORER) sc->soft_stats.corer_count++; else if (reset == ICE_RESET_GLOBR) sc->soft_stats.globr_count++; else sc->soft_stats.empr_count++; /* There are a couple of bits at play for handling resets. * First, the ICE_STATE_RESET_OICR_RECV bit is used to * indicate that the driver has received an OICR with a reset * bit active, indicating that a CORER/GLOBR/EMPR is about to * happen. Second, we set hw->reset_ongoing to indicate that * the hardware is in reset. We will set this back to false as * soon as the driver has determined that the hardware is out * of reset. 
* * If the driver wishes to trigger a request, it can set one of * the ICE_STATE_RESET_*_REQ bits, which will trigger the * correct type of reset. */ if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV)) { hw->reset_ongoing = true; /* * During the NVM update process, there is a driver reset and link * goes down and then up. The below if-statement prevents a second * link flap from occurring in ice_if_init(). */ if (if_getflags(sc->ifp) & IFF_UP) ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK); } } if (oicr & PFINT_OICR_ECC_ERR_M) { device_printf(dev, "ECC Error detected!\n"); ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); } if (oicr & (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M)) { if (oicr & PFINT_OICR_HMC_ERR_M) /* Log the HMC errors */ ice_log_hmc_error(hw, dev); ice_rdma_notify_pe_intr(sc, oicr); } if (oicr & PFINT_OICR_PCI_EXCEPTION_M) { device_printf(dev, "PCI Exception detected!\n"); ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); } return (FILTER_SCHEDULE_THREAD); } /** * ice_allocate_msix - Allocate MSI-X vectors for the interface * @sc: the device private softc * * Map the MSI-X bar, and then request MSI-X vectors in a two-stage process. * * First, determine a suitable total number of vectors based on the number * of CPUs, RSS buckets, the administrative vector, and other demands such as * RDMA. * * Request the desired amount of vectors, and see how many we obtain. If we * don't obtain as many as desired, reduce the demands by lowering the number * of requested queues or reducing the demand from other features such as * RDMA. * * @remark This function is required because the driver sets the * IFLIB_SKIP_MSIX flag indicating that the driver will manage MSI-X vectors * manually. * * @remark This driver will only use MSI-X vectors. If this is not possible, * neither MSI or legacy interrupts will be tried. * * @remark if it exists, os_imgr is initialized here for keeping track of * the assignments of extra MSIX vectors. 
* * @post on success this function must set the following scctx parameters: * isc_vectors, isc_nrxqsets, isc_ntxqsets, and isc_intr. * * @returns zero on success or an error code on failure. */ static int ice_allocate_msix(struct ice_softc *sc) { bool iflib_override_queue_count = false; if_softc_ctx_t scctx = sc->scctx; device_t dev = sc->dev; cpuset_t cpus; int bar, queues, vectors, requested; int err = 0; int rdma; /* Allocate the MSI-X bar */ bar = scctx->isc_msix_bar; sc->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (!sc->msix_table) { device_printf(dev, "Unable to map MSI-X table\n"); return (ENOMEM); } /* Check if the iflib queue count sysctls have been set */ if (sc->ifc_sysctl_ntxqs || sc->ifc_sysctl_nrxqs) iflib_override_queue_count = true; err = bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus); if (err) { device_printf(dev, "%s: Unable to fetch the CPU list: %s\n", __func__, ice_err_str(err)); CPU_COPY(&all_cpus, &cpus); } /* Attempt to mimic behavior of iflib_msix_init */ if (iflib_override_queue_count) { /* * If the override sysctls have been set, limit the queues to * the number of logical CPUs. */ queues = mp_ncpus; } else { /* * Otherwise, limit the queue count to the CPUs associated * with the NUMA node the device is associated with. */ queues = CPU_COUNT(&cpus); } /* Clamp to the number of RSS buckets */ queues = imin(queues, rss_getnumbuckets()); /* * Clamp the number of queue pairs to the minimum of the requested Tx * and Rx queues. */ queues = imin(queues, sc->ifc_sysctl_ntxqs ?: scctx->isc_ntxqsets); queues = imin(queues, sc->ifc_sysctl_nrxqs ?: scctx->isc_nrxqsets); if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RDMA)) { /* * Choose a number of RDMA vectors based on the number of CPUs * up to a maximum */ rdma = min(CPU_COUNT(&cpus), ICE_RDMA_MAX_MSIX); /* Further limit by the user configurable tunable */ rdma = min(rdma, ice_rdma_max_msix); } else { rdma = 0; } /* * Determine the number of vectors to request. 
Note that we also need * to allocate one vector for administrative tasks. */ requested = rdma + queues + 1; /* Add extra vectors requested by the user for later subinterface * creation. */ if_ctx_t ctx = sc->ctx; u32 extra_vectors = iflib_get_extra_msix_vectors_sysctl(ctx); requested += extra_vectors; vectors = requested; err = pci_alloc_msix(dev, &vectors); if (err) { device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n", vectors, ice_err_str(err)); goto err_free_msix_table; } /* If we don't receive enough vectors, reduce demands */ if (vectors < requested) { int diff = requested - vectors; device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n", requested, vectors); diff += extra_vectors; extra_vectors = 0; /* * The OS didn't grant us the requested number of vectors. * Check to see if we can reduce demands by limiting the * number of vectors allocated to certain features. */ if (rdma >= diff) { /* Reduce the number of RDMA vectors we reserve */ rdma -= diff; diff = 0; } else { /* Disable RDMA and reduce the difference */ ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap); diff -= rdma; rdma = 0; } /* * If we still have a difference, we need to reduce the number * of queue pairs. * * However, we still need at least one vector for the admin * interrupt and one queue pair. */ if (queues <= diff) { device_printf(dev, "Unable to allocate sufficient MSI-X vectors\n"); err = (ERANGE); goto err_pci_release_msi; } queues -= diff; } device_printf(dev, "Using %d Tx and Rx queues\n", queues); if (rdma) device_printf(dev, "Reserving %d MSI-X interrupts for iRDMA\n", rdma); device_printf(dev, "Using MSI-X interrupts with %d vectors\n", vectors); /* Split resulting vectors back into requested splits */ scctx->isc_vectors = vectors; scctx->isc_nrxqsets = queues; scctx->isc_ntxqsets = queues; scctx->isc_intr = IFLIB_INTR_MSIX; sc->irdma_vectors = rdma; /* Interrupt allocation tracking isn't required in recovery mode, * since neither RDMA nor VFs are enabled. 
*/ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); /* Keep track of which interrupt indices are being used for what */ sc->lan_vectors = vectors - rdma; sc->lan_vectors -= extra_vectors; err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->pf_imap, sc->lan_vectors); if (err) { device_printf(dev, "Unable to assign PF interrupt mapping: %s\n", ice_err_str(err)); goto err_pci_release_msi; } err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->rdma_imap, rdma); if (err) { device_printf(dev, "Unable to assign PF RDMA interrupt mapping: %s\n", ice_err_str(err)); goto err_release_pf_imap; } sc->extra_vectors = extra_vectors; /* Setup another resource manager to track the assignments of extra OS * vectors. These OS interrupt allocations don't need to be contiguous, * unlike the ones that come from the device. */ err = ice_resmgr_init(&sc->os_imgr, sc->extra_vectors); if (err) { device_printf(dev, "Unable to initialize OS extra interrupt manager: %s\n", ice_err_str(err)); ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap, rdma); goto err_release_pf_imap; } return (0); err_release_pf_imap: ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap, sc->lan_vectors); err_pci_release_msi: pci_release_msi(dev); err_free_msix_table: if (sc->msix_table != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->msix_table), sc->msix_table); sc->msix_table = NULL; } return (err); } /** * ice_if_msix_intr_assign - Assign MSI-X interrupt vectors to queues * @ctx: the iflib context structure * @msix: the number of vectors we were assigned * * Called by iflib to assign MSI-X vectors to queues. Currently requires that * we get at least the same number of vectors as we have queues, and that we * always have the same number of Tx and Rx queues. * * Tx queues use a softirq instead of using their own hardware interrupt. 
*/
static int
ice_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	struct ice_vsi *vsi = &sc->pf_vsi;
	int err, i, vector;

	ASSERT_CTX_LOCKED(sc);

	/* Each vector services one Tx/Rx queue pair; differing counts are
	 * not supported.
	 */
	if (vsi->num_rx_queues != vsi->num_tx_queues) {
		device_printf(sc->dev,
			      "iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing number of Tx and Rx queues\n",
			      vsi->num_tx_queues, vsi->num_rx_queues);
		return (EOPNOTSUPP);
	}

	/* One vector per queue pair, plus one for the admin interrupt */
	if (msix < (vsi->num_rx_queues + 1)) {
		device_printf(sc->dev,
			      "Not enough MSI-X vectors to assign one vector to each queue pair\n");
		return (EOPNOTSUPP);
	}

	/* Save the number of vectors for future use */
	sc->num_irq_vectors = vsi->num_rx_queues + 1;

	/* Allocate space to store the IRQ vector data */
	if (!(sc->irqvs =
	      (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (sc->num_irq_vectors),
					       M_ICE, M_NOWAIT))) {
		device_printf(sc->dev,
			      "Unable to allocate irqv memory\n");
		return (ENOMEM);
	}

	/* Administrative interrupt events will use vector 0 */
	err = iflib_irq_alloc_generic(ctx, &sc->irqvs[0].irq, 1, IFLIB_INTR_ADMIN,
				      ice_msix_admin, sc, 0, "admin");
	if (err) {
		device_printf(sc->dev,
			      "Failed to register Admin queue handler: %s\n",
			      ice_err_str(err));
		goto free_irqvs;
	}
	sc->irqvs[0].me = 0;

	/* Do not allocate queue interrupts when in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return (0);

	int rid;
	for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		char irq_name[16];

		/* rid 1 is consumed by the admin vector registered above */
		rid = vector + 1;

		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid,
					      IFLIB_INTR_RXTX, ice_msix_que,
					      rxq, rxq->me, irq_name);
		if (err) {
			device_printf(sc->dev,
				      "Failed to allocate q int %d err: %s\n",
				      i, ice_err_str(err));
			/* Step back so the unwind loop only frees vectors
			 * that were successfully allocated.
			 */
			vector--;
			i--;
			goto fail;
		}
		sc->irqvs[vector].me = vector;
		rxq->irqv = &sc->irqvs[vector];

		bzero(irq_name, sizeof(irq_name));

		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		/* Tx queues use a softirq and share the paired Rx queue's
		 * hardware vector instead of taking their own.
		 */
		iflib_softirq_alloc_generic(ctx, &sc->irqvs[vector].irq,
					    IFLIB_INTR_TX, txq,
					    txq->me, irq_name);
		txq->irqv = &sc->irqvs[vector];
	}

	/* For future interrupt assignments */
	sc->last_rid = rid + sc->irdma_vectors;

	return (0);
fail:
	for (; i >= 0; i--, vector--)
		iflib_irq_free(ctx, &sc->irqvs[vector].irq);
	iflib_irq_free(ctx, &sc->irqvs[0].irq);
free_irqvs:
	free(sc->irqvs, M_ICE);
	sc->irqvs = NULL;
	/* Parenthesized to match the file's style(9) return convention */
	return (err);
}

/**
 * ice_if_mtu_set - Set the device MTU
 * @ctx: iflib context structure
 * @mtu: the MTU requested
 *
 * Called by iflib to configure the device's Maximum Transmission Unit (MTU).
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static int
ice_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);

	ASSERT_CTX_LOCKED(sc);

	/* Do not support configuration when in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return (ENOSYS);

	if (mtu < ICE_MIN_MTU || mtu > ICE_MAX_MTU)
		return (EINVAL);

	/* Frame size includes L2 header, CRC, and one VLAN tag */
	sc->scctx->isc_max_frame_size = mtu +
		ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;

	sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;

	return (0);
}

/**
 * ice_if_intr_enable - Enable device interrupts
 * @ctx: iflib context structure
 *
 * Called by iflib to request enabling device interrupts.
*/
static void
ice_if_intr_enable(if_ctx_t ctx)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	struct ice_hw *hw = &sc->hw;
	struct ice_vsi *vsi = &sc->pf_vsi;
	int q;

	ASSERT_CTX_LOCKED(sc);

	/* The admin interrupt (ITR 0) is always enabled first */
	ice_enable_intr(hw, sc->irqvs[0].me);

	/* Queue interrupts are left untouched while in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	/* Unmask the interrupt for every Rx queue on the main VSI */
	for (q = 0; q < vsi->num_rx_queues; q++)
		ice_enable_intr(hw, vsi->rx_queues[q].irqv->me);
}

/**
 * ice_if_intr_disable - Disable device interrupts
 * @ctx: iflib context structure
 *
 * Called by iflib to request disabling device interrupts.
 */
static void
ice_if_intr_disable(if_ctx_t ctx)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	struct ice_hw *hw = &sc->hw;
	unsigned int vec;

	ASSERT_CTX_LOCKED(sc);

	/* IFDI_INTR_DISABLE may run before any interrupt has been assigned
	 * to a queue, so the rx_queues interrupt mapping cannot be trusted
	 * here. Instead, walk every possible device vector and mask each
	 * one individually.
	 *
	 * Vector 0 (ITR 0) is deliberately left enabled: it services the
	 * AdminQ interrupts, which must keep working even while the
	 * interface is offline.
	 */
	for (vec = 1; vec < hw->func_caps.common_cap.num_msix_vectors; vec++)
		ice_disable_intr(hw, vec);
}

/**
 * ice_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
 * @ctx: iflib context structure
 * @rxqid: the Rx queue to enable
 *
 * Enable a specific Rx queue interrupt.
 *
 * This function is not protected by the iflib CTX lock.
*/ static int ice_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me); return (0); } /** * ice_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt * @ctx: iflib context structure * @txqid: the Tx queue to enable * * Enable a specific Tx queue interrupt. * * This function is not protected by the iflib CTX lock. */ static int ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me); return (0); } /** * ice_set_default_promisc_mask - Set default config for promisc settings * @promisc_mask: bitmask to setup * * The ice_(set|clear)_vsi_promisc() function expects a mask of promiscuous * modes to operate on. The mask used in here is the default one for the * driver, where promiscuous is enabled/disabled for all types of * non-VLAN-tagged/VLAN 0 traffic. */ static void ice_set_default_promisc_mask(ice_bitmap_t *promisc_mask) { ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX); ice_set_bit(ICE_PROMISC_UCAST_TX, promisc_mask); ice_set_bit(ICE_PROMISC_UCAST_RX, promisc_mask); ice_set_bit(ICE_PROMISC_MCAST_TX, promisc_mask); ice_set_bit(ICE_PROMISC_MCAST_RX, promisc_mask); } /** * ice_if_promisc_set - Set device promiscuous mode * @ctx: iflib context structure * @flags: promiscuous flags to configure * * Called by iflib to configure device promiscuous mode. 
 *
 * @remark Calls to this function will always overwrite the previous setting
 */
static int
ice_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int status;
	bool promisc_enable = flags & IFF_PROMISC;
	bool multi_enable = flags & IFF_ALLMULTI;
	ice_declare_bitmap(promisc_mask, ICE_PROMISC_MAX);

	/* Do not support configuration when in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return (ENOSYS);

	ice_set_default_promisc_mask(promisc_mask);

	/* NOTE(review): IFF_ALLMULTI is rejected outright here; presumably
	 * multicast filtering is handled via the multicast filter list
	 * elsewhere — confirm against the driver's multi_set path.
	 */
	if (multi_enable)
		return (EOPNOTSUPP);

	if (promisc_enable) {
		status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx, promisc_mask, 0);
		/* An already-existing promiscuous filter is not an error */
		if (status && status != ICE_ERR_ALREADY_EXISTS) {
			device_printf(dev,
				      "Failed to enable promiscuous mode for PF VSI, err %s aq_err %s\n",
				      ice_status_str(status),
				      ice_aq_str(hw->adminq.sq_last_status));
			return (EIO);
		}
	} else {
		status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx, promisc_mask, 0);
		if (status) {
			device_printf(dev,
				      "Failed to disable promiscuous mode for PF VSI, err %s aq_err %s\n",
				      ice_status_str(status),
				      ice_aq_str(hw->adminq.sq_last_status));
			return (EIO);
		}
	}

	return (0);
}

/**
 * ice_if_media_change - Change device media
 * @ctx: device ctx structure
 *
 * Called by iflib when a media change is requested. This operation is not
 * supported by the hardware, so we just return an error code.
 */
static int
ice_if_media_change(if_ctx_t ctx)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);

	device_printf(sc->dev, "Media change is not supported.\n");
	return (ENODEV);
}

/**
 * ice_if_media_status - Report current device media
 * @ctx: iflib context structure
 * @ifmr: ifmedia request structure to update
 *
 * Updates the provided ifmr with current device media status, including link
 * status and media type.
*/ static void ice_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_link_status *li = &sc->hw.port_info->phy.link_info; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; /* Never report link up or media types when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; if (!sc->link_up) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_FDX; if (li->phy_type_low) ifmr->ifm_active |= ice_get_phy_type_low(li->phy_type_low); else if (li->phy_type_high) ifmr->ifm_active |= ice_get_phy_type_high(li->phy_type_high); else ifmr->ifm_active |= IFM_UNKNOWN; /* Report flow control status as well */ if (li->an_info & ICE_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (li->an_info & ICE_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; } /** * ice_init_tx_tracking - Initialize Tx queue software tracking values * @vsi: the VSI to initialize * * Initialize Tx queue software tracking values, including the Report Status * queue, and related software tracking values. */ static void ice_init_tx_tracking(struct ice_vsi *vsi) { struct ice_tx_queue *txq; size_t j; int i; for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { txq->tx_rs_cidx = txq->tx_rs_pidx = 0; /* Initialize the last processed descriptor to be the end of * the ring, rather than the start, so that we avoid an * off-by-one error in ice_ift_txd_credits_update for the * first packet. */ txq->tx_cidx_processed = txq->desc_count - 1; for (j = 0; j < txq->desc_count; j++) txq->tx_rsq[j] = QIDX_INVALID; } } /** * ice_update_rx_mbuf_sz - Update the Rx buffer size for all queues * @sc: the device softc * * Called to update the Rx queue mbuf_sz parameter for configuring the receive * buffer sizes when programming hardware. 
*/
static void
ice_update_rx_mbuf_sz(struct ice_softc *sc)
{
	uint32_t mbuf_sz = iflib_get_rx_mbuf_sz(sc->ctx);
	struct ice_vsi *vsi = &sc->pf_vsi;

	/* mbuf_sz is stored in a u16 field on the VSI */
	MPASS(mbuf_sz <= UINT16_MAX);
	vsi->mbuf_sz = mbuf_sz;
}

/**
 * ice_if_init - Initialize the device
 * @ctx: iflib ctx structure
 *
 * Called by iflib to bring the device up, i.e. ifconfig ice0 up. Initializes
 * device filters and prepares the Tx and Rx engines.
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_if_init(if_ctx_t ctx)
{
	/* NOTE(review): mif is obtained from the same iflib_get_softc(ctx)
	 * as sc and is only dereferenced when sc->mirr_if is set — confirm
	 * this aliasing is intentional for the mirror-interface case.
	 */
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	device_t dev = sc->dev;
	int err;

	ASSERT_CTX_LOCKED(sc);

	/*
	 * We've seen an issue with 11.3/12.1 where sideband routines are
	 * called after detach is called. This would call routines after
	 * if_stop, causing issues with the teardown process. This has
	 * seemingly been fixed in STABLE snapshots, but it seems like a
	 * good idea to have this guard here regardless.
	 */
	if (ice_driver_is_detaching(sc))
		return;

	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
		device_printf(sc->dev,
			      "request to start interface cannot be completed as the device failed to reset\n");
		return;
	}

	if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
		device_printf(sc->dev,
			      "request to start interface while device is prepared for impending reset\n");
		return;
	}

	ice_update_rx_mbuf_sz(sc);

	/* Update the MAC address... User might use a LAA */
	err = ice_update_laa_mac(sc);
	if (err) {
		device_printf(dev,
			      "LAA address change failed, err %s\n",
			      ice_err_str(err));
		return;
	}

	/* Initialize software Tx tracking values */
	ice_init_tx_tracking(&sc->pf_vsi);

	err = ice_cfg_vsi_for_tx(&sc->pf_vsi);
	if (err) {
		device_printf(dev,
			      "Unable to configure the main VSI for Tx: %s\n",
			      ice_err_str(err));
		return;
	}

	err = ice_cfg_vsi_for_rx(&sc->pf_vsi);
	if (err) {
		device_printf(dev,
			      "Unable to configure the main VSI for Rx: %s\n",
			      ice_err_str(err));
		goto err_cleanup_tx;
	}

	/* NOTE(review): the message below says "for transmit" although this
	 * enables Rx rings — looks like a copy/paste wording slip; verify
	 * against upstream before changing the string.
	 */
	err = ice_control_all_rx_queues(&sc->pf_vsi, true);
	if (err) {
		device_printf(dev,
			      "Unable to enable Rx rings for transmit: %s\n",
			      ice_err_str(err));
		goto err_cleanup_tx;
	}

	err = ice_cfg_pf_default_mac_filters(sc);
	if (err) {
		device_printf(dev,
			      "Unable to configure default MAC filters: %s\n",
			      ice_err_str(err));
		goto err_stop_rx;
	}

	/* We use software interrupts for Tx, so we only program the hardware
	 * interrupts for Rx.
	 */
	ice_configure_all_rxq_interrupts(&sc->pf_vsi);
	ice_configure_rx_itr(&sc->pf_vsi);

	/* Configure promiscuous mode */
	ice_if_promisc_set(ctx, if_getflags(sc->ifp));

	/* Bring the link up unless this is the very first init and the
	 * link was not previously up.
	 */
	if (!ice_testandclear_state(&sc->state, ICE_STATE_FIRST_INIT_LINK))
		if (!sc->link_up && ((if_getflags(sc->ifp) & IFF_UP) ||
		     ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)))
			ice_set_link(sc, true);

	ice_rdma_pf_init(sc);

	ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);

	/* Kick the mirror subinterface through a reset if it asked for one */
	if (sc->mirr_if && ice_testandclear_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
		ice_clear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
		iflib_request_reset(sc->mirr_if->subctx);
		iflib_admin_intr_deferred(sc->mirr_if->subctx);
	}

	return;

err_stop_rx:
	ice_control_all_rx_queues(&sc->pf_vsi, false);
err_cleanup_tx:
	ice_vsi_disable_tx(&sc->pf_vsi);
}

/**
 * ice_poll_for_media_avail - Re-enable link if media is detected
 * @sc: device private structure
 *
 * Intended to be called from the driver's timer function, this function
 * sends the Get Link Status AQ command and
re-enables HW link if the
 * command says that media is available.
 *
 * If the driver doesn't have the "NO_MEDIA" state set, then this does nothing,
 * since media removal events are supposed to be sent to the driver through
 * a link status event.
 */
static void
ice_poll_for_media_avail(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi = hw->port_info;

	if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) {
		/* Force a fresh link query from firmware */
		pi->phy.get_link_info = true;
		ice_get_link_status(pi, &sc->link_up);

		if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
			int status;

			/* Re-enable link and re-apply user link settings */
			if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
			    (if_getflags(sc->ifp) & IFF_UP)) {
				ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);

				/* Update the OS about changes in media capability */
				status = ice_add_media_types(sc, sc->media);
				if (status)
					device_printf(sc->dev,
						      "Error adding device media types: %s aq_err %s\n",
						      ice_status_str(status),
						      ice_aq_str(hw->adminq.sq_last_status));
			}

			ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
		}
	}
}

/**
 * ice_if_timer - called by iflib periodically
 * @ctx: iflib ctx structure
 * @qid: the queue this timer was called for
 *
 * This callback is triggered by iflib periodically. We use it to update the
 * hw statistics.
 *
 * @remark this function is not protected by the iflib CTX lock.
 */
static void
ice_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	uint64_t prev_link_xoff_rx = sc->stats.cur.link_xoff_rx;

	/* Only run the stats update once per timer pass, on queue 0 */
	if (qid != 0)
		return;

	/* Do not attempt to update stats when in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	/* Update device statistics */
	ice_update_pf_stats(sc);

	/*
	 * For proper watchdog management, the iflib stack needs to know if
	 * we've been paused during the last interval. Check if the
	 * link_xoff_rx stat changed, and set the isc_pause_frames, if so.
	 */
	if (sc->stats.cur.link_xoff_rx != prev_link_xoff_rx)
		sc->scctx->isc_pause_frames = 1;

	/* Update the primary VSI stats */
	ice_update_vsi_hw_stats(&sc->pf_vsi);

	/* Update mirror VSI stats */
	if (sc->mirr_if && sc->mirr_if->if_attached)
		ice_update_vsi_hw_stats(sc->mirr_if->vsi);
}

/**
 * ice_admin_timer - called periodically to trigger the admin task
 * @arg: callout(9) argument pointing to the device private softc structure
 *
 * Timer function used as part of a callout(9) timer that will periodically
 * trigger the admin task, even when the interface is down.
 *
 * @remark this function is not called by iflib and is not protected by the
 * iflib CTX lock.
 *
 * @remark because this is a callout function, it cannot sleep and should not
 * attempt taking the iflib CTX lock.
 */
static void
ice_admin_timer(void *arg)
{
	struct ice_softc *sc = (struct ice_softc *)arg;

	/*
	 * There is a point where callout routines are no longer
	 * cancelable. So there exists a window of time where the
	 * driver enters detach() and tries to cancel the callout, but the
	 * callout routine has passed the cancellation point. The detach()
	 * routine is unaware of this and tries to free resources that the
	 * callout routine needs. So we check for the detach state flag to
	 * at least shrink the window of opportunity.
	 */
	if (ice_driver_is_detaching(sc))
		return;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(sc->ctx);

	/* Reschedule the admin timer */
	callout_schedule(&sc->admin_timer, hz/2);
}

/**
 * ice_transition_recovery_mode - Transition to recovery mode
 * @sc: the device private softc
 *
 * Called when the driver detects that the firmware has entered recovery mode
 * at run time.
 */
static void
ice_transition_recovery_mode(struct ice_softc *sc)
{
	struct ice_vsi *vsi = &sc->pf_vsi;
	int i;

	device_printf(sc->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	/* Tell the stack that the link has gone down */
	iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);

	/* Request that the device be re-initialized */
	ice_request_stack_reinit(sc);

	ice_rdma_pf_detach(sc);
	ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);

	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);

	/* Tear down the Tx/Rx queue contexts for the main VSI */
	ice_vsi_del_txqs_ctx(vsi);
	ice_vsi_del_rxqs_ctx(vsi);

	/* Release every VSI the driver had allocated */
	for (i = 0; i < sc->num_available_vsi; i++) {
		if (sc->all_vsi[i])
			ice_release_vsi(sc->all_vsi[i]);
	}
	sc->num_available_vsi = 0;
	if (sc->all_vsi) {
		free(sc->all_vsi, M_ICE);
		sc->all_vsi = NULL;
	}

	/* Destroy the interrupt manager */
	ice_resmgr_destroy(&sc->dev_imgr);
	/* Destroy the queue managers */
	ice_resmgr_destroy(&sc->tx_qmgr);
	ice_resmgr_destroy(&sc->rx_qmgr);

	ice_deinit_hw(&sc->hw);
}

/**
 * ice_transition_safe_mode - Transition to safe mode
 * @sc: the device private softc
 *
 * Called when the driver attempts to reload the DDP package during a device
 * reset, and the new download fails. If so, we must transition to safe mode
 * at run time.
 *
 * @remark although safe mode normally allocates only a single queue, we can't
 * change the number of queues dynamically when using iflib. Due to this, we
 * do not attempt to reduce the number of queues.
 */
static void
ice_transition_safe_mode(struct ice_softc *sc)
{
	/* Indicate that we are in Safe mode */
	ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);

	ice_rdma_pf_detach(sc);
	ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);

	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);

	ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
	ice_clear_bit(ICE_FEATURE_RSS, sc->feat_en);
}

/**
 * ice_if_update_admin_status - update admin status
 * @ctx: iflib ctx structure
 *
 * Called by iflib to update the admin status.
For our purposes, this means
 * check the adminq, and update the link status. It's ultimately triggered by
 * our admin interrupt, or by the ice_if_timer periodically.
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_if_update_admin_status(if_ctx_t ctx)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	enum ice_fw_modes fw_mode;
	bool reschedule = false;
	u16 pending = 0;

	ASSERT_CTX_LOCKED(sc);

	/* Check if the firmware entered recovery mode at run time */
	fw_mode = ice_get_fw_mode(&sc->hw);
	if (fw_mode == ICE_FW_MODE_REC) {
		if (!ice_testandset_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
			/* If we just entered recovery mode, log a warning to
			 * the system administrator and deinit driver state
			 * that is no longer functional.
			 */
			ice_transition_recovery_mode(sc);
		}
	} else if (fw_mode == ICE_FW_MODE_ROLLBACK) {
		if (!ice_testandset_state(&sc->state, ICE_STATE_ROLLBACK_MODE)) {
			/* Rollback mode isn't fatal, but we don't want to
			 * repeatedly post a message about it.
			 */
			ice_print_rollback_msg(&sc->hw);
		}
	}

	/* Handle global reset events */
	ice_handle_reset_event(sc);

	/* Handle PF reset requests */
	ice_handle_pf_reset_request(sc);

	/* Handle MDD events */
	ice_handle_mdd_event(sc);

	if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED) ||
	    ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET) ||
	    ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
		/*
		 * If we know the control queues are disabled, skip processing
		 * the control queues entirely.
		 */
		;
	} else if (ice_testandclear_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING)) {
		ice_process_ctrlq(sc, ICE_CTL_Q_ADMIN, &pending);
		if (pending > 0)
			reschedule = true;

		/* The sideband queue only exists on generic MAC devices */
		if (ice_is_generic_mac(&sc->hw)) {
			ice_process_ctrlq(sc, ICE_CTL_Q_SB, &pending);
			if (pending > 0)
				reschedule = true;
		}

		ice_process_ctrlq(sc, ICE_CTL_Q_MAILBOX, &pending);
		if (pending > 0)
			reschedule = true;
	}

	/* Poll for link up */
	ice_poll_for_media_avail(sc);

	/* Check and update link status */
	ice_update_link_status(sc, false);

	/*
	 * If there are still messages to process, we need to reschedule
	 * ourselves. Otherwise, we can just re-enable the interrupt. We'll be
	 * woken up at the next interrupt or timer event.
	 */
	if (reschedule) {
		ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
		iflib_admin_intr_deferred(ctx);
	} else {
		ice_enable_intr(&sc->hw, sc->irqvs[0].me);
	}
}

/**
 * ice_prepare_for_reset - Prepare device for an impending reset
 * @sc: The device private softc
 *
 * Prepare the driver for an impending reset, shutting down VSIs, clearing the
 * scheduler setup, and shutting down controlqs. Uses the
 * ICE_STATE_PREPARED_FOR_RESET to indicate whether we've already prepared the
 * driver for reset or not.
*/
static void
ice_prepare_for_reset(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;

	/* If we're already prepared, there's nothing to do */
	if (ice_testandset_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET))
		return;

	log(LOG_INFO, "%s: preparing to reset device logic\n", if_name(sc->ifp));

	/* In recovery mode, hardware is not initialized */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	/* inform the RDMA client */
	ice_rdma_notify_reset(sc);
	/* stop the RDMA client */
	ice_rdma_pf_stop(sc);

	/* Release the main PF VSI queue mappings */
	ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
			       sc->pf_vsi.num_tx_queues);
	ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
			       sc->pf_vsi.num_rx_queues);
	/* Also release the mirror interface's queue mappings, if present */
	if (sc->mirr_if) {
		ice_resmgr_release_map(&sc->tx_qmgr, sc->mirr_if->vsi->tx_qmap,
		    sc->mirr_if->num_irq_vectors);
		ice_resmgr_release_map(&sc->rx_qmgr, sc->mirr_if->vsi->rx_qmap,
		    sc->mirr_if->num_irq_vectors);
	}

	ice_clear_hw_tbls(hw);

	if (hw->port_info)
		ice_sched_cleanup_all(hw);

	ice_shutdown_all_ctrlq(hw, false);
}

/**
 * ice_rebuild_pf_vsi_qmap - Rebuild the main PF VSI queue mapping
 * @sc: the device softc pointer
 *
 * Loops over the Tx and Rx queues for the main PF VSI and reassigns the queue
 * mapping after a reset occurred.
*/
static int
ice_rebuild_pf_vsi_qmap(struct ice_softc *sc)
{
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_tx_queue *txq;
	struct ice_rx_queue *rxq;
	int err, i;

	/* Re-assign Tx queues from PF space to the main VSI */
	err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap,
					   vsi->num_tx_queues);
	if (err) {
		device_printf(sc->dev, "Unable to re-assign PF Tx queues: %s\n",
			      ice_err_str(err));
		return (err);
	}

	/* Re-assign Rx queues from PF space to this VSI */
	err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap,
					   vsi->num_rx_queues);
	if (err) {
		device_printf(sc->dev, "Unable to re-assign PF Rx queues: %s\n",
			      ice_err_str(err));
		goto err_release_tx_queues;
	}

	vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;

	/* Re-assign Tx queue tail pointers */
	for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
		txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);

	/* Re-assign Rx queue tail pointers */
	for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
		rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);

	return (0);

err_release_tx_queues:
	ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
			       sc->pf_vsi.num_tx_queues);

	return (err);
}

/* determine if the iflib context is active */
#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))

/**
 * ice_rebuild_recovery_mode - Rebuild driver state while in recovery mode
 * @sc: The device private softc
 *
 * Handle a driver rebuild while in recovery mode. This will only rebuild the
 * limited functionality supported while in recovery mode.
*/ static void ice_rebuild_recovery_mode(struct ice_softc *sc) { device_t dev = sc->dev; /* enable PCIe bus master */ pci_enable_busmaster(dev); /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Now that the rebuild is finished, we're no longer prepared to reset */ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET); log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp)); /* In order to completely restore device functionality, the iflib core * needs to be reset. We need to request an iflib reset. Additionally, * because the state of IFC_DO_RESET is cached within task_fn_admin in * the iflib core, we also want re-run the admin task so that iflib * resets immediately instead of waiting for the next interrupt. */ ice_request_stack_reinit(sc); return; } /** * ice_rebuild - Rebuild driver state post reset * @sc: The device private softc * * Restore driver state after a reset occurred. Restart the controlqs, setup * the hardware port, and re-enable the VSIs. */ static void ice_rebuild(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_ddp_state pkg_state; int status; int err; sc->rebuild_ticks = ticks; /* If we're rebuilding, then a reset has succeeded. */ ice_clear_state(&sc->state, ICE_STATE_RESET_FAILED); /* * If the firmware is in recovery mode, only restore the limited * functionality supported by recovery mode. 
*/ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) { ice_rebuild_recovery_mode(sc); return; } /* enable PCIe bus master */ pci_enable_busmaster(dev); status = ice_init_all_ctrlq(hw); if (status) { device_printf(dev, "failed to re-init controlqs, err %s\n", ice_status_str(status)); goto err_shutdown_ctrlq; } /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { device_printf(dev, "Failed to query scheduler resources, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); goto err_shutdown_ctrlq; } /* Re-enable FW logging. Keep going even if this fails */ - status = ice_fwlog_set(hw, &hw->fwlog_cfg); + status = ICE_SUCCESS; + if (hw->pf_id == 0) + status = ice_fwlog_set(hw, &hw->fwlog_cfg); if (!status) { /* * We should have the most updated cached copy of the * configuration, regardless of whether we're rebuilding * or not. So we'll simply check to see if logging was * enabled pre-rebuild. */ if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) { status = ice_fwlog_register(hw); if (status) device_printf(dev, "failed to re-register fw logging, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } } else device_printf(dev, "failed to rebuild fw logging configuration, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = ice_send_version(sc); if (err) goto err_shutdown_ctrlq; err = ice_init_link_events(sc); if (err) { device_printf(dev, "ice_init_link_events failed: %s\n", ice_err_str(err)); goto err_shutdown_ctrlq; } status = ice_clear_pf_cfg(hw); if (status) { device_printf(dev, "failed to clear PF configuration, err %s\n", ice_status_str(status)); goto err_shutdown_ctrlq; } ice_clean_all_vsi_rss_cfg(sc); ice_clear_pxe_mode(hw); status = ice_get_caps(hw); if (status) { device_printf(dev, "failed to get capabilities, err %s\n", ice_status_str(status)); goto err_shutdown_ctrlq; } status = 
ice_sched_init_port(hw->port_info); if (status) { device_printf(dev, "failed to initialize port, err %s\n", ice_status_str(status)); goto err_sched_cleanup; } /* If we previously loaded the package, it needs to be reloaded now */ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) { pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); if (!ice_is_init_pkg_successful(pkg_state)) { ice_log_pkg_init(sc, pkg_state); ice_transition_safe_mode(sc); } } ice_reset_pf_stats(sc); err = ice_rebuild_pf_vsi_qmap(sc); if (err) { device_printf(sc->dev, "Unable to re-assign main VSI queues, err %s\n", ice_err_str(err)); goto err_sched_cleanup; } err = ice_initialize_vsi(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to re-initialize Main VSI, err %s\n", ice_err_str(err)); goto err_release_queue_allocations; } /* Replay all VSI configuration */ err = ice_replay_all_vsi_cfg(sc); if (err) goto err_deinit_pf_vsi; /* Re-enable FW health event reporting */ ice_init_health_events(sc); /* Reconfigure the main PF VSI for RSS */ err = ice_config_rss(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to reconfigure RSS for the main VSI, err %s\n", ice_err_str(err)); goto err_deinit_pf_vsi; } if (hw->port_info->qos_cfg.is_sw_lldp) ice_add_rx_lldp_filter(sc); /* Refresh link status */ ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); sc->hw.port_info->phy.get_link_info = true; ice_get_link_status(sc->hw.port_info, &sc->link_up); ice_update_link_status(sc, true); /* RDMA interface will be restarted by the stack re-init */ /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Now that the rebuild is finished, we're no longer prepared to reset */ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET); /* Reconfigure the subinterface */ if (sc->mirr_if) { err = ice_subif_rebuild(sc); if (err) goto 
err_deinit_pf_vsi; } log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp)); /* In order to completely restore device functionality, the iflib core * needs to be reset. We need to request an iflib reset. Additionally, * because the state of IFC_DO_RESET is cached within task_fn_admin in * the iflib core, we also want re-run the admin task so that iflib * resets immediately instead of waiting for the next interrupt. * If LLDP is enabled we need to reconfig DCB to properly reinit all TC * queues, not only 0. It contains ice_request_stack_reinit as well. */ if (hw->port_info->qos_cfg.is_sw_lldp) ice_request_stack_reinit(sc); else ice_do_dcb_reconfig(sc, false); return; err_deinit_pf_vsi: ice_deinit_vsi(&sc->pf_vsi); err_release_queue_allocations: ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap, sc->pf_vsi.num_tx_queues); ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap, sc->pf_vsi.num_rx_queues); err_sched_cleanup: ice_sched_cleanup_all(hw); err_shutdown_ctrlq: ice_shutdown_all_ctrlq(hw, false); ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET); ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); device_printf(dev, "Driver rebuild failed, please reload the device driver\n"); } /** * ice_handle_reset_event - Handle reset events triggered by OICR * @sc: The device private softc * * Handle reset events triggered by an OICR notification. This includes CORER, * GLOBR, and EMPR resets triggered by software on this or any other PF or by * firmware. * * @pre assumes the iflib context lock is held, and will unlock it while * waiting for the hardware to finish reset. */ static void ice_handle_reset_event(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; int status; device_t dev = sc->dev; /* When a CORER, GLOBR, or EMPR is about to happen, the hardware will * trigger an OICR interrupt. Our OICR handler will determine when * this occurs and set the ICE_STATE_RESET_OICR_RECV bit as * appropriate. 
*/
	/* Nothing to do unless the OICR handler flagged a reset */
	if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
		return;

	ice_prepare_for_reset(sc);

	/*
	 * Release the iflib context lock and wait for the device to finish
	 * resetting.
	 */
	IFLIB_CTX_UNLOCK(sc);
	status = ice_check_reset(hw);
	IFLIB_CTX_LOCK(sc);
	if (status) {
		device_printf(dev, "Device never came out of reset, err %s\n",
			      ice_status_str(status));
		ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
		return;
	}

	/* We're done with the reset, so we can rebuild driver state */
	sc->hw.reset_ongoing = false;
	ice_rebuild(sc);

	/* In the unlikely event that a PF reset request occurs at the same
	 * time as a global reset, clear the request now. This avoids
	 * resetting a second time right after we reset due to a global event.
	 */
	if (ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
		device_printf(dev, "Ignoring PFR request that occurred while a reset was ongoing\n");
}

/**
 * ice_handle_pf_reset_request - Initiate PF reset requested by software
 * @sc: The device private softc
 *
 * Initiate a PF reset requested by software. We handle this in the admin task
 * so that only one thread actually handles driver preparation and cleanup,
 * rather than having multiple threads possibly attempt to run this code
 * simultaneously.
 *
 * @pre assumes the iflib context lock is held and will unlock it while
 * waiting for the PF reset to complete.
 */
static void
ice_handle_pf_reset_request(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	int status;

	/* Check for PF reset requests */
	if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
		return;

	/* Make sure we're prepared for reset */
	ice_prepare_for_reset(sc);

	/*
	 * Release the iflib context lock and wait for the device to finish
	 * resetting.
*/
	IFLIB_CTX_UNLOCK(sc);
	status = ice_reset(hw, ICE_RESET_PFR);
	IFLIB_CTX_LOCK(sc);
	if (status) {
		device_printf(sc->dev, "device PF reset failed, err %s\n",
			      ice_status_str(status));
		ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
		return;
	}

	sc->soft_stats.pfr_count++;

	ice_rebuild(sc);
}

/**
 * ice_init_device_features - Init device driver features
 * @sc: driver softc structure
 *
 * @pre assumes that the function capabilities bits have been set up by
 * ice_init_hw().
 */
static void
ice_init_device_features(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;

	/* Set capabilities that all devices support */
	ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_RDMA, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_cap);

	/* PHY statistics are only enabled (not just capable) on E810 */
	if (ice_is_e810(hw))
		ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_en);

	/* Set capabilities based on device */
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
		ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_cap);
		break;
	default:
		break;
	}

	/* Disable features due to hardware limitations... */
	if (!hw->func_caps.common_cap.rss_table_size)
		ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
	/* RDMA capability is also cleared on E830 devices */
	if (!hw->func_caps.common_cap.iwarp || !ice_enable_irdma ||
	    ice_is_e830(hw))
		ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
	if (!hw->func_caps.common_cap.dcb)
		ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
	/* Disable features due to firmware limitations...
*/
	if (!ice_is_fw_health_report_supported(hw))
		ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
	if (!ice_fwlog_supported(hw))
		ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);

	/* Keep FW logging enabled only if the driver also supports it;
	 * otherwise unregister from the firmware.
	 */
	if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
		if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_FW_LOGGING))
			ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
		else
			ice_fwlog_unregister(hw);
	}

	/* Disable capabilities not supported by the OS */
	ice_disable_unsupported_features(sc->feat_cap);

	/* RSS is always enabled for iflib */
	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
		ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);

	/* Disable features based on sysctl settings */
	if (!ice_tx_balance_en)
		ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);

	if (hw->dev_caps.supported_sensors & ICE_SENSOR_SUPPORT_E810_INT_TEMP) {
		ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_cap);
		ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_en);
	}

	if (hw->func_caps.common_cap.next_cluster_id_support ||
	    hw->dev_caps.common_cap.next_cluster_id_support) {
		ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_cap);
		ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_en);
	}
}

/**
 * ice_if_multi_set - Callback to update Multicast filters in HW
 * @ctx: iflib ctx structure
 *
 * Called by iflib in response to SIOCDELMULTI and SIOCADDMULTI. Must search
 * the if_multiaddrs list and determine which filters have been added or
 * removed from the list, and update HW programming to reflect the new list.
*
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_if_multi_set(if_ctx_t ctx)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	int err;

	ASSERT_CTX_LOCKED(sc);

	/* Do not handle multicast configuration in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	err = ice_sync_multicast_filters(sc);
	if (err) {
		device_printf(sc->dev,
			      "Failed to synchronize multicast filter list: %s\n",
			      ice_err_str(err));
		return;
	}
}

/**
 * ice_if_vlan_register - Register a VLAN with the hardware
 * @ctx: iflib ctx pointer
 * @vtag: VLAN to add
 *
 * Programs the main PF VSI with a hardware filter for the given VLAN.
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	int status;

	ASSERT_CTX_LOCKED(sc);

	/* Do not handle VLAN configuration in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag);
	if (status) {
		device_printf(sc->dev,
			      "Failure adding VLAN %d to main VSI, err %s aq_err %s\n",
			      vtag, ice_status_str(status),
			      ice_aq_str(sc->hw.adminq.sq_last_status));
	}
}

/**
 * ice_if_vlan_unregister - Remove a VLAN filter from the hardware
 * @ctx: iflib ctx pointer
 * @vtag: VLAN to remove
 *
 * Removes the previously programmed VLAN filter from the main PF VSI.
*
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	int status;

	ASSERT_CTX_LOCKED(sc);

	/* Do not handle VLAN configuration in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	status = ice_remove_vlan_hw_filter(&sc->pf_vsi, vtag);
	if (status) {
		device_printf(sc->dev,
			      "Failure removing VLAN %d from main VSI, err %s aq_err %s\n",
			      vtag, ice_status_str(status),
			      ice_aq_str(sc->hw.adminq.sq_last_status));
	}
}

/**
 * ice_if_stop - Stop the device
 * @ctx: iflib context structure
 *
 * Called by iflib to stop the device and bring it down. (i.e. ifconfig ice0
 * down)
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_if_stop(if_ctx_t ctx)
{
	/* NOTE(review): mif is the PF softc cast to ice_mirr_if (both casts
	 * come from the same ctx), not sc->mirr_if; the mif->state test near
	 * the end of this function relies on that aliasing -- confirm it is
	 * intentional.
	 */
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);

	ASSERT_CTX_LOCKED(sc);

	/*
	 * The iflib core may call IFDI_STOP prior to the first call to
	 * IFDI_INIT. This will cause us to attempt to remove MAC filters we
	 * don't have, and disable Tx queues which aren't yet configured.
	 * Although it is likely these extra operations are harmless, they do
	 * cause spurious warning messages to be displayed, which may confuse
	 * users.
	 *
	 * To avoid these messages, we use a state bit indicating if we've
	 * been initialized. It will be set when ice_if_init is called, and
	 * cleared here in ice_if_stop.
	 */
	if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
		return;

	if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
		device_printf(sc->dev,
			      "request to stop interface cannot be completed as the device failed to reset\n");
		return;
	}

	if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
		device_printf(sc->dev,
			      "request to stop interface while device is prepared for impending reset\n");
		return;
	}

	ice_rdma_pf_stop(sc);

	/* Remove the MAC filters, stop Tx, and stop Rx.
We don't check the
	 * return of these functions because there's nothing we can really do
	 * if they fail, and the functions already print error messages.
	 * Just try to shut down as much as we can.
	 */
	ice_rm_pf_default_mac_filters(sc);

	/* Dissociate the Tx and Rx queues from the interrupts */
	ice_flush_txq_interrupts(&sc->pf_vsi);
	ice_flush_rxq_interrupts(&sc->pf_vsi);

	/* Disable the Tx and Rx queues */
	ice_vsi_disable_tx(&sc->pf_vsi);
	ice_control_all_rx_queues(&sc->pf_vsi, false);

	/* Bring the link down unless it is configured to stay active while
	 * the interface is down.
	 */
	if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
	    !(if_getflags(sc->ifp) & IFF_UP) && sc->link_up)
		ice_set_link(sc, false);

	if (sc->mirr_if &&
	    ice_test_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
		ice_subif_if_stop(sc->mirr_if->subctx);
		device_printf(sc->dev,
			      "The subinterface also comes down and up after reset\n");
	}
}

/**
 * ice_if_get_counter - Get current value of an ifnet statistic
 * @ctx: iflib context pointer
 * @counter: ifnet counter to read
 *
 * Reads the current value of an ifnet counter for the device.
 *
 * This function is not protected by the iflib CTX lock.
 */
static uint64_t
ice_if_get_counter(if_ctx_t ctx, ift_counter counter)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);

	/* Return the counter for the main PF VSI */
	return ice_get_ifnet_counter(&sc->pf_vsi, counter);
}

/**
 * ice_request_stack_reinit - Request that iflib re-initialize
 * @sc: the device private softc
 *
 * Request that the device be brought down and up, to re-initialize. For
 * example, this may be called when a device reset occurs, or when Tx and Rx
 * queues need to be re-initialized.
 *
 * This is required because the iflib state is outside the driver, and must be
 * re-initialized if we need to restart Tx and Rx queues.
*/
void
ice_request_stack_reinit(struct ice_softc *sc)
{
	if (CTX_ACTIVE(sc->ctx)) {
		iflib_request_reset(sc->ctx);
		iflib_admin_intr_deferred(sc->ctx);
	}
}

/**
 * ice_driver_is_detaching - Check if the driver is detaching/unloading
 * @sc: device private softc
 *
 * Returns true if the driver is detaching, false otherwise.
 *
 * @remark on newer kernels, take advantage of iflib_in_detach in order to
 * report detachment correctly as early as possible.
 *
 * @remark this function is used by various code paths that want to avoid
 * running if the driver is about to be removed. This includes sysctls and
 * other driver access points. Note that it does not fully resolve
 * detach-based race conditions as it is possible for a thread to race with
 * iflib_in_detach.
 */
bool
ice_driver_is_detaching(struct ice_softc *sc)
{
	return (ice_test_state(&sc->state, ICE_STATE_DETACHING) ||
		iflib_in_detach(sc->ctx));
}

/**
 * ice_if_priv_ioctl - Device private ioctl handler
 * @ctx: iflib context pointer
 * @command: The ioctl command issued
 * @data: ioctl specific data
 *
 * iflib callback for handling custom driver specific ioctls.
 *
 * @pre Assumes that the iflib context lock is held.
 */
static int
ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
	struct ifdrv *ifd;
	device_t dev = sc->dev;

	/* Reject a NULL argument before it is dereferenced below */
	if (data == NULL)
		return (EINVAL);

	ASSERT_CTX_LOCKED(sc);

	/* Make sure the command type is valid */
	switch (command) {
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		/* Accepted commands */
		break;
	case SIOCGPRIVATE_0:
		/*
		 * Although we do not support this ioctl command, it's
		 * expected that iflib will forward it to the IFDI_PRIV_IOCTL
		 * handler. Do not print a message in this case
		 */
		return (ENOTSUP);
	default:
		/*
		 * If we get a different command for this function, it's
		 * definitely unexpected, so log a message indicating what
		 * command we got for debugging purposes.
*/ device_printf(dev, "%s: unexpected ioctl command %08lx\n", __func__, command); return (EINVAL); } ifd = (struct ifdrv *)data; switch (ifd->ifd_cmd) { case ICE_NVM_ACCESS: return ice_handle_nvm_access_ioctl(sc, ifd); case ICE_DEBUG_DUMP: return ice_handle_debug_dump_ioctl(sc, ifd); default: return EINVAL; } } /** * ice_if_i2c_req - I2C request handler for iflib * @ctx: iflib context pointer * @req: The I2C parameters to use * * Read from the port's I2C eeprom using the parameters from the ioctl. * * @remark The iflib-only part is pretty simple. */ static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); return ice_handle_i2c_req(sc, req); } /** * ice_if_suspend - PCI device suspend handler for iflib * @ctx: iflib context pointer * * Deinitializes the driver and clears HW resources in preparation for * suspend or an FLR. * * @returns 0; this return value is ignored */ static int ice_if_suspend(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); /* At least a PFR is always going to happen after this; * either via FLR or during the D3->D0 transition. */ ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ); ice_prepare_for_reset(sc); return (0); } /** * ice_if_resume - PCI device resume handler for iflib * @ctx: iflib context pointer * * Reinitializes the driver and the HW after PCI resume or after * an FLR. An init is performed by iflib after this function is finished. * * @returns 0; this return value is ignored */ static int ice_if_resume(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); ice_rebuild(sc); return (0); } /** * ice_if_needs_restart - Tell iflib when the driver needs to be reinitialized * @ctx: iflib context pointer * @event: event code to check * * Defaults to returning true for unknown events. 
*
 * @returns true if iflib needs to reinit the interface
 */
static bool
ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event)
{
	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);

	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* No restart needed if the link stays down while the
		 * interface is down; otherwise fall through to the default.
		 */
		if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
		    !(if_getflags(sc->ifp) & IFF_UP))
			return false;
		/* FALLTHROUGH */
	default:
		return true;
	}
}

/* Tx/Rx operations table for the mirror subinterface, defined elsewhere */
extern struct if_txrx ice_subif_txrx;

/**
 * @var ice_subif_methods
 * @brief ice driver method entry points
 */
static device_method_t ice_subif_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ice_subif_register),
	DEVMETHOD_END
};

/**
 * @var ice_subif_driver
 * @brief driver structure for the device API
 */
static driver_t ice_subif_driver = {
	.name = "ice_subif",
	.methods = ice_subif_methods,
	.size = sizeof(struct ice_mirr_if),
};

/* iflib device-interface methods for the mirror subinterface */
static device_method_t ice_iflib_subif_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ice_subif_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ice_subif_if_attach_post),
	DEVMETHOD(ifdi_tx_queues_alloc, ice_subif_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ice_subif_if_rx_queues_alloc),
	DEVMETHOD(ifdi_msix_intr_assign, ice_subif_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ice_subif_if_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ice_subif_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ice_subif_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_init, ice_subif_if_init),
	DEVMETHOD(ifdi_stop, ice_subif_if_stop),
	DEVMETHOD(ifdi_queues_free, ice_subif_if_queues_free),
	DEVMETHOD(ifdi_media_status, ice_subif_if_media_status),
	DEVMETHOD(ifdi_promisc_set, ice_subif_if_promisc_set),
	/* NOTE(review): unlike ice_subif_methods above, this table has no
	 * DEVMETHOD_END terminator -- confirm the consumer does not require
	 * a null-terminated method table.
	 */
};

/**
 * @var ice_iflib_subif_driver
 * @brief driver structure for the iflib stack
 *
 * driver_t definition used to setup the iflib device methods.
*/ static driver_t ice_iflib_subif_driver = { .name = "ice_subif", .methods = ice_iflib_subif_methods, .size = sizeof(struct ice_mirr_if), }; /** * @var ice_subif_sctx * @brief ice driver shared context * * Similar to the existing ice_sctx, this structure has these differences: * - isc_admin_intrcnt is set to 0 * - Uses subif iflib driver methods * - Flagged as a VF for iflib */ static struct if_shared_ctx ice_subif_sctx = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = ICE_MAX_FRAME_SIZE, .isc_tx_maxsegsize = ICE_MAX_FRAME_SIZE, .isc_tso_maxsize = ICE_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = ICE_MAX_DMA_SEG_SIZE, .isc_rx_maxsize = ICE_MAX_FRAME_SIZE, .isc_rx_nsegments = ICE_MAX_RX_SEGS, .isc_rx_maxsegsize = ICE_MAX_FRAME_SIZE, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 0, .isc_vendor_info = ice_vendor_info_array, .isc_driver_version = __DECONST(char *, ice_driver_version), .isc_driver = &ice_iflib_subif_driver, .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN | IFLIB_SKIP_MSIX | IFLIB_IS_VF, .isc_nrxd_min = {ICE_MIN_DESC_COUNT}, .isc_ntxd_min = {ICE_MIN_DESC_COUNT}, .isc_nrxd_max = {ICE_IFLIB_MAX_DESC_COUNT}, .isc_ntxd_max = {ICE_IFLIB_MAX_DESC_COUNT}, .isc_nrxd_default = {ICE_DEFAULT_DESC_COUNT}, .isc_ntxd_default = {ICE_DEFAULT_DESC_COUNT}, }; static void * ice_subif_register(device_t dev __unused) { return (&ice_subif_sctx); } static void ice_subif_setup_scctx(struct ice_mirr_if *mif) { if_softc_ctx_t scctx = mif->subscctx; scctx->isc_txrx = &ice_subif_txrx; scctx->isc_capenable = ICE_FULL_CAPS; scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD; scctx->isc_ntxqsets = 4; scctx->isc_nrxqsets = 4; scctx->isc_vectors = scctx->isc_nrxqsets; scctx->isc_ntxqsets_max = 256; scctx->isc_nrxqsets_max = 256; scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct ice_tx_desc), DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union 
ice_32b_rx_flex_desc), DBA_ALIGN); scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS; scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS; scctx->isc_tx_tso_size_max = ICE_TSO_SIZE; scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE; } static int ice_subif_if_attach_pre(if_ctx_t ctx) { struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); mif->subctx = ctx; mif->subdev = dev; mif->subscctx = iflib_get_softc_ctx(ctx); /* Setup the iflib softc context structure */ ice_subif_setup_scctx(mif); return (0); } static int ice_subif_if_attach_post(if_ctx_t ctx __unused) { return (0); } /** * ice_destroy_mirror_interface - destroy mirror interface * @sc: driver private data * * Destroys all resources associated with the mirroring interface. * Will not exit early on failure. * * @pre: Mirror interface already exists and is initialized. */ void ice_destroy_mirror_interface(struct ice_softc *sc) { struct ice_mirr_if *mif = sc->mirr_if; struct ice_vsi *vsi = mif->vsi; bool is_locked = false; int ret; is_locked = sx_xlocked(sc->iflib_ctx_lock); if (is_locked) IFLIB_CTX_UNLOCK(sc); if (mif->ifp) { ret = iflib_device_deregister(mif->subctx); if (ret) { device_printf(sc->dev, "iflib_device_deregister for mirror interface failed: %d\n", ret); } } bus_topo_lock(); ret = device_delete_child(sc->dev, mif->subdev); bus_topo_unlock(); if (ret) { device_printf(sc->dev, "device_delete_child for mirror interface failed: %d\n", ret); } if (is_locked) IFLIB_CTX_LOCK(sc); if (mif->if_imap) { free(mif->if_imap, M_ICE); mif->if_imap = NULL; } if (mif->os_imap) { free(mif->os_imap, M_ICE); mif->os_imap = NULL; } /* These are freed via ice_subif_queues_free_subif * vsi: * - rx_irqvs * - tx_queues * - rx_queues */ ice_release_vsi(vsi); free(mif, M_ICE); sc->mirr_if = NULL; } /** * ice_setup_mirror_vsi - Initialize mirror VSI * @mif: driver private data for mirror interface * * Allocates a VSI for a mirror interface, and sets that VSI up for use as a * 
mirror for the main PF VSI. * * Returns 0 on success, or a standard error code on failure. */ static int ice_setup_mirror_vsi(struct ice_mirr_if *mif) { struct ice_softc *sc = mif->back; device_t dev = sc->dev; struct ice_vsi *vsi; int ret = 0; /* vsi is for the new mirror vsi, not the PF's main VSI */ vsi = ice_alloc_vsi(sc, ICE_VSI_VMDQ2); if (!vsi) { /* Already prints an error message */ return (ENOMEM); } mif->vsi = vsi; /* Reserve VSI queue allocation from PF queues */ ice_alloc_vsi_qmap(vsi, ICE_DEFAULT_VF_QUEUES, ICE_DEFAULT_VF_QUEUES); vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES; /* Assign Tx queues from PF space */ ret = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues); if (ret) { device_printf(dev, "Unable to assign mirror VSI Tx queues: %s\n", ice_err_str(ret)); goto release_vsi; } /* Assign Rx queues from PF space */ ret = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, vsi->num_rx_queues); if (ret) { device_printf(dev, "Unable to assign mirror VSI Rx queues: %s\n", ice_err_str(ret)); goto release_vsi; } vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; vsi->max_frame_size = ICE_MAX_FRAME_SIZE; ret = ice_initialize_vsi(vsi); if (ret) { device_printf(dev, "%s: Error in ice_initialize_vsi for mirror VSI: %s\n", __func__, ice_err_str(ret)); goto release_vsi; } /* Setup this VSI for receiving traffic */ ret = ice_config_rss(vsi); if (ret) { device_printf(dev, "Unable to configure RSS for mirror VSI: %s\n", ice_err_str(ret)); goto release_vsi; } /* Set HW rules for mirroring traffic */ vsi->mirror_src_vsi = sc->pf_vsi.idx; ice_debug(&sc->hw, ICE_DBG_INIT, "Configuring mirroring from VSI %d to %d\n", vsi->mirror_src_vsi, vsi->idx); ice_debug(&sc->hw, ICE_DBG_INIT, "(HW num: VSI %d to %d)\n", ice_get_hw_vsi_num(&sc->hw, vsi->mirror_src_vsi), ice_get_hw_vsi_num(&sc->hw, vsi->idx)); ret = ice_setup_vsi_mirroring(vsi); if (ret) { device_printf(dev, "Unable to configure mirroring for VSI: %s\n", ice_err_str(ret)); 
goto release_vsi; } return (0); release_vsi: ice_release_vsi(vsi); mif->vsi = NULL; return (ret); } /** * ice_create_mirror_interface - Initialize mirror interface * @sc: driver private data * * Creates and sets up a mirror interface that will mirror traffic from * the main PF interface. Includes a call to iflib_device_register() in order * to setup necessary iflib structures for this new interface as well. * * If it returns successfully, a new interface will be created and will show * up in the ifconfig interface list. * * Returns 0 on success, or a standard error code on failure. */ int ice_create_mirror_interface(struct ice_softc *sc) { device_t dev = sc->dev; struct ice_mirr_if *mif; struct ifmedia *media; struct sbuf *sb; int ret = 0; mif = (struct ice_mirr_if *)malloc(sizeof(*mif), M_ICE, M_ZERO | M_NOWAIT); if (!mif) { device_printf(dev, "malloc() error allocating mirror interface\n"); return (ENOMEM); } /* Set pointers */ sc->mirr_if = mif; mif->back = sc; /* Do early setup because these will be called during iflib_device_register(): * - ice_subif_if_tx_queues_alloc * - ice_subif_if_rx_queues_alloc */ ret = ice_setup_mirror_vsi(mif); if (ret) goto out; /* Determine name for new interface: * (base interface name)(modifier name)(modifier unit number) * e.g. 
for ice0 with a new mirror interface (modifier m) * of index 0, this equals "ice0m0" */ sb = sbuf_new_auto(); MPASS(sb != NULL); sbuf_printf(sb, "%sm", device_get_nameunit(dev)); sbuf_finish(sb); bus_topo_lock(); mif->subdev = device_add_child(dev, sbuf_data(sb), 0); bus_topo_unlock(); if (!mif->subdev) { device_printf(dev, "device_add_child failed for %s0\n", sbuf_data(sb)); sbuf_delete(sb); free(mif, M_ICE); sc->mirr_if = NULL; return (ENOMEM); } sbuf_delete(sb); device_set_driver(mif->subdev, &ice_subif_driver); /* Use iflib_device_register() directly because the driver already * has an initialized softc to pass to iflib */ ret = iflib_device_register(mif->subdev, mif, &ice_subif_sctx, &mif->subctx); if (ret) goto out; /* Indicate that created interface will be just for monitoring */ mif->ifp = iflib_get_ifp(mif->subctx); if_setflagbits(mif->ifp, IFF_MONITOR, 0); /* Use autoselect media by default */ media = iflib_get_media(mif->subctx); ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(media, IFM_ETHER | IFM_AUTO); device_printf(dev, "Created dev %s and ifnet %s for mirroring\n", device_get_nameunit(mif->subdev), if_name(mif->ifp)); ice_add_vsi_sysctls(mif->vsi); ret = ice_wire_mirror_intrs(mif); if (ret) goto out; mif->if_attached = true; return (0); out: ice_destroy_mirror_interface(sc); return (ret); } /** * ice_wire_mirror_intrs * @mif: driver private subinterface structure * * Helper function that sets up driver interrupt data and calls * into iflib in order to setup interrupts in its data structures as well. * * Like ice_if_msix_intr_assign, currently requires that we get at least the same * number of vectors as we have queues, and that we always have the same number * of Tx and Rx queues. Unlike that function, this calls a special * iflib_irq_alloc_generic_subif() function for RX interrupts because the * driver needs to get MSI-X resources from the parent device. 
* * Tx queues use a softirq instead of using their own hardware interrupt so that * remains unchanged. * * Returns 0 on success or an error code from iflib_irq_alloc_generic_subctx() * on failure. */ static int ice_wire_mirror_intrs(struct ice_mirr_if *mif) { struct ice_softc *sc = mif->back; struct ice_hw *hw = &sc->hw; struct ice_vsi *vsi = mif->vsi; device_t dev = mif->subdev; int err, i, rid; if_ctx_t ctx = mif->subctx; ice_debug(hw, ICE_DBG_INIT, "%s: Last rid: %d\n", __func__, sc->last_rid); rid = sc->last_rid + 1; for (i = 0; i < vsi->num_rx_queues; i++, rid++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; struct ice_tx_queue *txq = &vsi->tx_queues[i]; char irq_name[16]; // TODO: Change to use dynamic interface number snprintf(irq_name, sizeof(irq_name), "m0rxq%d", i); /* First arg is parent device (physical port's) iflib ctx */ err = iflib_irq_alloc_generic_subctx(sc->ctx, ctx, &mif->rx_irqvs[i].irq, rid, IFLIB_INTR_RXTX, ice_msix_que, rxq, rxq->me, irq_name); if (err) { device_printf(dev, "Failed to allocate q int %d err: %s\n", i, ice_err_str(err)); i--; goto fail; } MPASS(rid - 1 > 0); /* Set vector number used in interrupt enable/disable functions */ mif->rx_irqvs[i].me = rid - 1; rxq->irqv = &mif->rx_irqvs[i]; bzero(irq_name, sizeof(irq_name)); snprintf(irq_name, sizeof(irq_name), "m0txq%d", i); iflib_softirq_alloc_generic(ctx, &mif->rx_irqvs[i].irq, IFLIB_INTR_TX, txq, txq->me, irq_name); txq->irqv = &mif->rx_irqvs[i]; } sc->last_rid = rid - 1; ice_debug(hw, ICE_DBG_INIT, "%s: New last rid: %d\n", __func__, sc->last_rid); return (0); fail: for (; i >= 0; i--) iflib_irq_free(ctx, &mif->rx_irqvs[i].irq); return (err); } /** * ice_subif_rebuild - Rebuild subinterface post reset * @sc: The device private softc * * Restore subinterface state after a reset occurred. * Restart the VSI and enable the mirroring. 
*/
static int
ice_subif_rebuild(struct ice_softc *sc)
{
	/* NOTE(review): this casts the softc of the PF context (sc->ctx),
	 * not the subinterface context; mif is only used for the
	 * ICE_STATE_SUBIF_NEEDS_REINIT bit below -- confirm the aliasing is
	 * intentional.
	 */
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(sc->ctx);
	struct ice_vsi *vsi = sc->mirr_if->vsi;
	int err;

	err = ice_subif_rebuild_vsi_qmap(sc);
	if (err) {
		device_printf(sc->dev, "Unable to re-assign mirror VSI queues, err %s\n",
			      ice_err_str(err));
		return (err);
	}

	err = ice_initialize_vsi(vsi);
	if (err) {
		device_printf(sc->dev, "Unable to re-initialize mirror VSI, err %s\n",
			      ice_err_str(err));
		goto err_release_queue_allocations_subif;
	}

	err = ice_config_rss(vsi);
	if (err) {
		device_printf(sc->dev,
			      "Unable to reconfigure RSS for the mirror VSI, err %s\n",
			      ice_err_str(err));
		goto err_deinit_subif_vsi;
	}

	/* Re-enable mirroring from the main PF VSI */
	vsi->mirror_src_vsi = sc->pf_vsi.idx;

	err = ice_setup_vsi_mirroring(vsi);
	if (err) {
		device_printf(sc->dev,
			      "Unable to configure mirroring for VSI: %s\n",
			      ice_err_str(err));
		goto err_deinit_subif_vsi;
	}

	ice_set_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT);

	return (0);

err_deinit_subif_vsi:
	ice_deinit_vsi(vsi);
err_release_queue_allocations_subif:
	ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
			       sc->mirr_if->num_irq_vectors);
	ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
			       sc->mirr_if->num_irq_vectors);

	return (err);
}

/**
 * ice_subif_rebuild_vsi_qmap - Rebuild the mirror VSI queue mapping
 * @sc: the device softc pointer
 *
 * Loops over the Tx and Rx queues for the mirror VSI and reassigns the queue
 * mapping after a reset occurred.
*/
static int
ice_subif_rebuild_vsi_qmap(struct ice_softc *sc)
{
	struct ice_vsi *vsi = sc->mirr_if->vsi;
	struct ice_tx_queue *txq;
	struct ice_rx_queue *rxq;
	int err, i;

	/* Scattered assignment: the queues need not be contiguous in the
	 * PF queue space.
	 */
	err = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
					  sc->mirr_if->num_irq_vectors);
	if (err) {
		device_printf(sc->dev, "Unable to assign mirror VSI Tx queues: %s\n",
			      ice_err_str(err));
		return (err);
	}

	err = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
					  sc->mirr_if->num_irq_vectors);
	if (err) {
		device_printf(sc->dev, "Unable to assign mirror VSI Rx queues: %s\n",
			      ice_err_str(err));
		goto err_release_tx_queues;
	}

	vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;

	/* Re-assign Tx queue tail pointers */
	for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
		txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);

	/* Re-assign Rx queue tail pointers */
	for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
		rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);

	return (0);

err_release_tx_queues:
	ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
			       vsi->num_tx_queues);

	return (err);
}

/**
 * ice_subif_if_tx_queues_alloc - Allocate Tx queue memory for subinterfaces
 * @ctx: iflib context structure
 * @vaddrs: virtual addresses for the queue memory
 * @paddrs: physical addresses for the queue memory
 * @ntxqs: the number of Tx queues per set (should always be 1)
 * @ntxqsets: the number of Tx queue sets to allocate
 *
 * See ice_if_tx_queues_alloc() description. Similar to that function, but
 * for subinterfaces instead.
*/ static int ice_subif_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int __invariant_only ntxqs, int ntxqsets) { struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx); struct ice_tx_queue *txq; device_t dev = mif->subdev; struct ice_vsi *vsi; int err, i, j; MPASS(mif != NULL); MPASS(ntxqs == 1); MPASS(mif->subscctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT); vsi = mif->vsi; MPASS(vsi->num_tx_queues == ntxqsets); /* Allocate queue structure memory */ if (!(vsi->tx_queues = (struct ice_tx_queue *)malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_NOWAIT | M_ZERO))) { device_printf(dev, "%s: Unable to allocate Tx queue memory for subfunction\n", __func__); return (ENOMEM); } /* Allocate report status arrays */ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (!(txq->tx_rsq = (uint16_t *)malloc(sizeof(uint16_t) * mif->subscctx->isc_ntxd[0], M_ICE, M_NOWAIT))) { device_printf(dev, "%s: Unable to allocate tx_rsq memory for subfunction\n", __func__); err = ENOMEM; goto free_tx_queues; } /* Initialize report status array */ for (j = 0; j < mif->subscctx->isc_ntxd[0]; j++) txq->tx_rsq[j] = QIDX_INVALID; } /* Add Tx queue sysctls context */ ice_vsi_add_txqs_ctx(vsi); for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { /* q_handle == me when only one TC */ txq->me = txq->q_handle = i; txq->vsi = vsi; /* store the queue size for easier access */ txq->desc_count = mif->subscctx->isc_ntxd[0]; /* get the virtual and physical address of the hardware queues */ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); txq->tx_base = (struct ice_tx_desc *)vaddrs[i]; txq->tx_paddr = paddrs[i]; ice_add_txq_sysctls(txq); } return (0); free_tx_queues: for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (txq->tx_rsq != NULL) { free(txq->tx_rsq, M_ICE); txq->tx_rsq = NULL; } } free(vsi->tx_queues, M_ICE); vsi->tx_queues = NULL; return (err); } /** * ice_subif_if_rx_queues_alloc - Allocate Rx queue memory for subinterfaces * 
@ctx: iflib context structure * @vaddrs: virtual addresses for the queue memory * @paddrs: physical addresses for the queue memory * @nrxqs: number of Rx queues per set (should always be 1) * @nrxqsets: number of Rx queue sets to allocate * * See ice_if_rx_queues_alloc() for general summary; this is similar to that * but implemented for subinterfaces. */ static int ice_subif_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int __invariant_only nrxqs, int nrxqsets) { struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx); struct ice_rx_queue *rxq; device_t dev = mif->subdev; struct ice_vsi *vsi; int i; MPASS(mif != NULL); MPASS(nrxqs == 1); MPASS(mif->subscctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT); vsi = mif->vsi; MPASS(vsi->num_rx_queues == nrxqsets); /* Allocate queue structure memory */ if (!(vsi->rx_queues = (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_NOWAIT | M_ZERO))) { device_printf(dev, "%s: Unable to allocate Rx queue memory for subfunction\n", __func__); return (ENOMEM); } /* Add Rx queue sysctls context */ ice_vsi_add_rxqs_ctx(vsi); for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) { rxq->me = i; rxq->vsi = vsi; /* store the queue size for easier access */ rxq->desc_count = mif->subscctx->isc_nrxd[0]; /* get the virtual and physical address of the hardware queues */ rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i]; rxq->rx_paddr = paddrs[i]; ice_add_rxq_sysctls(rxq); } return (0); } /** * ice_subif_if_msix_intr_assign - Assign MSI-X interrupts to new sub interface * @ctx: the iflib context structure * @msix: the number of vectors we were assigned * * Allocates and assigns driver private resources for MSI-X interrupt tracking. * * @pre OS MSI-X resources have been pre-allocated by parent interface. 
*/ static int ice_subif_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx); struct ice_softc *sc = mif->back; struct ice_vsi *vsi = mif->vsi; device_t dev = mif->subdev; int ret; if (vsi->num_rx_queues != vsi->num_tx_queues) { device_printf(dev, "iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing number of Tx and Rx queues\n", vsi->num_tx_queues, vsi->num_rx_queues); return (EOPNOTSUPP); } if (msix > sc->extra_vectors) { device_printf(dev, "%s: Not enough spare (%d) msix vectors for new sub-interface requested (%d)\n", __func__, sc->extra_vectors, msix); return (ENOSPC); } device_printf(dev, "%s: Using %d vectors for sub-interface\n", __func__, msix); /* Allocate space to store the IRQ vector data */ mif->num_irq_vectors = vsi->num_rx_queues; mif->rx_irqvs = (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (mif->num_irq_vectors), M_ICE, M_NOWAIT); if (!mif->rx_irqvs) { device_printf(dev, "Unable to allocate RX irqv memory for mirror's %d vectors\n", mif->num_irq_vectors); return (ENOMEM); } /* Assign mirror interface interrupts from PF device space */ if (!(mif->if_imap = (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors, M_ICE, M_NOWAIT))) { device_printf(dev, "Unable to allocate mirror intfc if_imap memory\n"); ret = ENOMEM; goto free_irqvs; } ret = ice_resmgr_assign_contiguous(&sc->dev_imgr, mif->if_imap, mif->num_irq_vectors); if (ret) { device_printf(dev, "Unable to assign mirror intfc PF device interrupt mapping: %s\n", ice_err_str(ret)); goto free_if_imap; } /* Assign mirror interface interrupts from OS interrupt allocation space */ if (!(mif->os_imap = (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors, M_ICE, M_NOWAIT))) { device_printf(dev, "Unable to allocate mirror intfc os_imap memory\n"); ret = ENOMEM; goto free_if_imap; } ret = ice_resmgr_assign_contiguous(&sc->os_imgr, mif->os_imap, mif->num_irq_vectors); if (ret) { 
device_printf(dev, "Unable to assign mirror intfc OS interrupt mapping: %s\n", ice_err_str(ret)); goto free_if_imap; } return (0); free_if_imap: free(mif->if_imap, M_ICE); mif->if_imap = NULL; free_irqvs: free(mif->rx_irqvs, M_ICE); mif->rx_irqvs = NULL; return (ret); } /** * ice_subif_if_intr_enable - Enable device interrupts for a subinterface * @ctx: iflib context structure * * Called by iflib to request enabling all interrupts that belong to a * subinterface. */ static void ice_subif_if_intr_enable(if_ctx_t ctx) { struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx); struct ice_softc *sc = mif->back; struct ice_vsi *vsi = mif->vsi; struct ice_hw *hw = &sc->hw; /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; /* Enable all queue interrupts */ for (int i = 0; i < vsi->num_rx_queues; i++) ice_enable_intr(hw, vsi->rx_queues[i].irqv->me); } /** * ice_subif_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt * @ctx: iflib context structure * @rxqid: the Rx queue to enable * * Enable a specific Rx queue interrupt. * * This function is not protected by the iflib CTX lock. */ static int ice_subif_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx); struct ice_softc *sc = mif->back; struct ice_vsi *vsi = mif->vsi; struct ice_hw *hw = &sc->hw; /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me); return (0); } /** * ice_subif_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt * @ctx: iflib context structure * @txqid: the Tx queue to enable * * Enable a specific Tx queue interrupt. * * This function is not protected by the iflib CTX lock. 
 */
static int
ice_subif_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
	struct ice_softc *sc = mif->back;
	struct ice_vsi *vsi = mif->vsi;
	struct ice_hw *hw = &sc->hw;

	/* Do not enable queue interrupts in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return (ENOSYS);

	ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);

	return (0);
}

/**
 * ice_subif_if_init - Initialize the subinterface
 * @ctx: iflib ctx structure
 *
 * Called by iflib to bring the device up, i.e. ifconfig ice0m0 up.
 * Prepares the Tx and Rx engines and enables interrupts.
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_subif_if_init(if_ctx_t ctx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
	struct ice_softc *sc = mif->back;
	struct ice_vsi *vsi = mif->vsi;
	device_t dev = mif->subdev;
	int err;

	/* Nothing to bring up if the parent driver is going away or is in
	 * recovery mode.
	 */
	if (ice_driver_is_detaching(sc))
		return;

	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
		device_printf(dev,
		    "request to start interface cannot be completed as the parent device %s failed to reset\n",
		    device_get_nameunit(sc->dev));
		return;
	}

	if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
		device_printf(dev,
		    "request to start interface cannot be completed while parent device %s is prepared for impending reset\n",
		    device_get_nameunit(sc->dev));
		return;
	}

	/* XXX: Equiv to ice_update_rx_mbuf_sz */
	vsi->mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Initialize software Tx tracking values */
	ice_init_tx_tracking(vsi);

	err = ice_cfg_vsi_for_tx(vsi);
	if (err) {
		device_printf(dev,
		    "Unable to configure subif VSI for Tx: %s\n",
		    ice_err_str(err));
		return;
	}

	err = ice_cfg_vsi_for_rx(vsi);
	if (err) {
		device_printf(dev,
		    "Unable to configure subif VSI for Rx: %s\n",
		    ice_err_str(err));
		goto err_cleanup_tx;
	}

	err = ice_control_all_rx_queues(vsi, true);
	if (err) {
		device_printf(dev,
		    "Unable to enable subif Rx rings for receive: %s\n",
		    ice_err_str(err));
		goto err_cleanup_tx;
	}

	ice_configure_all_rxq_interrupts(vsi);
	ice_configure_rx_itr(vsi);

	/* Mark the subinterface as up; ice_subif_if_stop() tests and clears
	 * this same state bit.
	 */
	ice_set_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
	return;

err_cleanup_tx:
	ice_vsi_disable_tx(vsi);
}

/**
 * ice_subif_if_stop - Stop the subinterface
 * @ctx: iflib context structure
 *
 * Called by iflib to stop the subinterface and bring it down.
 * (e.g. ifconfig ice0m0 down)
 *
 * @pre assumes the caller holds the iflib CTX lock
 */
static void
ice_subif_if_stop(if_ctx_t ctx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
	struct ice_softc *sc = mif->back;
	struct ice_vsi *vsi = mif->vsi;
	device_t dev = mif->subdev;

	/* Only stop once after a successful ice_subif_if_init() */
	if (!ice_testandclear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED))
		return;

	if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
		device_printf(dev,
		    "request to stop interface cannot be completed as the parent device %s failed to reset\n",
		    device_get_nameunit(sc->dev));
		return;
	}

	if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
		device_printf(dev,
		    "request to stop interface cannot be completed while parent device %s is prepared for impending reset\n",
		    device_get_nameunit(sc->dev));
		return;
	}

	/* Dissociate the Tx and Rx queues from the interrupts */
	ice_flush_txq_interrupts(vsi);
	ice_flush_rxq_interrupts(vsi);

	/* Disable the Tx and Rx queues; the Rx disable return value is
	 * ignored here (best-effort teardown).
	 */
	ice_vsi_disable_tx(vsi);
	ice_control_all_rx_queues(vsi, false);
}

/**
 * ice_free_irqvs_subif - Free IRQ vector memory for subinterfaces
 * @mif: Mirror interface private structure
 *
 * Free IRQ vector memory allocated during ice_subif_if_msix_intr_assign.
 */
static void
ice_free_irqvs_subif(struct ice_mirr_if *mif)
{
	struct ice_softc *sc = mif->back;
	struct ice_vsi *vsi = mif->vsi;
	if_ctx_t ctx = sc->ctx;
	int i;

	/* If the irqvs array is NULL, then there are no vectors to free */
	if (mif->rx_irqvs == NULL)
		return;

	/* Free the IRQ vectors -- currently subinterfaces have number
	 * of vectors equal to number of RX queues
	 *
	 * XXX: ctx is parent device's ctx, not the subinterface ctx
	 */
	for (i = 0; i < vsi->num_rx_queues; i++)
		iflib_irq_free(ctx, &mif->rx_irqvs[i].irq);

	/* Return the interrupt mappings to the parent's resource managers */
	ice_resmgr_release_map(&sc->os_imgr, mif->os_imap,
	    mif->num_irq_vectors);
	ice_resmgr_release_map(&sc->dev_imgr, mif->if_imap,
	    mif->num_irq_vectors);

	sc->last_rid -= vsi->num_rx_queues;

	/* Clear the irqv pointers so the queues no longer reference the
	 * array freed below
	 */
	for (i = 0; i < vsi->num_rx_queues; i++)
		vsi->rx_queues[i].irqv = NULL;

	for (i = 0; i < vsi->num_tx_queues; i++)
		vsi->tx_queues[i].irqv = NULL;

	/* Release the vector array memory */
	free(mif->rx_irqvs, M_ICE);
	mif->rx_irqvs = NULL;
}

/**
 * ice_subif_if_queues_free - Free queue memory for subinterfaces
 * @ctx: the iflib context structure
 *
 * Free queue memory allocated by ice_subif_if_tx_queues_alloc() and
 * ice_subif_if_rx_queues_alloc().
 */
static void
ice_subif_if_queues_free(if_ctx_t ctx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
	struct ice_vsi *vsi = mif->vsi;
	struct ice_tx_queue *txq;
	int i;

	/* Free the Tx and Rx sysctl contexts, and assign NULL to the node
	 * pointers.
	 */
	ice_vsi_del_txqs_ctx(vsi);
	ice_vsi_del_rxqs_ctx(vsi);

	/* Release MSI-X IRQ vectors */
	ice_free_irqvs_subif(mif);

	if (vsi->tx_queues != NULL) {
		/* free the tx_rsq arrays */
		for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
			if (txq->tx_rsq != NULL) {
				free(txq->tx_rsq, M_ICE);
				txq->tx_rsq = NULL;
			}
		}
		free(vsi->tx_queues, M_ICE);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_ICE);
		vsi->rx_queues = NULL;
	}
}

/**
 * ice_subif_if_media_status - Report subinterface media
 * @ctx: iflib context structure
 * @ifmr: ifmedia request structure to update
 *
 * Updates the provided ifmr with something, in order to prevent a
 * "no media types?" message from ifconfig.
 *
 * Mirror interfaces are always up.
 */
static void
ice_subif_if_media_status(if_ctx_t ctx __unused, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
}

/**
 * ice_subif_if_promisc_set - Set subinterface promiscuous mode
 * @ctx: iflib context structure
 * @flags: promiscuous flags to configure
 *
 * Called by iflib to configure device promiscuous mode.
 *
 * @remark This does not need to be implemented for now.
 */
static int
ice_subif_if_promisc_set(if_ctx_t ctx __unused, int flags __unused)
{
	return (0);
}