Index: stable/12/sys/dev/sfxge/common/ef10_nvram.c
===================================================================
--- stable/12/sys/dev/sfxge/common/ef10_nvram.c	(revision 342323)
+++ stable/12/sys/dev/sfxge/common/ef10_nvram.c	(revision 342324)
@@ -1,2388 +1,2391 @@
 /*-
  * Copyright (c) 2012-2016 Solarflare Communications Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * The views and conclusions contained in the software and documentation are
  * those of the authors and should not be interpreted as representing official
  * policies, either expressed or implied, of the FreeBSD Project.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "efx.h"
 #include "efx_impl.h"
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 
 #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
 
 #include "ef10_tlv_layout.h"
 
 /* Cursor for TLV partition format */
 typedef struct tlv_cursor_s {
 	uint32_t	*block;			/* Base of data block */
 	uint32_t	*current;		/* Cursor position */
 	uint32_t	*end;			/* End tag position */
 	uint32_t	*limit;			/* Last dword of data block */
 } tlv_cursor_t;
 
 typedef struct nvram_partition_s {
 	uint16_t type;
 	uint8_t chip_select;
 	uint8_t flags;
 	/*
 	 * The full length of the NVRAM partition.
 	 * This is different from tlv_partition_header.total_length,
 	 *  which can be smaller.
 	 */
 	uint32_t length;
 	uint32_t erase_size;
 	uint32_t *data;
 	tlv_cursor_t tlv_cursor;
 } nvram_partition_t;
 
 
 static	__checkReturn		efx_rc_t
 tlv_validate_state(
 	__inout			tlv_cursor_t *cursor);
 
 
 static				void
 tlv_init_block(
 	__out	uint32_t	*block)
 {
 	*block = __CPU_TO_LE_32(TLV_TAG_END);
 }
 
 static				uint32_t
 tlv_tag(
 	__in	tlv_cursor_t	*cursor)
 {
 	uint32_t dword, tag;
 
 	dword = cursor->current[0];
 	tag = __LE_TO_CPU_32(dword);
 
 	return (tag);
 }
 
 static				size_t
 tlv_length(
 	__in	tlv_cursor_t	*cursor)
 {
 	uint32_t dword, length;
 
 	if (tlv_tag(cursor) == TLV_TAG_END)
 		return (0);
 
 	dword = cursor->current[1];
 	length = __LE_TO_CPU_32(dword);
 
 	return ((size_t)length);
 }
 
 static				uint8_t *
 tlv_value(
 	__in	tlv_cursor_t	*cursor)
 {
 	if (tlv_tag(cursor) == TLV_TAG_END)
 		return (NULL);
 
 	return ((uint8_t *)(&cursor->current[2]));
 }
 
 static				uint8_t *
 tlv_item(
 	__in	tlv_cursor_t	*cursor)
 {
 	if (tlv_tag(cursor) == TLV_TAG_END)
 		return (NULL);
 
 	return ((uint8_t *)cursor->current);
 }
 
 /*
  * TLV item DWORD length is tag + length + value (rounded up to DWORD)
  * equivalent to tlv_n_words_for_len in mc-comms tlv.c
  */
 #define	TLV_DWORD_COUNT(length) \
 	(1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t)))
 
 
 static				uint32_t *
 tlv_next_item_ptr(
 	__in	tlv_cursor_t	*cursor)
 {
 	uint32_t length;
 
 	length = tlv_length(cursor);
 
 	return (cursor->current + TLV_DWORD_COUNT(length));
 }
 
 static	__checkReturn		efx_rc_t
 tlv_advance(
 	__inout	tlv_cursor_t	*cursor)
 {
 	efx_rc_t rc;
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail1;
 
 	if (cursor->current == cursor->end) {
 		/* No more tags after END tag */
 		cursor->current = NULL;
 		rc = ENOENT;
 		goto fail2;
 	}
 
 	/* Advance to next item and validate */
 	cursor->current = tlv_next_item_ptr(cursor);
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail3;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static				efx_rc_t
 tlv_rewind(
 	__in	tlv_cursor_t	*cursor)
 {
 	efx_rc_t rc;
 
 	cursor->current = cursor->block;
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static				efx_rc_t
 tlv_find(
 	__inout	tlv_cursor_t	*cursor,
 	__in	uint32_t	tag)
 {
 	efx_rc_t rc;
 
 	rc = tlv_rewind(cursor);
 	while (rc == 0) {
 		if (tlv_tag(cursor) == tag)
 			break;
 
 		rc = tlv_advance(cursor);
 	}
 	return (rc);
 }
 
 static	__checkReturn		efx_rc_t
 tlv_validate_state(
 	__inout	tlv_cursor_t	*cursor)
 {
 	efx_rc_t rc;
 
 	/* Check cursor position */
 	if (cursor->current < cursor->block) {
 		rc = EINVAL;
 		goto fail1;
 	}
 	if (cursor->current > cursor->limit) {
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	if (tlv_tag(cursor) != TLV_TAG_END) {
 		/* Check current item has space for tag and length */
 		if (cursor->current > (cursor->limit - 2)) {
 			cursor->current = NULL;
 			rc = EFAULT;
 			goto fail3;
 		}
 
 		/* Check we have value data for current item and another tag */
 		if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
 			cursor->current = NULL;
 			rc = EFAULT;
 			goto fail4;
 		}
 	}
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static				efx_rc_t
 tlv_init_cursor(
 	__out	tlv_cursor_t	*cursor,
 	__in	uint32_t	*block,
 	__in	uint32_t	*limit,
 	__in	uint32_t	*current)
 {
 	cursor->block	= block;
 	cursor->limit	= limit;
 
 	cursor->current	= current;
 	cursor->end	= NULL;
 
 	return (tlv_validate_state(cursor));
 }
 
 static	__checkReturn		efx_rc_t
 tlv_init_cursor_from_size(
 	__out	tlv_cursor_t	*cursor,
 	__in_bcount(size)
 		uint8_t		*block,
 	__in	size_t		size)
 {
 	uint32_t *limit;
 	limit = (uint32_t *)(block + size - sizeof (uint32_t));
 	return (tlv_init_cursor(cursor, (uint32_t *)block,
 		limit, (uint32_t *)block));
 }
 
 static	__checkReturn		efx_rc_t
 tlv_init_cursor_at_offset(
 	__out	tlv_cursor_t	*cursor,
 	__in_bcount(size)
 		uint8_t		*block,
 	__in	size_t		size,
 	__in	size_t		offset)
 {
 	uint32_t *limit;
 	uint32_t *current;
 	limit = (uint32_t *)(block + size - sizeof (uint32_t));
 	current = (uint32_t *)(block + offset);
 	return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current));
 }
 
 static	__checkReturn		efx_rc_t
 tlv_require_end(
 	__inout	tlv_cursor_t	*cursor)
 {
 	uint32_t *pos;
 	efx_rc_t rc;
 
 	if (cursor->end == NULL) {
 		pos = cursor->current;
 		if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0)
 			goto fail1;
 
 		cursor->end = cursor->current;
 		cursor->current = pos;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static				size_t
 tlv_block_length_used(
 	__inout	tlv_cursor_t	*cursor)
 {
 	efx_rc_t rc;
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail1;
 
 	if ((rc = tlv_require_end(cursor)) != 0)
 		goto fail2;
 
 	/* Return space used (including the END tag) */
 	return (cursor->end + 1 - cursor->block) * sizeof (uint32_t);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (0);
 }
 
 static		uint32_t *
 tlv_last_segment_end(
 	__in	tlv_cursor_t *cursor)
 {
 	tlv_cursor_t segment_cursor;
 	uint32_t *last_segment_end = cursor->block;
 	uint32_t *segment_start = cursor->block;
 
 	/*
 	 * Go through each segment and check that it has an end tag. If there
 	 * is no end tag then the previous segment was the last valid one,
 	 * so return the pointer to its end tag.
 	 */
 	for (;;) {
 		if (tlv_init_cursor(&segment_cursor, segment_start,
 		    cursor->limit, segment_start) != 0)
 			break;
 		if (tlv_require_end(&segment_cursor) != 0)
 			break;
 		last_segment_end = segment_cursor.end;
 		segment_start = segment_cursor.end + 1;
 	}
 
 	return (last_segment_end);
 }
 
 
 static				uint32_t *
 tlv_write(
 	__in			tlv_cursor_t *cursor,
 	__in			uint32_t tag,
 	__in_bcount(size)	uint8_t *data,
 	__in			size_t size)
 {
 	uint32_t len = size;
 	uint32_t *ptr;
 
 	ptr = cursor->current;
 
 	*ptr++ = __CPU_TO_LE_32(tag);
 	*ptr++ = __CPU_TO_LE_32(len);
 
 	if (len > 0) {
 		ptr[(len - 1) / sizeof (uint32_t)] = 0;
 		memcpy(ptr, data, len);
 		ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr);
 	}
 
 	return (ptr);
 }
 
 static	__checkReturn		efx_rc_t
 tlv_insert(
 	__inout	tlv_cursor_t	*cursor,
 	__in	uint32_t	tag,
 	__in_bcount(size)
 		uint8_t		*data,
 	__in	size_t		size)
 {
 	unsigned int delta;
 	uint32_t *last_segment_end;
 	efx_rc_t rc;
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail1;
 
 	if ((rc = tlv_require_end(cursor)) != 0)
 		goto fail2;
 
 	if (tag == TLV_TAG_END) {
 		rc = EINVAL;
 		goto fail3;
 	}
 
 	last_segment_end = tlv_last_segment_end(cursor);
 
 	delta = TLV_DWORD_COUNT(size);
 	if (last_segment_end + 1 + delta > cursor->limit) {
 		rc = ENOSPC;
 		goto fail4;
 	}
 
 	/* Move data up: new space at cursor->current */
 	memmove(cursor->current + delta, cursor->current,
 	    (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
 
 	/* Adjust the end pointer */
 	cursor->end += delta;
 
 	/* Write new TLV item */
 	tlv_write(cursor, tag, data, size);
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn		efx_rc_t
 tlv_delete(
 	__inout	tlv_cursor_t	*cursor)
 {
 	unsigned int delta;
 	uint32_t *last_segment_end;
 	efx_rc_t rc;
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail1;
 
 	if (tlv_tag(cursor) == TLV_TAG_END) {
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	delta = TLV_DWORD_COUNT(tlv_length(cursor));
 
 	if ((rc = tlv_require_end(cursor)) != 0)
 		goto fail3;
 
 	last_segment_end = tlv_last_segment_end(cursor);
 
 	/* Shuffle things down, destroying the item at cursor->current */
 	memmove(cursor->current, cursor->current + delta,
 	    (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
 	/* Zero the new space at the end of the TLV chain */
 	memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t));
 	/* Adjust the end pointer */
 	cursor->end -= delta;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn		efx_rc_t
 tlv_modify(
 	__inout	tlv_cursor_t	*cursor,
 	__in	uint32_t	tag,
 	__in_bcount(size)
 		uint8_t		*data,
 	__in	size_t		size)
 {
 	uint32_t *pos;
 	unsigned int old_ndwords;
 	unsigned int new_ndwords;
 	unsigned int delta;
 	uint32_t *last_segment_end;
 	efx_rc_t rc;
 
 	if ((rc = tlv_validate_state(cursor)) != 0)
 		goto fail1;
 
 	if (tlv_tag(cursor) == TLV_TAG_END) {
 		rc = EINVAL;
 		goto fail2;
 	}
 	if (tlv_tag(cursor) != tag) {
 		rc = EINVAL;
 		goto fail3;
 	}
 
 	old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor));
 	new_ndwords = TLV_DWORD_COUNT(size);
 
 	if ((rc = tlv_require_end(cursor)) != 0)
 		goto fail4;
 
 	last_segment_end = tlv_last_segment_end(cursor);
 
 	if (new_ndwords > old_ndwords) {
 		/* Expand space used for TLV item */
 		delta = new_ndwords - old_ndwords;
 		pos = cursor->current + old_ndwords;
 
 		if (last_segment_end + 1 + delta > cursor->limit) {
 			rc = ENOSPC;
 			goto fail5;
 		}
 
 		/* Move up: new space at (cursor->current + old_ndwords) */
 		memmove(pos + delta, pos,
 		    (last_segment_end + 1 - pos) * sizeof (uint32_t));
 
 		/* Adjust the end pointer */
 		cursor->end += delta;
 
 	} else if (new_ndwords < old_ndwords) {
 		/* Shrink space used for TLV item */
 		delta = old_ndwords - new_ndwords;
 		pos = cursor->current + new_ndwords;
 
 		/* Move down: remove words at (cursor->current + new_ndwords) */
 		memmove(pos, pos + delta,
 		    (last_segment_end + 1 - pos) * sizeof (uint32_t));
 
 		/* Zero the new space at the end of the TLV chain */
 		memset(last_segment_end + 1 - delta, 0,
 		    delta * sizeof (uint32_t));
 
 		/* Adjust the end pointer */
 		cursor->end -= delta;
 	}
 
 	/* Write new data */
 	tlv_write(cursor, tag, data, size);
 
 	return (0);
 
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static uint32_t checksum_tlv_partition(
 	__in	nvram_partition_t *partition)
 {
 	tlv_cursor_t *cursor;
 	uint32_t *ptr;
 	uint32_t *end;
 	uint32_t csum;
 	size_t len;
 
 	cursor = &partition->tlv_cursor;
 	len = tlv_block_length_used(cursor);
 	EFSYS_ASSERT3U((len & 3), ==, 0);
 
 	csum = 0;
 	ptr = partition->data;
 	end = &ptr[len >> 2];
 
 	while (ptr < end)
 		csum += __LE_TO_CPU_32(*ptr++);
 
 	return (csum);
 }
 
 static	__checkReturn		efx_rc_t
 tlv_update_partition_len_and_cks(
 	__in	tlv_cursor_t *cursor)
 {
 	efx_rc_t rc;
 	nvram_partition_t partition;
 	struct tlv_partition_header *header;
 	struct tlv_partition_trailer *trailer;
 	size_t new_len;
 
 	/*
 	 * We just modified the partition, so the total length may not be
 	 * valid. Don't use tlv_find(), which performs some sanity checks
 	 * that may fail here.
 	 */
 	partition.data = cursor->block;
 	memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
 	header = (struct tlv_partition_header *)partition.data;
 	/* Sanity check. */
 	if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
 		rc = EFAULT;
 		goto fail1;
 	}
 	new_len =  tlv_block_length_used(&partition.tlv_cursor);
 	if (new_len == 0) {
 		rc = EFAULT;
 		goto fail2;
 	}
 	header->total_length = __CPU_TO_LE_32(new_len);
 	/* Ensure the modified partition always has a new generation count. */
 	header->generation = __CPU_TO_LE_32(
 	    __LE_TO_CPU_32(header->generation) + 1);
 
 	trailer = (struct tlv_partition_trailer *)((uint8_t *)header +
 	    new_len - sizeof (*trailer) - sizeof (uint32_t));
 	trailer->generation = header->generation;
 	trailer->checksum = __CPU_TO_LE_32(
 	    __LE_TO_CPU_32(trailer->checksum) -
 	    checksum_tlv_partition(&partition));
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /* Validate buffer contents (before writing to flash) */
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_validate(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in_bcount(partn_size)	caddr_t partn_data,
 	__in			size_t partn_size)
 {
 	tlv_cursor_t cursor;
 	struct tlv_partition_header *header;
 	struct tlv_partition_trailer *trailer;
 	size_t total_length;
 	uint32_t cksum;
 	int pos;
 	efx_rc_t rc;
 
+	_NOTE(ARGUNUSED(enp, partn))
 	EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
 
 	if ((partn_data == NULL) || (partn_size == 0)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	/* The partition header must be the first item (at offset zero) */
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data,
 		    partn_size)) != 0) {
 		rc = EFAULT;
 		goto fail2;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
 		rc = EINVAL;
 		goto fail3;
 	}
 	header = (struct tlv_partition_header *)tlv_item(&cursor);
 
 	/* Check TLV partition length (includes the END tag) */
 	total_length = __LE_TO_CPU_32(header->total_length);
 	if (total_length > partn_size) {
 		rc = EFBIG;
 		goto fail4;
 	}
 
 	/* Check partition ends with PARTITION_TRAILER and END tags */
 	if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
 		rc = EINVAL;
 		goto fail5;
 	}
 	trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
 
 	if ((rc = tlv_advance(&cursor)) != 0) {
 		rc = EINVAL;
 		goto fail6;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_END) {
 		rc = EINVAL;
 		goto fail7;
 	}
 
 	/* Check generation counts are consistent */
 	if (trailer->generation != header->generation) {
 		rc = EINVAL;
 		goto fail8;
 	}
 
 	/* Verify partition checksum */
 	cksum = 0;
 	for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
 		cksum += *((uint32_t *)(partn_data + pos));
 	}
 	if (cksum != 0) {
 		rc = EINVAL;
 		goto fail9;
 	}
 
 	return (0);
 
 fail9:
 	EFSYS_PROBE(fail9);
 fail8:
 	EFSYS_PROBE(fail8);
 fail7:
 	EFSYS_PROBE(fail7);
 fail6:
 	EFSYS_PROBE(fail6);
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_create(
 	__in			efx_nic_t *enp,
 	__in			uint16_t partn_type,
 	__in_bcount(partn_size)	caddr_t partn_data,
 	__in			size_t partn_size)
 {
 	uint32_t *buf = (uint32_t *)partn_data;
 	efx_rc_t rc;
 	tlv_cursor_t cursor;
 	struct tlv_partition_header header;
 	struct tlv_partition_trailer trailer;
 
 	unsigned int min_buf_size = sizeof (struct tlv_partition_header) +
 	    sizeof (struct tlv_partition_trailer);
 	if (partn_size < min_buf_size) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	memset(buf, 0xff, partn_size);
 
 	tlv_init_block(buf);
 	if ((rc = tlv_init_cursor(&cursor, buf,
 	    (uint32_t *)((uint8_t *)buf + partn_size),
 	    buf)) != 0) {
 		goto fail2;
 	}
 
 	header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
 	header.length = __CPU_TO_LE_32(sizeof (header) - 8);
 	header.type_id = __CPU_TO_LE_16(partn_type);
 	header.preset = 0;
 	header.generation = __CPU_TO_LE_32(1);
 	header.total_length = 0;  /* This will be fixed below. */
 	if ((rc = tlv_insert(
 	    &cursor, TLV_TAG_PARTITION_HEADER,
 	    (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
 		goto fail3;
 	if ((rc = tlv_advance(&cursor)) != 0)
 		goto fail4;
 
 	trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
 	trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
 	trailer.generation = header.generation;
 	trailer.checksum = 0;  /* This will be fixed below. */
 	if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
 	    (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
 		goto fail5;
 
 	if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
 		goto fail6;
 
 	/* Check that the partition is valid. */
 	if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
 	    partn_data, partn_size)) != 0)
 		goto fail7;
 
 	return (0);
 
 fail7:
 	EFSYS_PROBE(fail7);
 fail6:
 	EFSYS_PROBE(fail6);
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static			uint32_t
 byte_offset(
 	__in		uint32_t *position,
 	__in		uint32_t *base)
 {
 	return (uint32_t)((uint8_t *)position - (uint8_t *)base);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_find_item_start(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__out			uint32_t *startp)
 {
 	/* Read past partition header to find start address of the first key */
 	tlv_cursor_t cursor;
 	efx_rc_t rc;
 
 	/* A PARTITION_HEADER tag must be the first item (at offset zero) */
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
 			buffer_size)) != 0) {
 		rc = EFAULT;
 		goto fail1;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	if ((rc = tlv_advance(&cursor)) != 0) {
 		rc = EINVAL;
 		goto fail3;
 	}
 	*startp = byte_offset(cursor.current, cursor.block);
 
 	if ((rc = tlv_require_end(&cursor)) != 0)
 		goto fail4;
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_find_end(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *endp)
 {
 	/* Read to end of partition */
 	tlv_cursor_t cursor;
 	efx_rc_t rc;
 	uint32_t *segment_used;
 
 	_NOTE(ARGUNUSED(offset))
 
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
 			buffer_size)) != 0) {
 		rc = EFAULT;
 		goto fail1;
 	}
 
 	segment_used = cursor.block;
 
 	/*
 	 * Go through each segment and check that it has an end tag. If there
 	 * is no end tag then the previous segment was the last valid one,
 	 * so return the used space including that end tag.
 	 */
 	while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
 		if (tlv_require_end(&cursor) != 0) {
 			if (segment_used == cursor.block) {
 				/*
 				 * First segment is corrupt, so there is
 				 * no valid data in partition.
 				 */
 				rc = EINVAL;
 				goto fail2;
 			}
 			break;
 		}
 		segment_used = cursor.end + 1;
 
 		cursor.current = segment_used;
 	}
 	/* Return space used (including the END tag) */
 	*endp = (segment_used - cursor.block) * sizeof (uint32_t);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 ef10_nvram_buffer_find_item(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *startp,
 	__out			uint32_t *lengthp)
 {
 	/* Find TLV at offset and return key start and length */
 	tlv_cursor_t cursor;
 	uint8_t *key;
 	uint32_t tag;
 
 	if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
 			buffer_size, offset) != 0) {
 		return (B_FALSE);
 	}
 
 	while ((key = tlv_item(&cursor)) != NULL) {
 		tag = tlv_tag(&cursor);
 		if (tag == TLV_TAG_PARTITION_HEADER ||
 		    tag == TLV_TAG_PARTITION_TRAILER) {
 			if (tlv_advance(&cursor) != 0) {
 				break;
 			}
 			continue;
 		}
 		*startp = byte_offset(cursor.current, cursor.block);
 		*lengthp = byte_offset(tlv_next_item_ptr(&cursor),
 		    cursor.current);
 		return (B_TRUE);
 	}
 
 	return (B_FALSE);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_get_item(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__out_bcount_part(item_max_size, *lengthp)
 				caddr_t itemp,
 	__in			size_t item_max_size,
 	__out			uint32_t *lengthp)
 {
 	efx_rc_t rc;
 	tlv_cursor_t cursor;
 	uint32_t item_length;
 
 	if (item_max_size < length) {
 		rc = ENOSPC;
 		goto fail1;
 	}
 
 	if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
 			buffer_size, offset)) != 0) {
 		goto fail2;
 	}
 
 	item_length = tlv_length(&cursor);
 	if (length < item_length) {
 		rc = ENOSPC;
 		goto fail3;
 	}
 	memcpy(itemp, tlv_value(&cursor), item_length);
 
 	*lengthp = item_length;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_insert_item(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length,
 	__out			uint32_t *lengthp)
 {
 	efx_rc_t rc;
 	tlv_cursor_t cursor;
 
 	if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
 			buffer_size, offset)) != 0) {
 		goto fail1;
 	}
 
 	rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
 
 	if (rc != 0) {
 		goto fail2;
 	}
 
 	*lengthp = byte_offset(tlv_next_item_ptr(&cursor),
 		    cursor.current);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_delete_item(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__in			uint32_t end)
 {
 	efx_rc_t rc;
 	tlv_cursor_t cursor;
 
 	_NOTE(ARGUNUSED(length, end))
 
 	if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
 			buffer_size, offset)) != 0) {
 		goto fail1;
 	}
 
 	if ((rc = tlv_delete(&cursor)) != 0)
 		goto fail2;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_buffer_finish(
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size)
 {
 	efx_rc_t rc;
 	tlv_cursor_t cursor;
 
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
 			buffer_size)) != 0) {
 		rc = EFAULT;
 		goto fail1;
 	}
 
 	if ((rc = tlv_require_end(&cursor)) != 0)
 		goto fail2;
 
 	if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
 		goto fail3;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 
 
 /*
  * Read and validate a segment from a partition. A segment is a complete
  * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may
  * be multiple segments in a partition, so seg_offset allows segments
  * beyond the first to be read.
  */
 static	__checkReturn			efx_rc_t
 ef10_nvram_read_tlv_segment(
 	__in				efx_nic_t *enp,
 	__in				uint32_t partn,
 	__in				size_t seg_offset,
 	__in_bcount(max_seg_size)	caddr_t seg_data,
 	__in				size_t max_seg_size)
 {
 	tlv_cursor_t cursor;
 	struct tlv_partition_header *header;
 	struct tlv_partition_trailer *trailer;
 	size_t total_length;
 	uint32_t cksum;
 	int pos;
 	efx_rc_t rc;
 
 	EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
 
 	if ((seg_data == NULL) || (max_seg_size == 0)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	/* Read initial chunk of the segment, starting at offset */
 	if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
 		    EF10_NVRAM_CHUNK,
 		    MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
 		goto fail2;
 	}
 
 	/* A PARTITION_HEADER tag must be the first item at the given offset */
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
 		    max_seg_size)) != 0) {
 		rc = EFAULT;
 		goto fail3;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
 		rc = EINVAL;
 		goto fail4;
 	}
 	header = (struct tlv_partition_header *)tlv_item(&cursor);
 
 	/* Check TLV segment length (includes the END tag) */
 	total_length = __LE_TO_CPU_32(header->total_length);
 	if (total_length > max_seg_size) {
 		rc = EFBIG;
 		goto fail5;
 	}
 
 	/* Read the remaining segment content */
 	if (total_length > EF10_NVRAM_CHUNK) {
 		if ((rc = ef10_nvram_partn_read_mode(enp, partn,
 			    seg_offset + EF10_NVRAM_CHUNK,
 			    seg_data + EF10_NVRAM_CHUNK,
 			    total_length - EF10_NVRAM_CHUNK,
 			    MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
 			goto fail6;
 	}
 
 	/* Check segment ends with PARTITION_TRAILER and END tags */
 	if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
 		rc = EINVAL;
 		goto fail7;
 	}
 	trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
 
 	if ((rc = tlv_advance(&cursor)) != 0) {
 		rc = EINVAL;
 		goto fail8;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_END) {
 		rc = EINVAL;
 		goto fail9;
 	}
 
 	/* Check data read from segment is consistent */
 	if (trailer->generation != header->generation) {
 		/*
 		 * The partition data may have been modified between successive
 		 * MCDI NVRAM_READ requests by the MC or another PCI function.
 		 *
 		 * The caller must retry to obtain consistent partition data.
 		 */
 		rc = EAGAIN;
 		goto fail10;
 	}
 
 	/* Verify segment checksum */
 	cksum = 0;
 	for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
 		cksum += *((uint32_t *)(seg_data + pos));
 	}
 	if (cksum != 0) {
 		rc = EINVAL;
 		goto fail11;
 	}
 
 	return (0);
 
 fail11:
 	EFSYS_PROBE(fail11);
 fail10:
 	EFSYS_PROBE(fail10);
 fail9:
 	EFSYS_PROBE(fail9);
 fail8:
 	EFSYS_PROBE(fail8);
 fail7:
 	EFSYS_PROBE(fail7);
 fail6:
 	EFSYS_PROBE(fail6);
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * Read a single TLV item from a host memory
  * buffer containing a TLV formatted segment.
  */
 	__checkReturn		efx_rc_t
 ef10_nvram_buf_read_tlv(
 	__in				efx_nic_t *enp,
 	__in_bcount(max_seg_size)	caddr_t seg_data,
 	__in				size_t max_seg_size,
 	__in				uint32_t tag,
 	__deref_out_bcount_opt(*sizep)	caddr_t *datap,
 	__out				size_t *sizep)
 {
 	tlv_cursor_t cursor;
 	caddr_t data;
 	size_t length;
 	caddr_t value;
 	efx_rc_t rc;
+
+	_NOTE(ARGUNUSED(enp))
 
 	if ((seg_data == NULL) || (max_seg_size == 0)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	/* Find requested TLV tag in segment data */
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
 		    max_seg_size)) != 0) {
 		rc = EFAULT;
 		goto fail2;
 	}
 	if ((rc = tlv_find(&cursor, tag)) != 0) {
 		rc = ENOENT;
 		goto fail3;
 	}
 	value = (caddr_t)tlv_value(&cursor);
 	length = tlv_length(&cursor);
 
 	if (length == 0)
 		data = NULL;
 	else {
 		/* Copy out data from TLV item */
 		EFSYS_KMEM_ALLOC(enp->en_esip, length, data);
 		if (data == NULL) {
 			rc = ENOMEM;
 			goto fail4;
 		}
 		memcpy(data, value, length);
 	}
 
 	*datap = data;
 	*sizep = length;
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /* Read a single TLV item from the first segment in a TLV formatted partition */
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_read_tlv(
 	__in					efx_nic_t *enp,
 	__in					uint32_t partn,
 	__in					uint32_t tag,
 	__deref_out_bcount_opt(*seg_sizep)	caddr_t *seg_datap,
 	__out					size_t *seg_sizep)
 {
 	caddr_t seg_data = NULL;
 	size_t partn_size = 0;
 	size_t length;
 	caddr_t data;
 	int retry;
 	efx_rc_t rc;
 
 	/* Allocate sufficient memory for the entire partition */
 	if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
 		goto fail1;
 
 	if (partn_size == 0) {
 		rc = ENOENT;
 		goto fail2;
 	}
 
 	EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data);
 	if (seg_data == NULL) {
 		rc = ENOMEM;
 		goto fail3;
 	}
 
 	/*
 	 * Read the first segment in a TLV partition. Retry until consistent
 	 * segment contents are returned. Inconsistent data may be read if:
 	 *  a) the segment contents are invalid
 	 *  b) the MC has rebooted while we were reading the partition
 	 *  c) the partition has been modified while we were reading it
 	 * Limit retry attempts to ensure forward progress.
 	 */
 	retry = 10;
 	do {
 		rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
 		    seg_data, partn_size);
 	} while ((rc == EAGAIN) && (--retry > 0));
 
 	if (rc != 0) {
 		/* Failed to obtain consistent segment data */
 		goto fail4;
 	}
 
 	if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size,
 		    tag, &data, &length)) != 0)
 		goto fail5;
 
 	EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
 
 	*seg_datap = data;
 	*seg_sizep = length;
 
 	return (0);
 
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 
 	EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /* Compute the size of a segment. */
 	static	__checkReturn	efx_rc_t
 ef10_nvram_buf_segment_size(
 	__in			caddr_t seg_data,
 	__in			size_t max_seg_size,
 	__out			size_t *seg_sizep)
 {
 	efx_rc_t rc;
 	tlv_cursor_t cursor;
 	struct tlv_partition_header *header;
 	uint32_t cksum;
 	int pos;
 	uint32_t *end_tag_position;
 	uint32_t segment_length;
 
 	/* A PARTITION_HEADER tag must be the first item at the given offset */
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
 		    max_seg_size)) != 0) {
 		rc = EFAULT;
 		goto fail1;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
 		rc = EINVAL;
 		goto fail2;
 	}
 	header = (struct tlv_partition_header *)tlv_item(&cursor);
 
 	/* Check TLV segment length (includes the END tag) */
 	*seg_sizep = __LE_TO_CPU_32(header->total_length);
 	if (*seg_sizep > max_seg_size) {
 		rc = EFBIG;
 		goto fail3;
 	}
 
 	/* Check segment ends with PARTITION_TRAILER and END tags */
 	if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
 		rc = EINVAL;
 		goto fail4;
 	}
 
 	if ((rc = tlv_advance(&cursor)) != 0) {
 		rc = EINVAL;
 		goto fail5;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_END) {
 		rc = EINVAL;
 		goto fail6;
 	}
 	end_tag_position = cursor.current;
 
 	/* Verify segment checksum */
 	cksum = 0;
 	for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
 		cksum += *((uint32_t *)(seg_data + pos));
 	}
 	if (cksum != 0) {
 		rc = EINVAL;
 		goto fail7;
 	}
 
 	/*
 	 * Calculate total length from HEADER to END tags and compare to
 	 * max_seg_size and the total_length field in the HEADER tag.
 	 */
 	segment_length = tlv_block_length_used(&cursor);
 
 	if (segment_length > max_seg_size) {
 		rc = EINVAL;
 		goto fail8;
 	}
 
 	if (segment_length != *seg_sizep) {
 		rc = EINVAL;
 		goto fail9;
 	}
 
 	/* Skip over the first HEADER tag. */
 	rc = tlv_rewind(&cursor);
 	rc = tlv_advance(&cursor);
 
 	while (rc == 0) {
 		if (tlv_tag(&cursor) == TLV_TAG_END) {
 			/* Check that the END tag is the one found earlier. */
 			if (cursor.current != end_tag_position)
 				goto fail10;
 			break;
 		}
 		/* Check for duplicate HEADER tags before the END tag. */
 		if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
 			rc = EINVAL;
 			goto fail11;
 		}
 
 		rc = tlv_advance(&cursor);
 	}
 	if (rc != 0)
 		goto fail12;
 
 	return (0);
 
 fail12:
 	EFSYS_PROBE(fail12);
 fail11:
 	EFSYS_PROBE(fail11);
 fail10:
 	EFSYS_PROBE(fail10);
 fail9:
 	EFSYS_PROBE(fail9);
 fail8:
 	EFSYS_PROBE(fail8);
 fail7:
 	EFSYS_PROBE(fail7);
 fail6:
 	EFSYS_PROBE(fail6);
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * Add or update a single TLV item in a host memory buffer containing a TLV
  * formatted segment. Historically partitions consisted of only one segment.
  */
 	__checkReturn			efx_rc_t
 ef10_nvram_buf_write_tlv(
 	__inout_bcount(max_seg_size)	caddr_t seg_data,
 	__in				size_t max_seg_size,
 	__in				uint32_t tag,
 	__in_bcount(tag_size)		caddr_t tag_data,
 	__in				size_t tag_size,
 	__out				size_t *total_lengthp)
 {
 	tlv_cursor_t cursor;
 	struct tlv_partition_header *header;
 	struct tlv_partition_trailer *trailer;
 	uint32_t generation;
 	uint32_t cksum;
 	int pos;
 	efx_rc_t rc;
 
 	/* A PARTITION_HEADER tag must be the first item (at offset zero) */
 	if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
 			max_seg_size)) != 0) {
 		rc = EFAULT;
 		goto fail1;
 	}
 	if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
 		rc = EINVAL;
 		goto fail2;
 	}
 	header = (struct tlv_partition_header *)tlv_item(&cursor);
 
 	/* Update the TLV chain to contain the new data */
 	if ((rc = tlv_find(&cursor, tag)) == 0) {
 		/* Modify existing TLV item */
 		if ((rc = tlv_modify(&cursor, tag,
 			    (uint8_t *)tag_data, tag_size)) != 0)
 			goto fail3;
 	} else {
 		/* Insert a new TLV item before the PARTITION_TRAILER */
 		rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER);
 		if (rc != 0) {
 			rc = EINVAL;
 			goto fail4;
 		}
 		if ((rc = tlv_insert(&cursor, tag,
 			    (uint8_t *)tag_data, tag_size)) != 0) {
 			rc = EINVAL;
 			goto fail5;
 		}
 	}
 
 	/* Find the trailer tag */
 	if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
 		rc = EINVAL;
 		goto fail6;
 	}
 	trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
 
 	/* Update PARTITION_HEADER and PARTITION_TRAILER fields */
 	*total_lengthp = tlv_block_length_used(&cursor);
 	if (*total_lengthp > max_seg_size) {
 		rc = ENOSPC;
 		goto fail7;
 	}
 	generation = __LE_TO_CPU_32(header->generation) + 1;
 
 	header->total_length	= __CPU_TO_LE_32(*total_lengthp);
 	header->generation	= __CPU_TO_LE_32(generation);
 	trailer->generation	= __CPU_TO_LE_32(generation);
 
 	/* Recompute PARTITION_TRAILER checksum */
 	trailer->checksum = 0;
 	cksum = 0;
 	for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) {
 		cksum += *((uint32_t *)(seg_data + pos));
 	}
 	trailer->checksum = ~cksum + 1;
 
 	return (0);
 
 fail7:
 	EFSYS_PROBE(fail7);
 fail6:
 	EFSYS_PROBE(fail6);
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * Add or update a single TLV item in the first segment of a TLV formatted
  * dynamic config partition. The first segment is the current active
  * configuration.
  */
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_write_tlv(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			uint32_t tag,
 	__in_bcount(size)	caddr_t data,
 	__in			size_t size)
 {
 	return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data,
 	    size, B_FALSE);
 }
 
 /*
  * Read a segment from nvram at the given offset into a buffer (segment_data)
  * and optionally write a new tag to it.
  */
 static	__checkReturn		efx_rc_t
 ef10_nvram_segment_write_tlv(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			uint32_t tag,
 	__in_bcount(size)	caddr_t data,
 	__in			size_t size,
 	__inout			caddr_t *seg_datap,
 	__inout			size_t *partn_offsetp,
 	__inout			size_t *src_remain_lenp,
 	__inout			size_t *dest_remain_lenp,
 	__in			boolean_t write)
 {
 	efx_rc_t rc;
 	efx_rc_t status;
 	size_t original_segment_size;
 	size_t modified_segment_size;
 
 	/*
 	 * Read the segment from NVRAM into the segment_data buffer and validate
 	 * it, returning if it does not validate. This is not a failure unless
 	 * this is the first segment in a partition. In this case the caller
 	 * must propagate the error.
 	 */
 	status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
 	    *seg_datap, *src_remain_lenp);
 	if (status != 0) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	status = ef10_nvram_buf_segment_size(*seg_datap,
 	    *src_remain_lenp, &original_segment_size);
 	if (status != 0) {
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	if (write) {
 		/* Update the contents of the segment in the buffer */
 		if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
 			*dest_remain_lenp, tag, data, size,
 			&modified_segment_size)) != 0) {
 			goto fail3;
 		}
 		*dest_remain_lenp -= modified_segment_size;
 		*seg_datap += modified_segment_size;
 	} else {
 		/*
 		 * We won't modify this segment, but still need to update the
 		 * remaining lengths and pointers.
 		 */
 		*dest_remain_lenp -= original_segment_size;
 		*seg_datap += original_segment_size;
 	}
 
 	*partn_offsetp += original_segment_size;
 	*src_remain_lenp -= original_segment_size;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * Add or update a single TLV item in either the first segment or in all
  * segments in a TLV formatted dynamic config partition. Dynamic config
  * partitions on boards that support RFID are divided into a number of segments,
  * each formatted like a partition, with header, trailer and end tags. The first
  * segment is the current active configuration.
  *
  * The segments are initialised by manftest and each contain a different
  * configuration e.g. firmware variant. The firmware can be instructed
  * via RFID to copy a segment to replace the first segment, hence changing the
  * active configuration.  This allows ops to change the configuration of a board
  * prior to shipment using RFID.
  *
  * Changes to the dynamic config may need to be written to all segments (e.g.
  * firmware versions) or just the first segment (changes to the active
  * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
  * If only the first segment is written the code still needs to be aware of the
  * possible presence of subsequent segments as writing to a segment may cause
  * its size to increase, which would overwrite the subsequent segments and
  * invalidate them.
  */
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_write_segment_tlv(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			uint32_t tag,
 	__in_bcount(size)	caddr_t data,
 	__in			size_t size,
 	__in			boolean_t all_segments)
 {
 	size_t partn_size = 0;
 	caddr_t partn_data;
 	size_t total_length = 0;
 	efx_rc_t rc;
 	size_t current_offset = 0;
 	size_t remaining_original_length;
 	size_t remaining_modified_length;
 	caddr_t segment_data;
 
 	EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);
 
 	/* Allocate sufficient memory for the entire partition */
 	if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
 		goto fail1;
 
 	EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
 	if (partn_data == NULL) {
 		rc = ENOMEM;
 		goto fail2;
 	}
 
 	remaining_original_length = partn_size;
 	remaining_modified_length = partn_size;
 	segment_data = partn_data;
 
 	/* Lock the partition */
 	if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
 		goto fail3;
 
 	/* Iterate over each (potential) segment to update it. */
 	do {
 		boolean_t write = all_segments || current_offset == 0;
 
 		rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
 		    &segment_data, &current_offset, &remaining_original_length,
 		    &remaining_modified_length, write);
 		if (rc != 0) {
 			if (current_offset == 0) {
 				/*
 				 * If no data has been read then the first
 				 * segment is invalid, which is an error.
 				 */
 				goto fail4;
 			}
 			break;
 		}
 	} while (current_offset < partn_size);
 
 	total_length = segment_data - partn_data;
 
 	/*
 	 * We've run out of space.  This should actually be dealt with by
 	 * ef10_nvram_buf_write_tlv returning ENOSPC.
 	 */
 	if (total_length > partn_size) {
 		rc = ENOSPC;
 		goto fail5;
 	}
 
 	/* Erase the whole partition in NVRAM */
 	if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0)
 		goto fail6;
 
 	/* Write new partition contents from the buffer to NVRAM */
 	if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data,
 		    total_length)) != 0)
 		goto fail7;
 
 	/* Unlock the partition */
 	ef10_nvram_partn_unlock(enp, partn, NULL);
 
 	EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
 
 	return (0);
 
 fail7:
 	EFSYS_PROBE(fail7);
 fail6:
 	EFSYS_PROBE(fail6);
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 
 	ef10_nvram_partn_unlock(enp, partn, NULL);
 fail3:
 	EFSYS_PROBE(fail3);
 
 	EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * Get the size of a NVRAM partition. This is the total size allocated in nvram,
  * not the data used by the segments in the partition.
  */
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_size(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__out			size_t *sizep)
 {
 	efx_rc_t rc;
 
 	if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
 	    NULL, NULL, NULL)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_lock(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn)
 {
 	efx_rc_t rc;
 
 	if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_read_mode(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			unsigned int offset,
 	__out_bcount(size)	caddr_t data,
 	__in			size_t size,
 	__in			uint32_t mode)
 {
 	size_t chunk;
 	efx_rc_t rc;
 
 	while (size > 0) {
 		chunk = MIN(size, EF10_NVRAM_CHUNK);
 
 		if ((rc = efx_mcdi_nvram_read(enp, partn, offset,
 			    data, chunk, mode)) != 0) {
 			goto fail1;
 		}
 
 		size -= chunk;
 		data += chunk;
 		offset += chunk;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_read(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			unsigned int offset,
 	__out_bcount(size)	caddr_t data,
 	__in			size_t size)
 {
 	/*
 	 * Read requests which come in through the EFX API expect to
 	 * read the current, active partition.
 	 */
 	return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
 			    MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_erase(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			unsigned int offset,
 	__in			size_t size)
 {
 	efx_rc_t rc;
 	uint32_t erase_size;
 
 	if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
 	    &erase_size, NULL)) != 0)
 		goto fail1;
 
 	if (erase_size == 0) {
 		if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0)
 			goto fail2;
 	} else {
 		if (size % erase_size != 0) {
 			rc = EINVAL;
 			goto fail3;
 		}
 		while (size > 0) {
 			if ((rc = efx_mcdi_nvram_erase(enp, partn, offset,
 			    erase_size)) != 0)
 				goto fail4;
 			offset += erase_size;
 			size -= erase_size;
 		}
 	}
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_write(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in			unsigned int offset,
 	__out_bcount(size)	caddr_t data,
 	__in			size_t size)
 {
 	size_t chunk;
 	uint32_t write_size;
 	efx_rc_t rc;
 
 	if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
 	    NULL, &write_size)) != 0)
 		goto fail1;
 
 	if (write_size != 0) {
 		/*
 		 * Check that the size is a multiple of the write chunk size if
 		 * the write chunk size is available.
 		 */
 		if (size % write_size != 0) {
 			rc = EINVAL;
 			goto fail2;
 		}
 	} else {
 		write_size = EF10_NVRAM_CHUNK;
 	}
 
 	while (size > 0) {
 		chunk = MIN(size, write_size);
 
 		if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
 			    data, chunk)) != 0) {
 			goto fail3;
 		}
 
 		size -= chunk;
 		data += chunk;
 		offset += chunk;
 	}
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_unlock(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__out_opt		uint32_t *resultp)
 {
 	boolean_t reboot = B_FALSE;
 	efx_rc_t rc;
 
 	if (resultp != NULL)
 		*resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
 
 	rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, resultp);
 	if (rc != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_set_version(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__in_ecount(4)		uint16_t version[4])
 {
 	struct tlv_partition_version partn_version;
 	size_t size;
 	efx_rc_t rc;
 
 	/* Add or modify partition version TLV item */
 	partn_version.version_w = __CPU_TO_LE_16(version[0]);
 	partn_version.version_x = __CPU_TO_LE_16(version[1]);
 	partn_version.version_y = __CPU_TO_LE_16(version[2]);
 	partn_version.version_z = __CPU_TO_LE_16(version[3]);
 
 	size = sizeof (partn_version) - (2 * sizeof (uint32_t));
 
 	/* Write the version number to all segments in the partition */
 	if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
 		    NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
 		    TLV_TAG_PARTITION_VERSION(partn),
 		    (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
 
 #if EFSYS_OPT_NVRAM
 
 typedef struct ef10_parttbl_entry_s {
 	unsigned int		partn;
 	unsigned int		port;
 	efx_nvram_type_t	nvtype;
 } ef10_parttbl_entry_t;
 
 /* Translate EFX NVRAM types to firmware partition types */
 static ef10_parttbl_entry_t hunt_parttbl[] = {
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   1, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   2, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   3, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   4, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  1, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  2, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  3, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  4, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   1, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   2, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   3, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   4, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   1, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   2, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   3, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   4, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_FPGA,		   1, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA,		   2, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA,		   3, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA,		   4, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   1, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   2, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   3, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   4, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   1, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   2, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   3, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   4, EFX_NVRAM_LICENSE}
 };
 
 static ef10_parttbl_entry_t medford_parttbl[] = {
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   1, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   2, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   3, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   4, EFX_NVRAM_MC_FIRMWARE},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  1, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  2, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  3, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  4, EFX_NVRAM_MC_GOLDEN},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   1, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   2, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   3, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   4, EFX_NVRAM_BOOTROM},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   1, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   2, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   3, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   4, EFX_NVRAM_DYNAMIC_CFG},
 	{NVRAM_PARTITION_TYPE_FPGA,		   1, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA,		   2, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA,		   3, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA,		   4, EFX_NVRAM_FPGA},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   1, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   2, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   3, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_FPGA_BACKUP,	   4, EFX_NVRAM_FPGA_BACKUP},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   1, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   2, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   3, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_LICENSE,		   4, EFX_NVRAM_LICENSE},
 	{NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   1, EFX_NVRAM_UEFIROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   2, EFX_NVRAM_UEFIROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   3, EFX_NVRAM_UEFIROM},
 	{NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   4, EFX_NVRAM_UEFIROM}
 };
 
 static	__checkReturn		efx_rc_t
 ef10_parttbl_get(
 	__in			efx_nic_t *enp,
 	__out			ef10_parttbl_entry_t **parttblp,
 	__out			size_t *parttbl_rowsp)
 {
 	switch (enp->en_family) {
 	case EFX_FAMILY_HUNTINGTON:
 		*parttblp = hunt_parttbl;
 		*parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
 		break;
 
 	case EFX_FAMILY_MEDFORD:
 		*parttblp = medford_parttbl;
 		*parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
 		break;
 
 	default:
 		EFSYS_ASSERT(B_FALSE);
 		return (EINVAL);
 	}
 	return (0);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_type_to_partn(
 	__in			efx_nic_t *enp,
 	__in			efx_nvram_type_t type,
 	__out			uint32_t *partnp)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	ef10_parttbl_entry_t *parttbl = NULL;
 	size_t parttbl_rows = 0;
 	unsigned int i;
 
 	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
 	EFSYS_ASSERT(partnp != NULL);
 
 	if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
 		for (i = 0; i < parttbl_rows; i++) {
 			ef10_parttbl_entry_t *entry = &parttbl[i];
 
 			if (entry->nvtype == type &&
 			    entry->port == emip->emi_port) {
 				*partnp = entry->partn;
 				return (0);
 			}
 		}
 	}
 
 	return (ENOTSUP);
 }
 
 #if EFSYS_OPT_DIAG
 
 static	__checkReturn		efx_rc_t
 ef10_nvram_partn_to_type(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__out			efx_nvram_type_t *typep)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	ef10_parttbl_entry_t *parttbl = NULL;
 	size_t parttbl_rows = 0;
 	unsigned int i;
 
 	EFSYS_ASSERT(typep != NULL);
 
 	if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
 		for (i = 0; i < parttbl_rows; i++) {
 			ef10_parttbl_entry_t *entry = &parttbl[i];
 
 			if (entry->partn == partn &&
 			    entry->port == emip->emi_port) {
 				*typep = entry->nvtype;
 				return (0);
 			}
 		}
 	}
 
 	return (ENOTSUP);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_test(
 	__in			efx_nic_t *enp)
 {
 	efx_nvram_type_t type;
 	unsigned int npartns = 0;
 	uint32_t *partns = NULL;
 	size_t size;
 	unsigned int i;
 	efx_rc_t rc;
 
 	/* Read available partitions from NVRAM partition map */
 	size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t);
 	EFSYS_KMEM_ALLOC(enp->en_esip, size, partns);
 	if (partns == NULL) {
 		rc = ENOMEM;
 		goto fail1;
 	}
 
 	if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size,
 		    &npartns)) != 0) {
 		goto fail2;
 	}
 
 	for (i = 0; i < npartns; i++) {
 		/* Check if the partition is supported for this port */
 		if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0)
 			continue;
 
 		if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0)
 			goto fail3;
 	}
 
 	EFSYS_KMEM_FREE(enp->en_esip, size, partns);
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 	EFSYS_KMEM_FREE(enp->en_esip, size, partns);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 	return (rc);
 }
 
 #endif	/* EFSYS_OPT_DIAG */
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_get_version(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__out			uint32_t *subtypep,
 	__out_ecount(4)		uint16_t version[4])
 {
 	efx_rc_t rc;
 
 	/* FIXME: get highest partn version from all ports */
 	/* FIXME: return partn description if available */
 
 	if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep,
 		    version, NULL, 0)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_rw_start(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn,
 	__out			size_t *chunk_sizep)
 {
 	efx_rc_t rc;
 
 	if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
 		goto fail1;
 
 	if (chunk_sizep != NULL)
 		*chunk_sizep = EF10_NVRAM_CHUNK;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 ef10_nvram_partn_rw_finish(
 	__in			efx_nic_t *enp,
 	__in			uint32_t partn)
 {
 	efx_rc_t rc;
 
 	if ((rc = ef10_nvram_partn_unlock(enp, partn, NULL)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #endif	/* EFSYS_OPT_NVRAM */
 
 #endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
Index: stable/12/sys/dev/sfxge/common/ef10_rx.c
===================================================================
--- stable/12/sys/dev/sfxge/common/ef10_rx.c	(revision 342323)
+++ stable/12/sys/dev/sfxge/common/ef10_rx.c	(revision 342324)
@@ -1,841 +1,843 @@
 /*-
  * Copyright (c) 2012-2016 Solarflare Communications Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * The views and conclusions contained in the software and documentation are
  * those of the authors and should not be interpreted as representing official
  * policies, either expressed or implied, of the FreeBSD Project.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "efx.h"
 #include "efx_impl.h"
 
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_init_rxq(
 	__in		efx_nic_t *enp,
 	__in		uint32_t size,
 	__in		uint32_t target_evq,
 	__in		uint32_t label,
 	__in		uint32_t instance,
 	__in		efsys_mem_t *esmp,
 	__in		boolean_t disable_scatter)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[
 	    MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXQ_MAXNDESCS))];
 	int npages = EFX_RXQ_NBUFS(size);
 	int i;
 	efx_qword_t *dma_addr;
 	uint64_t addr;
 	efx_rc_t rc;
 
 	/* If this changes, then the payload size might need to change. */
 	EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
 	EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_INIT_RXQ;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_INIT_RXQ_IN_LEN(npages);
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_INIT_RXQ_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_SIZE, size);
 	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_TARGET_EVQ, target_evq);
 	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_LABEL, label);
 	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_INSTANCE, instance);
 	MCDI_IN_POPULATE_DWORD_6(req, INIT_RXQ_IN_FLAGS,
 			    INIT_RXQ_IN_FLAG_BUFF_MODE, 0,
 			    INIT_RXQ_IN_FLAG_HDR_SPLIT, 0,
 			    INIT_RXQ_IN_FLAG_TIMESTAMP, 0,
 			    INIT_RXQ_IN_CRC_MODE, 0,
 			    INIT_RXQ_IN_FLAG_PREFIX, 1,
 			    INIT_RXQ_IN_FLAG_DISABLE_SCATTER, disable_scatter);
 	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_OWNER_ID, 0);
 	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
 
 	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
 	addr = EFSYS_MEM_ADDR(esmp);
 
 	for (i = 0; i < npages; i++) {
 		EFX_POPULATE_QWORD_2(*dma_addr,
 		    EFX_DWORD_1, (uint32_t)(addr >> 32),
 		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
 
 		dma_addr++;
 		addr += EFX_BUF_SIZE;
 	}
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_fini_rxq(
 	__in		efx_nic_t *enp,
 	__in		uint32_t instance)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
 			    MC_CMD_FINI_RXQ_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_FINI_RXQ;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	/*
 	 * EALREADY is not an error, but indicates that the MC has rebooted and
 	 * that the RXQ has already been destroyed.
 	 */
 	if (rc != EALREADY)
 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #if EFSYS_OPT_RX_SCALE
 static	__checkReturn	efx_rc_t
 efx_mcdi_rss_context_alloc(
 	__in		efx_nic_t *enp,
 	__in		efx_rx_scale_support_t scale_support,
 	__in		uint32_t num_queues,
 	__out		uint32_t *rss_contextp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
 			    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
 	uint32_t rss_context;
 	uint32_t context_type;
 	efx_rc_t rc;
 
 	if (num_queues > EFX_MAXRSS) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	switch (scale_support) {
 	case EFX_RX_SCALE_EXCLUSIVE:
 		context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
 		break;
 	case EFX_RX_SCALE_SHARED:
 		context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
 		break;
 	default:
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
 	    EVB_PORT_ID_ASSIGNED);
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
 	/* NUM_QUEUES is only used to validate indirection table offsets */
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail3;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail4;
 	}
 
 	rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
 	if (rss_context == EF10_RSS_CONTEXT_INVALID) {
 		rc = ENOENT;
 		goto fail5;
 	}
 
 	*rss_contextp = rss_context;
 
 	return (0);
 
 fail5:
 	EFSYS_PROBE(fail5);
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 #if EFSYS_OPT_RX_SCALE
 static			efx_rc_t
 efx_mcdi_rss_context_free(
 	__in		efx_nic_t *enp,
 	__in		uint32_t rss_context)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
 			    MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
 	efx_rc_t rc;
 
 	if (rss_context == EF10_RSS_CONTEXT_INVALID) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 #if EFSYS_OPT_RX_SCALE
 static			efx_rc_t
 efx_mcdi_rss_context_set_flags(
 	__in		efx_nic_t *enp,
 	__in		uint32_t rss_context,
 	__in		efx_rx_hash_type_t type)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
 			    MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
 	efx_rc_t rc;
 
 	if (rss_context == EF10_RSS_CONTEXT_INVALID) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
 	    rss_context);
 
 	MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
 	    RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
 	    (type & EFX_RX_HASH_IPV4) ? 1 : 0,
 	    RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
 	    (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
 	    RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
 	    (type & EFX_RX_HASH_IPV6) ? 1 : 0,
 	    RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
 	    (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 #if EFSYS_OPT_RX_SCALE
 static			efx_rc_t
 efx_mcdi_rss_context_set_key(
 	__in		efx_nic_t *enp,
 	__in		uint32_t rss_context,
 	__in_ecount(n)	uint8_t *key,
 	__in		size_t n)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
 			    MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
 	efx_rc_t rc;
 
 	if (rss_context == EF10_RSS_CONTEXT_INVALID) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
 	    rss_context);
 
 	EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
 	if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
 	    key, n);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail3;
 	}
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 #if EFSYS_OPT_RX_SCALE
 static			efx_rc_t
 efx_mcdi_rss_context_set_table(
 	__in		efx_nic_t *enp,
 	__in		uint32_t rss_context,
 	__in_ecount(n)	unsigned int *table,
 	__in		size_t n)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
 			    MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
 	uint8_t *req_table;
 	int i, rc;
 
 	if (rss_context == EF10_RSS_CONTEXT_INVALID) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
 	    rss_context);
 
 	req_table =
 	    MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
 
 	for (i = 0;
 	    i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
 	    i++) {
 		req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
 	}
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 
 	__checkReturn	efx_rc_t
 ef10_rx_init(
 	__in		efx_nic_t *enp)
 {
 #if EFSYS_OPT_RX_SCALE
 
 	if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
 		&enp->en_rss_context) == 0) {
 		/*
 		 * Allocated an exclusive RSS context, which allows both the
 		 * indirection table and key to be modified.
 		 */
 		enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
 		enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
 	} else {
 		/*
 		 * Failed to allocate an exclusive RSS context. Continue
 		 * operation without support for RSS. The pseudo-header in
 		 * received packets will not contain a Toeplitz hash value.
 		 */
 		enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
 		enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
 	}
 
 #endif /* EFSYS_OPT_RX_SCALE */
 
 	return (0);
 }
 
 #if EFSYS_OPT_RX_SCATTER
 	__checkReturn	efx_rc_t
 ef10_rx_scatter_enable(
 	__in		efx_nic_t *enp,
 	__in		unsigned int buf_size)
 {
 	_NOTE(ARGUNUSED(enp, buf_size))
 	return (0);
 }
 #endif	/* EFSYS_OPT_RX_SCATTER */
 
 #if EFSYS_OPT_RX_SCALE
 	__checkReturn	efx_rc_t
 ef10_rx_scale_mode_set(
 	__in		efx_nic_t *enp,
 	__in		efx_rx_hash_alg_t alg,
 	__in		efx_rx_hash_type_t type,
 	__in		boolean_t insert)
 {
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
 	EFSYS_ASSERT3U(insert, ==, B_TRUE);
 
 	if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
 		rc = ENOTSUP;
 		goto fail2;
 	}
 
 	if ((rc = efx_mcdi_rss_context_set_flags(enp,
 		    enp->en_rss_context, type)) != 0)
 		goto fail3;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 #if EFSYS_OPT_RX_SCALE
 	__checkReturn	efx_rc_t
 ef10_rx_scale_key_set(
 	__in		efx_nic_t *enp,
 	__in_ecount(n)	uint8_t *key,
 	__in		size_t n)
 {
 	efx_rc_t rc;
 
 	if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
 		rc = ENOTSUP;
 		goto fail1;
 	}
 
 	if ((rc = efx_mcdi_rss_context_set_key(enp,
 	    enp->en_rss_context, key, n)) != 0)
 		goto fail2;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
 
 #if EFSYS_OPT_RX_SCALE
 	__checkReturn	efx_rc_t
 ef10_rx_scale_tbl_set(
 	__in		efx_nic_t *enp,
 	__in_ecount(n)	unsigned int *table,
 	__in		size_t n)
 {
 	efx_rc_t rc;
 
 	if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
 		rc = ENOTSUP;
 		goto fail1;
 	}
 
 	if ((rc = efx_mcdi_rss_context_set_table(enp,
 	    enp->en_rss_context, table, n)) != 0)
 		goto fail2;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_RX_SCALE */
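
The three EFSYS_OPT_RX_SCALE entry points above are normally used together once ef10_rx_init() has obtained an exclusive RSS context. A minimal sketch (not part of this change) of how a client might chain them; the helper name, hash-type selection and error handling are illustrative assumptions.

/* Hypothetical helper; hash-type selection is illustrative only. */
static	__checkReturn	efx_rc_t
hyp_rx_rss_configure(
	__in			efx_nic_t *enp,
	__in_ecount(key_len)	uint8_t *key,
	__in			size_t key_len,
	__in_ecount(tbl_len)	unsigned int *table,
	__in			size_t tbl_len)
{
	efx_rc_t rc;

	/* Toeplitz hashing with hash insertion into the RX prefix */
	if ((rc = ef10_rx_scale_mode_set(enp, EFX_RX_HASHALG_TOEPLITZ,
	    EFX_RX_HASH_TCPIPV4 | EFX_RX_HASH_TCPIPV6, B_TRUE)) != 0)
		return (rc);

	/* key_len must equal MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN */
	if ((rc = ef10_rx_scale_key_set(enp, key, key_len)) != 0)
		return (rc);

	/* Each table entry is an RX queue index, applied modulo tbl_len */
	return (ef10_rx_scale_tbl_set(enp, table, tbl_len));
}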
 
 
 /*
  * EF10 RX pseudo-header
  * ---------------------
  *
  * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
  *
  *  +00: Toeplitz hash value.
  *       (32bit little-endian)
  *  +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
  *       (16bit big-endian)
  *  +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
  *       (16bit big-endian)
  *  +08: Packet Length. Zero if the RX datapath was in cut-through mode.
  *       (16bit little-endian)
  *  +10: MAC timestamp. Zero if timestamping is not enabled.
  *       (32bit little-endian)
  *
  * See "The RX Pseudo-header" in SF-109306-TC.
  */
 
 	__checkReturn	efx_rc_t
 ef10_rx_prefix_pktlen(
 	__in		efx_nic_t *enp,
 	__in		uint8_t *buffer,
 	__out		uint16_t *lengthp)
 {
 	_NOTE(ARGUNUSED(enp))
 
 	/*
 	 * The RX pseudo-header contains the packet length, excluding the
 	 * pseudo-header. If the hardware receive datapath was operating in
 	 * cut-through mode then the length in the RX pseudo-header will be
 	 * zero, and the packet length must be obtained from the DMA length
 	 * reported in the RX event.
 	 */
 	*lengthp = buffer[8] | (buffer[9] << 8);
 	return (0);
 }
 
 #if EFSYS_OPT_RX_SCALE
 	__checkReturn	uint32_t
 ef10_rx_prefix_hash(
 	__in		efx_nic_t *enp,
 	__in		efx_rx_hash_alg_t func,
 	__in		uint8_t *buffer)
 {
 	_NOTE(ARGUNUSED(enp))
 
 	switch (func) {
 	case EFX_RX_HASHALG_TOEPLITZ:
 		return (buffer[0] |
 		    (buffer[1] << 8) |
 		    (buffer[2] << 16) |
 		    (buffer[3] << 24));
 
 	default:
 		EFSYS_ASSERT(0);
 		return (0);
 	}
 }
 #endif /* EFSYS_OPT_RX_SCALE */
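
The layout comment above is what ef10_rx_prefix_pktlen() and ef10_rx_prefix_hash() decode piecemeal. A minimal sketch (not part of this change) of a parser for all five fields; the struct and helper names are illustrative assumptions, and only the offsets and endianness come from the comment.

/* Hypothetical struct and parser; field names are illustrative only. */
typedef struct hyp_rx_prefix_s {
	uint32_t	hrp_hash;	/* +00, little-endian */
	uint16_t	hrp_outer_vlan;	/* +04, big-endian */
	uint16_t	hrp_inner_vlan;	/* +06, big-endian */
	uint16_t	hrp_pkt_length;	/* +08, little-endian */
	uint32_t	hrp_timestamp;	/* +10, little-endian */
} hyp_rx_prefix_t;

static			void
hyp_rx_prefix_parse(
	__in		const uint8_t *buffer,
	__out		hyp_rx_prefix_t *hrpp)
{
	hrpp->hrp_hash = buffer[0] | (buffer[1] << 8) |
	    (buffer[2] << 16) | ((uint32_t)buffer[3] << 24);
	hrpp->hrp_outer_vlan = (buffer[4] << 8) | buffer[5];
	hrpp->hrp_inner_vlan = (buffer[6] << 8) | buffer[7];
	hrpp->hrp_pkt_length = buffer[8] | (buffer[9] << 8);
	hrpp->hrp_timestamp = buffer[10] | (buffer[11] << 8) |
	    (buffer[12] << 16) | ((uint32_t)buffer[13] << 24);
}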
 
 			void
 ef10_rx_qpost(
 	__in		efx_rxq_t *erp,
 	__in_ecount(n)	efsys_dma_addr_t *addrp,
 	__in		size_t size,
 	__in		unsigned int n,
 	__in		unsigned int completed,
 	__in		unsigned int added)
 {
 	efx_qword_t qword;
 	unsigned int i;
 	unsigned int offset;
 	unsigned int id;
 
+	_NOTE(ARGUNUSED(completed))
+
 	/* The client driver must not overfill the queue */
 	EFSYS_ASSERT3U(added - completed + n, <=,
 	    EFX_RXQ_LIMIT(erp->er_mask + 1));
 
 	id = added & (erp->er_mask);
 	for (i = 0; i < n; i++) {
 		EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
 		    unsigned int, id, efsys_dma_addr_t, addrp[i],
 		    size_t, size);
 
 		EFX_POPULATE_QWORD_3(qword,
 		    ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
 		    ESF_DZ_RX_KER_BUF_ADDR_DW0,
 		    (uint32_t)(addrp[i] & 0xffffffff),
 		    ESF_DZ_RX_KER_BUF_ADDR_DW1,
 		    (uint32_t)(addrp[i] >> 32));
 
 		offset = id * sizeof (efx_qword_t);
 		EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
 
 		id = (id + 1) & (erp->er_mask);
 	}
 }
 
 			void
 ef10_rx_qpush(
 	__in	efx_rxq_t *erp,
 	__in	unsigned int added,
 	__inout	unsigned int *pushedp)
 {
 	efx_nic_t *enp = erp->er_enp;
 	unsigned int pushed = *pushedp;
 	uint32_t wptr;
 	efx_dword_t dword;
 
 	/* Hardware has alignment restriction for WPTR */
 	wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
 	if (pushed == wptr)
 		return;
 
 	*pushedp = wptr;
 
 	/* Push the populated descriptors out */
 	wptr &= erp->er_mask;
 
 	EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
 
 	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
 	EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
 	    wptr, pushed & erp->er_mask);
 	EFSYS_PIO_WRITE_BARRIER();
 	EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
 			    erp->er_index, &dword, B_FALSE);
 }
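
A minimal usage sketch (not part of this change) showing how ef10_rx_qpost() and ef10_rx_qpush() fit together when refilling a ring; the helper name and counter handling are illustrative assumptions.

/* Hypothetical refill path; counter handling is illustrative only. */
static			void
hyp_rxq_refill(
	__in			efx_rxq_t *erp,
	__in_ecount(nbufs)	efsys_dma_addr_t *addrs,
	__in			size_t buf_size,
	__in			unsigned int nbufs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp,
	__inout			unsigned int *pushedp)
{
	/* Write descriptors for the new buffers after the current fill level */
	ef10_rx_qpost(erp, addrs, buf_size, nbufs, completed, *addedp);
	*addedp += nbufs;

	/*
	 * Ring the doorbell; only a WPTR aligned to EF10_RX_WPTR_ALIGN is
	 * pushed, so a partial batch may remain unpushed until the next call.
	 */
	ef10_rx_qpush(erp, *addedp, pushedp);
}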
 
 	__checkReturn	efx_rc_t
 ef10_rx_qflush(
 	__in	efx_rxq_t *erp)
 {
 	efx_nic_t *enp = erp->er_enp;
 	efx_rc_t rc;
 
 	if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	/*
 	 * EALREADY is not an error, but indicates that the MC has rebooted and
 	 * that the RXQ has already been destroyed. Callers need to know that
 	 * the RXQ flush has completed to avoid waiting until timeout for a
 	 * flush done event that will not be delivered.
 	 */
 	if (rc != EALREADY)
 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 		void
 ef10_rx_qenable(
 	__in	efx_rxq_t *erp)
 {
 	/* FIXME */
 	_NOTE(ARGUNUSED(erp))
 	/* FIXME */
 }
 
 	__checkReturn	efx_rc_t
 ef10_rx_qcreate(
 	__in		efx_nic_t *enp,
 	__in		unsigned int index,
 	__in		unsigned int label,
 	__in		efx_rxq_type_t type,
 	__in		efsys_mem_t *esmp,
 	__in		size_t n,
 	__in		uint32_t id,
 	__in		efx_evq_t *eep,
 	__in		efx_rxq_t *erp)
 {
 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
 	efx_rc_t rc;
 	boolean_t disable_scatter;
 
 	_NOTE(ARGUNUSED(id, erp))
 
 	EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
 	EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
 	EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
 
 	EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
 	EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
 
 	if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 	if (index >= encp->enc_rxq_limit) {
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	/* Scatter can only be disabled if the firmware supports doing so */
 	if (type == EFX_RXQ_TYPE_SCATTER)
 		disable_scatter = B_FALSE;
 	else
 		disable_scatter = encp->enc_rx_disable_scatter_supported;
 
 	if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
 	    esmp, disable_scatter)) != 0)
 		goto fail3;
 
 	erp->er_eep = eep;
 	erp->er_label = label;
 
 	ef10_ev_rxlabel_init(eep, erp, label);
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 		void
 ef10_rx_qdestroy(
 	__in	efx_rxq_t *erp)
 {
 	efx_nic_t *enp = erp->er_enp;
 	efx_evq_t *eep = erp->er_eep;
 	unsigned int label = erp->er_label;
 
 	ef10_ev_rxlabel_fini(eep, label);
 
 	EFSYS_ASSERT(enp->en_rx_qcount != 0);
 	--enp->en_rx_qcount;
 
 	EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
 }
 
 		void
 ef10_rx_fini(
 	__in	efx_nic_t *enp)
 {
 #if EFSYS_OPT_RX_SCALE
 	if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) {
 		(void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
 	}
 	enp->en_rss_context = 0;
 	enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
 #else
 	_NOTE(ARGUNUSED(enp))
 #endif /* EFSYS_OPT_RX_SCALE */
 }
 
 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
Index: stable/12/sys/dev/sfxge/common/ef10_tx.c
===================================================================
--- stable/12/sys/dev/sfxge/common/ef10_tx.c	(revision 342323)
+++ stable/12/sys/dev/sfxge/common/ef10_tx.c	(revision 342324)
@@ -1,755 +1,763 @@
 /*-
  * Copyright (c) 2012-2016 Solarflare Communications Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * The views and conclusions contained in the software and documentation are
  * those of the authors and should not be interpreted as representing official
  * policies, either expressed or implied, of the FreeBSD Project.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "efx.h"
 #include "efx_impl.h"
 
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 
 #if EFSYS_OPT_QSTATS
 #define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
 	do {								\
 		(_etp)->et_stat[_stat]++;				\
 	_NOTE(CONSTANTCONDITION)					\
 	} while (B_FALSE)
 #else
 #define	EFX_TX_QSTAT_INCR(_etp, _stat)
 #endif
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_init_txq(
 	__in		efx_nic_t *enp,
 	__in		uint32_t size,
 	__in		uint32_t target_evq,
 	__in		uint32_t label,
 	__in		uint32_t instance,
 	__in		uint16_t flags,
 	__in		efsys_mem_t *esmp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
 			    MC_CMD_INIT_TXQ_OUT_LEN)];
 	efx_qword_t *dma_addr;
 	uint64_t addr;
 	int npages;
 	int i;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
 	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
 
 	npages = EFX_TXQ_NBUFS(size);
 	if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_INIT_TXQ;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
 
 	MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
 	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
 	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
 	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
 	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
 	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
 	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
 	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
 	    INIT_TXQ_IN_CRC_MODE, 0,
 	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
 
 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
 	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
 
 	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
 	addr = EFSYS_MEM_ADDR(esmp);
 
 	for (i = 0; i < npages; i++) {
 		EFX_POPULATE_QWORD_2(*dma_addr,
 		    EFX_DWORD_1, (uint32_t)(addr >> 32),
 		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
 
 		dma_addr++;
 		addr += EFX_BUF_SIZE;
 	}
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_fini_txq(
 	__in		efx_nic_t *enp,
 	__in		uint32_t instance)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
 			    MC_CMD_FINI_TXQ_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_FINI_TXQ;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	/*
 	 * EALREADY is not an error, but indicates that the MC has rebooted and
 	 * that the TXQ has already been destroyed.
 	 */
 	if (rc != EALREADY)
 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_init(
 	__in		efx_nic_t *enp)
 {
 	_NOTE(ARGUNUSED(enp))
 	return (0);
 }
 
 			void
 ef10_tx_fini(
 	__in		efx_nic_t *enp)
 {
 	_NOTE(ARGUNUSED(enp))
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_qcreate(
 	__in		efx_nic_t *enp,
 	__in		unsigned int index,
 	__in		unsigned int label,
 	__in		efsys_mem_t *esmp,
 	__in		size_t n,
 	__in		uint32_t id,
 	__in		uint16_t flags,
 	__in		efx_evq_t *eep,
 	__in		efx_txq_t *etp,
 	__out		unsigned int *addedp)
 {
 	efx_qword_t desc;
 	efx_rc_t rc;
 
 	_NOTE(ARGUNUSED(id))
 
 	if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
 	    esmp)) != 0)
 		goto fail1;
 
 	/*
 	 * A previous user of this TX queue may have written a descriptor to the
 	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
 	 * The next doorbell write would then push the stale descriptor.
 	 *
 	 * Ensure the (per network port) TX push collector is cleared by writing
 	 * a no-op TX option descriptor. See bug29981 for details.
 	 */
 	*addedp = 1;
 	EFX_POPULATE_QWORD_4(desc,
 	    ESF_DZ_TX_DESC_IS_OPT, 1,
 	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
 	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
 	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
 	    ESF_DZ_TX_OPTION_IP_CSUM,
 	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
 
 	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
 	ef10_tx_qpush(etp, *addedp, 0);
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 		void
 ef10_tx_qdestroy(
 	__in	efx_txq_t *etp)
 {
 	/* FIXME */
 	_NOTE(ARGUNUSED(etp))
 	/* FIXME */
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_qpio_enable(
 	__in		efx_txq_t *etp)
 {
 	efx_nic_t *enp = etp->et_enp;
 	efx_piobuf_handle_t handle;
 	efx_rc_t rc;
 
 	if (etp->et_pio_size != 0) {
 		rc = EALREADY;
 		goto fail1;
 	}
 
 	/* Sub-allocate a PIO block from a piobuf */
 	if ((rc = ef10_nic_pio_alloc(enp,
 		    &etp->et_pio_bufnum,
 		    &handle,
 		    &etp->et_pio_blknum,
 		    &etp->et_pio_offset,
 		    &etp->et_pio_size)) != 0) {
 		goto fail2;
 	}
 	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
 
 	/* Link the piobuf to this TXQ */
 	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
 		goto fail3;
 	}
 
 	/*
 	 * et_pio_offset is the offset of the sub-allocated block within the
 	 * hardware PIO buffer. It is used as the buffer address in the PIO
 	 * option descriptor.
 	 *
 	 * et_pio_write_offset is the offset of the sub-allocated block from the
 	 * start of the write-combined memory mapping, and is used for writing
 	 * data into the PIO buffer.
 	 */
 	etp->et_pio_write_offset =
 	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
 	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
 fail2:
 	EFSYS_PROBE(fail2);
 	etp->et_pio_size = 0;
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 			void
 ef10_tx_qpio_disable(
 	__in		efx_txq_t *etp)
 {
 	efx_nic_t *enp = etp->et_enp;
 
 	if (etp->et_pio_size != 0) {
 		/* Unlink the piobuf from this TXQ */
 		ef10_nic_pio_unlink(enp, etp->et_index);
 
 		/* Free the sub-allocated PIO block */
 		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
 		etp->et_pio_size = 0;
 		etp->et_pio_write_offset = 0;
 	}
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_qpio_write(
 	__in			efx_txq_t *etp,
 	__in_ecount(length)	uint8_t *buffer,
 	__in			size_t length,
 	__in			size_t offset)
 {
 	efx_nic_t *enp = etp->et_enp;
 	efsys_bar_t *esbp = enp->en_esbp;
 	uint32_t write_offset;
 	uint32_t write_offset_limit;
 	efx_qword_t *eqp;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
 
 	if (etp->et_pio_size == 0) {
 		rc = ENOENT;
 		goto fail1;
 	}
 	if (offset + length > etp->et_pio_size)	{
 		rc = ENOSPC;
 		goto fail2;
 	}
 
 	/*
 	 * Writes to PIO buffers must be 64-bit aligned, and a multiple of
 	 * 64 bits in length.

 	 */
 	write_offset = etp->et_pio_write_offset + offset;
 	write_offset_limit = write_offset + length;
 	eqp = (efx_qword_t *)buffer;
 	while (write_offset < write_offset_limit) {
 		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
 		eqp++;
 		write_offset += sizeof (efx_qword_t);
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_qpio_post(
 	__in			efx_txq_t *etp,
 	__in			size_t pkt_length,
 	__in			unsigned int completed,
 	__inout			unsigned int *addedp)
 {
 	efx_qword_t pio_desc;
 	unsigned int id;
 	size_t offset;
 	unsigned int added = *addedp;
 	efx_rc_t rc;
 
 
 	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
 		rc = ENOSPC;
 		goto fail1;
 	}
 
 	if (etp->et_pio_size == 0) {
 		rc = ENOENT;
 		goto fail2;
 	}
 
 	id = added++ & etp->et_mask;
 	offset = id * sizeof (efx_qword_t);
 
 	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
 		    unsigned int, id, uint32_t, etp->et_pio_offset,
 		    size_t, pkt_length);
 
 	EFX_POPULATE_QWORD_5(pio_desc,
 			ESF_DZ_TX_DESC_IS_OPT, 1,
 			ESF_DZ_TX_OPTION_TYPE, 1,
 			ESF_DZ_TX_PIO_CONT, 0,
 			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
 			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
 
 	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
 
 	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
 
 	*addedp = added;
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
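
A minimal sketch (not part of this change) of the PIO transmit sequence built from the three entry points above; it assumes the packet buffer is allocated in 8-byte multiples so the padded copy stays in bounds, and the helper name and push accounting are illustrative.

/* Hypothetical helper; padding and push accounting are illustrative only. */
static	__checkReturn	efx_rc_t
hyp_tx_pio_send(
	__in			efx_txq_t *etp,
	__in_ecount(len)	uint8_t *pkt,
	__in			size_t len,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	size_t padded = (len + sizeof (efx_qword_t) - 1) &
	    ~(sizeof (efx_qword_t) - 1);
	unsigned int pushed = *addedp;
	efx_rc_t rc;

	/* Copy the frame into the write-combined PIO mapping (8-byte units) */
	if ((rc = ef10_tx_qpio_write(etp, pkt, padded, 0)) != 0)
		return (rc);

	/* Queue a PIO option descriptor referencing the sub-allocated block */
	if ((rc = ef10_tx_qpio_post(etp, len, completed, addedp)) != 0)
		return (rc);

	/* Ring the doorbell for the descriptor just added */
	ef10_tx_qpush(etp, *addedp, pushed);
	return (0);
}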
 
 	__checkReturn	efx_rc_t
 ef10_tx_qpost(
 	__in		efx_txq_t *etp,
 	__in_ecount(n)	efx_buffer_t *eb,
 	__in		unsigned int n,
 	__in		unsigned int completed,
 	__inout		unsigned int *addedp)
 {
 	unsigned int added = *addedp;
 	unsigned int i;
 	efx_rc_t rc;
 
 	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
 		rc = ENOSPC;
 		goto fail1;
 	}
 
 	for (i = 0; i < n; i++) {
 		efx_buffer_t *ebp = &eb[i];
 		efsys_dma_addr_t addr = ebp->eb_addr;
 		size_t size = ebp->eb_size;
 		boolean_t eop = ebp->eb_eop;
 		unsigned int id;
 		size_t offset;
 		efx_qword_t qword;
 
 		/* No limitations on boundary crossing */
 		EFSYS_ASSERT(size <=
 		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
 
 		id = added++ & etp->et_mask;
 		offset = id * sizeof (efx_qword_t);
 
 		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
 		    unsigned int, id, efsys_dma_addr_t, addr,
 		    size_t, size, boolean_t, eop);
 
 		EFX_POPULATE_QWORD_5(qword,
 		    ESF_DZ_TX_KER_TYPE, 0,
 		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
 		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
 		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
 		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
 
 		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
 	}
 
 	EFX_TX_QSTAT_INCR(etp, TX_POST);
 
 	*addedp = added;
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * This improves performance by, when possible, pushing a TX descriptor at the
  * same time as the doorbell. The descriptor must be added to the TXQ, so that
  * it can be used if the hardware decides not to use the pushed descriptor.
  */
 			void
 ef10_tx_qpush(
 	__in		efx_txq_t *etp,
 	__in		unsigned int added,
 	__in		unsigned int pushed)
 {
 	efx_nic_t *enp = etp->et_enp;
 	unsigned int wptr;
 	unsigned int id;
 	size_t offset;
 	efx_qword_t desc;
 	efx_oword_t oword;
 
 	wptr = added & etp->et_mask;
 	id = pushed & etp->et_mask;
 	offset = id * sizeof (efx_qword_t);
 
 	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
 
 	/*
 	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass
 	 * is enabled on the event queue this transmit queue is attached to.
 	 *
 	 * To ensure the code is safe, it is easiest to simply test the type of
  * the descriptor to push, and only push it if it is not a TSO option
 	 * descriptor.
 	 */
 	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
 	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
 	    ESE_DZ_TX_OPTION_DESC_TSO)) {
 		/* Push the descriptor and update the wptr. */
 		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
 		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
 		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
 
 		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
 		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
 					    wptr, id);
 		EFSYS_PIO_WRITE_BARRIER();
 		EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
 					    etp->et_index, &oword);
 	} else {
 		efx_dword_t dword;
 
 		/*
 		 * Only update the wptr. This is signalled to the hardware by
 		 * only writing one DWORD of the doorbell register.
 		 */
 		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
 		dword = oword.eo_dword[2];
 
 		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
 		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
 					    wptr, id);
 		EFSYS_PIO_WRITE_BARRIER();
 		EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
 				    etp->et_index, &dword, B_FALSE);
 	}
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_qdesc_post(
 	__in		efx_txq_t *etp,
 	__in_ecount(n)	efx_desc_t *ed,
 	__in		unsigned int n,
 	__in		unsigned int completed,
 	__inout		unsigned int *addedp)
 {
 	unsigned int added = *addedp;
 	unsigned int i;
 	efx_rc_t rc;
 
 	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
 		rc = ENOSPC;
 		goto fail1;
 	}
 
 	for (i = 0; i < n; i++) {
 		efx_desc_t *edp = &ed[i];
 		unsigned int id;
 		size_t offset;
 
 		id = added++ & etp->et_mask;
 		offset = id * sizeof (efx_desc_t);
 
 		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
 	}
 
 	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
 		    unsigned int, added, unsigned int, n);
 
 	EFX_TX_QSTAT_INCR(etp, TX_POST);
 
 	*addedp = added;
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	void
 ef10_tx_qdesc_dma_create(
 	__in	efx_txq_t *etp,
 	__in	efsys_dma_addr_t addr,
 	__in	size_t size,
 	__in	boolean_t eop,
 	__out	efx_desc_t *edp)
 {
+	_NOTE(ARGUNUSED(etp))
+
 	/* No limitations on boundary crossing */
 	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
 
 	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
 		    efsys_dma_addr_t, addr,
 		    size_t, size, boolean_t, eop);
 
 	EFX_POPULATE_QWORD_5(edp->ed_eq,
 		    ESF_DZ_TX_KER_TYPE, 0,
 		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
 		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
 		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
 		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
 }
 
 	void
 ef10_tx_qdesc_tso_create(
 	__in	efx_txq_t *etp,
 	__in	uint16_t ipv4_id,
 	__in	uint32_t tcp_seq,
 	__in	uint8_t  tcp_flags,
 	__out	efx_desc_t *edp)
 {
+	_NOTE(ARGUNUSED(etp))
+
 	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
 		    uint16_t, ipv4_id, uint32_t, tcp_seq,
 		    uint8_t, tcp_flags);
 
 	EFX_POPULATE_QWORD_5(edp->ed_eq,
 			    ESF_DZ_TX_DESC_IS_OPT, 1,
 			    ESF_DZ_TX_OPTION_TYPE,
 			    ESE_DZ_TX_OPTION_DESC_TSO,
 			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
 }
 
 	void
 ef10_tx_qdesc_tso2_create(
 	__in			efx_txq_t *etp,
 	__in			uint16_t ipv4_id,
 	__in			uint32_t tcp_seq,
 	__in			uint16_t tcp_mss,
 	__out_ecount(count)	efx_desc_t *edp,
 	__in			int count)
 {
+	_NOTE(ARGUNUSED(etp, count))
+
 	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
 		    uint16_t, ipv4_id, uint32_t, tcp_seq,
 		    uint16_t, tcp_mss);
 
 	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
 
 	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
 			    ESF_DZ_TX_DESC_IS_OPT, 1,
 			    ESF_DZ_TX_OPTION_TYPE,
 			    ESE_DZ_TX_OPTION_DESC_TSO,
 			    ESF_DZ_TX_TSO_OPTION_TYPE,
 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
 			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
 			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
 	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
 			    ESF_DZ_TX_DESC_IS_OPT, 1,
 			    ESF_DZ_TX_OPTION_TYPE,
 			    ESE_DZ_TX_OPTION_DESC_TSO,
 			    ESF_DZ_TX_TSO_OPTION_TYPE,
 			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
 			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
 }
 
 	void
 ef10_tx_qdesc_vlantci_create(
 	__in	efx_txq_t *etp,
 	__in	uint16_t  tci,
 	__out	efx_desc_t *edp)
 {
+	_NOTE(ARGUNUSED(etp))
+
 	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
 		    uint16_t, tci);
 
 	EFX_POPULATE_QWORD_4(edp->ed_eq,
 			    ESF_DZ_TX_DESC_IS_OPT, 1,
 			    ESF_DZ_TX_OPTION_TYPE,
 			    ESE_DZ_TX_OPTION_DESC_VLAN,
 			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
 			    ESF_DZ_TX_VLAN_TAG1, tci);
 }
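
A minimal sketch (not part of this change) showing the descriptor helpers above combined with ef10_tx_qdesc_post(); the fragment layout and TCI value are illustrative, and the caller is still expected to ring the doorbell with ef10_tx_qpush() afterwards.

/* Hypothetical send path; fragment layout and TCI value are illustrative. */
static	__checkReturn	efx_rc_t
hyp_tx_send_vlan_frag(
	__in		efx_txq_t *etp,
	__in		efsys_dma_addr_t addr,
	__in		size_t len,
	__in		uint16_t tci,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	efx_desc_t descs[2];

	/* Option descriptor: insert the VLAN tag (tci == 0 clears the op) */
	ef10_tx_qdesc_vlantci_create(etp, tci, &descs[0]);

	/* DMA descriptor for a single, final fragment (eop == B_TRUE) */
	ef10_tx_qdesc_dma_create(etp, addr, len, B_TRUE, &descs[1]);

	return (ef10_tx_qdesc_post(etp, descs, 2, completed, addedp));
}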
 
 
 	__checkReturn	efx_rc_t
 ef10_tx_qpace(
 	__in		efx_txq_t *etp,
 	__in		unsigned int ns)
 {
 	efx_rc_t rc;
 
 	/* FIXME */
 	_NOTE(ARGUNUSED(etp, ns))
 	_NOTE(CONSTANTCONDITION)
 	if (B_FALSE) {
 		rc = ENOTSUP;
 		goto fail1;
 	}
 	/* FIXME */
 
 	return (0);
 
 fail1:
 	/*
 	 * EALREADY is not an error, but indicates that the MC has rebooted and
 	 * that the TXQ has already been destroyed. Callers need to know that
 	 * the TXQ flush has completed to avoid waiting until timeout for a
 	 * flush done event that will not be delivered.
 	 */
 	if (rc != EALREADY)
 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 ef10_tx_qflush(
 	__in		efx_txq_t *etp)
 {
 	efx_nic_t *enp = etp->et_enp;
 	efx_rc_t rc;
 
 	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 			void
 ef10_tx_qenable(
 	__in		efx_txq_t *etp)
 {
 	/* FIXME */
 	_NOTE(ARGUNUSED(etp))
 	/* FIXME */
 }
 
 #if EFSYS_OPT_QSTATS
 			void
 ef10_tx_qstats_update(
 	__in				efx_txq_t *etp,
 	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
 {
 	unsigned int id;
 
 	for (id = 0; id < TX_NQSTATS; id++) {
 		efsys_stat_t *essp = &stat[id];
 
 		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
 		etp->et_stat[id] = 0;
 	}
 }
 
 #endif /* EFSYS_OPT_QSTATS */
 
 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
Index: stable/12/sys/dev/sfxge/common/efx_ev.c
===================================================================
--- stable/12/sys/dev/sfxge/common/efx_ev.c	(revision 342323)
+++ stable/12/sys/dev/sfxge/common/efx_ev.c	(revision 342324)
@@ -1,1475 +1,1477 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2007-2016 Solarflare Communications Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * The views and conclusions contained in the software and documentation are
  * those of the authors and should not be interpreted as representing official
  * policies, either expressed or implied, of the FreeBSD Project.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "efx.h"
 #include "efx_impl.h"
 #if EFSYS_OPT_MON_MCDI
 #include "mcdi_mon.h"
 #endif
 
 #if EFSYS_OPT_QSTATS
 #define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
 	do {								\
 		(_eep)->ee_stat[_stat]++;				\
 	_NOTE(CONSTANTCONDITION)					\
 	} while (B_FALSE)
 #else
 #define	EFX_EV_QSTAT_INCR(_eep, _stat)
 #endif
 
 #define	EFX_EV_PRESENT(_qword)						\
 	(EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff &&	\
 	EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
 
 
 
 #if EFSYS_OPT_SIENA
 
 static	__checkReturn	efx_rc_t
 siena_ev_init(
 	__in		efx_nic_t *enp);
 
 static			void
 siena_ev_fini(
 	__in		efx_nic_t *enp);
 
 static	__checkReturn	efx_rc_t
 siena_ev_qcreate(
 	__in		efx_nic_t *enp,
 	__in		unsigned int index,
 	__in		efsys_mem_t *esmp,
 	__in		size_t n,
 	__in		uint32_t id,
 	__in		uint32_t us,
 	__in		uint32_t flags,
 	__in		efx_evq_t *eep);
 
 static			void
 siena_ev_qdestroy(
 	__in		efx_evq_t *eep);
 
 static	__checkReturn	efx_rc_t
 siena_ev_qprime(
 	__in		efx_evq_t *eep,
 	__in		unsigned int count);
 
 static			void
 siena_ev_qpost(
 	__in	efx_evq_t *eep,
 	__in	uint16_t data);
 
 static	__checkReturn	efx_rc_t
 siena_ev_qmoderate(
 	__in		efx_evq_t *eep,
 	__in		unsigned int us);
 
 #if EFSYS_OPT_QSTATS
 static			void
 siena_ev_qstats_update(
 	__in				efx_evq_t *eep,
 	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat);
 
 #endif
 
 #endif /* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_SIENA
 static const efx_ev_ops_t	__efx_ev_siena_ops = {
 	siena_ev_init,				/* eevo_init */
 	siena_ev_fini,				/* eevo_fini */
 	siena_ev_qcreate,			/* eevo_qcreate */
 	siena_ev_qdestroy,			/* eevo_qdestroy */
 	siena_ev_qprime,			/* eevo_qprime */
 	siena_ev_qpost,				/* eevo_qpost */
 	siena_ev_qmoderate,			/* eevo_qmoderate */
 #if EFSYS_OPT_QSTATS
 	siena_ev_qstats_update,			/* eevo_qstats_update */
 #endif
 };
 #endif /* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 static const efx_ev_ops_t	__efx_ev_ef10_ops = {
 	ef10_ev_init,				/* eevo_init */
 	ef10_ev_fini,				/* eevo_fini */
 	ef10_ev_qcreate,			/* eevo_qcreate */
 	ef10_ev_qdestroy,			/* eevo_qdestroy */
 	ef10_ev_qprime,				/* eevo_qprime */
 	ef10_ev_qpost,				/* eevo_qpost */
 	ef10_ev_qmoderate,			/* eevo_qmoderate */
 #if EFSYS_OPT_QSTATS
 	ef10_ev_qstats_update,			/* eevo_qstats_update */
 #endif
 };
 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
 
 
 	__checkReturn	efx_rc_t
 efx_ev_init(
 	__in		efx_nic_t *enp)
 {
 	const efx_ev_ops_t *eevop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
 
 	if (enp->en_mod_flags & EFX_MOD_EV) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	switch (enp->en_family) {
 #if EFSYS_OPT_SIENA
 	case EFX_FAMILY_SIENA:
 		eevop = &__efx_ev_siena_ops;
 		break;
 #endif /* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_HUNTINGTON
 	case EFX_FAMILY_HUNTINGTON:
 		eevop = &__efx_ev_ef10_ops;
 		break;
 #endif /* EFSYS_OPT_HUNTINGTON */
 
 #if EFSYS_OPT_MEDFORD
 	case EFX_FAMILY_MEDFORD:
 		eevop = &__efx_ev_ef10_ops;
 		break;
 #endif /* EFSYS_OPT_MEDFORD */
 
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
 		goto fail1;
 	}
 
 	EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
 
 	if ((rc = eevop->eevo_init(enp)) != 0)
 		goto fail2;
 
 	enp->en_eevop = eevop;
 	enp->en_mod_flags |= EFX_MOD_EV;
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	enp->en_eevop = NULL;
 	enp->en_mod_flags &= ~EFX_MOD_EV;
 	return (rc);
 }
 
 		void
 efx_ev_fini(
 	__in	efx_nic_t *enp)
 {
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
 	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
 	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
 	EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
 
 	eevop->eevo_fini(enp);
 
 	enp->en_eevop = NULL;
 	enp->en_mod_flags &= ~EFX_MOD_EV;
 }
 
 
 	__checkReturn	efx_rc_t
 efx_ev_qcreate(
 	__in		efx_nic_t *enp,
 	__in		unsigned int index,
 	__in		efsys_mem_t *esmp,
 	__in		size_t n,
 	__in		uint32_t id,
 	__in		uint32_t us,
 	__in		uint32_t flags,
 	__deref_out	efx_evq_t **eepp)
 {
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
 	efx_evq_t *eep;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
 
 	EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
 
 	switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
 	case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
 		break;
 	case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
 		if (us != 0) {
 			rc = EINVAL;
 			goto fail1;
 		}
 		break;
 	default:
 		rc = EINVAL;
 		goto fail2;
 	}
 
 	/* Allocate an EVQ object */
 	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
 	if (eep == NULL) {
 		rc = ENOMEM;
 		goto fail3;
 	}
 
 	eep->ee_magic = EFX_EVQ_MAGIC;
 	eep->ee_enp = enp;
 	eep->ee_index = index;
 	eep->ee_mask = n - 1;
 	eep->ee_flags = flags;
 	eep->ee_esmp = esmp;
 
 	/*
 	 * Set outputs before the queue is created because interrupts may be
 	 * raised for events immediately after the queue is created, before the
 	 * function call below returns. See bug58606.
 	 *
 	 * The eepp pointer passed in by the client must therefore point to data
 	 * shared with the client's event processing context.
 	 */
 	enp->en_ev_qcount++;
 	*eepp = eep;
 
 	if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, flags,
 	    eep)) != 0)
 		goto fail4;
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 
 	*eepp = NULL;
 	enp->en_ev_qcount--;
 	EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 	return (rc);
 }
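
A minimal sketch (not part of this change) of the shared-state requirement described in the comment above: the location written through eepp must already be visible to the interrupt path before efx_ev_qcreate() is called. The softc layout and helper name are illustrative assumptions.

/* Hypothetical softc; only the placement of the evq pointer matters here. */
struct hyp_evq_softc {
	efx_evq_t	*hes_common;	/* written by efx_ev_qcreate() */
	unsigned int	hes_count;	/* client event cursor */
};

static	__checkReturn	efx_rc_t
hyp_evq_start(
	__in		efx_nic_t *enp,
	__in		struct hyp_evq_softc *hesp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs)
{
	/*
	 * Interrupts may arrive before this call returns, so hesp must
	 * already be reachable from the interrupt handler.
	 */
	return (efx_ev_qcreate(enp, index, esmp, ndescs, 0, 0,
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT, &hesp->hes_common));
}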
 
 		void
 efx_ev_qdestroy(
 	__in	efx_evq_t *eep)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	EFSYS_ASSERT(enp->en_ev_qcount != 0);
 	--enp->en_ev_qcount;
 
 	eevop->eevo_qdestroy(eep);
 
 	/* Free the EVQ object */
 	EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
 }
 
 	__checkReturn	efx_rc_t
 efx_ev_qprime(
 	__in		efx_evq_t *eep,
 	__in		unsigned int count)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	if ((rc = eevop->eevo_qprime(eep, count)) != 0)
 		goto fail2;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 	return (rc);
 }
 
 	__checkReturn	boolean_t
 efx_ev_qpending(
 	__in		efx_evq_t *eep,
 	__in		unsigned int count)
 {
 	size_t offset;
 	efx_qword_t qword;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
 	EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
 
 	return (EFX_EV_PRESENT(qword));
 }
 
 #if EFSYS_OPT_EV_PREFETCH
 
 			void
 efx_ev_qprefetch(
 	__in		efx_evq_t *eep,
 	__in		unsigned int count)
 {
 	unsigned int offset;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
 	EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
 }
 
 #endif	/* EFSYS_OPT_EV_PREFETCH */
 
 #define	EFX_EV_BATCH	8
 
 			void
 efx_ev_qpoll(
 	__in		efx_evq_t *eep,
 	__inout		unsigned int *countp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	efx_qword_t ev[EFX_EV_BATCH];
 	unsigned int batch;
 	unsigned int total;
 	unsigned int count;
 	unsigned int index;
 	size_t offset;
 
 	/* Ensure event codes match for EF10 (Huntington/Medford) and Siena */
 	EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
 	EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
 
 	EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
 	EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
 	EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
 	EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
 	    FSE_AZ_EV_CODE_DRV_GEN_EV);
 #if EFSYS_OPT_MCDI
 	EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
 	    FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
 #endif
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 	EFSYS_ASSERT(countp != NULL);
 	EFSYS_ASSERT(eecp != NULL);
 
 	count = *countp;
 	do {
 		/* Read up until the end of the batch period */
 		batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
 		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
 		for (total = 0; total < batch; ++total) {
 			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
 
 			if (!EFX_EV_PRESENT(ev[total]))
 				break;
 
 			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
 			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
 			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
 
 			offset += sizeof (efx_qword_t);
 		}
 
 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
 		/*
 		 * Prefetch the next batch when we get within PREFETCH_PERIOD
 		 * of a completed batch. If the batch is smaller, then prefetch
 		 * immediately.
 		 */
 		if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
 			EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
 #endif	/* EFSYS_OPT_EV_PREFETCH */
 
 		/* Process the batch of events */
 		for (index = 0; index < total; ++index) {
 			boolean_t should_abort;
 			uint32_t code;
 
 #if EFSYS_OPT_EV_PREFETCH
 			/* Prefetch if we've now reached the batch period */
 			if (total == batch &&
 			    index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
 				offset = (count + batch) & eep->ee_mask;
 				offset *= sizeof (efx_qword_t);
 
 				EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
 			}
 #endif	/* EFSYS_OPT_EV_PREFETCH */
 
 			EFX_EV_QSTAT_INCR(eep, EV_ALL);
 
 			code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
 			switch (code) {
 			case FSE_AZ_EV_CODE_RX_EV:
 				should_abort = eep->ee_rx(eep,
 				    &(ev[index]), eecp, arg);
 				break;
 			case FSE_AZ_EV_CODE_TX_EV:
 				should_abort = eep->ee_tx(eep,
 				    &(ev[index]), eecp, arg);
 				break;
 			case FSE_AZ_EV_CODE_DRIVER_EV:
 				should_abort = eep->ee_driver(eep,
 				    &(ev[index]), eecp, arg);
 				break;
 			case FSE_AZ_EV_CODE_DRV_GEN_EV:
 				should_abort = eep->ee_drv_gen(eep,
 				    &(ev[index]), eecp, arg);
 				break;
 #if EFSYS_OPT_MCDI
 			case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
 				should_abort = eep->ee_mcdi(eep,
 				    &(ev[index]), eecp, arg);
 				break;
 #endif
 			case FSE_AZ_EV_CODE_GLOBAL_EV:
 				if (eep->ee_global) {
 					should_abort = eep->ee_global(eep,
 					    &(ev[index]), eecp, arg);
 					break;
 				}
 				/* else fallthrough */
 			default:
 				EFSYS_PROBE3(bad_event,
 				    unsigned int, eep->ee_index,
 				    uint32_t,
 				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
 				    uint32_t,
 				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
 
 				EFSYS_ASSERT(eecp->eec_exception != NULL);
 				(void) eecp->eec_exception(arg,
 					EFX_EXCEPTION_EV_ERROR, code);
 				should_abort = B_TRUE;
 			}
 			if (should_abort) {
 				/* Ignore subsequent events */
 				total = index + 1;
 				break;
 			}
 		}
 
 		/*
 		 * Now that the hardware has most likely moved on to DMAing
 		 * into the next cache line, clear the processed events. Take
 		 * care to only clear out events that we've processed.
 		 */
 		EFX_SET_QWORD(ev[0]);
 		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
 		for (index = 0; index < total; ++index) {
 			EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
 			offset += sizeof (efx_qword_t);
 		}
 
 		count += total;
 
 	} while (total == batch);
 
 	*countp = count;
 }
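
A minimal usage sketch (not part of this change) combining efx_ev_qpending(), efx_ev_qpoll() and efx_ev_qprime() into a service routine; the drain-then-rearm policy and helper name are illustrative assumptions, and the callbacks structure is assumed to be filled in by the client elsewhere.

/* Hypothetical service routine; the re-prime policy is illustrative only. */
static			void
hyp_evq_service(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	/* Drain events already delivered to the queue memory */
	while (efx_ev_qpending(eep, *countp))
		efx_ev_qpoll(eep, countp, eecp, arg);

	/* Re-arm so the next delivered event raises an interrupt */
	(void) efx_ev_qprime(eep, *countp);
}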
 
 			void
 efx_ev_qpost(
 	__in	efx_evq_t *eep,
 	__in	uint16_t data)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	EFSYS_ASSERT(eevop != NULL &&
 	    eevop->eevo_qpost != NULL);
 
 	eevop->eevo_qpost(eep, data);
 }
 
 	__checkReturn	efx_rc_t
 efx_ev_usecs_to_ticks(
 	__in		efx_nic_t *enp,
 	__in		unsigned int us,
 	__out		unsigned int *ticksp)
 {
 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
 	unsigned int ticks;
 
 	/* Convert microseconds to a timer tick count */
 	if (us == 0)
 		ticks = 0;
 	else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
 		ticks = 1;	/* Never round down to zero */
 	else
 		ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
 
 	*ticksp = ticks;
 	return (0);
 }
 
 	__checkReturn	efx_rc_t
 efx_ev_qmoderate(
 	__in		efx_evq_t *eep,
 	__in		unsigned int us)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
 	    EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
 		goto fail2;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 	return (rc);
 }
 
 #if EFSYS_OPT_QSTATS
 					void
 efx_ev_qstats_update(
 	__in				efx_evq_t *eep,
 	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	const efx_ev_ops_t *eevop = enp->en_eevop;
 
 	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
 
 	eevop->eevo_qstats_update(eep, stat);
 }
 
 #endif	/* EFSYS_OPT_QSTATS */
 
 #if EFSYS_OPT_SIENA
 
 static	__checkReturn	efx_rc_t
 siena_ev_init(
 	__in		efx_nic_t *enp)
 {
 	efx_oword_t oword;
 
 	/*
 	 * Program the event queue for receive and transmit queue
 	 * flush events.
 	 */
 	EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
 	EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
 	EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
 
 	return (0);
 }
 
 static  __checkReturn   boolean_t
 siena_ev_rx_not_ok(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		uint32_t label,
 	__in		uint32_t id,
 	__inout		uint16_t *flagsp)
 {
 	boolean_t ignore = B_FALSE;
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
 		EFSYS_PROBE(tobe_disc);
 		/*
 		 * Assume this is a unicast address mismatch, unless we find
 		 * below that either FSF_AZ_RX_EV_ETH_CRC_ERR or
 		 * EV_RX_PAUSE_FRM_ERR is set.
 		 */
 		(*flagsp) |= EFX_ADDR_MISMATCH;
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
 		EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
 		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
 		(*flagsp) |= EFX_DISCARD;
 
 #if EFSYS_OPT_RX_SCATTER
 		/*
 		 * Look out for "payload queue ran dry" errors and ignore
 		 * them.
 		 *
 		 * Sadly, for the header/data split cases the descriptor
 		 * pointer in this event refers to the header queue and
 		 * therefore cannot easily be detected as a duplicate.
 		 * So we drop these and rely on the receive processing
 		 * seeing a subsequent packet with FSF_AZ_RX_EV_SOP set to
 		 * discard the partially received packet.
 		 */
 		if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
 		    (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
 		    (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
 			ignore = B_TRUE;
 #endif	/* EFSYS_OPT_RX_SCATTER */
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
 		EFSYS_PROBE(crc_err);
 		(*flagsp) &= ~EFX_ADDR_MISMATCH;
 		(*flagsp) |= EFX_DISCARD;
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
 		EFSYS_PROBE(pause_frm_err);
 		(*flagsp) &= ~EFX_ADDR_MISMATCH;
 		(*flagsp) |= EFX_DISCARD;
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
 		EFSYS_PROBE(owner_id_err);
 		(*flagsp) |= EFX_DISCARD;
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
 		EFSYS_PROBE(ipv4_err);
 		(*flagsp) &= ~EFX_CKSUM_IPV4;
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
 		EFSYS_PROBE(udp_chk_err);
 		(*flagsp) &= ~EFX_CKSUM_TCPUDP;
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
 
 		/*
 		 * If the IP datagram is fragmented, FSF_AZ_RX_EV_IP_FRAG_ERR
 		 * is set, which in turn causes FSF_AZ_RX_EV_PKT_OK to be
 		 * clear. This is not an error condition.
 		 */
 		(*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
 	}
 
 	return (ignore);
 }
 
 static	__checkReturn	boolean_t
 siena_ev_rx(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	uint32_t id;
 	uint32_t size;
 	uint32_t label;
 	boolean_t ok;
 #if EFSYS_OPT_RX_SCATTER
 	boolean_t sop;
 	boolean_t jumbo_cont;
 #endif	/* EFSYS_OPT_RX_SCATTER */
 	uint32_t hdr_type;
 	boolean_t is_v6;
 	uint16_t flags;
 	boolean_t ignore;
 	boolean_t should_abort;
 
 	EFX_EV_QSTAT_INCR(eep, EV_RX);
 
 	/* Basic packet information */
 	id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
 	size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
 	label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
 	ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
 
 #if EFSYS_OPT_RX_SCATTER
 	sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
 	jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
 #endif	/* EFSYS_OPT_RX_SCATTER */
 
 	hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
 
 	is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
 
 	/*
 	 * If packet is marked as OK and packet type is TCP/IP or
 	 * UDP/IP or other IP, then we can rely on the hardware checksums.
 	 */
 	switch (hdr_type) {
 	case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
 		flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
 		if (is_v6) {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
 			flags |= EFX_PKT_IPV6;
 		} else {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
 			flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
 		}
 		break;
 
 	case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
 		flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
 		if (is_v6) {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
 			flags |= EFX_PKT_IPV6;
 		} else {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
 			flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
 		}
 		break;
 
 	case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
 		if (is_v6) {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
 			flags = EFX_PKT_IPV6;
 		} else {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
 			flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
 		}
 		break;
 
 	case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
 		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
 		flags = 0;
 		break;
 
 	default:
 		EFSYS_ASSERT(B_FALSE);
 		flags = 0;
 		break;
 	}
 
 #if EFSYS_OPT_RX_SCATTER
 	/* Report scatter and header/lookahead split buffer flags */
 	if (sop)
 		flags |= EFX_PKT_START;
 	if (jumbo_cont)
 		flags |= EFX_PKT_CONT;
 #endif	/* EFSYS_OPT_RX_SCATTER */
 
 	/* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
 	if (!ok) {
 		ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
 		if (ignore) {
 			EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
 			    uint32_t, size, uint16_t, flags);
 
 			return (B_FALSE);
 		}
 	}
 
 	/* If we're not discarding the packet then it is ok */
 	if (~flags & EFX_DISCARD)
 		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
 
 	/* Detect multicast packets that didn't match the filter */
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
 		EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
 
 		if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
 			EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
 		} else {
 			EFSYS_PROBE(mcast_mismatch);
 			flags |= EFX_ADDR_MISMATCH;
 		}
 	} else {
 		flags |= EFX_PKT_UNICAST;
 	}
 
 	/*
 	 * The packet parser in Siena can abort parsing packets under
 	 * certain error conditions, setting the PKT_NOT_PARSED bit
 	 * (which clears PKT_OK). If this is set, then don't trust
 	 * the PKT_TYPE field.
 	 */
 	if (!ok) {
 		uint32_t parse_err;
 
 		parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
 		if (parse_err != 0)
 			flags |= EFX_CHECK_VLAN;
 	}
 
 	if (~flags & EFX_CHECK_VLAN) {
 		uint32_t pkt_type;
 
 		pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
 		if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
 			flags |= EFX_PKT_VLAN_TAGGED;
 	}
 
 	EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
 	    uint32_t, size, uint16_t, flags);
 
 	EFSYS_ASSERT(eecp->eec_rx != NULL);
 	should_abort = eecp->eec_rx(arg, label, id, size, flags);
 
 	return (should_abort);
 }
 
 static	__checkReturn	boolean_t
 siena_ev_tx(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	uint32_t id;
 	uint32_t label;
 	boolean_t should_abort;
 
 	EFX_EV_QSTAT_INCR(eep, EV_TX);
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
 	    EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
 	    EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
 	    EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
 
 		id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
 		label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
 
 		EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
 
 		EFSYS_ASSERT(eecp->eec_tx != NULL);
 		should_abort = eecp->eec_tx(arg, label, id);
 
 		return (should_abort);
 	}
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
 		EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
 		EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
 
 	if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
 		EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
 
 	EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
 	return (B_FALSE);
 }
 
 static	__checkReturn	boolean_t
 siena_ev_global(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	_NOTE(ARGUNUSED(eqp, eecp, arg))
 
 	EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
 
 	return (B_FALSE);
 }
 
 static	__checkReturn	boolean_t
 siena_ev_driver(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	boolean_t should_abort;
 
 	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
 	should_abort = B_FALSE;
 
 	switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
 	case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
 		uint32_t txq_index;
 
 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
 
 		txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
 
 		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
 
 		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
 		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
 
 		break;
 	}
 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
 		uint32_t rxq_index;
 		uint32_t failed;
 
 		rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
 		failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
 
 		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
 		EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
 
 		if (failed) {
 			EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
 
 			EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
 
 			should_abort = eecp->eec_rxq_flush_failed(arg,
 								    rxq_index);
 		} else {
 			EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
 
 			EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
 
 			should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
 		}
 
 		break;
 	}
 	case FSE_AZ_EVQ_INIT_DONE_EV:
 		EFSYS_ASSERT(eecp->eec_initialized != NULL);
 		should_abort = eecp->eec_initialized(arg);
 
 		break;
 
 	case FSE_AZ_EVQ_NOT_EN_EV:
 		EFSYS_PROBE(evq_not_en);
 		break;
 
 	case FSE_AZ_SRM_UPD_DONE_EV: {
 		uint32_t code;
 
 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
 
 		code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
 
 		EFSYS_ASSERT(eecp->eec_sram != NULL);
 		should_abort = eecp->eec_sram(arg, code);
 
 		break;
 	}
 	case FSE_AZ_WAKE_UP_EV: {
 		uint32_t id;
 
 		id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
 
 		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
 		should_abort = eecp->eec_wake_up(arg, id);
 
 		break;
 	}
 	case FSE_AZ_TX_PKT_NON_TCP_UDP:
 		EFSYS_PROBE(tx_pkt_non_tcp_udp);
 		break;
 
 	case FSE_AZ_TIMER_EV: {
 		uint32_t id;
 
 		id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
 
 		EFSYS_ASSERT(eecp->eec_timer != NULL);
 		should_abort = eecp->eec_timer(arg, id);
 
 		break;
 	}
 	case FSE_AZ_RX_DSC_ERROR_EV:
 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
 
 		EFSYS_PROBE(rx_dsc_error);
 
 		EFSYS_ASSERT(eecp->eec_exception != NULL);
 		should_abort = eecp->eec_exception(arg,
 			EFX_EXCEPTION_RX_DSC_ERROR, 0);
 
 		break;
 
 	case FSE_AZ_TX_DSC_ERROR_EV:
 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
 
 		EFSYS_PROBE(tx_dsc_error);
 
 		EFSYS_ASSERT(eecp->eec_exception != NULL);
 		should_abort = eecp->eec_exception(arg,
 			EFX_EXCEPTION_TX_DSC_ERROR, 0);
 
 		break;
 
 	default:
 		break;
 	}
 
 	return (should_abort);
 }
 
 static	__checkReturn	boolean_t
 siena_ev_drv_gen(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	uint32_t data;
 	boolean_t should_abort;
 
 	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
 
 	data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
 	if (data >= ((uint32_t)1 << 16)) {
 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
 		return (B_TRUE);
 	}
 
 	EFSYS_ASSERT(eecp->eec_software != NULL);
 	should_abort = eecp->eec_software(arg, (uint16_t)data);
 
 	return (should_abort);
 }
 
 #if EFSYS_OPT_MCDI
 
 static	__checkReturn	boolean_t
 siena_ev_mcdi(
 	__in		efx_evq_t *eep,
 	__in		efx_qword_t *eqp,
 	__in		const efx_ev_callbacks_t *eecp,
 	__in_opt	void *arg)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	unsigned int code;
 	boolean_t should_abort = B_FALSE;
 
 	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
 
 	if (enp->en_family != EFX_FAMILY_SIENA)
 		goto out;
 
 	EFSYS_ASSERT(eecp->eec_link_change != NULL);
 	EFSYS_ASSERT(eecp->eec_exception != NULL);
 #if EFSYS_OPT_MON_STATS
 	EFSYS_ASSERT(eecp->eec_monitor != NULL);
 #endif
 
 	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
 
 	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
 	switch (code) {
 	case MCDI_EVENT_CODE_BADSSERT:
 		efx_mcdi_ev_death(enp, EINTR);
 		break;
 
 	case MCDI_EVENT_CODE_CMDDONE:
 		efx_mcdi_ev_cpl(enp,
 		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
 		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
 		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
 		break;
 
 	case MCDI_EVENT_CODE_LINKCHANGE: {
 		efx_link_mode_t link_mode;
 
 		siena_phy_link_ev(enp, eqp, &link_mode);
 		should_abort = eecp->eec_link_change(arg, link_mode);
 		break;
 	}
 	case MCDI_EVENT_CODE_SENSOREVT: {
 #if EFSYS_OPT_MON_STATS
 		efx_mon_stat_t id;
 		efx_mon_stat_value_t value;
 		efx_rc_t rc;
 
 		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
 			should_abort = eecp->eec_monitor(arg, id, value);
 		else if (rc == ENOTSUP) {
 			should_abort = eecp->eec_exception(arg,
 				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
 				MCDI_EV_FIELD(eqp, DATA));
 		} else
 			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
 #else
 		should_abort = B_FALSE;
 #endif
 		break;
 	}
 	case MCDI_EVENT_CODE_SCHEDERR:
 		/* Informational only */
 		break;
 
 	case MCDI_EVENT_CODE_REBOOT:
 		efx_mcdi_ev_death(enp, EIO);
 		break;
 
 	case MCDI_EVENT_CODE_MAC_STATS_DMA:
 #if EFSYS_OPT_MAC_STATS
 		if (eecp->eec_mac_stats != NULL) {
 			eecp->eec_mac_stats(arg,
 			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
 		}
 #endif
 		break;
 
 	case MCDI_EVENT_CODE_FWALERT: {
 		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
 
 		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
 			should_abort = eecp->eec_exception(arg,
 				EFX_EXCEPTION_FWALERT_SRAM,
 				MCDI_EV_FIELD(eqp, FWALERT_DATA));
 		else
 			should_abort = eecp->eec_exception(arg,
 				EFX_EXCEPTION_UNKNOWN_FWALERT,
 				MCDI_EV_FIELD(eqp, DATA));
 		break;
 	}
 
 	default:
 		EFSYS_PROBE1(mc_pcol_error, int, code);
 		break;
 	}
 
 out:
 	return (should_abort);
 }
 
 #endif	/* EFSYS_OPT_MCDI */
 
 static	__checkReturn	efx_rc_t
 siena_ev_qprime(
 	__in		efx_evq_t *eep,
 	__in		unsigned int count)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	uint32_t rptr;
 	efx_dword_t dword;
 
 	rptr = count & eep->ee_mask;
 
 	EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
 
 	EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
 			    &dword, B_FALSE);
 
 	return (0);
 }
 
 static		void
 siena_ev_qpost(
 	__in	efx_evq_t *eep,
 	__in	uint16_t data)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	efx_qword_t ev;
 	efx_oword_t oword;
 
 	EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
 	    FSF_AZ_EV_DATA_DW0, (uint32_t)data);
 
 	EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
 	    EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
 	    EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
 
 	EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
 }
 
 static	__checkReturn	efx_rc_t
 siena_ev_qmoderate(
 	__in		efx_evq_t *eep,
 	__in		unsigned int us)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
 	unsigned int locked;
 	efx_dword_t dword;
 	efx_rc_t rc;
 
 	if (us > encp->enc_evq_timer_max_us) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	/* If the value is zero then disable the timer */
 	if (us == 0) {
 		EFX_POPULATE_DWORD_2(dword,
 		    FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
 		    FRF_CZ_TC_TIMER_VAL, 0);
 	} else {
 		unsigned int ticks;
 
 		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
 			goto fail2;
 
 		EFSYS_ASSERT(ticks > 0);
 		EFX_POPULATE_DWORD_2(dword,
 		    FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
 		    FRF_CZ_TC_TIMER_VAL, ticks - 1);
 	}
 
 	locked = (eep->ee_index == 0) ? 1 : 0;
 
 	EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
 	    eep->ee_index, &dword, locked);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 siena_ev_qcreate(
 	__in		efx_nic_t *enp,
 	__in		unsigned int index,
 	__in		efsys_mem_t *esmp,
 	__in		size_t n,
 	__in		uint32_t id,
 	__in		uint32_t us,
 	__in		uint32_t flags,
 	__in		efx_evq_t *eep)
 {
 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
 	uint32_t size;
 	efx_oword_t oword;
 	efx_rc_t rc;
 	boolean_t notify_mode;
 
 	_NOTE(ARGUNUSED(esmp))
 
 	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
 	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
 
 	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 	if (index >= encp->enc_evq_limit) {
 		rc = EINVAL;
 		goto fail2;
 	}
 #if EFSYS_OPT_RX_SCALE
 	if (enp->en_intr.ei_type == EFX_INTR_LINE &&
 	    index >= EFX_MAXRSS_LEGACY) {
 		rc = EINVAL;
 		goto fail3;
 	}
 #endif
 	for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
 	    size++)
 		if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
 			break;
 	if (id + (1 << size) >= encp->enc_buftbl_limit) {
 		rc = EINVAL;
 		goto fail4;
 	}
 
 	/* Set up the handler table */
 	eep->ee_rx	= siena_ev_rx;
 	eep->ee_tx	= siena_ev_tx;
 	eep->ee_driver	= siena_ev_driver;
 	eep->ee_global	= siena_ev_global;
 	eep->ee_drv_gen	= siena_ev_drv_gen;
 #if EFSYS_OPT_MCDI
 	eep->ee_mcdi	= siena_ev_mcdi;
 #endif	/* EFSYS_OPT_MCDI */
 
 	notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
 	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
 
 	/* Set up the new event queue */
 	EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
 	    FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
 	    FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
 	EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
 
 	EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
 	    FRF_AZ_EVQ_BUF_BASE_ID, id);
 
 	EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
 
 	/* Set initial interrupt moderation */
 	siena_ev_qmoderate(eep, us);
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 #if EFSYS_OPT_RX_SCALE
 fail3:
 	EFSYS_PROBE(fail3);
 #endif
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
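 
 /*
  * A minimal sketch of the queue size encoding computed in siena_ev_qcreate()
  * above: the hardware field is the log2 of the entry count expressed in
  * multiples of the minimum queue size.  The helper name and parameters are
  * hypothetical; nevs is assumed to be a power-of-two multiple of minnevs,
  * as already enforced by the checks in that function.
  */
 static unsigned int
 example_evq_size_field(size_t nevs, size_t minnevs)
 {
 	unsigned int size = 0;
 
 	while (((size_t)1 << size) < (nevs / minnevs))
 		size++;
 
 	return (size);		/* e.g. nevs == 8 * minnevs -> size == 3 */
 }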
 
 #endif /* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_QSTATS
 #if EFSYS_OPT_NAMES
 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
 static const char * const __efx_ev_qstat_name[] = {
 	"all",
 	"rx",
 	"rx_ok",
 	"rx_frm_trunc",
 	"rx_tobe_disc",
 	"rx_pause_frm_err",
 	"rx_buf_owner_id_err",
 	"rx_ipv4_hdr_chksum_err",
 	"rx_tcp_udp_chksum_err",
 	"rx_eth_crc_err",
 	"rx_ip_frag_err",
 	"rx_mcast_pkt",
 	"rx_mcast_hash_match",
 	"rx_tcp_ipv4",
 	"rx_tcp_ipv6",
 	"rx_udp_ipv4",
 	"rx_udp_ipv6",
 	"rx_other_ipv4",
 	"rx_other_ipv6",
 	"rx_non_ip",
 	"rx_batch",
 	"tx",
 	"tx_wq_ff_full",
 	"tx_pkt_err",
 	"tx_pkt_too_big",
 	"tx_unexpected",
 	"global",
 	"global_mnt",
 	"driver",
 	"driver_srm_upd_done",
 	"driver_tx_descq_fls_done",
 	"driver_rx_descq_fls_done",
 	"driver_rx_descq_fls_failed",
 	"driver_rx_dsc_error",
 	"driver_tx_dsc_error",
 	"drv_gen",
 	"mcdi_response",
 };
 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
 
 		const char *
 efx_ev_qstat_name(
 	__in	efx_nic_t *enp,
 	__in	unsigned int id)
 {
+	_NOTE(ARGUNUSED(enp))
+
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(id, <, EV_NQSTATS);
 
 	return (__efx_ev_qstat_name[id]);
 }
 #endif	/* EFSYS_OPT_NAMES */
 #endif	/* EFSYS_OPT_QSTATS */
 
 #if EFSYS_OPT_SIENA
 
 #if EFSYS_OPT_QSTATS
 static					void
 siena_ev_qstats_update(
 	__in				efx_evq_t *eep,
 	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
 {
 	unsigned int id;
 
 	for (id = 0; id < EV_NQSTATS; id++) {
 		efsys_stat_t *essp = &stat[id];
 
 		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
 		eep->ee_stat[id] = 0;
 	}
 }
 #endif	/* EFSYS_OPT_QSTATS */
 
 static		void
 siena_ev_qdestroy(
 	__in	efx_evq_t *eep)
 {
 	efx_nic_t *enp = eep->ee_enp;
 	efx_oword_t oword;
 
 	/* Purge event queue */
 	EFX_ZERO_OWORD(oword);
 
 	EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
 	    eep->ee_index, &oword, B_TRUE);
 
 	EFX_ZERO_OWORD(oword);
 	EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
 }
 
 static		void
 siena_ev_fini(
 	__in	efx_nic_t *enp)
 {
 	_NOTE(ARGUNUSED(enp))
 }
 
 #endif /* EFSYS_OPT_SIENA */
Index: stable/12/sys/dev/sfxge/common/efx_lic.c
===================================================================
--- stable/12/sys/dev/sfxge/common/efx_lic.c	(revision 342323)
+++ stable/12/sys/dev/sfxge/common/efx_lic.c	(revision 342324)
@@ -1,1754 +1,1754 @@
 /*-
  * Copyright (c) 2009-2016 Solarflare Communications Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * The views and conclusions contained in the software and documentation are
  * those of the authors and should not be interpreted as representing official
  * policies, either expressed or implied, of the FreeBSD Project.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "efx.h"
 #include "efx_impl.h"
 
 #if EFSYS_OPT_LICENSING
 
 #include "ef10_tlv_layout.h"
 
 #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_find_start(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__out			uint32_t *startp
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_find_end(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *endp
 	);
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v1v2_find_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *startp,
 	__out			uint32_t *lengthp
 	);
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v1v2_validate_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_read_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__out_bcount_part(key_max_size, *lengthp)
 				caddr_t keyp,
 	__in			size_t key_max_size,
 	__out			uint32_t *lengthp
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_write_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length,
 	__out			uint32_t *lengthp
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_delete_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__in			uint32_t end,
 	__out			uint32_t *deltap
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_create_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_finish_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	);
 
 #endif	/* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
 
 
 #if EFSYS_OPT_SIENA
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_fc_license_update_license(
 	__in		efx_nic_t *enp);
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_fc_license_get_key_stats(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp);
 
 static const efx_lic_ops_t	__efx_lic_v1_ops = {
 	efx_mcdi_fc_license_update_license,	/* elo_update_licenses */
 	efx_mcdi_fc_license_get_key_stats,	/* elo_get_key_stats */
 	NULL,					/* elo_app_state */
 	NULL,					/* elo_get_id */
 	efx_lic_v1v2_find_start,		/* elo_find_start */
 	efx_lic_v1v2_find_end,			/* elo_find_end */
 	efx_lic_v1v2_find_key,			/* elo_find_key */
 	efx_lic_v1v2_validate_key,		/* elo_validate_key */
 	efx_lic_v1v2_read_key,			/* elo_read_key */
 	efx_lic_v1v2_write_key,			/* elo_write_key */
 	efx_lic_v1v2_delete_key,		/* elo_delete_key */
 	efx_lic_v1v2_create_partition,		/* elo_create_partition */
 	efx_lic_v1v2_finish_partition,		/* elo_finish_partition */
 };
 
 #endif	/* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_HUNTINGTON
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_update_licenses(
 	__in		efx_nic_t *enp);
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_get_key_stats(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp);
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensed_app_state(
 	__in		efx_nic_t *enp,
 	__in		uint64_t app_id,
 	__out		boolean_t *licensedp);
 
 static const efx_lic_ops_t	__efx_lic_v2_ops = {
 	efx_mcdi_licensing_update_licenses,	/* elo_update_licenses */
 	efx_mcdi_licensing_get_key_stats,	/* elo_get_key_stats */
 	efx_mcdi_licensed_app_state,		/* elo_app_state */
 	NULL,					/* elo_get_id */
 	efx_lic_v1v2_find_start,		/* elo_find_start */
 	efx_lic_v1v2_find_end,			/* elo_find_end */
 	efx_lic_v1v2_find_key,			/* elo_find_key */
 	efx_lic_v1v2_validate_key,		/* elo_validate_key */
 	efx_lic_v1v2_read_key,			/* elo_read_key */
 	efx_lic_v1v2_write_key,			/* elo_write_key */
 	efx_lic_v1v2_delete_key,		/* elo_delete_key */
 	efx_lic_v1v2_create_partition,		/* elo_create_partition */
 	efx_lic_v1v2_finish_partition,		/* elo_finish_partition */
 };
 
 #endif	/* EFSYS_OPT_HUNTINGTON */
 
 #if EFSYS_OPT_MEDFORD
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_update_licenses(
 	__in		efx_nic_t *enp);
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_report_license(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp);
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_app_state(
 	__in		efx_nic_t *enp,
 	__in		uint64_t app_id,
 	__out		boolean_t *licensedp);
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_get_id(
 	__in		efx_nic_t *enp,
 	__in		size_t buffer_size,
 	__out		uint32_t *typep,
 	__out		size_t *lengthp,
 	__out_bcount_part_opt(buffer_size, *lengthp)
 			uint8_t *bufferp);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_find_start(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__out			uint32_t *startp
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_find_end(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *endp
 	);
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v3_find_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *startp,
 	__out			uint32_t *lengthp
 	);
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v3_validate_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_read_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__out_bcount_part(key_max_size, *lengthp)
 				caddr_t keyp,
 	__in			size_t key_max_size,
 	__out			uint32_t *lengthp
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_write_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length,
 	__out			uint32_t *lengthp
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_delete_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__in			uint32_t end,
 	__out			uint32_t *deltap
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_create_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	);
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_finish_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	);
 
 static const efx_lic_ops_t	__efx_lic_v3_ops = {
 	efx_mcdi_licensing_v3_update_licenses,	/* elo_update_licenses */
 	efx_mcdi_licensing_v3_report_license,	/* elo_get_key_stats */
 	efx_mcdi_licensing_v3_app_state,	/* elo_app_state */
 	efx_mcdi_licensing_v3_get_id,		/* elo_get_id */
 	efx_lic_v3_find_start,			/* elo_find_start*/
 	efx_lic_v3_find_end,			/* elo_find_end */
 	efx_lic_v3_find_key,			/* elo_find_key */
 	efx_lic_v3_validate_key,		/* elo_validate_key */
 	efx_lic_v3_read_key,			/* elo_read_key */
 	efx_lic_v3_write_key,			/* elo_write_key */
 	efx_lic_v3_delete_key,			/* elo_delete_key */
 	efx_lic_v3_create_partition,		/* elo_create_partition */
 	efx_lic_v3_finish_partition,		/* elo_finish_partition */
 };
 
 #endif	/* EFSYS_OPT_MEDFORD */
 
 
 /* V1 Licensing - used in Siena Modena only */
 
 #if EFSYS_OPT_SIENA
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_fc_license_update_license(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_FC;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = 0;
 
 	MCDI_IN_SET_DWORD(req, FC_IN_CMD,
 	    MC_CMD_FC_OP_LICENSE);
 
 	MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
 	    MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used != 0) {
 		rc = EIO;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_fc_license_get_key_stats(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
 			    MC_CMD_FC_OUT_LICENSE_LEN)];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_FC;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
 
 	MCDI_IN_SET_DWORD(req, FC_IN_CMD,
 	    MC_CMD_FC_OP_LICENSE);
 
 	MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
 	    MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	eksp->eks_valid =
 		MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS);
 	eksp->eks_invalid =
 		MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS);
 	eksp->eks_blacklisted =
 		MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS);
 	eksp->eks_unverifiable = 0;
 	eksp->eks_wrong_node = 0;
 	eksp->eks_licensed_apps_lo = 0;
 	eksp->eks_licensed_apps_hi = 0;
 	eksp->eks_licensed_features_lo = 0;
 	eksp->eks_licensed_features_hi = 0;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #endif	/* EFSYS_OPT_SIENA */
 
 /* V1 and V2 Partition format - based on a 16-bit TLV format */
 
 #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
 
 /*
  * V1/V2 format - defined in SF-108542-TC section 4.2:
  *  Type (T):   16bit - revision/HMAC algorithm
  *  Length (L): 16bit - value length in bytes
  *  Value (V):  L bytes - payload
  */
 #define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX    (256)
 #define EFX_LICENSE_V1V2_HEADER_LENGTH         (2 * sizeof(uint16_t))
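 
 /*
  * A standalone sketch of how one V1/V2 entry header is parsed, matching the
  * format comment above.  The function name is hypothetical and the byte-wise
  * reads stand in for the __LE_TO_CPU_16() accesses used by the driver (see
  * efx_lic_v1v2_find_key() below).  Returns -1 if the buffer is too short,
  * 1 for the (0, 0) terminator, and 0 for an ordinary key header.
  */
 static int
 example_v1v2_parse_header(const uint8_t *entry, size_t avail,
     uint16_t *typep, uint16_t *lengthp)
 {
 	if (avail < EFX_LICENSE_V1V2_HEADER_LENGTH)
 		return (-1);
 
 	/* Type and length are 16-bit little-endian fields, back to back */
 	*typep = (uint16_t)(entry[0] | (entry[1] << 8));
 	*lengthp = (uint16_t)(entry[2] | (entry[3] << 8));
 
 	return ((*typep == 0 && *lengthp == 0) ? 1 : 0);
 }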
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_find_start(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__out			uint32_t *startp
 	)
 {
 	_NOTE(ARGUNUSED(enp, bufferp, buffer_size))
 
 	*startp = 0;
 	return (0);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_find_end(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *endp
 	)
 {
 	_NOTE(ARGUNUSED(enp, bufferp, buffer_size))
 
 	*endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
 	return (0);
 }
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v1v2_find_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *startp,
 	__out			uint32_t *lengthp
 	)
 {
 	boolean_t found;
 	uint16_t tlv_type;
 	uint16_t tlv_length;
 
 	_NOTE(ARGUNUSED(enp))
 
 	if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
 		goto fail1;
 
 	tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]);
 	tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]);
 	if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
 	    (tlv_type == 0 && tlv_length == 0)) {
 		found = B_FALSE;
 	} else {
 		*startp = offset;
 		*lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
 		found = B_TRUE;
 	}
 	return (found);
 
 fail1:
 	EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
 
 	return (B_FALSE);
 }
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v1v2_validate_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length
 	)
 {
 	uint16_t tlv_type;
 	uint16_t tlv_length;
 
 	_NOTE(ARGUNUSED(enp))
 
 	if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
 		goto fail1;
 	}
 
 	tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]);
 	tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]);
 
 	if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
 		goto fail2;
 	}
 	if (tlv_type == 0) {
 		goto fail3;
 	}
 	if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
 		goto fail4;
 	}
 
 	return (B_TRUE);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
 
 	return (B_FALSE);
 }
 
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_read_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__out_bcount_part(key_max_size, *lengthp)
 				caddr_t keyp,
 	__in			size_t key_max_size,
 	__out			uint32_t *lengthp
 	)
 {
 	efx_rc_t rc;
 
-	_NOTE(ARGUNUSED(enp))
+	_NOTE(ARGUNUSED(enp, buffer_size))
 	EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
 	    EFX_LICENSE_V1V2_HEADER_LENGTH));
 
 	if (key_max_size < length) {
 		rc = ENOSPC;
 		goto fail1;
 	}
 	memcpy(keyp, &bufferp[offset], length);
 
 	*lengthp = length;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_write_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length,
 	__out			uint32_t *lengthp
 	)
 {
 	efx_rc_t rc;
 
 	_NOTE(ARGUNUSED(enp))
 	EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
 	    EFX_LICENSE_V1V2_HEADER_LENGTH));
 
 	/* Ensure space for terminator remains */
 	if ((offset + length) >
 	    (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) {
 		rc = ENOSPC;
 		goto fail1;
 	}
 
 	memcpy(bufferp + offset, keyp, length);
 
 	*lengthp = length;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_delete_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__in			uint32_t end,
 	__out			uint32_t *deltap
 	)
 {
 	uint32_t move_start = offset + length;
 	uint32_t move_length = end - move_start;
 
-	_NOTE(ARGUNUSED(enp))
+	_NOTE(ARGUNUSED(enp, buffer_size))
 	EFSYS_ASSERT(end <= buffer_size);
 
 	/* Shift everything after the key down */
 	memmove(bufferp + offset, bufferp + move_start, move_length);
 
 	*deltap = length;
 
 	return (0);
 }
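 
 /*
  * The deletion above works purely by compaction.  A small standalone sketch
  * of the same idea, with hypothetical names: buf holds the partition image
  * and everything between the deleted key and end is shifted down over it.
  */
 static size_t
 example_delete_range(uint8_t *buf, size_t offset, size_t length, size_t end)
 {
 	/* Close the gap left by the deleted key */
 	memmove(buf + offset, buf + offset + length, end - (offset + length));
 
 	/* The caller shrinks its notion of the end position by this delta */
 	return (length);
 }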
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_create_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	)
 {
-	_NOTE(ARGUNUSED(enp))
+	_NOTE(ARGUNUSED(enp, buffer_size))
 	EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
 
 	/* Write terminator */
 	memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
 	return (0);
 }
 
 
 	__checkReturn		efx_rc_t
 efx_lic_v1v2_finish_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	)
 {
 	_NOTE(ARGUNUSED(enp, bufferp, buffer_size))
 
 	return (0);
 }
 
 #endif	/* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
 
 
 /* V2 Licensing - used by Huntington family only. See SF-113611-TC */
 
 #if EFSYS_OPT_HUNTINGTON
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensed_app_state(
 	__in		efx_nic_t *enp,
 	__in		uint64_t app_id,
 	__out		boolean_t *licensedp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
 			    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
 	uint32_t app_state;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
 
 	/* V2 licensing supports 32bit app id only */
 	if ((app_id >> 32) != 0) {
 		rc = EINVAL;
 		goto fail1;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID,
 		    app_id & 0xffffffff);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail2;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail3;
 	}
 
 	app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE));
 	if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) {
 		*licensedp = B_TRUE;
 	} else {
 		*licensedp = B_FALSE;
 	}
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_update_licenses(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MC_CMD_LICENSING_IN_LEN];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_LICENSING;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = 0;
 
 	MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
 	    MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used != 0) {
 		rc = EIO;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_get_key_stats(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
 			    MC_CMD_LICENSING_OUT_LEN)];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_LICENSING;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_LICENSING_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
 	    MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	eksp->eks_valid =
 		MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS);
 	eksp->eks_invalid =
 		MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS);
 	eksp->eks_blacklisted =
 		MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS);
 	eksp->eks_unverifiable =
 		MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS);
 	eksp->eks_wrong_node =
 		MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS);
 	eksp->eks_licensed_apps_lo = 0;
 	eksp->eks_licensed_apps_hi = 0;
 	eksp->eks_licensed_features_lo = 0;
 	eksp->eks_licensed_features_hi = 0;
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #endif	/* EFSYS_OPT_HUNTINGTON */
 
 /* V3 Licensing - used starting from Medford family. See SF-114884-SW */
 
 #if EFSYS_OPT_MEDFORD
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_update_licenses(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_LICENSING_V3;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
 	req.emr_out_buf = NULL;
 	req.emr_out_length = 0;
 
 	MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
 	    MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_report_license(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
 			    MC_CMD_LICENSING_V3_OUT_LEN)];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_LICENSING_V3;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
 	    MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	eksp->eks_valid =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS);
 	eksp->eks_invalid =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS);
 	eksp->eks_blacklisted = 0;
 	eksp->eks_unverifiable =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS);
 	eksp->eks_wrong_node =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS);
 	eksp->eks_licensed_apps_lo =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO);
 	eksp->eks_licensed_apps_hi =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI);
 	eksp->eks_licensed_features_lo =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO);
 	eksp->eks_licensed_features_hi =
 		MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_app_state(
 	__in		efx_nic_t *enp,
 	__in		uint64_t app_id,
 	__out		boolean_t *licensedp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
 			    MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
 	uint32_t app_state;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO,
 		    app_id & 0xffffffff);
 	MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI,
 		    app_id >> 32);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE));
 	if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) {
 		*licensedp = B_TRUE;
 	} else {
 		*licensedp = B_FALSE;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
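 
 /*
  * Unlike the V2 request, the V3 request above carries a full 64-bit
  * application id split across two 32-bit MCDI fields.  A tiny sketch of
  * that split, with hypothetical names:
  */
 static void
 example_split_app_id(uint64_t app_id, uint32_t *lop, uint32_t *hip)
 {
 	*lop = (uint32_t)(app_id & 0xffffffff);		/* ..._APP_ID_LO */
 	*hip = (uint32_t)(app_id >> 32);		/* ..._APP_ID_HI */
 }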
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_licensing_v3_get_id(
 	__in		efx_nic_t *enp,
 	__in		size_t buffer_size,
 	__out		uint32_t *typep,
 	__out		size_t *lengthp,
 	__out_bcount_part_opt(buffer_size, *lengthp)
 			uint8_t *bufferp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
 			    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
 	efx_rc_t rc;
 
 	req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
 
 	if (bufferp == NULL) {
 		/* Request id type and length only */
 		req.emr_in_buf = bufferp;
 		req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
 		req.emr_out_buf = bufferp;
 		req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
 		(void) memset(payload, 0, sizeof (payload));
 	} else {
 		/* Request full buffer */
 		req.emr_in_buf = bufferp;
 		req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
 		req.emr_out_buf = bufferp;
 		req.emr_out_length =
 		    MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
 		(void) memset(bufferp, 0, req.emr_out_length);
 	}
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	*typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
 	*lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
 
 	if (bufferp == NULL) {
 		/*
 		 * Modify the length requirements to indicate to the caller
 		 * the extra buffering needed to read the complete output.
 		 */
 		*lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
 	} else {
 		/* Shift ID down to start of buffer */
 		memmove(bufferp,
 		    bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
 		    *lengthp);
 		memset(bufferp + (*lengthp), 0,
 		    MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
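 
 /*
  * A sketch of the intended two-phase use of the operation above, going
  * through the public efx_lic_get_id() wrapper (defined later in this file):
  * first probe with a NULL buffer to learn the required length, then fetch
  * the id itself.  The helper name is hypothetical and licensing is assumed
  * to have been initialised with efx_lic_init().
  */
 static	__checkReturn	efx_rc_t
 example_read_license_id(
 	__in	efx_nic_t *enp,
 	__in	size_t bufsize,
 	__out	uint32_t *typep,
 	__out	size_t *lengthp,
 	__out	uint8_t *bufp)
 {
 	efx_rc_t rc;
 
 	/* Phase 1: probe for the id type and the total length required */
 	if ((rc = efx_lic_get_id(enp, 0, typep, lengthp, NULL)) != 0)
 		return (rc);
 
 	if (*lengthp > bufsize)
 		return (ENOSPC);
 
 	/* Phase 2: fetch the id into the caller's buffer */
 	return (efx_lic_get_id(enp, bufsize, typep, lengthp, bufp));
 }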
 
 /* V3 format uses Huntington TLV format partition. See SF-108797-SW */
 #define EFX_LICENSE_V3_KEY_LENGTH_MIN    (64)
 #define EFX_LICENSE_V3_KEY_LENGTH_MAX    (160)
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_find_start(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__out			uint32_t *startp
 	)
 {
 	_NOTE(ARGUNUSED(enp))
 
 	return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_find_end(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *endp
 	)
 {
 	_NOTE(ARGUNUSED(enp))
 
 	return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
 }
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v3_find_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *startp,
 	__out			uint32_t *lengthp
 	)
 {
 	_NOTE(ARGUNUSED(enp))
 
 	return ef10_nvram_buffer_find_item(bufferp, buffer_size,
 	    offset, startp, lengthp);
 }
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_v3_validate_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length
 	)
 {
 	/* Check key is a valid V3 key */
 	uint8_t key_type;
 	uint8_t key_length;
 
 	_NOTE(ARGUNUSED(enp))
 
 	if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
 		goto fail1;
 	}
 
 	if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
 		goto fail2;
 	}
 
 	key_type = ((uint8_t *)keyp)[0];
 	key_length = ((uint8_t *)keyp)[1];
 
 	if (key_type < 3) {
 		goto fail3;
 	}
 	if (key_length > length) {
 		goto fail4;
 	}
 	return (B_TRUE);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
 
 	return (B_FALSE);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_read_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__out_bcount_part(key_max_size, *lengthp)
 				caddr_t keyp,
 	__in			size_t key_max_size,
 	__out			uint32_t *lengthp
 	)
 {
 	_NOTE(ARGUNUSED(enp))
 
 	return ef10_nvram_buffer_get_item(bufferp, buffer_size,
 		    offset, length, keyp, key_max_size, lengthp);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_write_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length,
 	__out			uint32_t *lengthp
 	)
 {
 	_NOTE(ARGUNUSED(enp))
 	EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
 
 	return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
 		    offset, keyp, length, lengthp);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_delete_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__in			uint32_t end,
 	__out			uint32_t *deltap
 	)
 {
 	efx_rc_t rc;
 
 	_NOTE(ARGUNUSED(enp))
 
 	if ((rc = ef10_nvram_buffer_delete_item(bufferp,
 			buffer_size, offset, length, end)) != 0) {
 		goto fail1;
 	}
 
 	*deltap = length;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_create_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	)
 {
 	efx_rc_t rc;
 
 	/* Construct empty partition */
 	if ((rc = ef10_nvram_buffer_create(enp,
 	    NVRAM_PARTITION_TYPE_LICENSE,
 	    bufferp, buffer_size)) != 0) {
 		rc = EFAULT;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_v3_finish_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	)
 {
 	efx_rc_t rc;
 
 	if ((rc = ef10_nvram_buffer_finish(bufferp,
 			buffer_size)) != 0) {
 		goto fail1;
 	}
 
 	/* Validate completed partition */
 	if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
 					bufferp, buffer_size)) != 0) {
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 
 #endif	/* EFSYS_OPT_MEDFORD */
 
 	__checkReturn		efx_rc_t
 efx_lic_init(
 	__in			efx_nic_t *enp)
 {
 	const efx_lic_ops_t *elop;
 	efx_key_stats_t eks;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
 	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC));
 
 	switch (enp->en_family) {
 
 #if EFSYS_OPT_SIENA
 	case EFX_FAMILY_SIENA:
 		elop = &__efx_lic_v1_ops;
 		break;
 #endif	/* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_HUNTINGTON
 	case EFX_FAMILY_HUNTINGTON:
 		elop = &__efx_lic_v2_ops;
 		break;
 #endif	/* EFSYS_OPT_HUNTINGTON */
 
 #if EFSYS_OPT_MEDFORD
 	case EFX_FAMILY_MEDFORD:
 		elop = &__efx_lic_v3_ops;
 		break;
 #endif	/* EFSYS_OPT_MEDFORD */
 
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
 		goto fail1;
 	}
 
 	enp->en_elop = elop;
 	enp->en_mod_flags |= EFX_MOD_LIC;
 
 	/* Probe for support */
 	if (efx_lic_get_key_stats(enp, &eks) == 0) {
 		enp->en_licensing_supported = B_TRUE;
 	} else {
 		enp->en_licensing_supported = B_FALSE;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 extern	__checkReturn	boolean_t
 efx_lic_check_support(
 	__in			efx_nic_t *enp)
 {
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	return enp->en_licensing_supported;
 }
 
 				void
 efx_lic_fini(
 	__in			efx_nic_t *enp)
 {
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	enp->en_elop = NULL;
 	enp->en_mod_flags &= ~EFX_MOD_LIC;
 }
 
 
 	__checkReturn	efx_rc_t
 efx_lic_update_licenses(
 	__in		efx_nic_t *enp)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_update_licenses(enp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_lic_get_key_stats(
 	__in		efx_nic_t *enp,
 	__out		efx_key_stats_t *eksp)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_lic_app_state(
 	__in		efx_nic_t *enp,
 	__in		uint64_t app_id,
 	__out		boolean_t *licensedp)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if (elop->elo_app_state == NULL)
 		return (ENOTSUP);
 
 	if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_lic_get_id(
 	__in		efx_nic_t *enp,
 	__in		size_t buffer_size,
 	__out		uint32_t *typep,
 	__out		size_t *lengthp,
 	__out_opt	uint8_t *bufferp
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if (elop->elo_get_id == NULL)
 		return (ENOTSUP);
 
 	if ((rc = elop->elo_get_id(enp, buffer_size, typep,
 				    lengthp, bufferp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /* Buffer management API - abstracts varying TLV format used for License partition */
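 
 /*
  * A sketch of how these wrappers are meant to be combined to walk all of
  * the keys in a license partition image.  Error handling is minimal, the
  * function name is hypothetical, and the real callers live outside this
  * file; efx_lic_find_end() can then be used to locate the terminator once
  * the last key has been visited.
  */
 static void
 example_walk_license_keys(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size)
 {
 	uint32_t offset;
 	uint32_t key_start;
 	uint32_t key_length;
 
 	if (efx_lic_find_start(enp, bufferp, buffer_size, &offset) != 0)
 		return;
 
 	/* Visit each key in turn until no further key is found */
 	while (efx_lic_find_key(enp, bufferp, buffer_size, offset,
 	    &key_start, &key_length) != B_FALSE) {
 		/* ... read, validate or delete the key here ... */
 		offset = key_start + key_length;
 	}
 }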
 
 	__checkReturn		efx_rc_t
 efx_lic_find_start(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__out			uint32_t *startp
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_find_end(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *endp
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_find_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__out			uint32_t *startp,
 	__out			uint32_t *lengthp
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	EFSYS_ASSERT(bufferp);
 	EFSYS_ASSERT(startp);
 	EFSYS_ASSERT(lengthp);
 
 	return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
 				    startp, lengthp));
 }
 
 
 /*
  * Validate that the buffer contains a single key in a recognised format.
  * An empty or terminator buffer is not accepted as a valid key.
  */
 	__checkReturn	__success(return != B_FALSE)	boolean_t
 efx_lic_validate_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	boolean_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
 		goto fail1;
 
 	return (B_TRUE);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_read_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__out_bcount_part(key_max_size, *lengthp)
 				caddr_t keyp,
 	__in			size_t key_max_size,
 	__out			uint32_t *lengthp
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
 				    length, keyp, key_max_size, lengthp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_write_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in_bcount(length)	caddr_t keyp,
 	__in			uint32_t length,
 	__out			uint32_t *lengthp
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
 				    keyp, length, lengthp)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_delete_key(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size,
 	__in			uint32_t offset,
 	__in			uint32_t length,
 	__in			uint32_t end,
 	__out			uint32_t *deltap
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
 				    length, end, deltap)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_lic_create_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 
 	__checkReturn		efx_rc_t
 efx_lic_finish_partition(
 	__in			efx_nic_t *enp,
 	__in_bcount(buffer_size)
 				caddr_t bufferp,
 	__in			size_t buffer_size
 	)
 {
 	const efx_lic_ops_t *elop = enp->en_elop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
 
 	if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #endif	/* EFSYS_OPT_LICENSING */
Index: stable/12/sys/dev/sfxge/common/efx_mcdi.c
===================================================================
--- stable/12/sys/dev/sfxge/common/efx_mcdi.c	(revision 342323)
+++ stable/12/sys/dev/sfxge/common/efx_mcdi.c	(revision 342324)
@@ -1,2278 +1,2280 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2008-2016 Solarflare Communications Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * The views and conclusions contained in the software and documentation are
  * those of the authors and should not be interpreted as representing official
  * policies, either expressed or implied, of the FreeBSD Project.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "efx.h"
 #include "efx_impl.h"
 
 #if EFSYS_OPT_MCDI
 
 /*
  * There are three versions of the MCDI interface:
  *  - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
  *  - MCDIv1: Siena firmware and Huntington BootROM.
  *  - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
  *            Transport uses MCDIv2 headers.
  *
  * MCDIv2 Header NOT_EPOCH flag
  * ----------------------------
  * A new epoch begins at initial startup or after an MC reboot, and defines when
  * the MC should reject stale MCDI requests.
  *
  * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
  * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
  *
  * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
  * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
  */
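 
 /*
  * Illustrative sketch (editorial, not part of this change): a polled
  * transport typically drives a request through the functions below. The
  * timed_out() and delay_a_little() helpers are assumptions for
  * illustration only.
  *
  *	efx_mcdi_request_start(enp, emrp, B_FALSE);
  *	while (efx_mcdi_request_poll(enp) == B_FALSE) {
  *		if (timed_out()) {
  *			(void) efx_mcdi_request_abort(enp);
  *			break;
  *		}
  *		delay_a_little();
  *	}
  */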
 
 
 
 #if EFSYS_OPT_SIENA
 
 static const efx_mcdi_ops_t	__efx_mcdi_siena_ops = {
 	siena_mcdi_init,		/* emco_init */
 	siena_mcdi_send_request,	/* emco_send_request */
 	siena_mcdi_poll_reboot,		/* emco_poll_reboot */
 	siena_mcdi_poll_response,	/* emco_poll_response */
 	siena_mcdi_read_response,	/* emco_read_response */
 	siena_mcdi_fini,		/* emco_fini */
 	siena_mcdi_feature_supported,	/* emco_feature_supported */
 	siena_mcdi_get_timeout,		/* emco_get_timeout */
 };
 
 #endif	/* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 
 static const efx_mcdi_ops_t	__efx_mcdi_ef10_ops = {
 	ef10_mcdi_init,			/* emco_init */
 	ef10_mcdi_send_request,		/* emco_send_request */
 	ef10_mcdi_poll_reboot,		/* emco_poll_reboot */
 	ef10_mcdi_poll_response,	/* emco_poll_response */
 	ef10_mcdi_read_response,	/* emco_read_response */
 	ef10_mcdi_fini,			/* emco_fini */
 	ef10_mcdi_feature_supported,	/* emco_feature_supported */
 	ef10_mcdi_get_timeout,		/* emco_get_timeout */
 };
 
 #endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
 
 
 
 	__checkReturn	efx_rc_t
 efx_mcdi_init(
 	__in		efx_nic_t *enp,
 	__in		const efx_mcdi_transport_t *emtp)
 {
 	const efx_mcdi_ops_t *emcop;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
 
 	switch (enp->en_family) {
 #if EFSYS_OPT_SIENA
 	case EFX_FAMILY_SIENA:
 		emcop = &__efx_mcdi_siena_ops;
 		break;
 #endif	/* EFSYS_OPT_SIENA */
 
 #if EFSYS_OPT_HUNTINGTON
 	case EFX_FAMILY_HUNTINGTON:
 		emcop = &__efx_mcdi_ef10_ops;
 		break;
 #endif	/* EFSYS_OPT_HUNTINGTON */
 
 #if EFSYS_OPT_MEDFORD
 	case EFX_FAMILY_MEDFORD:
 		emcop = &__efx_mcdi_ef10_ops;
 		break;
 #endif	/* EFSYS_OPT_MEDFORD */
 
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
 		goto fail1;
 	}
 
 	if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
 		/* MCDI requires a DMA buffer in host memory */
 		if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
 			rc = EINVAL;
 			goto fail2;
 		}
 	}
 	enp->en_mcdi.em_emtp = emtp;
 
 	if (emcop != NULL && emcop->emco_init != NULL) {
 		if ((rc = emcop->emco_init(enp, emtp)) != 0)
 			goto fail3;
 	}
 
 	enp->en_mcdi.em_emcop = emcop;
 	enp->en_mod_flags |= EFX_MOD_MCDI;
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	enp->en_mcdi.em_emcop = NULL;
 	enp->en_mcdi.em_emtp = NULL;
 	enp->en_mod_flags &= ~EFX_MOD_MCDI;
 
 	return (rc);
 }
 
 			void
 efx_mcdi_fini(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
 
 	if (emcop != NULL && emcop->emco_fini != NULL)
 		emcop->emco_fini(enp);
 
 	emip->emi_port = 0;
 	emip->emi_aborted = 0;
 
 	enp->en_mcdi.em_emcop = NULL;
 	enp->en_mod_flags &= ~EFX_MOD_MCDI;
 }
 
 			void
 efx_mcdi_new_epoch(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	efsys_lock_state_t state;
 
 	/* Start a new epoch (allow fresh MCDI requests to succeed) */
 	EFSYS_LOCK(enp->en_eslp, state);
 	emip->emi_new_epoch = B_TRUE;
 	EFSYS_UNLOCK(enp->en_eslp, state);
 }
 
 static			void
 efx_mcdi_send_request(
 	__in		efx_nic_t *enp,
 	__in		void *hdrp,
 	__in		size_t hdr_len,
 	__in		void *sdup,
 	__in		size_t sdu_len)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 
 	emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
 }
 
 static			efx_rc_t
 efx_mcdi_poll_reboot(
 	__in		efx_nic_t *enp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 	efx_rc_t rc;
 
 	rc = emcop->emco_poll_reboot(enp);
 	return (rc);
 }
 
 static			boolean_t
 efx_mcdi_poll_response(
 	__in		efx_nic_t *enp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 	boolean_t available;
 
 	available = emcop->emco_poll_response(enp);
 	return (available);
 }
 
 static			void
 efx_mcdi_read_response(
 	__in		efx_nic_t *enp,
 	__out		void *bufferp,
 	__in		size_t offset,
 	__in		size_t length)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 
 	emcop->emco_read_response(enp, bufferp, offset, length);
 }
 
 			void
 efx_mcdi_request_start(
 	__in		efx_nic_t *enp,
 	__in		efx_mcdi_req_t *emrp,
 	__in		boolean_t ev_cpl)
 {
 #if EFSYS_OPT_MCDI_LOGGING
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 #endif
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	efx_dword_t hdr[2];
 	size_t hdr_len;
 	unsigned int max_version;
 	unsigned int seq;
 	unsigned int xflags;
 	boolean_t new_epoch;
 	efsys_lock_state_t state;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	/*
 	 * efx_mcdi_request_start() is naturally serialised against both
 	 * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
 	 * by virtue of there only being one outstanding MCDI request.
 	 * Unfortunately, upper layers may also call efx_mcdi_request_abort()
 	 * at any time, to time out a pending MCDI request. That request may
 	 * then subsequently complete, meaning efx_mcdi_ev_cpl() or
 	 * efx_mcdi_ev_death() may end up running in parallel with
 	 * efx_mcdi_request_start(). This race is handled by ensuring that
 	 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
 	 * en_eslp lock.
 	 */
 	EFSYS_LOCK(enp->en_eslp, state);
 	EFSYS_ASSERT(emip->emi_pending_req == NULL);
 	emip->emi_pending_req = emrp;
 	emip->emi_ev_cpl = ev_cpl;
 	emip->emi_poll_cnt = 0;
 	seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
 	new_epoch = emip->emi_new_epoch;
 	max_version = emip->emi_max_version;
 	EFSYS_UNLOCK(enp->en_eslp, state);
 
 	xflags = 0;
 	if (ev_cpl)
 		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
 
 	/*
 	 * Huntington firmware supports MCDIv2, but the Huntington BootROM only
 	 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
 	 * possible to support this.
 	 */
 	if ((max_version >= 2) &&
 	    ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
 	    (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
 		/* Construct MCDI v2 header */
 		hdr_len = sizeof (hdr);
 		EFX_POPULATE_DWORD_8(hdr[0],
 		    MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
 		    MCDI_HEADER_RESYNC, 1,
 		    MCDI_HEADER_DATALEN, 0,
 		    MCDI_HEADER_SEQ, seq,
 		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
 		    MCDI_HEADER_ERROR, 0,
 		    MCDI_HEADER_RESPONSE, 0,
 		    MCDI_HEADER_XFLAGS, xflags);
 
 		EFX_POPULATE_DWORD_2(hdr[1],
 		    MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
 		    MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
 	} else {
 		/* Construct MCDI v1 header */
 		hdr_len = sizeof (hdr[0]);
 		EFX_POPULATE_DWORD_8(hdr[0],
 		    MCDI_HEADER_CODE, emrp->emr_cmd,
 		    MCDI_HEADER_RESYNC, 1,
 		    MCDI_HEADER_DATALEN, emrp->emr_in_length,
 		    MCDI_HEADER_SEQ, seq,
 		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
 		    MCDI_HEADER_ERROR, 0,
 		    MCDI_HEADER_RESPONSE, 0,
 		    MCDI_HEADER_XFLAGS, xflags);
 	}
 
 #if EFSYS_OPT_MCDI_LOGGING
 	if (emtp->emt_logger != NULL) {
 		emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
 		    &hdr, hdr_len,
 		    emrp->emr_in_buf, emrp->emr_in_length);
 	}
 #endif /* EFSYS_OPT_MCDI_LOGGING */
 
 	efx_mcdi_send_request(enp, &hdr[0], hdr_len,
 	    emrp->emr_in_buf, emrp->emr_in_length);
 }
 
 
 static			void
 efx_mcdi_read_response_header(
 	__in		efx_nic_t *enp,
 	__inout		efx_mcdi_req_t *emrp)
 {
 #if EFSYS_OPT_MCDI_LOGGING
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 #endif /* EFSYS_OPT_MCDI_LOGGING */
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	efx_dword_t hdr[2];
 	unsigned int hdr_len;
 	unsigned int data_len;
 	unsigned int seq;
 	unsigned int cmd;
 	unsigned int error;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT(emrp != NULL);
 
 	efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
 	hdr_len = sizeof (hdr[0]);
 
 	cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
 	seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
 	error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
 
 	if (cmd != MC_CMD_V2_EXTN) {
 		data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
 	} else {
 		efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
 		hdr_len += sizeof (hdr[1]);
 
 		cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
 		data_len =
 		    EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
 	}
 
 	if (error && (data_len == 0)) {
 		/* The MC has rebooted since the request was sent. */
 		EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
 		efx_mcdi_poll_reboot(enp);
 		rc = EIO;
 		goto fail1;
 	}
 	if ((cmd != emrp->emr_cmd) ||
 	    (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
 		/* Response is for a different request */
 		rc = EIO;
 		goto fail2;
 	}
 	if (error) {
 		efx_dword_t err[2];
 		unsigned int err_len = MIN(data_len, sizeof (err));
 		int err_code = MC_CMD_ERR_EPROTO;
 		int err_arg = 0;
 
 		/* Read error code (and arg num for MCDI v2 commands) */
 		efx_mcdi_read_response(enp, &err, hdr_len, err_len);
 
 		if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
 			err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
 #ifdef WITH_MCDI_V2
 		if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
 			err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
 #endif
 		emrp->emr_err_code = err_code;
 		emrp->emr_err_arg = err_arg;
 
 #if EFSYS_OPT_MCDI_PROXY_AUTH
 		if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
 		    (err_len == sizeof (err))) {
 			/*
 			 * The MCDI request would normally fail with EPERM, but
 			 * firmware has forwarded it to an authorization agent
 			 * attached to a privileged PF.
 			 *
 			 * Save the authorization request handle. The client
 			 * must wait for a PROXY_RESPONSE event, or timeout.
 			 */
 			emrp->emr_proxy_handle = err_arg;
 		}
 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
 
 #if EFSYS_OPT_MCDI_LOGGING
 		if (emtp->emt_logger != NULL) {
 			emtp->emt_logger(emtp->emt_context,
 			    EFX_LOG_MCDI_RESPONSE,
 			    &hdr, hdr_len,
 			    &err, err_len);
 		}
 #endif /* EFSYS_OPT_MCDI_LOGGING */
 
 		if (!emrp->emr_quiet) {
 			EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
 			    int, err_code, int, err_arg);
 		}
 
 		rc = efx_mcdi_request_errcode(err_code);
 		goto fail3;
 	}
 
 	emrp->emr_rc = 0;
 	emrp->emr_out_length_used = data_len;
 #if EFSYS_OPT_MCDI_PROXY_AUTH
 	emrp->emr_proxy_handle = 0;
 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
 	return;
 
 fail3:
 fail2:
 fail1:
 	emrp->emr_rc = rc;
 	emrp->emr_out_length_used = 0;
 }
 
 static			void
 efx_mcdi_finish_response(
 	__in		efx_nic_t *enp,
 	__in		efx_mcdi_req_t *emrp)
 {
 #if EFSYS_OPT_MCDI_LOGGING
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 #endif /* EFSYS_OPT_MCDI_LOGGING */
 	efx_dword_t hdr[2];
 	unsigned int hdr_len;
 	size_t bytes;
 
 	if (emrp->emr_out_buf == NULL)
 		return;
 
 	/* Read the command header to detect MCDI response format */
 	hdr_len = sizeof (hdr[0]);
 	efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
 	if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
 		/*
 		 * Read the actual payload length. The length given in the event
 		 * is only correct for responses with the V1 format.
 		 */
 		efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
 		hdr_len += sizeof (hdr[1]);
 
 		emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
 					    MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
 	}
 
 	/* Copy payload out into caller supplied buffer */
 	bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
 	efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes);
 
 #if EFSYS_OPT_MCDI_LOGGING
 	if (emtp->emt_logger != NULL) {
 		emtp->emt_logger(emtp->emt_context,
 		    EFX_LOG_MCDI_RESPONSE,
 		    &hdr, hdr_len,
 		    emrp->emr_out_buf, bytes);
 	}
 #endif /* EFSYS_OPT_MCDI_LOGGING */
 }
 
 
 	__checkReturn	boolean_t
 efx_mcdi_request_poll(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	efx_mcdi_req_t *emrp;
 	efsys_lock_state_t state;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	/* Serialise against post-watchdog efx_mcdi_ev* */
 	EFSYS_LOCK(enp->en_eslp, state);
 
 	EFSYS_ASSERT(emip->emi_pending_req != NULL);
 	EFSYS_ASSERT(!emip->emi_ev_cpl);
 	emrp = emip->emi_pending_req;
 
 	/* Check for reboot atomically w.r.t efx_mcdi_request_start */
 	if (emip->emi_poll_cnt++ == 0) {
 		if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
 			emip->emi_pending_req = NULL;
 			EFSYS_UNLOCK(enp->en_eslp, state);
 
 			/* Reboot/Assertion */
 			if (rc == EIO || rc == EINTR)
 				efx_mcdi_raise_exception(enp, emrp, rc);
 
 			goto fail1;
 		}
 	}
 
 	/* Check if a response is available */
 	if (efx_mcdi_poll_response(enp) == B_FALSE) {
 		EFSYS_UNLOCK(enp->en_eslp, state);
 		return (B_FALSE);
 	}
 
 	/* Read the response header */
 	efx_mcdi_read_response_header(enp, emrp);
 
 	/* Request complete */
 	emip->emi_pending_req = NULL;
 
 	/* Ensure stale MCDI requests fail after an MC reboot. */
 	emip->emi_new_epoch = B_FALSE;
 
 	EFSYS_UNLOCK(enp->en_eslp, state);
 
 	if ((rc = emrp->emr_rc) != 0)
 		goto fail2;
 
 	efx_mcdi_finish_response(enp, emrp);
 	return (B_TRUE);
 
 fail2:
 	if (!emrp->emr_quiet)
 		EFSYS_PROBE(fail2);
 fail1:
 	if (!emrp->emr_quiet)
 		EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (B_TRUE);
 }
 
 	__checkReturn	boolean_t
 efx_mcdi_request_abort(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	efx_mcdi_req_t *emrp;
 	boolean_t aborted;
 	efsys_lock_state_t state;
 
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	/*
 	 * efx_mcdi_ev_* may have already completed this event, and be
 	 * spinning/blocked on the upper layer lock. So it *is* legitimate
 	 * for emi_pending_req to be NULL. If there is a pending
 	 * event-completed request, then provide a "credit" to allow
 	 * efx_mcdi_ev_cpl() to accept a single spurious completion.
 	 */
 	EFSYS_LOCK(enp->en_eslp, state);
 	emrp = emip->emi_pending_req;
 	aborted = (emrp != NULL);
 	if (aborted) {
 		emip->emi_pending_req = NULL;
 
 		/* Error the request */
 		emrp->emr_out_length_used = 0;
 		emrp->emr_rc = ETIMEDOUT;
 
 		/* Provide a credit for seqno/emi_pending_req mismatches */
 		if (emip->emi_ev_cpl)
 			++emip->emi_aborted;
 
 		/*
 		 * The upper layer has called us, so we don't
 		 * need to complete the request.
 		 */
 	}
 	EFSYS_UNLOCK(enp->en_eslp, state);
 
 	return (aborted);
 }
 
 			void
 efx_mcdi_get_timeout(
 	__in		efx_nic_t *enp,
 	__in		efx_mcdi_req_t *emrp,
 	__out		uint32_t *timeoutp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 
 	emcop->emco_get_timeout(enp, emrp, timeoutp);
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_request_errcode(
 	__in		unsigned int err)
 {
 
 	switch (err) {
 		/* MCDI v1 */
 	case MC_CMD_ERR_EPERM:
 		return (EACCES);
 	case MC_CMD_ERR_ENOENT:
 		return (ENOENT);
 	case MC_CMD_ERR_EINTR:
 		return (EINTR);
 	case MC_CMD_ERR_EACCES:
 		return (EACCES);
 	case MC_CMD_ERR_EBUSY:
 		return (EBUSY);
 	case MC_CMD_ERR_EINVAL:
 		return (EINVAL);
 	case MC_CMD_ERR_EDEADLK:
 		return (EDEADLK);
 	case MC_CMD_ERR_ENOSYS:
 		return (ENOTSUP);
 	case MC_CMD_ERR_ETIME:
 		return (ETIMEDOUT);
 	case MC_CMD_ERR_ENOTSUP:
 		return (ENOTSUP);
 	case MC_CMD_ERR_EALREADY:
 		return (EALREADY);
 
 		/* MCDI v2 */
 	case MC_CMD_ERR_EEXIST:
 		return (EEXIST);
 #ifdef MC_CMD_ERR_EAGAIN
 	case MC_CMD_ERR_EAGAIN:
 		return (EAGAIN);
 #endif
 #ifdef MC_CMD_ERR_ENOSPC
 	case MC_CMD_ERR_ENOSPC:
 		return (ENOSPC);
 #endif
 	case MC_CMD_ERR_ERANGE:
 		return (ERANGE);
 
 	case MC_CMD_ERR_ALLOC_FAIL:
 		return (ENOMEM);
 	case MC_CMD_ERR_NO_VADAPTOR:
 		return (ENOENT);
 	case MC_CMD_ERR_NO_EVB_PORT:
 		return (ENOENT);
 	case MC_CMD_ERR_NO_VSWITCH:
 		return (ENODEV);
 	case MC_CMD_ERR_VLAN_LIMIT:
 		return (EINVAL);
 	case MC_CMD_ERR_BAD_PCI_FUNC:
 		return (ENODEV);
 	case MC_CMD_ERR_BAD_VLAN_MODE:
 		return (EINVAL);
 	case MC_CMD_ERR_BAD_VSWITCH_TYPE:
 		return (EINVAL);
 	case MC_CMD_ERR_BAD_VPORT_TYPE:
 		return (EINVAL);
 	case MC_CMD_ERR_MAC_EXIST:
 		return (EEXIST);
 
 	case MC_CMD_ERR_PROXY_PENDING:
 		return (EAGAIN);
 
 	default:
 		EFSYS_PROBE1(mc_pcol_error, int, err);
 		return (EIO);
 	}
 }
 
 			void
 efx_mcdi_raise_exception(
 	__in		efx_nic_t *enp,
 	__in_opt	efx_mcdi_req_t *emrp,
 	__in		int rc)
 {
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 	efx_mcdi_exception_t exception;
 
 	/* Reboot or Assertion failure only */
 	EFSYS_ASSERT(rc == EIO || rc == EINTR);
 
 	/*
 	 * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
 	 * then the EIO is not worthy of an exception.
 	 */
 	if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
 		return;
 
 	exception = (rc == EIO)
 		? EFX_MCDI_EXCEPTION_MC_REBOOT
 		: EFX_MCDI_EXCEPTION_MC_BADASSERT;
 
 	emtp->emt_exception(emtp->emt_context, exception);
 }
 
 			void
 efx_mcdi_execute(
 	__in		efx_nic_t *enp,
 	__inout		efx_mcdi_req_t *emrp)
 {
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	emrp->emr_quiet = B_FALSE;
 	emtp->emt_execute(emtp->emt_context, emrp);
 }
 
 			void
 efx_mcdi_execute_quiet(
 	__in		efx_nic_t *enp,
 	__inout		efx_mcdi_req_t *emrp)
 {
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	emrp->emr_quiet = B_TRUE;
 	emtp->emt_execute(emtp->emt_context, emrp);
 }
 
 			void
 efx_mcdi_ev_cpl(
 	__in		efx_nic_t *enp,
 	__in		unsigned int seq,
 	__in		unsigned int outlen,
 	__in		int errcode)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 	efx_mcdi_req_t *emrp;
 	efsys_lock_state_t state;
 
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	/*
 	 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
 	 * when we're completing an aborted request.
 	 */
 	EFSYS_LOCK(enp->en_eslp, state);
 	if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
 	    (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
 		EFSYS_ASSERT(emip->emi_aborted > 0);
 		if (emip->emi_aborted > 0)
 			--emip->emi_aborted;
 		EFSYS_UNLOCK(enp->en_eslp, state);
 		return;
 	}
 
 	emrp = emip->emi_pending_req;
 	emip->emi_pending_req = NULL;
 	EFSYS_UNLOCK(enp->en_eslp, state);
 
 	if (emip->emi_max_version >= 2) {
 		/* MCDIv2 response details do not fit into an event. */
 		efx_mcdi_read_response_header(enp, emrp);
 	} else {
 		if (errcode != 0) {
 			if (!emrp->emr_quiet) {
 				EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
 				    int, errcode);
 			}
 			emrp->emr_out_length_used = 0;
 			emrp->emr_rc = efx_mcdi_request_errcode(errcode);
 		} else {
 			emrp->emr_out_length_used = outlen;
 			emrp->emr_rc = 0;
 		}
 	}
 	if (errcode == 0) {
 		efx_mcdi_finish_response(enp, emrp);
 	}
 
 	emtp->emt_ev_cpl(emtp->emt_context);
 }
 
 #if EFSYS_OPT_MCDI_PROXY_AUTH
 
 	__checkReturn	efx_rc_t
 efx_mcdi_get_proxy_handle(
 	__in		efx_nic_t *enp,
 	__in		efx_mcdi_req_t *emrp,
 	__out		uint32_t *handlep)
 {
 	efx_rc_t rc;
 
+	_NOTE(ARGUNUSED(enp))
+
 	/*
 	 * Return the proxy handle from an MCDI request that failed with error
 	 * MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a matching
 	 * PROXY_RESPONSE event.
 	 */
 	if ((emrp == NULL) || (handlep == NULL)) {
 		rc = EINVAL;
 		goto fail1;
 	}
 	if ((emrp->emr_rc != 0) &&
 	    (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
 		*handlep = emrp->emr_proxy_handle;
 		rc = 0;
 	} else {
 		*handlep = 0;
 		rc = ENOENT;
 	}
 	return (rc);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 	return (rc);
 }
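 
 /*
  * Illustrative sketch (editorial, not part of this change): after a request
  * completes with MC_CMD_ERR_PROXY_PENDING, a caller might fetch the handle
  * and wait for a PROXY_RESPONSE event carrying the same handle. The
  * wait_for_proxy_response() helper is an assumption.
  *
  *	uint32_t handle;
  *
  *	if (efx_mcdi_get_proxy_handle(enp, emrp, &handle) == 0)
  *		wait_for_proxy_response(handle);
  */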
 
 			void
 efx_mcdi_ev_proxy_response(
 	__in		efx_nic_t *enp,
 	__in		unsigned int handle,
 	__in		unsigned int status)
 {
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 	efx_rc_t rc;
 
 	/*
 	 * Handle results of an authorization request for a privileged MCDI
 	 * command. If authorization was granted then we must re-issue the
 	 * original MCDI request. If authorization failed or timed out,
 	 * then the original MCDI request should be completed with the
 	 * result code from this event.
 	 */
 	rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
 
 	emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
 }
 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
 
 			void
 efx_mcdi_ev_death(
 	__in		efx_nic_t *enp,
 	__in		int rc)
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
 	efx_mcdi_req_t *emrp = NULL;
 	boolean_t ev_cpl;
 	efsys_lock_state_t state;
 
 	/*
 	 * The MCDI request (if there is one) has been terminated, either
 	 * by a BADASSERT or REBOOT event.
 	 *
 	 * If there is an outstanding event-completed MCDI operation, then we
 	 * will never receive the completion event (because both MCDI
 	 * completions and BADASSERT events are sent to the same evq). So
 	 * complete this MCDI op.
 	 *
 	 * This function might run in parallel with efx_mcdi_request_poll()
 	 * for poll-completed MCDI requests, and also with
 	 * efx_mcdi_request_start() for post-watchdog completions.
 	 */
 	EFSYS_LOCK(enp->en_eslp, state);
 	emrp = emip->emi_pending_req;
 	ev_cpl = emip->emi_ev_cpl;
 	if (emrp != NULL && emip->emi_ev_cpl) {
 		emip->emi_pending_req = NULL;
 
 		emrp->emr_out_length_used = 0;
 		emrp->emr_rc = rc;
 		++emip->emi_aborted;
 	}
 
 	/*
 	 * Since we're running in parallel with a request, consume the
 	 * status word before dropping the lock.
 	 */
 	if (rc == EIO || rc == EINTR) {
 		EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
 		(void) efx_mcdi_poll_reboot(enp);
 		emip->emi_new_epoch = B_TRUE;
 	}
 
 	EFSYS_UNLOCK(enp->en_eslp, state);
 
 	efx_mcdi_raise_exception(enp, emrp, rc);
 
 	if (emrp != NULL && ev_cpl)
 		emtp->emt_ev_cpl(emtp->emt_context);
 }
 
 	__checkReturn		efx_rc_t
 efx_mcdi_version(
 	__in			efx_nic_t *enp,
 	__out_ecount_opt(4)	uint16_t versionp[4],
 	__out_opt		uint32_t *buildp,
 	__out_opt		efx_mcdi_boot_t *statusp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
 				MC_CMD_GET_VERSION_OUT_LEN),
 			    MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
 				MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
 	efx_word_t *ver_words;
 	uint16_t version[4];
 	uint32_t build;
 	efx_mcdi_boot_t status;
 	efx_rc_t rc;
 
 	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_VERSION;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	/* bootrom support */
 	if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
 		version[0] = version[1] = version[2] = version[3] = 0;
 		build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
 
 		goto version;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
 	version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
 	version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
 	version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
 	version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
 	build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
 
 version:
 	/* The bootrom doesn't understand BOOT_STATUS */
 	if (MC_FW_VERSION_IS_BOOTLOADER(build)) {
 		status = EFX_MCDI_BOOT_ROM;
 		goto out;
 	}
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc == EACCES) {
 		/* Unprivileged functions cannot access BOOT_STATUS */
 		status = EFX_MCDI_BOOT_PRIMARY;
 		version[0] = version[1] = version[2] = version[3] = 0;
 		build = 0;
 		goto out;
 	}
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail3;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail4;
 	}
 
 	if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
 	    GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
 		status = EFX_MCDI_BOOT_PRIMARY;
 	else
 		status = EFX_MCDI_BOOT_SECONDARY;
 
 out:
 	if (versionp != NULL)
 		memcpy(versionp, version, sizeof (version));
 	if (buildp != NULL)
 		*buildp = build;
 	if (statusp != NULL)
 		*statusp = status;
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
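 
 /*
  * Illustrative sketch (editorial, not part of this change): reading the
  * firmware version and boot status with the function above. The
  * report_fw_version() helper is an assumption.
  *
  *	uint16_t version[4];
  *	uint32_t build;
  *	efx_mcdi_boot_t status;
  *
  *	if (efx_mcdi_version(enp, version, &build, &status) == 0)
  *		report_fw_version(version, build, status);
  */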
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_do_reboot(
 	__in		efx_nic_t *enp,
 	__in		boolean_t after_assertion)
 {
 	uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
 	efx_mcdi_req_t req;
 	efx_rc_t rc;
 
 	/*
 	 * We could require the caller to have caused en_mod_flags=0 to
 	 * call this function. This doesn't help the other port though,
 	 * which is about to get the MC ripped out from underneath it.
 	 * Since it has to cope with the subsequent fallout of MCDI
 	 * failures, we should as well.
 	 */
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_REBOOT;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
 	    (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc == EACCES) {
 		/* Unprivileged functions cannot reboot the MC. */
 		goto out;
 	}
 
 	/* A successful reboot request returns EIO. */
 	if (req.emr_rc != 0 && req.emr_rc != EIO) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 out:
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_reboot(
 	__in		efx_nic_t *enp)
 {
 	return (efx_mcdi_do_reboot(enp, B_FALSE));
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_exit_assertion_handler(
 	__in		efx_nic_t *enp)
 {
 	return (efx_mcdi_do_reboot(enp, B_TRUE));
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_read_assertion(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
 			    MC_CMD_GET_ASSERTS_OUT_LEN)];
 	const char *reason;
 	unsigned int flags;
 	unsigned int index;
 	unsigned int ofst;
 	int retry;
 	efx_rc_t rc;
 
 	/*
 	 * Before we attempt to chat to the MC, we should verify that the MC
 	 * isn't in its assertion handler, either due to a previous reboot,
 	 * or because we're reinitializing due to an eec_exception().
 	 *
 	 * Use GET_ASSERTS to read any assertion state that may be present.
 	 * Retry this command twice: once because a boot-time assertion failure
 	 * might cause the first MCDI request to fail, and again because we
 	 * might race with efx_mcdi_exit_assertion_handler() running on the
 	 * partner port(s) of the same NIC.
 	 */
 	retry = 2;
 	do {
 		(void) memset(payload, 0, sizeof (payload));
 		req.emr_cmd = MC_CMD_GET_ASSERTS;
 		req.emr_in_buf = payload;
 		req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
 		req.emr_out_buf = payload;
 		req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
 
 		MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
 		efx_mcdi_execute_quiet(enp, &req);
 
 	} while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
 
 	if (req.emr_rc != 0) {
 		if (req.emr_rc == EACCES) {
 			/* Unprivileged functions cannot clear assertions. */
 			goto out;
 		}
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	/* Print out any assertion state recorded */
 	flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
 	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
 		return (0);
 
 	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
 		? "system-level assertion"
 		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
 		? "thread-level assertion"
 		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
 		? "watchdog reset"
 		: (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
 		? "illegal address trap"
 		: "unknown assertion";
 	EFSYS_PROBE3(mcpu_assertion,
 	    const char *, reason, unsigned int,
 	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
 	    unsigned int,
 	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
 
 	/* Print out the registers (r1 ... r31) */
 	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
 	for (index = 1;
 		index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
 		index++) {
 		EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
 			    EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
 					    EFX_DWORD_0));
 		ofst += sizeof (efx_dword_t);
 	}
 	EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
 
 out:
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
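 
 /*
  * Illustrative sketch (editorial, not part of this change): a reset path
  * might read out any assertion state and then take the MC out of its
  * assertion handler; the ordering shown is an assumption.
  *
  *	if (efx_mcdi_read_assertion(enp) != 0)
  *		return;
  *	if (efx_mcdi_exit_assertion_handler(enp) != 0)
  *		return;
  */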
 
 
 /*
  * Internal routines for specific MCDI requests.
  */
 
 	__checkReturn	efx_rc_t
 efx_mcdi_drv_attach(
 	__in		efx_nic_t *enp,
 	__in		boolean_t attach)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
 			    MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_DRV_ATTACH;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
 
 	/*
 	 * Use DONT_CARE for the datapath firmware type to ensure that the
 	 * driver can attach to an unprivileged function. The datapath firmware
 	 * type to use is controlled by the 'sfboot' utility.
 	 */
 	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
 	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
 	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_mcdi_get_board_cfg(
 	__in			efx_nic_t *enp,
 	__out_opt		uint32_t *board_typep,
 	__out_opt		efx_dword_t *capabilitiesp,
 	__out_ecount_opt(6)	uint8_t mac_addrp[6])
 {
 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
 			    MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_BOARD_CFG;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	if (mac_addrp != NULL) {
 		uint8_t *addrp;
 
 		if (emip->emi_port == 1) {
 			addrp = MCDI_OUT2(req, uint8_t,
 			    GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
 		} else if (emip->emi_port == 2) {
 			addrp = MCDI_OUT2(req, uint8_t,
 			    GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
 		} else {
 			rc = EINVAL;
 			goto fail3;
 		}
 
 		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
 	}
 
 	if (capabilitiesp != NULL) {
 		if (emip->emi_port == 1) {
 			*capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
 			    GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
 		} else if (emip->emi_port == 2) {
 			*capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
 			    GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
 		} else {
 			rc = EINVAL;
 			goto fail4;
 		}
 	}
 
 	if (board_typep != NULL) {
 		*board_typep = MCDI_OUT_DWORD(req,
 		    GET_BOARD_CFG_OUT_BOARD_TYPE);
 	}
 
 	return (0);
 
 fail4:
 	EFSYS_PROBE(fail4);
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_get_resource_limits(
 	__in		efx_nic_t *enp,
 	__out_opt	uint32_t *nevqp,
 	__out_opt	uint32_t *nrxqp,
 	__out_opt	uint32_t *ntxqp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
 			    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	if (nevqp != NULL)
 		*nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
 	if (nrxqp != NULL)
 		*nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
 	if (ntxqp != NULL)
 		*ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
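 
 /*
  * Illustrative sketch (editorial, not part of this change): querying the
  * event, RX and TX queue limits with the function above.
  *
  *	uint32_t nevq, nrxq, ntxq;
  *
  *	if (efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq) != 0)
  *		return;
  */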
 
 	__checkReturn	efx_rc_t
 efx_mcdi_get_phy_cfg(
 	__in		efx_nic_t *enp)
 {
 	efx_port_t *epp = &(enp->en_port);
 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
 			    MC_CMD_GET_PHY_CFG_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_PHY_CFG;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
 #if EFSYS_OPT_NAMES
 	(void) strncpy(encp->enc_phy_name,
 		MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
 		MIN(sizeof (encp->enc_phy_name) - 1,
 		    MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
 #endif	/* EFSYS_OPT_NAMES */
 	(void) memset(encp->enc_phy_revision, 0,
 	    sizeof (encp->enc_phy_revision));
 	memcpy(encp->enc_phy_revision,
 		MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
 		MIN(sizeof (encp->enc_phy_revision) - 1,
 		    MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
 #if EFSYS_OPT_PHY_LED_CONTROL
 	encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
 			    (1 << EFX_PHY_LED_OFF) |
 			    (1 << EFX_PHY_LED_ON));
 #endif	/* EFSYS_OPT_PHY_LED_CONTROL */
 
 	/* Get the media type of the fixed port, if recognised. */
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
 	EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
 	epp->ep_fixed_port_type =
 		MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
 	if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
 		epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
 
 	epp->ep_phy_cap_mask =
 		MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
 #if EFSYS_OPT_PHY_FLAGS
 	encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
 #endif	/* EFSYS_OPT_PHY_FLAGS */
 
 	encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
 
 	/* Populate internal state */
 	encp->enc_mcdi_mdio_channel =
 		(uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
 
 #if EFSYS_OPT_PHY_STATS
 	encp->enc_mcdi_phy_stat_mask =
 		MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
 #endif	/* EFSYS_OPT_PHY_STATS */
 
 #if EFSYS_OPT_BIST
 	encp->enc_bist_mask = 0;
 	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
 	    GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
 		encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
 	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
 	    GET_PHY_CFG_OUT_BIST_CABLE_LONG))
 		encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
 	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
 	    GET_PHY_CFG_OUT_BIST))
 		encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
 #endif  /* EFSYS_OPT_BIST */
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_mcdi_firmware_update_supported(
 	__in			efx_nic_t *enp,
 	__out			boolean_t *supportedp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 	efx_rc_t rc;
 
 	if (emcop != NULL) {
 		if ((rc = emcop->emco_feature_supported(enp,
 			    EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
 			goto fail1;
 	} else {
 		/* Earlier devices always supported updates */
 		*supportedp = B_TRUE;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_mcdi_macaddr_change_supported(
 	__in			efx_nic_t *enp,
 	__out			boolean_t *supportedp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 	efx_rc_t rc;
 
 	if (emcop != NULL) {
 		if ((rc = emcop->emco_feature_supported(enp,
 			    EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
 			goto fail1;
 	} else {
 		/* Earlier devices always supported MAC changes */
 		*supportedp = B_TRUE;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_mcdi_link_control_supported(
 	__in			efx_nic_t *enp,
 	__out			boolean_t *supportedp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 	efx_rc_t rc;
 
 	if (emcop != NULL) {
 		if ((rc = emcop->emco_feature_supported(enp,
 			    EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
 			goto fail1;
 	} else {
 		/* Earlier devices always supported link control */
 		*supportedp = B_TRUE;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn		efx_rc_t
 efx_mcdi_mac_spoofing_supported(
 	__in			efx_nic_t *enp,
 	__out			boolean_t *supportedp)
 {
 	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
 	efx_rc_t rc;
 
 	if (emcop != NULL) {
 		if ((rc = emcop->emco_feature_supported(enp,
 			    EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
 			goto fail1;
 	} else {
 		/* Earlier devices always supported MAC spoofing */
 		*supportedp = B_TRUE;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #if EFSYS_OPT_BIST
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 /*
  * Enter BIST offline mode. This is a firmware mode which puts the NIC into a
  * state where memory BIST tests can be run with minimal interference.
  * A reboot is required to exit this mode.
  */
 	__checkReturn		efx_rc_t
 efx_mcdi_bist_enable_offline(
 	__in			efx_nic_t *enp)
 {
 	efx_mcdi_req_t req;
 	efx_rc_t rc;
 
 	EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
 	EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
 
 	req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
 	req.emr_in_buf = NULL;
 	req.emr_in_length = 0;
 	req.emr_out_buf = NULL;
 	req.emr_out_length = 0;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
 
 	__checkReturn		efx_rc_t
 efx_mcdi_bist_start(
 	__in			efx_nic_t *enp,
 	__in			efx_bist_type_t type)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
 			    MC_CMD_START_BIST_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_START_BIST;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
 
 	switch (type) {
 	case EFX_BIST_TYPE_PHY_NORMAL:
 		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
 		break;
 	case EFX_BIST_TYPE_PHY_CABLE_SHORT:
 		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
 		    MC_CMD_PHY_BIST_CABLE_SHORT);
 		break;
 	case EFX_BIST_TYPE_PHY_CABLE_LONG:
 		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
 		    MC_CMD_PHY_BIST_CABLE_LONG);
 		break;
 	case EFX_BIST_TYPE_MC_MEM:
 		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
 		    MC_CMD_MC_MEM_BIST);
 		break;
 	case EFX_BIST_TYPE_SAT_MEM:
 		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
 		    MC_CMD_PORT_MEM_BIST);
 		break;
 	case EFX_BIST_TYPE_REG:
 		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
 		    MC_CMD_REG_BIST);
 		break;
 	default:
 		EFSYS_ASSERT(0);
 	}
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
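 
 /*
  * Illustrative sketch (editorial, not part of this change): running an MC
  * memory BIST typically means entering offline BIST mode first and then
  * starting the test; the error handling is an assumption.
  *
  *	if (efx_mcdi_bist_enable_offline(enp) != 0)
  *		return;
  *	if (efx_mcdi_bist_start(enp, EFX_BIST_TYPE_MC_MEM) != 0)
  *		return;
  */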
 
 #endif /* EFSYS_OPT_BIST */
 
 
 /* Enable logging of some events (e.g. link state changes) */
 	__checkReturn	efx_rc_t
 efx_mcdi_log_ctrl(
 	__in		efx_nic_t *enp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
 			    MC_CMD_LOG_CTRL_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_LOG_CTRL;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
 		    MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
 	MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 
 #if EFSYS_OPT_MAC_STATS
 
 typedef enum efx_stats_action_e {
 	EFX_STATS_CLEAR,
 	EFX_STATS_UPLOAD,
 	EFX_STATS_ENABLE_NOEVENTS,
 	EFX_STATS_ENABLE_EVENTS,
 	EFX_STATS_DISABLE,
 } efx_stats_action_t;
 
 static	__checkReturn	efx_rc_t
 efx_mcdi_mac_stats(
 	__in		efx_nic_t *enp,
 	__in_opt	efsys_mem_t *esmp,
 	__in		efx_stats_action_t action,
 	__in		uint16_t period_ms)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
 			    MC_CMD_MAC_STATS_OUT_DMA_LEN)];
 	int clear = (action == EFX_STATS_CLEAR);
 	int upload = (action == EFX_STATS_UPLOAD);
 	int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
 	int events = (action == EFX_STATS_ENABLE_EVENTS);
 	int disable = (action == EFX_STATS_DISABLE);
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_MAC_STATS;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
 
 	MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
 	    MAC_STATS_IN_DMA, upload,
 	    MAC_STATS_IN_CLEAR, clear,
 	    MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
 	    MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
 	    MAC_STATS_IN_PERIODIC_NOEVENT, !events,
 	    MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
 
 	if (esmp != NULL) {
 		int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
 
 		EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
 		    EFX_MAC_STATS_SIZE);
 
 		MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
 			    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
 		MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
 			    EFSYS_MEM_ADDR(esmp) >> 32);
 		MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
 	} else {
 		EFSYS_ASSERT(!upload && !enable && !events);
 	}
 
 	/*
 	 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
 	 *	 as this may fail (and leave periodic DMA enabled) if the
 	 *	 vadapter has already been deleted.
 	 */
 	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
 	    (disable ? EVB_PORT_ID_NULL : enp->en_vport_id));
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		/* EF10: Expect ENOENT if no DMA queues are initialised */
 		if ((req.emr_rc != ENOENT) ||
 		    (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
 			rc = req.emr_rc;
 			goto fail1;
 		}
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_mac_stats_clear(
 	__in		efx_nic_t *enp)
 {
 	efx_rc_t rc;
 
 	if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_mac_stats_upload(
 	__in		efx_nic_t *enp,
 	__in		efsys_mem_t *esmp)
 {
 	efx_rc_t rc;
 
 	/*
 	 * The MC DMAs aggregate statistics for our convenience, so we can
 	 * avoid having to pull the statistics buffer into the cache to
 	 * maintain cumulative statistics.
 	 */
 	if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 	__checkReturn	efx_rc_t
 efx_mcdi_mac_stats_periodic(
 	__in		efx_nic_t *enp,
 	__in		efsys_mem_t *esmp,
 	__in		uint16_t period_ms,
 	__in		boolean_t events)
 {
 	efx_rc_t rc;
 
 	/*
 	 * The MC DMAs aggregate statistics for our convenience, so we can
 	 * avoid having to pull the statistics buffer into the cache to
 	 * maintain cumulative statistics.
 	 * Huntington uses a fixed 1sec period.
 	 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
 	 */
 	if (period_ms == 0)
 		rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
 	else if (events)
 		rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
 		    period_ms);
 	else
 		rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
 		    period_ms);
 
 	if (rc != 0)
 		goto fail1;
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
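 
 /*
  * Illustrative sketch (editorial, not part of this change): enabling
  * event-driven periodic MAC statistics DMA into a caller-provided buffer,
  * then disabling it again. The 1000ms period is an arbitrary example value.
  *
  *	if (efx_mcdi_mac_stats_periodic(enp, esmp, 1000, B_TRUE) != 0)
  *		return;
  *	...
  *	(void) efx_mcdi_mac_stats_periodic(enp, esmp, 0, B_FALSE);
  */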
 
 #endif	/* EFSYS_OPT_MAC_STATS */
 
 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
 
 /*
  * This function returns the PF and VF number of a function. If it is a PF,
  * the VF number is 0xffff. The VF number is the index of the VF on that
  * function, so if there are 3 VFs on PF 0, the 3 VFs will return (pf=0,vf=0),
  * (pf=0,vf=1), (pf=0,vf=2) and the PF will return (pf=0,vf=0xffff).
  */
 	__checkReturn		efx_rc_t
 efx_mcdi_get_function_info(
 	__in			efx_nic_t *enp,
 	__out			uint32_t *pfp,
 	__out_opt		uint32_t *vfp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
 			    MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	*pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
 	if (vfp != NULL)
 		*vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
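 
 /*
  * Illustrative sketch (editorial, not part of this change): combining the
  * functions in this section to look up the privilege mask of the local
  * function.
  *
  *	uint32_t pf, vf, mask;
  *
  *	if (efx_mcdi_get_function_info(enp, &pf, &vf) != 0)
  *		return;
  *	if (efx_mcdi_privilege_mask(enp, pf, vf, &mask) != 0)
  *		return;
  */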
 
 	__checkReturn		efx_rc_t
 efx_mcdi_privilege_mask(
 	__in			efx_nic_t *enp,
 	__in			uint32_t pf,
 	__in			uint32_t vf,
 	__out			uint32_t *maskp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
 			    MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
 
 	MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
 	    PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
 	    PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	*maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
 
 	return (0);
 
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
 
 	__checkReturn		efx_rc_t
 efx_mcdi_set_workaround(
 	__in			efx_nic_t *enp,
 	__in			uint32_t type,
 	__in			boolean_t enabled,
 	__out_opt		uint32_t *flagsp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
 			    MC_CMD_WORKAROUND_EXT_OUT_LEN)];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_WORKAROUND;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN;
 
 	MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
 	MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
 
 	efx_mcdi_execute_quiet(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (flagsp != NULL) {
 		if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
 			*flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
 		else
 			*flagsp = 0;
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
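
 /*
  * Illustrative usage sketch: enabling a firmware workaround by its
  * MC_CMD_WORKAROUND_* type.  Passing a flags pointer retrieves the
  * extended-response flags when the firmware provides them; the wrapper
  * above reports 0 otherwise.  The helper name is an illustrative
  * assumption.
  */
 static	__checkReturn		efx_rc_t
 example_enable_workaround(
 	__in			efx_nic_t *enp,
 	__in			uint32_t type,
 	__out			uint32_t *flagsp)
 {
 	return (efx_mcdi_set_workaround(enp, type, B_TRUE, flagsp));
 }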
 
 
 	__checkReturn		efx_rc_t
 efx_mcdi_get_workarounds(
 	__in			efx_nic_t *enp,
 	__out_opt		uint32_t *implementedp,
 	__out_opt		uint32_t *enabledp)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
 	efx_rc_t rc;
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
 	req.emr_in_buf = NULL;
 	req.emr_in_length = 0;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (implementedp != NULL) {
 		*implementedp =
 		    MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
 	}
 
 	if (enabledp != NULL) {
 		*enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
 	}
 
 	return (0);
 
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
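
 /*
  * Illustrative usage sketch: testing a single workaround bit against the
  * implemented and enabled masks returned above.  The helper name is an
  * illustrative assumption; the bit argument is expected to be one of the
  * MC_CMD_GET_WORKAROUNDS_OUT_* masks from the MCDI headers.
  */
 static	__checkReturn		efx_rc_t
 example_workaround_status(
 	__in			efx_nic_t *enp,
 	__in			uint32_t bit,
 	__out			boolean_t *implementedp,
 	__out			boolean_t *enabledp)
 {
 	uint32_t implemented;
 	uint32_t enabled;
 	efx_rc_t rc;

 	if ((rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled)) != 0)
 		return (rc);

 	*implementedp = ((implemented & bit) != 0) ? B_TRUE : B_FALSE;
 	*enabledp = ((enabled & bit) != 0) ? B_TRUE : B_FALSE;

 	return (0);
 }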
 
 /*
  * Size of media information page in accordance with SFF-8472 and SFF-8436.
  * It is used in the MCDI interface as well.
  */
 #define	EFX_PHY_MEDIA_INFO_PAGE_SIZE		0x80
 
 static	__checkReturn		efx_rc_t
 efx_mcdi_get_phy_media_info(
 	__in			efx_nic_t *enp,
 	__in			uint32_t mcdi_page,
 	__in			uint8_t offset,
 	__in			uint8_t len,
 	__out_bcount(len)	uint8_t *data)
 {
 	efx_mcdi_req_t req;
 	uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
 			    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
 				EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
 	efx_rc_t rc;
 
 	EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
 
 	(void) memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
 	req.emr_in_buf = payload;
 	req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length =
 	    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
 
 	MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
 
 	efx_mcdi_execute(enp, &req);
 
 	if (req.emr_rc != 0) {
 		rc = req.emr_rc;
 		goto fail1;
 	}
 
 	if (req.emr_out_length_used !=
 	    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
 		rc = EMSGSIZE;
 		goto fail2;
 	}
 
 	if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
 	    EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
 		rc = EIO;
 		goto fail3;
 	}
 
 	memcpy(data,
 	    MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
 	    len);
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
 
 /*
  * 2-wire device address of the base information in accordance with SFF-8472
  * Diagnostic Monitoring Interface for Optical Transceivers section
  * 4 Memory Organization.
  */
 #define	EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE	0xA0
 
 /*
  * 2-wire device address of the digital diagnostics monitoring interface
  * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
  * Transceivers section 4 Memory Organization.
  */
 #define	EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM	0xA2
 
 /*
  * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
  * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
  * Operation.
  */
 #define	EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP	0xA0
 
 	__checkReturn		efx_rc_t
 efx_mcdi_phy_module_get_info(
 	__in			efx_nic_t *enp,
 	__in			uint8_t dev_addr,
 	__in			uint8_t offset,
 	__in			uint8_t len,
 	__out_bcount(len)	uint8_t *data)
 {
 	efx_port_t *epp = &(enp->en_port);
 	efx_rc_t rc;
 	uint32_t mcdi_lower_page;
 	uint32_t mcdi_upper_page;
 
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
 
 	/*
 	 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
 	 * The offset plus length interface allows access to page 0 only,
 	 * i.e. non-zero upper pages are not accessible.
 	 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
 	 * QSFP+ Memory Map for details on how information is structured
 	 * and accessible.
 	 */
 	switch (epp->ep_fixed_port_type) {
 	case EFX_PHY_MEDIA_SFP_PLUS:
 		/*
 		 * In accordance with SFF-8472 Diagnostic Monitoring
 		 * Interface for Optical Transceivers section 4 Memory
 		 * Organization two 2-wire addresses are defined.
 		 */
 		switch (dev_addr) {
 		/* Base information */
 		case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
 			/*
 			 * MCDI page 0 should be used to access lower
 			 * page 0 (0x00 - 0x7f) at the device address 0xA0.
 			 */
 			mcdi_lower_page = 0;
 			/*
 			 * MCDI page 1 should be used to access upper
 			 * page 0 (0x80 - 0xff) at the device address 0xA0.
 			 */
 			mcdi_upper_page = 1;
 			break;
 		/* Diagnostics */
 		case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
 			/*
 			 * MCDI page 2 should be used to access lower
 			 * page 0 (0x00 - 0x7f) at the device address 0xA2.
 			 */
 			mcdi_lower_page = 2;
 			/*
 			 * MCDI page 3 should be used to access upper
 			 * page 0 (0x80 - 0xff) at the device address 0xA2.
 			 */
 			mcdi_upper_page = 3;
 			break;
 		default:
 			rc = ENOTSUP;
 			goto fail1;
 		}
 		break;
 	case EFX_PHY_MEDIA_QSFP_PLUS:
 		switch (dev_addr) {
 		case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
 			/*
 			 * MCDI page -1 should be used to access lower page 0
 			 * (0x00 - 0x7f).
 			 */
 			mcdi_lower_page = (uint32_t)-1;
 			/*
 			 * MCDI page 0 should be used to access upper page 0
 			 * (0x80 - 0xff).
 			 */
 			mcdi_upper_page = 0;
 			break;
 		default:
 			rc = ENOTSUP;
 			goto fail1;
 		}
 		break;
 	default:
 		rc = ENOTSUP;
 		goto fail1;
 	}
 
 	if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
 		uint8_t read_len =
 		    MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
 
 		rc = efx_mcdi_get_phy_media_info(enp,
 		    mcdi_lower_page, offset, read_len, data);
 		if (rc != 0)
 			goto fail2;
 
 		data += read_len;
 		len -= read_len;
 
 		offset = 0;
 	} else {
 		offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
 	}
 
 	if (len > 0) {
 		EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
 		EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
 
 		rc = efx_mcdi_get_phy_media_info(enp,
 		    mcdi_upper_page, offset, len, data);
 		if (rc != 0)
 			goto fail3;
 	}
 
 	return (0);
 
 fail3:
 	EFSYS_PROBE(fail3);
 fail2:
 	EFSYS_PROBE(fail2);
 fail1:
 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
 	return (rc);
 }
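
 /*
  * Illustrative usage sketch: a 4 byte read of the SFP+ A0h memory starting
  * at offset 0x7e.  The read crosses the 0x80 boundary, so the function
  * above splits it into a 2 byte read from the lower MCDI page and a 2 byte
  * read from the upper MCDI page.  The helper name is an illustrative
  * assumption.
  */
 static	__checkReturn		efx_rc_t
 example_read_across_page_boundary(
 	__in			efx_nic_t *enp,
 	__out_bcount(4)		uint8_t *data)
 {
 	return (efx_mcdi_phy_module_get_info(enp,
 	    EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE, 0x7e, 4, data));
 }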
 
 #endif	/* EFSYS_OPT_MCDI */
Index: stable/12
===================================================================
--- stable/12	(revision 342323)
+++ stable/12	(revision 342324)

Property changes on: stable/12
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r340888