Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixgbe/if_sriov.c
Show First 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | device_printf(dev, | ||||
"Error %d setting up SR-IOV\n", *error); | "Error %d setting up SR-IOV\n", *error); | ||||
} | } | ||||
} /* ixgbe_define_iov_schemas */ | } /* ixgbe_define_iov_schemas */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_align_all_queue_indices | * ixgbe_align_all_queue_indices | ||||
************************************************************************/ | ************************************************************************/ | ||||
inline void | inline void | ||||
ixgbe_align_all_queue_indices(struct adapter *adapter) | ixgbe_align_all_queue_indices(struct ixgbe_softc *sc) | ||||
{ | { | ||||
int i; | int i; | ||||
int index; | int index; | ||||
for (i = 0; i < adapter->num_rx_queues; i++) { | for (i = 0; i < sc->num_rx_queues; i++) { | ||||
index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); | index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i); | ||||
adapter->rx_queues[i].rxr.me = index; | sc->rx_queues[i].rxr.me = index; | ||||
} | } | ||||
for (i = 0; i < adapter->num_tx_queues; i++) { | for (i = 0; i < sc->num_tx_queues; i++) { | ||||
index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); | index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i); | ||||
adapter->tx_queues[i].txr.me = index; | sc->tx_queues[i].txr.me = index; | ||||
} | } | ||||
} | } | ||||
/* Support functions for SR-IOV/VF management */ | /* Support functions for SR-IOV/VF management */ | ||||
static inline void | static inline void | ||||
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) | ixgbe_send_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg) | ||||
{ | { | ||||
if (vf->flags & IXGBE_VF_CTS) | if (vf->flags & IXGBE_VF_CTS) | ||||
msg |= IXGBE_VT_MSGTYPE_CTS; | msg |= IXGBE_VT_MSGTYPE_CTS; | ||||
adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool); | sc->hw.mbx.ops.write(&sc->hw, &msg, 1, vf->pool); | ||||
} | } | ||||
static inline void | static inline void | ||||
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) | ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg) | ||||
{ | { | ||||
msg &= IXGBE_VT_MSG_MASK; | msg &= IXGBE_VT_MSG_MASK; | ||||
ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK); | ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_ACK); | ||||
} | } | ||||
static inline void | static inline void | ||||
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) | ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg) | ||||
{ | { | ||||
msg &= IXGBE_VT_MSG_MASK; | msg &= IXGBE_VT_MSG_MASK; | ||||
ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK); | ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_NACK); | ||||
} | } | ||||
static inline void | static inline void | ||||
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf) | ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf) | ||||
{ | { | ||||
if (!(vf->flags & IXGBE_VF_CTS)) | if (!(vf->flags & IXGBE_VF_CTS)) | ||||
ixgbe_send_vf_nack(adapter, vf, 0); | ixgbe_send_vf_nack(sc, vf, 0); | ||||
} | } | ||||
static inline boolean_t | static inline boolean_t | ||||
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac) | ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac) | ||||
{ | { | ||||
return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0); | return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0); | ||||
} | } | ||||
Show All 13 Lines | |||||
inline int | inline int | ||||
ixgbe_vf_que_index(int mode, int vfnum, int num) | ixgbe_vf_que_index(int mode, int vfnum, int num) | ||||
{ | { | ||||
return ((vfnum * ixgbe_vf_queues(mode)) + num); | return ((vfnum * ixgbe_vf_queues(mode)) + num); | ||||
} | } | ||||
static inline void | static inline void | ||||
ixgbe_update_max_frame(struct adapter * adapter, int max_frame) | ixgbe_update_max_frame(struct ixgbe_softc * sc, int max_frame) | ||||
{ | { | ||||
if (adapter->max_frame_size < max_frame) | if (sc->max_frame_size < max_frame) | ||||
adapter->max_frame_size = max_frame; | sc->max_frame_size = max_frame; | ||||
} | } | ||||
inline u32 | inline u32 | ||||
ixgbe_get_mrqc(int iov_mode) | ixgbe_get_mrqc(int iov_mode) | ||||
{ | { | ||||
u32 mrqc; | u32 mrqc; | ||||
switch (iov_mode) { | switch (iov_mode) { | ||||
Show All 32 Lines | ixgbe_get_mtqc(int iov_mode) | ||||
default: | default: | ||||
panic("Unexpected SR-IOV mode %d", iov_mode); | panic("Unexpected SR-IOV mode %d", iov_mode); | ||||
} | } | ||||
return mtqc; | return mtqc; | ||||
} | } | ||||
void | void | ||||
ixgbe_ping_all_vfs(struct adapter *adapter) | ixgbe_ping_all_vfs(struct ixgbe_softc *sc) | ||||
{ | { | ||||
struct ixgbe_vf *vf; | struct ixgbe_vf *vf; | ||||
for (int i = 0; i < adapter->num_vfs; i++) { | for (int i = 0; i < sc->num_vfs; i++) { | ||||
vf = &adapter->vfs[i]; | vf = &sc->vfs[i]; | ||||
if (vf->flags & IXGBE_VF_ACTIVE) | if (vf->flags & IXGBE_VF_ACTIVE) | ||||
ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); | ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG); | ||||
} | } | ||||
} /* ixgbe_ping_all_vfs */ | } /* ixgbe_ping_all_vfs */ | ||||
static void | static void | ||||
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf, | ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, | ||||
uint16_t tag) | uint16_t tag) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t vmolr, vmvir; | uint32_t vmolr, vmvir; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
vf->vlan_tag = tag; | vf->vlan_tag = tag; | ||||
vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool)); | vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool)); | ||||
/* Do not receive packets that pass inexact filters. */ | /* Do not receive packets that pass inexact filters. */ | ||||
vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); | vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); | ||||
Show All 17 Lines | if (tag == 0) { | ||||
vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); | vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); | ||||
} | } | ||||
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); | ||||
} /* ixgbe_vf_set_default_vlan */ | } /* ixgbe_vf_set_default_vlan */ | ||||
static boolean_t | static boolean_t | ||||
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) | ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf) | ||||
{ | { | ||||
/* | /* | ||||
* Frame size compatibility between PF and VF is only a problem on | * Frame size compatibility between PF and VF is only a problem on | ||||
* 82599-based cards. X540 and later support any combination of jumbo | * 82599-based cards. X540 and later support any combination of jumbo | ||||
* frames on PFs and VFs. | * frames on PFs and VFs. | ||||
*/ | */ | ||||
if (adapter->hw.mac.type != ixgbe_mac_82599EB) | if (sc->hw.mac.type != ixgbe_mac_82599EB) | ||||
return (true); | return (true); | ||||
switch (vf->api_ver) { | switch (vf->api_ver) { | ||||
case IXGBE_API_VER_1_0: | case IXGBE_API_VER_1_0: | ||||
case IXGBE_API_VER_UNKNOWN: | case IXGBE_API_VER_UNKNOWN: | ||||
/* | /* | ||||
* On legacy (1.0 and older) VF versions, we don't support jumbo | * On legacy (1.0 and older) VF versions, we don't support jumbo | ||||
* frames on either the PF or the VF. | * frames on either the PF or the VF. | ||||
*/ | */ | ||||
if (adapter->max_frame_size > ETHER_MAX_LEN || | if (sc->max_frame_size > ETHER_MAX_LEN || | ||||
vf->maximum_frame_size > ETHER_MAX_LEN) | vf->maximum_frame_size > ETHER_MAX_LEN) | ||||
return (false); | return (false); | ||||
return (true); | return (true); | ||||
break; | break; | ||||
case IXGBE_API_VER_1_1: | case IXGBE_API_VER_1_1: | ||||
default: | default: | ||||
/* | /* | ||||
* 1.1 or later VF versions always work if they aren't using | * 1.1 or later VF versions always work if they aren't using | ||||
* jumbo frames. | * jumbo frames. | ||||
*/ | */ | ||||
if (vf->maximum_frame_size <= ETHER_MAX_LEN) | if (vf->maximum_frame_size <= ETHER_MAX_LEN) | ||||
return (true); | return (true); | ||||
/* | /* | ||||
* Jumbo frames only work with VFs if the PF is also using jumbo | * Jumbo frames only work with VFs if the PF is also using jumbo | ||||
* frames. | * frames. | ||||
*/ | */ | ||||
if (adapter->max_frame_size <= ETHER_MAX_LEN) | if (sc->max_frame_size <= ETHER_MAX_LEN) | ||||
return (true); | return (true); | ||||
return (false); | return (false); | ||||
} | } | ||||
} /* ixgbe_vf_frame_size_compatible */ | } /* ixgbe_vf_frame_size_compatible */ | ||||
static void | static void | ||||
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) | ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf) | ||||
{ | { | ||||
ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); | ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan); | ||||
// XXX clear multicast addresses | // XXX clear multicast addresses | ||||
ixgbe_clear_rar(&adapter->hw, vf->rar_index); | ixgbe_clear_rar(&sc->hw, vf->rar_index); | ||||
vf->api_ver = IXGBE_API_VER_UNKNOWN; | vf->api_ver = IXGBE_API_VER_UNKNOWN; | ||||
} /* ixgbe_process_vf_reset */ | } /* ixgbe_process_vf_reset */ | ||||
static void | static void | ||||
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) | ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t vf_index, vfte; | uint32_t vf_index, vfte; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
vf_index = IXGBE_VF_INDEX(vf->pool); | vf_index = IXGBE_VF_INDEX(vf->pool); | ||||
vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); | vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); | ||||
vfte |= IXGBE_VF_BIT(vf->pool); | vfte |= IXGBE_VF_BIT(vf->pool); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); | IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); | ||||
} /* ixgbe_vf_enable_transmit */ | } /* ixgbe_vf_enable_transmit */ | ||||
static void | static void | ||||
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) | ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t vf_index, vfre; | uint32_t vf_index, vfre; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
vf_index = IXGBE_VF_INDEX(vf->pool); | vf_index = IXGBE_VF_INDEX(vf->pool); | ||||
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); | vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); | ||||
if (ixgbe_vf_frame_size_compatible(adapter, vf)) | if (ixgbe_vf_frame_size_compatible(sc, vf)) | ||||
vfre |= IXGBE_VF_BIT(vf->pool); | vfre |= IXGBE_VF_BIT(vf->pool); | ||||
else | else | ||||
vfre &= ~IXGBE_VF_BIT(vf->pool); | vfre &= ~IXGBE_VF_BIT(vf->pool); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); | IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); | ||||
} /* ixgbe_vf_enable_receive */ | } /* ixgbe_vf_enable_receive */ | ||||
static void | static void | ||||
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t ack; | uint32_t ack; | ||||
uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; | uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
ixgbe_process_vf_reset(adapter, vf); | ixgbe_process_vf_reset(sc, vf); | ||||
if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { | if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { | ||||
ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, | ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, | ||||
vf->pool, true); | vf->pool, true); | ||||
ack = IXGBE_VT_MSGTYPE_ACK; | ack = IXGBE_VT_MSGTYPE_ACK; | ||||
} else | } else | ||||
ack = IXGBE_VT_MSGTYPE_NACK; | ack = IXGBE_VT_MSGTYPE_NACK; | ||||
ixgbe_vf_enable_transmit(adapter, vf); | ixgbe_vf_enable_transmit(sc, vf); | ||||
ixgbe_vf_enable_receive(adapter, vf); | ixgbe_vf_enable_receive(sc, vf); | ||||
vf->flags |= IXGBE_VF_CTS; | vf->flags |= IXGBE_VF_CTS; | ||||
resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; | resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; | ||||
bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); | bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); | ||||
resp[3] = hw->mac.mc_filter_type; | resp[3] = hw->mac.mc_filter_type; | ||||
hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); | hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); | ||||
} /* ixgbe_vf_reset_msg */ | } /* ixgbe_vf_reset_msg */ | ||||
static void | static void | ||||
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg) | ||||
{ | { | ||||
uint8_t *mac; | uint8_t *mac; | ||||
mac = (uint8_t*)&msg[1]; | mac = (uint8_t*)&msg[1]; | ||||
/* Check that the VF has permission to change the MAC address. */ | /* Check that the VF has permission to change the MAC address. */ | ||||
if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { | if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
if (ixgbe_validate_mac_addr(mac) != 0) { | if (ixgbe_validate_mac_addr(mac) != 0) { | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); | bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); | ||||
ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool, | ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool, | ||||
true); | true); | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
} /* ixgbe_vf_set_mac */ | } /* ixgbe_vf_set_mac */ | ||||
/* | /* | ||||
* VF multicast addresses are set by using the appropriate bit in | * VF multicast addresses are set by using the appropriate bit in | ||||
* 1 of 128 32 bit addresses (4096 possible). | * 1 of 128 32 bit addresses (4096 possible). | ||||
*/ | */ | ||||
static void | static void | ||||
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg) | ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg) | ||||
{ | { | ||||
u16 *list = (u16*)&msg[1]; | u16 *list = (u16*)&msg[1]; | ||||
int entries; | int entries; | ||||
u32 vmolr, vec_bit, vec_reg, mta_reg; | u32 vmolr, vec_bit, vec_reg, mta_reg; | ||||
entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; | entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; | ||||
entries = min(entries, IXGBE_MAX_VF_MC); | entries = min(entries, IXGBE_MAX_VF_MC); | ||||
vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool)); | vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool)); | ||||
vf->num_mc_hashes = entries; | vf->num_mc_hashes = entries; | ||||
/* Set the appropriate MTA bit */ | /* Set the appropriate MTA bit */ | ||||
for (int i = 0; i < entries; i++) { | for (int i = 0; i < entries; i++) { | ||||
vf->mc_hash[i] = list[i]; | vf->mc_hash[i] = list[i]; | ||||
vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; | vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; | ||||
vec_bit = vf->mc_hash[i] & 0x1F; | vec_bit = vf->mc_hash[i] & 0x1F; | ||||
mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg)); | mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg)); | ||||
mta_reg |= (1 << vec_bit); | mta_reg |= (1 << vec_bit); | ||||
IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg); | IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg); | ||||
} | } | ||||
vmolr |= IXGBE_VMOLR_ROMPE; | vmolr |= IXGBE_VMOLR_ROMPE; | ||||
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr); | IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr); | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
} /* ixgbe_vf_set_mc_addr */ | } /* ixgbe_vf_set_mc_addr */ | ||||
static void | static void | ||||
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
int enable; | int enable; | ||||
uint16_t tag; | uint16_t tag; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
enable = IXGBE_VT_MSGINFO(msg[0]); | enable = IXGBE_VT_MSGINFO(msg[0]); | ||||
tag = msg[1] & IXGBE_VLVF_VLANID_MASK; | tag = msg[1] & IXGBE_VLVF_VLANID_MASK; | ||||
if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { | if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
/* It is illegal to enable vlan tag 0. */ | /* It is illegal to enable vlan tag 0. */ | ||||
if (tag == 0 && enable != 0) { | if (tag == 0 && enable != 0) { | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
ixgbe_set_vfta(hw, tag, vf->pool, enable, false); | ixgbe_set_vfta(hw, tag, vf->pool, enable, false); | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
} /* ixgbe_vf_set_vlan */ | } /* ixgbe_vf_set_vlan */ | ||||
static void | static void | ||||
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t vf_max_size, pf_max_size, mhadd; | uint32_t vf_max_size, pf_max_size, mhadd; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
vf_max_size = msg[1]; | vf_max_size = msg[1]; | ||||
if (vf_max_size < ETHER_CRC_LEN) { | if (vf_max_size < ETHER_CRC_LEN) { | ||||
/* We intentionally ACK invalid LPE requests. */ | /* We intentionally ACK invalid LPE requests. */ | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
vf_max_size -= ETHER_CRC_LEN; | vf_max_size -= ETHER_CRC_LEN; | ||||
if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { | if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { | ||||
/* We intentionally ACK invalid LPE requests. */ | /* We intentionally ACK invalid LPE requests. */ | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
vf->maximum_frame_size = vf_max_size; | vf->maximum_frame_size = vf_max_size; | ||||
ixgbe_update_max_frame(adapter, vf->maximum_frame_size); | ixgbe_update_max_frame(sc, vf->maximum_frame_size); | ||||
/* | /* | ||||
* We might have to disable reception to this VF if the frame size is | * We might have to disable reception to this VF if the frame size is | ||||
* not compatible with the config on the PF. | * not compatible with the config on the PF. | ||||
*/ | */ | ||||
ixgbe_vf_enable_receive(adapter, vf); | ixgbe_vf_enable_receive(sc, vf); | ||||
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | ||||
pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; | pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; | ||||
if (pf_max_size < adapter->max_frame_size) { | if (pf_max_size < sc->max_frame_size) { | ||||
mhadd &= ~IXGBE_MHADD_MFS_MASK; | mhadd &= ~IXGBE_MHADD_MFS_MASK; | ||||
mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; | mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; | ||||
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | ||||
} | } | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
} /* ixgbe_vf_set_lpe */ | } /* ixgbe_vf_set_lpe */ | ||||
static void | static void | ||||
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf, | ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, | ||||
uint32_t *msg) | uint32_t *msg) | ||||
{ | { | ||||
//XXX implement this | //XXX implement this | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
} /* ixgbe_vf_set_macvlan */ | } /* ixgbe_vf_set_macvlan */ | ||||
static void | static void | ||||
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf, | ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf, | ||||
uint32_t *msg) | uint32_t *msg) | ||||
{ | { | ||||
switch (msg[1]) { | switch (msg[1]) { | ||||
case IXGBE_API_VER_1_0: | case IXGBE_API_VER_1_0: | ||||
case IXGBE_API_VER_1_1: | case IXGBE_API_VER_1_1: | ||||
vf->api_ver = msg[1]; | vf->api_ver = msg[1]; | ||||
ixgbe_send_vf_ack(adapter, vf, msg[0]); | ixgbe_send_vf_ack(sc, vf, msg[0]); | ||||
break; | break; | ||||
default: | default: | ||||
vf->api_ver = IXGBE_API_VER_UNKNOWN; | vf->api_ver = IXGBE_API_VER_UNKNOWN; | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
break; | break; | ||||
} | } | ||||
} /* ixgbe_vf_api_negotiate */ | } /* ixgbe_vf_api_negotiate */ | ||||
static void | static void | ||||
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN]; | uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN]; | ||||
int num_queues; | int num_queues; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
/* GET_QUEUES is not supported on pre-1.1 APIs. */ | /* GET_QUEUES is not supported on pre-1.1 APIs. */ | ||||
switch (msg[0]) { | switch (msg[0]) { | ||||
case IXGBE_API_VER_1_0: | case IXGBE_API_VER_1_0: | ||||
case IXGBE_API_VER_UNKNOWN: | case IXGBE_API_VER_UNKNOWN: | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | | resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | | ||||
IXGBE_VT_MSGTYPE_CTS; | IXGBE_VT_MSGTYPE_CTS; | ||||
num_queues = ixgbe_vf_queues(adapter->iov_mode); | num_queues = ixgbe_vf_queues(sc->iov_mode); | ||||
resp[IXGBE_VF_TX_QUEUES] = num_queues; | resp[IXGBE_VF_TX_QUEUES] = num_queues; | ||||
resp[IXGBE_VF_RX_QUEUES] = num_queues; | resp[IXGBE_VF_RX_QUEUES] = num_queues; | ||||
resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0); | resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0); | ||||
resp[IXGBE_VF_DEF_QUEUE] = 0; | resp[IXGBE_VF_DEF_QUEUE] = 0; | ||||
hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool); | hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool); | ||||
} /* ixgbe_vf_get_queues */ | } /* ixgbe_vf_get_queues */ | ||||
static void | static void | ||||
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf) | ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
#ifdef KTR | #ifdef KTR | ||||
struct ifnet *ifp = iflib_get_ifp(ctx); | struct ifnet *ifp = iflib_get_ifp(ctx); | ||||
#endif | #endif | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
uint32_t msg[IXGBE_VFMAILBOX_SIZE]; | uint32_t msg[IXGBE_VFMAILBOX_SIZE]; | ||||
int error; | int error; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool); | error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool); | ||||
if (error != 0) | if (error != 0) | ||||
return; | return; | ||||
CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname, | CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname, | ||||
msg[0], vf->pool); | msg[0], vf->pool); | ||||
if (msg[0] == IXGBE_VF_RESET) { | if (msg[0] == IXGBE_VF_RESET) { | ||||
ixgbe_vf_reset_msg(adapter, vf, msg); | ixgbe_vf_reset_msg(sc, vf, msg); | ||||
return; | return; | ||||
} | } | ||||
if (!(vf->flags & IXGBE_VF_CTS)) { | if (!(vf->flags & IXGBE_VF_CTS)) { | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
return; | return; | ||||
} | } | ||||
switch (msg[0] & IXGBE_VT_MSG_MASK) { | switch (msg[0] & IXGBE_VT_MSG_MASK) { | ||||
case IXGBE_VF_SET_MAC_ADDR: | case IXGBE_VF_SET_MAC_ADDR: | ||||
ixgbe_vf_set_mac(adapter, vf, msg); | ixgbe_vf_set_mac(sc, vf, msg); | ||||
break; | break; | ||||
case IXGBE_VF_SET_MULTICAST: | case IXGBE_VF_SET_MULTICAST: | ||||
ixgbe_vf_set_mc_addr(adapter, vf, msg); | ixgbe_vf_set_mc_addr(sc, vf, msg); | ||||
break; | break; | ||||
case IXGBE_VF_SET_VLAN: | case IXGBE_VF_SET_VLAN: | ||||
ixgbe_vf_set_vlan(adapter, vf, msg); | ixgbe_vf_set_vlan(sc, vf, msg); | ||||
break; | break; | ||||
case IXGBE_VF_SET_LPE: | case IXGBE_VF_SET_LPE: | ||||
ixgbe_vf_set_lpe(adapter, vf, msg); | ixgbe_vf_set_lpe(sc, vf, msg); | ||||
break; | break; | ||||
case IXGBE_VF_SET_MACVLAN: | case IXGBE_VF_SET_MACVLAN: | ||||
ixgbe_vf_set_macvlan(adapter, vf, msg); | ixgbe_vf_set_macvlan(sc, vf, msg); | ||||
break; | break; | ||||
case IXGBE_VF_API_NEGOTIATE: | case IXGBE_VF_API_NEGOTIATE: | ||||
ixgbe_vf_api_negotiate(adapter, vf, msg); | ixgbe_vf_api_negotiate(sc, vf, msg); | ||||
break; | break; | ||||
case IXGBE_VF_GET_QUEUES: | case IXGBE_VF_GET_QUEUES: | ||||
ixgbe_vf_get_queues(adapter, vf, msg); | ixgbe_vf_get_queues(sc, vf, msg); | ||||
break; | break; | ||||
default: | default: | ||||
ixgbe_send_vf_nack(adapter, vf, msg[0]); | ixgbe_send_vf_nack(sc, vf, msg[0]); | ||||
} | } | ||||
} /* ixgbe_process_vf_msg */ | } /* ixgbe_process_vf_msg */ | ||||
/* Tasklet for handling VF -> PF mailbox messages */ | /* Tasklet for handling VF -> PF mailbox messages */ | ||||
void | void | ||||
ixgbe_handle_mbx(void *context) | ixgbe_handle_mbx(void *context) | ||||
{ | { | ||||
if_ctx_t ctx = context; | if_ctx_t ctx = context; | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
struct ixgbe_vf *vf; | struct ixgbe_vf *vf; | ||||
int i; | int i; | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
for (i = 0; i < adapter->num_vfs; i++) { | for (i = 0; i < sc->num_vfs; i++) { | ||||
vf = &adapter->vfs[i]; | vf = &sc->vfs[i]; | ||||
if (vf->flags & IXGBE_VF_ACTIVE) { | if (vf->flags & IXGBE_VF_ACTIVE) { | ||||
if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0) | if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0) | ||||
ixgbe_process_vf_reset(adapter, vf); | ixgbe_process_vf_reset(sc, vf); | ||||
if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0) | if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0) | ||||
ixgbe_process_vf_msg(ctx, vf); | ixgbe_process_vf_msg(ctx, vf); | ||||
if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0) | if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0) | ||||
ixgbe_process_vf_ack(adapter, vf); | ixgbe_process_vf_ack(sc, vf); | ||||
} | } | ||||
} | } | ||||
} /* ixgbe_handle_mbx */ | } /* ixgbe_handle_mbx */ | ||||
int | int | ||||
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config) | ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config) | ||||
{ | { | ||||
struct adapter *adapter; | struct ixgbe_softc *sc; | ||||
int retval = 0; | int retval = 0; | ||||
adapter = iflib_get_softc(ctx); | sc = iflib_get_softc(ctx); | ||||
adapter->iov_mode = IXGBE_NO_VM; | sc->iov_mode = IXGBE_NO_VM; | ||||
if (num_vfs == 0) { | if (num_vfs == 0) { | ||||
/* Would we ever get num_vfs = 0? */ | /* Would we ever get num_vfs = 0? */ | ||||
retval = EINVAL; | retval = EINVAL; | ||||
goto err_init_iov; | goto err_init_iov; | ||||
} | } | ||||
/* | /* | ||||
* We've got to reserve a VM's worth of queues for the PF, | * We've got to reserve a VM's worth of queues for the PF, | ||||
* thus we go into "64 VF mode" if 32+ VFs are requested. | * thus we go into "64 VF mode" if 32+ VFs are requested. | ||||
* With 64 VFs, you can only have two queues per VF. | * With 64 VFs, you can only have two queues per VF. | ||||
* With 32 VFs, you can have up to four queues per VF. | * With 32 VFs, you can have up to four queues per VF. | ||||
*/ | */ | ||||
if (num_vfs >= IXGBE_32_VM) | if (num_vfs >= IXGBE_32_VM) | ||||
adapter->iov_mode = IXGBE_64_VM; | sc->iov_mode = IXGBE_64_VM; | ||||
else | else | ||||
adapter->iov_mode = IXGBE_32_VM; | sc->iov_mode = IXGBE_32_VM; | ||||
/* Again, reserving 1 VM's worth of queues for the PF */ | /* Again, reserving 1 VM's worth of queues for the PF */ | ||||
adapter->pool = adapter->iov_mode - 1; | sc->pool = sc->iov_mode - 1; | ||||
if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) { | if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) { | ||||
retval = ENOSPC; | retval = ENOSPC; | ||||
goto err_init_iov; | goto err_init_iov; | ||||
} | } | ||||
adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV, | sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV, | ||||
M_NOWAIT | M_ZERO); | M_NOWAIT | M_ZERO); | ||||
if (adapter->vfs == NULL) { | if (sc->vfs == NULL) { | ||||
retval = ENOMEM; | retval = ENOMEM; | ||||
goto err_init_iov; | goto err_init_iov; | ||||
} | } | ||||
adapter->num_vfs = num_vfs; | sc->num_vfs = num_vfs; | ||||
ixgbe_if_init(adapter->ctx); | ixgbe_if_init(sc->ctx); | ||||
adapter->feat_en |= IXGBE_FEATURE_SRIOV; | sc->feat_en |= IXGBE_FEATURE_SRIOV; | ||||
return (retval); | return (retval); | ||||
err_init_iov: | err_init_iov: | ||||
adapter->num_vfs = 0; | sc->num_vfs = 0; | ||||
adapter->pool = 0; | sc->pool = 0; | ||||
adapter->iov_mode = IXGBE_NO_VM; | sc->iov_mode = IXGBE_NO_VM; | ||||
return (retval); | return (retval); | ||||
} /* ixgbe_if_iov_init */ | } /* ixgbe_if_iov_init */ | ||||
void | void | ||||
ixgbe_if_iov_uninit(if_ctx_t ctx) | ixgbe_if_iov_uninit(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixgbe_hw *hw; | struct ixgbe_hw *hw; | ||||
struct adapter *adapter; | struct ixgbe_softc *sc; | ||||
uint32_t pf_reg, vf_reg; | uint32_t pf_reg, vf_reg; | ||||
adapter = iflib_get_softc(ctx); | sc = iflib_get_softc(ctx); | ||||
hw = &adapter->hw; | hw = &sc->hw; | ||||
/* Enable rx/tx for the PF and disable it for all VFs. */ | /* Enable rx/tx for the PF and disable it for all VFs. */ | ||||
pf_reg = IXGBE_VF_INDEX(adapter->pool); | pf_reg = IXGBE_VF_INDEX(sc->pool); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool)); | IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool)); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool)); | IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool)); | ||||
if (pf_reg == 0) | if (pf_reg == 0) | ||||
vf_reg = 1; | vf_reg = 1; | ||||
else | else | ||||
vf_reg = 0; | vf_reg = 0; | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0); | IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0); | IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0); | ||||
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); | ||||
free(adapter->vfs, M_IXGBE_SRIOV); | free(sc->vfs, M_IXGBE_SRIOV); | ||||
adapter->vfs = NULL; | sc->vfs = NULL; | ||||
adapter->num_vfs = 0; | sc->num_vfs = 0; | ||||
adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; | sc->feat_en &= ~IXGBE_FEATURE_SRIOV; | ||||
} /* ixgbe_if_iov_uninit */ | } /* ixgbe_if_iov_uninit */ | ||||
/************************************************************************
 * ixgbe_init_vf
 *
 *   Program the hardware state for a single VF: mailbox interrupt
 *   unmask, default VLAN, MAC address (RAR slot), and rx/tx enable.
 *   Silently ignores slots that have not been activated.
 ************************************************************************/
static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &sc->hw;

	/* Nothing to program for a VF slot that was never added. */
	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	/* Set this VF's bit in PFMBIMR (presumably the PF mailbox
	 * interrupt mask — confirm against the datasheet). */
	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	// XXX multicast addresses

	/* Only install a RAR entry when the VF has a valid MAC address. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, true);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	/* Notify the VF (if it is listening) that the PF touched it. */
	ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */
/************************************************************************
 * ixgbe_initialize_iov
 *
 *   Program the hardware for the currently selected SR-IOV mode
 *   (32- or 64-pool) and (re)initialize every VF slot.  A no-op when
 *   virtualization is disabled (IXGBE_NO_VM).
 ************************************************************************/
void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		/* iov_mode is chosen by the iov_init path; any other
		 * value here is a driver bug, hence the panic. */
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Enable VT mode and place the PF's pool number in VT_CTL. */
	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	/* Bring up every configured VF slot (inactive ones are skipped). */
	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */
/* Check the max frame setting of all active VF's */ | /* Check the max frame setting of all active VF's */ | ||||
void | void | ||||
ixgbe_recalculate_max_frame(struct adapter *adapter) | ixgbe_recalculate_max_frame(struct ixgbe_softc *sc) | ||||
{ | { | ||||
struct ixgbe_vf *vf; | struct ixgbe_vf *vf; | ||||
for (int i = 0; i < adapter->num_vfs; i++) { | for (int i = 0; i < sc->num_vfs; i++) { | ||||
vf = &adapter->vfs[i]; | vf = &sc->vfs[i]; | ||||
if (vf->flags & IXGBE_VF_ACTIVE) | if (vf->flags & IXGBE_VF_ACTIVE) | ||||
ixgbe_update_max_frame(adapter, vf->maximum_frame_size); | ixgbe_update_max_frame(sc, vf->maximum_frame_size); | ||||
} | } | ||||
} /* ixgbe_recalculate_max_frame */ | } /* ixgbe_recalculate_max_frame */ | ||||
int | int | ||||
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config) | ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config) | ||||
{ | { | ||||
struct adapter *adapter; | struct ixgbe_softc *sc; | ||||
struct ixgbe_vf *vf; | struct ixgbe_vf *vf; | ||||
const void *mac; | const void *mac; | ||||
adapter = iflib_get_softc(ctx); | sc = iflib_get_softc(ctx); | ||||
KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d", | KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d", | ||||
vfnum, adapter->num_vfs)); | vfnum, sc->num_vfs)); | ||||
vf = &adapter->vfs[vfnum]; | vf = &sc->vfs[vfnum]; | ||||
vf->pool= vfnum; | vf->pool= vfnum; | ||||
/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */ | /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */ | ||||
vf->rar_index = vfnum + 1; | vf->rar_index = vfnum + 1; | ||||
vf->default_vlan = 0; | vf->default_vlan = 0; | ||||
vf->maximum_frame_size = ETHER_MAX_LEN; | vf->maximum_frame_size = ETHER_MAX_LEN; | ||||
ixgbe_update_max_frame(adapter, vf->maximum_frame_size); | ixgbe_update_max_frame(sc, vf->maximum_frame_size); | ||||
if (nvlist_exists_binary(config, "mac-addr")) { | if (nvlist_exists_binary(config, "mac-addr")) { | ||||
mac = nvlist_get_binary(config, "mac-addr", NULL); | mac = nvlist_get_binary(config, "mac-addr", NULL); | ||||
bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); | bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); | ||||
if (nvlist_get_bool(config, "allow-set-mac")) | if (nvlist_get_bool(config, "allow-set-mac")) | ||||
vf->flags |= IXGBE_VF_CAP_MAC; | vf->flags |= IXGBE_VF_CAP_MAC; | ||||
} else | } else | ||||
/* | /* | ||||
* If the administrator has not specified a MAC address then | * If the administrator has not specified a MAC address then | ||||
* we must allow the VF to choose one. | * we must allow the VF to choose one. | ||||
*/ | */ | ||||
vf->flags |= IXGBE_VF_CAP_MAC; | vf->flags |= IXGBE_VF_CAP_MAC; | ||||
vf->flags |= IXGBE_VF_ACTIVE; | vf->flags |= IXGBE_VF_ACTIVE; | ||||
ixgbe_init_vf(adapter, vf); | ixgbe_init_vf(sc, vf); | ||||
return (0); | return (0); | ||||
} /* ixgbe_if_iov_vf_add */ | } /* ixgbe_if_iov_vf_add */ | ||||
#else | #else | ||||
/*
 * Stub used when the driver is built without SR-IOV support
 * (the real implementation lives in the #if branch above);
 * keeps the symbol defined for callers.
 */
void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */
#endif | #endif |