Changeset View
Changeset View
Standalone View
Standalone View
sys/net/if_lagg.c
Show First 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
#include <net/if_arp.h> | #include <net/if_arp.h> | ||||
#include <net/if_dl.h> | #include <net/if_dl.h> | ||||
#include <net/if_media.h> | #include <net/if_media.h> | ||||
#include <net/if_types.h> | #include <net/if_types.h> | ||||
#include <net/if_var.h> | #include <net/if_var.h> | ||||
#include <net/bpf.h> | #include <net/bpf.h> | ||||
#include <net/route.h> | #include <net/route.h> | ||||
#include <net/vnet.h> | #include <net/vnet.h> | ||||
#include <net/infiniband.h> | |||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
#include <netinet/in.h> | #include <netinet/in.h> | ||||
#include <netinet/ip.h> | #include <netinet/ip.h> | ||||
#endif | #endif | ||||
#ifdef INET | #ifdef INET | ||||
#include <netinet/in_systm.h> | #include <netinet/in_systm.h> | ||||
#include <netinet/if_ether.h> | #include <netinet/if_ether.h> | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | |||||
#define V_lagg_list_mtx VNET(lagg_list_mtx) | #define V_lagg_list_mtx VNET(lagg_list_mtx) | ||||
#define LAGG_LIST_LOCK_INIT(x) mtx_init(&V_lagg_list_mtx, \ | #define LAGG_LIST_LOCK_INIT(x) mtx_init(&V_lagg_list_mtx, \ | ||||
"if_lagg list", NULL, MTX_DEF) | "if_lagg list", NULL, MTX_DEF) | ||||
#define LAGG_LIST_LOCK_DESTROY(x) mtx_destroy(&V_lagg_list_mtx) | #define LAGG_LIST_LOCK_DESTROY(x) mtx_destroy(&V_lagg_list_mtx) | ||||
#define LAGG_LIST_LOCK(x) mtx_lock(&V_lagg_list_mtx) | #define LAGG_LIST_LOCK(x) mtx_lock(&V_lagg_list_mtx) | ||||
#define LAGG_LIST_UNLOCK(x) mtx_unlock(&V_lagg_list_mtx) | #define LAGG_LIST_UNLOCK(x) mtx_unlock(&V_lagg_list_mtx) | ||||
eventhandler_tag lagg_detach_cookie = NULL; | eventhandler_tag lagg_detach_cookie = NULL; | ||||
static int lagg_clone_create(struct if_clone *, int, caddr_t); | static int lagg_clone_create_ethernet(struct if_clone *, int, caddr_t); | ||||
static void lagg_clone_destroy(struct ifnet *); | static void lagg_clone_destroy_ethernet(struct ifnet *); | ||||
static int lagg_clone_create_infiniband(struct if_clone *, int, caddr_t); | |||||
static void lagg_clone_destroy_infiniband(struct ifnet *); | |||||
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner); | VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner); | ||||
VNET_DEFINE_STATIC(struct if_clone *, bond_cloner); | |||||
#define V_lagg_cloner VNET(lagg_cloner) | #define V_lagg_cloner VNET(lagg_cloner) | ||||
#define V_bond_cloner VNET(bond_cloner) | |||||
static const char laggname[] = "lagg"; | static const char laggname[] = "lagg"; | ||||
static const char bondname[] = "bond"; | |||||
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface"); | static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface"); | ||||
static void lagg_capabilities(struct lagg_softc *); | static void lagg_capabilities(struct lagg_softc *); | ||||
static int lagg_port_create(struct lagg_softc *, struct ifnet *); | static int lagg_port_create(struct lagg_softc *, struct ifnet *); | ||||
static int lagg_port_destroy(struct lagg_port *, int); | static int lagg_port_destroy(struct lagg_port *, int); | ||||
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *); | static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *); | ||||
static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *); | |||||
static void lagg_linkstate(struct lagg_softc *); | static void lagg_linkstate(struct lagg_softc *); | ||||
static void lagg_port_state(struct ifnet *, int); | static void lagg_port_state(struct ifnet *, int); | ||||
static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t); | static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t); | ||||
static int lagg_port_output(struct ifnet *, struct mbuf *, | static int lagg_port_output(struct ifnet *, struct mbuf *, | ||||
const struct sockaddr *, struct route *); | const struct sockaddr *, struct route *); | ||||
static void lagg_port_ifdetach(void *arg __unused, struct ifnet *); | static void lagg_port_ifdetach(void *arg __unused, struct ifnet *); | ||||
#ifdef LAGG_PORT_STACKING | #ifdef LAGG_PORT_STACKING | ||||
static int lagg_port_checkstacking(struct lagg_softc *); | static int lagg_port_checkstacking(struct lagg_softc *); | ||||
Show All 16 Lines | |||||
#endif | #endif | ||||
static int lagg_setmulti(struct lagg_port *); | static int lagg_setmulti(struct lagg_port *); | ||||
static int lagg_clrmulti(struct lagg_port *); | static int lagg_clrmulti(struct lagg_port *); | ||||
static int lagg_setcaps(struct lagg_port *, int cap); | static int lagg_setcaps(struct lagg_port *, int cap); | ||||
static int lagg_setflag(struct lagg_port *, int, int, | static int lagg_setflag(struct lagg_port *, int, int, | ||||
int (*func)(struct ifnet *, int)); | int (*func)(struct ifnet *, int)); | ||||
static int lagg_setflags(struct lagg_port *, int status); | static int lagg_setflags(struct lagg_port *, int status); | ||||
static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt); | static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt); | ||||
static int lagg_transmit(struct ifnet *, struct mbuf *); | static int lagg_transmit_ethernet(struct ifnet *, struct mbuf *); | ||||
static int lagg_transmit_infiniband(struct ifnet *, struct mbuf *); | |||||
static void lagg_qflush(struct ifnet *); | static void lagg_qflush(struct ifnet *); | ||||
static int lagg_media_change(struct ifnet *); | static int lagg_media_change(struct ifnet *); | ||||
static void lagg_media_status(struct ifnet *, struct ifmediareq *); | static void lagg_media_status(struct ifnet *, struct ifmediareq *); | ||||
static struct lagg_port *lagg_link_active(struct lagg_softc *, | static struct lagg_port *lagg_link_active(struct lagg_softc *, | ||||
struct lagg_port *); | struct lagg_port *); | ||||
/* Simple round robin */ | /* Simple round robin */ | ||||
static void lagg_rr_attach(struct lagg_softc *); | static void lagg_rr_attach(struct lagg_softc *); | ||||
▲ Show 20 Lines • Show All 124 Lines • ▼ Show 20 Lines | SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN, | ||||
"Default setting for flowid shift for load sharing"); | "Default setting for flowid shift for load sharing"); | ||||
static void | static void | ||||
vnet_lagg_init(const void *unused __unused) | vnet_lagg_init(const void *unused __unused) | ||||
{ | { | ||||
LAGG_LIST_LOCK_INIT(); | LAGG_LIST_LOCK_INIT(); | ||||
SLIST_INIT(&V_lagg_list); | SLIST_INIT(&V_lagg_list); | ||||
V_lagg_cloner = if_clone_simple(laggname, lagg_clone_create, | V_lagg_cloner = if_clone_simple(laggname, lagg_clone_create_ethernet, | ||||
lagg_clone_destroy, 0); | lagg_clone_destroy_ethernet, 0); | ||||
V_bond_cloner = if_clone_simple(bondname, lagg_clone_create_infiniband, | |||||
lagg_clone_destroy_infiniband, 0); | |||||
} | } | ||||
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, | VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, | ||||
vnet_lagg_init, NULL); | vnet_lagg_init, NULL); | ||||
static void | static void | ||||
vnet_lagg_uninit(const void *unused __unused) | vnet_lagg_uninit(const void *unused __unused) | ||||
{ | { | ||||
if_clone_detach(V_bond_cloner); | |||||
if_clone_detach(V_lagg_cloner); | if_clone_detach(V_lagg_cloner); | ||||
LAGG_LIST_LOCK_DESTROY(); | LAGG_LIST_LOCK_DESTROY(); | ||||
} | } | ||||
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY, | VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY, | ||||
vnet_lagg_uninit, NULL); | vnet_lagg_uninit, NULL); | ||||
static int | static int | ||||
lagg_modevent(module_t mod, int type, void *data) | lagg_modevent(module_t mod, int type, void *data) | ||||
{ | { | ||||
switch (type) { | switch (type) { | ||||
case MOD_LOAD: | case MOD_LOAD: | ||||
lagg_input_p = lagg_input; | lagg_input_ethernet_p = lagg_input_ethernet; | ||||
lagg_input_infiniband_p = lagg_input_infiniband; | |||||
lagg_linkstate_p = lagg_port_state; | lagg_linkstate_p = lagg_port_state; | ||||
lagg_detach_cookie = EVENTHANDLER_REGISTER( | lagg_detach_cookie = EVENTHANDLER_REGISTER( | ||||
ifnet_departure_event, lagg_port_ifdetach, NULL, | ifnet_departure_event, lagg_port_ifdetach, NULL, | ||||
EVENTHANDLER_PRI_ANY); | EVENTHANDLER_PRI_ANY); | ||||
break; | break; | ||||
case MOD_UNLOAD: | case MOD_UNLOAD: | ||||
EVENTHANDLER_DEREGISTER(ifnet_departure_event, | EVENTHANDLER_DEREGISTER(ifnet_departure_event, | ||||
lagg_detach_cookie); | lagg_detach_cookie); | ||||
lagg_input_p = NULL; | lagg_input_ethernet_p = NULL; | ||||
lagg_input_infiniband_p = NULL; | |||||
lagg_linkstate_p = NULL; | lagg_linkstate_p = NULL; | ||||
break; | break; | ||||
default: | default: | ||||
return (EOPNOTSUPP); | return (EOPNOTSUPP); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static moduledata_t lagg_mod = { | static moduledata_t lagg_mod = { | ||||
"if_lagg", | "if_lagg", | ||||
lagg_modevent, | lagg_modevent, | ||||
0 | 0 | ||||
}; | }; | ||||
DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); | DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); | ||||
MODULE_VERSION(if_lagg, 1); | MODULE_VERSION(if_lagg, 1); | ||||
MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1); | |||||
static void | static void | ||||
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr) | lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr) | ||||
{ | { | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto", | KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto", | ||||
__func__, sc)); | __func__, sc)); | ||||
▲ Show 20 Lines • Show All 133 Lines • ▼ Show 20 Lines | lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag) | ||||
LAGG_RLOCK(); | LAGG_RLOCK(); | ||||
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | ||||
EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag); | EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag); | ||||
LAGG_RUNLOCK(); | LAGG_RUNLOCK(); | ||||
} | } | ||||
static int | static int | ||||
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params) | lagg_clone_create_common(struct if_clone *ifc, int unit, caddr_t params, int if_type) | ||||
{ | { | ||||
struct lagg_softc *sc; | struct lagg_softc *sc; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
static const u_char eaddr[6]; /* 00:00:00:00:00:00 */ | static const uint8_t eaddr[LAGG_ADDR_LEN]; | ||||
static const uint8_t ib_bcast_addr[INFINIBAND_ADDR_LEN] = { | |||||
0x00, 0xff, 0xff, 0xff, | |||||
0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, | |||||
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff | |||||
}; | |||||
sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK|M_ZERO); | sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK|M_ZERO); | ||||
ifp = sc->sc_ifp = if_alloc(IFT_ETHER); | ifp = sc->sc_ifp = if_alloc(if_type); | ||||
if (ifp == NULL) { | if (ifp == NULL) { | ||||
free(sc, M_LAGG); | free(sc, M_LAGG); | ||||
return (ENOSPC); | return (ENOSPC); | ||||
} | } | ||||
LAGG_SX_INIT(sc); | LAGG_SX_INIT(sc); | ||||
mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF); | |||||
callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0); | |||||
LAGG_XLOCK(sc); | LAGG_XLOCK(sc); | ||||
if (V_def_use_flowid) | if (V_def_use_flowid) | ||||
sc->sc_opts |= LAGG_OPT_USE_FLOWID; | sc->sc_opts |= LAGG_OPT_USE_FLOWID; | ||||
if (V_def_use_numa) | if (V_def_use_numa) | ||||
sc->sc_opts |= LAGG_OPT_USE_NUMA; | sc->sc_opts |= LAGG_OPT_USE_NUMA; | ||||
sc->flowid_shift = V_def_flowid_shift; | sc->flowid_shift = V_def_flowid_shift; | ||||
/* Hash all layers by default */ | /* Hash all layers by default */ | ||||
sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; | sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; | ||||
lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); | lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); | ||||
CK_SLIST_INIT(&sc->sc_ports); | CK_SLIST_INIT(&sc->sc_ports); | ||||
switch (if_type) { | |||||
case IFT_ETHER: | |||||
/* Initialise pseudo media types */ | /* Initialise pseudo media types */ | ||||
ifmedia_init(&sc->sc_media, 0, lagg_media_change, | ifmedia_init(&sc->sc_media, 0, lagg_media_change, | ||||
lagg_media_status); | lagg_media_status); | ||||
ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); | ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); | ||||
if_initname(ifp, laggname, unit); | if_initname(ifp, laggname, unit); | ||||
ifp->if_transmit = lagg_transmit_ethernet; | |||||
break; | |||||
case IFT_INFINIBAND: | |||||
if_initname(ifp, bondname, unit); | |||||
ifp->if_transmit = lagg_transmit_infiniband; | |||||
break; | |||||
default: | |||||
break; | |||||
} | |||||
ifp->if_softc = sc; | ifp->if_softc = sc; | ||||
ifp->if_transmit = lagg_transmit; | |||||
ifp->if_qflush = lagg_qflush; | ifp->if_qflush = lagg_qflush; | ||||
ifp->if_init = lagg_init; | ifp->if_init = lagg_init; | ||||
ifp->if_ioctl = lagg_ioctl; | ifp->if_ioctl = lagg_ioctl; | ||||
ifp->if_get_counter = lagg_get_counter; | ifp->if_get_counter = lagg_get_counter; | ||||
ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; | ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; | ||||
#if defined(KERN_TLS) || defined(RATELIMIT) | #if defined(KERN_TLS) || defined(RATELIMIT) | ||||
ifp->if_snd_tag_alloc = lagg_snd_tag_alloc; | ifp->if_snd_tag_alloc = lagg_snd_tag_alloc; | ||||
ifp->if_snd_tag_modify = lagg_snd_tag_modify; | ifp->if_snd_tag_modify = lagg_snd_tag_modify; | ||||
ifp->if_snd_tag_query = lagg_snd_tag_query; | ifp->if_snd_tag_query = lagg_snd_tag_query; | ||||
ifp->if_snd_tag_free = lagg_snd_tag_free; | ifp->if_snd_tag_free = lagg_snd_tag_free; | ||||
ifp->if_ratelimit_query = lagg_ratelimit_query; | ifp->if_ratelimit_query = lagg_ratelimit_query; | ||||
#endif | #endif | ||||
ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS; | ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS; | ||||
/* | /* | ||||
* Attach as an ordinary ethernet device, children will be attached | * Attach as an ordinary ethernet device, children will be attached | ||||
* as special device IFT_IEEE8023ADLAG. | * as special device IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG. | ||||
*/ | */ | ||||
switch (if_type) { | |||||
case IFT_ETHER: | |||||
ether_ifattach(ifp, eaddr); | ether_ifattach(ifp, eaddr); | ||||
break; | |||||
case IFT_INFINIBAND: | |||||
infiniband_ifattach(ifp, eaddr, ib_bcast_addr); | |||||
break; | |||||
default: | |||||
break; | |||||
} | |||||
sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | ||||
lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST); | lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST); | ||||
sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | ||||
lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); | lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); | ||||
/* Insert into the global list of laggs */ | /* Insert into the global list of laggs */ | ||||
LAGG_LIST_LOCK(); | LAGG_LIST_LOCK(); | ||||
SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries); | SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries); | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
LAGG_XUNLOCK(sc); | LAGG_XUNLOCK(sc); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | |||||
lagg_clone_create_ethernet(struct if_clone *ifc, int unit, caddr_t params) | |||||
{ | |||||
return (lagg_clone_create_common(ifc, unit, params, IFT_ETHER)); | |||||
} | |||||
static int | |||||
lagg_clone_create_infiniband(struct if_clone *ifc, int unit, caddr_t params) | |||||
{ | |||||
return (lagg_clone_create_common(ifc, unit, params, IFT_INFINIBAND)); | |||||
} | |||||
static void | static void | ||||
lagg_clone_destroy(struct ifnet *ifp) | lagg_clone_destroy_common(struct ifnet *ifp, int if_type) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
LAGG_XLOCK(sc); | LAGG_XLOCK(sc); | ||||
sc->sc_destroying = 1; | sc->sc_destroying = 1; | ||||
lagg_stop(sc); | lagg_stop(sc); | ||||
ifp->if_flags &= ~IFF_UP; | ifp->if_flags &= ~IFF_UP; | ||||
EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); | EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); | ||||
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); | EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); | ||||
/* Shutdown and remove lagg ports */ | /* Shutdown and remove lagg ports */ | ||||
while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL) | while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL) | ||||
lagg_port_destroy(lp, 1); | lagg_port_destroy(lp, 1); | ||||
/* Unhook the aggregation protocol */ | /* Unhook the aggregation protocol */ | ||||
lagg_proto_detach(sc); | lagg_proto_detach(sc); | ||||
LAGG_XUNLOCK(sc); | LAGG_XUNLOCK(sc); | ||||
switch (if_type) { | |||||
case IFT_ETHER: | |||||
ifmedia_removeall(&sc->sc_media); | ifmedia_removeall(&sc->sc_media); | ||||
ether_ifdetach(ifp); | ether_ifdetach(ifp); | ||||
break; | |||||
case IFT_INFINIBAND: | |||||
infiniband_ifdetach(ifp); | |||||
break; | |||||
default: | |||||
break; | |||||
} | |||||
if_free(ifp); | if_free(ifp); | ||||
LAGG_LIST_LOCK(); | LAGG_LIST_LOCK(); | ||||
SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries); | SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries); | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
mtx_destroy(&sc->sc_mtx); | |||||
LAGG_SX_DESTROY(sc); | LAGG_SX_DESTROY(sc); | ||||
free(sc, M_LAGG); | free(sc, M_LAGG); | ||||
} | } | ||||
static void | static void | ||||
lagg_clone_destroy_ethernet(struct ifnet *ifp) | |||||
{ | |||||
lagg_clone_destroy_common(ifp, IFT_ETHER); | |||||
} | |||||
static void | |||||
lagg_clone_destroy_infiniband(struct ifnet *ifp) | |||||
{ | |||||
lagg_clone_destroy_common(ifp, IFT_INFINIBAND); | |||||
} | |||||
static void | |||||
lagg_capabilities(struct lagg_softc *sc) | lagg_capabilities(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
int cap, ena, pena; | int cap, ena, pena; | ||||
uint64_t hwa; | uint64_t hwa; | ||||
struct ifnet_hw_tsomax hw_tsomax; | struct ifnet_hw_tsomax hw_tsomax; | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp) | lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp) | ||||
{ | { | ||||
struct lagg_softc *sc_ptr; | struct lagg_softc *sc_ptr; | ||||
struct lagg_port *lp, *tlp; | struct lagg_port *lp, *tlp; | ||||
struct ifreq ifr; | struct ifreq ifr; | ||||
int error, i, oldmtu; | int error, i, oldmtu; | ||||
int if_type; | |||||
uint64_t *pval; | uint64_t *pval; | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
if (sc->sc_ifp == ifp) { | if (sc->sc_ifp == ifp) { | ||||
if_printf(sc->sc_ifp, | if_printf(sc->sc_ifp, | ||||
"cannot add a lagg to itself as a port\n"); | "cannot add a lagg to itself as a port\n"); | ||||
return (EINVAL); | return (EINVAL); | ||||
Show All 10 Lines | lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp) | ||||
if (ifp->if_lagg != NULL) { | if (ifp->if_lagg != NULL) { | ||||
/* Port is already in the current lagg? */ | /* Port is already in the current lagg? */ | ||||
lp = (struct lagg_port *)ifp->if_lagg; | lp = (struct lagg_port *)ifp->if_lagg; | ||||
if (lp->lp_softc == sc) | if (lp->lp_softc == sc) | ||||
return (EEXIST); | return (EEXIST); | ||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
switch (sc->sc_ifp->if_type) { | |||||
case IFT_ETHER: | |||||
/* XXX Disallow non-ethernet interfaces (this should be any of 802) */ | /* XXX Disallow non-ethernet interfaces (this should be any of 802) */ | ||||
if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN) | if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN) | ||||
return (EPROTONOSUPPORT); | return (EPROTONOSUPPORT); | ||||
if_type = IFT_IEEE8023ADLAG; | |||||
break; | |||||
case IFT_INFINIBAND: | |||||
/* XXX Disallow non-infiniband interfaces */ | |||||
if (ifp->if_type != IFT_INFINIBAND) | |||||
return (EPROTONOSUPPORT); | |||||
if_type = IFT_INFINIBANDLAG; | |||||
break; | |||||
default: | |||||
break; | |||||
} | |||||
/* Allow the first Ethernet member to define the MTU */ | /* Allow the first Ethernet member to define the MTU */ | ||||
oldmtu = -1; | oldmtu = -1; | ||||
if (CK_SLIST_EMPTY(&sc->sc_ports)) { | if (CK_SLIST_EMPTY(&sc->sc_ports)) { | ||||
sc->sc_ifp->if_mtu = ifp->if_mtu; | sc->sc_ifp->if_mtu = ifp->if_mtu; | ||||
} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) { | } else if (sc->sc_ifp->if_mtu != ifp->if_mtu) { | ||||
if (ifp->if_ioctl == NULL) { | if (ifp->if_ioctl == NULL) { | ||||
if_printf(sc->sc_ifp, "cannot change MTU for %s\n", | if_printf(sc->sc_ifp, "cannot change MTU for %s\n", | ||||
Show All 40 Lines | |||||
#endif | #endif | ||||
} | } | ||||
} | } | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
if_ref(ifp); | if_ref(ifp); | ||||
lp->lp_ifp = ifp; | lp->lp_ifp = ifp; | ||||
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN); | bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen); | ||||
lp->lp_ifcapenable = ifp->if_capenable; | lp->lp_ifcapenable = ifp->if_capenable; | ||||
if (CK_SLIST_EMPTY(&sc->sc_ports)) { | if (CK_SLIST_EMPTY(&sc->sc_ports)) { | ||||
bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen); | ||||
lagg_proto_lladdr(sc); | lagg_proto_lladdr(sc); | ||||
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | ||||
} else { | } else { | ||||
if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen); | ||||
} | } | ||||
lagg_setflags(lp, 1); | lagg_setflags(lp, 1); | ||||
if (CK_SLIST_EMPTY(&sc->sc_ports)) | if (CK_SLIST_EMPTY(&sc->sc_ports)) | ||||
sc->sc_primary = lp; | sc->sc_primary = lp; | ||||
/* Change the interface type */ | /* Change the interface type */ | ||||
lp->lp_iftype = ifp->if_type; | lp->lp_iftype = ifp->if_type; | ||||
ifp->if_type = IFT_IEEE8023ADLAG; | ifp->if_type = if_type; | ||||
ifp->if_lagg = lp; | ifp->if_lagg = lp; | ||||
lp->lp_ioctl = ifp->if_ioctl; | lp->lp_ioctl = ifp->if_ioctl; | ||||
ifp->if_ioctl = lagg_port_ioctl; | ifp->if_ioctl = lagg_port_ioctl; | ||||
lp->lp_output = ifp->if_output; | lp->lp_output = ifp->if_output; | ||||
ifp->if_output = lagg_port_output; | ifp->if_output = lagg_port_output; | ||||
/* Read port counters */ | /* Read port counters */ | ||||
pval = lp->port_counters.val; | pval = lp->port_counters.val; | ||||
▲ Show 20 Lines • Show All 100 Lines • ▼ Show 20 Lines | lagg_port_destroy(struct lagg_port *lp, int rundelport) | ||||
} | } | ||||
/* Finally, remove the port from the lagg */ | /* Finally, remove the port from the lagg */ | ||||
CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries); | CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries); | ||||
sc->sc_count--; | sc->sc_count--; | ||||
/* Update the primary interface */ | /* Update the primary interface */ | ||||
if (lp == sc->sc_primary) { | if (lp == sc->sc_primary) { | ||||
uint8_t lladdr[ETHER_ADDR_LEN]; | uint8_t lladdr[LAGG_ADDR_LEN]; | ||||
if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL) | if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL) | ||||
bzero(&lladdr, ETHER_ADDR_LEN); | bzero(&lladdr, LAGG_ADDR_LEN); | ||||
else | else | ||||
bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN); | bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN); | ||||
sc->sc_primary = lp0; | sc->sc_primary = lp0; | ||||
if (sc->sc_destroying == 0) { | if (sc->sc_destroying == 0) { | ||||
bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen); | ||||
lagg_proto_lladdr(sc); | lagg_proto_lladdr(sc); | ||||
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | ||||
} | } | ||||
/* | /* | ||||
* Update lladdr for each port (new primary needs update | * Update lladdr for each port (new primary needs update | ||||
* as well, to switch from old lladdr to its 'real' one) | * as well, to switch from old lladdr to its 'real' one) | ||||
*/ | */ | ||||
CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries) | CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries) | ||||
if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN); | if_setlladdr(lp_ptr->lp_ifp, lladdr, lp_ptr->lp_ifp->if_addrlen); | ||||
} | } | ||||
if (lp->lp_ifflags) | if (lp->lp_ifflags) | ||||
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__); | if_printf(ifp, "%s: lp_ifflags unclean\n", __func__); | ||||
if (lp->lp_detaching == 0) { | if (lp->lp_detaching == 0) { | ||||
lagg_setflags(lp, 0); | lagg_setflags(lp, 0); | ||||
lagg_setcaps(lp, lp->lp_ifcapenable); | lagg_setcaps(lp, lp->lp_ifcapenable); | ||||
if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN); | if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen); | ||||
} | } | ||||
/* | /* | ||||
* free port and release it's ifnet reference after a grace period has | * free port and release it's ifnet reference after a grace period has | ||||
* elapsed. | * elapsed. | ||||
*/ | */ | ||||
NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx); | NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx); | ||||
/* Update lagg capabilities */ | /* Update lagg capabilities */ | ||||
lagg_capabilities(sc); | lagg_capabilities(sc); | ||||
lagg_linkstate(sc); | lagg_linkstate(sc); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | ||||
{ | { | ||||
struct lagg_reqport *rp = (struct lagg_reqport *)data; | struct lagg_reqport *rp = (struct lagg_reqport *)data; | ||||
struct lagg_softc *sc; | struct lagg_softc *sc; | ||||
struct lagg_port *lp = NULL; | struct lagg_port *lp = NULL; | ||||
int error = 0; | int error = 0; | ||||
/* Should be checked by the caller */ | /* Should be checked by the caller */ | ||||
if (ifp->if_type != IFT_IEEE8023ADLAG || | switch (ifp->if_type) { | ||||
(lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL) | case IFT_IEEE8023ADLAG: | ||||
case IFT_INFINIBANDLAG: | |||||
if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL) | |||||
goto fallback; | goto fallback; | ||||
break; | |||||
default: | |||||
goto fallback; | |||||
} | |||||
switch (cmd) { | switch (cmd) { | ||||
case SIOCGLAGGPORT: | case SIOCGLAGGPORT: | ||||
if (rp->rp_portname[0] == '\0' || | if (rp->rp_portname[0] == '\0' || | ||||
ifunit(rp->rp_portname) != ifp) { | ifunit(rp->rp_portname) != ifp) { | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 173 Lines • ▼ Show 20 Lines | case LAGG_PROTO_LACP: | ||||
if (lacp_isdistributing(lp)) | if (lacp_isdistributing(lp)) | ||||
rp->rp_flags |= LAGG_PORT_DISTRIBUTING; | rp->rp_flags |= LAGG_PORT_DISTRIBUTING; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
lagg_watchdog_infiniband(void *arg) | |||||
{ | |||||
struct lagg_softc *sc; | |||||
struct lagg_port *lp; | |||||
struct ifnet *ifp; | |||||
struct ifnet *lp_ifp; | |||||
sc = arg; | |||||
/* | |||||
* Because infiniband nodes have a fixed mac address, we need | |||||
melifaro: Sorry, it's not particularly clear to me from the text, why if "nodes have. fixed mac address"… | |||||
hselaskyAuthorUnsubmitted Done Inline ActionsThe MAC address in IPOIB comes from the so-called GID, which is hardcoded per port. It is not possible to have two different ports use the same MAC. It might be we could piggy-back some link-up/down events, but I don't see any problem with this timer. It is low bandwidth. The timer also provides a guarantee that the iflladdr events don't happen too frequently. hselasky: The MAC address in IPOIB comes from the so-called GID, which is hardcoded per port. It is not… | |||||
melifaroUnsubmitted Done Inline ActionsYep, now it is clear :-) melifaro: Yep, now it is clear :-)
Would it be possible if you could update the comment to reflect link… | |||||
* to regularly update the link level address of the parent | |||||
* bond<N> device instead. This operation does not have to be | |||||
* atomic. | |||||
*/ | |||||
LAGG_RLOCK(); | |||||
lp = lagg_link_active(sc, sc->sc_primary); | |||||
if (lp != NULL) { | |||||
ifp = sc->sc_ifp; | |||||
lp_ifp = lp->lp_ifp; | |||||
if (ifp != NULL && lp_ifp != NULL && | |||||
memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0) { | |||||
memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen); | |||||
CURVNET_SET(ifp->if_vnet); | |||||
EVENTHANDLER_INVOKE(iflladdr_event, ifp); | |||||
CURVNET_RESTORE(); | |||||
} | |||||
} | |||||
LAGG_RUNLOCK(); | |||||
callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg); | |||||
} | |||||
static void | |||||
lagg_init(void *xsc) | lagg_init(void *xsc) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)xsc; | struct lagg_softc *sc = (struct lagg_softc *)xsc; | ||||
struct ifnet *ifp = sc->sc_ifp; | struct ifnet *ifp = sc->sc_ifp; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
LAGG_XLOCK(sc); | LAGG_XLOCK(sc); | ||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | ||||
LAGG_XUNLOCK(sc); | LAGG_XUNLOCK(sc); | ||||
return; | return; | ||||
} | } | ||||
ifp->if_drv_flags |= IFF_DRV_RUNNING; | ifp->if_drv_flags |= IFF_DRV_RUNNING; | ||||
/* | /* | ||||
* Update the port lladdrs if needed. | * Update the port lladdrs if needed. | ||||
* This might be if_setlladdr() notification | * This might be if_setlladdr() notification | ||||
* that lladdr has been changed. | * that lladdr has been changed. | ||||
*/ | */ | ||||
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp), | if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp), | ||||
ETHER_ADDR_LEN) != 0) | ifp->if_addrlen) != 0) | ||||
if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN); | if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen); | ||||
} | } | ||||
lagg_proto_init(sc); | lagg_proto_init(sc); | ||||
if (ifp->if_type == IFT_INFINIBAND) { | |||||
mtx_lock(&sc->sc_mtx); | |||||
lagg_watchdog_infiniband(sc); | |||||
mtx_unlock(&sc->sc_mtx); | |||||
} | |||||
LAGG_XUNLOCK(sc); | LAGG_XUNLOCK(sc); | ||||
} | } | ||||
static void | static void | ||||
lagg_stop(struct lagg_softc *sc) | lagg_stop(struct lagg_softc *sc) | ||||
{ | { | ||||
struct ifnet *ifp = sc->sc_ifp; | struct ifnet *ifp = sc->sc_ifp; | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) | if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) | ||||
return; | return; | ||||
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; | ifp->if_drv_flags &= ~IFF_DRV_RUNNING; | ||||
lagg_proto_stop(sc); | lagg_proto_stop(sc); | ||||
mtx_lock(&sc->sc_mtx); | |||||
callout_stop(&sc->sc_watchdog); | |||||
mtx_unlock(&sc->sc_mtx); | |||||
callout_drain(&sc->sc_watchdog); | |||||
} | } | ||||
static int | static int | ||||
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
struct lagg_reqall *ra = (struct lagg_reqall *)data; | struct lagg_reqall *ra = (struct lagg_reqall *)data; | ||||
struct lagg_reqopts *ro = (struct lagg_reqopts *)data; | struct lagg_reqopts *ro = (struct lagg_reqopts *)data; | ||||
Show All 39 Lines | lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | ||||
case SIOCSLAGG: | case SIOCSLAGG: | ||||
error = priv_check(td, PRIV_NET_LAGG); | error = priv_check(td, PRIV_NET_LAGG); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
if (ra->ra_proto >= LAGG_PROTO_MAX) { | if (ra->ra_proto >= LAGG_PROTO_MAX) { | ||||
error = EPROTONOSUPPORT; | error = EPROTONOSUPPORT; | ||||
break; | break; | ||||
} | } | ||||
/* Infiniband only supports the failover protocol. */ | |||||
if (ra->ra_proto != LAGG_PROTO_FAILOVER && | |||||
ifp->if_type == IFT_INFINIBAND) { | |||||
error = EPROTONOSUPPORT; | |||||
break; | |||||
} | |||||
LAGG_XLOCK(sc); | LAGG_XLOCK(sc); | ||||
lagg_proto_detach(sc); | lagg_proto_detach(sc); | ||||
LAGG_UNLOCK_ASSERT(); | LAGG_UNLOCK_ASSERT(); | ||||
lagg_proto_attach(sc, ra->ra_proto); | lagg_proto_attach(sc, ra->ra_proto); | ||||
LAGG_XUNLOCK(sc); | LAGG_XUNLOCK(sc); | ||||
break; | break; | ||||
case SIOCGLAGGOPTS: | case SIOCGLAGGOPTS: | ||||
LAGG_XLOCK(sc); | LAGG_XLOCK(sc); | ||||
▲ Show 20 Lines • Show All 301 Lines • ▼ Show 20 Lines | CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
lagg_clrmulti(lp); | lagg_clrmulti(lp); | ||||
lagg_setmulti(lp); | lagg_setmulti(lp); | ||||
} | } | ||||
LAGG_XUNLOCK(sc); | LAGG_XUNLOCK(sc); | ||||
error = 0; | error = 0; | ||||
break; | break; | ||||
case SIOCSIFMEDIA: | case SIOCSIFMEDIA: | ||||
case SIOCGIFMEDIA: | case SIOCGIFMEDIA: | ||||
if (ifp->if_type == IFT_INFINIBAND) | |||||
error = EINVAL; | |||||
else | |||||
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | ||||
break; | break; | ||||
case SIOCSIFCAP: | case SIOCSIFCAP: | ||||
LAGG_XLOCK(sc); | LAGG_XLOCK(sc); | ||||
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
if (lp->lp_ioctl != NULL) | if (lp->lp_ioctl != NULL) | ||||
(*lp->lp_ioctl)(lp->lp_ifp, cmd, data); | (*lp->lp_ioctl)(lp->lp_ifp, cmd, data); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 292 Lines • ▼ Show 20 Lines | error = lagg_setflag(lp, lagg_pflags[i].flag, | ||||
status, lagg_pflags[i].func); | status, lagg_pflags[i].func); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
lagg_transmit(struct ifnet *ifp, struct mbuf *m) | lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
int error; | int error; | ||||
#if defined(KERN_TLS) || defined(RATELIMIT) | #if defined(KERN_TLS) || defined(RATELIMIT) | ||||
if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) | if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) | ||||
MPASS(m->m_pkthdr.snd_tag->ifp == ifp); | MPASS(m->m_pkthdr.snd_tag->ifp == ifp); | ||||
#endif | #endif | ||||
LAGG_RLOCK(); | LAGG_RLOCK(); | ||||
/* We need a Tx algorithm and at least one port */ | /* We need a Tx algorithm and at least one port */ | ||||
if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { | if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { | ||||
LAGG_RUNLOCK(); | LAGG_RUNLOCK(); | ||||
m_freem(m); | m_freem(m); | ||||
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
ETHER_BPF_MTAP(ifp, m); | ETHER_BPF_MTAP(ifp, m); | ||||
error = lagg_proto_start(sc, m); | error = lagg_proto_start(sc, m); | ||||
LAGG_RUNLOCK(); | LAGG_RUNLOCK(); | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | |||||
lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m) | |||||
{ | |||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | |||||
int error; | |||||
#if defined(KERN_TLS) || defined(RATELIMIT) | |||||
if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) | |||||
MPASS(m->m_pkthdr.snd_tag->ifp == ifp); | |||||
#endif | |||||
LAGG_RLOCK(); | |||||
/* We need a Tx algorithm and at least one port */ | |||||
if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { | |||||
LAGG_RUNLOCK(); | |||||
m_freem(m); | |||||
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | |||||
return (ENXIO); | |||||
} | |||||
INFINIBAND_BPF_MTAP(ifp, m); | |||||
error = lagg_proto_start(sc, m); | |||||
LAGG_RUNLOCK(); | |||||
return (error); | |||||
} | |||||
/*
 * The ifp->if_qflush entry point for lagg(4) is a no-op: the lagg
 * interface keeps no send queue of its own (if_transmit hands packets
 * straight to the active protocol), so there is nothing to flush here.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}
static struct mbuf * | static struct mbuf * | ||||
lagg_input(struct ifnet *ifp, struct mbuf *m) | lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m) | ||||
{ | { | ||||
struct lagg_port *lp = ifp->if_lagg; | struct lagg_port *lp = ifp->if_lagg; | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct ifnet *scifp = sc->sc_ifp; | struct ifnet *scifp = sc->sc_ifp; | ||||
LAGG_RLOCK(); | LAGG_RLOCK(); | ||||
if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || | if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || | ||||
lp->lp_detaching != 0 || | lp->lp_detaching != 0 || | ||||
Show All 10 Lines | if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) { | ||||
m_freem(m); | m_freem(m); | ||||
m = NULL; | m = NULL; | ||||
} | } | ||||
LAGG_RUNLOCK(); | LAGG_RUNLOCK(); | ||||
return (m); | return (m); | ||||
} | } | ||||
static struct mbuf * | |||||
lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m) | |||||
{ | |||||
struct lagg_port *lp = ifp->if_lagg; | |||||
struct lagg_softc *sc = lp->lp_softc; | |||||
struct ifnet *scifp = sc->sc_ifp; | |||||
LAGG_RLOCK(); | |||||
if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || | |||||
lp->lp_detaching != 0 || | |||||
sc->sc_proto == LAGG_PROTO_NONE) { | |||||
LAGG_RUNLOCK(); | |||||
m_freem(m); | |||||
return (NULL); | |||||
} | |||||
INFINIBAND_BPF_MTAP(scifp, m); | |||||
m = lagg_proto_input(sc, lp, m); | |||||
if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) { | |||||
m_freem(m); | |||||
m = NULL; | |||||
} | |||||
LAGG_RUNLOCK(); | |||||
return (m); | |||||
} | |||||
static int | static int | ||||
lagg_media_change(struct ifnet *ifp) | lagg_media_change(struct ifnet *ifp) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
if (sc->sc_ifflags & IFF_DEBUG) | if (sc->sc_ifflags & IFF_DEBUG) | ||||
printf("%s\n", __func__); | printf("%s\n", __func__); | ||||
▲ Show 20 Lines • Show All 304 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
lagg_lb_attach(struct lagg_softc *sc) | lagg_lb_attach(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
struct lagg_lb *lb; | struct lagg_lb *lb; | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO); | lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO); | ||||
if (sc->sc_ifp->if_type == IFT_INFINIBAND) | |||||
lb->lb_key = m_infiniband_tcpip_hash_init(); | |||||
else | |||||
lb->lb_key = m_ether_tcpip_hash_init(); | lb->lb_key = m_ether_tcpip_hash_init(); | ||||
sc->sc_psc = lb; | sc->sc_psc = lb; | ||||
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | ||||
lagg_lb_port_create(lp); | lagg_lb_port_create(lp); | ||||
} | } | ||||
static void | static void | ||||
lagg_lb_detach(struct lagg_softc *sc) | lagg_lb_detach(struct lagg_softc *sc) | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc; | struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc; | ||||
struct lagg_port *lp = NULL; | struct lagg_port *lp = NULL; | ||||
uint32_t p = 0; | uint32_t p = 0; | ||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && | if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && | ||||
M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) | M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) | ||||
p = m->m_pkthdr.flowid >> sc->flowid_shift; | p = m->m_pkthdr.flowid >> sc->flowid_shift; | ||||
else if (sc->sc_ifp->if_type == IFT_INFINIBAND) | |||||
p = m_infiniband_tcpip_hash(sc->sc_flags, m, lb->lb_key); | |||||
else | else | ||||
p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key); | p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key); | ||||
p %= sc->sc_count; | p %= sc->sc_count; | ||||
lp = lb->lb_ports[p]; | lp = lb->lb_ports[p]; | ||||
/* | /* | ||||
* Check the port's link state. This will return the next active | * Check the port's link state. This will return the next active | ||||
* port if the link is down or the port is NULL. | * port if the link is down or the port is NULL. | ||||
▲ Show 20 Lines • Show All 112 Lines • Show Last 20 Lines |
Sorry, it's not particularly clear to me from the text why, if "nodes have fixed mac address", we need to "regularly update". Also, isn't it a bit unclear whether this update could be triggered by some other event, instead of checking once per second? Would it be possible to elaborate?