Changeset View
Changeset View
Standalone View
Standalone View
sys/net/if_lagg.c
Show First 20 Lines • Show All 306 Lines • ▼ Show 20 Lines | static moduledata_t lagg_mod = { | ||||
"if_lagg", | "if_lagg", | ||||
lagg_modevent, | lagg_modevent, | ||||
0 | 0 | ||||
}; | }; | ||||
DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); | DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); | ||||
MODULE_VERSION(if_lagg, 1); | MODULE_VERSION(if_lagg, 1); | ||||
/*
 * Locking
 *
 * Two locks are in play: an rm (read-mostly) lock and an sx lock.  The
 * rm lock serves unsleepable reads on the fast path, while the sx lock
 * serves the control path and protects against configuration changes.
 * The sx lock is required because if_addmulti(), if_delmulti(), and
 * other functions reached from lagg_port_ioctl() may sleep.
 *
 * The rm lock protects only sc->sc_slowpath.  When that flag is true,
 * the control path holds (or is acquiring) the exclusive sx lock and
 * the fast path must back off.  This is done via lagg_rlock() and
 * lagg_runlock(), which must only be used where sleeping is not
 * allowed.
 *
 * The sx lock protects the softc and may only be held exclusively
 * while sc->sc_slowpath is true.  A shared sx lock, however, can
 * co-exist with the rm lock.  lagg_slock()/lagg_sunlock() take the
 * shared lock; lagg_xlock()/lagg_xunlock() handle the exclusive one.
 */
/* | |||||
* Grab the rmlock, ensuring sc_slowpath is false. | |||||
*/ | |||||
static inline void | |||||
lagg_rlock(struct lagg_softc *sc, struct rm_priotracker *tracker) | |||||
{ | |||||
sx_assert(&sc->sc_sx, SA_UNLOCKED); | |||||
do { | |||||
rm_rlock(&sc->sc_mtx, tracker); | |||||
if (sc->sc_slowpath == false) | |||||
break; | |||||
rm_runlock(&sc->sc_mtx, tracker); | |||||
cpu_spinwait(); | |||||
} while(1); | |||||
} | |||||
/*
 * Drop the fast-path read lock taken by lagg_rlock().
 */
static inline void
lagg_runlock(struct lagg_softc *sc, struct rm_priotracker *tracker)
{

	rm_runlock(&sc->sc_mtx, tracker);
}
/* | |||||
* Shared sx lock. Sleeps until sc_slowpath is false. | |||||
*/ | |||||
static void | static void | ||||
lagg_slock(struct lagg_softc *sc) | |||||
{ | |||||
struct rm_priotracker tracker; | |||||
rm_assert(&sc->sc_mtx, RA_UNLOCKED); | |||||
do { | |||||
sx_slock(&sc->sc_sx); | |||||
rm_rlock(&sc->sc_mtx, &tracker); | |||||
if (sc->sc_slowpath == false) { | |||||
rm_runlock(&sc->sc_mtx, &tracker); | |||||
break; | |||||
} | |||||
rm_runlock(&sc->sc_mtx, &tracker); | |||||
sx_sunlock(&sc->sc_sx); | |||||
DELAY(1); | |||||
} while(1); | |||||
} | |||||
/* | |||||
* Exclusive sx lock. Sleeps until sc_slowpath is false, | |||||
* then sets it to true and obtains the exclusive lock. | |||||
*/ | |||||
static void | |||||
lagg_xlock(struct lagg_softc *sc) | |||||
{ | |||||
rm_assert(&sc->sc_mtx, RA_UNLOCKED); | |||||
do { | |||||
rm_wlock(&sc->sc_mtx); | |||||
if (sc->sc_slowpath == false) { | |||||
sc->sc_slowpath = true; | |||||
rm_wunlock(&sc->sc_mtx); | |||||
break; | |||||
} | |||||
DELAY(1); | |||||
rm_wunlock(&sc->sc_mtx); | |||||
} while(1); | |||||
sx_xlock(&sc->sc_sx); | |||||
} | |||||
/*
 * Release a shared sx lock taken by lagg_slock().
 */
static inline void
lagg_sunlock(struct lagg_softc *sc)
{

	sx_sunlock(&sc->sc_sx);
}
/*
 * Release the exclusive sx lock taken by lagg_xlock() and clear
 * sc_slowpath so fast-path readers and shared lockers may proceed.
 */
static inline void
lagg_xunlock(struct lagg_softc *sc)
{

	sx_xunlock(&sc->sc_sx);
	/* sc_slowpath may only be written under the rm write lock. */
	rm_wlock(&sc->sc_mtx);
	sc->sc_slowpath = false;
	rm_wunlock(&sc->sc_mtx);
}
/*
 * Attach aggregation protocol 'pr' to 'sc'.  Caller must hold the
 * exclusive lagg lock and no protocol may currently be attached.
 * The protocol's attach hook (if any) runs before sc_proto is set.
 */
static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	/* Not every protocol provides an attach hook. */
	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}
/*
 * Detach the currently attached protocol from 'sc'.  Caller must hold
 * the exclusive lagg lock.  sc_proto is reset to LAGG_PROTO_NONE
 * before the protocol's detach hook (if any) runs.
 */
static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}
/*
 * Dispatch mbuf 'm' to the attached protocol's pr_start hook.
 */
static int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | |||||
/*
 * vlan config EVENT handler: propagate the new vtag to every member
 * port of this lagg.
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;

	sc = ifp->if_softc;
	if (sc != arg)		/* Not our event */
		return;

	lagg_slock(sc);
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	lagg_sunlock(sc);
}
/* | /* | ||||
* This routine is run via an vlan | * This routine is run via an vlan | ||||
* unconfig EVENT | * unconfig EVENT | ||||
*/ | */ | ||||
static void | static void | ||||
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag) | lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag) | ||||
{ | { | ||||
struct lagg_softc *sc = ifp->if_softc; | struct lagg_softc *sc = ifp->if_softc; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
if (ifp->if_softc != arg) /* Not our event */ | if (ifp->if_softc != arg) /* Not our event */ | ||||
return; | return; | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | ||||
EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag); | EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag); | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
} | } | ||||
static int | static int | ||||
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params) | lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params) | ||||
{ | { | ||||
struct lagg_softc *sc; | struct lagg_softc *sc; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
static const u_char eaddr[6]; /* 00:00:00:00:00:00 */ | static const u_char eaddr[6]; /* 00:00:00:00:00:00 */ | ||||
sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); | sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); | ||||
ifp = sc->sc_ifp = if_alloc(IFT_ETHER); | ifp = sc->sc_ifp = if_alloc(IFT_ETHER); | ||||
if (ifp == NULL) { | if (ifp == NULL) { | ||||
free(sc, M_DEVBUF); | free(sc, M_DEVBUF); | ||||
return (ENOSPC); | return (ENOSPC); | ||||
} | } | ||||
LAGG_LOCK_INIT(sc); | LAGG_LOCK_INIT(sc); | ||||
LAGG_SX_INIT(sc); | LAGG_SX_INIT(sc); | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
if (V_def_use_flowid) | if (V_def_use_flowid) | ||||
sc->sc_opts |= LAGG_OPT_USE_FLOWID; | sc->sc_opts |= LAGG_OPT_USE_FLOWID; | ||||
sc->flowid_shift = V_def_flowid_shift; | sc->flowid_shift = V_def_flowid_shift; | ||||
/* Hash all layers by default */ | /* Hash all layers by default */ | ||||
sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; | sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; | ||||
lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); | lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); | ||||
Show All 31 Lines | sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | ||||
lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST); | lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST); | ||||
sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | ||||
lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); | lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); | ||||
/* Insert into the global list of laggs */ | /* Insert into the global list of laggs */ | ||||
LAGG_LIST_LOCK(); | LAGG_LIST_LOCK(); | ||||
SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries); | SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries); | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
lagg_clone_destroy(struct ifnet *ifp) | lagg_clone_destroy(struct ifnet *ifp) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
sc->sc_destroying = 1; | sc->sc_destroying = 1; | ||||
lagg_stop(sc); | lagg_stop(sc); | ||||
ifp->if_flags &= ~IFF_UP; | ifp->if_flags &= ~IFF_UP; | ||||
EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); | EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); | ||||
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); | EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); | ||||
/* Shutdown and remove lagg ports */ | /* Shutdown and remove lagg ports */ | ||||
while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL) | while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL) | ||||
lagg_port_destroy(lp, 1); | lagg_port_destroy(lp, 1); | ||||
/* Unhook the aggregation protocol */ | /* Unhook the aggregation protocol */ | ||||
LAGG_WLOCK(sc); | |||||
lagg_proto_detach(sc); | lagg_proto_detach(sc); | ||||
LAGG_UNLOCK_ASSERT(sc); | lagg_xunlock(sc); | ||||
LAGG_XUNLOCK(sc); | |||||
ifmedia_removeall(&sc->sc_media); | ifmedia_removeall(&sc->sc_media); | ||||
ether_ifdetach(ifp); | ether_ifdetach(ifp); | ||||
if_free(ifp); | if_free(ifp); | ||||
LAGG_LIST_LOCK(); | LAGG_LIST_LOCK(); | ||||
SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries); | SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries); | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
LAGG_SX_DESTROY(sc); | LAGG_SX_DESTROY(sc); | ||||
LAGG_LOCK_DESTROY(sc); | |||||
free(sc, M_DEVBUF); | free(sc, M_DEVBUF); | ||||
} | } | ||||
static void | static void | ||||
lagg_capabilities(struct lagg_softc *sc) | lagg_capabilities(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
int cap, ena, pena; | int cap, ena, pena; | ||||
▲ Show 20 Lines • Show All 108 Lines • ▼ Show 20 Lines | #endif | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
if_ref(ifp); | if_ref(ifp); | ||||
lp->lp_ifp = ifp; | lp->lp_ifp = ifp; | ||||
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN); | bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN); | ||||
lp->lp_ifcapenable = ifp->if_capenable; | lp->lp_ifcapenable = ifp->if_capenable; | ||||
if (SLIST_EMPTY(&sc->sc_ports)) { | if (SLIST_EMPTY(&sc->sc_ports)) { | ||||
LAGG_WLOCK(sc); | |||||
bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | ||||
lagg_proto_lladdr(sc); | lagg_proto_lladdr(sc); | ||||
LAGG_WUNLOCK(sc); | |||||
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | ||||
} else { | } else { | ||||
if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | ||||
} | } | ||||
lagg_setflags(lp, 1); | lagg_setflags(lp, 1); | ||||
LAGG_WLOCK(sc); | |||||
if (SLIST_EMPTY(&sc->sc_ports)) | if (SLIST_EMPTY(&sc->sc_ports)) | ||||
sc->sc_primary = lp; | sc->sc_primary = lp; | ||||
/* Change the interface type */ | /* Change the interface type */ | ||||
lp->lp_iftype = ifp->if_type; | lp->lp_iftype = ifp->if_type; | ||||
ifp->if_type = IFT_IEEE8023ADLAG; | ifp->if_type = IFT_IEEE8023ADLAG; | ||||
ifp->if_lagg = lp; | ifp->if_lagg = lp; | ||||
lp->lp_ioctl = ifp->if_ioctl; | lp->lp_ioctl = ifp->if_ioctl; | ||||
Show All 22 Lines | #endif | ||||
if (tlp != NULL) | if (tlp != NULL) | ||||
SLIST_INSERT_AFTER(tlp, lp, lp_entries); | SLIST_INSERT_AFTER(tlp, lp, lp_entries); | ||||
else | else | ||||
SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries); | SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries); | ||||
sc->sc_count++; | sc->sc_count++; | ||||
lagg_setmulti(lp); | lagg_setmulti(lp); | ||||
LAGG_WUNLOCK(sc); | |||||
if ((error = lagg_proto_addport(sc, lp)) != 0) { | if ((error = lagg_proto_addport(sc, lp)) != 0) { | ||||
/* Remove the port, without calling pr_delport. */ | /* Remove the port, without calling pr_delport. */ | ||||
LAGG_WLOCK(sc); | |||||
lagg_port_destroy(lp, 0); | lagg_port_destroy(lp, 0); | ||||
LAGG_UNLOCK_ASSERT(sc); | |||||
return (error); | return (error); | ||||
} | } | ||||
/* Update lagg capabilities */ | /* Update lagg capabilities */ | ||||
lagg_capabilities(sc); | lagg_capabilities(sc); | ||||
lagg_linkstate(sc); | lagg_linkstate(sc); | ||||
return (0); | return (0); | ||||
Show All 25 Lines | lagg_port_destroy(struct lagg_port *lp, int rundelport) | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct lagg_port *lp_ptr, *lp0; | struct lagg_port *lp_ptr, *lp0; | ||||
struct ifnet *ifp = lp->lp_ifp; | struct ifnet *ifp = lp->lp_ifp; | ||||
uint64_t *pval, vdiff; | uint64_t *pval, vdiff; | ||||
int i; | int i; | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
if (rundelport) { | if (rundelport) | ||||
LAGG_WLOCK(sc); | |||||
lagg_proto_delport(sc, lp); | lagg_proto_delport(sc, lp); | ||||
} else | |||||
LAGG_WLOCK_ASSERT(sc); | |||||
if (lp->lp_detaching == 0) | if (lp->lp_detaching == 0) | ||||
lagg_clrmulti(lp); | lagg_clrmulti(lp); | ||||
/* Restore interface */ | /* Restore interface */ | ||||
ifp->if_type = lp->lp_iftype; | ifp->if_type = lp->lp_iftype; | ||||
ifp->if_ioctl = lp->lp_ioctl; | ifp->if_ioctl = lp->lp_ioctl; | ||||
ifp->if_output = lp->lp_output; | ifp->if_output = lp->lp_output; | ||||
Show All 17 Lines | if (lp == sc->sc_primary) { | ||||
if ((lp0 = SLIST_FIRST(&sc->sc_ports)) == NULL) | if ((lp0 = SLIST_FIRST(&sc->sc_ports)) == NULL) | ||||
bzero(&lladdr, ETHER_ADDR_LEN); | bzero(&lladdr, ETHER_ADDR_LEN); | ||||
else | else | ||||
bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN); | bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN); | ||||
sc->sc_primary = lp0; | sc->sc_primary = lp0; | ||||
if (sc->sc_destroying == 0) { | if (sc->sc_destroying == 0) { | ||||
bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); | ||||
lagg_proto_lladdr(sc); | lagg_proto_lladdr(sc); | ||||
LAGG_WUNLOCK(sc); | |||||
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); | ||||
} else | } | ||||
LAGG_WUNLOCK(sc); | |||||
/* | /* | ||||
* Update lladdr for each port (new primary needs update | * Update lladdr for each port (new primary needs update | ||||
* as well, to switch from old lladdr to its 'real' one) | * as well, to switch from old lladdr to its 'real' one) | ||||
*/ | */ | ||||
SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries) | SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries) | ||||
if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN); | if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN); | ||||
} else | } | ||||
LAGG_WUNLOCK(sc); | |||||
if (lp->lp_ifflags) | if (lp->lp_ifflags) | ||||
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__); | if_printf(ifp, "%s: lp_ifflags unclean\n", __func__); | ||||
if (lp->lp_detaching == 0) { | if (lp->lp_detaching == 0) { | ||||
lagg_setflags(lp, 0); | lagg_setflags(lp, 0); | ||||
lagg_setcaps(lp, lp->lp_ifcapenable); | lagg_setcaps(lp, lp->lp_ifcapenable); | ||||
if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN); | if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN); | ||||
Show All 25 Lines | lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | ||||
switch (cmd) { | switch (cmd) { | ||||
case SIOCGLAGGPORT: | case SIOCGLAGGPORT: | ||||
if (rp->rp_portname[0] == '\0' || | if (rp->rp_portname[0] == '\0' || | ||||
ifunit(rp->rp_portname) != ifp) { | ifunit(rp->rp_portname) != ifp) { | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) { | if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) { | ||||
error = ENOENT; | error = ENOENT; | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
break; | break; | ||||
} | } | ||||
lagg_port2req(lp, rp); | lagg_port2req(lp, rp); | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
break; | break; | ||||
case SIOCSIFCAP: | case SIOCSIFCAP: | ||||
if (lp->lp_ioctl == NULL) { | if (lp->lp_ioctl == NULL) { | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
error = (*lp->lp_ioctl)(ifp, cmd, data); | error = (*lp->lp_ioctl)(ifp, cmd, data); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
/* Update lagg interface capabilities */ | /* Update lagg interface capabilities */ | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
lagg_capabilities(sc); | lagg_capabilities(sc); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
VLAN_CAPABILITIES(sc->sc_ifp); | VLAN_CAPABILITIES(sc->sc_ifp); | ||||
break; | break; | ||||
case SIOCSIFMTU: | case SIOCSIFMTU: | ||||
/* Do not allow the MTU to be changed once joined */ | /* Do not allow the MTU to be changed once joined */ | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
Show All 24 Lines | |||||
* current counters data to detached_counters array. | * current counters data to detached_counters array. | ||||
*/ | */ | ||||
static uint64_t | static uint64_t | ||||
lagg_get_counter(struct ifnet *ifp, ift_counter cnt) | lagg_get_counter(struct ifnet *ifp, ift_counter cnt) | ||||
{ | { | ||||
struct lagg_softc *sc; | struct lagg_softc *sc; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
struct ifnet *lpifp; | struct ifnet *lpifp; | ||||
struct rm_priotracker tracker; | |||||
uint64_t newval, oldval, vsum; | uint64_t newval, oldval, vsum; | ||||
struct rm_priotracker tracker; | |||||
/* Revise this when we've got non-generic counters. */ | /* Revise this when we've got non-generic counters. */ | ||||
KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); | KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); | ||||
sc = (struct lagg_softc *)ifp->if_softc; | sc = (struct lagg_softc *)ifp->if_softc; | ||||
LAGG_RLOCK(sc, &tracker); | lagg_rlock(sc, &tracker); | ||||
vsum = 0; | vsum = 0; | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
/* Saved attached value */ | /* Saved attached value */ | ||||
oldval = lp->port_counters.val[cnt]; | oldval = lp->port_counters.val[cnt]; | ||||
/* current value */ | /* current value */ | ||||
lpifp = lp->lp_ifp; | lpifp = lp->lp_ifp; | ||||
newval = lpifp->if_get_counter(lpifp, cnt); | newval = lpifp->if_get_counter(lpifp, cnt); | ||||
/* Calculate diff and save new */ | /* Calculate diff and save new */ | ||||
vsum += newval - oldval; | vsum += newval - oldval; | ||||
} | } | ||||
/* | /* | ||||
* Add counter data which might be added by upper | * Add counter data which might be added by upper | ||||
* layer protocols operating on logical interface. | * layer protocols operating on logical interface. | ||||
*/ | */ | ||||
vsum += if_get_counter_default(ifp, cnt); | vsum += if_get_counter_default(ifp, cnt); | ||||
/* | /* | ||||
* Add counter data from detached ports counters | * Add counter data from detached ports counters | ||||
*/ | */ | ||||
vsum += sc->detached_counters.val[cnt]; | vsum += sc->detached_counters.val[cnt]; | ||||
LAGG_RUNLOCK(sc, &tracker); | lagg_runlock(sc, &tracker); | ||||
return (vsum); | return (vsum); | ||||
} | } | ||||
/* | /* | ||||
* For direct output to child ports. | * For direct output to child ports. | ||||
*/ | */ | ||||
static int | static int | ||||
Show All 22 Lines | lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp) | ||||
if ((lp = ifp->if_lagg) == NULL) | if ((lp = ifp->if_lagg) == NULL) | ||||
return; | return; | ||||
/* If the ifnet is just being renamed, don't do anything. */ | /* If the ifnet is just being renamed, don't do anything. */ | ||||
if (ifp->if_flags & IFF_RENAMING) | if (ifp->if_flags & IFF_RENAMING) | ||||
return; | return; | ||||
sc = lp->lp_softc; | sc = lp->lp_softc; | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
lp->lp_detaching = 1; | lp->lp_detaching = 1; | ||||
lagg_port_destroy(lp, 1); | lagg_port_destroy(lp, 1); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
VLAN_CAPABILITIES(sc->sc_ifp); | VLAN_CAPABILITIES(sc->sc_ifp); | ||||
} | } | ||||
static void | static void | ||||
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp) | lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp) | ||||
{ | { | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
Show All 34 Lines | |||||
static void | static void | ||||
lagg_init(void *xsc) | lagg_init(void *xsc) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)xsc; | struct lagg_softc *sc = (struct lagg_softc *)xsc; | ||||
struct ifnet *ifp = sc->sc_ifp; | struct ifnet *ifp = sc->sc_ifp; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
return; | return; | ||||
} | } | ||||
ifp->if_drv_flags |= IFF_DRV_RUNNING; | ifp->if_drv_flags |= IFF_DRV_RUNNING; | ||||
/* | /* | ||||
* Update the port lladdrs if needed. | * Update the port lladdrs if needed. | ||||
* This might be if_setlladdr() notification | * This might be if_setlladdr() notification | ||||
* that lladdr has been changed. | * that lladdr has been changed. | ||||
*/ | */ | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp), | if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp), | ||||
ETHER_ADDR_LEN) != 0) | ETHER_ADDR_LEN) != 0) | ||||
if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN); | if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN); | ||||
} | } | ||||
lagg_proto_init(sc); | lagg_proto_init(sc); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
} | } | ||||
static void | static void | ||||
lagg_stop(struct lagg_softc *sc) | lagg_stop(struct lagg_softc *sc) | ||||
{ | { | ||||
struct ifnet *ifp = sc->sc_ifp; | struct ifnet *ifp = sc->sc_ifp; | ||||
LAGG_XLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
Show All 20 Lines | lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | ||||
struct thread *td = curthread; | struct thread *td = curthread; | ||||
char *buf, *outbuf; | char *buf, *outbuf; | ||||
int count, buflen, len, error = 0; | int count, buflen, len, error = 0; | ||||
bzero(&rpbuf, sizeof(rpbuf)); | bzero(&rpbuf, sizeof(rpbuf)); | ||||
switch (cmd) { | switch (cmd) { | ||||
case SIOCGLAGG: | case SIOCGLAGG: | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
buflen = sc->sc_count * sizeof(struct lagg_reqport); | buflen = sc->sc_count * sizeof(struct lagg_reqport); | ||||
outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); | outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); | ||||
ra->ra_proto = sc->sc_proto; | ra->ra_proto = sc->sc_proto; | ||||
lagg_proto_request(sc, &ra->ra_psc); | lagg_proto_request(sc, &ra->ra_psc); | ||||
count = 0; | count = 0; | ||||
buf = outbuf; | buf = outbuf; | ||||
len = min(ra->ra_size, buflen); | len = min(ra->ra_size, buflen); | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
if (len < sizeof(rpbuf)) | if (len < sizeof(rpbuf)) | ||||
break; | break; | ||||
lagg_port2req(lp, &rpbuf); | lagg_port2req(lp, &rpbuf); | ||||
memcpy(buf, &rpbuf, sizeof(rpbuf)); | memcpy(buf, &rpbuf, sizeof(rpbuf)); | ||||
count++; | count++; | ||||
buf += sizeof(rpbuf); | buf += sizeof(rpbuf); | ||||
len -= sizeof(rpbuf); | len -= sizeof(rpbuf); | ||||
} | } | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
ra->ra_ports = count; | ra->ra_ports = count; | ||||
ra->ra_size = count * sizeof(rpbuf); | ra->ra_size = count * sizeof(rpbuf); | ||||
error = copyout(outbuf, ra->ra_port, ra->ra_size); | error = copyout(outbuf, ra->ra_port, ra->ra_size); | ||||
free(outbuf, M_TEMP); | free(outbuf, M_TEMP); | ||||
break; | break; | ||||
case SIOCSLAGG: | case SIOCSLAGG: | ||||
error = priv_check(td, PRIV_NET_LAGG); | error = priv_check(td, PRIV_NET_LAGG); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
if (ra->ra_proto >= LAGG_PROTO_MAX) { | if (ra->ra_proto >= LAGG_PROTO_MAX) { | ||||
error = EPROTONOSUPPORT; | error = EPROTONOSUPPORT; | ||||
break; | break; | ||||
} | } | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
LAGG_WLOCK(sc); | |||||
lagg_proto_detach(sc); | lagg_proto_detach(sc); | ||||
LAGG_UNLOCK_ASSERT(sc); | |||||
lagg_proto_attach(sc, ra->ra_proto); | lagg_proto_attach(sc, ra->ra_proto); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
break; | break; | ||||
case SIOCGLAGGOPTS: | case SIOCGLAGGOPTS: | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
ro->ro_opts = sc->sc_opts; | ro->ro_opts = sc->sc_opts; | ||||
if (sc->sc_proto == LAGG_PROTO_LACP) { | if (sc->sc_proto == LAGG_PROTO_LACP) { | ||||
struct lacp_softc *lsc; | struct lacp_softc *lsc; | ||||
lsc = (struct lacp_softc *)sc->sc_psc; | lsc = (struct lacp_softc *)sc->sc_psc; | ||||
if (lsc->lsc_debug.lsc_tx_test != 0) | if (lsc->lsc_debug.lsc_tx_test != 0) | ||||
ro->ro_opts |= LAGG_OPT_LACP_TXTEST; | ro->ro_opts |= LAGG_OPT_LACP_TXTEST; | ||||
if (lsc->lsc_debug.lsc_rx_test != 0) | if (lsc->lsc_debug.lsc_rx_test != 0) | ||||
ro->ro_opts |= LAGG_OPT_LACP_RXTEST; | ro->ro_opts |= LAGG_OPT_LACP_RXTEST; | ||||
if (lsc->lsc_strict_mode != 0) | if (lsc->lsc_strict_mode != 0) | ||||
ro->ro_opts |= LAGG_OPT_LACP_STRICT; | ro->ro_opts |= LAGG_OPT_LACP_STRICT; | ||||
if (lsc->lsc_fast_timeout != 0) | if (lsc->lsc_fast_timeout != 0) | ||||
ro->ro_opts |= LAGG_OPT_LACP_TIMEOUT; | ro->ro_opts |= LAGG_OPT_LACP_TIMEOUT; | ||||
ro->ro_active = sc->sc_active; | ro->ro_active = sc->sc_active; | ||||
} else { | } else { | ||||
ro->ro_active = 0; | ro->ro_active = 0; | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | ||||
ro->ro_active += LAGG_PORTACTIVE(lp); | ro->ro_active += LAGG_PORTACTIVE(lp); | ||||
} | } | ||||
ro->ro_bkt = sc->sc_bkt; | ro->ro_bkt = sc->sc_bkt; | ||||
ro->ro_flapping = sc->sc_flapping; | ro->ro_flapping = sc->sc_flapping; | ||||
ro->ro_flowid_shift = sc->flowid_shift; | ro->ro_flowid_shift = sc->flowid_shift; | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
break; | break; | ||||
case SIOCSLAGGOPTS: | case SIOCSLAGGOPTS: | ||||
if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) { | if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) { | ||||
if (ro->ro_bkt == 0) | if (ro->ro_bkt == 0) | ||||
sc->sc_bkt = 1; // Minimum 1 packet per iface. | sc->sc_bkt = 1; // Minimum 1 packet per iface. | ||||
else | else | ||||
sc->sc_bkt = ro->ro_bkt; | sc->sc_bkt = ro->ro_bkt; | ||||
} | } | ||||
Show All 25 Lines | case SIOCSLAGGOPTS: | ||||
case -LAGG_OPT_LACP_TIMEOUT: | case -LAGG_OPT_LACP_TIMEOUT: | ||||
valid = lacp = 1; | valid = lacp = 1; | ||||
break; | break; | ||||
default: | default: | ||||
valid = lacp = 0; | valid = lacp = 0; | ||||
break; | break; | ||||
} | } | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
if (valid == 0 || | if (valid == 0 || | ||||
(lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) { | (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) { | ||||
/* Invalid combination of options specified. */ | /* Invalid combination of options specified. */ | ||||
error = EINVAL; | error = EINVAL; | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
break; /* Return from SIOCSLAGGOPTS. */ | break; /* Return from SIOCSLAGGOPTS. */ | ||||
} | } | ||||
/* | /* | ||||
* Store new options into sc->sc_opts except for | * Store new options into sc->sc_opts except for | ||||
* FLOWIDSHIFT and LACP options. | * FLOWIDSHIFT and LACP options. | ||||
*/ | */ | ||||
if (lacp == 0) { | if (lacp == 0) { | ||||
if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT) | if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT) | ||||
Show All 38 Lines | if (lacp == 0) { | ||||
LACP_LOCK(lsc); | LACP_LOCK(lsc); | ||||
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) | LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) | ||||
lp->lp_state &= ~LACP_STATE_TIMEOUT; | lp->lp_state &= ~LACP_STATE_TIMEOUT; | ||||
LACP_UNLOCK(lsc); | LACP_UNLOCK(lsc); | ||||
lsc->lsc_fast_timeout = 0; | lsc->lsc_fast_timeout = 0; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
break; | break; | ||||
case SIOCGLAGGFLAGS: | case SIOCGLAGGFLAGS: | ||||
rf->rf_flags = 0; | rf->rf_flags = 0; | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
if (sc->sc_flags & MBUF_HASHFLAG_L2) | if (sc->sc_flags & MBUF_HASHFLAG_L2) | ||||
rf->rf_flags |= LAGG_F_HASHL2; | rf->rf_flags |= LAGG_F_HASHL2; | ||||
if (sc->sc_flags & MBUF_HASHFLAG_L3) | if (sc->sc_flags & MBUF_HASHFLAG_L3) | ||||
rf->rf_flags |= LAGG_F_HASHL3; | rf->rf_flags |= LAGG_F_HASHL3; | ||||
if (sc->sc_flags & MBUF_HASHFLAG_L4) | if (sc->sc_flags & MBUF_HASHFLAG_L4) | ||||
rf->rf_flags |= LAGG_F_HASHL4; | rf->rf_flags |= LAGG_F_HASHL4; | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
break; | break; | ||||
case SIOCSLAGGHASH: | case SIOCSLAGGHASH: | ||||
error = priv_check(td, PRIV_NET_LAGG); | error = priv_check(td, PRIV_NET_LAGG); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) { | if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) { | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
sc->sc_flags = 0; | sc->sc_flags = 0; | ||||
if (rf->rf_flags & LAGG_F_HASHL2) | if (rf->rf_flags & LAGG_F_HASHL2) | ||||
sc->sc_flags |= MBUF_HASHFLAG_L2; | sc->sc_flags |= MBUF_HASHFLAG_L2; | ||||
if (rf->rf_flags & LAGG_F_HASHL3) | if (rf->rf_flags & LAGG_F_HASHL3) | ||||
sc->sc_flags |= MBUF_HASHFLAG_L3; | sc->sc_flags |= MBUF_HASHFLAG_L3; | ||||
if (rf->rf_flags & LAGG_F_HASHL4) | if (rf->rf_flags & LAGG_F_HASHL4) | ||||
sc->sc_flags |= MBUF_HASHFLAG_L4; | sc->sc_flags |= MBUF_HASHFLAG_L4; | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
break; | break; | ||||
case SIOCGLAGGPORT: | case SIOCGLAGGPORT: | ||||
if (rp->rp_portname[0] == '\0' || | if (rp->rp_portname[0] == '\0' || | ||||
(tpif = ifunit_ref(rp->rp_portname)) == NULL) { | (tpif = ifunit_ref(rp->rp_portname)) == NULL) { | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL || | if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL || | ||||
lp->lp_softc != sc) { | lp->lp_softc != sc) { | ||||
error = ENOENT; | error = ENOENT; | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
if_rele(tpif); | if_rele(tpif); | ||||
break; | break; | ||||
} | } | ||||
lagg_port2req(lp, rp); | lagg_port2req(lp, rp); | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
if_rele(tpif); | if_rele(tpif); | ||||
break; | break; | ||||
case SIOCSLAGGPORT: | case SIOCSLAGGPORT: | ||||
error = priv_check(td, PRIV_NET_LAGG); | error = priv_check(td, PRIV_NET_LAGG); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
if (rp->rp_portname[0] == '\0' || | if (rp->rp_portname[0] == '\0' || | ||||
(tpif = ifunit_ref(rp->rp_portname)) == NULL) { | (tpif = ifunit_ref(rp->rp_portname)) == NULL) { | ||||
Show All 15 Lines | if (in6ifa_llaonifp(tpif)) { | ||||
in6_ifdetach(tpif); | in6_ifdetach(tpif); | ||||
if_printf(sc->sc_ifp, | if_printf(sc->sc_ifp, | ||||
"IPv6 addresses on %s have been removed " | "IPv6 addresses on %s have been removed " | ||||
"before adding it as a member to prevent " | "before adding it as a member to prevent " | ||||
"IPv6 address scope violation.\n", | "IPv6 address scope violation.\n", | ||||
tpif->if_xname); | tpif->if_xname); | ||||
} | } | ||||
#endif | #endif | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
error = lagg_port_create(sc, tpif); | error = lagg_port_create(sc, tpif); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
if_rele(tpif); | if_rele(tpif); | ||||
VLAN_CAPABILITIES(ifp); | VLAN_CAPABILITIES(ifp); | ||||
break; | break; | ||||
case SIOCSLAGGDELPORT: | case SIOCSLAGGDELPORT: | ||||
error = priv_check(td, PRIV_NET_LAGG); | error = priv_check(td, PRIV_NET_LAGG); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
if (rp->rp_portname[0] == '\0' || | if (rp->rp_portname[0] == '\0' || | ||||
(tpif = ifunit_ref(rp->rp_portname)) == NULL) { | (tpif = ifunit_ref(rp->rp_portname)) == NULL) { | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL || | if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL || | ||||
lp->lp_softc != sc) { | lp->lp_softc != sc) { | ||||
error = ENOENT; | error = ENOENT; | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
if_rele(tpif); | if_rele(tpif); | ||||
break; | break; | ||||
} | } | ||||
error = lagg_port_destroy(lp, 1); | error = lagg_port_destroy(lp, 1); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
if_rele(tpif); | if_rele(tpif); | ||||
VLAN_CAPABILITIES(ifp); | VLAN_CAPABILITIES(ifp); | ||||
break; | break; | ||||
case SIOCSIFFLAGS: | case SIOCSIFFLAGS: | ||||
/* Set flags on ports too */ | /* Set flags on ports too */ | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
lagg_setflags(lp, 1); | lagg_setflags(lp, 1); | ||||
} | } | ||||
if (!(ifp->if_flags & IFF_UP) && | if (!(ifp->if_flags & IFF_UP) && | ||||
(ifp->if_drv_flags & IFF_DRV_RUNNING)) { | (ifp->if_drv_flags & IFF_DRV_RUNNING)) { | ||||
/* | /* | ||||
* If interface is marked down and it is running, | * If interface is marked down and it is running, | ||||
* then stop and disable it. | * then stop and disable it. | ||||
*/ | */ | ||||
lagg_stop(sc); | lagg_stop(sc); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
} else if ((ifp->if_flags & IFF_UP) && | } else if ((ifp->if_flags & IFF_UP) && | ||||
!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { | !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { | ||||
/* | /* | ||||
* If interface is marked up and it is stopped, then | * If interface is marked up and it is stopped, then | ||||
* start it. | * start it. | ||||
*/ | */ | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
(*ifp->if_init)(sc); | (*ifp->if_init)(sc); | ||||
} else | } else | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
break; | break; | ||||
case SIOCADDMULTI: | case SIOCADDMULTI: | ||||
case SIOCDELMULTI: | case SIOCDELMULTI: | ||||
LAGG_WLOCK(sc); | lagg_xlock(sc); | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
lagg_clrmulti(lp); | lagg_clrmulti(lp); | ||||
lagg_setmulti(lp); | lagg_setmulti(lp); | ||||
} | } | ||||
LAGG_WUNLOCK(sc); | lagg_xunlock(sc); | ||||
error = 0; | error = 0; | ||||
break; | break; | ||||
case SIOCSIFMEDIA: | case SIOCSIFMEDIA: | ||||
case SIOCGIFMEDIA: | case SIOCGIFMEDIA: | ||||
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | ||||
break; | break; | ||||
case SIOCSIFCAP: | case SIOCSIFCAP: | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
if (lp->lp_ioctl != NULL) | if (lp->lp_ioctl != NULL) | ||||
(*lp->lp_ioctl)(lp->lp_ifp, cmd, data); | (*lp->lp_ioctl)(lp->lp_ifp, cmd, data); | ||||
} | } | ||||
lagg_capabilities(sc); | lagg_capabilities(sc); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
VLAN_CAPABILITIES(ifp); | VLAN_CAPABILITIES(ifp); | ||||
error = 0; | error = 0; | ||||
break; | break; | ||||
case SIOCSIFMTU: | case SIOCSIFMTU: | ||||
/* Do not allow the MTU to be directly changed */ | /* Do not allow the MTU to be directly changed */ | ||||
error = EINVAL; | error = EINVAL; | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 56 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct ifnet *ifp = lp->lp_ifp; | struct ifnet *ifp = lp->lp_ifp; | ||||
struct ifnet *scifp = sc->sc_ifp; | struct ifnet *scifp = sc->sc_ifp; | ||||
struct lagg_mc *mc; | struct lagg_mc *mc; | ||||
struct ifmultiaddr *ifma; | struct ifmultiaddr *ifma; | ||||
int error; | int error; | ||||
LAGG_WLOCK_ASSERT(sc); | LAGG_XLOCK_ASSERT(sc); | ||||
IF_ADDR_WLOCK(scifp); | IF_ADDR_WLOCK(scifp); | ||||
TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) { | TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) { | ||||
if (ifma->ifma_addr->sa_family != AF_LINK) | if (ifma->ifma_addr->sa_family != AF_LINK) | ||||
continue; | continue; | ||||
mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT); | mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT); | ||||
if (mc == NULL) { | if (mc == NULL) { | ||||
IF_ADDR_WUNLOCK(scifp); | IF_ADDR_WUNLOCK(scifp); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
Show All 14 Lines | lagg_setmulti(struct lagg_port *lp) | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
lagg_clrmulti(struct lagg_port *lp) | lagg_clrmulti(struct lagg_port *lp) | ||||
{ | { | ||||
struct lagg_mc *mc; | struct lagg_mc *mc; | ||||
LAGG_WLOCK_ASSERT(lp->lp_softc); | LAGG_XLOCK_ASSERT(lp->lp_softc); | ||||
while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) { | while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) { | ||||
SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries); | SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries); | ||||
if (mc->mc_ifma && lp->lp_detaching == 0) | if (mc->mc_ifma && lp->lp_detaching == 0) | ||||
if_delmulti_ifma(mc->mc_ifma); | if_delmulti_ifma(mc->mc_ifma); | ||||
free(mc, M_DEVBUF); | free(mc, M_DEVBUF); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 69 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
int error, len, mcast; | int error, len, mcast; | ||||
struct rm_priotracker tracker; | struct rm_priotracker tracker; | ||||
len = m->m_pkthdr.len; | len = m->m_pkthdr.len; | ||||
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; | mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; | ||||
LAGG_RLOCK(sc, &tracker); | lagg_rlock(sc, &tracker); | ||||
/* We need a Tx algorithm and at least one port */ | /* We need a Tx algorithm and at least one port */ | ||||
if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { | if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { | ||||
LAGG_RUNLOCK(sc, &tracker); | lagg_runlock(sc, &tracker); | ||||
m_freem(m); | m_freem(m); | ||||
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
ETHER_BPF_MTAP(ifp, m); | ETHER_BPF_MTAP(ifp, m); | ||||
error = lagg_proto_start(sc, m); | error = lagg_proto_start(sc, m); | ||||
LAGG_RUNLOCK(sc, &tracker); | lagg_runlock(sc, &tracker); | ||||
if (error != 0) | if (error != 0) | ||||
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
* The ifp->if_qflush entry point for lagg(4) is no-op. | * The ifp->if_qflush entry point for lagg(4) is no-op. | ||||
*/ | */ | ||||
static void | static void | ||||
lagg_qflush(struct ifnet *ifp __unused) | lagg_qflush(struct ifnet *ifp __unused) | ||||
{ | { | ||||
} | } | ||||
static struct mbuf * | static struct mbuf * | ||||
lagg_input(struct ifnet *ifp, struct mbuf *m) | lagg_input(struct ifnet *ifp, struct mbuf *m) | ||||
{ | { | ||||
struct lagg_port *lp = ifp->if_lagg; | struct lagg_port *lp = ifp->if_lagg; | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct ifnet *scifp = sc->sc_ifp; | struct ifnet *scifp = sc->sc_ifp; | ||||
struct rm_priotracker tracker; | struct rm_priotracker tracker; | ||||
LAGG_RLOCK(sc, &tracker); | lagg_rlock(sc, &tracker); | ||||
mmacy: I don't think we want to be acquiring sleep locks in the data path. A more substantive… | |||||
Done Inline ActionsWhy do you think it is possible to have DELAY(1) on the datapath? melifaro: Why do you think it is possible to have DELAY(1) on the datapath? | |||||
Not Done Inline ActionsFixed (lagg_rlock() uses cpu_spinwait() now). shurd: Fixed (lagg_rlock() uses cpu_spinwait() now). | |||||
if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || | if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || | ||||
(lp->lp_flags & LAGG_PORT_DISABLED) || | (lp->lp_flags & LAGG_PORT_DISABLED) || | ||||
sc->sc_proto == LAGG_PROTO_NONE) { | sc->sc_proto == LAGG_PROTO_NONE) { | ||||
LAGG_RUNLOCK(sc, &tracker); | lagg_runlock(sc, &tracker); | ||||
m_freem(m); | m_freem(m); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
ETHER_BPF_MTAP(scifp, m); | ETHER_BPF_MTAP(scifp, m); | ||||
if (lp->lp_detaching != 0) { | if (lp->lp_detaching != 0) { | ||||
m_freem(m); | m_freem(m); | ||||
m = NULL; | m = NULL; | ||||
} else | } else | ||||
m = lagg_proto_input(sc, lp, m); | m = lagg_proto_input(sc, lp, m); | ||||
if (m != NULL) { | if (m != NULL) { | ||||
if (scifp->if_flags & IFF_MONITOR) { | if (scifp->if_flags & IFF_MONITOR) { | ||||
m_freem(m); | m_freem(m); | ||||
m = NULL; | m = NULL; | ||||
} | } | ||||
} | } | ||||
LAGG_RUNLOCK(sc, &tracker); | lagg_runlock(sc, &tracker); | ||||
return (m); | return (m); | ||||
} | } | ||||
static int | static int | ||||
lagg_media_change(struct ifnet *ifp) | lagg_media_change(struct ifnet *ifp) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
if (sc->sc_ifflags & IFF_DEBUG) | if (sc->sc_ifflags & IFF_DEBUG) | ||||
printf("%s\n", __func__); | printf("%s\n", __func__); | ||||
/* Ignore */ | /* Ignore */ | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr) | lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
imr->ifm_status = IFM_AVALID; | imr->ifm_status = IFM_AVALID; | ||||
imr->ifm_active = IFM_ETHER | IFM_AUTO; | imr->ifm_active = IFM_ETHER | IFM_AUTO; | ||||
LAGG_SLOCK(sc); | lagg_slock(sc); | ||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | ||||
if (LAGG_PORTACTIVE(lp)) | if (LAGG_PORTACTIVE(lp)) | ||||
imr->ifm_status |= IFM_ACTIVE; | imr->ifm_status |= IFM_ACTIVE; | ||||
} | } | ||||
LAGG_SUNLOCK(sc); | lagg_sunlock(sc); | ||||
} | } | ||||
static void | static void | ||||
lagg_linkstate(struct lagg_softc *sc) | lagg_linkstate(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
int new_link = LINK_STATE_DOWN; | int new_link = LINK_STATE_DOWN; | ||||
uint64_t speed; | uint64_t speed; | ||||
Show All 35 Lines | lagg_port_state(struct ifnet *ifp, int state) | ||||
struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg; | struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg; | ||||
struct lagg_softc *sc = NULL; | struct lagg_softc *sc = NULL; | ||||
if (lp != NULL) | if (lp != NULL) | ||||
sc = lp->lp_softc; | sc = lp->lp_softc; | ||||
if (sc == NULL) | if (sc == NULL) | ||||
return; | return; | ||||
LAGG_XLOCK(sc); | lagg_xlock(sc); | ||||
lagg_linkstate(sc); | lagg_linkstate(sc); | ||||
lagg_proto_linkstate(sc, lp); | lagg_proto_linkstate(sc, lp); | ||||
LAGG_XUNLOCK(sc); | lagg_xunlock(sc); | ||||
} | } | ||||
struct lagg_port * | struct lagg_port * | ||||
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp) | lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp) | ||||
{ | { | ||||
struct lagg_port *lp_next, *rval = NULL; | struct lagg_port *lp_next, *rval = NULL; | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 212 Lines • ▼ Show 20 Lines | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | ||||
lagg_lb_port_create(lp); | lagg_lb_port_create(lp); | ||||
} | } | ||||
static void | static void | ||||
lagg_lb_detach(struct lagg_softc *sc) | lagg_lb_detach(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_lb *lb; | struct lagg_lb *lb; | ||||
LAGG_XLOCK_ASSERT(sc); | |||||
lb = (struct lagg_lb *)sc->sc_psc; | lb = (struct lagg_lb *)sc->sc_psc; | ||||
LAGG_WUNLOCK(sc); | |||||
if (lb != NULL) | if (lb != NULL) | ||||
free(lb, M_DEVBUF); | free(lb, M_DEVBUF); | ||||
} | } | ||||
static int | static int | ||||
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp) | lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp) | ||||
{ | { | ||||
struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc; | struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc; | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static void | static void | ||||
lagg_lacp_detach(struct lagg_softc *sc) | lagg_lacp_detach(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
void *psc; | void *psc; | ||||
LAGG_XLOCK_ASSERT(sc); | |||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) | ||||
lacp_port_destroy(lp); | lacp_port_destroy(lp); | ||||
psc = sc->sc_psc; | psc = sc->sc_psc; | ||||
sc->sc_psc = NULL; | sc->sc_psc = NULL; | ||||
LAGG_WUNLOCK(sc); | |||||
lacp_detach(psc); | lacp_detach(psc); | ||||
} | } | ||||
static void | static void | ||||
lagg_lacp_lladdr(struct lagg_softc *sc) | lagg_lacp_lladdr(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
▲ Show 20 Lines • Show All 57 Lines • Show Last 20 Lines |
I don't think we want to be acquiring sleep locks in the data path. A more substantive refactoring is in order.