Changeset View
Standalone View
sys/net/if_lagg.c
Show First 20 Lines • Show All 95 Lines • ▼ Show 20 Lines | |||||
static void lagg_clone_destroy(struct ifnet *); | static void lagg_clone_destroy(struct ifnet *); | ||||
static VNET_DEFINE(struct if_clone *, lagg_cloner); | static VNET_DEFINE(struct if_clone *, lagg_cloner); | ||||
#define V_lagg_cloner VNET(lagg_cloner) | #define V_lagg_cloner VNET(lagg_cloner) | ||||
static const char laggname[] = "lagg"; | static const char laggname[] = "lagg"; | ||||
static void lagg_lladdr(struct lagg_softc *, uint8_t *); | static void lagg_lladdr(struct lagg_softc *, uint8_t *); | ||||
static void lagg_capabilities(struct lagg_softc *); | static void lagg_capabilities(struct lagg_softc *); | ||||
static void lagg_port_lladdr(struct lagg_port *, uint8_t *, lagg_llqtype); | static void lagg_port_lladdr(struct lagg_port *, uint8_t *, lagg_llqtype); | ||||
static void lagg_port_setlladdr(void *, int); | static void lagg_port_ops(void *, int); | ||||
static void lagg_llq_action_mtu(struct lagg_softc *, | |||||
struct lagg_llq_slist_entry *); | |||||
smh: style(9) should be 4 space indented, more below | |||||
Not Done Inline ActionsThis still seems incorrect (tab intended) smh: This still seems incorrect (tab intended) | |||||
Not Done Inline ActionsAh, thanks. I did it the way it is because other stuff in the file is done that way. For example, see the prototypes for lagg_port_output(), lagg_setflag(), lagg_rr_input(), lagg_fail_input(), lagg_lb_input(), lagg_bcast_input(), lagg_lacp_input(). If you still want me to change this, I can, but I'd rather go with the in-file precedent. rpokala: Ah, thanks.
I did it the way it is because other stuff in the file is done that way. For… | |||||
Not Done Inline ActionsIf that's the case then all good :) smh: If that's the case then all good :) | |||||
Not Done Inline ActionsExcellent. Can you mark this approved for the record, so I can commit? rpokala: Excellent. Can you mark this approved for the record, so I can commit? | |||||
static void lagg_llq_action_lladdr(struct lagg_softc *, | |||||
struct lagg_llq_slist_entry *); | |||||
static int lagg_port_create(struct lagg_softc *, struct ifnet *); | static int lagg_port_create(struct lagg_softc *, struct ifnet *); | ||||
static int lagg_port_destroy(struct lagg_port *, int); | static int lagg_port_destroy(struct lagg_port *, int); | ||||
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *); | static struct mbuf *lagg_input(struct ifnet *, struct mbuf *); | ||||
static void lagg_linkstate(struct lagg_softc *); | static void lagg_linkstate(struct lagg_softc *); | ||||
static void lagg_port_state(struct ifnet *, int); | static void lagg_port_state(struct ifnet *, int); | ||||
static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t); | static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t); | ||||
static int lagg_port_output(struct ifnet *, struct mbuf *, | static int lagg_port_output(struct ifnet *, struct mbuf *, | ||||
const struct sockaddr *, struct route *); | const struct sockaddr *, struct route *); | ||||
Show All 12 Lines | |||||
static int lagg_setflags(struct lagg_port *, int status); | static int lagg_setflags(struct lagg_port *, int status); | ||||
static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt); | static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt); | ||||
static int lagg_transmit(struct ifnet *, struct mbuf *); | static int lagg_transmit(struct ifnet *, struct mbuf *); | ||||
static void lagg_qflush(struct ifnet *); | static void lagg_qflush(struct ifnet *); | ||||
static int lagg_media_change(struct ifnet *); | static int lagg_media_change(struct ifnet *); | ||||
static void lagg_media_status(struct ifnet *, struct ifmediareq *); | static void lagg_media_status(struct ifnet *, struct ifmediareq *); | ||||
static struct lagg_port *lagg_link_active(struct lagg_softc *, | static struct lagg_port *lagg_link_active(struct lagg_softc *, | ||||
struct lagg_port *); | struct lagg_port *); | ||||
static int lagg_change_mtu(struct ifnet *, struct ifreq *); | |||||
static void _lagg_free_llq_entries(struct lagg_llq_slist_entry *); | |||||
static void lagg_free_llq_entries(struct lagg_softc *, lagg_llq_idx); | |||||
/* Simple round robin */ | /* Simple round robin */ | ||||
static void lagg_rr_attach(struct lagg_softc *); | static void lagg_rr_attach(struct lagg_softc *); | ||||
static int lagg_rr_start(struct lagg_softc *, struct mbuf *); | static int lagg_rr_start(struct lagg_softc *, struct mbuf *); | ||||
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *, | static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *, | ||||
struct mbuf *); | struct mbuf *); | ||||
/* Active failover */ | /* Active failover */ | ||||
Show All 19 Lines | |||||
/* 802.3ad LACP */ | /* 802.3ad LACP */ | ||||
static void lagg_lacp_attach(struct lagg_softc *); | static void lagg_lacp_attach(struct lagg_softc *); | ||||
static void lagg_lacp_detach(struct lagg_softc *); | static void lagg_lacp_detach(struct lagg_softc *); | ||||
static int lagg_lacp_start(struct lagg_softc *, struct mbuf *); | static int lagg_lacp_start(struct lagg_softc *, struct mbuf *); | ||||
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *, | static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *, | ||||
struct mbuf *); | struct mbuf *); | ||||
static void lagg_lacp_lladdr(struct lagg_softc *); | static void lagg_lacp_lladdr(struct lagg_softc *); | ||||
/* | |||||
* This action handler shall be called from taskqueue handler for each | |||||
* submitted operation | |||||
*/ | |||||
typedef void (*lagg_llq_action)(struct lagg_softc *, | |||||
struct lagg_llq_slist_entry *); | |||||
/* | |||||
* lagg llq action Table: Called at the taskqueue context for each | |||||
* submitted operations. | |||||
* Contents SHOULD be in sync with lagg_llq_idx index. | |||||
* New entries shall be appended. | |||||
*/ | |||||
static const lagg_llq_action llq_action[LAGG_LLQ_MAX] = { | |||||
lagg_llq_action_lladdr, /* Maps to LAGG_LLQ_LLADDR index */ | |||||
lagg_llq_action_mtu, /* Maps to LAGG_LLQ_MTU index */ | |||||
}; | |||||
/* lagg protocol table */ | /* lagg protocol table */ | ||||
static const struct lagg_proto { | static const struct lagg_proto { | ||||
lagg_proto pr_num; | lagg_proto pr_num; | ||||
void (*pr_attach)(struct lagg_softc *); | void (*pr_attach)(struct lagg_softc *); | ||||
void (*pr_detach)(struct lagg_softc *); | void (*pr_detach)(struct lagg_softc *); | ||||
int (*pr_start)(struct lagg_softc *, struct mbuf *); | int (*pr_start)(struct lagg_softc *, struct mbuf *); | ||||
struct mbuf * (*pr_input)(struct lagg_softc *, struct lagg_port *, | struct mbuf * (*pr_input)(struct lagg_softc *, struct lagg_port *, | ||||
struct mbuf *); | struct mbuf *); | ||||
▲ Show 20 Lines • Show All 306 Lines • ▼ Show 20 Lines | lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params) | ||||
/* Hash all layers by default */ | /* Hash all layers by default */ | ||||
sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; | sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; | ||||
lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); | lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); | ||||
LAGG_LOCK_INIT(sc); | LAGG_LOCK_INIT(sc); | ||||
SLIST_INIT(&sc->sc_ports); | SLIST_INIT(&sc->sc_ports); | ||||
TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc); | TASK_INIT(&sc->sc_llq_task, 0, lagg_port_ops, sc); | ||||
/* Initialise pseudo media types */ | /* Initialise pseudo media types */ | ||||
ifmedia_init(&sc->sc_media, 0, lagg_media_change, | ifmedia_init(&sc->sc_media, 0, lagg_media_change, | ||||
lagg_media_status); | lagg_media_status); | ||||
ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); | ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); | ||||
if_initname(ifp, laggname, unit); | if_initname(ifp, laggname, unit); | ||||
ifp->if_softc = sc; | ifp->if_softc = sc; | ||||
ifp->if_transmit = lagg_transmit; | ifp->if_transmit = lagg_transmit; | ||||
ifp->if_qflush = lagg_qflush; | ifp->if_qflush = lagg_qflush; | ||||
ifp->if_init = lagg_init; | ifp->if_init = lagg_init; | ||||
ifp->if_ioctl = lagg_ioctl; | ifp->if_ioctl = lagg_ioctl; | ||||
ifp->if_get_counter = lagg_get_counter; | ifp->if_get_counter = lagg_get_counter; | ||||
ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; | ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; | ||||
ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS; | ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS; | ||||
mtx_init(&sc->sc_mtu_ctxt.mtu_sync.lock, ifp->if_xname, | |||||
"mtu_sync_lock", MTX_DEF); | |||||
cv_init(&sc->sc_mtu_ctxt.mtu_sync.cv, "mtu_sync_cv"); | |||||
/* | /* | ||||
* Attach as an ordinary ethernet device, children will be attached | * Attach as an ordinary ethernet device, children will be attached | ||||
* as special device IFT_IEEE8023ADLAG. | * as special device IFT_IEEE8023ADLAG. | ||||
*/ | */ | ||||
ether_ifattach(ifp, eaddr); | ether_ifattach(ifp, eaddr); | ||||
sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | ||||
lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST); | lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST); | ||||
Show All 32 Lines | lagg_clone_destroy(struct ifnet *ifp) | ||||
ifmedia_removeall(&sc->sc_media); | ifmedia_removeall(&sc->sc_media); | ||||
ether_ifdetach(ifp); | ether_ifdetach(ifp); | ||||
if_free(ifp); | if_free(ifp); | ||||
LAGG_LIST_LOCK(); | LAGG_LIST_LOCK(); | ||||
SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries); | SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries); | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task); | taskqueue_drain(taskqueue_swi, &sc->sc_llq_task); | ||||
cv_destroy(&sc->sc_mtu_ctxt.mtu_sync.cv); | |||||
mtx_destroy(&sc->sc_mtu_ctxt.mtu_sync.lock); | |||||
LAGG_LOCK_DESTROY(sc); | LAGG_LOCK_DESTROY(sc); | ||||
free(sc, M_DEVBUF); | free(sc, M_DEVBUF); | ||||
} | } | ||||
/* | /* | ||||
* Set link-layer address on the lagg interface itself. | * Set link-layer address on the lagg interface itself. | ||||
* | * | ||||
* Set noinline to be dtrace-friendly | * Set noinline to be dtrace-friendly | ||||
▲ Show 20 Lines • Show All 75 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* Set noinline to be dtrace-friendly | * Set noinline to be dtrace-friendly | ||||
*/ | */ | ||||
static __noinline void | static __noinline void | ||||
lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr, lagg_llqtype llq_type) | lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr, lagg_llqtype llq_type) | ||||
{ | { | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct ifnet *ifp = lp->lp_ifp; | struct ifnet *ifp = lp->lp_ifp; | ||||
struct lagg_llq *llq; | struct lagg_llq_slist_entry *cmn_llq; | ||||
struct lagg_lladdr_llq_ctxt *llq_ctxt; | |||||
LAGG_WLOCK_ASSERT(sc); | LAGG_WLOCK_ASSERT(sc); | ||||
/* | /* | ||||
* Do not enqueue requests where lladdr is the same for | * Do not enqueue requests where lladdr is the same for | ||||
* "physical" interfaces (e.g. ports in lagg) | * "physical" interfaces (e.g. ports in lagg) | ||||
*/ | */ | ||||
if (llq_type == LAGG_LLQTYPE_PHYS && | if (llq_type == LAGG_LLQTYPE_PHYS && | ||||
memcmp(IF_LLADDR(ifp), lladdr, ETHER_ADDR_LEN) == 0) | memcmp(IF_LLADDR(ifp), lladdr, ETHER_ADDR_LEN) == 0) | ||||
return; | return; | ||||
/* Check to make sure its not already queued to be changed */ | /* Check to make sure its not already queued to be changed */ | ||||
SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) { | SLIST_FOREACH(cmn_llq, &sc->sc_llq[LAGG_LLQ_LLADDR], | ||||
if (llq->llq_ifp == ifp) { | llq_entries) { | ||||
llq_ctxt = (struct lagg_lladdr_llq_ctxt *)cmn_llq; | |||||
if (llq_ctxt->llq_ifp == ifp) { | |||||
/* Update lladdr, it may have changed */ | /* Update lladdr, it may have changed */ | ||||
bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN); | bcopy(lladdr, llq_ctxt->llq_lladdr, ETHER_ADDR_LEN); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT | M_ZERO); | llq_ctxt = malloc(sizeof(struct lagg_lladdr_llq_ctxt), M_DEVBUF, | ||||
if (llq == NULL) /* XXX what to do */ | M_NOWAIT | M_ZERO); | ||||
if (llq_ctxt == NULL) /* XXX what to do */ | |||||
return; | return; | ||||
llq->llq_ifp = ifp; | llq_ctxt->llq_ifp = ifp; | ||||
llq->llq_type = llq_type; | llq_ctxt->llq_type = llq_type; | ||||
bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN); | bcopy(lladdr, llq_ctxt->llq_lladdr, ETHER_ADDR_LEN); | ||||
/* XXX: We should insert to tail */ | /* XXX: We should insert to tail */ | ||||
SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries); | SLIST_INSERT_HEAD(&sc->sc_llq[LAGG_LLQ_LLADDR], | ||||
(struct lagg_llq_slist_entry *)llq_ctxt, llq_entries); | |||||
taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task); | taskqueue_enqueue(taskqueue_swi, &sc->sc_llq_task); | ||||
} | } | ||||
/* | /* | ||||
* Set the interface MAC address from a taskqueue to avoid a LOR. | * Set the interface MTU, MAC address from a taskqueue to avoid a LOR. | ||||
* | * | ||||
* Set noinline to be dtrace-friendly | * Set noinline to be dtrace-friendly | ||||
*/ | */ | ||||
static __noinline void | static __noinline void | ||||
lagg_port_setlladdr(void *arg, int pending) | lagg_port_ops(void *arg, int pending) | ||||
{ | { | ||||
struct lagg_softc *sc = (struct lagg_softc *)arg; | struct lagg_softc *sc = (struct lagg_softc *)arg; | ||||
struct lagg_llq *llq, *head; | struct lagg_llq_slist_entry *llq_first; | ||||
struct ifnet *ifp; | lagg_llq_idx llq_idx; | ||||
/* Grab a local reference of the queue and remove it from the softc */ | for (llq_idx = LAGG_LLQ_MIN; llq_idx < LAGG_LLQ_MAX; llq_idx++) { | ||||
LAGG_WLOCK(sc); | LAGG_WLOCK(sc); | ||||
head = SLIST_FIRST(&sc->sc_llq_head); | llq_first = SLIST_FIRST(&sc->sc_llq[llq_idx]); | ||||
SLIST_FIRST(&sc->sc_llq_head) = NULL; | SLIST_INIT(&sc->sc_llq[llq_idx]); | ||||
LAGG_WUNLOCK(sc); | LAGG_WUNLOCK(sc); | ||||
if (llq_first != NULL) | |||||
llq_action[llq_idx](sc, llq_first); | |||||
} | |||||
} | |||||
static void | |||||
lagg_llq_action_mtu(struct lagg_softc *sc, struct lagg_llq_slist_entry *first) | |||||
{ | |||||
struct lagg_llq_slist_entry *llq; | |||||
int err; | |||||
/* Set the new MTU on the lagg interface */ | |||||
LAGG_WLOCK(sc); | |||||
sc->sc_ifp->if_mtu = ((struct lagg_mtu_llq_ctxt *)first)->llq_ifr.ifr_mtu; | |||||
LAGG_WUNLOCK(sc); | |||||
/* | /* | ||||
Done Inline Actionsstyle(9) bool use of pointer type. smh: style(9) bool use of pointer type. | |||||
* Traverse the queue and set the mtu on each ifp. It is safe to do | |||||
* unlocked as we have the only reference to it. | |||||
*/ | |||||
err = EIO; /* In case the list is empty. */ | |||||
llq = first; | |||||
Done Inline ActionsNot sure if it's possible but if we have no llq_entries then err will be uninitialised on return. smh: Not sure if it's possible but if we have no llq_entries then err will be uninitialised on return. | |||||
SLIST_FOREACH_FROM(llq, (struct __llqhd *)NULL, llq_entries) { | |||||
struct lagg_mtu_llq_ctxt *llq_ctxt; | |||||
llq_ctxt = (struct lagg_mtu_llq_ctxt *)llq; | |||||
/* Set the new MTU on the physical interface */ | |||||
err = (*llq_ctxt->llq_ioctl)(llq_ctxt->llq_ifp, SIOCSIFMTU, | |||||
(caddr_t)&llq_ctxt->llq_ifr); | |||||
if (err) { | |||||
if_printf(llq_ctxt->llq_ifp, | |||||
"Failed to change MTU from %d to %d (err %d)\n", | |||||
llq_ctxt->llq_old_mtu, llq_ctxt->llq_ifr.ifr_mtu, err); | |||||
break; | |||||
} | |||||
} | |||||
if (err) { | |||||
/* Restore the old MTU on the lagg interface */ | |||||
LAGG_WLOCK(sc); | |||||
sc->sc_ifp->if_mtu = ((struct lagg_mtu_llq_ctxt *)first)->llq_old_mtu; | |||||
LAGG_WUNLOCK(sc); | |||||
/* Restore the old MTU on the physical interface */ | |||||
llq = first; | |||||
Done Inline Actionsstyle(9) blank line after vars. smh: style(9) blank line after vars. | |||||
SLIST_FOREACH_FROM(llq, (struct __llqhd *)NULL, llq_entries) { | |||||
struct lagg_mtu_llq_ctxt *llq_ctxt; | |||||
llq_ctxt = (struct lagg_mtu_llq_ctxt *)llq; | |||||
llq_ctxt->llq_ifr.ifr_mtu = llq_ctxt->llq_old_mtu; | |||||
err = (*llq_ctxt->llq_ioctl) | |||||
(llq_ctxt->llq_ifp, SIOCSIFMTU, (caddr_t)&llq_ctxt->llq_ifr); | |||||
if (err) { | |||||
if_printf(llq_ctxt->llq_ifp, | |||||
"Failed to restore MTU to %d (err %d)\n", | |||||
llq_ctxt->llq_old_mtu, err); | |||||
} | |||||
} | |||||
} | |||||
/* Free the MTU LLQ entries */ | |||||
_lagg_free_llq_entries(first); | |||||
mtx_lock(&sc->sc_mtu_ctxt.mtu_sync.lock); | |||||
sc->sc_mtu_ctxt.mtu_cmd_ret = err; | |||||
/* Signal for command completion */ | |||||
cv_signal(&sc->sc_mtu_ctxt.mtu_sync.cv); | |||||
mtx_unlock(&sc->sc_mtu_ctxt.mtu_sync.lock); | |||||
} | |||||
Done Inline Actionsstatic void should be on a separate line, a few more below. Also not keen on the name for this method; maybe something like lagg_llq_free_entries? smh: static void should be on a separate line, a few more below.
Also not keen on the name for this… |
static void | |||||
_lagg_free_llq_entries(struct lagg_llq_slist_entry *llq) | |||||
{ | |||||
struct lagg_llq_slist_entry *tmp_llq; | |||||
SLIST_FOREACH_FROM_SAFE(llq, (struct __llqhd *)NULL, llq_entries, | |||||
tmp_llq) { | |||||
free(llq, M_DEVBUF); | |||||
} | |||||
} | |||||
static void | |||||
lagg_free_llq_entries(struct lagg_softc *sc, lagg_llq_idx idx) | |||||
{ | |||||
struct lagg_llq_slist_entry *llq; | |||||
LAGG_WLOCK_ASSERT(sc); | |||||
llq = SLIST_FIRST(&sc->sc_llq[idx]); | |||||
SLIST_INIT(&sc->sc_llq[idx]); | |||||
_lagg_free_llq_entries(llq); | |||||
} | |||||
static int | |||||
lagg_change_mtu(struct ifnet *ifp, struct ifreq *ifr) | |||||
{ | |||||
struct lagg_softc *sc; | |||||
struct lagg_port *lp; | |||||
struct lagg_mtu_llq_ctxt *llq_ctxt; | |||||
int ret; | |||||
bool done; | |||||
Done Inline ActionsNo need for == TRUE on bool. smh: No need for == TRUE on bool. | |||||
sc = (struct lagg_softc *)ifp->if_softc; | |||||
ret = 0; | |||||
done = FALSE; | |||||
LAGG_WLOCK(sc); | |||||
if (SLIST_EMPTY(&sc->sc_ports)) { | |||||
Done Inline ActionsI assume this is checked after cv_has_waiters as that indicates an mtu change is in progress hence there's no easy way to confirm what it will become? It's a shame ioctl doesn't currently have EBUSY as a possible return error as that may be simpler. smh: I assume this is checked after cv_has_waiters as that indicates an mtu change is in progress… | |||||
ret = EIO; | |||||
smhUnsubmitted Done Inline ActionsThis looks a bit messy given you only need LAGG_WUNLOCK(sc); I would do something like (also eliminates done var too). Alternatively just use goto out, which while it sets busy to false that should be fine. if (SLIST_EMPTY(&sc->sc_ports)) { LAGG_WUNLOCK(sc); return (EIO); } else if (sc->sc_mtu_ctxt.busy) { LAGG_WUNLOCK(sc); return (EBUSY); } else if (ifp->if_mtu == ifr->ifr_mtu) { LAGG_WUNLOCK(sc); return (0); } sc->sc_mtu_ctxt.busy = TRUE; smh: This looks a bit messy given you only need LAGG_WUNLOCK(sc); I would do something like (also… | |||||
done = TRUE; | |||||
} else if (sc->sc_mtu_ctxt.busy) { | |||||
ret = EBUSY; | |||||
done = TRUE; | |||||
} else if (ifp->if_mtu == ifr->ifr_mtu) { | |||||
done = TRUE; | |||||
} else { | |||||
sc->sc_mtu_ctxt.busy = TRUE; | |||||
} | |||||
LAGG_WUNLOCK(sc); | |||||
/* Early return cases */ | |||||
if (done) { | |||||
return (ret); | |||||
} | |||||
LAGG_WLOCK(sc); | |||||
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { | |||||
llq_ctxt = malloc(sizeof(struct lagg_mtu_llq_ctxt), M_DEVBUF, | |||||
M_NOWAIT); | |||||
if (llq_ctxt == NULL) { | |||||
lagg_free_llq_entries(sc, LAGG_LLQ_MTU); | |||||
ret = ENOMEM; | |||||
goto out; | |||||
} | |||||
SLIST_INSERT_HEAD(&sc->sc_llq[LAGG_LLQ_MTU], | |||||
(struct lagg_llq_slist_entry *)llq_ctxt, llq_entries); | |||||
bcopy(ifr, &llq_ctxt->llq_ifr, sizeof(struct ifreq)); | |||||
llq_ctxt->llq_old_mtu = ifp->if_mtu; | |||||
llq_ctxt->llq_ifp = lp->lp_ifp; | |||||
llq_ctxt->llq_ioctl = lp->lp_ioctl; | |||||
} | |||||
mtx_lock(&sc->sc_mtu_ctxt.mtu_sync.lock); | |||||
taskqueue_enqueue(taskqueue_swi, &sc->sc_llq_task); | |||||
LAGG_WUNLOCK(sc); | |||||
/* Wait for the command completion */ | |||||
cv_wait(&sc->sc_mtu_ctxt.mtu_sync.cv, &sc->sc_mtu_ctxt.mtu_sync.lock); | |||||
ret = sc->sc_mtu_ctxt.mtu_cmd_ret; | |||||
mtx_unlock(&sc->sc_mtu_ctxt.mtu_sync.lock); | |||||
LAGG_WLOCK(sc); | |||||
out: | |||||
sc->sc_mtu_ctxt.busy = FALSE; | |||||
LAGG_WUNLOCK(sc); | |||||
return (ret); | |||||
} | |||||
static void | |||||
lagg_llq_action_lladdr(struct lagg_softc *sc, struct lagg_llq_slist_entry *head) | |||||
{ | |||||
struct lagg_lladdr_llq_ctxt *llq_ctxt; | |||||
struct lagg_llq_slist_entry *llq; | |||||
struct ifnet *ifp; | |||||
/* | |||||
* Traverse the queue and set the lladdr on each ifp. It is safe to do | * Traverse the queue and set the lladdr on each ifp. It is safe to do | ||||
* unlocked as we have the only reference to it. | * unlocked as we have the only reference to it. | ||||
*/ | */ | ||||
for (llq = head; llq != NULL; llq = head) { | for (llq = head; llq != NULL; llq = head) { | ||||
Done Inline ActionsThis traversal and freeing an entry after processing it should be done in lagg_port_ops(). hrs: This traversal and freeing an entry after processing it should be done in lagg_port_ops(). | |||||
ifp = llq->llq_ifp; | llq_ctxt = (struct lagg_lladdr_llq_ctxt *)llq; | ||||
ifp = llq_ctxt->llq_ifp; | |||||
CURVNET_SET(ifp->if_vnet); | CURVNET_SET(ifp->if_vnet); | ||||
/* | /* | ||||
* Set the link layer address on the laggport interface. | * Set the link layer address on the laggport interface. | ||||
* Note that if_setlladdr() or iflladdr_event handler | * Note that if_setlladdr() or iflladdr_event handler | ||||
* may result in arp transmission / lltable updates. | * may result in arp transmission / lltable updates. | ||||
*/ | */ | ||||
if (llq->llq_type == LAGG_LLQTYPE_PHYS) | if (llq_ctxt->llq_type == LAGG_LLQTYPE_PHYS) | ||||
if_setlladdr(ifp, llq->llq_lladdr, | if_setlladdr(ifp, llq_ctxt->llq_lladdr, ETHER_ADDR_LEN); | ||||
ETHER_ADDR_LEN); | |||||
else | else | ||||
EVENTHANDLER_INVOKE(iflladdr_event, ifp); | EVENTHANDLER_INVOKE(iflladdr_event, ifp); | ||||
CURVNET_RESTORE(); | CURVNET_RESTORE(); | ||||
Done Inline ActionsWhat if we have multiple events queued on tasq? e.g mtu AND mac change melifaro: What if we have multiple events queued on tasq? e.g mtu AND mac change | |||||
head = SLIST_NEXT(llq, llq_entries); | head = SLIST_NEXT(llq, llq_entries); | ||||
free(llq, M_DEVBUF); | free(llq, M_DEVBUF); | ||||
} | } | ||||
Done Inline Actionsstyle(9) four space additional indent only should be used, more below. smh: style(9) four space additional indent only should be used, more below. | |||||
} | } | ||||
static int | static int | ||||
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp) | lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp) | ||||
{ | { | ||||
struct lagg_softc *sc_ptr; | struct lagg_softc *sc_ptr; | ||||
struct lagg_port *lp, *tlp; | struct lagg_port *lp, *tlp; | ||||
int error, i; | int error, i; | ||||
uint64_t *pval; | uint64_t *pval; | ||||
LAGG_WLOCK_ASSERT(sc); | LAGG_WLOCK_ASSERT(sc); | ||||
/* Limit the maximal number of lagg ports */ | /* Limit the maximal number of lagg ports */ | ||||
if (sc->sc_count >= LAGG_MAX_PORTS) | if (sc->sc_count >= LAGG_MAX_PORTS) | ||||
return (ENOSPC); | return (ENOSPC); | ||||
/* Check if port has already been associated to a lagg */ | /* Check if port has already been associated to a lagg */ | ||||
if (ifp->if_lagg != NULL) { | if (ifp->if_lagg != NULL) { | ||||
/* Port is already in the current lagg? */ | /* Port is already in the current lagg? */ | ||||
lp = (struct lagg_port *)ifp->if_lagg; | lp = (struct lagg_port *)ifp->if_lagg; | ||||
if (lp->lp_softc == sc) | if (lp->lp_softc == sc) | ||||
return (EEXIST); | return (EEXIST); | ||||
Done Inline ActionsPlease separate an llq loop from a handler for per-port configuration. An llq traversal should be required only once in lagg_port_ops() if the handlers process a single lagg_llq entry. hrs: Please separate an llq loop from a handler for per-port configuration. An llq traversal should be… | |||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
/* XXX Disallow non-ethernet interfaces (this should be any of 802) */ | /* XXX Disallow non-ethernet interfaces (this should be any of 802) */ | ||||
if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN) | if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN) | ||||
return (EPROTONOSUPPORT); | return (EPROTONOSUPPORT); | ||||
/* Allow the first Ethernet member to define the MTU */ | /* Allow the first Ethernet member to define the MTU */ | ||||
if (SLIST_EMPTY(&sc->sc_ports)) | if (SLIST_EMPTY(&sc->sc_ports)) | ||||
sc->sc_ifp->if_mtu = ifp->if_mtu; | sc->sc_ifp->if_mtu = ifp->if_mtu; | ||||
Done Inline ActionsNot that easy, unfortunately. melifaro: Not that easy, unfortunately.
At this moment original ioctl returned 0, so other things/events… | |||||
else if (sc->sc_ifp->if_mtu != ifp->if_mtu) { | else if (sc->sc_ifp->if_mtu != ifp->if_mtu) { | ||||
if_printf(sc->sc_ifp, "invalid MTU for %s\n", | if_printf(sc->sc_ifp, "invalid MTU for %s\n", | ||||
ifp->if_xname); | ifp->if_xname); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
Done Inline ActionsLooks like this attempts to change more than would have been done above before the error, is that intended? smh: Looks like this attempts to change more than would have been done above before the error, is… | |||||
Done Inline ActionsFor the interfaces after the one that failed (and therefore which still have the old MTU), this is a noop. rpokala: For the interfaces after the one that failed (and therefore which still have the old MTU), this… | |||||
if ((lp = malloc(sizeof(struct lagg_port), | if ((lp = malloc(sizeof(struct lagg_port), | ||||
M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) | M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
/* Check if port is a stacked lagg */ | /* Check if port is a stacked lagg */ | ||||
LAGG_LIST_LOCK(); | LAGG_LIST_LOCK(); | ||||
SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) { | SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) { | ||||
if (ifp == sc_ptr->sc_ifp) { | if (ifp == sc_ptr->sc_ifp) { | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
free(lp, M_DEVBUF); | free(lp, M_DEVBUF); | ||||
return (EINVAL); | return (EINVAL); | ||||
/* XXX disable stacking for the moment, its untested */ | /* XXX disable stacking for the moment, its untested */ | ||||
#ifdef LAGG_PORT_STACKING | #ifdef LAGG_PORT_STACKING | ||||
lp->lp_flags |= LAGG_PORT_STACK; | lp->lp_flags |= LAGG_PORT_STACK; | ||||
Done Inline ActionsLooks like the error gets lost, although printed, is there no way we can avoid this? smh: Looks like the error gets lost, although printed, is there no way we can avoid this? | |||||
if (lagg_port_checkstacking(sc_ptr) >= | if (lagg_port_checkstacking(sc_ptr) >= | ||||
LAGG_MAX_STACKING) { | LAGG_MAX_STACKING) { | ||||
LAGG_LIST_UNLOCK(); | LAGG_LIST_UNLOCK(); | ||||
free(lp, M_DEVBUF); | free(lp, M_DEVBUF); | ||||
return (E2BIG); | return (E2BIG); | ||||
} | } | ||||
#endif | #endif | ||||
} | } | ||||
Show All 12 Lines | #endif | ||||
lp->lp_ifp = ifp; | lp->lp_ifp = ifp; | ||||
lp->lp_softc = sc; | lp->lp_softc = sc; | ||||
/* Save port link layer address */ | /* Save port link layer address */ | ||||
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN); | bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN); | ||||
if (SLIST_EMPTY(&sc->sc_ports)) { | if (SLIST_EMPTY(&sc->sc_ports)) { | ||||
sc->sc_primary = lp; | sc->sc_primary = lp; | ||||
/* First port in lagg. Update/notify lagg lladdress */ | /* First port in lagg. Update/notify lagg lladdress */ | ||||
Done Inline Actionsstyle(9) bracing around return. smh: style(9) bracing around return. | |||||
Done Inline ActionsI rather hate that idiom, but fine. :-P rpokala: I rather hate that idiom, but fine. :-P | |||||
lagg_lladdr(sc, IF_LLADDR(ifp)); | lagg_lladdr(sc, IF_LLADDR(ifp)); | ||||
} else { | } else { | ||||
/* | /* | ||||
* Update link layer address for this port and | * Update link layer address for this port and | ||||
* send notifications to other subsystems. | * send notifications to other subsystems. | ||||
Done Inline Actionsstyle(9) init of vars in declaration should be avoided. Moving it down to where it's first needed can avoid setting it at all. smh: style(9) init of vars in declaration should be avoided.
Moving it down to where it's first… |
*/ | */ | ||||
lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp), LAGG_LLQTYPE_PHYS); | lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp), LAGG_LLQTYPE_PHYS); | ||||
} | } | ||||
/* | /* | ||||
* Insert into the list of ports. | * Insert into the list of ports. | ||||
* Keep ports sorted by if_index. It is handy, when configuration | * Keep ports sorted by if_index. It is handy, when configuration | ||||
* is predictable and `ifconfig laggN create ...` command | * is predictable and `ifconfig laggN create ...` command | ||||
* will lead to the same result each time. | * will lead to the same result each time. | ||||
*/ | */ | ||||
SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) { | SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) { | ||||
if (tlp->lp_ifp->if_index < ifp->if_index && ( | if (tlp->lp_ifp->if_index < ifp->if_index && ( | ||||
SLIST_NEXT(tlp, lp_entries) == NULL || | SLIST_NEXT(tlp, lp_entries) == NULL || | ||||
SLIST_NEXT(tlp, lp_entries)->lp_ifp->if_index > | SLIST_NEXT(tlp, lp_entries)->lp_ifp->if_index > | ||||
ifp->if_index)) | ifp->if_index)) | ||||
break; | break; | ||||
} | } | ||||
if (tlp != NULL) | if (tlp != NULL) | ||||
SLIST_INSERT_AFTER(tlp, lp, lp_entries); | SLIST_INSERT_AFTER(tlp, lp, lp_entries); | ||||
Done Inline Actionsstyle(9) bool use of pointer type. smh: style(9) bool use of pointer type. | |||||
Done Inline ActionsIs this (llq == NULL), not (llq != NULL)? hrs: Is this (llq == NULL), not (llq != NULL)? | |||||
else | else | ||||
SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries); | SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries); | ||||
sc->sc_count++; | sc->sc_count++; | ||||
Done Inline ActionsWhy is cleanup required here? This removes all of tasks not limited to MTU change. hrs: Why is cleanup required here? This removes all of tasks not limited to MTU change. | |||||
/* Update lagg capabilities */ | /* Update lagg capabilities */ | ||||
lagg_capabilities(sc); | lagg_capabilities(sc); | ||||
Done Inline Actionsstyle(9) bool use of pointer type. smh: style(9) bool use of pointer type. | |||||
lagg_linkstate(sc); | lagg_linkstate(sc); | ||||
/* Read port counters */ | /* Read port counters */ | ||||
pval = lp->port_counters.val; | pval = lp->port_counters.val; | ||||
for (i = 0; i < IFCOUNTERS; i++, pval++) | for (i = 0; i < IFCOUNTERS; i++, pval++) | ||||
*pval = ifp->if_get_counter(ifp, i); | *pval = ifp->if_get_counter(ifp, i); | ||||
/* Add multicast addresses and interface flags to this port */ | /* Add multicast addresses and interface flags to this port */ | ||||
lagg_ether_cmdmulti(lp, 1); | lagg_ether_cmdmulti(lp, 1); | ||||
lagg_setflags(lp, 1); | lagg_setflags(lp, 1); | ||||
Done Inline Actionsconsider: "it might have been updated." smh: consider: "it might have been updated." | |||||
if ((error = lagg_proto_addport(sc, lp)) != 0) { | if ((error = lagg_proto_addport(sc, lp)) != 0) { | ||||
/* Remove the port, without calling pr_delport. */ | /* Remove the port, without calling pr_delport. */ | ||||
lagg_port_destroy(lp, 0); | lagg_port_destroy(lp, 0); | ||||
return (error); | return (error); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
Done Inline Actionsstyle(9) bool use of pointer type. smh: style(9) bool use of pointer type. | |||||
Done Inline Actionsfree(NULL) does nothing. Checking if NULL or not is useless. hrs: free(NULL) does nothing. Checking if NULL or not is useless. | |||||
#ifdef LAGG_PORT_STACKING | #ifdef LAGG_PORT_STACKING | ||||
static int | static int | ||||
lagg_port_checkstacking(struct lagg_softc *sc) | lagg_port_checkstacking(struct lagg_softc *sc) | ||||
{ | { | ||||
struct lagg_softc *sc_ptr; | struct lagg_softc *sc_ptr; | ||||
struct lagg_port *lp; | struct lagg_port *lp; | ||||
int m = 0; | int m = 0; | ||||
Show All 11 Lines | |||||
} | } | ||||
#endif | #endif | ||||
static int | static int | ||||
lagg_port_destroy(struct lagg_port *lp, int rundelport) | lagg_port_destroy(struct lagg_port *lp, int rundelport) | ||||
{ | { | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct lagg_port *lp_ptr, *lp0; | struct lagg_port *lp_ptr, *lp0; | ||||
struct lagg_llq *llq; | struct lagg_llq_slist_entry *cmn_llq; | ||||
struct lagg_lladdr_llq_ctxt *llq_ctxt; | |||||
struct ifnet *ifp = lp->lp_ifp; | struct ifnet *ifp = lp->lp_ifp; | ||||
uint64_t *pval, vdiff; | uint64_t *pval, vdiff; | ||||
int i; | int i; | ||||
LAGG_WLOCK_ASSERT(sc); | LAGG_WLOCK_ASSERT(sc); | ||||
if (rundelport) | if (rundelport) | ||||
lagg_proto_delport(sc, lp); | lagg_proto_delport(sc, lp); | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | if (lp == sc->sc_primary) { | ||||
* old lladdr to its 'real' one). | * old lladdr to its 'real' one). | ||||
*/ | */ | ||||
SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries) | SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries) | ||||
lagg_port_lladdr(lp_ptr, lladdr, LAGG_LLQTYPE_PHYS); | lagg_port_lladdr(lp_ptr, lladdr, LAGG_LLQTYPE_PHYS); | ||||
} | } | ||||
/* Remove any pending lladdr changes from the queue */ | /* Remove any pending lladdr changes from the queue */ | ||||
if (lp->lp_detaching) { | if (lp->lp_detaching) { | ||||
SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) { | SLIST_FOREACH(cmn_llq, &sc->sc_llq[LAGG_LLQ_LLADDR], llq_entries) { | ||||
if (llq->llq_ifp == ifp) { | llq_ctxt = (struct lagg_lladdr_llq_ctxt *)cmn_llq; | ||||
SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq, | if (llq_ctxt->llq_ifp == ifp) { | ||||
llq_entries); | SLIST_REMOVE(&sc->sc_llq[LAGG_LLQ_LLADDR], cmn_llq, | ||||
free(llq, M_DEVBUF); | lagg_llq_slist_entry, llq_entries); | ||||
free(cmn_llq, M_DEVBUF); | |||||
break; /* Only appears once */ | break; /* Only appears once */ | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if (lp->lp_ifflags) | if (lp->lp_ifflags) | ||||
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__); | if_printf(ifp, "%s: lp_ifflags unclean\n", __func__); | ||||
▲ Show 20 Lines • Show All 568 Lines • ▼ Show 20 Lines | case SIOCDELMULTI: | ||||
LAGG_WUNLOCK(sc); | LAGG_WUNLOCK(sc); | ||||
break; | break; | ||||
case SIOCSIFMEDIA: | case SIOCSIFMEDIA: | ||||
case SIOCGIFMEDIA: | case SIOCGIFMEDIA: | ||||
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | ||||
break; | break; | ||||
case SIOCSIFCAP: | case SIOCSIFCAP: | ||||
case SIOCSIFMTU: | /* Do not allow the CAPs to be directly changed. */ | ||||
/* Do not allow the MTU or caps to be directly changed */ | |||||
error = EINVAL; | error = EINVAL; | ||||
break; | |||||
case SIOCSIFMTU: | |||||
error = lagg_change_mtu(ifp, ifr); | |||||
break; | break; | ||||
default: | default: | ||||
error = ether_ioctl(ifp, cmd, data); | error = ether_ioctl(ifp, cmd, data); | ||||
break; | break; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
Show All 16 Lines | |||||
static int | static int | ||||
lagg_ether_cmdmulti(struct lagg_port *lp, int set) | lagg_ether_cmdmulti(struct lagg_port *lp, int set) | ||||
{ | { | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct ifnet *ifp = lp->lp_ifp; | struct ifnet *ifp = lp->lp_ifp; | ||||
struct ifnet *scifp = sc->sc_ifp; | struct ifnet *scifp = sc->sc_ifp; | ||||
struct lagg_mc *mc; | struct lagg_mc *mc; | ||||
struct ifmultiaddr *ifma; | struct ifmultiaddr *ifma; | ||||
Done Inline Actionsstyle(9) says to not include unnecessary braces (which I personally disagree with, but what can you do?) rstone: style(9) says to not include unnecessary braces (which I personally disagree with, but what can… | |||||
int error; | int error; | ||||
Done Inline Actionsstyle(9): put brackets around the return value: return (0); rstone: style(9): put brackets around the return value:
return (0); | |||||
LAGG_WLOCK_ASSERT(sc); | LAGG_WLOCK_ASSERT(sc); | ||||
if (set) { | if (set) { | ||||
IF_ADDR_WLOCK(scifp); | IF_ADDR_WLOCK(scifp); | ||||
TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) { | TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) { | ||||
if (ifma->ifma_addr->sa_family != AF_LINK) | if (ifma->ifma_addr->sa_family != AF_LINK) | ||||
continue; | continue; | ||||
Show All 21 Lines | while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) { | ||||
if (mc->mc_ifma && !lp->lp_detaching) | if (mc->mc_ifma && !lp->lp_detaching) | ||||
if_delmulti_ifma(mc->mc_ifma); | if_delmulti_ifma(mc->mc_ifma); | ||||
free(mc, M_DEVBUF); | free(mc, M_DEVBUF); | ||||
} | } | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
/* Handle a ref counted flag that should be set on the lagg port as well */ | /* Handle a ref counted flag that should be set on the lagg port as well */ | ||||
Done Inline ActionsI find the flow control here a bit confusing (my first read through, I thought that err2 could be used unitinialized). Given that you have a continue in the if block, I would find it clearer to not have an else here rstone: I find the flow control here a bit confusing (my first read through, I thought that err2 could… | |||||
static int | static int | ||||
lagg_setflag(struct lagg_port *lp, int flag, int status, | lagg_setflag(struct lagg_port *lp, int flag, int status, | ||||
int (*func)(struct ifnet *, int)) | int (*func)(struct ifnet *, int)) | ||||
{ | { | ||||
struct lagg_softc *sc = lp->lp_softc; | struct lagg_softc *sc = lp->lp_softc; | ||||
struct ifnet *scifp = sc->sc_ifp; | struct ifnet *scifp = sc->sc_ifp; | ||||
struct ifnet *ifp = lp->lp_ifp; | struct ifnet *ifp = lp->lp_ifp; | ||||
int error; | int error; | ||||
▲ Show 20 Lines • Show All 605 Lines • Show Last 20 Lines |
style(9) should be 4 space indented, more below