Index: sys/net/if_lagg.h
===================================================================
--- sys/net/if_lagg.h
+++ sys/net/if_lagg.h
@@ -244,9 +244,14 @@
 	struct callout		sc_callout;
 	u_int			sc_opts;
 	int			flowid_shift;	/* shift the flowid */
-	uint32_t		sc_bkt;		/* packates bucket for roundrobin */
-	uint32_t		sc_bkt_count;	/* packates bucket count for roundrobin */
-	struct lagg_counters	detached_counters; /* detached ports sum */
+	/* packets bucket for roundrobin */
+	uint32_t		sc_bkt;
+	/* packets bucket count for roundrobin */
+	uint32_t		sc_bkt_count;
+	/* detached ports sum */
+	struct lagg_counters	detached_counters;
+	/* saved sum of ports' counters */
+	struct lagg_counters	saved_port_counters;
 };

 struct lagg_port {
@@ -276,6 +281,7 @@
 #define	LAGG_LOCK_INIT(_sc)	rm_init(&(_sc)->sc_mtx, "if_lagg rmlock")
 #define	LAGG_LOCK_DESTROY(_sc)	rm_destroy(&(_sc)->sc_mtx)
 #define	LAGG_RLOCK(_sc, _p)	rm_rlock(&(_sc)->sc_mtx, (_p))
+#define	LAGG_TRY_RLOCK(_sc, _p)	rm_try_rlock(&(_sc)->sc_mtx, (_p))
 #define	LAGG_WLOCK(_sc)		rm_wlock(&(_sc)->sc_mtx)
 #define	LAGG_RUNLOCK(_sc, _p)	rm_runlock(&(_sc)->sc_mtx, (_p))
 #define	LAGG_WUNLOCK(_sc)	rm_wunlock(&(_sc)->sc_mtx)
Index: sys/net/if_lagg.c
===================================================================
--- sys/net/if_lagg.c
+++ sys/net/if_lagg.c
@@ -1033,8 +1033,14 @@
 *
 * Counter value is calculated the following way:
 * 1) for each port, sum difference between current and "initial" measurements.
- * 2) add lagg logical interface counters.
- * 3) add data from detached_counters array.
+ * 2) save this sum in the saved_port_counters array.
+ * 3) add lagg logical interface counters.
+ * 4) add count from detached_counters array.
+ *
+ * If we can't acquire the lagg softc lock, just return a value computed from
+ * the saved_port_counters.
+ * The user will see what looks like a momentary
+ * pause in lagg traffic, but it will catch up to the correct value the next
+ * time they fetch counters (assuming they can get the lock then).
 *
 * We also do the following things on ports attach/detach:
 * 1) On port attach we store all counters it has into port_counter array.
@@ -1048,39 +1054,45 @@
 	struct lagg_port *lp;
 	struct ifnet *lpifp;
 	struct rm_priotracker tracker;
-	uint64_t newval, oldval, vsum;
+	uint64_t laggsum = 0;

 	/* Revise this when we've got non-generic counters. */
 	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

 	sc = (struct lagg_softc *)ifp->if_softc;
-	LAGG_RLOCK(sc, &tracker);
+	if (LAGG_TRY_RLOCK(sc, &tracker)) {
+		/* locked the lagg */
+		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+			uint64_t origval, newval, portsum;
-	vsum = 0;
-	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
-		/* Saved attached value */
-		oldval = lp->port_counters.val[cnt];
-		/* current value */
-		lpifp = lp->lp_ifp;
-		newval = lpifp->if_get_counter(lpifp, cnt);
-		/* Calculate diff and save new */
-		vsum += newval - oldval;
+			/* Saved value from port attach */
+			origval = lp->port_counters.val[cnt];
+			/* current value */
+			lpifp = lp->lp_ifp;
+			newval = lpifp->if_get_counter(lpifp, cnt);
+			/* Accumulate difference */
+			portsum = newval - origval;
+			laggsum += portsum;
+		}
+		sc->saved_port_counters.val[cnt] = laggsum;
+		LAGG_RUNLOCK(sc, &tracker);
+	} else {
+		/* Failed to get the lock; report old values */
+		laggsum = sc->saved_port_counters.val[cnt];
 	}

 	/*
 	 * Add counter data which might be added by upper
 	 * layer protocols operating on logical interface.
 	 */
-	vsum += if_get_counter_default(ifp, cnt);
+	laggsum += if_get_counter_default(ifp, cnt);

 	/*
 	 * Add counter data from detached ports counters
 	 */
-	vsum += sc->detached_counters.val[cnt];
-
-	LAGG_RUNLOCK(sc, &tracker);
+	laggsum += sc->detached_counters.val[cnt];

-	return (vsum);
+	return (laggsum);
 }

 /*