Index: sys/net/if_bridge.c
===================================================================
--- sys/net/if_bridge.c
+++ sys/net/if_bridge.c
@@ -237,6 +237,8 @@
 	uint32_t		bif_addrmax;	/* max # of addresses */
 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
 	uint32_t		bif_addrexceeded;/* # of address violations */
+
+	struct epoch_context	bif_epoch_ctx;
 };
 
 /*
@@ -250,6 +252,9 @@
 	uint8_t			brt_flags;	/* address flags */
 	uint8_t			brt_addr[ETHER_ADDR_LEN];
 	uint16_t		brt_vlan;	/* vlan id */
+
+	struct vnet		*brt_vnet;
+	struct epoch_context	brt_epoch_ctx;
 };
 #define	brt_ifp			brt_dst->bif_ifp
 
@@ -598,6 +603,11 @@
 	if_clone_detach(V_bridge_cloner);
 	V_bridge_cloner = NULL;
 	BRIDGE_LIST_LOCK_DESTROY();
+
+	/* Wait for all epoch callbacks to complete before destroying the
+	 * uma zone, because those callbacks use it. */
+	NET_EPOCH_WAIT();
+
 	uma_zdestroy(V_bridge_rtnode_zone);
 }
 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
@@ -760,6 +770,17 @@
 	return (0);
 }
 
+static void
+bridge_clone_destroy_cb(struct epoch_context *ctx)
+{
+	struct bridge_softc *sc;
+
+	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
+
+	BRIDGE_LOCK_DESTROY(sc);
+	free(sc, M_DEVBUF);
+}
+
 /*
  * bridge_clone_destroy:
  *
@@ -798,8 +819,7 @@
 	ether_ifdetach(ifp);
 	if_free(ifp);
 
-	BRIDGE_LOCK_DESTROY(sc);
-	free(sc, M_DEVBUF);
+	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
 }
 
 /*
@@ -825,6 +845,9 @@
 	struct ifdrv *ifd = (struct ifdrv *) data;
 	const struct bridge_control *bc;
 	int error = 0, oldmtu;
+	struct epoch_tracker et;
+
+	NET_EPOCH_ENTER(et);
 
 	switch (cmd) {
 
@@ -946,6 +969,8 @@
 		break;
 	}
 
+	NET_EPOCH_EXIT(et);
+
 	return (error);
 }
 
@@ -960,6 +985,8 @@
 	struct bridge_iflist *bif;
 	int enabled, mask;
 
+	BRIDGE_LOCK_ASSERT(sc);
+
 	/* Initial bitmask of capabilities to test */
 	mask = BRIDGE_IFCAPS_MASK;
 
@@ -1021,7 +1048,7 @@
 	struct bridge_iflist *bif;
 	struct ifnet *ifp;
 
-	BRIDGE_LOCK_ASSERT(sc);
+	NET_EPOCH_ASSERT();
 
 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		ifp = bif->bif_ifp;
@@ -1042,7 +1069,7 @@
 {
 	struct bridge_iflist *bif;
 
-	BRIDGE_LOCK_ASSERT(sc);
+	NET_EPOCH_ASSERT();
 
 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		if (bif->bif_ifp == member_ifp)
@@ -1052,6 +1079,16 @@
 	return (NULL);
 }
 
+static void
+bridge_delete_member_cb(struct epoch_context *ctx)
+{
+	struct bridge_iflist *bif;
+
+	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
+
+	free(bif, M_DEVBUF);
+}
+
 /*
  * bridge_delete_member:
  *
@@ -1130,7 +1167,8 @@
 	}
 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
 	BRIDGE_LOCK(sc);
-	free(bif, M_DEVBUF);
+
+	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
 }
 
 /*
@@ -1147,7 +1185,8 @@
 	    ("%s: not a span interface", __func__));
 
 	CK_LIST_REMOVE(bif, bif_next);
-	free(bif, M_DEVBUF);
+
+	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
 }
 
 static int
@@ -1530,12 +1569,17 @@
 	struct bridge_iflist *bif;
 	int error;
 
+	NET_EPOCH_ASSERT();
+
 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
 	if (bif == NULL)
 		return (ENOENT);
 
+	/* bridge_rtupdate() may acquire the lock. */
+	BRIDGE_UNLOCK(sc);
 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
 	    req->ifba_flags);
+	BRIDGE_LOCK(sc);
 
 	return (error);
 }
 
@@ -1874,6 +1918,7 @@
 {
 	struct bridge_softc *sc = ifp->if_bridge;
 	struct bridge_iflist *bif;
+	struct epoch_tracker et;
 
 	if (ifp->if_flags & IFF_RENAMING)
 		return;
@@ -1884,6 +1929,7 @@
 		 */
 		return;
 	}
+	NET_EPOCH_ENTER(et);
 	/* Check if the interface is a bridge member */
 	if (sc != NULL) {
 		BRIDGE_LOCK(sc);
@@ -1893,6 +1939,7 @@
 			bridge_delete_member(sc, bif, 1);
 		BRIDGE_UNLOCK(sc);
+		NET_EPOCH_EXIT(et);
 		return;
 	}
 
 	/* Check if the interface is a span port */
@@ -1909,6 +1956,7 @@
 		BRIDGE_UNLOCK(sc);
 	}
 	BRIDGE_LIST_UNLOCK();
+	NET_EPOCH_EXIT(et);
 }
 
 /*
@@ -2068,6 +2116,8 @@
 	struct bridge_softc *sc;
 	uint16_t vlan;
 
+	NET_EPOCH_ASSERT();
+
 	if (m->m_len < ETHER_HDR_LEN) {
 		m = m_pullup(m, ETHER_HDR_LEN);
 		if (m == NULL)
@@ -2078,7 +2128,6 @@
 	sc = ifp->if_bridge;
 	vlan = VLANTAGOF(m);
 
-	BRIDGE_LOCK(sc);
 	bifp = sc->sc_ifp;
 
 	/*
@@ -2105,16 +2154,10 @@
 	if (dst_if == NULL) {
 		struct bridge_iflist *bif;
 		struct mbuf *mc;
-		int error = 0, used = 0;
+		int used = 0;
 
 		bridge_span(sc, m);
 
-		BRIDGE_LOCK2REF(sc, error);
-		if (error) {
-			m_freem(m);
-			return (0);
-		}
-
 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 			dst_if = bif->bif_ifp;
 
@@ -2148,7 +2191,6 @@
 		}
 		if (used == 0)
 			m_freem(m);
-		BRIDGE_UNREF(sc);
 		return (0);
 	}
 
@@ -2160,11 +2202,9 @@
 	bridge_span(sc, m);
 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		m_freem(m);
-		BRIDGE_UNLOCK(sc);
 		return (0);
 	}
 
-	BRIDGE_UNLOCK(sc);
 	bridge_enqueue(sc, dst_if, m);
 	return (0);
 }
@@ -2189,10 +2229,8 @@
 	eh = mtod(m, struct ether_header *);
 
-	BRIDGE_LOCK(sc);
 	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
 	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
-		BRIDGE_UNLOCK(sc);
 		error = bridge_enqueue(sc, dst_if, m);
 	} else
 		bridge_broadcast(sc, ifp, m, 0);
 
@@ -2226,6 +2264,8 @@
 	uint8_t *dst;
 	int error;
 
+	NET_EPOCH_ASSERT();
+
 	src_if = m->m_pkthdr.rcvif;
 	ifp = sc->sc_ifp;
 
@@ -2304,12 +2344,10 @@
 	    || PFIL_HOOKED_IN(V_inet6_pfil_head)
 #endif
 	    ) {
-		BRIDGE_UNLOCK(sc);
 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
 			return;
 		if (m == NULL)
 			return;
-		BRIDGE_LOCK(sc);
 	}
 
 	if (dst_if == NULL) {
@@ -2337,8 +2375,6 @@
 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
 		goto drop;
 
-	BRIDGE_UNLOCK(sc);
-
 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
 #ifdef INET6
 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
 #endif
@@ -2354,7 +2390,6 @@
 	return;
 
 drop:
-	BRIDGE_UNLOCK(sc);
 	m_freem(m);
 }
 
@@ -2375,6 +2410,8 @@
 	uint16_t vlan;
 	int error;
 
+	NET_EPOCH_ASSERT();
+
 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 		return (m);
 
@@ -2395,10 +2432,8 @@
 		m_freem(m);
 		return (NULL);
 	}
 
-	BRIDGE_LOCK(sc);
 	bif = bridge_lookup_member_if(sc, ifp);
 	if (bif == NULL) {
-		BRIDGE_UNLOCK(sc);
 		return (m);
 	}
 
@@ -2411,13 +2446,11 @@
 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
 		    ETHER_ADDR_LEN) == 0) {
 			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
-			BRIDGE_UNLOCK(sc);
 			return (NULL);
 		}
 
 		if ((bif->bif_flags & IFBIF_STP) &&
 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
-			BRIDGE_UNLOCK(sc);
 			return (m);
 		}
 
@@ -2428,7 +2461,6 @@
 		 */
 		mc = m_dup(m, M_NOWAIT);
 		if (mc == NULL) {
-			BRIDGE_UNLOCK(sc);
 			return (m);
 		}
 
@@ -2460,7 +2492,6 @@
 
 	if ((bif->bif_flags & IFBIF_STP) &&
 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
-		BRIDGE_UNLOCK(sc);
 		return (m);
 	}
 
@@ -2494,7 +2525,6 @@
 			error = bridge_rtupdate(sc, eh->ether_shost,	\
 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
 			if (error && bif->bif_addrmax) {		\
-				BRIDGE_UNLOCK(sc);			\
 				m_freem(m);				\
 				return (NULL);				\
 			}						\
@@ -2502,7 +2532,6 @@
 			m->m_pkthdr.rcvif = iface;			\
 			if ((iface) == ifp) {				\
 				/* Skip bridge processing... src == dest */ \
-				BRIDGE_UNLOCK(sc);			\
 				return (m);				\
 			}						\
 			/* It's passing over or to the bridge, locally. */ \
@@ -2514,13 +2543,11 @@
 			    OR_PFIL_HOOKED_INET6)) {			\
 				if (bridge_pfil(&m, NULL, ifp,		\
 				    PFIL_IN) != 0 || m == NULL) {	\
-					BRIDGE_UNLOCK(sc);		\
 					return (NULL);			\
 				}					\
 			}						\
 			if ((iface) != bifp)				\
 				ETHER_BPF_MTAP(iface, m);		\
-			BRIDGE_UNLOCK(sc);				\
 			return (m);					\
 		}							\
 									\
@@ -2528,7 +2555,6 @@
 		if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
 		    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
 		    ) {							\
-			BRIDGE_UNLOCK(sc);				\
 			m_freem(m);					\
 			return (NULL);					\
 		}
@@ -2579,15 +2605,11 @@
 	struct bridge_iflist *dbif, *sbif;
 	struct mbuf *mc;
 	struct ifnet *dst_if;
-	int error = 0, used = 0, i;
+	int used = 0, i;
 
-	sbif = bridge_lookup_member_if(sc, src_if);
+	NET_EPOCH_ASSERT();
 
-	BRIDGE_LOCK2REF(sc, error);
-	if (error) {
-		m_freem(m);
-		return;
-	}
+	sbif = bridge_lookup_member_if(sc, src_if);
 
 	/* Filter on the bridge interface before broadcasting */
 	if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
@@ -2596,9 +2618,9 @@
 #endif
 	    )) {
 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
-			goto out;
+			return;
 		if (m == NULL)
-			goto out;
+			return;
 	}
 
 	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
@@ -2661,9 +2683,6 @@
 	}
 	if (used == 0)
 		m_freem(m);
-
-out:
-	BRIDGE_UNREF(sc);
 }
 
 /*
@@ -2679,6 +2698,8 @@
 	struct ifnet *dst_if;
 	struct mbuf *mc;
 
+	NET_EPOCH_ASSERT();
+
 	if (CK_LIST_EMPTY(&sc->sc_spanlist))
 		return;
 
@@ -2710,7 +2731,8 @@
 	struct bridge_rtnode *brt;
 	int error;
 
-	BRIDGE_LOCK_ASSERT(sc);
+	NET_EPOCH_ASSERT();
+	BRIDGE_UNLOCK_ASSERT(sc);
 
 	/* Check the source address is valid and not multicast. */
 	if (ETHER_IS_MULTICAST(dst) ||
@@ -2727,13 +2749,24 @@
 	 * update it, otherwise create a new one.
 	 */
 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
+		BRIDGE_LOCK(sc);
+
+		/* Check again, now that we have the lock. There could have
+		 * been a race and we only want to insert this once. */
+		if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) != NULL) {
+			BRIDGE_UNLOCK(sc);
+			return (0);
+		}
+
 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
 			sc->sc_brtexceeded++;
+			BRIDGE_UNLOCK(sc);
 			return (ENOSPC);
 		}
 		/* Check per interface address limits (if enabled) */
 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
 			bif->bif_addrexceeded++;
+			BRIDGE_UNLOCK(sc);
 			return (ENOSPC);
 		}
 
@@ -2743,8 +2776,11 @@
 		 * initialize the expiration time and Ethernet
 		 * address.
 		 */
 		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
-		if (brt == NULL)
+		if (brt == NULL) {
+			BRIDGE_UNLOCK(sc);
 			return (ENOMEM);
+		}
+		brt->brt_vnet = curvnet;
 
 		if (bif->bif_flags & IFBIF_STICKY)
 			brt->brt_flags = IFBAF_STICKY;
@@ -2756,17 +2792,22 @@
 
 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
 			uma_zfree(V_bridge_rtnode_zone, brt);
+			BRIDGE_UNLOCK(sc);
 			return (error);
 		}
 		brt->brt_dst = bif;
 		bif->bif_addrcnt++;
+
+		BRIDGE_UNLOCK(sc);
 	}
 
 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
 	    brt->brt_dst != bif) {
+		BRIDGE_LOCK(sc);
 		brt->brt_dst->bif_addrcnt--;
 		brt->brt_dst = bif;
 		brt->brt_dst->bif_addrcnt++;
+		BRIDGE_UNLOCK(sc);
 	}
 
 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
@@ -2787,7 +2828,7 @@
 {
 	struct bridge_rtnode *brt;
 
-	BRIDGE_LOCK_ASSERT(sc);
+	NET_EPOCH_ASSERT();
 
 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
 		return (NULL);
@@ -3026,7 +3067,7 @@
 	uint32_t hash;
 	int dir;
 
-	BRIDGE_LOCK_ASSERT(sc);
+	NET_EPOCH_ASSERT();
 
 	hash = bridge_rthash(sc, addr);
 	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
@@ -3089,6 +3130,18 @@
 	return (0);
 }
 
+static void
+bridge_rtnode_destroy_cb(struct epoch_context *ctx)
+{
+	struct bridge_rtnode *brt;
+
+	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
+
+	CURVNET_SET(brt->brt_vnet);
+	uma_zfree(V_bridge_rtnode_zone, brt);
+	CURVNET_RESTORE();
+}
+
 /*
  * bridge_rtnode_destroy:
  *
@@ -3104,7 +3157,8 @@
 	CK_LIST_REMOVE(brt, brt_list);
 	sc->sc_brtcnt--;
 	brt->brt_dst->bif_addrcnt--;
-	uma_zfree(V_bridge_rtnode_zone, brt);
+
+	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
 }
 
 /*
@@ -3643,17 +3697,20 @@
 {
 	struct bridge_softc *sc = ifp->if_bridge;
 	struct bridge_iflist *bif;
+	struct epoch_tracker et;
+
+	NET_EPOCH_ENTER(et);
 
-	BRIDGE_LOCK(sc);
 	bif = bridge_lookup_member_if(sc, ifp);
 	if (bif == NULL) {
-		BRIDGE_UNLOCK(sc);
+		NET_EPOCH_EXIT(et);
 		return;
 	}
 	bridge_linkcheck(sc);
-	BRIDGE_UNLOCK(sc);
 
 	bstp_linkstate(&bif->bif_stp);
+
+	NET_EPOCH_EXIT(et);
 }
 
 static void
@@ -3662,7 +3719,8 @@
 	struct bridge_iflist *bif;
 	int new_link, hasls;
 
-	BRIDGE_LOCK_ASSERT(sc);
+	NET_EPOCH_ASSERT();
+
 	new_link = LINK_STATE_DOWN;
 	hasls = 0;
 	/* Our link is considered up if at least one of our ports is active */
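The patch applies the same epoch(9) deferred-reclamation shape in several places: each structure that epoch readers may still be traversing embeds a struct epoch_context, the writer unlinks the object under the bridge lock, and the actual free is handed to NET_EPOCH_CALL() so it only runs once current readers have left their epoch section. The standalone sketch below shows that shape in isolation, assuming the NET_EPOCH_* wrappers from net/if_var.h; struct foo, the M_FOO malloc tag, and the function names are illustrative placeholders, not code from the patch.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/epoch.h>

#include <net/if.h>
#include <net/if_var.h>		/* NET_EPOCH_CALL() and friends */

static MALLOC_DEFINE(M_FOO, "foo", "epoch deferred-free sketch");

struct foo {
	int			foo_value;
	struct epoch_context	foo_epoch_ctx;	/* storage for the deferred call */
};

static void
foo_free_cb(struct epoch_context *ctx)
{
	struct foo *f;

	/* Recover the enclosing object from its embedded epoch context. */
	f = __containerof(ctx, struct foo, foo_epoch_ctx);
	free(f, M_FOO);
}

static void
foo_destroy(struct foo *f)
{
	/*
	 * The caller has already unlinked 'f' from every list that epoch
	 * readers traverse; the free itself is deferred until those readers
	 * have drained, mirroring bridge_clone_destroy_cb() and
	 * bridge_rtnode_destroy_cb() in the patch.
	 */
	NET_EPOCH_CALL(foo_free_cb, &f->foo_epoch_ctx);
}

Because the epoch_context storage lives inside the object being freed, no extra allocation is needed on the free path; that is why the patch adds the bif_epoch_ctx and brt_epoch_ctx members rather than allocating callback state separately.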