Page MenuHomeFreeBSD

D14969.id41284.diff
No OneTemporary

D14969.id41284.diff

Index: sys/kern/subr_witness.c
===================================================================
--- sys/kern/subr_witness.c
+++ sys/kern/subr_witness.c
@@ -530,19 +530,23 @@
* IPv4 multicast:
* protocol locks before interface locks, after UDP locks.
*/
+ { "in_multi_sx", &lock_class_sx },
{ "udpinp", &lock_class_rw },
- { "in_multi_mtx", &lock_class_mtx_sleep },
+ { "in_multi_list_mtx", &lock_class_mtx_sleep },
{ "igmp_mtx", &lock_class_mtx_sleep },
{ "if_addr_lock", &lock_class_rw },
+ { "ifnet_rw", &lock_class_rw },
{ NULL, NULL },
/*
* IPv6 multicast:
* protocol locks before interface locks, after UDP locks.
*/
+ { "in6_multi_sx", &lock_class_sx },
{ "udpinp", &lock_class_rw },
- { "in6_multi_mtx", &lock_class_mtx_sleep },
+ { "in6_multi_list_mtx", &lock_class_mtx_sleep },
{ "mld_mtx", &lock_class_mtx_sleep },
{ "if_addr_lock", &lock_class_rw },
+ { "ifnet_rw", &lock_class_rw },
{ NULL, NULL },
/*
* UNIX Domain Sockets
Index: sys/net/if.c
===================================================================
--- sys/net/if.c
+++ sys/net/if.c
@@ -3317,6 +3317,8 @@
struct sockaddr_dl sdl;
int error;
+ IN_MULTI_LIST_UNLOCK_ASSERT();
+ IN6_MULTI_LIST_UNLOCK_ASSERT();
/*
* If the address is already present, return a new reference to it;
* otherwise, allocate storage and set up a new address.
@@ -3498,6 +3500,8 @@
struct ifnet *ifp;
int lastref;
+ IN_MULTI_LIST_UNLOCK_ASSERT();
+ IN_MULTI_LOCK_ASSERT();
ifp = ifma->ifma_ifp;
#ifdef DIAGNOSTIC
if (ifp == NULL) {
Index: sys/netinet/igmp.c
===================================================================
--- sys/netinet/igmp.c
+++ sys/netinet/igmp.c
@@ -136,7 +136,7 @@
struct in_multi *, const int, const int, const int);
static int igmp_v3_enqueue_filter_change(struct mbufq *,
struct in_multi *);
-static void igmp_v3_process_group_timers(struct igmp_ifsoftc *,
+static void igmp_v3_process_group_timers(struct in_multi_head *,
struct mbufq *, struct mbufq *, struct in_multi *,
const int);
static int igmp_v3_merge_state_changes(struct in_multi *,
@@ -162,12 +162,12 @@
* themselves are not virtualized.
*
* Locking:
- * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
+ * * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* Any may be taken independently; if any are held at the same
* time, the above lock order must be followed.
* * All output is delegated to the netisr.
* Now that Giant has been eliminated, the netisr may be inlined.
- * * IN_MULTI_LOCK covers in_multi.
+ * * IN_MULTI_LIST_LOCK covers in_multi.
* * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
* including the output queue.
* * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
@@ -441,7 +441,7 @@
if (error)
return (error);
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
IGMP_LOCK();
if (name[0] <= 0 || name[0] > V_if_index) {
@@ -475,7 +475,7 @@
out_locked:
IGMP_UNLOCK();
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (error);
}
@@ -586,7 +586,6 @@
igi->igi_qi = IGMP_QI_INIT;
igi->igi_qri = IGMP_QRI_INIT;
igi->igi_uri = IGMP_URI_INIT;
- SLIST_INIT(&igi->igi_relinmhead);
mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
@@ -612,11 +611,12 @@
{
struct igmp_ifsoftc *igi;
struct ifmultiaddr *ifma;
- struct in_multi *inm, *tinm;
-
+ struct in_multi *inm;
+ struct in_multi_head inm_free_tmp;
CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
ifp->if_xname);
+ SLIST_INIT(&inm_free_tmp);
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@@ -632,23 +632,18 @@
#endif
inm = (struct in_multi *)ifma->ifma_protospec;
if (inm->inm_state == IGMP_LEAVING_MEMBER) {
- SLIST_INSERT_HEAD(&igi->igi_relinmhead,
- inm, inm_nrele);
+ if (--inm->inm_refcount == 0) {
+ SLIST_INSERT_HEAD(&inm_free_tmp,
+ inm, inm_nrele);
+ }
}
inm_clear_recorded(inm);
}
IF_ADDR_RUNLOCK(ifp);
- /*
- * Free the in_multi reference(s) for this IGMP lifecycle.
- */
- SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
- tinm) {
- SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
- inm_release_locked(inm);
- }
+ inm_release_list_deferred(&inm_free_tmp);
}
-
IGMP_UNLOCK();
+
}
/*
@@ -684,11 +679,6 @@
mbufq_drain(&igi->igi_gq);
LIST_REMOVE(igi, igi_link);
-
- KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
- ("%s: there are dangling in_multi references",
- __func__));
-
free(igi, M_IGMP);
return;
}
@@ -722,7 +712,7 @@
}
IGMPSTAT_INC(igps_rcv_gen_queries);
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@@ -778,7 +768,7 @@
out_locked:
IGMP_UNLOCK();
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (0);
}
@@ -816,7 +806,7 @@
IGMPSTAT_INC(igps_rcv_group_queries);
}
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@@ -872,7 +862,7 @@
out_locked:
IGMP_UNLOCK();
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (0);
}
@@ -899,7 +889,7 @@
CTR4(KTR_IGMPV3, "0x%08x: %s/%s timer=%d", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
@@ -1011,7 +1001,7 @@
IGMPSTAT_INC(igps_rcv_gsr_queries);
}
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@@ -1092,7 +1082,7 @@
out_locked:
IGMP_UNLOCK();
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (0);
}
@@ -1109,7 +1099,7 @@
int retval;
uint16_t nsrc;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
retval = 0;
@@ -1246,7 +1236,7 @@
* If we are a member of this group, and our membership should be
* reported, stop our group timer and transition to the 'lazy' state.
*/
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
inm = inm_lookup(ifp, igmp->igmp_group);
if (inm != NULL) {
struct igmp_ifsoftc *igi;
@@ -1305,7 +1295,7 @@
}
out_locked:
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (0);
}
@@ -1373,7 +1363,7 @@
* reported, and our group timer is pending or about to be reset,
* stop our group timer by transitioning to the 'lazy' state.
*/
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
inm = inm_lookup(ifp, igmp->igmp_group);
if (inm != NULL) {
struct igmp_ifsoftc *igi;
@@ -1418,7 +1408,7 @@
}
out_locked:
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (0);
}
@@ -1647,6 +1637,7 @@
struct igmp_ifsoftc *igi;
struct ifmultiaddr *ifma;
struct in_multi *inm;
+ struct in_multi_head inm_free_tmp;
int loop, uri_fasthz;
loop = 0;
@@ -1662,7 +1653,8 @@
!V_state_change_timers_running)
return;
- IN_MULTI_LOCK();
+ SLIST_INIT(&inm_free_tmp);
+ IN_MULTI_LIST_LOCK();
IGMP_LOCK();
/*
@@ -1720,7 +1712,7 @@
igi->igi_version);
break;
case IGMP_VERSION_3:
- igmp_v3_process_group_timers(igi, &qrq,
+ igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
&scq, inm, uri_fasthz);
break;
}
@@ -1728,8 +1720,6 @@
IF_ADDR_RUNLOCK(ifp);
if (igi->igi_version == IGMP_VERSION_3) {
- struct in_multi *tinm;
-
igmp_dispatch_queue(&qrq, 0, loop);
igmp_dispatch_queue(&scq, 0, loop);
@@ -1737,18 +1727,13 @@
* Free the in_multi reference(s) for this
* IGMP lifecycle.
*/
- SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
- inm_nrele, tinm) {
- SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
- inm_nrele);
- inm_release_locked(inm);
- }
+ inm_release_list_deferred(&inm_free_tmp);
}
}
out_locked:
IGMP_UNLOCK();
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
}
/*
@@ -1760,7 +1745,7 @@
{
int report_timer_expired;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
if (inm->inm_timer == 0) {
@@ -1802,14 +1787,14 @@
* Note: Unlocked read from igi.
*/
static void
-igmp_v3_process_group_timers(struct igmp_ifsoftc *igi,
+igmp_v3_process_group_timers(struct in_multi_head *inmh,
struct mbufq *qrq, struct mbufq *scq,
struct in_multi *inm, const int uri_fasthz)
{
int query_response_timer_expired;
int state_change_retransmit_timer_expired;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
query_response_timer_expired = 0;
@@ -1907,8 +1892,8 @@
if (inm->inm_state == IGMP_LEAVING_MEMBER &&
inm->inm_scrv == 0) {
inm->inm_state = IGMP_NOT_MEMBER;
- SLIST_INSERT_HEAD(&igi->igi_relinmhead,
- inm, inm_nrele);
+ if (--inm->inm_refcount == 0)
+ SLIST_INSERT_HEAD(inmh, inm, inm_nrele);
}
}
break;
@@ -1929,7 +1914,7 @@
igmp_v3_suppress_group_record(struct in_multi *inm)
{
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
("%s: not IGMPv3 mode on link", __func__));
@@ -2003,13 +1988,15 @@
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
- struct in_multi *inm, *tinm;
+ struct in_multi *inm;
+ struct in_multi_head inm_free_tmp;
CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
igi->igi_ifp, igi->igi_ifp->if_xname);
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
+ SLIST_INIT(&inm_free_tmp);
/*
* Stop the v3 General Query Response on this link stone dead.
@@ -2050,7 +2037,8 @@
* message is sent upstream to the old querier --
* transition to NOT would lose the leave and race.
*/
- SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
+ if (--inm->inm_refcount == 0)
+ SLIST_INSERT_HEAD(&inm_free_tmp, inm, inm_nrele);
/* FALLTHROUGH */
case IGMP_G_QUERY_PENDING_MEMBER:
case IGMP_SG_QUERY_PENDING_MEMBER:
@@ -2069,10 +2057,8 @@
mbufq_drain(&inm->inm_scq);
}
IF_ADDR_RUNLOCK(ifp);
- SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
- SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
- inm_release_locked(inm);
- }
+
+ inm_release_list_deferred(&inm_free_tmp);
}
/*
@@ -2199,7 +2185,7 @@
struct ip *ip;
struct mbuf *m;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
ifp = inm->inm_ifp;
@@ -2276,10 +2262,8 @@
struct ifnet *ifp;
int error;
- IN_MULTI_LOCK_ASSERT();
-
error = 0;
-
+ IN_MULTI_LOCK_ASSERT();
/*
* Try to detect if the upper layer just asked us to change state
* for an interface which has now gone away.
@@ -2379,9 +2363,10 @@
* group around for the final INCLUDE {} enqueue.
*/
if (igi->igi_version == IGMP_VERSION_3 &&
- inm->inm_state == IGMP_LEAVING_MEMBER)
- inm_release_locked(inm);
-
+ inm->inm_state == IGMP_LEAVING_MEMBER) {
+ MPASS(inm->inm_refcount > 1);
+ --inm->inm_refcount;
+ }
inm->inm_state = IGMP_REPORTING_MEMBER;
switch (igi->igi_version) {
@@ -2473,7 +2458,7 @@
ifp = inm->inm_ifp;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
@@ -2531,7 +2516,7 @@
__func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
inm->inm_ifp->if_xname);
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
switch (inm->inm_state) {
@@ -2658,7 +2643,7 @@
in_addr_t naddr;
uint8_t mode;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
error = 0;
ifp = inm->inm_ifp;
@@ -3018,7 +3003,7 @@
uint8_t mode, now, then;
rectype_t crt, drt, nrt;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
if (inm->inm_nsrc == 0 ||
(inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
@@ -3221,7 +3206,7 @@
domerge = 0;
recslen = 0;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
/*
@@ -3320,7 +3305,7 @@
struct in_multi *inm;
int retval, loop;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
KASSERT(igi->igi_version == IGMP_VERSION_3,
@@ -3632,7 +3617,6 @@
db_printf(" qi %u\n", igi->igi_qi);
db_printf(" qri %u\n", igi->igi_qri);
db_printf(" uri %u\n", igi->igi_uri);
- /* SLIST_HEAD(,in_multi) igi_relinmhead */
/* struct mbufq igi_gq; */
db_printf("\n");
}
Index: sys/netinet/igmp_var.h
===================================================================
--- sys/netinet/igmp_var.h
+++ sys/netinet/igmp_var.h
@@ -214,7 +214,6 @@
uint32_t igi_qi; /* IGMPv3 Query Interval (s) */
uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */
uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */
- SLIST_HEAD(,in_multi) igi_relinmhead; /* released groups */
struct mbufq igi_gq; /* general query responses queue */
};
Index: sys/netinet/in.c
===================================================================
--- sys/netinet/in.c
+++ sys/netinet/in.c
@@ -632,12 +632,10 @@
struct in_ifinfo *ii;
ii = ((struct in_ifinfo *)ifp->if_afdata[AF_INET]);
- IN_MULTI_LOCK();
if (ii->ii_allhosts) {
- (void)in_leavegroup_locked(ii->ii_allhosts, NULL);
+ (void)in_leavegroup(ii->ii_allhosts, NULL);
ii->ii_allhosts = NULL;
}
- IN_MULTI_UNLOCK();
}
IF_ADDR_WLOCK(ifp);
@@ -1011,12 +1009,12 @@
static void
in_purgemaddrs(struct ifnet *ifp)
{
- LIST_HEAD(,in_multi) purgeinms;
- struct in_multi *inm, *tinm;
+ struct in_multi_head purgeinms;
+ struct in_multi *inm;
struct ifmultiaddr *ifma;
- LIST_INIT(&purgeinms);
- IN_MULTI_LOCK();
+ SLIST_INIT(&purgeinms);
+ IN_MULTI_LIST_LOCK();
/*
* Extract list of in_multi associated with the detaching ifp
@@ -1034,17 +1032,14 @@
("%s: ifma_protospec is NULL", __func__));
#endif
inm = (struct in_multi *)ifma->ifma_protospec;
- LIST_INSERT_HEAD(&purgeinms, inm, inm_link);
+ if (--inm->inm_refcount == 0)
+ SLIST_INSERT_HEAD(&purgeinms, inm, inm_nrele);
}
IF_ADDR_RUNLOCK(ifp);
- LIST_FOREACH_SAFE(inm, &purgeinms, inm_link, tinm) {
- LIST_REMOVE(inm, inm_link);
- inm_release_locked(inm);
- }
+ inm_release_list_deferred(&purgeinms);
igmp_ifdetach(ifp);
-
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
}
struct in_llentry {
Index: sys/netinet/in_mcast.c
===================================================================
--- sys/netinet/in_mcast.c
+++ sys/netinet/in_mcast.c
@@ -51,6 +51,7 @@
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/taskqueue.h>
+#include <sys/gtaskqueue.h>
#include <sys/tree.h>
#include <net/if.h>
@@ -59,6 +60,9 @@
#include <net/route.h>
#include <net/vnet.h>
+#include <net/ethernet.h>
+#include <net/iflib.h>
+
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_fib.h>
@@ -91,17 +95,23 @@
/*
* Locking:
- * - Lock order is: Giant, INP_WLOCK, IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
+ * - Lock order is: Giant, INP_WLOCK, IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however
* it can be taken by code in net/if.c also.
* - ip_moptions and in_mfilter are covered by the INP_WLOCK.
*
- * struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly
+ * struct in_multi is covered by IN_MULTI_LIST_LOCK. There isn't strictly
* any need for in_multi itself to be virtualized -- it is bound to an ifp
* anyway no matter what happens.
*/
-struct mtx in_multi_mtx;
-MTX_SYSINIT(in_multi_mtx, &in_multi_mtx, "in_multi_mtx", MTX_DEF);
+struct mtx in_multi_list_mtx;
+MTX_SYSINIT(in_multi_mtx, &in_multi_list_mtx, "in_multi_list_mtx", MTX_DEF);
+
+struct mtx in_multi_free_mtx;
+MTX_SYSINIT(in_multi_free_mtx, &in_multi_free_mtx, "in_multi_free_mtx", MTX_DEF);
+
+struct sx in_multi_sx;
+SX_SYSINIT(in_multi_sx, &in_multi_sx, "in_multi_sx");
/*
* Functions with non-static linkage defined in this file should be
@@ -151,6 +161,7 @@
static int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *);
static void inm_purge(struct in_multi *);
static void inm_reap(struct in_multi *);
+static void inm_release(struct in_multi *);
static struct ip_moptions *
inp_findmoptions(struct inpcb *);
static void inp_freemoptions_internal(struct ip_moptions *);
@@ -216,6 +227,58 @@
}
#endif
+static struct grouptask free_gtask;
+static struct in_multi_head inm_free_list;
+static void inm_release_task(void *arg __unused);
+static void inm_init(void)
+{
+ SLIST_INIT(&inm_free_list);
+ iflib_config_gtask_init(NULL, &free_gtask, inm_release_task, "inm release task");
+}
+
+SYSINIT(inm_init, SI_SUB_SMP + 1, SI_ORDER_FIRST,
+ inm_init, NULL);
+
+
+void
+inm_release_list_deferred(struct in_multi_head *inmh)
+{
+ mtx_lock(&in_multi_free_mtx);
+ SLIST_CONCAT(&inm_free_list, inmh, in_multi, inm_nrele);
+ mtx_unlock(&in_multi_free_mtx);
+ GROUPTASK_ENQUEUE(&free_gtask);
+}
+
+void
+inm_release_deferred(struct in_multi *inm)
+{
+ struct in_multi_head tmp;
+
+ if (--inm->inm_refcount == 0) {
+ SLIST_INIT(&tmp);
+ SLIST_INSERT_HEAD(&tmp, inm, inm_nrele);
+ inm_release_list_deferred(&tmp);
+ }
+}
+
+static void
+inm_release_task(void *arg __unused)
+{
+ struct in_multi_head inm_free_tmp;
+ struct in_multi *inm, *tinm;
+
+ SLIST_INIT(&inm_free_tmp);
+ mtx_lock(&in_multi_free_mtx);
+ SLIST_CONCAT(&inm_free_tmp, &inm_free_list, in_multi, inm_nrele);
+ mtx_unlock(&in_multi_free_mtx);
+ IN_MULTI_LOCK();
+ SLIST_FOREACH_SAFE(inm, &inm_free_tmp, inm_nrele, tinm) {
+ SLIST_REMOVE_HEAD(&inm_free_tmp, inm_nrele);
+ inm_release(inm);
+ }
+ IN_MULTI_UNLOCK();
+}
+
/*
* Initialize an in_mfilter structure to a known state at t0, t1
* with an empty source filter list.
@@ -232,7 +295,7 @@
/*
* Function for looking up an in_multi record for an IPv4 multicast address
* on a given interface. ifp must be valid. If no record found, return NULL.
- * The IN_MULTI_LOCK and IF_ADDR_LOCK on ifp must be held.
+ * The IN_MULTI_LIST_LOCK and IF_ADDR_LOCK on ifp must be held.
*/
struct in_multi *
inm_lookup_locked(struct ifnet *ifp, const struct in_addr ina)
@@ -240,7 +303,7 @@
struct ifmultiaddr *ifma;
struct in_multi *inm;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IF_ADDR_LOCK_ASSERT(ifp);
inm = NULL;
@@ -264,7 +327,7 @@
{
struct in_multi *inm;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
IF_ADDR_RLOCK(ifp);
inm = inm_lookup_locked(ifp, ina);
IF_ADDR_RUNLOCK(ifp);
@@ -451,7 +514,7 @@
IN_MULTI_LOCK_ASSERT();
ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET];
-
+ IN_MULTI_LIST_LOCK();
inm = inm_lookup(ifp, *group);
if (inm != NULL) {
/*
@@ -462,9 +525,11 @@
("%s: bad refcount %d", __func__, inm->inm_refcount));
++inm->inm_refcount;
*pinm = inm;
- return (0);
}
-
+ IN_MULTI_LIST_UNLOCK();
+ if (inm != NULL)
+ return (0);
+
memset(&gsin, 0, sizeof(gsin));
gsin.sin_family = AF_INET;
gsin.sin_len = sizeof(struct sockaddr_in);
@@ -550,21 +615,14 @@
* If the refcount drops to 0, free the in_multi record and
* delete the underlying link-layer membership.
*/
-void
-inm_release_locked(struct in_multi *inm)
+static void
+inm_release(struct in_multi *inm)
{
struct ifmultiaddr *ifma;
- IN_MULTI_LOCK_ASSERT();
-
+ IN_MULTI_LIST_LOCK();
CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount);
-
- if (--inm->inm_refcount > 0) {
- CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__,
- inm->inm_refcount);
- return;
- }
-
+ MPASS(inm->inm_refcount == 0);
CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm);
ifma = inm->inm_ifma;
@@ -576,7 +634,7 @@
ifma->ifma_protospec = NULL;
inm_purge(inm);
-
+ IN_MULTI_LIST_UNLOCK();
free(inm, M_IPMADDR);
if_delmulti_ifma(ifma);
@@ -592,7 +650,7 @@
{
struct ip_msource *ims;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
if (ims->ims_stp) {
@@ -632,7 +690,7 @@
struct ip_msource find;
struct ip_msource *ims, *nims;
- IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_LOCK_ASSERT();
find.ims_haddr = ntohl(naddr);
ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find);
@@ -959,6 +1017,7 @@
schanged = 0;
error = 0;
nsrc1 = nsrc0 = 0;
+ IN_MULTI_LIST_LOCK_ASSERT();
/*
* Update the source filters first, as this may fail.
@@ -1165,6 +1224,7 @@
int error;
IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_UNLOCK_ASSERT();
CTR4(KTR_IGMPV3, "%s: join 0x%08x on %p(%s))", __func__,
ntohl(gina->s_addr), ifp, ifp->if_xname);
@@ -1186,7 +1246,7 @@
CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__);
return (error);
}
-
+ IN_MULTI_LIST_LOCK();
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
error = inm_merge(inm, imf);
if (error) {
@@ -1201,10 +1261,12 @@
goto out_inm_release;
}
-out_inm_release:
+out_inm_release:
+ IN_MULTI_LIST_UNLOCK();
if (error) {
+
CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
- inm_release_locked(inm);
+ inm_release_deferred(inm);
} else {
*pinm = inm;
}
@@ -1249,6 +1311,7 @@
error = 0;
IN_MULTI_LOCK_ASSERT();
+ IN_MULTI_LIST_UNLOCK_ASSERT();
CTR5(KTR_IGMPV3, "%s: leave inm %p, 0x%08x/%s, imf %p", __func__,
inm, ntohl(inm->inm_addr.s_addr),
@@ -1272,18 +1335,20 @@
* the transaction, it MUST NOT fail.
*/
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
CURVNET_SET(inm->inm_ifp->if_vnet);
error = igmp_change_state(inm);
+ IN_MULTI_LIST_UNLOCK();
CURVNET_RESTORE();
if (error)
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
- inm_release_locked(inm);
+ inm_release_deferred(inm);
return (error);
}
@@ -1487,7 +1552,7 @@
* Begin state merge transaction at IGMP layer.
*/
IN_MULTI_LOCK();
-
+ IN_MULTI_LIST_LOCK();
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
error = inm_merge(inm, imf);
if (error) {
@@ -1503,7 +1568,7 @@
out_in_multi_locked:
IN_MULTI_UNLOCK();
-
+	IN_MULTI_LIST_UNLOCK();
out_imf_rollback:
if (error)
imf_rollback(imf);
@@ -1582,9 +1647,9 @@
{
KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__));
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
STAILQ_INSERT_TAIL(&imo_gc_list, imo, imo_link);
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
taskqueue_enqueue(taskqueue_thread, &imo_gc_task);
}
@@ -1615,15 +1680,15 @@
{
struct ip_moptions *imo;
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
while (!STAILQ_EMPTY(&imo_gc_list)) {
imo = STAILQ_FIRST(&imo_gc_list);
STAILQ_REMOVE_HEAD(&imo_gc_list, imo_link);
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
inp_freemoptions_internal(imo);
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
}
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
}
/*
@@ -2163,6 +2228,8 @@
/*
* Begin state merge transaction at IGMP layer.
*/
+ in_pcbref(inp);
+ INP_WUNLOCK(inp);
IN_MULTI_LOCK();
if (is_new) {
@@ -2171,20 +2238,23 @@
if (error) {
CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
__func__);
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
goto out_imo_free;
}
imo->imo_membership[idx] = inm;
} else {
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
- __func__);
+ __func__);
+ IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
+ IN_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
__func__);
@@ -2195,7 +2265,8 @@
out_in_multi_locked:
IN_MULTI_UNLOCK();
-
+ INP_WLOCK(inp);
+ in_pcbrele_wlocked(inp);
INP_WLOCK_ASSERT(inp);
if (error) {
imf_rollback(imf);
@@ -2395,6 +2466,7 @@
(void)in_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
@@ -2404,6 +2476,7 @@
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
+ IN_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
__func__);
@@ -2639,6 +2712,7 @@
INP_WLOCK_ASSERT(inp);
IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
/*
* Begin state merge transaction at IGMP layer.
@@ -2647,11 +2721,13 @@
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
+ IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
+ IN_MULTI_LIST_UNLOCK();
if (error)
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
@@ -2883,7 +2959,7 @@
if (retval)
return (retval);
- IN_MULTI_LOCK();
+ IN_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
@@ -2916,7 +2992,7 @@
}
IF_ADDR_RUNLOCK(ifp);
- IN_MULTI_UNLOCK();
+ IN_MULTI_LIST_UNLOCK();
return (retval);
}
Index: sys/netinet/in_var.h
===================================================================
--- sys/netinet/in_var.h
+++ sys/netinet/in_var.h
@@ -55,6 +55,7 @@
struct igmp_ifsoftc;
struct in_multi;
struct lltable;
+SLIST_HEAD(in_multi_head, in_multi);
/*
* IPv4 per-interface state.
@@ -329,11 +330,18 @@
* consumers of IN_*_MULTI() macros should acquire the locks before
* calling them; users of the in_{add,del}multi() functions should not.
*/
-extern struct mtx in_multi_mtx;
-#define IN_MULTI_LOCK() mtx_lock(&in_multi_mtx)
-#define IN_MULTI_UNLOCK() mtx_unlock(&in_multi_mtx)
-#define IN_MULTI_LOCK_ASSERT() mtx_assert(&in_multi_mtx, MA_OWNED)
-#define IN_MULTI_UNLOCK_ASSERT() mtx_assert(&in_multi_mtx, MA_NOTOWNED)
+extern struct mtx in_multi_list_mtx;
+extern struct sx in_multi_sx;
+
+#define IN_MULTI_LIST_LOCK() mtx_lock(&in_multi_list_mtx)
+#define IN_MULTI_LIST_UNLOCK() mtx_unlock(&in_multi_list_mtx)
+#define IN_MULTI_LIST_LOCK_ASSERT() mtx_assert(&in_multi_list_mtx, MA_OWNED)
+#define IN_MULTI_LIST_UNLOCK_ASSERT() mtx_assert(&in_multi_list_mtx, MA_NOTOWNED)
+
+#define IN_MULTI_LOCK() sx_xlock(&in_multi_sx)
+#define IN_MULTI_UNLOCK() sx_xunlock(&in_multi_sx)
+#define IN_MULTI_LOCK_ASSERT() sx_assert(&in_multi_sx, SA_XLOCKED)
+#define IN_MULTI_UNLOCK_ASSERT() sx_assert(&in_multi_sx, SA_XUNLOCKED)
/* Acquire an in_multi record. */
static __inline void
@@ -364,10 +372,10 @@
void inm_clear_recorded(struct in_multi *);
void inm_print(const struct in_multi *);
int inm_record_source(struct in_multi *inm, const in_addr_t);
-void inm_release(struct in_multi *);
-void inm_release_locked(struct in_multi *);
+void inm_release_deferred(struct in_multi *);
+void inm_release_list_deferred(struct in_multi_head *);
struct in_multi *
- in_addmulti(struct in_addr *, struct ifnet *);
+in_addmulti(struct in_addr *, struct ifnet *);
void in_delmulti(struct in_multi *);
int in_joingroup(struct ifnet *, const struct in_addr *,
/*const*/ struct in_mfilter *, struct in_multi **);
Index: sys/netinet/ip_carp.c
===================================================================
--- sys/netinet/ip_carp.c
+++ sys/netinet/ip_carp.c
@@ -1406,7 +1406,7 @@
break;
}
in6m = NULL;
- if ((error = in6_mc_join(ifp, &in6, NULL, &in6m, 0)) != 0) {
+ if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
free(im6o->im6o_membership, M_CARP);
break;
}
@@ -1421,13 +1421,13 @@
in6.s6_addr32[3] = 0;
in6.s6_addr8[12] = 0xff;
if ((error = in6_setscope(&in6, ifp, NULL)) != 0) {
- in6_mc_leave(im6o->im6o_membership[0], NULL);
+ in6_leavegroup(im6o->im6o_membership[0], NULL);
free(im6o->im6o_membership, M_CARP);
break;
}
in6m = NULL;
- if ((error = in6_mc_join(ifp, &in6, NULL, &in6m, 0)) != 0) {
- in6_mc_leave(im6o->im6o_membership[0], NULL);
+ if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
+ in6_leavegroup(im6o->im6o_membership[0], NULL);
free(im6o->im6o_membership, M_CARP);
break;
}
@@ -1470,8 +1470,8 @@
if (cif->cif_naddrs6 == 0) {
struct ip6_moptions *im6o = &cif->cif_im6o;
- in6_mc_leave(im6o->im6o_membership[0], NULL);
- in6_mc_leave(im6o->im6o_membership[1], NULL);
+ in6_leavegroup(im6o->im6o_membership[0], NULL);
+ in6_leavegroup(im6o->im6o_membership[1], NULL);
KASSERT(im6o->im6o_mfilters == NULL,
("%s: im6o_mfilters != NULL", __func__));
free(im6o->im6o_membership, M_CARP);
Index: sys/netinet6/in6.c
===================================================================
--- sys/netinet6/in6.c
+++ sys/netinet6/in6.c
@@ -80,6 +80,7 @@
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
+#include <sys/protosw.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@@ -733,6 +734,30 @@
}
+static struct in6_multi_mship *
+in6_joingroup_legacy(struct ifnet *ifp, const struct in6_addr *mcaddr,
+ int *errorp, int delay)
+{
+ struct in6_multi_mship *imm;
+ int error;
+
+ imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT);
+ if (imm == NULL) {
+ *errorp = ENOBUFS;
+ return (NULL);
+ }
+
+ delay = (delay * PR_FASTHZ) / hz;
+
+ error = in6_joingroup(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
+ if (error) {
+ *errorp = error;
+ free(imm, M_IP6MADDR);
+ return (NULL);
+ }
+
+ return (imm);
+}
/*
* Join necessary multicast groups. Factored out from in6_update_ifa().
* This entire work should only be done once, for the default FIB.
@@ -769,7 +794,7 @@
*/
delay = arc4random() % (MAX_RTR_SOLICITATION_DELAY * hz);
}
- imm = in6_joingroup(ifp, &mltaddr, &error, delay);
+ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay);
if (imm == NULL) {
nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s "
"(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr),
@@ -786,7 +811,7 @@
if ((error = in6_setscope(&mltaddr, ifp, NULL)) != 0)
goto cleanup; /* XXX: should not fail */
- imm = in6_joingroup(ifp, &mltaddr, &error, 0);
+ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, 0);
if (imm == NULL) {
nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s "
"(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr),
@@ -808,7 +833,7 @@
}
if (in6_nigroup(ifp, NULL, -1, &mltaddr) == 0) {
/* XXX jinmei */
- imm = in6_joingroup(ifp, &mltaddr, &error, delay);
+ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay);
if (imm == NULL)
nd6log((LOG_WARNING,
"%s: in6_joingroup failed for %s on %s "
@@ -820,7 +845,7 @@
}
if (V_icmp6_nodeinfo_oldmcprefix &&
in6_nigroup_oldmcprefix(ifp, NULL, -1, &mltaddr) == 0) {
- imm = in6_joingroup(ifp, &mltaddr, &error, delay);
+ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay);
if (imm == NULL)
nd6log((LOG_WARNING,
"%s: in6_joingroup failed for %s on %s "
@@ -839,7 +864,7 @@
if ((error = in6_setscope(&mltaddr, ifp, NULL)) != 0)
goto cleanup; /* XXX: should not fail */
- imm = in6_joingroup(ifp, &mltaddr, &error, 0);
+ imm = in6_joingroup_legacy(ifp, &mltaddr, &error, 0);
if (imm == NULL) {
nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s "
"(errno=%d)\n", __func__, ip6_sprintf(ip6buf,
@@ -1274,7 +1299,9 @@
/* Leave multicast groups. */
while ((imm = LIST_FIRST(&ia->ia6_memberships)) != NULL) {
LIST_REMOVE(imm, i6mm_chain);
- in6_leavegroup(imm);
+ if (imm->i6mm_maddr != NULL)
+ in6_leavegroup(imm->i6mm_maddr, NULL);
+ free(imm, M_IP6MADDR);
}
plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); /* XXX */
if ((ia->ia_flags & IFA_ROUTE) && plen == 128) {
Index: sys/netinet6/in6_ifattach.c
===================================================================
--- sys/netinet6/in6_ifattach.c
+++ sys/netinet6/in6_ifattach.c
@@ -867,13 +867,13 @@
static void
in6_purgemaddrs(struct ifnet *ifp)
{
- LIST_HEAD(,in6_multi) purgeinms;
- struct in6_multi *inm, *tinm;
+ struct in6_multi_head purgeinms;
+ struct in6_multi *inm;
struct ifmultiaddr *ifma;
- LIST_INIT(&purgeinms);
+ SLIST_INIT(&purgeinms);
IN6_MULTI_LOCK();
-
+	IN6_MULTI_LIST_LOCK();
/*
* Extract list of in6_multi associated with the detaching ifp
* which the PF_INET6 layer is about to release.
@@ -886,17 +886,14 @@
ifma->ifma_protospec == NULL)
continue;
inm = (struct in6_multi *)ifma->ifma_protospec;
- LIST_INSERT_HEAD(&purgeinms, inm, in6m_entry);
+ if (--inm->in6m_refcount == 0)
+ SLIST_INSERT_HEAD(&purgeinms, inm, in6m_nrele);
}
IF_ADDR_RUNLOCK(ifp);
-
- LIST_FOREACH_SAFE(inm, &purgeinms, in6m_entry, tinm) {
- LIST_REMOVE(inm, in6m_entry);
- in6m_release_locked(inm);
- }
mld_ifdetach(ifp);
-
+ IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
+ in6m_release_list_deferred(&purgeinms);
}
void
Index: sys/netinet6/in6_mcast.c
===================================================================
--- sys/netinet6/in6_mcast.c
+++ sys/netinet6/in6_mcast.c
@@ -47,7 +47,6 @@
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
-#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/ktr.h>
@@ -58,6 +57,9 @@
#include <net/if_dl.h>
#include <net/route.h>
#include <net/vnet.h>
+#include <net/ethernet.h>
+#include <net/iflib.h>
+
#include <netinet/in.h>
#include <netinet/in_var.h>
@@ -89,7 +91,7 @@
static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
"IPv6 multicast PCB-layer source filter");
-static MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
+MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
"IPv6 multicast MLD-layer source filter");
@@ -107,8 +109,16 @@
* any need for in6_multi itself to be virtualized -- it is bound to an ifp
* anyway no matter what happens.
*/
-struct mtx in6_multi_mtx;
-MTX_SYSINIT(in6_multi_mtx, &in6_multi_mtx, "in6_multi_mtx", MTX_DEF);
+struct mtx in6_multi_list_mtx;
+MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF);
+
+struct mtx in6_multi_free_mtx;
+MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF);
+
+struct sx in6_multi_sx;
+SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx");
+
+
static void im6f_commit(struct in6_mfilter *);
static int im6f_get_source(struct in6_mfilter *imf,
@@ -130,7 +140,7 @@
const struct sockaddr *);
static void im6s_merge(struct ip6_msource *ims,
const struct in6_msource *lims, const int rollback);
-static int in6_mc_get(struct ifnet *, const struct in6_addr *,
+static int in6_getmulti(struct ifnet *, const struct in6_addr *,
struct in6_multi **);
static int in6m_get_source(struct in6_multi *inm,
const struct in6_addr *addr, const int noalloc,
@@ -389,7 +399,7 @@
* Return 0 if successful, otherwise return an appropriate error code.
*/
static int
-in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
+in6_getmulti(struct ifnet *ifp, const struct in6_addr *group,
struct in6_multi **pinm)
{
struct sockaddr_in6 gsin6;
@@ -406,7 +416,6 @@
*/
IN6_MULTI_LOCK_ASSERT();
IF_ADDR_WLOCK(ifp);
-
inm = in6m_lookup_locked(ifp, group);
if (inm != NULL) {
/*
@@ -417,8 +426,9 @@
("%s: bad refcount %d", __func__, inm->in6m_refcount));
++inm->in6m_refcount;
*pinm = inm;
- goto out_locked;
}
+ if (inm != NULL)
+ goto out_locked;
memset(&gsin6, 0, sizeof(gsin6));
gsin6.sin6_family = AF_INET6;
@@ -491,7 +501,7 @@
ifma->ifma_protospec = inm;
*pinm = inm;
-out_locked:
+ out_locked:
IF_ADDR_WUNLOCK(ifp);
return (error);
}
@@ -502,21 +512,15 @@
* If the refcount drops to 0, free the in6_multi record and
* delete the underlying link-layer membership.
*/
-void
-in6m_release_locked(struct in6_multi *inm)
+static void
+in6m_release(struct in6_multi *inm)
{
struct ifmultiaddr *ifma;
- IN6_MULTI_LOCK_ASSERT();
CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount);
- if (--inm->in6m_refcount > 0) {
- CTR2(KTR_MLD, "%s: refcount is now %d", __func__,
- inm->in6m_refcount);
- return;
- }
-
+ MPASS(inm->in6m_refcount == 0);
CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm);
ifma = inm->in6m_ifma;
@@ -528,12 +532,63 @@
ifma->ifma_protospec = NULL;
in6m_purge(inm);
-
free(inm, M_IP6MADDR);
if_delmulti_ifma(ifma);
}
+static struct grouptask free_gtask;
+static struct in6_multi_head in6m_free_list;
+static void in6m_release_task(void *arg __unused);
+static void in6m_init()
+{
+ SLIST_INIT(&in6m_free_list);
+ iflib_config_gtask_init(NULL, &free_gtask, in6m_release_task, "in6m release task");
+}
+
+SYSINIT(in6m_init, SI_SUB_SMP + 1, SI_ORDER_FIRST,
+ in6m_init, NULL);
+
+
+void
+in6m_release_list_deferred(struct in6_multi_head *inmh)
+{
+ mtx_lock(&in6_multi_free_mtx);
+ SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele);
+ mtx_unlock(&in6_multi_free_mtx);
+ GROUPTASK_ENQUEUE(&free_gtask);
+}
+
+void
+in6m_release_deferred(struct in6_multi *inm)
+{
+ struct in6_multi_head tmp;
+
+ if (--inm->in6m_refcount == 0) {
+ SLIST_INIT(&tmp);
+ SLIST_INSERT_HEAD(&tmp, inm, in6m_nrele);
+ in6m_release_list_deferred(&tmp);
+ }
+}
+
+static void
+in6m_release_task(void *arg __unused)
+{
+ struct in6_multi_head in6m_free_tmp;
+ struct in6_multi *inm, *tinm;
+
+ SLIST_INIT(&in6m_free_tmp);
+ mtx_lock(&in6_multi_free_mtx);
+ SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele);
+ mtx_unlock(&in6_multi_free_mtx);
+ IN6_MULTI_LOCK();
+ SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) {
+ SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele);
+ in6m_release(inm);
+ }
+ IN6_MULTI_UNLOCK();
+}
+
/*
* Clear recorded source entries for a group.
* Used by the MLD code. Caller must hold the IN6_MULTI lock.
@@ -544,7 +599,7 @@
{
struct ip6_msource *ims;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
if (ims->im6s_stp) {
@@ -584,7 +639,7 @@
struct ip6_msource find;
struct ip6_msource *ims, *nims;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
find.im6s_addr = *addr;
ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
@@ -911,6 +966,7 @@
schanged = 0;
error = 0;
nsrc1 = nsrc0 = 0;
+ IN6_MULTI_LIST_LOCK_ASSERT();
/*
* Update the source filters first, as this may fail.
@@ -1087,65 +1143,16 @@
*
* SMPng: Assume no mc locks held by caller.
*/
-struct in6_multi_mship *
-in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr,
- int *errorp, int delay)
-{
- struct in6_multi_mship *imm;
- int error;
-
- imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT);
- if (imm == NULL) {
- *errorp = ENOBUFS;
- return (NULL);
- }
-
- delay = (delay * PR_FASTHZ) / hz;
-
- error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
- if (error) {
- *errorp = error;
- free(imm, M_IP6MADDR);
- return (NULL);
- }
-
- return (imm);
-}
-
-/*
- * Leave a multicast address w/o sources.
- * KAME compatibility entry point.
- *
- * SMPng: Assume no mc locks held by caller.
- */
-int
-in6_leavegroup(struct in6_multi_mship *imm)
-{
-
- if (imm->i6mm_maddr != NULL)
- in6_mc_leave(imm->i6mm_maddr, NULL);
- free(imm, M_IP6MADDR);
- return 0;
-}
-
-/*
- * Join a multicast group; unlocked entry point.
- *
- * SMPng: XXX: in6_mc_join() is called from in6_control() when upper
- * locks are not held. Fortunately, ifp is unlikely to have been detached
- * at this point, so we assume it's OK to recurse.
- */
int
-in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
+in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr,
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
const int delay)
{
int error;
IN6_MULTI_LOCK();
- error = in6_mc_join_locked(ifp, mcaddr, imf, pinm, delay);
+ error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay);
IN6_MULTI_UNLOCK();
-
return (error);
}
@@ -1159,7 +1166,7 @@
* code is returned.
*/
int
-in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
+in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
const int delay)
{
@@ -1185,6 +1192,7 @@
#endif
IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_UNLOCK_ASSERT();
CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__,
ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp));
@@ -1200,13 +1208,13 @@
im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
imf = &timf;
}
-
- error = in6_mc_get(ifp, mcaddr, &inm);
+ error = in6_getmulti(ifp, mcaddr, &inm);
if (error) {
- CTR1(KTR_MLD, "%s: in6_mc_get() failure", __func__);
+ CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__);
return (error);
}
+ IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
if (error) {
@@ -1222,9 +1230,10 @@
}
out_in6m_release:
+ IN6_MULTI_LIST_UNLOCK();
if (error) {
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
- in6m_release_locked(inm);
+ in6m_release_deferred(inm);
} else {
*pinm = inm;
}
@@ -1236,14 +1245,13 @@
* Leave a multicast group; unlocked entry point.
*/
int
-in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
int error;
IN6_MULTI_LOCK();
- error = in6_mc_leave_locked(inm, imf);
+ error = in6_leavegroup_locked(inm, imf);
IN6_MULTI_UNLOCK();
-
return (error);
}
@@ -1261,7 +1269,7 @@
* makes a state change downcall into MLD.
*/
int
-in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
struct in6_mfilter timf;
int error;
@@ -1304,7 +1312,7 @@
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
- in6m_release_locked(inm);
+ in6m_release_deferred(inm);
return (error);
}
@@ -1446,8 +1454,7 @@
/*
* Begin state merge transaction at MLD layer.
*/
- IN6_MULTI_LOCK();
-
+ IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
if (error)
@@ -1459,7 +1466,7 @@
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
}
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
out_im6f_rollback:
if (error)
@@ -1543,7 +1550,7 @@
if (imf)
im6f_leave(imf);
/* XXX this will thrash the lock(s) */
- (void)in6_mc_leave(imo->im6o_membership[idx], imf);
+ (void)in6_leavegroup(imo->im6o_membership[idx], imf);
if (imf)
im6f_purge(imf);
}
@@ -2037,7 +2044,7 @@
IN6_MULTI_LOCK();
if (is_new) {
- error = in6_mc_join_locked(ifp, &gsa->sin6.sin6_addr, imf,
+ error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf,
&inm, 0);
if (error) {
IN6_MULTI_UNLOCK();
@@ -2046,6 +2053,7 @@
imo->im6o_membership[idx] = inm;
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
@@ -2057,6 +2065,7 @@
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
+ IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
@@ -2282,9 +2291,10 @@
* Give up the multicast address record to which
* the membership points.
*/
- (void)in6_mc_leave_locked(inm, imf);
+ (void)in6_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
@@ -2296,6 +2306,7 @@
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
+ IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
@@ -2505,7 +2516,7 @@
goto out_im6f_rollback;
INP_WLOCK_ASSERT(inp);
- IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
/*
* Begin state merge transaction at MLD layer.
@@ -2521,7 +2532,7 @@
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
}
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
out_im6f_rollback:
if (error)
@@ -2712,7 +2723,7 @@
return (retval);
IN6_MULTI_LOCK();
-
+ IN6_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_INET6 ||
@@ -2744,6 +2755,7 @@
}
IF_ADDR_RUNLOCK(ifp);
+ IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
return (retval);
Index: sys/netinet6/in6_pcb.c
===================================================================
--- sys/netinet6/in6_pcb.c
+++ sys/netinet6/in6_pcb.c
@@ -805,8 +805,7 @@
for (i = 0; i < im6o->im6o_num_memberships; i++) {
if (im6o->im6o_membership[i]->in6m_ifp ==
ifp) {
- in6_mc_leave(im6o->im6o_membership[i],
- NULL);
+ in6_leavegroup(im6o->im6o_membership[i], NULL);
gap++;
} else if (gap != 0) {
im6o->im6o_membership[i - gap] =
Index: sys/netinet6/in6_var.h
===================================================================
--- sys/netinet6/in6_var.h
+++ sys/netinet6/in6_var.h
@@ -100,6 +100,9 @@
struct scope6_id;
struct lltable;
struct mld_ifsoftc;
+struct in6_multi;
+SLIST_HEAD(in6_multi_head, in6_multi);
+MALLOC_DECLARE(M_IP6MADDR);
struct in6_ifextra {
counter_u64_t *in6_ifstat;
@@ -630,7 +633,6 @@
* w/o breaking the ABI for ifmcstat.
*/
struct in6_multi {
- LIST_ENTRY(in6_multi) in6m_entry; /* list glue */
struct in6_addr in6m_addr; /* IPv6 multicast address */
struct ifnet *in6m_ifp; /* back pointer to ifnet */
struct ifmultiaddr *in6m_ifma; /* back pointer to ifmultiaddr */
@@ -694,11 +696,18 @@
* consumers of IN_*_MULTI() macros should acquire the locks before
* calling them; users of the in_{add,del}multi() functions should not.
*/
-extern struct mtx in6_multi_mtx;
-#define IN6_MULTI_LOCK() mtx_lock(&in6_multi_mtx)
-#define IN6_MULTI_UNLOCK() mtx_unlock(&in6_multi_mtx)
-#define IN6_MULTI_LOCK_ASSERT() mtx_assert(&in6_multi_mtx, MA_OWNED)
-#define IN6_MULTI_UNLOCK_ASSERT() mtx_assert(&in6_multi_mtx, MA_NOTOWNED)
+extern struct mtx in6_multi_list_mtx;
+extern struct sx in6_multi_sx;
+
+#define IN6_MULTI_LIST_LOCK() mtx_lock(&in6_multi_list_mtx)
+#define IN6_MULTI_LIST_UNLOCK() mtx_unlock(&in6_multi_list_mtx)
+#define IN6_MULTI_LIST_LOCK_ASSERT() mtx_assert(&in6_multi_list_mtx, MA_OWNED)
+#define IN6_MULTI_LIST_UNLOCK_ASSERT() mtx_assert(&in6_multi_list_mtx, MA_NOTOWNED)
+
+#define IN6_MULTI_LOCK() sx_xlock(&in6_multi_sx)
+#define IN6_MULTI_UNLOCK() sx_xunlock(&in6_multi_sx)
+#define IN6_MULTI_LOCK_ASSERT() sx_assert(&in6_multi_sx, SA_XLOCKED)
+#define IN6_MULTI_UNLOCK_ASSERT() sx_assert(&in6_multi_sx, SA_XUNLOCKED)
/*
* Look up an in6_multi record for an IPv6 multicast address
@@ -713,7 +722,6 @@
struct ifmultiaddr *ifma;
struct in6_multi *inm;
- IN6_MULTI_LOCK_ASSERT();
IF_ADDR_LOCK_ASSERT(ifp);
inm = NULL;
@@ -738,11 +746,11 @@
{
struct in6_multi *inm;
- IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
inm = in6m_lookup_locked(ifp, mcaddr);
IF_ADDR_RUNLOCK(ifp);
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
return (inm);
}
@@ -752,7 +760,7 @@
in6m_acquire_locked(struct in6_multi *inm)
{
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
++inm->in6m_refcount;
}
@@ -762,26 +770,22 @@
/* Multicast KPIs. */
int im6o_mc_filter(const struct ip6_moptions *, const struct ifnet *,
const struct sockaddr *, const struct sockaddr *);
-int in6_mc_join(struct ifnet *, const struct in6_addr *,
+int in6_joingroup(struct ifnet *, const struct in6_addr *,
struct in6_mfilter *, struct in6_multi **, int);
-int in6_mc_join_locked(struct ifnet *, const struct in6_addr *,
+int in6_joingroup_locked(struct ifnet *, const struct in6_addr *,
struct in6_mfilter *, struct in6_multi **, int);
-int in6_mc_leave(struct in6_multi *, struct in6_mfilter *);
-int in6_mc_leave_locked(struct in6_multi *, struct in6_mfilter *);
+int in6_leavegroup(struct in6_multi *, struct in6_mfilter *);
+int in6_leavegroup_locked(struct in6_multi *, struct in6_mfilter *);
void in6m_clear_recorded(struct in6_multi *);
void in6m_commit(struct in6_multi *);
void in6m_print(const struct in6_multi *);
int in6m_record_source(struct in6_multi *, const struct in6_addr *);
-void in6m_release_locked(struct in6_multi *);
+void in6m_release_deferred(struct in6_multi *);
+void in6m_release_list_deferred(struct in6_multi_head *);
void ip6_freemoptions(struct ip6_moptions *);
int ip6_getmoptions(struct inpcb *, struct sockopt *);
int ip6_setmoptions(struct inpcb *, struct sockopt *);
-/* Legacy KAME multicast KPIs. */
-struct in6_multi_mship *
- in6_joingroup(struct ifnet *, struct in6_addr *, int *, int);
-int in6_leavegroup(struct in6_multi_mship *);
-
/* flags to in6_update_ifa */
#define IN6_IFAUPDATE_DADDELAY 0x1 /* first time to configure an address */
Index: sys/netinet6/mld6.c
===================================================================
--- sys/netinet6/mld6.c
+++ sys/netinet6/mld6.c
@@ -124,7 +124,7 @@
/*const*/ struct mld_hdr *);
static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
/*const*/ struct mld_hdr *);
-static void mld_v1_process_group_timer(struct mld_ifsoftc *,
+static void mld_v1_process_group_timer(struct in6_multi_head *,
struct in6_multi *);
static void mld_v1_process_querier_timers(struct mld_ifsoftc *);
static int mld_v1_transmit_report(struct in6_multi *, const int);
@@ -142,7 +142,7 @@
struct mbuf *, const int, const int);
static int mld_v2_merge_state_changes(struct in6_multi *,
struct mbufq *);
-static void mld_v2_process_group_timers(struct mld_ifsoftc *,
+static void mld_v2_process_group_timers(struct in6_multi_head *,
struct mbufq *, struct mbufq *,
struct in6_multi *, const int);
static int mld_v2_process_group_query(struct in6_multi *,
@@ -377,6 +377,7 @@
return (error);
IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
MLD_LOCK();
if (name[0] <= 0 || name[0] > V_if_index) {
@@ -409,6 +410,7 @@
out_locked:
MLD_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
return (error);
}
@@ -508,7 +510,6 @@
mli->mli_qi = MLD_QI_INIT;
mli->mli_qri = MLD_QRI_INIT;
mli->mli_uri = MLD_URI_INIT;
- SLIST_INIT(&mli->mli_relinmhead);
mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
@@ -536,12 +537,14 @@
{
struct mld_ifsoftc *mli;
struct ifmultiaddr *ifma;
- struct in6_multi *inm, *tinm;
+ struct in6_multi *inm;
+ struct in6_multi_head inmh;
CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
if_name(ifp));
- IN6_MULTI_LOCK_ASSERT();
+ SLIST_INIT(&inmh);
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK();
mli = MLD_IFINFO(ifp);
@@ -553,17 +556,13 @@
continue;
inm = (struct in6_multi *)ifma->ifma_protospec;
if (inm->in6m_state == MLD_LEAVING_MEMBER) {
- SLIST_INSERT_HEAD(&mli->mli_relinmhead,
- inm, in6m_nrele);
+ if (--inm->in6m_refcount == 0)
+ SLIST_INSERT_HEAD(&inmh, inm, in6m_nrele);
}
in6m_clear_recorded(inm);
}
IF_ADDR_RUNLOCK(ifp);
- SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
- tinm) {
- SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
- in6m_release_locked(inm);
- }
+ in6m_release_list_deferred(&inmh);
}
MLD_UNLOCK();
@@ -606,10 +605,6 @@
LIST_REMOVE(mli, mli_link);
- KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
- ("%s: there are dangling in_multi references",
- __func__));
-
free(mli, M_MLD);
return;
}
@@ -680,7 +675,7 @@
in6_setscope(&mld->mld_addr, ifp, NULL);
}
- IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
MLD_LOCK();
/*
@@ -728,7 +723,7 @@
IF_ADDR_RUNLOCK(ifp);
MLD_UNLOCK();
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
return (0);
}
@@ -882,7 +877,7 @@
in6_setscope(&mld->mld_addr, ifp, NULL);
}
- IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
MLD_LOCK();
mli = MLD_IFINFO(ifp);
@@ -965,7 +960,7 @@
out_locked:
MLD_UNLOCK();
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
return (0);
}
@@ -983,7 +978,7 @@
int retval;
uint16_t nsrc;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
retval = 0;
@@ -1168,7 +1163,7 @@
if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
in6_setscope(&mld->mld_addr, ifp, NULL);
- IN6_MULTI_LOCK();
+ IN6_MULTI_LIST_LOCK();
MLD_LOCK();
IF_ADDR_RLOCK(ifp);
@@ -1220,7 +1215,7 @@
out_locked:
IF_ADDR_RUNLOCK(ifp);
MLD_UNLOCK();
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
/* XXX Clear embedded scope ID as userland won't expect it. */
in6_clearscope(&mld->mld_addr);
@@ -1333,6 +1328,7 @@
struct mld_ifsoftc *mli;
struct ifmultiaddr *ifma;
struct in6_multi *inm, *tinm;
+ struct in6_multi_head inmh;
int uri_fasthz;
uri_fasthz = 0;
@@ -1347,7 +1343,8 @@
!V_state_change_timers_running6)
return;
- IN6_MULTI_LOCK();
+ SLIST_INIT(&inmh);
+ IN6_MULTI_LIST_LOCK();
MLD_LOCK();
/*
@@ -1399,10 +1396,10 @@
inm = (struct in6_multi *)ifma->ifma_protospec;
switch (mli->mli_version) {
case MLD_VERSION_1:
- mld_v1_process_group_timer(mli, inm);
+ mld_v1_process_group_timer(&inmh, inm);
break;
case MLD_VERSION_2:
- mld_v2_process_group_timers(mli, &qrq,
+ mld_v2_process_group_timers(&inmh, &qrq,
&scq, inm, uri_fasthz);
break;
}
@@ -1419,9 +1416,8 @@
* IF_ADDR_LOCK internally as well as
* ip6_output() to transmit a packet.
*/
- SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
- in6m_nrele, tinm) {
- SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
+ SLIST_FOREACH_SAFE(inm, &inmh, in6m_nrele, tinm) {
+ SLIST_REMOVE_HEAD(&inmh,
in6m_nrele);
(void)mld_v1_transmit_report(inm,
MLD_LISTENER_REPORT);
@@ -1435,19 +1431,14 @@
* Free the in_multi reference(s) for
* this lifecycle.
*/
- SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
- in6m_nrele, tinm) {
- SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
- in6m_nrele);
- in6m_release_locked(inm);
- }
+ in6m_release_list_deferred(&inmh);
break;
}
}
out_locked:
MLD_UNLOCK();
- IN6_MULTI_UNLOCK();
+ IN6_MULTI_LIST_UNLOCK();
}
/*
@@ -1455,11 +1446,11 @@
* Will update the global pending timer flags.
*/
static void
-mld_v1_process_group_timer(struct mld_ifsoftc *mli, struct in6_multi *inm)
+mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm)
{
int report_timer_expired;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
if (inm->in6m_timer == 0) {
@@ -1482,8 +1473,8 @@
case MLD_REPORTING_MEMBER:
if (report_timer_expired) {
inm->in6m_state = MLD_IDLE_MEMBER;
- SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
- in6m_nrele);
+ if (--inm->in6m_refcount == 0)
+ SLIST_INSERT_HEAD(inmh, inm, in6m_nrele);
}
break;
case MLD_G_QUERY_PENDING_MEMBER:
@@ -1499,7 +1490,7 @@
* Note: Unlocked read from mli.
*/
static void
-mld_v2_process_group_timers(struct mld_ifsoftc *mli,
+mld_v2_process_group_timers(struct in6_multi_head *inmh,
struct mbufq *qrq, struct mbufq *scq,
struct in6_multi *inm, const int uri_fasthz)
{
@@ -1509,7 +1500,7 @@
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
query_response_timer_expired = 0;
@@ -1607,8 +1598,8 @@
if (inm->in6m_state == MLD_LEAVING_MEMBER &&
inm->in6m_scrv == 0) {
inm->in6m_state = MLD_NOT_MEMBER;
- SLIST_INSERT_HEAD(&mli->mli_relinmhead,
- inm, in6m_nrele);
+ if (--inm->in6m_refcount == 0)
+ SLIST_INSERT_HEAD(inmh, inm, in6m_nrele);
}
}
break;
@@ -1654,12 +1645,14 @@
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
- struct in6_multi *inm, *tinm;
+ struct in6_multi *inm;
+ struct in6_multi_head inmh;
CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
mli->mli_ifp, if_name(mli->mli_ifp));
- IN6_MULTI_LOCK_ASSERT();
+ SLIST_INIT(&inmh);
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
/*
@@ -1694,8 +1687,8 @@
* version, we need to release the final
* reference held for issuing the INCLUDE {}.
*/
- SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
- in6m_nrele);
+ if (--inm->in6m_refcount == 0)
+ SLIST_INSERT_HEAD(&inmh, inm, in6m_nrele);
/* FALLTHROUGH */
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_SG_QUERY_PENDING_MEMBER:
@@ -1713,10 +1706,7 @@
}
}
IF_ADDR_RUNLOCK(ifp);
- SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
- SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
- in6m_release_locked(inm);
- }
+ in6m_release_list_deferred(&inmh);
}
/*
@@ -1788,7 +1778,7 @@
struct mbuf *mh, *md;
struct mld_hdr *mld;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
ifp = in6m->in6m_ifp;
@@ -1879,7 +1869,7 @@
struct ifnet *ifp;
int error;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
error = 0;
@@ -1963,7 +1953,7 @@
ifp = inm->in6m_ifp;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
@@ -1993,7 +1983,7 @@
*/
if (mli->mli_version == MLD_VERSION_2 &&
inm->in6m_state == MLD_LEAVING_MEMBER)
- in6m_release_locked(inm);
+ in6m_release_deferred(inm);
inm->in6m_state = MLD_REPORTING_MEMBER;
@@ -2106,7 +2096,7 @@
ifp = inm->in6m_ifp;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
KASSERT(mli && mli->mli_ifp == ifp,
@@ -2169,7 +2159,7 @@
__func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
inm->in6m_ifp, if_name(inm->in6m_ifp));
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
switch (inm->in6m_state) {
@@ -2296,7 +2286,7 @@
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
ifp = inm->in6m_ifp;
is_filter_list_change = 0;
@@ -2679,7 +2669,7 @@
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
if (inm->in6m_nsrc == 0 ||
(inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
@@ -2879,7 +2869,7 @@
domerge = 0;
recslen = 0;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
/*
@@ -2978,7 +2968,7 @@
struct in6_multi *inm;
int retval;
- IN6_MULTI_LOCK_ASSERT();
+ IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
KASSERT(mli->mli_version == MLD_VERSION_2,
Index: sys/netinet6/mld6_var.h
===================================================================
--- sys/netinet6/mld6_var.h
+++ sys/netinet6/mld6_var.h
@@ -136,7 +136,6 @@
uint32_t mli_qi; /* MLDv2 Query Interval (s) */
uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */
uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */
- SLIST_HEAD(,in6_multi) mli_relinmhead; /* released groups */
struct mbufq mli_gq; /* queue of general query responses */
};

File Metadata

Mime Type
text/plain
Expires
Mon, Apr 6, 3:18 PM (5 h, 26 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
30980068
Default Alt Text
D14969.id41284.diff (58 KB)

Event Timeline