diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -551,6 +551,7 @@
 TAILQ_HEAD(pf_kpalist, pf_kpooladdr);
 
 struct pf_kpool {
+        struct mtx               mtx;
         struct pf_kpalist        list;
         struct pf_kpooladdr     *cur;
         struct pf_poolhashkey    key;
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -1542,6 +1542,8 @@
         counter_u64_free(rule->states_cur);
         counter_u64_free(rule->states_tot);
         counter_u64_free(rule->src_nodes);
+
+        mtx_destroy(&rule->rpool.mtx);
         free(rule, M_PFRULE);
 }
 
@@ -1999,6 +2001,8 @@
         TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
             rule, entries);
         ruleset->rules[rs_num].inactive.rcount++;
+
+        mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
         PF_RULES_WUNLOCK();
 
         return (0);
diff --git a/sys/netpfil/pf/pf_lb.c b/sys/netpfil/pf/pf_lb.c
--- a/sys/netpfil/pf/pf_lb.c
+++ b/sys/netpfil/pf/pf_lb.c
@@ -374,36 +374,45 @@
                 return (0);
         }
 
+        mtx_lock(&rpool->mtx);
         /* Find the route using chosen algorithm. Store the found route
            in src_node if it was given or found. */
-        if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
+        if (rpool->cur->addr.type == PF_ADDR_NOROUTE) {
+                mtx_unlock(&rpool->mtx);
                 return (1);
+        }
         if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
                 switch (af) {
 #ifdef INET
                 case AF_INET:
                         if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
                             (rpool->opts & PF_POOL_TYPEMASK) !=
-                            PF_POOL_ROUNDROBIN)
+                            PF_POOL_ROUNDROBIN) {
+                                mtx_unlock(&rpool->mtx);
                                 return (1);
-                        raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
-                        rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
+                        }
+                        raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
+                        rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
                         break;
 #endif /* INET */
 #ifdef INET6
                 case AF_INET6:
                         if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
                             (rpool->opts & PF_POOL_TYPEMASK) !=
-                            PF_POOL_ROUNDROBIN)
+                            PF_POOL_ROUNDROBIN) {
+                                mtx_unlock(&rpool->mtx);
                                 return (1);
+                        }
                         raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
                         rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
                         break;
 #endif /* INET6 */
                 }
         } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
-                if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
+                if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) {
+                        mtx_unlock(&rpool->mtx);
                         return (1); /* unsupported */
+                }
         } else {
                 raddr = &rpool->cur->addr.v.a.addr;
                 rmask = &rpool->cur->addr.v.a.mask;
@@ -467,27 +476,6 @@
         {
                 struct pf_kpooladdr     *acur = rpool->cur;
 
-                /*
-                 * XXXGL: in the round-robin case we need to store
-                 * the round-robin machine state in the rule, thus
-                 * forwarding thread needs to modify rule.
-                 *
-                 * This is done w/o locking, because performance is assumed
-                 * more important than round-robin precision.
-                 *
-                 * In the simpliest case we just update the "rpool->cur"
-                 * pointer. However, if pool contains tables or dynamic
-                 * addresses, then "tblidx" is also used to store machine
-                 * state. Since "tblidx" is int, concurrent access to it can't
-                 * lead to inconsistence, only to lost of precision.
-                 *
-                 * Things get worse, if table contains not hosts, but
-                 * prefixes. In this case counter also stores machine state,
-                 * and for IPv6 address, counter can't be updated atomically.
-                 * Probably, using round-robin on a table containing IPv6
-                 * prefixes (or even IPv4) would cause a panic.
-                 */
-
                 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
                         if (!pfr_pool_get(rpool->cur->addr.p.tbl,
                             &rpool->tblidx, &rpool->counter, af))
@@ -511,6 +499,7 @@
                         /* table contains no address of type 'af' */
                         if (rpool->cur != acur)
                                 goto try_next;
+                        mtx_unlock(&rpool->mtx);
                         return (1);
                 }
         } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
@@ -520,6 +509,7 @@
                         /* table contains no address of type 'af' */
                         if (rpool->cur != acur)
                                 goto try_next;
+                        mtx_unlock(&rpool->mtx);
                         return (1);
                 }
         } else {
@@ -539,6 +529,8 @@
         if (*sn != NULL)
                 PF_ACPY(&(*sn)->raddr, naddr, af);
 
+        mtx_unlock(&rpool->mtx);
+
         if (V_pf_status.debug >= PF_DEBUG_NOISY &&
             (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
                 printf("pf_map_addr: selected address ");
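The pattern the diff introduces: the round-robin cursor state that pf_map_addr() mutates (rpool->cur, tblidx, counter) was previously updated without locking, as the removed XXXGL comment explained; now a per-pool mutex is created when the rule is added (mtx_init in pf_ioctl.c), taken before the pool cursor is consulted, released on every early-return path in pf_map_addr(), and destroyed when the rule is freed. Below is a minimal userspace sketch of that lock lifecycle, using pthreads instead of the kernel's mtx(9) API; the addr_pool type and the pool_* helpers are invented for illustration and are not part of pf.

/*
 * Sketch only: models "per-pool mutex guards the round-robin cursor".
 * Build with: cc -pthread pool_sketch.c
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct addr_pool {
        pthread_mutex_t  mtx;           /* plays the role of pf_kpool.mtx */
        const char     **addrs;         /* candidate addresses */
        size_t           naddrs;
        size_t           cur;           /* round-robin cursor: shared, mutable */
};

static void
pool_init(struct addr_pool *p, const char **addrs, size_t naddrs)
{
        /* Mirrors mtx_init() in pf_ioctl.c: the lock is created together
         * with the pool it protects. */
        pthread_mutex_init(&p->mtx, NULL);
        p->addrs = addrs;
        p->naddrs = naddrs;
        p->cur = 0;
}

static const char *
pool_next_addr(struct addr_pool *p)
{
        const char *a;

        if (p->naddrs == 0)
                return (NULL);

        /* Mirrors pf_map_addr() after this change: take the pool mutex,
         * read and advance the cursor, drop the mutex on every exit path. */
        pthread_mutex_lock(&p->mtx);
        a = p->addrs[p->cur];
        p->cur = (p->cur + 1) % p->naddrs;
        pthread_mutex_unlock(&p->mtx);

        return (a);
}

static void
pool_destroy(struct addr_pool *p)
{
        /* Mirrors mtx_destroy() where the rule is freed: the lock dies
         * with the pool. */
        pthread_mutex_destroy(&p->mtx);
}

int
main(void)
{
        const char *addrs[] = { "192.0.2.1", "192.0.2.2", "192.0.2.3" };
        struct addr_pool pool;

        pool_init(&pool, addrs, 3);
        for (int i = 0; i < 5; i++)
                printf("%s\n", pool_next_addr(&pool));
        pool_destroy(&pool);
        return (0);
}

The sketch keeps the critical section as small as the patch does: only the read-and-advance of the cursor is serialized, so concurrent forwarding threads still proceed in parallel except when they touch the same pool's round-robin state.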