Changeset View
Standalone View
sys/netpfil/pf/pf.c
Show First 20 Lines • Show All 277 Lines • ▼ Show 20 Lines | |||||
MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats", | MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats", | ||||
MTX_DEF); | MTX_DEF); | ||||
VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z); | VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z); | ||||
#define V_pf_sources_z VNET(pf_sources_z) | #define V_pf_sources_z VNET(pf_sources_z) | ||||
uma_zone_t pf_mtag_z; | uma_zone_t pf_mtag_z; | ||||
VNET_DEFINE(uma_zone_t, pf_state_z); | VNET_DEFINE(uma_zone_t, pf_state_z); | ||||
VNET_DEFINE(uma_zone_t, pf_state_key_z); | VNET_DEFINE(uma_zone_t, pf_state_key_z); | ||||
VNET_DEFINE(uma_zone_t, pf_udp_mapping_z); | |||||
VNET_DEFINE(struct unrhdr64, pf_stateid); | VNET_DEFINE(struct unrhdr64, pf_stateid); | ||||
static void pf_src_tree_remove_state(struct pf_kstate *); | static void pf_src_tree_remove_state(struct pf_kstate *); | ||||
static void pf_init_threshold(struct pf_threshold *, u_int32_t, | static void pf_init_threshold(struct pf_threshold *, u_int32_t, | ||||
u_int32_t); | u_int32_t); | ||||
static void pf_add_threshold(struct pf_threshold *); | static void pf_add_threshold(struct pf_threshold *); | ||||
static int pf_check_threshold(struct pf_threshold *); | static int pf_check_threshold(struct pf_threshold *); | ||||
Show All 31 Lines | static int pf_test_rule(struct pf_krule **, struct pf_kstate **, | ||||
struct pf_pdesc *, struct pf_krule **, | struct pf_pdesc *, struct pf_krule **, | ||||
struct pf_kruleset **, struct inpcb *); | struct pf_kruleset **, struct inpcb *); | ||||
static int pf_create_state(struct pf_krule *, struct pf_krule *, | static int pf_create_state(struct pf_krule *, struct pf_krule *, | ||||
struct pf_krule *, struct pf_pdesc *, | struct pf_krule *, struct pf_pdesc *, | ||||
struct pf_ksrc_node *, struct pf_state_key *, | struct pf_ksrc_node *, struct pf_state_key *, | ||||
struct pf_state_key *, struct mbuf *, int, | struct pf_state_key *, struct mbuf *, int, | ||||
u_int16_t, u_int16_t, int *, struct pfi_kkif *, | u_int16_t, u_int16_t, int *, struct pfi_kkif *, | ||||
struct pf_kstate **, int, u_int16_t, u_int16_t, | struct pf_kstate **, int, u_int16_t, u_int16_t, | ||||
int, struct pf_krule_slist *); | int, struct pf_krule_slist *, struct pf_udp_mapping *); | ||||
static int pf_test_fragment(struct pf_krule **, struct pfi_kkif *, | static int pf_test_fragment(struct pf_krule **, struct pfi_kkif *, | ||||
struct mbuf *, void *, struct pf_pdesc *, | struct mbuf *, void *, struct pf_pdesc *, | ||||
struct pf_krule **, struct pf_kruleset **); | struct pf_krule **, struct pf_kruleset **); | ||||
static int pf_tcp_track_full(struct pf_kstate **, | static int pf_tcp_track_full(struct pf_kstate **, | ||||
struct pfi_kkif *, struct mbuf *, int, | struct pfi_kkif *, struct mbuf *, int, | ||||
struct pf_pdesc *, u_short *, int *); | struct pf_pdesc *, u_short *, int *); | ||||
static int pf_tcp_track_sloppy(struct pf_kstate **, | static int pf_tcp_track_sloppy(struct pf_kstate **, | ||||
struct pf_pdesc *, u_short *); | struct pf_pdesc *, u_short *); | ||||
▲ Show 20 Lines • Show All 140 Lines • ▼ Show 20 Lines | if (s->anchor.ptr != NULL) \ | ||||
counter_u64_add(s->anchor.ptr->states_cur, -1); \ | counter_u64_add(s->anchor.ptr->states_cur, -1); \ | ||||
counter_u64_add(s->rule.ptr->states_cur, -1); \ | counter_u64_add(s->rule.ptr->states_cur, -1); \ | ||||
SLIST_FOREACH(mrm, &s->match_rules, entry) \ | SLIST_FOREACH(mrm, &s->match_rules, entry) \ | ||||
counter_u64_add(mrm->r->states_cur, -1); \ | counter_u64_add(mrm->r->states_cur, -1); \ | ||||
} while (0) | } while (0) | ||||
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures"); | MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures"); | ||||
MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items"); | MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items"); | ||||
VNET_DEFINE(struct pf_keyhash *, pf_keyhash); | VNET_DEFINE(struct pf_keyhash *, pf_keyhash); | ||||
kp: I seem to be missing the allocation (and freeing) of this. Am I looking in the wrong place, or is it just not part of the patch?
VNET_DEFINE(struct pf_idhash *, pf_idhash); | VNET_DEFINE(struct pf_idhash *, pf_idhash); | ||||
VNET_DEFINE(struct pf_srchash *, pf_srchash); | VNET_DEFINE(struct pf_srchash *, pf_srchash); | ||||
VNET_DEFINE(struct pf_udpendpointhash *, pf_udpendpointhash); | |||||
VNET_DEFINE(struct pf_udpendpointmapping *, pf_udpendpointmapping); | |||||
SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, | SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, | ||||
"pf(4)"); | "pf(4)"); | ||||
VNET_DEFINE(u_long, pf_hashmask); | VNET_DEFINE(u_long, pf_hashmask); | ||||
VNET_DEFINE(u_long, pf_srchashmask); | VNET_DEFINE(u_long, pf_srchashmask); | ||||
VNET_DEFINE(u_long, pf_udpendpointhashmask); | |||||
VNET_DEFINE_STATIC(u_long, pf_hashsize); | VNET_DEFINE_STATIC(u_long, pf_hashsize); | ||||
#define V_pf_hashsize VNET(pf_hashsize) | #define V_pf_hashsize VNET(pf_hashsize) | ||||
VNET_DEFINE_STATIC(u_long, pf_srchashsize); | VNET_DEFINE_STATIC(u_long, pf_srchashsize); | ||||
#define V_pf_srchashsize VNET(pf_srchashsize) | #define V_pf_srchashsize VNET(pf_srchashsize) | ||||
VNET_DEFINE_STATIC(u_long, pf_udpendpointhashsize); | |||||
#define V_pf_udpendpointhashsize VNET(pf_udpendpointhashsize) | |||||
u_long pf_ioctl_maxcount = 65535; | u_long pf_ioctl_maxcount = 65535; | ||||
SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, | SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, | ||||
&VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable"); | &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable"); | ||||
SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, | SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, | ||||
&VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable"); | &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable"); | ||||
SYSCTL_ULONG(_net_pf, OID_AUTO, udpendpoint_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, | |||||
&VNET_NAME(pf_udpendpointhashsize), 0, "Size of pf(4) endpoint hashtable"); | |||||
kpUnsubmitted Done Inline ActionsThat'll want an entry in the pf(4) man page. kp: That'll want an entry in the pf(4) man page. | |||||
SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN, | SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN, | ||||
&pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call"); | &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call"); | ||||
VNET_DEFINE(void *, pf_swi_cookie); | VNET_DEFINE(void *, pf_swi_cookie); | ||||
VNET_DEFINE(struct intr_event *, pf_swi_ie); | VNET_DEFINE(struct intr_event *, pf_swi_ie); | ||||
VNET_DEFINE(uint32_t, pf_hashseed); | VNET_DEFINE(uint32_t, pf_hashseed); | ||||
#define V_pf_hashseed VNET(pf_hashseed) | #define V_pf_hashseed VNET(pf_hashseed) | ||||
▲ Show 20 Lines • Show All 165 Lines • ▼ Show 20 Lines | pf_hashsrc(struct pf_addr *addr, sa_family_t af) | ||||
switch (af) { | switch (af) { | ||||
case AF_INET: | case AF_INET: | ||||
h = murmur3_32_hash32((uint32_t *)&addr->v4, | h = murmur3_32_hash32((uint32_t *)&addr->v4, | ||||
sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed); | sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed); | ||||
break; | break; | ||||
case AF_INET6: | case AF_INET6: | ||||
h = murmur3_32_hash32((uint32_t *)&addr->v6, | h = murmur3_32_hash32((uint32_t *)&addr->v6, | ||||
sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed); | sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed); | ||||
Not Done Inline ActionsPlease use standard C keyword "inline" instead of "__inline" gcc-ism. glebius: Please use standard C keyword "inline" instead of "__inline" gcc-ism. | |||||
break; | break; | ||||
default: | default: | ||||
panic("%s: unknown address family %u", __func__, af); | panic("%s: unknown address family %u", __func__, af); | ||||
} | } | ||||
return (h & V_pf_srchashmask); | return (h & V_pf_srchashmask); | ||||
} | } | ||||
static inline uint32_t | |||||
pf_hashudpendpoint(struct pf_udp_endpoint *endpoint) | |||||
{ | |||||
uint32_t h; | |||||
h = murmur3_32_hash32((uint32_t *)endpoint, | |||||
sizeof(struct pf_udp_endpoint_cmp)/sizeof(uint32_t), | |||||
V_pf_hashseed); | |||||
return (h & V_pf_hashmask); | |||||
kpUnsubmitted Done Inline ActionsShouldn't that be V_pf_udpendpointhashmask ? kp: Shouldn't that be V_pf_udpendpointhashmask ? | |||||
} | |||||
#ifdef ALTQ | #ifdef ALTQ | ||||
static int | static int | ||||
pf_state_hash(struct pf_kstate *s) | pf_state_hash(struct pf_kstate *s) | ||||
{ | { | ||||
u_int32_t hv = (intptr_t)s / sizeof(*s); | u_int32_t hv = (intptr_t)s / sizeof(*s); | ||||
hv ^= crc32(&s->src, sizeof(s->src)); | hv ^= crc32(&s->src, sizeof(s->src)); | ||||
hv ^= crc32(&s->dst, sizeof(s->dst)); | hv ^= crc32(&s->dst, sizeof(s->dst)); | ||||
▲ Show 20 Lines • Show All 371 Lines • ▼ Show 20 Lines | |||||
/* Per-vnet data storage structures initialization. */ | /* Per-vnet data storage structures initialization. */ | ||||
void | void | ||||
pf_initialize(void) | pf_initialize(void) | ||||
{ | { | ||||
struct pf_keyhash *kh; | struct pf_keyhash *kh; | ||||
struct pf_idhash *ih; | struct pf_idhash *ih; | ||||
struct pf_srchash *sh; | struct pf_srchash *sh; | ||||
struct pf_udpendpointhash *uh; | |||||
u_int i; | u_int i; | ||||
if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize)) | if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize)) | ||||
V_pf_hashsize = PF_HASHSIZ; | V_pf_hashsize = PF_HASHSIZ; | ||||
if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize)) | if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize)) | ||||
V_pf_srchashsize = PF_SRCHASHSIZ; | V_pf_srchashsize = PF_SRCHASHSIZ; | ||||
if (V_pf_udpendpointhashsize == 0 || !powerof2(V_pf_udpendpointhashsize)) | |||||
V_pf_udpendpointhashsize = PF_HASHSIZ; | |||||
kpUnsubmitted Done Inline ActionsDo we want to default to the same size as the state table, or maybe something smaller? It's hardly scientific, but my local gateway has about twice as much TCP as UDP, and on my server box it's more like 4 to 1, so I'd go with half or even a quarter of PF_HASHSIZ as a default. kp: Do we want to default to the same size as the state table, or maybe something smaller?
It's… | |||||
V_pf_hashseed = arc4random(); | V_pf_hashseed = arc4random(); | ||||
/* States and state keys storage. */ | /* States and state keys storage. */ | ||||
V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate), | V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate), | ||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); | NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); | ||||
V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z; | V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z; | ||||
uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT); | uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT); | ||||
Show All 17 Lines | if (V_pf_keyhash == NULL || V_pf_idhash == NULL) { | ||||
V_pf_hashsize = PF_HASHSIZ; | V_pf_hashsize = PF_HASHSIZ; | ||||
V_pf_keyhash = mallocarray(V_pf_hashsize, | V_pf_keyhash = mallocarray(V_pf_hashsize, | ||||
sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO); | sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO); | ||||
V_pf_idhash = mallocarray(V_pf_hashsize, | V_pf_idhash = mallocarray(V_pf_hashsize, | ||||
sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO); | sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO); | ||||
} | } | ||||
V_pf_hashmask = V_pf_hashsize - 1; | V_pf_hashmask = V_pf_hashsize - 1; | ||||
for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; | for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; | ||||
i <= V_pf_hashmask; | |||||
kpUnsubmitted Done Inline ActionsI'd leave this out. kp: I'd leave this out. | |||||
i++, kh++, ih++) { | i++, kh++, ih++) { | ||||
mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK); | mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK); | ||||
mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF); | mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF); | ||||
} | } | ||||
Not Done Inline ActionsOh dear god. Another lock for the data path? At the very least it needs thorough documentation w.r.t what it locks and what the lock order expectations are. kp: Oh dear god. Another lock for the data path?
This is worrying.
At the very least it needs… | |||||
Not Done Inline Actions
Ick. Probably required for the same reason we need it for pf_keyhash, but ick nonetheless. kp: > MTX_DUPOK
Ick. Probably required for the same reason we need it for pf_keyhash, but ick… | |||||
/* Source nodes. */ | /* Source nodes. */ | ||||
V_pf_sources_z = uma_zcreate("pf source nodes", | V_pf_sources_z = uma_zcreate("pf source nodes", | ||||
sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | ||||
0); | 0); | ||||
V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z; | V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z; | ||||
uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT); | uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT); | ||||
uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached"); | uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached"); | ||||
V_pf_srchash = mallocarray(V_pf_srchashsize, | V_pf_srchash = mallocarray(V_pf_srchashsize, | ||||
sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO); | sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO); | ||||
if (V_pf_srchash == NULL) { | if (V_pf_srchash == NULL) { | ||||
printf("pf: Unable to allocate memory for " | printf("pf: Unable to allocate memory for " | ||||
"source_hashsize %lu.\n", V_pf_srchashsize); | "source_hashsize %lu.\n", V_pf_srchashsize); | ||||
V_pf_srchashsize = PF_SRCHASHSIZ; | V_pf_srchashsize = PF_SRCHASHSIZ; | ||||
V_pf_srchash = mallocarray(V_pf_srchashsize, | V_pf_srchash = mallocarray(V_pf_srchashsize, | ||||
sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO); | sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO); | ||||
} | } | ||||
V_pf_srchashmask = V_pf_srchashsize - 1; | V_pf_srchashmask = V_pf_srchashsize - 1; | ||||
for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) | for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) | ||||
mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF); | mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF); | ||||
/* UDP endpoint mappings. */ | |||||
V_pf_udp_mapping_z = uma_zcreate("pf UDP mappings", | |||||
sizeof(struct pf_udp_mapping), NULL, NULL, NULL, NULL, | |||||
UMA_ALIGN_PTR, 0); | |||||
V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize, | |||||
sizeof(struct pf_udpendpointhash), M_PFHASH, M_NOWAIT | M_ZERO); | |||||
if (V_pf_udpendpointhash == NULL) { | |||||
printf("pf: Unable to allocate memory for " | |||||
"udpendpoint_hashsize %lu.\n", V_pf_udpendpointhashsize); | |||||
V_pf_udpendpointhashsize = PF_HASHSIZ; | |||||
kpUnsubmitted Done Inline ActionsAnd given that we have a hardcoded default for the udp hash size we probably want a define (at least, if we end up choosing a different default for it than PF_HASHSIZ). kp: And given that we have a hardcoded default for the udp hash size we probably want a define (at… | |||||
V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize, | |||||
sizeof(struct pf_udpendpointhash), M_PFHASH, M_WAITOK | M_ZERO); | |||||
} | |||||
V_pf_udpendpointhashmask = V_pf_udpendpointhashsize - 1; | |||||
for (i = 0, uh = V_pf_udpendpointhash; i <= V_pf_hashmask; i++, uh++) { | |||||
kpUnsubmitted Done Inline Actions<= V_pf_udpendpointhashmask ? Arguably another reason for having a different default size for the udp hash table is that we'd encounter issues like this in default configurations. Now we're only going to see this when a user twiddles the setting. kp: <= V_pf_udpendpointhashmask ?
Arguably another reason for having a different default size for… | |||||
mtx_init(&uh->lock, "pf_udpendpointhash", NULL, | |||||
MTX_DEF | MTX_DUPOK); | |||||
} | |||||
/* ALTQ */ | /* ALTQ */ | ||||
TAILQ_INIT(&V_pf_altqs[0]); | TAILQ_INIT(&V_pf_altqs[0]); | ||||
TAILQ_INIT(&V_pf_altqs[1]); | TAILQ_INIT(&V_pf_altqs[1]); | ||||
TAILQ_INIT(&V_pf_altqs[2]); | TAILQ_INIT(&V_pf_altqs[2]); | ||||
TAILQ_INIT(&V_pf_altqs[3]); | TAILQ_INIT(&V_pf_altqs[3]); | ||||
TAILQ_INIT(&V_pf_pabuf); | TAILQ_INIT(&V_pf_pabuf); | ||||
V_pf_altqs_active = &V_pf_altqs[0]; | V_pf_altqs_active = &V_pf_altqs[0]; | ||||
V_pf_altq_ifs_active = &V_pf_altqs[1]; | V_pf_altq_ifs_active = &V_pf_altqs[1]; | ||||
Show All 17 Lines | |||||
} | } | ||||
void | void | ||||
pf_cleanup(void) | pf_cleanup(void) | ||||
{ | { | ||||
struct pf_keyhash *kh; | struct pf_keyhash *kh; | ||||
struct pf_idhash *ih; | struct pf_idhash *ih; | ||||
struct pf_srchash *sh; | struct pf_srchash *sh; | ||||
struct pf_udpendpointhash *uh; | |||||
struct pf_send_entry *pfse, *next; | struct pf_send_entry *pfse, *next; | ||||
u_int i; | u_int i; | ||||
for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; | for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash, uh = V_pf_udpendpointhash; | ||||
kpUnsubmitted Done Inline ActionsIs that right? What happens if V_pf_hashmask != V_pf_udpendpointhashmask? kp: Is that right? What happens if V_pf_hashmask != V_pf_udpendpointhashmask? | |||||
i++, kh++, ih++) { | i <= V_pf_hashmask; | ||||
i++, kh++, ih++, uh++) { | |||||
KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty", | KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty", | ||||
__func__)); | __func__)); | ||||
KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty", | KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty", | ||||
__func__)); | __func__)); | ||||
mtx_destroy(&kh->lock); | mtx_destroy(&kh->lock); | ||||
mtx_destroy(&ih->lock); | mtx_destroy(&ih->lock); | ||||
} | } | ||||
free(V_pf_keyhash, M_PFHASH); | free(V_pf_keyhash, M_PFHASH); | ||||
free(V_pf_idhash, M_PFHASH); | free(V_pf_idhash, M_PFHASH); | ||||
free(V_pf_udpendpointhash, M_PFHASH); | |||||
for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { | for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { | ||||
KASSERT(LIST_EMPTY(&sh->nodes), | KASSERT(LIST_EMPTY(&sh->nodes), | ||||
("%s: source node hash not empty", __func__)); | ("%s: source node hash not empty", __func__)); | ||||
mtx_destroy(&sh->lock); | mtx_destroy(&sh->lock); | ||||
} | } | ||||
free(V_pf_srchash, M_PFHASH); | free(V_pf_srchash, M_PFHASH); | ||||
STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) { | STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) { | ||||
m_freem(pfse->pfse_m); | m_freem(pfse->pfse_m); | ||||
free(pfse, M_PFTEMP); | free(pfse, M_PFTEMP); | ||||
} | } | ||||
MPASS(RB_EMPTY(&V_pf_sctp_endpoints)); | MPASS(RB_EMPTY(&V_pf_sctp_endpoints)); | ||||
uma_zdestroy(V_pf_sources_z); | uma_zdestroy(V_pf_sources_z); | ||||
uma_zdestroy(V_pf_state_z); | uma_zdestroy(V_pf_state_z); | ||||
uma_zdestroy(V_pf_state_key_z); | uma_zdestroy(V_pf_state_key_z); | ||||
uma_zdestroy(V_pf_udp_mapping_z); | |||||
} | } | ||||
static int | static int | ||||
pf_mtag_uminit(void *mem, int size, int how) | pf_mtag_uminit(void *mem, int size, int how) | ||||
{ | { | ||||
struct m_tag *t; | struct m_tag *t; | ||||
t = (struct m_tag *)mem; | t = (struct m_tag *)mem; | ||||
▲ Show 20 Lines • Show All 496 Lines • ▼ Show 20 Lines | second_run: | ||||
return (ret); | return (ret); | ||||
} | } | ||||
/* | /* | ||||
* FIXME | * FIXME | ||||
* This routine is inefficient -- locks the state only to unlock immediately on | * This routine is inefficient -- locks the state only to unlock immediately on | ||||
* return. | * return. | ||||
* It is racy -- after the state is unlocked nothing stops other threads from | * It is racy -- after the state is unlocked nothing stops other threads from | ||||
Not Done Inline ActionsPlease follow style(9), do not initialize variables in declaration. They rule may be violated sometimes, but calling function in initializer is too much! glebius: Please follow style(9), do not initialize variables in declaration. They rule may be violated… | |||||
* removing it. | * removing it. | ||||
*/ | */ | ||||
bool | bool | ||||
pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir) | pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir) | ||||
{ | { | ||||
struct pf_kstate *s; | struct pf_kstate *s; | ||||
s = pf_find_state_all(key, dir, NULL); | s = pf_find_state_all(key, dir, NULL); | ||||
if (s != NULL) { | if (s != NULL) { | ||||
PF_STATE_UNLOCK(s); | PF_STATE_UNLOCK(s); | ||||
return (true); | return (true); | ||||
} | } | ||||
Not Done Inline ActionsPlease put return values into braces, to follow style(9). This refers to all new returns in this patch. glebius: Please put return values into braces, to follow style(9). This refers to all new returns in… | |||||
return (false); | return (false); | ||||
} | } | ||||
struct pf_udp_mapping * | |||||
pf_udp_mapping_create(sa_family_t af, struct pf_addr *src_addr, uint16_t src_port, | |||||
struct pf_addr *nat_addr, uint16_t nat_port) | |||||
{ | |||||
struct pf_udp_mapping *mapping; | |||||
mapping = uma_zalloc(V_pf_udp_mapping_z, M_NOWAIT | M_ZERO); | |||||
if (mapping == NULL) | |||||
return NULL; | |||||
Not Done Inline Actionsstyle(9) kp: style(9) | |||||
Not Done Inline Actionsreturn (NULL); please. kp: return (NULL); please. | |||||
PF_ACPY(&mapping->endpoints[0].addr, src_addr, af); | |||||
mapping->endpoints[0].port = src_port; | |||||
mapping->endpoints[0].af = af; | |||||
mapping->endpoints[0].mapping = mapping; | |||||
PF_ACPY(&mapping->endpoints[1].addr, nat_addr, af); | |||||
mapping->endpoints[1].port = nat_port; | |||||
mapping->endpoints[1].af = af; | |||||
mapping->endpoints[1].mapping = mapping; | |||||
refcount_init(&mapping->refs, 1); | |||||
return (mapping); | |||||
} | |||||
int | |||||
pf_udp_mapping_insert(struct pf_udp_mapping *mapping) | |||||
{ | |||||
struct pf_udpendpointhash *h0, *h1; | |||||
struct pf_udp_endpoint *endpoint; | |||||
int ret = 1; | |||||
kpUnsubmitted Done Inline ActionsI'd default to EEXIST, or maybe just switch to a bool return value. kp: I'd default to EEXIST, or maybe just switch to a bool return value. | |||||
h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])]; | |||||
h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])]; | |||||
if (h0 == h1) { | |||||
PF_HASHROW_LOCK(h0); | |||||
} else if (h0 < h1) { | |||||
PF_HASHROW_LOCK(h0); | |||||
PF_HASHROW_LOCK(h1); | |||||
} else { | |||||
PF_HASHROW_LOCK(h1); | |||||
PF_HASHROW_LOCK(h0); | |||||
} | |||||
LIST_FOREACH(endpoint, &h0->endpoints, entry) { | |||||
if (bcmp(endpoint, &mapping->endpoints[0], | |||||
sizeof(struct pf_udp_endpoint_cmp)) == 0) | |||||
break; | |||||
} | |||||
if (endpoint != NULL) | |||||
goto cleanup; | |||||
LIST_FOREACH(endpoint, &h1->endpoints, entry) { | |||||
Not Done Inline ActionsPlease put empty line after declarations. glebius: Please put empty line after declarations. | |||||
if (bcmp(endpoint, &mapping->endpoints[1], | |||||
sizeof(struct pf_udp_endpoint_cmp)) == 0) | |||||
break; | |||||
} | |||||
if (endpoint != NULL) | |||||
goto cleanup; | |||||
LIST_INSERT_HEAD(&h0->endpoints, &mapping->endpoints[0], entry); | |||||
LIST_INSERT_HEAD(&h1->endpoints, &mapping->endpoints[1], entry); | |||||
ret = 0; | |||||
cleanup: | |||||
if (h0 != h1) { | |||||
Done Inline ActionsSeems to have wound up with spaces to rather than tabs here. kp: Seems to have wound up with spaces to rather than tabs here. | |||||
PF_HASHROW_UNLOCK(h0); | |||||
PF_HASHROW_UNLOCK(h1); | |||||
} else { | |||||
PF_HASHROW_UNLOCK(h0); | |||||
} | |||||
return (ret); | |||||
} | |||||
void | |||||
pf_udp_mapping_release(struct pf_udp_mapping *mapping) | |||||
{ | |||||
/* refcount is synchronized on the source endpoint's row lock */ | |||||
kpUnsubmitted Done Inline ActionsI like that that's documented, but wonder if it wouldn't be more useful to put that in the struct definition instead. Or possibly both here and there. kp: I like that that's documented, but wonder if it wouldn't be more useful to put that in the… | |||||
struct pf_udpendpointhash *h0, *h1; | |||||
h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])]; | |||||
PF_HASHROW_LOCK(h0); | |||||
if (refcount_release(&mapping->refs)) { | |||||
LIST_REMOVE(&mapping->endpoints[0], entry); | |||||
PF_HASHROW_UNLOCK(h0); | |||||
h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])]; | |||||
PF_HASHROW_LOCK(h1); | |||||
Not Done Inline ActionsIs there a risk of races because we remove separate entries (why are there two?) non-atomically? kp: Is there a risk of races because we remove separate entries (why are there two?) non-atomically? | |||||
LIST_REMOVE(&mapping->endpoints[1], entry); | |||||
PF_HASHROW_UNLOCK(h1); | |||||
uma_zfree(V_pf_udp_mapping_z, mapping); | |||||
} else { | |||||
PF_HASHROW_UNLOCK(h0); | |||||
} | |||||
} | |||||
struct pf_udp_mapping * | |||||
pf_udp_mapping_find(struct pf_udp_endpoint_cmp *key) | |||||
{ | |||||
struct pf_udpendpointhash *uh; | |||||
struct pf_udp_endpoint *endpoint; | |||||
uh = &V_pf_udpendpointhash[pf_hashudpendpoint((struct pf_udp_endpoint*)key)]; | |||||
PF_HASHROW_LOCK(uh); | |||||
LIST_FOREACH(endpoint, &uh->endpoints, entry) { | |||||
if (bcmp(endpoint, key, sizeof(struct pf_udp_endpoint_cmp)) == 0 && | |||||
bcmp(endpoint, &endpoint->mapping->endpoints[0], | |||||
sizeof(struct pf_udp_endpoint_cmp)) == 0) | |||||
break; | |||||
} | |||||
if (endpoint == NULL) { | |||||
PF_HASHROW_UNLOCK(uh); | |||||
return NULL; | |||||
kpUnsubmitted Done Inline Actionsstyle(9). kp: style(9). | |||||
} | |||||
refcount_acquire(&endpoint->mapping->refs); | |||||
PF_HASHROW_UNLOCK(uh); | |||||
return (endpoint->mapping); | |||||
} | |||||
/* END state table stuff */ | /* END state table stuff */ | ||||
static void | static void | ||||
pf_send(struct pf_send_entry *pfse) | pf_send(struct pf_send_entry *pfse) | ||||
{ | { | ||||
PF_SENDQ_LOCK(); | PF_SENDQ_LOCK(); | ||||
STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next); | STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next); | ||||
▲ Show 20 Lines • Show All 600 Lines • ▼ Show 20 Lines | pf_unlink_state(struct pf_kstate *s) | ||||
/* Ensure we remove it from the list of halfopen states, if needed. */ | /* Ensure we remove it from the list of halfopen states, if needed. */ | ||||
if (s->key[PF_SK_STACK] != NULL && | if (s->key[PF_SK_STACK] != NULL && | ||||
s->key[PF_SK_STACK]->proto == IPPROTO_TCP) | s->key[PF_SK_STACK]->proto == IPPROTO_TCP) | ||||
pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED); | pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED); | ||||
PF_HASHROW_UNLOCK(ih); | PF_HASHROW_UNLOCK(ih); | ||||
pf_detach_state(s); | pf_detach_state(s); | ||||
if (s->udp_mapping) | |||||
pf_udp_mapping_release(s->udp_mapping); | |||||
/* pf_state_insert() initialises refs to 2 */ | /* pf_state_insert() initialises refs to 2 */ | ||||
return (pf_release_staten(s, 2)); | return (pf_release_staten(s, 2)); | ||||
} | } | ||||
struct pf_kstate * | struct pf_kstate * | ||||
pf_alloc_state(int flags) | pf_alloc_state(int flags) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 2,247 Lines • ▼ Show 20 Lines | pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif, | ||||
int tag = -1; | int tag = -1; | ||||
int asd = 0; | int asd = 0; | ||||
int match = 0; | int match = 0; | ||||
int state_icmp = 0, icmp_dir, multi; | int state_icmp = 0, icmp_dir, multi; | ||||
u_int16_t sport = 0, dport = 0, virtual_type, virtual_id; | u_int16_t sport = 0, dport = 0, virtual_type, virtual_id; | ||||
u_int16_t bproto_sum = 0, bip_sum = 0; | u_int16_t bproto_sum = 0, bip_sum = 0; | ||||
u_int8_t icmptype = 0, icmpcode = 0; | u_int8_t icmptype = 0, icmpcode = 0; | ||||
struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; | struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; | ||||
struct pf_udp_mapping *udp_mapping = NULL; | |||||
Done Inline ActionsAlign *udp_mapping with the other variables with a tab. kp: Align *udp_mapping with the other variables with a tab. | |||||
PF_RULES_RASSERT(); | PF_RULES_RASSERT(); | ||||
SLIST_INIT(&match_rules); | SLIST_INIT(&match_rules); | ||||
if (inp != NULL) { | if (inp != NULL) { | ||||
INP_LOCK_ASSERT(inp); | INP_LOCK_ASSERT(inp); | ||||
pd->lookup.uid = inp->inp_cred->cr_uid; | pd->lookup.uid = inp->inp_cred->cr_uid; | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | default: | ||||
sport = dport = hdrlen = 0; | sport = dport = hdrlen = 0; | ||||
break; | break; | ||||
} | } | ||||
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); | r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); | ||||
/* check packet for BINAT/NAT/RDR */ | /* check packet for BINAT/NAT/RDR */ | ||||
transerror = pf_get_translation(pd, m, off, kif, &nsn, &sk, | transerror = pf_get_translation(pd, m, off, kif, &nsn, &sk, | ||||
&nk, saddr, daddr, sport, dport, anchor_stack, &nr); | &nk, saddr, daddr, sport, dport, anchor_stack, &nr, &udp_mapping); | ||||
switch (transerror) { | switch (transerror) { | ||||
default: | default: | ||||
/* A translation error occurred. */ | /* A translation error occurred. */ | ||||
REASON_SET(&reason, transerror); | REASON_SET(&reason, transerror); | ||||
goto cleanup; | goto cleanup; | ||||
case PFRES_MAX: | case PFRES_MAX: | ||||
/* No match. */ | /* No match. */ | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 281 Lines • ▼ Show 20 Lines | #endif /* INET */ | ||||
if (pd->act.rtableid >= 0) | if (pd->act.rtableid >= 0) | ||||
M_SETFIB(m, pd->act.rtableid); | M_SETFIB(m, pd->act.rtableid); | ||||
if (!state_icmp && (r->keep_state || nr != NULL || | if (!state_icmp && (r->keep_state || nr != NULL || | ||||
(pd->flags & PFDESC_TCP_NORM))) { | (pd->flags & PFDESC_TCP_NORM))) { | ||||
int action; | int action; | ||||
action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off, | action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off, | ||||
sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum, | sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum, | ||||
hdrlen, &match_rules); | hdrlen, &match_rules, udp_mapping); | ||||
if (action != PF_PASS) { | if (action != PF_PASS) { | ||||
if (udp_mapping != NULL) | |||||
kpUnsubmitted Done Inline ActionsI wonder if we shouldn't put the NULL check in pf_udp_mapping_release() instead. It looks like there are at least three callers, so that'd save a couple of lines of code too. kp: I wonder if we shouldn't put the NULL check in pf_udp_mapping_release() instead.
Similar to… | |||||
pf_udp_mapping_release(udp_mapping); | |||||
if (action == PF_DROP && | if (action == PF_DROP && | ||||
(r->rule_flag & PFRULE_RETURN)) | (r->rule_flag & PFRULE_RETURN)) | ||||
pf_return(r, nr, pd, sk, off, m, th, kif, | pf_return(r, nr, pd, sk, off, m, th, kif, | ||||
bproto_sum, bip_sum, hdrlen, &reason, | bproto_sum, bip_sum, hdrlen, &reason, | ||||
pd->act.rtableid); | pd->act.rtableid); | ||||
return (action); | return (action); | ||||
} | } | ||||
} else { | } else { | ||||
while ((ri = SLIST_FIRST(&match_rules))) { | while ((ri = SLIST_FIRST(&match_rules))) { | ||||
SLIST_REMOVE_HEAD(&match_rules, entry); | SLIST_REMOVE_HEAD(&match_rules, entry); | ||||
free(ri, M_PF_RULE_ITEM); | free(ri, M_PF_RULE_ITEM); | ||||
} | } | ||||
uma_zfree(V_pf_state_key_z, sk); | uma_zfree(V_pf_state_key_z, sk); | ||||
uma_zfree(V_pf_state_key_z, nk); | uma_zfree(V_pf_state_key_z, nk); | ||||
if (udp_mapping != NULL) | |||||
pf_udp_mapping_release(udp_mapping); | |||||
} | } | ||||
/* copy back packet headers if we performed NAT operations */ | /* copy back packet headers if we performed NAT operations */ | ||||
if (rewrite) | if (rewrite) | ||||
m_copyback(m, off, hdrlen, pd->hdr.any); | m_copyback(m, off, hdrlen, pd->hdr.any); | ||||
if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) && | if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) && | ||||
pd->dir == PF_OUT && | pd->dir == PF_OUT && | ||||
Show All 11 Lines | |||||
cleanup: | cleanup: | ||||
while ((ri = SLIST_FIRST(&match_rules))) { | while ((ri = SLIST_FIRST(&match_rules))) { | ||||
SLIST_REMOVE_HEAD(&match_rules, entry); | SLIST_REMOVE_HEAD(&match_rules, entry); | ||||
free(ri, M_PF_RULE_ITEM); | free(ri, M_PF_RULE_ITEM); | ||||
} | } | ||||
uma_zfree(V_pf_state_key_z, sk); | uma_zfree(V_pf_state_key_z, sk); | ||||
uma_zfree(V_pf_state_key_z, nk); | uma_zfree(V_pf_state_key_z, nk); | ||||
if (udp_mapping != NULL) | |||||
pf_udp_mapping_release(udp_mapping); | |||||
Done Inline ActionsDoesn't need the NULL check any more. kp: Doesn't need the NULL check any more. | |||||
return (PF_DROP); | return (PF_DROP); | ||||
} | } | ||||
static int | static int | ||||
pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a, | pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a, | ||||
struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk, | struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk, | ||||
struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport, | struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport, | ||||
u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm, | u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm, | ||||
int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen, | int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen, | ||||
struct pf_krule_slist *match_rules) | struct pf_krule_slist *match_rules, struct pf_udp_mapping *udp_mapping) | ||||
{ | { | ||||
struct pf_kstate *s = NULL; | struct pf_kstate *s = NULL; | ||||
struct pf_ksrc_node *sn = NULL; | struct pf_ksrc_node *sn = NULL; | ||||
struct tcphdr *th = &pd->hdr.tcp; | struct tcphdr *th = &pd->hdr.tcp; | ||||
u_int16_t mss = V_tcp_mssdflt; | u_int16_t mss = V_tcp_mssdflt; | ||||
u_short reason, sn_reason; | u_short reason, sn_reason; | ||||
struct pf_krule_item *ri; | struct pf_krule_item *ri; | ||||
▲ Show 20 Lines • Show All 199 Lines • ▼ Show 20 Lines | if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == | ||||
s->src.mss = mss; | s->src.mss = mss; | ||||
pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, | pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, | ||||
th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, | th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, | ||||
TH_SYN|TH_ACK, 0, s->src.mss, 0, true, 0, 0, | TH_SYN|TH_ACK, 0, s->src.mss, 0, true, 0, 0, | ||||
pd->act.rtableid); | pd->act.rtableid); | ||||
REASON_SET(&reason, PFRES_SYNPROXY); | REASON_SET(&reason, PFRES_SYNPROXY); | ||||
return (PF_SYNPROXY_DROP); | return (PF_SYNPROXY_DROP); | ||||
} | } | ||||
s->udp_mapping = udp_mapping; | |||||
return (PF_PASS); | return (PF_PASS); | ||||
csfailed: | csfailed: | ||||
while ((ri = SLIST_FIRST(match_rules))) { | while ((ri = SLIST_FIRST(match_rules))) { | ||||
SLIST_REMOVE_HEAD(match_rules, entry); | SLIST_REMOVE_HEAD(match_rules, entry); | ||||
free(ri, M_PF_RULE_ITEM); | free(ri, M_PF_RULE_ITEM); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 4,163 Lines • Show Last 20 Lines |
I seem to be missing the allocation (and freeing) of this. Am I looking in the wrong place or is it just not part of the patch?