Changeset View
Changeset View
Standalone View
Standalone View
sys/netpfil/pf/pf_norm.c
Show First 20 Lines • Show All 1,080 Lines • ▼ Show 20 Lines | if (srs) { | ||||
* if one of rules has matched and it's not a "no scrub" rule */ | * if one of rules has matched and it's not a "no scrub" rule */ | ||||
if (r == NULL || r->action == PF_NOSCRUB) | if (r == NULL || r->action == PF_NOSCRUB) | ||||
return (PF_PASS); | return (PF_PASS); | ||||
pf_counter_u64_critical_enter(); | pf_counter_u64_critical_enter(); | ||||
pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); | pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); | ||||
pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); | pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); | ||||
pf_counter_u64_critical_exit(); | pf_counter_u64_critical_exit(); | ||||
pf_rule_to_actions(r, &pd->act); | |||||
} else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) { | } else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) { | ||||
/* With no scrub rules IPv4 fragment reassembly depends on the | /* With no scrub rules IPv4 fragment reassembly depends on the | ||||
* global switch. Fragments can be dropped early if reassembly | * global switch. Fragments can be dropped early if reassembly | ||||
* is disabled. */ | * is disabled. */ | ||||
REASON_SET(reason, PFRES_NORM); | REASON_SET(reason, PFRES_NORM); | ||||
goto drop; | goto drop; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | no_fragment: | ||||
/* At this point, only IP_DF is allowed in ip_off */ | /* At this point, only IP_DF is allowed in ip_off */ | ||||
if (h->ip_off & ~htons(IP_DF)) { | if (h->ip_off & ~htons(IP_DF)) { | ||||
u_int16_t ip_off = h->ip_off; | u_int16_t ip_off = h->ip_off; | ||||
h->ip_off &= htons(IP_DF); | h->ip_off &= htons(IP_DF); | ||||
h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); | h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); | ||||
} | } | ||||
} | } | ||||
if (r != NULL) { | |||||
int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag); | |||||
pf_scrub_ip(&m, scrub_flags, r->min_ttl, r->set_tos); | |||||
} | |||||
return (PF_PASS); | return (PF_PASS); | ||||
bad: | bad: | ||||
DPFPRINTF(("dropping bad fragment\n")); | DPFPRINTF(("dropping bad fragment\n")); | ||||
REASON_SET(reason, PFRES_FRAG); | REASON_SET(reason, PFRES_FRAG); | ||||
drop: | drop: | ||||
if (r != NULL && r->log) | if (r != NULL && r->log) | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | if (srs) { | ||||
* if one of rules has matched and it's not a "no scrub" rule */ | * if one of rules has matched and it's not a "no scrub" rule */ | ||||
if (r == NULL || r->action == PF_NOSCRUB) | if (r == NULL || r->action == PF_NOSCRUB) | ||||
return (PF_PASS); | return (PF_PASS); | ||||
pf_counter_u64_critical_enter(); | pf_counter_u64_critical_enter(); | ||||
pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); | pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); | ||||
pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); | pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); | ||||
pf_counter_u64_critical_exit(); | pf_counter_u64_critical_exit(); | ||||
pf_rule_to_actions(r, &pd->act); | |||||
} | } | ||||
/* Check for illegal packets */ | /* Check for illegal packets */ | ||||
if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len) | if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len) | ||||
goto drop; | goto drop; | ||||
plen = ntohs(h->ip6_plen); | plen = ntohs(h->ip6_plen); | ||||
/* jumbo payload option not supported */ | /* jumbo payload option not supported */ | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | default: | ||||
terminal = 1; | terminal = 1; | ||||
break; | break; | ||||
} | } | ||||
} while (!terminal); | } while (!terminal); | ||||
if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len) | if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len) | ||||
goto shortpkt; | goto shortpkt; | ||||
if (r != NULL) { | |||||
int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag); | |||||
pf_scrub_ip6(&m, scrub_flags, r->min_ttl, r->set_tos); | |||||
} | |||||
return (PF_PASS); | return (PF_PASS); | ||||
fragment: | fragment: | ||||
if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len) | if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len) | ||||
goto shortpkt; | goto shortpkt; | ||||
if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6)) | if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6)) | ||||
goto shortpkt; | goto shortpkt; | ||||
▲ Show 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | if (srs) { | ||||
* if one of rules has matched and it's not a "no scrub" rule */ | * if one of rules has matched and it's not a "no scrub" rule */ | ||||
if (rm == NULL || rm->action == PF_NOSCRUB) | if (rm == NULL || rm->action == PF_NOSCRUB) | ||||
return (PF_PASS); | return (PF_PASS); | ||||
pf_counter_u64_critical_enter(); | pf_counter_u64_critical_enter(); | ||||
pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); | pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); | ||||
pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); | pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); | ||||
pf_counter_u64_critical_exit(); | pf_counter_u64_critical_exit(); | ||||
pf_rule_to_actions(rm, &pd->act); | |||||
} | } | ||||
if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP) | if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP) | ||||
pd->flags |= PFDESC_TCP_NORM; | pd->flags |= PFDESC_TCP_NORM; | ||||
flags = th->th_flags; | flags = th->th_flags; | ||||
if (flags & TH_SYN) { | if (flags & TH_SYN) { | ||||
/* Illegal packet */ | /* Illegal packet */ | ||||
Show All 34 Lines | pf_normalize_tcp(struct pfi_kkif *kif, struct mbuf *m, int ipoff, | ||||
/* Remove urgent pointer, if TH_URG is not set */ | /* Remove urgent pointer, if TH_URG is not set */ | ||||
if (!(flags & TH_URG) && th->th_urp) { | if (!(flags & TH_URG) && th->th_urp) { | ||||
th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp, | th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp, | ||||
0, 0); | 0, 0); | ||||
th->th_urp = 0; | th->th_urp = 0; | ||||
rewrite = 1; | rewrite = 1; | ||||
} | } | ||||
/* Set MSS for old-style scrub rules. | |||||
* The function performs its own copyback. */ | |||||
if (rm != NULL && rm->max_mss) | |||||
pf_normalize_mss(m, off, pd, rm->max_mss); | |||||
/* copy back packet headers if we sanitized */ | /* copy back packet headers if we sanitized */ | ||||
if (rewrite) | if (rewrite) | ||||
m_copyback(m, off, sizeof(*th), (caddr_t)th); | m_copyback(m, off, sizeof(*th), (caddr_t)th); | ||||
return (PF_PASS); | return (PF_PASS); | ||||
tcp_drop: | tcp_drop: | ||||
REASON_SET(&reason, PFRES_NORM); | REASON_SET(&reason, PFRES_NORM); | ||||
▲ Show 20 Lines • Show All 483 Lines • ▼ Show 20 Lines | if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags & | ||||
} | } | ||||
} | } | ||||
/* I have a dream.... TCP segment reassembly.... */ | /* I have a dream.... TCP segment reassembly.... */ | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss) | pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd) | ||||
{ | { | ||||
struct tcphdr *th = &pd->hdr.tcp; | struct tcphdr *th = &pd->hdr.tcp; | ||||
u_int16_t *mss; | u_int16_t *mss; | ||||
int thoff; | int thoff; | ||||
int opt, cnt, optlen = 0; | int opt, cnt, optlen = 0; | ||||
u_char opts[TCP_MAXOLEN]; | u_char opts[TCP_MAXOLEN]; | ||||
u_char *optp = opts; | u_char *optp = opts; | ||||
size_t startoff; | size_t startoff; | ||||
Show All 17 Lines | else { | ||||
break; | break; | ||||
optlen = optp[1]; | optlen = optp[1]; | ||||
if (optlen < 2 || optlen > cnt) | if (optlen < 2 || optlen > cnt) | ||||
break; | break; | ||||
} | } | ||||
switch (opt) { | switch (opt) { | ||||
case TCPOPT_MAXSEG: | case TCPOPT_MAXSEG: | ||||
mss = (u_int16_t *)(optp + 2); | mss = (u_int16_t *)(optp + 2); | ||||
if ((ntohs(*mss)) > maxmss) { | if ((ntohs(*mss)) > pd->act.max_mss) { | ||||
pf_patch_16_unaligned(m, | pf_patch_16_unaligned(m, | ||||
&th->th_sum, | &th->th_sum, | ||||
mss, htons(maxmss), | mss, htons(pd->act.max_mss), | ||||
PF_ALGNMNT(startoff), | PF_ALGNMNT(startoff), | ||||
0); | 0); | ||||
m_copyback(m, off + sizeof(*th), | m_copyback(m, off + sizeof(*th), | ||||
thoff - sizeof(*th), opts); | thoff - sizeof(*th), opts); | ||||
m_copyback(m, off, sizeof(*th), (caddr_t)th); | m_copyback(m, off, sizeof(*th), (caddr_t)th); | ||||
} | } | ||||
break; | break; | ||||
default: | default: | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
u_int16_t | |||||
pf_rule_to_scrub_flags(u_int32_t rule_flags) | |||||
{ | |||||
/* | |||||
* Translate pf_krule->rule_flag to pf_krule->scrub_flags. | |||||
* The pf_scrub_ip functions have been adapted to the new style of pass | |||||
* rules but they might get called if old scrub rules are used. | |||||
*/ | |||||
int scrub_flags = 0; | |||||
if (rule_flags & PFRULE_SET_TOS) { | |||||
scrub_flags |= PFSTATE_SETTOS; | |||||
} | |||||
if (rule_flags & PFRULE_RANDOMID) | |||||
scrub_flags |= PFSTATE_RANDOMID; | |||||
return scrub_flags; | |||||
} | |||||
#ifdef INET
/*
 * Apply the IPv4 scrub actions recorded in the packet descriptor
 * (pd->act): clear IP_DF when "no-df" was requested, enforce a minimum
 * TTL, overwrite the TOS byte (preserving the ECN bits), and randomize
 * the IP id.  Each header modification is followed by an incremental
 * fixup of the IP header checksum via pf_cksum_fixup().
 */
void
pf_scrub_ip(struct mbuf **m0, struct pf_pdesc *pd)
{
	struct mbuf *m = *m0;
	struct ip *h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (pd->act.flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->act.min_ttl && h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		u_int16_t ov, nv;

		/*
		 * TOS shares a 16-bit checksum word with ip_vhl; fix up
		 * the whole word to keep the incremental checksum right.
		 */
		ov = *(u_int16_t *)h;
		h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (pd->act.flags & PFSTATE_RANDOMID &&
	    !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */
#ifdef INET6
/*
 * Apply the IPv6 scrub actions recorded in the packet descriptor
 * (pd->act): enforce a minimum hop limit and overwrite the traffic
 * class bits (preserving ECN).  IPv6 has no header checksum, so no
 * checksum fixup is required.
 */
void
pf_scrub_ip6(struct mbuf **m0, struct pf_pdesc *pd)
{
	struct mbuf *m = *m0;
	struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->act.min_ttl && h->ip6_hlim < pd->act.min_ttl)
		h->ip6_hlim = pd->act.min_ttl;

	/* Enforce tos. Set traffic class bits */
	if (pd->act.flags & PFSTATE_SETTOS) {
		/* Traffic class occupies bits 20-27 of the flow word. */
		h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
		h->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h)) << 20);
	}
}
#endif /* INET6 */