diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -320,6 +320,8 @@
 static void              pf_purge_unlinked_rules(void);
 static int               pf_mtag_uminit(void *, int, int);
 static void              pf_mtag_free(struct m_tag *);
+static void              pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *,
+                             int, struct pf_state_key *);
 #ifdef INET
 static void              pf_route(struct mbuf **, struct pf_krule *, int,
                              struct ifnet *, struct pf_kstate *,
@@ -341,6 +343,16 @@
 VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
 
+#define PACKET_UNDO_NAT(_m, _pd, _off, _s, _dir)                \
+        do {                                                    \
+                struct pf_state_key *nk;                        \
+                if ((_dir) == PF_OUT)                           \
+                        nk = (_s)->key[PF_SK_STACK];            \
+                else                                            \
+                        nk = (_s)->key[PF_SK_WIRE];             \
+                pf_packet_rework_nat(_m, _pd, _off, nk);        \
+        } while (0)
+
 #define PACKET_LOOPED(pd)        ((pd)->pf_mtag &&              \
                                   (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
 
@@ -446,6 +458,83 @@
         return (0);
 }
 
+static void
+pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off,
+    struct pf_state_key *nk)
+{
+
+        switch (pd->proto) {
+        case IPPROTO_TCP: {
+                struct tcphdr *th = &pd->hdr.tcp;
+
+                if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
+                        pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum,
+                            &th->th_sum, &nk->addr[pd->sidx],
+                            nk->port[pd->sidx], 0, pd->af);
+                if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
+                        pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum,
+                            &th->th_sum, &nk->addr[pd->didx],
+                            nk->port[pd->didx], 0, pd->af);
+                m_copyback(m, off, sizeof(*th), (caddr_t)th);
+                break;
+        }
+        case IPPROTO_UDP: {
+                struct udphdr *uh = &pd->hdr.udp;
+
+                if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
+                        pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
+                            &uh->uh_sum, &nk->addr[pd->sidx],
+                            nk->port[pd->sidx], 1, pd->af);
+                if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
+                        pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
+                            &uh->uh_sum, &nk->addr[pd->didx],
+                            nk->port[pd->didx], 1, pd->af);
+                m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
+                break;
+        }
+        case IPPROTO_ICMP: {
+                struct icmp *ih = &pd->hdr.icmp;
+
+                if (nk->port[pd->sidx] != ih->icmp_id) {
+                        pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
+                            ih->icmp_cksum, ih->icmp_id,
+                            nk->port[pd->sidx], 0);
+                        ih->icmp_id = nk->port[pd->sidx];
+                        pd->sport = &ih->icmp_id;
+
+                        m_copyback(m, off, ICMP_MINLEN, (caddr_t)ih);
+                }
+                /* FALLTHROUGH */
+        }
+        default:
+                if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
+                        switch (pd->af) {
+                        case AF_INET:
+                                pf_change_a(&pd->src->v4.s_addr,
+                                    pd->ip_sum, nk->addr[pd->sidx].v4.s_addr,
+                                    0);
+                                break;
+                        case AF_INET6:
+                                PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
+                                break;
+                        }
+                }
+                if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
+                        switch (pd->af) {
+                        case AF_INET:
+                                pf_change_a(&pd->dst->v4.s_addr,
+                                    pd->ip_sum, nk->addr[pd->didx].v4.s_addr,
+                                    0);
+                                break;
+                        case AF_INET6:
+                                PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
+                                break;
+                        }
+                }
+                break;
+        }
+}
+
 static __inline uint32_t
 pf_hashkey(struct pf_state_key *sk)
 {
@@ -5937,6 +6026,11 @@
                 error = EMSGSIZE;
                 KMOD_IPSTAT_INC(ips_cantfrag);
                 if (r->rt != PF_DUPTO) {
+                        if (s && pd->nat_rule != NULL)
+                                PACKET_UNDO_NAT(m0, pd,
+                                    (ip->ip_hl << 2) + (ip_off & IP_OFFMASK),
+                                    s, dir);
+
                         icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
                             ifp->if_mtu);
                         goto done;
@@ -6104,9 +6198,14 @@
                 nd6_output_ifp(ifp, ifp, m0, &dst, NULL);
         else {
                 in6_ifstat_inc(ifp, ifs6_in_toobig);
-                if (r->rt != PF_DUPTO)
+                if (r->rt != PF_DUPTO) {
+                        if (s && pd->nat_rule != NULL)
+                                PACKET_UNDO_NAT(m0, pd,
+                                    ((caddr_t)ip6 - m0->m_data) +
+                                    sizeof(struct ip6_hdr), s, dir);
+
                         icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
-                else
+                } else
                         goto bad;
         }
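Side note, not part of the patch: the IPPROTO_ICMP case above rewrites icmp_id and patches the checksum in place via pf_cksum_fixup(), which is the usual incremental ones'-complement update (checksum' = checksum + old - new, then folded). The stand-alone user-space sketch below reproduces that arithmetic and checks it against a full recomputation; the names cksum16 and cksum_fixup and the sample words are illustrative only and do not exist in pf.

/*
 * Illustrative sketch: incremental Internet-checksum adjustment in the
 * same style as pf_cksum_fixup(), verified against a full recompute.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement checksum over 16-bit words (even-sized buffer assumed). */
static uint16_t
cksum16(const uint16_t *p, int nwords)
{
        uint32_t sum = 0;

        while (nwords-- > 0)
                sum += *p++;
        sum = (sum >> 16) + (sum & 0xffff);
        sum += (sum >> 16);
        return ((uint16_t)(~sum & 0xffff));
}

/* Adjust an existing checksum for one 16-bit word changing from o to n. */
static uint16_t
cksum_fixup(uint16_t cksum, uint16_t o, uint16_t n)
{
        uint32_t l = (uint32_t)cksum + o - n;

        l = (l >> 16) + (l & 0xffff);
        return ((uint16_t)(l & 0xffff));
}

int
main(void)
{
        uint16_t words[4] = { 0x4500, 0x0054, 0x1234, 0x0000 };
        uint16_t full = cksum16(words, 4);                  /* checksum with old id 0x1234 */
        uint16_t fixed = cksum_fixup(full, 0x1234, 0xabcd); /* rewrite the id word */

        words[2] = 0xabcd;
        assert(fixed == cksum16(words, 4));                 /* matches a full recompute */
        printf("incremental fixup ok: 0x%04x\n", fixed);
        return (0);
}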