diff --git a/sys/netinet/ip_divert.c b/sys/netinet/ip_divert.c index 70d3fbd1f230..c3f9c43b8f70 100644 --- a/sys/netinet/ip_divert.c +++ b/sys/netinet/ip_divert.c @@ -1,844 +1,850 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_sctp.h" #ifndef INET #error "IPDIVERT requires INET" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #endif #if defined(SCTP) || defined(SCTP_SUPPORT) #include #endif #include /* * Divert sockets */ /* * Allocate enough space to hold a full IP packet */ #define DIVSNDQ (65536 + 100) #define DIVRCVQ (65536 + 100) /* * Divert sockets work in conjunction with ipfw or other packet filters, * see the divert(4) manpage for features. * Packets are selected by the packet filter and tagged with an * MTAG_IPFW_RULE tag carrying the 'divert port' number (as set by * the packet filter) and information on the matching filter rule for * subsequent reinjection. The divert_port is used to put the packet * on the corresponding divert socket, while the rule number is passed * up (at least partially) as the sin_port in the struct sockaddr. * * Packets written to the divert socket carry in sin_addr a * destination address, and in sin_port the number of the filter rule * after which to continue processing. * If the destination address is INADDR_ANY, the packet is treated as * as outgoing and sent to ip_output(); otherwise it is treated as * incoming and sent to ip_input(). * Further, sin_zero carries some information on the interface, * which can be used in the reinject -- see comments in the code. 
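 *
 * For illustration, a minimal userland consumer of this interface
 * could look like the sketch below (a sketch only: the divert port
 * number and the error handling are hypothetical, see divert(4)
 * for the actual contract):
 *
 *	char buf[65535];
 *	int fd = socket(PF_INET, SOCK_RAW, IPPROTO_DIVERT);
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *	    .sin_port = htons(8668) };
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	for (;;) {
 *		socklen_t slen = sizeof(sin);
 *		ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *		    (struct sockaddr *)&sin, &slen);
 *		... inspect or modify the packet in buf ...
 *		sendto(fd, buf, n, 0, (struct sockaddr *)&sin, slen);
 *	}
 *
 * Re-using the received sockaddr_in unmodified (rule number in
 * sin_port, address in sin_addr, interface name in sin_zero) makes
 * the reinjected packet resume filtering after the matching rule.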
 *
 * On reinjection, processing in ip_input() and ip_output()
 * will be exactly the same as for the original packet, except that
 * packet filter processing will start at the rule number after the one
 * written in the sin_port (ipfw does not allow a rule #0, so sin_port=0
 * will apply the entire ruleset to the packet).
 */

/* Internal variables. */
VNET_DEFINE_STATIC(struct inpcbhead, divcb);
VNET_DEFINE_STATIC(struct inpcbinfo, divcbinfo);
#define	V_divcb		VNET(divcb)
#define	V_divcbinfo	VNET(divcbinfo)

static u_long	div_sendspace = DIVSNDQ;	/* XXX sysctl ? */
static u_long	div_recvspace = DIVRCVQ;	/* XXX sysctl ? */

static eventhandler_tag ip_divert_event_tag;

static int div_output_inbound(int family, struct socket *so, struct mbuf *m,
    struct sockaddr_in *sin);
static int div_output_outbound(int family, struct socket *so, struct mbuf *m);

/*
 * Initialize divert connection block queue.
 */
static void
div_zone_change(void *tag)
{

	uma_zone_set_max(V_divcbinfo.ipi_zone, maxsockets);
}

static int
div_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "divinp");
	return (0);
}

static void
div_init(void)
{

	/*
	 * XXX We don't use the hash list for divert IP, but it's easier to
	 * allocate one-entry hash lists than it is to check all over the
	 * place for hashbase == NULL.
	 */
	in_pcbinfo_init(&V_divcbinfo, "div", &V_divcb, 1, 1, "divcb",
	    div_inpcb_init, IPI_HASHFIELDS_NONE);
}

static void
div_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_divcbinfo);
}
VNET_SYSUNINIT(divert, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    div_destroy, NULL);

/*
 * IPPROTO_DIVERT is not in the real IP protocol number space; this
 * function should never be called.  Just in case, drop any packets.
 */
static int
div_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;

	KMOD_IPSTAT_INC(ips_noproto);
	m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Divert a packet by passing it up to the divert socket at port 'port'.
 *
 * Set up generic address and protocol structures for div_input routine,
 * then pass them along with mbuf chain.
 */
static void
divert_packet(struct mbuf *m, bool incoming)
{
	struct ip *ip;
	struct inpcb *inp;
	struct socket *sa;
	u_int16_t nport;
	struct sockaddr_in divsrc;
	struct m_tag *mtag;

	NET_EPOCH_ASSERT();

	mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
	if (mtag == NULL) {
		m_freem(m);
		return;
	}
	/* Assure header */
	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return;
	ip = mtod(m, struct ip *);

	/* Delayed checksums are currently not compatible with divert. */
	if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+		m = mb_unmapped_to_ext(m);
+		if (m == NULL)
+			return;
		in_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
#if defined(SCTP) || defined(SCTP_SUPPORT)
	if (m->m_pkthdr.csum_flags & CSUM_SCTP) {
+		m = mb_unmapped_to_ext(m);
+		if (m == NULL)
+			return;
		sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2));
		m->m_pkthdr.csum_flags &= ~CSUM_SCTP;
	}
#endif
	bzero(&divsrc, sizeof(divsrc));
	divsrc.sin_len = sizeof(divsrc);
	divsrc.sin_family = AF_INET;
	/* record matching rule, in host format */
	divsrc.sin_port = ((struct ipfw_rule_ref *)(mtag+1))->rulenum;
	/*
	 * Record receive interface address, if any.
	 * But only for incoming packets.
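	 * Outbound packets have no receive interface, so for them
	 * divsrc.sin_addr simply remains 0.0.0.0 from the bzero above.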
	 */
	if (incoming) {
		struct ifaddr *ifa;
		struct ifnet *ifp;

		/* Sanity check */
		M_ASSERTPKTHDR(m);

		/* Find IP address for receive interface */
		ifp = m->m_pkthdr.rcvif;
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			divsrc.sin_addr =
			    ((struct sockaddr_in *) ifa->ifa_addr)->sin_addr;
			break;
		}
	}
	/*
	 * Record the incoming interface name whenever we have one.
	 */
	if (m->m_pkthdr.rcvif) {
		/*
		 * Hide the actual interface name in the sin_zero array.
		 * XXX This needs to be moved to a different sockaddr type
		 * for divert, e.g. sockaddr_div with multiple fields like
		 * sockaddr_dl.  Presently we have only 7 bytes but that
		 * will do for now as most interfaces are 4 or less + 2 or
		 * less bytes for unit.  There is probably a faster way of
		 * doing this, possibly taking it from the sockaddr_dl on
		 * the iface.  This solves the problem of a P2P link and a
		 * LAN interface having the same address, which can result
		 * in the wrong interface being assigned to the packet when
		 * fed back into the divert socket.  Theoretically if the
		 * daemon saves and re-uses the sockaddr_in as suggested in
		 * the man pages, this iface name will come along for the
		 * ride.  (see div_output for the other half of this.)
		 */
		strlcpy(divsrc.sin_zero, m->m_pkthdr.rcvif->if_xname,
		    sizeof(divsrc.sin_zero));
	}

	/* Put packet on socket queue, if any */
	sa = NULL;
	nport = htons((u_int16_t)(((struct ipfw_rule_ref *)(mtag+1))->info));
	CK_LIST_FOREACH(inp, &V_divcb, inp_list) {
		/* XXX why does only one socket match? */
		if (inp->inp_lport == nport) {
			INP_RLOCK(inp);
			if (__predict_false(inp->inp_flags2 & INP_FREED)) {
				INP_RUNLOCK(inp);
				continue;
			}
			sa = inp->inp_socket;
			SOCKBUF_LOCK(&sa->so_rcv);
			if (sbappendaddr_locked(&sa->so_rcv,
			    (struct sockaddr *)&divsrc, m,
			    (struct mbuf *)0) == 0) {
				SOCKBUF_UNLOCK(&sa->so_rcv);
				sa = NULL;	/* force mbuf reclaim below */
			} else
				sorwakeup_locked(sa);
			INP_RUNLOCK(inp);
			break;
		}
	}
	if (sa == NULL) {
		m_freem(m);
		KMOD_IPSTAT_INC(ips_noproto);
		KMOD_IPSTAT_DEC(ips_delivered);
	}
}

/*
 * Deliver packet back into the IP processing machinery.
 *
 * If no address is specified, or address is 0.0.0.0, send to ip_output();
 * otherwise, send to ip_input() and mark as having been received on
 * the interface with that address.
 */
static int
div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin,
    struct mbuf *control)
{
	struct epoch_tracker et;
	const struct ip *ip;
	struct m_tag *mtag;
	struct ipfw_rule_ref *dt;
	int error, family;

	/*
	 * An mbuf may not have come from userland, but we pretend
	 * that it has.
	 */
	m->m_pkthdr.rcvif = NULL;
	m->m_nextpkt = NULL;
	M_SETFIB(m, so->so_fibnum);

	if (control)
		m_freem(control);		/* XXX */

	mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
	if (mtag == NULL) {
		/* this should be normal */
		mtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
		    sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
		if (mtag == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		m_tag_prepend(m, mtag);
	}
	dt = (struct ipfw_rule_ref *)(mtag+1);

	/* Loopback avoidance and state recovery */
	if (sin) {
		int i;

		/* set the starting point. We provide a non-zero slot,
		 * but a non_matching chain_id to skip that info and use
		 * the rulenum/rule_id.
		 */
		dt->slot = 1; /* dummy, chain_id is invalid */
		dt->chain_id = 0;
		dt->rulenum = sin->sin_port+1; /* host format ? */
		dt->rule_id = 0;
		/* XXX: broken for IPv6 */

		/*
		 * Find receive interface with the given name, stuffed
		 * (if it exists) in the sin_zero[] field.
		 * The name is user supplied data so don't trust its size
		 * or that it is zero terminated.
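		 * The bounded scan below is effectively
		 * strnlen(sin->sin_zero, sizeof(sin->sin_zero)); the name
		 * is used only if it is non-empty and NUL-terminated
		 * within the field.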
		 */
		for (i = 0; i < sizeof(sin->sin_zero) && sin->sin_zero[i]; i++)
			;
		if (i > 0 && i < sizeof(sin->sin_zero))
			m->m_pkthdr.rcvif = ifunit(sin->sin_zero);
	}

	ip = mtod(m, struct ip *);
	switch (ip->ip_v) {
	case IPVERSION:
		family = AF_INET;
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		family = AF_INET6;
		break;
#endif
	default:
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	/* Reinject packet into the system as incoming or outgoing */
	NET_EPOCH_ENTER(et);
	if (!sin || sin->sin_addr.s_addr == 0) {
		dt->info |= IPFW_IS_DIVERT | IPFW_INFO_OUT;
		error = div_output_outbound(family, so, m);
	} else {
		dt->info |= IPFW_IS_DIVERT | IPFW_INFO_IN;
		error = div_output_inbound(family, so, m, sin);
	}
	NET_EPOCH_EXIT(et);

	if (error != 0)
		m_freem(m);

	return (error);
}

/*
 * Sends mbuf @m to the wire via ip[6]_output().
 *
 * Returns 0 on success, @m is consumed.
 * On failure, returns error code.  It is the caller's responsibility
 * to free @m.
 */
static int
div_output_outbound(int family, struct socket *so, struct mbuf *m)
{
	struct ip *const ip = mtod(m, struct ip *);
	struct mbuf *options;
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	INP_RLOCK(inp);
	switch (family) {
	case AF_INET:
		/*
		 * Don't allow both user specified and setsockopt
		 * options, and don't allow packet length sizes that
		 * will crash.
		 */
		if ((((ip->ip_hl << 2) != sizeof(struct ip)) &&
		    inp->inp_options != NULL) ||
		    ((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) {
			INP_RUNLOCK(inp);
			return (EINVAL);
		}
		break;
#ifdef INET6
	case AF_INET6:
	    {
		struct ip6_hdr *const ip6 = mtod(m, struct ip6_hdr *);

		/* Don't allow packet length sizes that will crash */
		if (((u_short)ntohs(ip6->ip6_plen) > m->m_pkthdr.len)) {
			INP_RUNLOCK(inp);
			return (EINVAL);
		}
		break;
	    }
#endif
	}

	/* Send packet to output processing */
	KMOD_IPSTAT_INC(ips_rawout);		/* XXX */

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif
	/*
	 * Get ready to inject the packet into ip_output().
	 * Just in case socket options were specified on the
	 * divert socket, we duplicate them.  This is done
	 * to avoid having to hold the PCB locks over the call
	 * to ip_output(), as doing this results in a number of
	 * lock ordering complexities.
	 *
	 * Note that we set the multicast options argument for
	 * ip_output() to NULL since it should be invariant that
	 * they are not present.
	 */
	KASSERT(inp->inp_moptions == NULL,
	    ("multicast options set on a divert socket"));
	/*
	 * XXXCSJP: It is unclear to me whether or not it makes
	 * sense for divert sockets to have options.  However,
	 * for now we will duplicate them with the INP locks
	 * held so we can use them in ip_output() without
	 * requiring a reference to the pcb.
	 */
	options = NULL;
	if (inp->inp_options != NULL) {
		options = m_dup(inp->inp_options, M_NOWAIT);
		if (options == NULL) {
			INP_RUNLOCK(inp);
			return (ENOBUFS);
		}
	}
	INP_RUNLOCK(inp);

	error = 0;
	switch (family) {
	case AF_INET:
		error = ip_output(m, options, NULL,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0)
		    | IP_ALLOWBROADCAST | IP_RAWOUTPUT, NULL, NULL);
		break;
#ifdef INET6
	case AF_INET6:
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
		break;
#endif
	}
	if (options != NULL)
		m_freem(options);

	return (error);
}

/*
 * Schedules mbuf @m for local processing via IPv4/IPv6 netisr queue.
 *
 * Returns 0 on success, @m is consumed.
 * Returns error code on failure.  It is the caller's responsibility
 * to free @m.
 */
static int
div_output_inbound(int family, struct socket *so, struct mbuf *m,
    struct sockaddr_in *sin)
{
	const struct ip *ip;
	struct ifaddr *ifa;

	if (m->m_pkthdr.rcvif == NULL) {
		/*
		 * No luck with the name, check by IP address.
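		 * (The sin_addr supplied by the daemon must be an address
		 * configured on some local interface; otherwise the packet
		 * is rejected with EADDRNOTAVAIL below.)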
		 * Clear the port and the ifname to make sure
		 * there are no distractions for ifa_ifwithaddr.
		 */

		/* XXX: broken for IPv6 */
		bzero(sin->sin_zero, sizeof(sin->sin_zero));
		sin->sin_port = 0;
		ifa = ifa_ifwithaddr((struct sockaddr *) sin);
		if (ifa == NULL)
			return (EADDRNOTAVAIL);
		m->m_pkthdr.rcvif = ifa->ifa_ifp;
	}
#ifdef MAC
	mac_socket_create_mbuf(so, m);
#endif
	/* Send packet to input processing via netisr */
	switch (family) {
	case AF_INET:
		ip = mtod(m, struct ip *);
		/*
		 * Restore M_BCAST flag when destination address is
		 * broadcast. It is expected by ip_tryforward().
		 */
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)))
			m->m_flags |= M_MCAST;
		else if (in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			m->m_flags |= M_BCAST;
		netisr_queue_src(NETISR_IP, (uintptr_t)so, m);
		break;
#ifdef INET6
	case AF_INET6:
		netisr_queue_src(NETISR_IPV6, (uintptr_t)so, m);
		break;
#endif
	default:
		return (EINVAL);
	}

	return (0);
}

static int
div_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("div_attach: inp != NULL"));
	if (td != NULL) {
		error = priv_check(td, PRIV_NETINET_DIVERT);
		if (error)
			return (error);
	}
	error = soreserve(so, div_sendspace, div_recvspace);
	if (error)
		return error;
	INP_INFO_WLOCK(&V_divcbinfo);
	error = in_pcballoc(so, &V_divcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_divcbinfo);
		return error;
	}
	inp = (struct inpcb *)so->so_pcb;
	INP_INFO_WUNLOCK(&V_divcbinfo);
	inp->inp_ip_p = proto;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_flags |= INP_HDRINCL;
	INP_WUNLOCK(inp);
	return 0;
}

static void
div_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("div_detach: inp == NULL"));
	INP_INFO_WLOCK(&V_divcbinfo);
	INP_WLOCK(inp);
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_divcbinfo);
}

static int
div_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("div_bind: inp == NULL"));
	/* in_pcbbind assumes that nam is a sockaddr_in
	 * and in_pcbbind requires a valid address.  Since divert
	 * sockets don't supply one, we need to make sure the address
	 * is filled in properly.
	 * XXX -- divert should not be abusing in_pcbbind
	 * and should probably have its own family.
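	 * As a consequence a divert "port" is just a 16-bit number in
	 * its own namespace, shared by all divert sockets and unrelated
	 * to TCP or UDP port numbers.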
*/ if (nam->sa_family != AF_INET) return EAFNOSUPPORT; ((struct sockaddr_in *)nam)->sin_addr.s_addr = INADDR_ANY; INP_INFO_WLOCK(&V_divcbinfo); INP_WLOCK(inp); INP_HASH_WLOCK(&V_divcbinfo); error = in_pcbbind(inp, nam, td->td_ucred); INP_HASH_WUNLOCK(&V_divcbinfo); INP_WUNLOCK(inp); INP_INFO_WUNLOCK(&V_divcbinfo); return error; } static int div_shutdown(struct socket *so) { struct inpcb *inp; inp = sotoinpcb(so); KASSERT(inp != NULL, ("div_shutdown: inp == NULL")); INP_WLOCK(inp); socantsendmore(so); INP_WUNLOCK(inp); return 0; } static int div_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { /* Packet must have a header (but that's about it) */ if (m->m_len < sizeof (struct ip) && (m = m_pullup(m, sizeof (struct ip))) == NULL) { KMOD_IPSTAT_INC(ips_toosmall); m_freem(m); return EINVAL; } /* Send packet */ return div_output(so, m, (struct sockaddr_in *)nam, control); } static void div_ctlinput(int cmd, struct sockaddr *sa, void *vip) { struct in_addr faddr; faddr = ((struct sockaddr_in *)sa)->sin_addr; if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) return; if (PRC_IS_REDIRECT(cmd)) return; } static int div_pcblist(SYSCTL_HANDLER_ARGS) { struct xinpgen xig; struct epoch_tracker et; struct inpcb *inp; int error; if (req->newptr != 0) return EPERM; if (req->oldptr == 0) { int n; n = V_divcbinfo.ipi_count; n += imax(n / 8, 10); req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb); return 0; } if ((error = sysctl_wire_old_buffer(req, 0)) != 0) return (error); bzero(&xig, sizeof(xig)); xig.xig_len = sizeof xig; xig.xig_count = V_divcbinfo.ipi_count; xig.xig_gen = V_divcbinfo.ipi_gencnt; xig.xig_sogen = so_gencnt; error = SYSCTL_OUT(req, &xig, sizeof xig); if (error) return error; NET_EPOCH_ENTER(et); for (inp = CK_LIST_FIRST(V_divcbinfo.ipi_listhead); inp != NULL; inp = CK_LIST_NEXT(inp, inp_list)) { INP_RLOCK(inp); if (inp->inp_gencnt <= xig.xig_gen) { struct xinpcb xi; in_pcbtoxinpcb(inp, &xi); INP_RUNLOCK(inp); error = SYSCTL_OUT(req, &xi, sizeof xi); } else INP_RUNLOCK(inp); } NET_EPOCH_EXIT(et); if (!error) { /* * Give the user an updated idea of our state. * If the generation differs from what we told * her before, she knows that something happened * while we were processing this request, and it * might be necessary to retry. */ xig.xig_gen = V_divcbinfo.ipi_gencnt; xig.xig_sogen = so_gencnt; xig.xig_count = V_divcbinfo.ipi_count; error = SYSCTL_OUT(req, &xig, sizeof xig); } return (error); } #ifdef SYSCTL_NODE static SYSCTL_NODE(_net_inet, IPPROTO_DIVERT, divert, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "IPDIVERT"); SYSCTL_PROC(_net_inet_divert, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, div_pcblist, "S,xinpcb", "List of active divert sockets"); #endif struct pr_usrreqs div_usrreqs = { .pru_attach = div_attach, .pru_bind = div_bind, .pru_control = in_control, .pru_detach = div_detach, .pru_peeraddr = in_getpeeraddr, .pru_send = div_send, .pru_shutdown = div_shutdown, .pru_sockaddr = in_getsockaddr, .pru_sosetlabel = in_pcbsosetlabel }; struct protosw div_protosw = { .pr_type = SOCK_RAW, .pr_protocol = IPPROTO_DIVERT, .pr_flags = PR_ATOMIC|PR_ADDR, .pr_input = div_input, .pr_ctlinput = div_ctlinput, .pr_ctloutput = ip_ctloutput, .pr_init = div_init, .pr_usrreqs = &div_usrreqs }; static int div_modevent(module_t mod, int type, void *unused) { int err = 0; switch (type) { case MOD_LOAD: /* * Protocol will be initialized by pf_proto_register(). 
* We don't have to register ip_protox because we are not * a true IP protocol that goes over the wire. */ err = pf_proto_register(PF_INET, &div_protosw); if (err != 0) return (err); ip_divert_ptr = divert_packet; ip_divert_event_tag = EVENTHANDLER_REGISTER(maxsockets_change, div_zone_change, NULL, EVENTHANDLER_PRI_ANY); break; case MOD_QUIESCE: /* * IPDIVERT may normally not be unloaded because of the * potential race conditions. Tell kldunload we can't be * unloaded unless the unload is forced. */ err = EPERM; break; case MOD_UNLOAD: /* * Forced unload. * * Module ipdivert can only be unloaded if no sockets are * connected. Maybe this can be changed later to forcefully * disconnect any open sockets. * * XXXRW: Note that there is a slight race here, as a new * socket open request could be spinning on the lock and then * we destroy the lock. */ INP_INFO_WLOCK(&V_divcbinfo); if (V_divcbinfo.ipi_count != 0) { err = EBUSY; INP_INFO_WUNLOCK(&V_divcbinfo); break; } ip_divert_ptr = NULL; err = pf_proto_unregister(PF_INET, IPPROTO_DIVERT, SOCK_RAW); INP_INFO_WUNLOCK(&V_divcbinfo); #ifndef VIMAGE div_destroy(NULL); #endif EVENTHANDLER_DEREGISTER(maxsockets_change, ip_divert_event_tag); break; default: err = EOPNOTSUPP; break; } return err; } static moduledata_t ipdivertmod = { "ipdivert", div_modevent, 0 }; DECLARE_MODULE(ipdivert, ipdivertmod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); MODULE_DEPEND(ipdivert, ipfw, 3, 3, 3); MODULE_VERSION(ipdivert, 1); diff --git a/sys/netpfil/ipfw/ip_fw_nat.c b/sys/netpfil/ipfw/ip_fw_nat.c index bcda3cff011c..d7b31c29d4ec 100644 --- a/sys/netpfil/ipfw/ip_fw_nat.c +++ b/sys/netpfil/ipfw/ip_fw_nat.c @@ -1,1247 +1,1248 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Paolo Pisati * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX for in_cksum */ struct cfg_spool { LIST_ENTRY(cfg_spool) _next; /* chain of spool instances */ struct in_addr addr; uint16_t port; }; /* Nat redirect configuration. 
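 *
 * One entry is created for each ipfw(8) redirect_addr/redirect_port/
 * redirect_proto option; e.g. the (hypothetical) rule
 *
 *	ipfw nat 1 config ip 192.0.2.1 redirect_port tcp 10.0.0.2:80 80
 *
 * produces a NAT44_REDIR_PORT entry with paddr 192.0.2.1, pport 80,
 * laddr 10.0.0.2, and lport 80.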
*/ struct cfg_redir { LIST_ENTRY(cfg_redir) _next; /* chain of redir instances */ uint16_t mode; /* type of redirect mode */ uint16_t proto; /* protocol: tcp/udp */ struct in_addr laddr; /* local ip address */ struct in_addr paddr; /* public ip address */ struct in_addr raddr; /* remote ip address */ uint16_t lport; /* local port */ uint16_t pport; /* public port */ uint16_t rport; /* remote port */ uint16_t pport_cnt; /* number of public ports */ uint16_t rport_cnt; /* number of remote ports */ struct alias_link **alink; u_int16_t spool_cnt; /* num of entry in spool chain */ /* chain of spool instances */ LIST_HEAD(spool_chain, cfg_spool) spool_chain; }; /* Nat configuration data struct. */ struct cfg_nat { /* chain of nat instances */ LIST_ENTRY(cfg_nat) _next; int id; /* nat id */ struct in_addr ip; /* nat ip address */ struct libalias *lib; /* libalias instance */ int mode; /* aliasing mode */ int redir_cnt; /* number of entry in spool chain */ /* chain of redir instances */ LIST_HEAD(redir_chain, cfg_redir) redir_chain; char if_name[IF_NAMESIZE]; /* interface name */ u_short alias_port_lo; /* low range for port aliasing */ u_short alias_port_hi; /* high range for port aliasing */ }; static eventhandler_tag ifaddr_event_tag; static void ifaddr_change(void *arg __unused, struct ifnet *ifp) { struct cfg_nat *ptr; struct ifaddr *ifa; struct ip_fw_chain *chain; KASSERT(curvnet == ifp->if_vnet, ("curvnet(%p) differs from iface vnet(%p)", curvnet, ifp->if_vnet)); if (V_ipfw_vnet_ready == 0 || V_ipfw_nat_ready == 0) return; chain = &V_layer3_chain; IPFW_UH_WLOCK(chain); /* Check every nat entry... */ LIST_FOREACH(ptr, &chain->nat, _next) { struct epoch_tracker et; /* ...using nic 'ifp->if_xname' as dynamic alias address. */ if (strncmp(ptr->if_name, ifp->if_xname, IF_NAMESIZE) != 0) continue; NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr == NULL) continue; if (ifa->ifa_addr->sa_family != AF_INET) continue; IPFW_WLOCK(chain); ptr->ip = ((struct sockaddr_in *) (ifa->ifa_addr))->sin_addr; LibAliasSetAddress(ptr->lib, ptr->ip); IPFW_WUNLOCK(chain); } NET_EPOCH_EXIT(et); } IPFW_UH_WUNLOCK(chain); } /* * delete the pointers for nat entry ix, or all of them if ix < 0 */ static void flush_nat_ptrs(struct ip_fw_chain *chain, const int ix) { ipfw_insn_nat *cmd; int i; IPFW_WLOCK_ASSERT(chain); for (i = 0; i < chain->n_rules; i++) { cmd = (ipfw_insn_nat *)ipfw_get_action(chain->map[i]); if (cmd->o.opcode == O_NAT && cmd->nat != NULL && (ix < 0 || cmd->nat->id == ix)) cmd->nat = NULL; } } static void del_redir_spool_cfg(struct cfg_nat *n, struct redir_chain *head) { struct cfg_redir *r, *tmp_r; struct cfg_spool *s, *tmp_s; int i, num; LIST_FOREACH_SAFE(r, head, _next, tmp_r) { num = 1; /* Number of alias_link to delete. */ switch (r->mode) { case NAT44_REDIR_PORT: num = r->pport_cnt; /* FALLTHROUGH */ case NAT44_REDIR_ADDR: case NAT44_REDIR_PROTO: /* Delete all libalias redirect entry. */ for (i = 0; i < num; i++) LibAliasRedirectDelete(n->lib, r->alink[i]); /* Del spool cfg if any. */ LIST_FOREACH_SAFE(s, &r->spool_chain, _next, tmp_s) { LIST_REMOVE(s, _next); free(s, M_IPFW); } free(r->alink, M_IPFW); LIST_REMOVE(r, _next); free(r, M_IPFW); break; default: printf("unknown redirect mode: %u\n", r->mode); /* XXX - panic?!?!? 
*/ break; } } } static int add_redir_spool_cfg(char *buf, struct cfg_nat *ptr) { struct cfg_redir *r; struct cfg_spool *s; struct nat44_cfg_redir *ser_r; struct nat44_cfg_spool *ser_s; int cnt, off, i; for (cnt = 0, off = 0; cnt < ptr->redir_cnt; cnt++) { ser_r = (struct nat44_cfg_redir *)&buf[off]; r = malloc(sizeof(*r), M_IPFW, M_WAITOK | M_ZERO); r->mode = ser_r->mode; r->laddr = ser_r->laddr; r->paddr = ser_r->paddr; r->raddr = ser_r->raddr; r->lport = ser_r->lport; r->pport = ser_r->pport; r->rport = ser_r->rport; r->pport_cnt = ser_r->pport_cnt; r->rport_cnt = ser_r->rport_cnt; r->proto = ser_r->proto; r->spool_cnt = ser_r->spool_cnt; //memcpy(r, ser_r, SOF_REDIR); LIST_INIT(&r->spool_chain); off += sizeof(struct nat44_cfg_redir); r->alink = malloc(sizeof(struct alias_link *) * r->pport_cnt, M_IPFW, M_WAITOK | M_ZERO); switch (r->mode) { case NAT44_REDIR_ADDR: r->alink[0] = LibAliasRedirectAddr(ptr->lib, r->laddr, r->paddr); break; case NAT44_REDIR_PORT: for (i = 0 ; i < r->pport_cnt; i++) { /* If remotePort is all ports, set it to 0. */ u_short remotePortCopy = r->rport + i; if (r->rport_cnt == 1 && r->rport == 0) remotePortCopy = 0; r->alink[i] = LibAliasRedirectPort(ptr->lib, r->laddr, htons(r->lport + i), r->raddr, htons(remotePortCopy), r->paddr, htons(r->pport + i), r->proto); if (r->alink[i] == NULL) { r->alink[0] = NULL; break; } } break; case NAT44_REDIR_PROTO: r->alink[0] = LibAliasRedirectProto(ptr->lib ,r->laddr, r->raddr, r->paddr, r->proto); break; default: printf("unknown redirect mode: %u\n", r->mode); break; } if (r->alink[0] == NULL) { printf("LibAliasRedirect* returned NULL\n"); free(r->alink, M_IPFW); free(r, M_IPFW); return (EINVAL); } /* LSNAT handling. */ for (i = 0; i < r->spool_cnt; i++) { ser_s = (struct nat44_cfg_spool *)&buf[off]; s = malloc(sizeof(*s), M_IPFW, M_WAITOK | M_ZERO); s->addr = ser_s->addr; s->port = ser_s->port; LibAliasAddServer(ptr->lib, r->alink[0], s->addr, htons(s->port)); off += sizeof(struct nat44_cfg_spool); /* Hook spool entry. */ LIST_INSERT_HEAD(&r->spool_chain, s, _next); } /* And finally hook this redir entry. */ LIST_INSERT_HEAD(&ptr->redir_chain, r, _next); } return (0); } static void free_nat_instance(struct cfg_nat *ptr) { del_redir_spool_cfg(ptr, &ptr->redir_chain); LibAliasUninit(ptr->lib); free(ptr, M_IPFW); } /* * ipfw_nat - perform mbuf header translation. * * Note V_layer3_chain has to be locked while calling ipfw_nat() in * 'global' operation mode (t == NULL). * */ static int ipfw_nat(struct ip_fw_args *args, struct cfg_nat *t, struct mbuf *m) { struct mbuf *mcl; struct ip *ip; /* XXX - libalias duct tape */ int ldt, retval, found; struct ip_fw_chain *chain; char *c; ldt = 0; retval = 0; mcl = m_megapullup(m, m->m_pkthdr.len); if (mcl == NULL) { args->m = NULL; return (IP_FW_DENY); } + M_ASSERTMAPPED(mcl); ip = mtod(mcl, struct ip *); /* * XXX - Libalias checksum offload 'duct tape': * * locally generated packets have only pseudo-header checksum * calculated and libalias will break it[1], so mark them for * later fix. Moreover there are cases when libalias modifies * tcp packet data[2], mark them for later fix too. * * [1] libalias was never meant to run in kernel, so it does * not have any knowledge about checksum offloading, and * expects a packet with a full internet checksum. * Unfortunately, packets generated locally will have just the * pseudo header calculated, and when libalias tries to adjust * the checksum it will actually compute a wrong value. 
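 * (Concretely: for such packets th_sum/uh_sum holds only
 * in_pseudo(ip_src, ip_dst, htons(proto + payload_len)), with the
 * final fold left to the hardware; the ldt fixup at the end of this
 * function recreates exactly that state.)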
* * [2] when libalias modifies tcp's data content, full TCP * checksum has to be recomputed: the problem is that * libalias does not have any idea about checksum offloading. * To work around this, we do not do checksumming in LibAlias, * but only mark the packets in th_x2 field. If we receive a * marked packet, we calculate correct checksum for it * aware of offloading. Why such a terrible hack instead of * recalculating checksum for each packet? * Because the previous checksum was not checked! * Recalculating checksums for EVERY packet will hide ALL * transmission errors. Yes, marked packets still suffer from * this problem. But, sigh, natd(8) has this problem, too. * * TODO: -make libalias mbuf aware (so * it can handle delayed checksum and tso) */ if (mcl->m_pkthdr.rcvif == NULL && mcl->m_pkthdr.csum_flags & CSUM_DELAY_DATA) ldt = 1; c = mtod(mcl, char *); /* Check if this is 'global' instance */ if (t == NULL) { if (args->flags & IPFW_ARGS_IN) { /* Wrong direction, skip processing */ args->m = mcl; return (IP_FW_NAT); } found = 0; chain = &V_layer3_chain; IPFW_RLOCK_ASSERT(chain); /* Check every nat entry... */ LIST_FOREACH(t, &chain->nat, _next) { if ((t->mode & PKT_ALIAS_SKIP_GLOBAL) != 0) continue; retval = LibAliasOutTry(t->lib, c, mcl->m_len + M_TRAILINGSPACE(mcl), 0); if (retval == PKT_ALIAS_OK) { /* Nat instance recognises state */ found = 1; break; } } if (found != 1) { /* No instance found, return ignore */ args->m = mcl; return (IP_FW_NAT); } } else { if (args->flags & IPFW_ARGS_IN) retval = LibAliasIn(t->lib, c, mcl->m_len + M_TRAILINGSPACE(mcl)); else retval = LibAliasOut(t->lib, c, mcl->m_len + M_TRAILINGSPACE(mcl)); } /* * We drop packet when: * 1. libalias returns PKT_ALIAS_ERROR; * 2. For incoming packets: * a) for unresolved fragments; * b) libalias returns PKT_ALIAS_IGNORED and * PKT_ALIAS_DENY_INCOMING flag is set. */ if (retval == PKT_ALIAS_ERROR || ((args->flags & IPFW_ARGS_IN) && (retval == PKT_ALIAS_UNRESOLVED_FRAGMENT || (retval == PKT_ALIAS_IGNORED && (t->mode & PKT_ALIAS_DENY_INCOMING) != 0)))) { /* XXX - should i add some logging? */ m_free(mcl); args->m = NULL; return (IP_FW_DENY); } if (retval == PKT_ALIAS_RESPOND) mcl->m_flags |= M_SKIP_FIREWALL; mcl->m_pkthdr.len = mcl->m_len = ntohs(ip->ip_len); /* * XXX - libalias checksum offload * 'duct tape' (see above) */ if ((ip->ip_off & htons(IP_OFFMASK)) == 0 && ip->ip_p == IPPROTO_TCP) { struct tcphdr *th; th = (struct tcphdr *)(ip + 1); if (th->th_x2) ldt = 1; } if (ldt) { struct tcphdr *th; struct udphdr *uh; uint16_t ip_len, cksum; ip_len = ntohs(ip->ip_len); cksum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(ip->ip_p + ip_len - (ip->ip_hl << 2))); switch (ip->ip_p) { case IPPROTO_TCP: th = (struct tcphdr *)(ip + 1); /* * Maybe it was set in * libalias... 
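			 * th_x2 is (ab)used as an in-band marker, set by
			 * libalias when the checksum must be recomputed;
			 * clear it before the packet leaves the stack.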
*/ th->th_x2 = 0; th->th_sum = cksum; mcl->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); break; case IPPROTO_UDP: uh = (struct udphdr *)(ip + 1); uh->uh_sum = cksum; mcl->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); break; } /* No hw checksum offloading: do it ourselves */ if ((mcl->m_pkthdr.csum_flags & CSUM_DELAY_DATA) == 0) { in_delayed_cksum(mcl); mcl->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } } args->m = mcl; return (IP_FW_NAT); } static struct cfg_nat * lookup_nat(struct nat_list *l, int nat_id) { struct cfg_nat *res; LIST_FOREACH(res, l, _next) { if (res->id == nat_id) break; } return res; } static struct cfg_nat * lookup_nat_name(struct nat_list *l, char *name) { struct cfg_nat *res; int id; char *errptr; id = strtol(name, &errptr, 10); if (id == 0 || *errptr != '\0') return (NULL); LIST_FOREACH(res, l, _next) { if (res->id == id) break; } return (res); } /* IP_FW3 configuration routines */ static void nat44_config(struct ip_fw_chain *chain, struct nat44_cfg_nat *ucfg) { struct cfg_nat *ptr, *tcfg; int gencnt; /* * Find/create nat rule. */ IPFW_UH_WLOCK(chain); gencnt = chain->gencnt; ptr = lookup_nat_name(&chain->nat, ucfg->name); if (ptr == NULL) { IPFW_UH_WUNLOCK(chain); /* New rule: allocate and init new instance. */ ptr = malloc(sizeof(struct cfg_nat), M_IPFW, M_WAITOK | M_ZERO); ptr->lib = LibAliasInit(NULL); LIST_INIT(&ptr->redir_chain); } else { /* Entry already present: temporarily unhook it. */ IPFW_WLOCK(chain); LIST_REMOVE(ptr, _next); flush_nat_ptrs(chain, ptr->id); IPFW_WUNLOCK(chain); IPFW_UH_WUNLOCK(chain); } /* * Basic nat (re)configuration. */ ptr->id = strtol(ucfg->name, NULL, 10); /* * XXX - what if this rule doesn't nat any ip and just * redirect? * do we set aliasaddress to 0.0.0.0? */ ptr->ip = ucfg->ip; ptr->redir_cnt = ucfg->redir_cnt; ptr->mode = ucfg->mode; ptr->alias_port_lo = ucfg->alias_port_lo; ptr->alias_port_hi = ucfg->alias_port_hi; strlcpy(ptr->if_name, ucfg->if_name, sizeof(ptr->if_name)); LibAliasSetMode(ptr->lib, ptr->mode, ~0); LibAliasSetAddress(ptr->lib, ptr->ip); LibAliasSetAliasPortRange(ptr->lib, ptr->alias_port_lo, ptr->alias_port_hi); /* * Redir and LSNAT configuration. */ /* Delete old cfgs. */ del_redir_spool_cfg(ptr, &ptr->redir_chain); /* Add new entries. */ add_redir_spool_cfg((char *)(ucfg + 1), ptr); IPFW_UH_WLOCK(chain); /* Extra check to avoid race with another ipfw_nat_cfg() */ tcfg = NULL; if (gencnt != chain->gencnt) tcfg = lookup_nat_name(&chain->nat, ucfg->name); IPFW_WLOCK(chain); if (tcfg != NULL) LIST_REMOVE(tcfg, _next); LIST_INSERT_HEAD(&chain->nat, ptr, _next); IPFW_WUNLOCK(chain); chain->gencnt++; IPFW_UH_WUNLOCK(chain); if (tcfg != NULL) free_nat_instance(ptr); } /* * Creates/configure nat44 instance * Data layout (v0)(current): * Request: [ ipfw_obj_header nat44_cfg_nat .. 
] * * Returns 0 on success */ static int nat44_cfg(struct ip_fw_chain *chain, ip_fw3_opheader *op3, struct sockopt_data *sd) { ipfw_obj_header *oh; struct nat44_cfg_nat *ucfg; int id; size_t read; char *errptr; /* Check minimum header size */ if (sd->valsize < (sizeof(*oh) + sizeof(*ucfg))) return (EINVAL); oh = (ipfw_obj_header *)sd->kbuf; /* Basic length checks for TLVs */ if (oh->ntlv.head.length != sizeof(oh->ntlv)) return (EINVAL); ucfg = (struct nat44_cfg_nat *)(oh + 1); /* Check if name is properly terminated and looks like number */ if (strnlen(ucfg->name, sizeof(ucfg->name)) == sizeof(ucfg->name)) return (EINVAL); id = strtol(ucfg->name, &errptr, 10); if (id == 0 || *errptr != '\0') return (EINVAL); read = sizeof(*oh) + sizeof(*ucfg); /* Check number of redirs */ if (sd->valsize < read + ucfg->redir_cnt*sizeof(struct nat44_cfg_redir)) return (EINVAL); nat44_config(chain, ucfg); return (0); } /* * Destroys given nat instances. * Data layout (v0)(current): * Request: [ ipfw_obj_header ] * * Returns 0 on success */ static int nat44_destroy(struct ip_fw_chain *chain, ip_fw3_opheader *op3, struct sockopt_data *sd) { ipfw_obj_header *oh; struct cfg_nat *ptr; ipfw_obj_ntlv *ntlv; /* Check minimum header size */ if (sd->valsize < sizeof(*oh)) return (EINVAL); oh = (ipfw_obj_header *)sd->kbuf; /* Basic length checks for TLVs */ if (oh->ntlv.head.length != sizeof(oh->ntlv)) return (EINVAL); ntlv = &oh->ntlv; /* Check if name is properly terminated */ if (strnlen(ntlv->name, sizeof(ntlv->name)) == sizeof(ntlv->name)) return (EINVAL); IPFW_UH_WLOCK(chain); ptr = lookup_nat_name(&chain->nat, ntlv->name); if (ptr == NULL) { IPFW_UH_WUNLOCK(chain); return (ESRCH); } IPFW_WLOCK(chain); LIST_REMOVE(ptr, _next); flush_nat_ptrs(chain, ptr->id); IPFW_WUNLOCK(chain); IPFW_UH_WUNLOCK(chain); free_nat_instance(ptr); return (0); } static void export_nat_cfg(struct cfg_nat *ptr, struct nat44_cfg_nat *ucfg) { snprintf(ucfg->name, sizeof(ucfg->name), "%d", ptr->id); ucfg->ip = ptr->ip; ucfg->redir_cnt = ptr->redir_cnt; ucfg->mode = ptr->mode; ucfg->alias_port_lo = ptr->alias_port_lo; ucfg->alias_port_hi = ptr->alias_port_hi; strlcpy(ucfg->if_name, ptr->if_name, sizeof(ucfg->if_name)); } /* * Gets config for given nat instance * Data layout (v0)(current): * Request: [ ipfw_obj_header nat44_cfg_nat .. 
]
 *
 * Returns 0 on success
 */
static int
nat44_get_cfg(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_header *oh;
	struct nat44_cfg_nat *ucfg;
	struct cfg_nat *ptr;
	struct cfg_redir *r;
	struct cfg_spool *s;
	struct nat44_cfg_redir *ser_r;
	struct nat44_cfg_spool *ser_s;
	size_t sz;

	sz = sizeof(*oh) + sizeof(*ucfg);
	/* Check minimum header size */
	if (sd->valsize < sz)
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);

	/* Basic length checks for TLVs */
	if (oh->ntlv.head.length != sizeof(oh->ntlv))
		return (EINVAL);

	ucfg = (struct nat44_cfg_nat *)(oh + 1);

	/* Check if name is properly terminated */
	if (strnlen(ucfg->name, sizeof(ucfg->name)) == sizeof(ucfg->name))
		return (EINVAL);

	IPFW_UH_RLOCK(chain);
	ptr = lookup_nat_name(&chain->nat, ucfg->name);
	if (ptr == NULL) {
		IPFW_UH_RUNLOCK(chain);
		return (ESRCH);
	}

	export_nat_cfg(ptr, ucfg);

	/* Estimate memory amount */
	sz = sizeof(ipfw_obj_header) + sizeof(struct nat44_cfg_nat);
	LIST_FOREACH(r, &ptr->redir_chain, _next) {
		sz += sizeof(struct nat44_cfg_redir);
		LIST_FOREACH(s, &r->spool_chain, _next)
			sz += sizeof(struct nat44_cfg_spool);
	}

	ucfg->size = sz;
	if (sd->valsize < sz) {
		/*
		 * Submitted buffer size is not enough.
		 * We've already filled in the @ucfg structure with
		 * relevant info including size, so we
		 * can return.  Buffer will be flushed automatically.
		 */
		IPFW_UH_RUNLOCK(chain);
		return (ENOMEM);
	}

	/* Size OK, let's copy data */
	LIST_FOREACH(r, &ptr->redir_chain, _next) {
		ser_r = (struct nat44_cfg_redir *)ipfw_get_sopt_space(sd,
		    sizeof(*ser_r));
		ser_r->mode = r->mode;
		ser_r->laddr = r->laddr;
		ser_r->paddr = r->paddr;
		ser_r->raddr = r->raddr;
		ser_r->lport = r->lport;
		ser_r->pport = r->pport;
		ser_r->rport = r->rport;
		ser_r->pport_cnt = r->pport_cnt;
		ser_r->rport_cnt = r->rport_cnt;
		ser_r->proto = r->proto;
		ser_r->spool_cnt = r->spool_cnt;

		LIST_FOREACH(s, &r->spool_chain, _next) {
			ser_s = (struct nat44_cfg_spool *)ipfw_get_sopt_space(
			    sd, sizeof(*ser_s));
			ser_s->addr = s->addr;
			ser_s->port = s->port;
		}
	}

	IPFW_UH_RUNLOCK(chain);

	return (0);
}

/*
 * Lists all nat44 instances currently available in the kernel.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_lheader ]
 * Reply: [ ipfw_obj_lheader nat44_cfg_nat x N ]
 *
 * Returns 0 on success
 */
static int
nat44_list_nat(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *olh;
	struct nat44_cfg_nat *ucfg;
	struct cfg_nat *ptr;
	int nat_count;

	/* Check minimum header size */
	if (sd->valsize < sizeof(ipfw_obj_lheader))
		return (EINVAL);

	olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
	IPFW_UH_RLOCK(chain);
	nat_count = 0;
	LIST_FOREACH(ptr, &chain->nat, _next)
		nat_count++;

	olh->count = nat_count;
	olh->objsize = sizeof(struct nat44_cfg_nat);
	olh->size = sizeof(*olh) + olh->count * olh->objsize;
	if (sd->valsize < olh->size) {
		IPFW_UH_RUNLOCK(chain);
		return (ENOMEM);
	}

	LIST_FOREACH(ptr, &chain->nat, _next) {
		ucfg = (struct nat44_cfg_nat *)ipfw_get_sopt_space(sd,
		    sizeof(*ucfg));
		export_nat_cfg(ptr, ucfg);
	}

	IPFW_UH_RUNLOCK(chain);

	return (0);
}

/*
 * Gets log for given nat instance
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header nat44_cfg_nat ]
 * Reply: [ ipfw_obj_header nat44_cfg_nat LOGBUFFER ]
 *
 * Returns 0 on success
 */
static int
nat44_get_log(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_header *oh;
	struct nat44_cfg_nat *ucfg;
	struct cfg_nat *ptr;
	void *pbuf;
	size_t sz;

	sz = sizeof(*oh) + sizeof(*ucfg);
	/* Check minimum header size */
	if (sd->valsize < sz)
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);

	/* Basic length checks for TLVs */
	if (oh->ntlv.head.length != sizeof(oh->ntlv))
		return (EINVAL);

	ucfg = (struct nat44_cfg_nat *)(oh + 1);

	/* Check if name is properly terminated */
	if (strnlen(ucfg->name, sizeof(ucfg->name)) == sizeof(ucfg->name))
		return (EINVAL);

	IPFW_UH_RLOCK(chain);
	ptr = lookup_nat_name(&chain->nat, ucfg->name);
	if (ptr == NULL) {
		IPFW_UH_RUNLOCK(chain);
		return (ESRCH);
	}

	if (ptr->lib->logDesc == NULL) {
		IPFW_UH_RUNLOCK(chain);
		return (ENOENT);
	}

	export_nat_cfg(ptr, ucfg);

	/* Estimate memory amount */
	ucfg->size = sizeof(struct nat44_cfg_nat) + LIBALIAS_BUF_SIZE;
	if (sd->valsize < sz + sizeof(*oh)) {
		/*
		 * Submitted buffer size is not enough.
		 * We've already filled in the @ucfg structure with
		 * relevant info including size, so we
		 * can return.  Buffer will be flushed automatically.
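		 * (The expected consumer pattern is to issue the sockopt
		 * once with a minimal buffer, read the required size from
		 * @size, and then retry with a large enough buffer.)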
*/ IPFW_UH_RUNLOCK(chain); return (ENOMEM); } pbuf = (void *)ipfw_get_sopt_space(sd, LIBALIAS_BUF_SIZE); memcpy(pbuf, ptr->lib->logDesc, LIBALIAS_BUF_SIZE); IPFW_UH_RUNLOCK(chain); return (0); } static struct ipfw_sopt_handler scodes[] = { { IP_FW_NAT44_XCONFIG, 0, HDIR_SET, nat44_cfg }, { IP_FW_NAT44_DESTROY, 0, HDIR_SET, nat44_destroy }, { IP_FW_NAT44_XGETCONFIG, 0, HDIR_GET, nat44_get_cfg }, { IP_FW_NAT44_LIST_NAT, 0, HDIR_GET, nat44_list_nat }, { IP_FW_NAT44_XGETLOG, 0, HDIR_GET, nat44_get_log }, }; /* * Legacy configuration routines */ struct cfg_spool_legacy { LIST_ENTRY(cfg_spool_legacy) _next; struct in_addr addr; u_short port; }; struct cfg_redir_legacy { LIST_ENTRY(cfg_redir) _next; u_int16_t mode; struct in_addr laddr; struct in_addr paddr; struct in_addr raddr; u_short lport; u_short pport; u_short rport; u_short pport_cnt; u_short rport_cnt; int proto; struct alias_link **alink; u_int16_t spool_cnt; LIST_HEAD(, cfg_spool_legacy) spool_chain; }; struct cfg_nat_legacy { LIST_ENTRY(cfg_nat_legacy) _next; int id; struct in_addr ip; char if_name[IF_NAMESIZE]; int mode; struct libalias *lib; int redir_cnt; LIST_HEAD(, cfg_redir_legacy) redir_chain; }; static int ipfw_nat_cfg(struct sockopt *sopt) { struct cfg_nat_legacy *cfg; struct nat44_cfg_nat *ucfg; struct cfg_redir_legacy *rdir; struct nat44_cfg_redir *urdir; char *buf; size_t len, len2; int error, i; len = sopt->sopt_valsize; len2 = len + 128; /* * Allocate 2x buffer to store converted structures. * new redir_cfg has shrunk, so we're sure that * new buffer size is enough. */ buf = malloc(roundup2(len, 8) + len2, M_TEMP, M_WAITOK | M_ZERO); error = sooptcopyin(sopt, buf, len, sizeof(struct cfg_nat_legacy)); if (error != 0) goto out; cfg = (struct cfg_nat_legacy *)buf; if (cfg->id < 0) { error = EINVAL; goto out; } ucfg = (struct nat44_cfg_nat *)&buf[roundup2(len, 8)]; snprintf(ucfg->name, sizeof(ucfg->name), "%d", cfg->id); strlcpy(ucfg->if_name, cfg->if_name, sizeof(ucfg->if_name)); ucfg->ip = cfg->ip; ucfg->mode = cfg->mode; ucfg->redir_cnt = cfg->redir_cnt; if (len < sizeof(*cfg) + cfg->redir_cnt * sizeof(*rdir)) { error = EINVAL; goto out; } urdir = (struct nat44_cfg_redir *)(ucfg + 1); rdir = (struct cfg_redir_legacy *)(cfg + 1); for (i = 0; i < cfg->redir_cnt; i++) { urdir->mode = rdir->mode; urdir->laddr = rdir->laddr; urdir->paddr = rdir->paddr; urdir->raddr = rdir->raddr; urdir->lport = rdir->lport; urdir->pport = rdir->pport; urdir->rport = rdir->rport; urdir->pport_cnt = rdir->pport_cnt; urdir->rport_cnt = rdir->rport_cnt; urdir->proto = rdir->proto; urdir->spool_cnt = rdir->spool_cnt; urdir++; rdir++; } nat44_config(&V_layer3_chain, ucfg); out: free(buf, M_TEMP); return (error); } static int ipfw_nat_del(struct sockopt *sopt) { struct cfg_nat *ptr; struct ip_fw_chain *chain = &V_layer3_chain; int i; sooptcopyin(sopt, &i, sizeof i, sizeof i); /* XXX validate i */ IPFW_UH_WLOCK(chain); ptr = lookup_nat(&chain->nat, i); if (ptr == NULL) { IPFW_UH_WUNLOCK(chain); return (EINVAL); } IPFW_WLOCK(chain); LIST_REMOVE(ptr, _next); flush_nat_ptrs(chain, i); IPFW_WUNLOCK(chain); IPFW_UH_WUNLOCK(chain); free_nat_instance(ptr); return (0); } static int ipfw_nat_get_cfg(struct sockopt *sopt) { struct ip_fw_chain *chain = &V_layer3_chain; struct cfg_nat *n; struct cfg_nat_legacy *ucfg; struct cfg_redir *r; struct cfg_spool *s; struct cfg_redir_legacy *ser_r; struct cfg_spool_legacy *ser_s; char *data; int gencnt, nat_cnt, len, error; nat_cnt = 0; len = sizeof(nat_cnt); IPFW_UH_RLOCK(chain); retry: gencnt = chain->gencnt; /* 
Estimate memory amount */ LIST_FOREACH(n, &chain->nat, _next) { nat_cnt++; len += sizeof(struct cfg_nat_legacy); LIST_FOREACH(r, &n->redir_chain, _next) { len += sizeof(struct cfg_redir_legacy); LIST_FOREACH(s, &r->spool_chain, _next) len += sizeof(struct cfg_spool_legacy); } } IPFW_UH_RUNLOCK(chain); data = malloc(len, M_TEMP, M_WAITOK | M_ZERO); bcopy(&nat_cnt, data, sizeof(nat_cnt)); nat_cnt = 0; len = sizeof(nat_cnt); IPFW_UH_RLOCK(chain); if (gencnt != chain->gencnt) { free(data, M_TEMP); goto retry; } /* Serialize all the data. */ LIST_FOREACH(n, &chain->nat, _next) { ucfg = (struct cfg_nat_legacy *)&data[len]; ucfg->id = n->id; ucfg->ip = n->ip; ucfg->redir_cnt = n->redir_cnt; ucfg->mode = n->mode; strlcpy(ucfg->if_name, n->if_name, sizeof(ucfg->if_name)); len += sizeof(struct cfg_nat_legacy); LIST_FOREACH(r, &n->redir_chain, _next) { ser_r = (struct cfg_redir_legacy *)&data[len]; ser_r->mode = r->mode; ser_r->laddr = r->laddr; ser_r->paddr = r->paddr; ser_r->raddr = r->raddr; ser_r->lport = r->lport; ser_r->pport = r->pport; ser_r->rport = r->rport; ser_r->pport_cnt = r->pport_cnt; ser_r->rport_cnt = r->rport_cnt; ser_r->proto = r->proto; ser_r->spool_cnt = r->spool_cnt; len += sizeof(struct cfg_redir_legacy); LIST_FOREACH(s, &r->spool_chain, _next) { ser_s = (struct cfg_spool_legacy *)&data[len]; ser_s->addr = s->addr; ser_s->port = s->port; len += sizeof(struct cfg_spool_legacy); } } } IPFW_UH_RUNLOCK(chain); error = sooptcopyout(sopt, data, len); free(data, M_TEMP); return (error); } static int ipfw_nat_get_log(struct sockopt *sopt) { uint8_t *data; struct cfg_nat *ptr; int i, size; struct ip_fw_chain *chain; IPFW_RLOCK_TRACKER; chain = &V_layer3_chain; IPFW_RLOCK(chain); /* one pass to count, one to copy the data */ i = 0; LIST_FOREACH(ptr, &chain->nat, _next) { if (ptr->lib->logDesc == NULL) continue; i++; } size = i * (LIBALIAS_BUF_SIZE + sizeof(int)); data = malloc(size, M_IPFW, M_NOWAIT | M_ZERO); if (data == NULL) { IPFW_RUNLOCK(chain); return (ENOSPC); } i = 0; LIST_FOREACH(ptr, &chain->nat, _next) { if (ptr->lib->logDesc == NULL) continue; bcopy(&ptr->id, &data[i], sizeof(int)); i += sizeof(int); bcopy(ptr->lib->logDesc, &data[i], LIBALIAS_BUF_SIZE); i += LIBALIAS_BUF_SIZE; } IPFW_RUNLOCK(chain); sooptcopyout(sopt, data, size); free(data, M_IPFW); return(0); } static int vnet_ipfw_nat_init(const void *arg __unused) { V_ipfw_nat_ready = 1; return (0); } static int vnet_ipfw_nat_uninit(const void *arg __unused) { struct cfg_nat *ptr, *ptr_temp; struct ip_fw_chain *chain; chain = &V_layer3_chain; IPFW_WLOCK(chain); V_ipfw_nat_ready = 0; LIST_FOREACH_SAFE(ptr, &chain->nat, _next, ptr_temp) { LIST_REMOVE(ptr, _next); free_nat_instance(ptr); } flush_nat_ptrs(chain, -1 /* flush all */); IPFW_WUNLOCK(chain); return (0); } static void ipfw_nat_init(void) { /* init ipfw hooks */ ipfw_nat_ptr = ipfw_nat; lookup_nat_ptr = lookup_nat; ipfw_nat_cfg_ptr = ipfw_nat_cfg; ipfw_nat_del_ptr = ipfw_nat_del; ipfw_nat_get_cfg_ptr = ipfw_nat_get_cfg; ipfw_nat_get_log_ptr = ipfw_nat_get_log; IPFW_ADD_SOPT_HANDLER(1, scodes); ifaddr_event_tag = EVENTHANDLER_REGISTER(ifaddr_event, ifaddr_change, NULL, EVENTHANDLER_PRI_ANY); } static void ipfw_nat_destroy(void) { EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_event_tag); /* deregister ipfw_nat */ IPFW_DEL_SOPT_HANDLER(1, scodes); ipfw_nat_ptr = NULL; lookup_nat_ptr = NULL; ipfw_nat_cfg_ptr = NULL; ipfw_nat_del_ptr = NULL; ipfw_nat_get_cfg_ptr = NULL; ipfw_nat_get_log_ptr = NULL; } static int ipfw_nat_modevent(module_t mod, int type, void *unused) { 
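	/*
	 * Nothing needs to be done on MOD_LOAD/MOD_UNLOAD here: setup and
	 * teardown are driven by the SYSINIT/SYSUNINIT and
	 * VNET_SYSINIT/VNET_SYSUNINIT handlers declared below.
	 */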
int err = 0; switch (type) { case MOD_LOAD: break; case MOD_UNLOAD: break; default: return EOPNOTSUPP; break; } return err; } static moduledata_t ipfw_nat_mod = { "ipfw_nat", ipfw_nat_modevent, 0 }; /* Define startup order. */ #define IPFW_NAT_SI_SUB_FIREWALL SI_SUB_PROTO_FIREWALL #define IPFW_NAT_MODEVENT_ORDER (SI_ORDER_ANY - 128) /* after ipfw */ #define IPFW_NAT_MODULE_ORDER (IPFW_NAT_MODEVENT_ORDER + 1) #define IPFW_NAT_VNET_ORDER (IPFW_NAT_MODEVENT_ORDER + 2) DECLARE_MODULE(ipfw_nat, ipfw_nat_mod, IPFW_NAT_SI_SUB_FIREWALL, SI_ORDER_ANY); MODULE_DEPEND(ipfw_nat, libalias, 1, 1, 1); MODULE_DEPEND(ipfw_nat, ipfw, 3, 3, 3); MODULE_VERSION(ipfw_nat, 1); SYSINIT(ipfw_nat_init, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_MODULE_ORDER, ipfw_nat_init, NULL); VNET_SYSINIT(vnet_ipfw_nat_init, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_VNET_ORDER, vnet_ipfw_nat_init, NULL); SYSUNINIT(ipfw_nat_destroy, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_MODULE_ORDER, ipfw_nat_destroy, NULL); VNET_SYSUNINIT(vnet_ipfw_nat_uninit, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_VNET_ORDER, vnet_ipfw_nat_uninit, NULL); /* end of file */ diff --git a/sys/netpfil/ipfw/nat64/nat64_translate.c b/sys/netpfil/ipfw/nat64/nat64_translate.c index 4ed3bfa765f6..29666a7d3a9a 100644 --- a/sys/netpfil/ipfw/nat64/nat64_translate.c +++ b/sys/netpfil/ipfw/nat64/nat64_translate.c @@ -1,1726 +1,1736 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015-2019 Yandex LLC * Copyright (c) 2015-2019 Andrey V. Elsukov * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ipstealth.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ip_fw_nat64.h" #include "nat64_translate.h" typedef int (*nat64_output_t)(struct ifnet *, struct mbuf *, struct sockaddr *, struct nat64_counters *, void *); typedef int (*nat64_output_one_t)(struct mbuf *, struct nat64_counters *, void *); static struct nhop_object *nat64_find_route4(struct sockaddr_in *, struct mbuf *); static struct nhop_object *nat64_find_route6(struct sockaddr_in6 *, struct mbuf *); static int nat64_output_one(struct mbuf *, struct nat64_counters *, void *); static int nat64_output(struct ifnet *, struct mbuf *, struct sockaddr *, struct nat64_counters *, void *); static int nat64_direct_output_one(struct mbuf *, struct nat64_counters *, void *); static int nat64_direct_output(struct ifnet *, struct mbuf *, struct sockaddr *, struct nat64_counters *, void *); struct nat64_methods { nat64_output_t output; nat64_output_one_t output_one; }; static const struct nat64_methods nat64_netisr = { .output = nat64_output, .output_one = nat64_output_one }; static const struct nat64_methods nat64_direct = { .output = nat64_direct_output, .output_one = nat64_direct_output_one }; /* These variables should be initialized explicitly on module loading */ VNET_DEFINE_STATIC(const struct nat64_methods *, nat64out); VNET_DEFINE_STATIC(const int *, nat64ipstealth); VNET_DEFINE_STATIC(const int *, nat64ip6stealth); #define V_nat64out VNET(nat64out) #define V_nat64ipstealth VNET(nat64ipstealth) #define V_nat64ip6stealth VNET(nat64ip6stealth) static const int stealth_on = 1; #ifndef IPSTEALTH static const int stealth_off = 0; #endif void nat64_set_output_method(int direct) { if (direct != 0) { V_nat64out = &nat64_direct; #ifdef IPSTEALTH /* Honor corresponding variables, if IPSTEALTH is defined */ V_nat64ipstealth = &V_ipstealth; V_nat64ip6stealth = &V_ip6stealth; #else /* otherwise we need to decrement HLIM/TTL for direct case */ V_nat64ipstealth = V_nat64ip6stealth = &stealth_off; #endif } else { V_nat64out = &nat64_netisr; /* Leave TTL/HLIM decrementing to forwarding code */ V_nat64ipstealth = V_nat64ip6stealth = &stealth_on; } } int nat64_get_output_method(void) { return (V_nat64out == &nat64_direct ? 
1: 0); } static void nat64_log(struct pfloghdr *logdata, struct mbuf *m, sa_family_t family) { logdata->dir = PF_OUT; logdata->af = family; ipfw_bpf_mtap2(logdata, PFLOG_HDRLEN, m); } static int nat64_direct_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct nat64_counters *stats, void *logdata) { int error; if (logdata != NULL) nat64_log(logdata, m, dst->sa_family); error = (*ifp->if_output)(ifp, m, dst, NULL); if (error != 0) NAT64STAT_INC(stats, oerrors); return (error); } static int nat64_direct_output_one(struct mbuf *m, struct nat64_counters *stats, void *logdata) { struct nhop_object *nh4 = NULL; struct nhop_object *nh6 = NULL; struct sockaddr_in6 dst6; struct sockaddr_in dst4; struct sockaddr *dst; struct ip6_hdr *ip6; struct ip *ip4; struct ifnet *ifp; int error; ip4 = mtod(m, struct ip *); error = 0; switch (ip4->ip_v) { case IPVERSION: dst4.sin_addr = ip4->ip_dst; nh4 = nat64_find_route4(&dst4, m); if (nh4 == NULL) { NAT64STAT_INC(stats, noroute4); error = EHOSTUNREACH; } else { ifp = nh4->nh_ifp; dst = (struct sockaddr *)&dst4; } break; case (IPV6_VERSION >> 4): ip6 = mtod(m, struct ip6_hdr *); dst6.sin6_addr = ip6->ip6_dst; nh6 = nat64_find_route6(&dst6, m); if (nh6 == NULL) { NAT64STAT_INC(stats, noroute6); error = EHOSTUNREACH; } else { ifp = nh6->nh_ifp; dst = (struct sockaddr *)&dst6; } break; default: m_freem(m); NAT64STAT_INC(stats, dropped); DPRINTF(DP_DROPS, "dropped due to unknown IP version"); return (EAFNOSUPPORT); } if (error != 0) { m_freem(m); return (EHOSTUNREACH); } if (logdata != NULL) nat64_log(logdata, m, dst->sa_family); error = (*ifp->if_output)(ifp, m, dst, NULL); if (error != 0) NAT64STAT_INC(stats, oerrors); return (error); } static int nat64_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct nat64_counters *stats, void *logdata) { struct ip *ip4; int ret, af; ip4 = mtod(m, struct ip *); switch (ip4->ip_v) { case IPVERSION: af = AF_INET; ret = NETISR_IP; break; case (IPV6_VERSION >> 4): af = AF_INET6; ret = NETISR_IPV6; break; default: m_freem(m); NAT64STAT_INC(stats, dropped); DPRINTF(DP_DROPS, "unknown IP version"); return (EAFNOSUPPORT); } if (logdata != NULL) nat64_log(logdata, m, af); if (m->m_pkthdr.rcvif == NULL) m->m_pkthdr.rcvif = V_loif; ret = netisr_queue(ret, m); if (ret != 0) NAT64STAT_INC(stats, oerrors); return (ret); } static int nat64_output_one(struct mbuf *m, struct nat64_counters *stats, void *logdata) { return (nat64_output(NULL, m, NULL, stats, logdata)); } /* * Check the given IPv6 prefix and length according to RFC6052: * The prefixes can only have one of the following lengths: * 32, 40, 48, 56, 64, or 96 (The Well-Known Prefix is 96 bits long). * Returns zero on success, otherwise EINVAL. 
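 *
 * For example:
 *	nat64_check_prefixlen(96) -> 0	(Well-Known Prefix length)
 *	nat64_check_prefixlen(33) -> EINVAL
 */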
*/ int nat64_check_prefixlen(int length) { switch (length) { case 32: case 40: case 48: case 56: case 64: case 96: return (0); } return (EINVAL); } int nat64_check_prefix6(const struct in6_addr *prefix, int length) { if (nat64_check_prefixlen(length) != 0) return (EINVAL); /* The Well-Known Prefix has a prefix length of 96 */ if (IN6_IS_ADDR_WKPFX(prefix) && length != 96) return (EINVAL); /* Bits 64 to 71 must be set to zero */ if (prefix->__u6_addr.__u6_addr8[8] != 0) return (EINVAL); /* Some extra checks */ if (IN6_IS_ADDR_MULTICAST(prefix) || IN6_IS_ADDR_UNSPECIFIED(prefix) || IN6_IS_ADDR_LOOPBACK(prefix)) return (EINVAL); return (0); } int nat64_check_private_ip4(const struct nat64_config *cfg, in_addr_t ia) { if (cfg->flags & NAT64_ALLOW_PRIVATE) return (0); /* WKPFX must not be used to represent non-global IPv4 addresses */ if (cfg->flags & NAT64_WKPFX) { /* IN_PRIVATE */ if ((ia & htonl(0xff000000)) == htonl(0x0a000000) || (ia & htonl(0xfff00000)) == htonl(0xac100000) || (ia & htonl(0xffff0000)) == htonl(0xc0a80000)) return (1); /* * RFC 5735: * 192.0.0.0/24 - reserved for IETF protocol assignments * 192.88.99.0/24 - for use as 6to4 relay anycast addresses * 198.18.0.0/15 - for use in benchmark tests * 192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24 - for use * in documentation and example code */ if ((ia & htonl(0xffffff00)) == htonl(0xc0000000) || (ia & htonl(0xffffff00)) == htonl(0xc0586300) || (ia & htonl(0xfffffe00)) == htonl(0xc6120000) || (ia & htonl(0xffffff00)) == htonl(0xc0000200) || (ia & htonl(0xfffffe00)) == htonl(0xc6336400) || (ia & htonl(0xffffff00)) == htonl(0xcb007100)) return (1); } return (0); } /* * Embed the IPv4 address @ia into the IPv6 address @ip6. * The embedding position is determined by the prefix length @plen. */ void nat64_embed_ip4(struct in6_addr *ip6, int plen, in_addr_t ia) { switch (plen) { case 32: case 96: ip6->s6_addr32[plen / 32] = ia; break; case 40: case 48: case 56: /* * Preserve the prefix bits. * Since the suffix bits are reserved for future use and * should be zero, we just overwrite the whole word where * they are. */ ip6->s6_addr32[1] &= 0xffffffff << (32 - plen % 32); #if BYTE_ORDER == BIG_ENDIAN ip6->s6_addr32[1] |= ia >> (plen % 32); ip6->s6_addr32[2] = ia << (24 - plen % 32); #elif BYTE_ORDER == LITTLE_ENDIAN ip6->s6_addr32[1] |= ia << (plen % 32); ip6->s6_addr32[2] = ia >> (24 - plen % 32); #endif break; case 64: #if BYTE_ORDER == BIG_ENDIAN ip6->s6_addr32[2] = ia >> 8; ip6->s6_addr32[3] = ia << 24; #elif BYTE_ORDER == LITTLE_ENDIAN ip6->s6_addr32[2] = ia << 8; ip6->s6_addr32[3] = ia >> 24; #endif break; default: panic("Wrong plen: %d", plen); }; /* * Bits 64 to 71 of the address are reserved for compatibility * with the host identifier format defined in the IPv6 addressing * architecture [RFC4291]. These bits MUST be set to zero. */ ip6->s6_addr8[8] = 0; } in_addr_t nat64_extract_ip4(const struct in6_addr *ip6, int plen) { in_addr_t ia; /* * According to RFC 6052 p2.2: * IPv4-embedded IPv6 addresses are composed of a variable-length * prefix, the embedded IPv4 address, and a variable-length suffix. * The suffix bits are reserved for future extensions and SHOULD * be set to zero.
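/*
 * Illustration (not from the FreeBSD sources): a worked RFC 6052 embedding
 * for the simplest case, plen == 96, where the IPv4 address occupies the
 * last 32 bits.  64:ff9b:: plus 192.0.2.33 (0xc0000221) yields
 * 64:ff9b::c000:221.  Userland only; main() is hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int
main(void)
{
	struct in6_addr p;
	struct in_addr a;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "64:ff9b::", &p);
	inet_pton(AF_INET, "192.0.2.33", &a);
	memcpy(&p.s6_addr[12], &a, sizeof(a));	/* embed at bits 96..127 */
	printf("%s\n", inet_ntop(AF_INET6, &p, buf, sizeof(buf)));
	/* prints: 64:ff9b::c000:221 */
	return (0);
}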
*/ switch (plen) { case 32: if (ip6->s6_addr32[3] != 0 || ip6->s6_addr32[2] != 0) goto badip6; break; case 40: if (ip6->s6_addr32[3] != 0 || (ip6->s6_addr32[2] & htonl(0xff00ffff)) != 0) goto badip6; break; case 48: if (ip6->s6_addr32[3] != 0 || (ip6->s6_addr32[2] & htonl(0xff0000ff)) != 0) goto badip6; break; case 56: if (ip6->s6_addr32[3] != 0 || ip6->s6_addr8[8] != 0) goto badip6; break; case 64: if (ip6->s6_addr8[8] != 0 || (ip6->s6_addr32[3] & htonl(0x00ffffff)) != 0) goto badip6; }; switch (plen) { case 32: case 96: ia = ip6->s6_addr32[plen / 32]; break; case 40: case 48: case 56: #if BYTE_ORDER == BIG_ENDIAN ia = (ip6->s6_addr32[1] << (plen % 32)) | (ip6->s6_addr32[2] >> (24 - plen % 32)); #elif BYTE_ORDER == LITTLE_ENDIAN ia = (ip6->s6_addr32[1] >> (plen % 32)) | (ip6->s6_addr32[2] << (24 - plen % 32)); #endif break; case 64: #if BYTE_ORDER == BIG_ENDIAN ia = (ip6->s6_addr32[2] << 8) | (ip6->s6_addr32[3] >> 24); #elif BYTE_ORDER == LITTLE_ENDIAN ia = (ip6->s6_addr32[2] >> 8) | (ip6->s6_addr32[3] << 24); #endif break; default: return (0); }; if (nat64_check_ip4(ia) == 0) return (ia); DPRINTF(DP_GENERIC | DP_DROPS, "invalid destination address: %08x", ia); return (0); badip6: DPRINTF(DP_GENERIC | DP_DROPS, "invalid IPv4-embedded IPv6 address"); return (0); } /* * According to RFC 1624 the equation for incremental checksum update is: * HC' = ~(~HC + ~m + m') -- [Eqn. 3] * HC' = HC - ~m - m' -- [Eqn. 4] * So, when we are replacing IPv4 addresses with IPv6 ones, we * can assume that the new bytes previously were zeros, and vice versa: * when we are replacing IPv6 addresses with IPv4 ones, the now unused * bytes become zeros. The payload length field in the pseudo header is * wider, but one half of it should be zero. Using equation 4 we get: * HC' = HC - (~m0 + m0') -- m0 is first changed word * HC' = (HC - (~m0 + m0')) - (~m1 + m1') -- m1 is second changed word * HC' = HC - ~m0 - m0' - ~m1 - m1' - ... = * = HC - sum(~m[i] + m'[i]) * * The function result should be used as follows: * IPv6 to IPv4: HC' = cksum_add(HC, result) * IPv4 to IPv6: HC' = cksum_add(HC, ~result) */ static uint16_t nat64_cksum_convert(struct ip6_hdr *ip6, struct ip *ip) { uint32_t sum; uint16_t *p; sum = ~ip->ip_src.s_addr >> 16; sum += ~ip->ip_src.s_addr & 0xffff; sum += ~ip->ip_dst.s_addr >> 16; sum += ~ip->ip_dst.s_addr & 0xffff; for (p = (uint16_t *)&ip6->ip6_src; p < (uint16_t *)(&ip6->ip6_src + 2); p++) sum += *p; while (sum >> 16) sum = (sum & 0xffff) + (sum >> 16); return (sum); } static void nat64_init_ip4hdr(const struct ip6_hdr *ip6, const struct ip6_frag *frag, uint16_t plen, uint8_t proto, struct ip *ip) { /* assume addresses are already initialized */ ip->ip_v = IPVERSION; ip->ip_hl = sizeof(*ip) >> 2; ip->ip_tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; ip->ip_len = htons(sizeof(*ip) + plen); ip->ip_ttl = ip6->ip6_hlim; if (*V_nat64ip6stealth == 0) ip->ip_ttl -= IPV6_HLIMDEC; ip->ip_sum = 0; ip->ip_p = (proto == IPPROTO_ICMPV6) ?
IPPROTO_ICMP: proto; ip_fillid(ip); if (frag != NULL) { ip->ip_off = htons(ntohs(frag->ip6f_offlg) >> 3); if (frag->ip6f_offlg & IP6F_MORE_FRAG) ip->ip_off |= htons(IP_MF); } else { ip->ip_off = htons(IP_DF); } ip->ip_sum = in_cksum_hdr(ip); } #define FRAGSZ(mtu) ((mtu) - sizeof(struct ip6_hdr) - sizeof(struct ip6_frag)) static NAT64NOINLINE int nat64_fragment6(struct nat64_counters *stats, struct ip6_hdr *ip6, struct mbufq *mq, struct mbuf *m, uint32_t mtu, uint16_t ip_id, uint16_t ip_off) { struct ip6_frag ip6f; struct mbuf *n; uint16_t hlen, len, offset; int plen; plen = ntohs(ip6->ip6_plen); hlen = sizeof(struct ip6_hdr); /* Fragmentation isn't needed */ if (ip_off == 0 && plen <= mtu - hlen) { M_PREPEND(m, hlen, M_NOWAIT); if (m == NULL) { NAT64STAT_INC(stats, nomem); return (ENOMEM); } bcopy(ip6, mtod(m, void *), hlen); if (mbufq_enqueue(mq, m) != 0) { m_freem(m); NAT64STAT_INC(stats, dropped); DPRINTF(DP_DROPS, "dropped due to mbufq overflow"); return (ENOBUFS); } return (0); } hlen += sizeof(struct ip6_frag); ip6f.ip6f_reserved = 0; ip6f.ip6f_nxt = ip6->ip6_nxt; ip6->ip6_nxt = IPPROTO_FRAGMENT; if (ip_off != 0) { /* * We have got an IPv4 fragment. * Use offset value and ip_id from original fragment. */ ip6f.ip6f_ident = htonl(ntohs(ip_id)); offset = (ntohs(ip_off) & IP_OFFMASK) << 3; NAT64STAT_INC(stats, ifrags); } else { /* The packet size exceeds interface MTU */ ip6f.ip6f_ident = htonl(ip6_randomid()); offset = 0; /* First fragment*/ } while (plen > 0 && m != NULL) { n = NULL; len = FRAGSZ(mtu) & ~7; if (len > plen) len = plen; ip6->ip6_plen = htons(len + sizeof(ip6f)); ip6f.ip6f_offlg = ntohs(offset); if (len < plen || (ip_off & htons(IP_MF)) != 0) ip6f.ip6f_offlg |= IP6F_MORE_FRAG; offset += len; plen -= len; if (plen > 0) { n = m_split(m, len, M_NOWAIT); if (n == NULL) goto fail; } M_PREPEND(m, hlen, M_NOWAIT); if (m == NULL) goto fail; bcopy(ip6, mtod(m, void *), sizeof(struct ip6_hdr)); bcopy(&ip6f, mtodo(m, sizeof(struct ip6_hdr)), sizeof(struct ip6_frag)); if (mbufq_enqueue(mq, m) != 0) goto fail; m = n; } NAT64STAT_ADD(stats, ofrags, mbufq_len(mq)); return (0); fail: if (m != NULL) m_freem(m); if (n != NULL) m_freem(n); mbufq_drain(mq); NAT64STAT_INC(stats, nomem); return (ENOMEM); } static struct nhop_object * nat64_find_route6(struct sockaddr_in6 *dst, struct mbuf *m) { struct nhop_object *nh; NET_EPOCH_ASSERT(); nh = fib6_lookup(M_GETFIB(m), &dst->sin6_addr, 0, 0, 0); if (nh == NULL) return NULL; if (nh->nh_flags & (NHF_BLACKHOLE | NHF_REJECT)) return NULL; /* * XXX: we need to use destination address with embedded scope * zone id, because LLTABLE uses such form of addresses for lookup. */ dst->sin6_family = AF_INET6; dst->sin6_len = sizeof(*dst); dst->sin6_addr = ifatoia6(nh->nh_ifa)->ia_addr.sin6_addr; if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr)) dst->sin6_addr.s6_addr16[1] = htons(nh->nh_ifp->if_index & 0xffff); dst->sin6_port = 0; dst->sin6_scope_id = 0; dst->sin6_flowinfo = 0; return nh; } #define NAT64_ICMP6_PLEN 64 static NAT64NOINLINE void nat64_icmp6_reflect(struct mbuf *m, uint8_t type, uint8_t code, uint32_t mtu, struct nat64_counters *stats, void *logdata) { struct icmp6_hdr *icmp6; struct ip6_hdr *ip6, *oip6; struct mbuf *n; int len, plen, proto; len = 0; proto = nat64_getlasthdr(m, &len); if (proto < 0) { DPRINTF(DP_DROPS, "mbuf isn't contigious"); goto freeit; } /* * Do not send ICMPv6 in reply to ICMPv6 errors. 
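/*
 * Illustration (not from the FreeBSD sources): the RFC 1624 identity that
 * nat64_cksum_convert() above relies on, checked for a single replaced
 * 16-bit word against a from-scratch recomputation.  Userland only; fold(),
 * adjust() and main() are hypothetical names.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t
fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)sum);
}

/* HC' = ~(~HC + ~m + m')  -- RFC 1624, Eqn. 3 */
static uint16_t
adjust(uint16_t hc, uint16_t m, uint16_t mp)
{
	return ((uint16_t)~fold((uint32_t)(uint16_t)~hc +
	    (uint16_t)~m + mp));
}

int
main(void)
{
	uint16_t w0 = 0x1234, w1 = 0xabcd;	/* toy 2-word "packet" */
	uint16_t hc = (uint16_t)~fold(w0 + w1);	/* original checksum */
	uint16_t w1p = 0x00ff;			/* w1 is replaced by w1p */
	uint16_t hcp = adjust(hc, w1, w1p);

	/* Incremental update matches full recomputation. */
	assert(hcp == (uint16_t)~fold(w0 + w1p));
	return (0);
}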
*/ if (proto == IPPROTO_ICMPV6) { if (m->m_len < len + sizeof(*icmp6)) { DPRINTF(DP_DROPS, "mbuf isn't contiguous"); goto freeit; } icmp6 = mtodo(m, len); if (icmp6->icmp6_type < ICMP6_ECHO_REQUEST || icmp6->icmp6_type == ND_REDIRECT) { DPRINTF(DP_DROPS, "do not send ICMPv6 in reply to " "ICMPv6 errors"); goto freeit; } /* * If there are extra headers between IPv6 and ICMPv6, * strip them off. */ if (len > sizeof(struct ip6_hdr)) { /* * NOTE: ipfw_chk already did m_pullup() and it is * expected that data is contiguous from the start * of the IPv6 header up to the end of the ICMPv6 header. */ bcopy(mtod(m, caddr_t), mtodo(m, len - sizeof(struct ip6_hdr)), sizeof(struct ip6_hdr)); m_adj(m, len - sizeof(struct ip6_hdr)); } } /* if (icmp6_ratelimit(&ip6->ip6_src, type, code)) goto freeit; */ ip6 = mtod(m, struct ip6_hdr *); switch (type) { case ICMP6_DST_UNREACH: case ICMP6_PACKET_TOO_BIG: case ICMP6_TIME_EXCEEDED: case ICMP6_PARAM_PROB: break; default: goto freeit; } /* Calculate length of ICMPv6 payload */ len = (m->m_pkthdr.len > NAT64_ICMP6_PLEN) ? NAT64_ICMP6_PLEN: m->m_pkthdr.len; /* Create new ICMPv6 datagram */ plen = len + sizeof(struct icmp6_hdr); n = m_get2(sizeof(struct ip6_hdr) + plen + max_hdr, M_NOWAIT, MT_HEADER, M_PKTHDR); if (n == NULL) { NAT64STAT_INC(stats, nomem); m_freem(m); return; } /* * Move pkthdr from the original mbuf. We should have some fields * initialized, because we can reinject this mbuf to netisr and it will * go through the input path (which requires at least rcvif to be set). * Also do M_ALIGN() to reduce the chance of needing to allocate a new * mbuf in the chain when we do M_PREPEND() or some type of tunneling. */ m_move_pkthdr(n, m); M_ALIGN(n, sizeof(struct ip6_hdr) + plen + max_hdr); n->m_len = n->m_pkthdr.len = sizeof(struct ip6_hdr) + plen; oip6 = mtod(n, struct ip6_hdr *); /* * Perform IPv6 source address selection for the reflected datagram. * nat64_check_ip6() doesn't allow scoped addresses, therefore * we use a zero scopeid. */ if (in6_selectsrc_addr(M_GETFIB(n), &ip6->ip6_src, 0, n->m_pkthdr.rcvif, &oip6->ip6_src, NULL) != 0) { /* * Failed to find a proper source address, drop the packet.
*/ m_freem(n); goto freeit; } oip6->ip6_dst = ip6->ip6_src; oip6->ip6_nxt = IPPROTO_ICMPV6; oip6->ip6_flow = 0; oip6->ip6_vfc |= IPV6_VERSION; oip6->ip6_hlim = V_ip6_defhlim; oip6->ip6_plen = htons(plen); icmp6 = mtodo(n, sizeof(struct ip6_hdr)); icmp6->icmp6_cksum = 0; icmp6->icmp6_type = type; icmp6->icmp6_code = code; icmp6->icmp6_mtu = htonl(mtu); m_copydata(m, 0, len, mtodo(n, sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr))); icmp6->icmp6_cksum = in6_cksum(n, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), plen); m_freem(m); V_nat64out->output_one(n, stats, logdata); return; freeit: NAT64STAT_INC(stats, dropped); m_freem(m); } static struct nhop_object * nat64_find_route4(struct sockaddr_in *dst, struct mbuf *m) { struct nhop_object *nh; NET_EPOCH_ASSERT(); nh = fib4_lookup(M_GETFIB(m), dst->sin_addr, 0, 0, 0); if (nh == NULL) return NULL; if (nh->nh_flags & (NHF_BLACKHOLE | NHF_BROADCAST | NHF_REJECT)) return NULL; dst->sin_family = AF_INET; dst->sin_len = sizeof(*dst); dst->sin_addr = IA_SIN(nh->nh_ifa)->sin_addr; dst->sin_port = 0; return nh; } #define NAT64_ICMP_PLEN 64 static NAT64NOINLINE void nat64_icmp_reflect(struct mbuf *m, uint8_t type, uint8_t code, uint16_t mtu, struct nat64_counters *stats, void *logdata) { struct icmp *icmp; struct ip *ip, *oip; struct mbuf *n; int len, plen; ip = mtod(m, struct ip *); /* Do not send an ICMP error if the packet is not the first fragment */ if (ip->ip_off & ~ntohs(IP_MF|IP_DF)) { DPRINTF(DP_DROPS, "not first fragment"); goto freeit; } /* Do not send ICMP in reply to ICMP errors */ if (ip->ip_p == IPPROTO_ICMP) { if (m->m_len < (ip->ip_hl << 2)) { DPRINTF(DP_DROPS, "mbuf isn't contiguous"); goto freeit; } icmp = mtodo(m, ip->ip_hl << 2); if (!ICMP_INFOTYPE(icmp->icmp_type)) { DPRINTF(DP_DROPS, "do not send ICMP in reply to " "ICMP errors"); goto freeit; } } switch (type) { case ICMP_UNREACH: case ICMP_TIMXCEED: case ICMP_PARAMPROB: break; default: goto freeit; } /* Calculate length of ICMP payload */ len = (m->m_pkthdr.len > NAT64_ICMP_PLEN) ?
(ip->ip_hl << 2) + 8: m->m_pkthdr.len; /* Create new ICMPv4 datagram */ plen = len + sizeof(struct icmphdr) + sizeof(uint32_t); n = m_get2(sizeof(struct ip) + plen + max_hdr, M_NOWAIT, MT_HEADER, M_PKTHDR); if (n == NULL) { NAT64STAT_INC(stats, nomem); m_freem(m); return; } m_move_pkthdr(n, m); M_ALIGN(n, sizeof(struct ip) + plen + max_hdr); n->m_len = n->m_pkthdr.len = sizeof(struct ip) + plen; oip = mtod(n, struct ip *); oip->ip_v = IPVERSION; oip->ip_hl = sizeof(struct ip) >> 2; oip->ip_tos = 0; oip->ip_len = htons(n->m_pkthdr.len); oip->ip_ttl = V_ip_defttl; oip->ip_p = IPPROTO_ICMP; ip_fillid(oip); oip->ip_off = htons(IP_DF); oip->ip_src = ip->ip_dst; oip->ip_dst = ip->ip_src; oip->ip_sum = 0; oip->ip_sum = in_cksum_hdr(oip); icmp = mtodo(n, sizeof(struct ip)); icmp->icmp_type = type; icmp->icmp_code = code; icmp->icmp_cksum = 0; icmp->icmp_pmvoid = 0; icmp->icmp_nextmtu = htons(mtu); m_copydata(m, 0, len, mtodo(n, sizeof(struct ip) + sizeof(struct icmphdr) + sizeof(uint32_t))); icmp->icmp_cksum = in_cksum_skip(n, sizeof(struct ip) + plen, sizeof(struct ip)); m_freem(m); V_nat64out->output_one(n, stats, logdata); return; freeit: NAT64STAT_INC(stats, dropped); m_freem(m); } /* Translate ICMP echo request/reply into ICMPv6 */ static void nat64_icmp_handle_echo(struct ip6_hdr *ip6, struct icmp6_hdr *icmp6, uint16_t id, uint8_t type) { uint16_t old; old = *(uint16_t *)icmp6; /* save type+code in one word */ icmp6->icmp6_type = type; /* Reflect ICMPv6 -> ICMPv4 type translation in the cksum */ icmp6->icmp6_cksum = cksum_adjust(icmp6->icmp6_cksum, old, *(uint16_t *)icmp6); if (id != 0) { old = icmp6->icmp6_id; icmp6->icmp6_id = id; /* Reflect ICMP id translation in the cksum */ icmp6->icmp6_cksum = cksum_adjust(icmp6->icmp6_cksum, old, id); } /* Reflect IPv6 pseudo header in the cksum */ icmp6->icmp6_cksum = ~in6_cksum_pseudo(ip6, ntohs(ip6->ip6_plen), IPPROTO_ICMPV6, ~icmp6->icmp6_cksum); } static NAT64NOINLINE struct mbuf * nat64_icmp_translate(struct mbuf *m, struct ip6_hdr *ip6, uint16_t icmpid, int offset, struct nat64_config *cfg) { struct ip ip; struct icmp *icmp; struct tcphdr *tcp; struct udphdr *udp; struct ip6_hdr *eip6; struct mbuf *n; uint32_t mtu; int len, hlen, plen; uint8_t type, code; if (m->m_len < offset + ICMP_MINLEN) m = m_pullup(m, offset + ICMP_MINLEN); if (m == NULL) { NAT64STAT_INC(&cfg->stats, nomem); return (m); } mtu = 0; icmp = mtodo(m, offset); /* RFC 7915 p4.2 */ switch (icmp->icmp_type) { case ICMP_ECHOREPLY: type = ICMP6_ECHO_REPLY; code = 0; break; case ICMP_UNREACH: type = ICMP6_DST_UNREACH; switch (icmp->icmp_code) { case ICMP_UNREACH_NET: case ICMP_UNREACH_HOST: case ICMP_UNREACH_SRCFAIL: case ICMP_UNREACH_NET_UNKNOWN: case ICMP_UNREACH_HOST_UNKNOWN: case ICMP_UNREACH_TOSNET: case ICMP_UNREACH_TOSHOST: code = ICMP6_DST_UNREACH_NOROUTE; break; case ICMP_UNREACH_PROTOCOL: type = ICMP6_PARAM_PROB; code = ICMP6_PARAMPROB_NEXTHEADER; break; case ICMP_UNREACH_PORT: code = ICMP6_DST_UNREACH_NOPORT; break; case ICMP_UNREACH_NEEDFRAG: type = ICMP6_PACKET_TOO_BIG; code = 0; /* XXX: needs an additional look */ mtu = max(IPV6_MMTU, ntohs(icmp->icmp_nextmtu) + 20); break; case ICMP_UNREACH_NET_PROHIB: case ICMP_UNREACH_HOST_PROHIB: case ICMP_UNREACH_FILTER_PROHIB: case ICMP_UNREACH_PRECEDENCE_CUTOFF: code = ICMP6_DST_UNREACH_ADMIN; break; default: DPRINTF(DP_DROPS, "Unsupported ICMP type %d, code %d", icmp->icmp_type, icmp->icmp_code); goto freeit; } break; case ICMP_TIMXCEED: type = ICMP6_TIME_EXCEEDED; code = icmp->icmp_code; break; case ICMP_ECHO: type = 
ICMP6_ECHO_REQUEST; code = 0; break; case ICMP_PARAMPROB: type = ICMP6_PARAM_PROB; switch (icmp->icmp_code) { case ICMP_PARAMPROB_ERRATPTR: case ICMP_PARAMPROB_LENGTH: code = ICMP6_PARAMPROB_HEADER; switch (icmp->icmp_pptr) { case 0: /* Version/IHL */ case 1: /* Type Of Service */ mtu = icmp->icmp_pptr; break; case 2: /* Total Length */ case 3: mtu = 4; /* Payload Length */ break; case 8: /* Time to Live */ mtu = 7; /* Hop Limit */ break; case 9: /* Protocol */ mtu = 6; /* Next Header */ break; case 12: /* Source address */ case 13: case 14: case 15: mtu = 8; break; case 16: /* Destination address */ case 17: case 18: case 19: mtu = 24; break; default: /* Silently drop */ DPRINTF(DP_DROPS, "Unsupported ICMP type %d," " code %d, pptr %d", icmp->icmp_type, icmp->icmp_code, icmp->icmp_pptr); goto freeit; } break; default: DPRINTF(DP_DROPS, "Unsupported ICMP type %d," " code %d, pptr %d", icmp->icmp_type, icmp->icmp_code, icmp->icmp_pptr); goto freeit; } break; default: DPRINTF(DP_DROPS, "Unsupported ICMP type %d, code %d", icmp->icmp_type, icmp->icmp_code); goto freeit; } /* * For echo request/reply we can use the original payload, * but we need to adjust icmp_cksum, because the ICMPv6 cksum covers * the IPv6 pseudo header and ICMPv6 types differ from ICMPv4. */ if (type == ICMP6_ECHO_REQUEST || type == ICMP6_ECHO_REPLY) { nat64_icmp_handle_echo(ip6, ICMP6(icmp), icmpid, type); return (m); } /* * For other types of ICMP messages we need to translate the inner * IPv4 header to an IPv6 header. * Assume the ICMP src is the same as the payload dst. * E.g. we have ( GWsrc1 , NATIP1 ) in the outer header * and ( NATIP1, Hostdst1 ) in the ICMP copy header. * In that case, we already have a map for NATIP1 and GWsrc1. * The only thing we need is to copy the IPv6 map prefix to * Hostdst1. */ hlen = offset + ICMP_MINLEN; if (m->m_pkthdr.len < hlen + sizeof(struct ip) + ICMP_MINLEN) { DPRINTF(DP_DROPS, "Message is too short %d", m->m_pkthdr.len); goto freeit; } m_copydata(m, hlen, sizeof(struct ip), (char *)&ip); if (ip.ip_v != IPVERSION) { DPRINTF(DP_DROPS, "Wrong IP version %d", ip.ip_v); goto freeit; } hlen += ip.ip_hl << 2; /* Skip inner IP header */ if (nat64_check_ip4(ip.ip_src.s_addr) != 0 || nat64_check_ip4(ip.ip_dst.s_addr) != 0 || nat64_check_private_ip4(cfg, ip.ip_src.s_addr) != 0 || nat64_check_private_ip4(cfg, ip.ip_dst.s_addr) != 0) { DPRINTF(DP_DROPS, "IP address checks failed %04x -> %04x", ntohl(ip.ip_src.s_addr), ntohl(ip.ip_dst.s_addr)); goto freeit; } if (m->m_pkthdr.len < hlen + ICMP_MINLEN) { DPRINTF(DP_DROPS, "Message is too short %d", m->m_pkthdr.len); goto freeit; } #if 0 /* * Check that the inner source matches the outer destination. * XXX: We need some method to convert an IPv4 address into an IPv6 * address here, and compare IPv6 addresses. */ if (ip.ip_src.s_addr != nat64_get_ip4(&ip6->ip6_dst)) { DPRINTF(DP_GENERIC, "Inner source doesn't match destination ", "%04x vs %04x", ip.ip_src.s_addr, nat64_get_ip4(&ip6->ip6_dst)); goto freeit; } #endif /* * Create a new mbuf for the ICMPv6 datagram. * NOTE: len is the data length just after the inner IP header.
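/*
 * Illustration (not from the FreeBSD sources): the core RFC 7915 type
 * mapping implemented by the switch above, reduced to a table.  Values are
 * the IANA ICMP/ICMPv6 numbers; struct icmp_map and icmp46[] are
 * hypothetical names for this sketch only.
 */
#include <stdint.h>

struct icmp_map {
	uint8_t v4_type;	/* incoming ICMPv4 type */
	uint8_t v6_type;	/* resulting ICMPv6 type */
};

static const struct icmp_map icmp46[] = {
	{ 0,  129 },	/* Echo Reply     -> ICMP6_ECHO_REPLY */
	{ 3,    1 },	/* Dest Unreach   -> ICMP6_DST_UNREACH (codes remapped;
			   code 4, needfrag, becomes Packet Too Big, type 2) */
	{ 8,  128 },	/* Echo Request   -> ICMP6_ECHO_REQUEST */
	{ 11,   3 },	/* Time Exceeded  -> ICMP6_TIME_EXCEEDED */
	{ 12,   4 },	/* Parameter Prob -> ICMP6_PARAM_PROB */
};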
*/ len = m->m_pkthdr.len - hlen; if (sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) + len > NAT64_ICMP6_PLEN) len = NAT64_ICMP6_PLEN - sizeof(struct icmp6_hdr) - sizeof(struct ip6_hdr); plen = sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr) + len; n = m_get2(offset + plen + max_hdr, M_NOWAIT, MT_HEADER, M_PKTHDR); if (n == NULL) { NAT64STAT_INC(&cfg->stats, nomem); m_freem(m); return (NULL); } m_move_pkthdr(n, m); M_ALIGN(n, offset + plen + max_hdr); n->m_len = n->m_pkthdr.len = offset + plen; /* Adjust ip6_plen in the outer header */ ip6->ip6_plen = htons(plen); /* Construct the new inner IPv6 header */ eip6 = mtodo(n, offset + sizeof(struct icmp6_hdr)); eip6->ip6_src = ip6->ip6_dst; /* Use the same prefix that we have in the outer header */ eip6->ip6_dst = ip6->ip6_src; MPASS(cfg->flags & NAT64_PLATPFX); nat64_embed_ip4(&eip6->ip6_dst, cfg->plat_plen, ip.ip_dst.s_addr); eip6->ip6_flow = htonl(ip.ip_tos << 20); eip6->ip6_vfc |= IPV6_VERSION; eip6->ip6_hlim = ip.ip_ttl; eip6->ip6_plen = htons(ntohs(ip.ip_len) - (ip.ip_hl << 2)); eip6->ip6_nxt = (ip.ip_p == IPPROTO_ICMP) ? IPPROTO_ICMPV6: ip.ip_p; m_copydata(m, hlen, len, (char *)(eip6 + 1)); /* * We need to translate the source port in the inner ULP header, * and adjust the ULP checksum. */ switch (ip.ip_p) { case IPPROTO_TCP: if (len < offsetof(struct tcphdr, th_sum)) break; tcp = TCP(eip6 + 1); if (icmpid != 0) { tcp->th_sum = cksum_adjust(tcp->th_sum, tcp->th_sport, icmpid); tcp->th_sport = icmpid; } tcp->th_sum = cksum_add(tcp->th_sum, ~nat64_cksum_convert(eip6, &ip)); break; case IPPROTO_UDP: if (len < offsetof(struct udphdr, uh_sum)) break; udp = UDP(eip6 + 1); if (icmpid != 0) { udp->uh_sum = cksum_adjust(udp->uh_sum, udp->uh_sport, icmpid); udp->uh_sport = icmpid; } udp->uh_sum = cksum_add(udp->uh_sum, ~nat64_cksum_convert(eip6, &ip)); break; case IPPROTO_ICMP: /* * Check if this is an ICMP error message for an echo request * that we sent, i.e. the ULP in the data containing the * invoking packet is IPPROTO_ICMP and its type is ICMP_ECHO. */ icmp = (struct icmp *)(eip6 + 1); if (icmp->icmp_type != ICMP_ECHO) { m_freem(n); goto freeit; } /* * For our client this original datagram should look like * an ICMPv6 datagram with type ICMP6_ECHO_REQUEST. * Thus we need to adjust icmp_cksum and convert the type from * ICMP_ECHO to ICMP6_ECHO_REQUEST. */ nat64_icmp_handle_echo(eip6, ICMP6(icmp), icmpid, ICMP6_ECHO_REQUEST); } m_freem(m); /* Convert the ICMPv4 header into an ICMPv6 header */ icmp = mtodo(n, offset); ICMP6(icmp)->icmp6_type = type; ICMP6(icmp)->icmp6_code = code; ICMP6(icmp)->icmp6_mtu = htonl(mtu); ICMP6(icmp)->icmp6_cksum = 0; ICMP6(icmp)->icmp6_cksum = cksum_add( ~in6_cksum_pseudo(ip6, plen, IPPROTO_ICMPV6, 0), in_cksum_skip(n, n->m_pkthdr.len, offset)); return (n); freeit: m_freem(m); NAT64STAT_INC(&cfg->stats, dropped); return (NULL); } int nat64_getlasthdr(struct mbuf *m, int *offset) { struct ip6_hdr *ip6; struct ip6_hbh *hbh; int proto, hlen; if (offset != NULL) hlen = *offset; else hlen = 0; if (m->m_len < hlen + sizeof(*ip6)) return (-1); ip6 = mtodo(m, hlen); hlen += sizeof(*ip6); proto = ip6->ip6_nxt; /* Skip extension headers */ while (proto == IPPROTO_HOPOPTS || proto == IPPROTO_ROUTING || proto == IPPROTO_DSTOPTS) { hbh = mtodo(m, hlen); /* * We expect the mbuf to have contiguous data up to * the upper level header. */ if (m->m_len < hlen) return (-1); /* * We don't support the Jumbo Payload option, * so return an error.
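/*
 * Illustration (not from the FreeBSD sources): the extension-header length
 * arithmetic used by nat64_getlasthdr() above.  Every Hop-by-Hop, Routing
 * or Destination-Options header occupies (length-field + 1) * 8 bytes.
 * Userland only; ext_hdr_bytes() and main() are hypothetical names.
 */
#include <assert.h>
#include <stdint.h>

static int
ext_hdr_bytes(uint8_t len_field)
{
	return ((len_field + 1) << 3);
}

int
main(void)
{
	assert(ext_hdr_bytes(0) == 8);	/* minimal extension header */
	assert(ext_hdr_bytes(1) == 16);
	return (0);
}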
*/ if (proto == IPPROTO_HOPOPTS && ip6->ip6_plen == 0) return (-1); proto = hbh->ip6h_nxt; hlen += (hbh->ip6h_len + 1) << 3; } if (offset != NULL) *offset = hlen; return (proto); } int nat64_do_handle_ip4(struct mbuf *m, struct in6_addr *saddr, struct in6_addr *daddr, uint16_t lport, struct nat64_config *cfg, void *logdata) { struct nhop_object *nh; struct ip6_hdr ip6; struct sockaddr_in6 dst; struct ip *ip; struct mbufq mq; uint16_t ip_id, ip_off; uint16_t *csum; int plen, hlen; uint8_t proto; ip = mtod(m, struct ip*); if (*V_nat64ipstealth == 0 && ip->ip_ttl <= IPTTLDEC) { nat64_icmp_reflect(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, &cfg->stats, logdata); return (NAT64RETURN); } ip6.ip6_dst = *daddr; ip6.ip6_src = *saddr; hlen = ip->ip_hl << 2; plen = ntohs(ip->ip_len) - hlen; proto = ip->ip_p; /* Save ip_id and ip_off, both are in network byte order */ ip_id = ip->ip_id; ip_off = ip->ip_off & htons(IP_OFFMASK | IP_MF); /* Fragment length must be multiple of 8 octets */ if ((ip->ip_off & htons(IP_MF)) != 0 && (plen & 0x7) != 0) { nat64_icmp_reflect(m, ICMP_PARAMPROB, ICMP_PARAMPROB_LENGTH, 0, &cfg->stats, logdata); return (NAT64RETURN); } /* Fragmented ICMP is unsupported */ if (proto == IPPROTO_ICMP && ip_off != 0) { DPRINTF(DP_DROPS, "dropped due to fragmented ICMP"); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } dst.sin6_addr = ip6.ip6_dst; nh = nat64_find_route6(&dst, m); if (nh == NULL) { NAT64STAT_INC(&cfg->stats, noroute6); nat64_icmp_reflect(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, &cfg->stats, logdata); return (NAT64RETURN); } if (nh->nh_mtu < plen + sizeof(ip6) && (ip->ip_off & htons(IP_DF)) != 0) { nat64_icmp_reflect(m, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, FRAGSZ(nh->nh_mtu) + sizeof(struct ip), &cfg->stats, logdata); return (NAT64RETURN); } ip6.ip6_flow = htonl(ip->ip_tos << 20); ip6.ip6_vfc |= IPV6_VERSION; ip6.ip6_hlim = ip->ip_ttl; if (*V_nat64ipstealth == 0) ip6.ip6_hlim -= IPTTLDEC; ip6.ip6_plen = htons(plen); ip6.ip6_nxt = (proto == IPPROTO_ICMP) ? IPPROTO_ICMPV6: proto; /* Handle delayed checksums if needed. */ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { + m = mb_unmapped_to_ext(m); + if (m == NULL) { + NAT64STAT_INC(&cfg->stats, nomem); + return (NAT64RETURN); + } in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } /* Convert checksums. 
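/*
 * Note on the hunk above (illustration, not from the FreeBSD sources):
 * this is the pattern the patch introduces wherever software checksumming
 * may touch packet data.  M_EXTPG (unmapped) mbufs must be converted to
 * mapped external storage first, and mb_unmapped_to_ext() consumes the
 * chain on failure, so the caller must not touch the old pointer.  A
 * generic kernel-context sketch; example_prepare_cksum() is a hypothetical
 * name and this is not compilable standalone.
 */
static struct mbuf *
example_prepare_cksum(struct mbuf *m)
{
	if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		m = mb_unmapped_to_ext(m);	/* may free the chain */
		if (m == NULL)
			return (NULL);		/* nothing left to free */
		in_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	return (m);
}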
*/ switch (proto) { case IPPROTO_TCP: csum = &TCP(mtodo(m, hlen))->th_sum; if (lport != 0) { struct tcphdr *tcp = TCP(mtodo(m, hlen)); *csum = cksum_adjust(*csum, tcp->th_dport, lport); tcp->th_dport = lport; } *csum = cksum_add(*csum, ~nat64_cksum_convert(&ip6, ip)); break; case IPPROTO_UDP: csum = &UDP(mtodo(m, hlen))->uh_sum; if (lport != 0) { struct udphdr *udp = UDP(mtodo(m, hlen)); *csum = cksum_adjust(*csum, udp->uh_dport, lport); udp->uh_dport = lport; } *csum = cksum_add(*csum, ~nat64_cksum_convert(&ip6, ip)); break; case IPPROTO_ICMP: m = nat64_icmp_translate(m, &ip6, lport, hlen, cfg); if (m == NULL) /* stats already accounted */ return (NAT64RETURN); } m_adj(m, hlen); mbufq_init(&mq, 255); nat64_fragment6(&cfg->stats, &ip6, &mq, m, nh->nh_mtu, ip_id, ip_off); while ((m = mbufq_dequeue(&mq)) != NULL) { if (V_nat64out->output(nh->nh_ifp, m, (struct sockaddr *)&dst, &cfg->stats, logdata) != 0) break; NAT64STAT_INC(&cfg->stats, opcnt46); } mbufq_drain(&mq); return (NAT64RETURN); } int nat64_handle_icmp6(struct mbuf *m, int hlen, uint32_t aaddr, uint16_t aport, struct nat64_config *cfg, void *logdata) { struct ip ip; struct icmp6_hdr *icmp6; struct ip6_frag *ip6f; struct ip6_hdr *ip6, *ip6i; uint32_t mtu; int plen, proto; uint8_t type, code; if (hlen == 0) { ip6 = mtod(m, struct ip6_hdr *); if (nat64_check_ip6(&ip6->ip6_src) != 0 || nat64_check_ip6(&ip6->ip6_dst) != 0) return (NAT64SKIP); proto = nat64_getlasthdr(m, &hlen); if (proto != IPPROTO_ICMPV6) { DPRINTF(DP_DROPS, "dropped due to non-contiguous mbuf"); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } } /* * Translate ICMPv6 type and code to ICMPv4 (RFC7915). * NOTE: ICMPv6 echo is handled by nat64_do_handle_ip6(). */ icmp6 = mtodo(m, hlen); mtu = 0; switch (icmp6->icmp6_type) { case ICMP6_DST_UNREACH: type = ICMP_UNREACH; switch (icmp6->icmp6_code) { case ICMP6_DST_UNREACH_NOROUTE: case ICMP6_DST_UNREACH_BEYONDSCOPE: case ICMP6_DST_UNREACH_ADDR: code = ICMP_UNREACH_HOST; break; case ICMP6_DST_UNREACH_ADMIN: code = ICMP_UNREACH_HOST_PROHIB; break; case ICMP6_DST_UNREACH_NOPORT: code = ICMP_UNREACH_PORT; break; default: DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d," " code %d", icmp6->icmp6_type, icmp6->icmp6_code); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } break; case ICMP6_PACKET_TOO_BIG: type = ICMP_UNREACH; code = ICMP_UNREACH_NEEDFRAG; mtu = ntohl(icmp6->icmp6_mtu); if (mtu < IPV6_MMTU) { DPRINTF(DP_DROPS, "Wrong MTU %d in ICMPv6 type %d," " code %d", mtu, icmp6->icmp6_type, icmp6->icmp6_code); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } /* * Adjust MTU to reflect the difference between * IPv6 and IPv4 headers.
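/*
 * Illustration (not from the FreeBSD sources): the Packet Too Big MTU
 * rewrite performed just below, in isolation.  An IPv6 PTB MTU of 1500
 * becomes an IPv4 "fragmentation needed" MTU of 1480, since the IPv6
 * header is 20 bytes larger (40 vs 20).  main() is hypothetical.
 */
#include <assert.h>

int
main(void)
{
	int mtu6 = 1500;
	int mtu4 = mtu6 - (40 - 20);	/* sizeof(ip6_hdr) - sizeof(ip) */

	assert(mtu4 == 1480);
	return (0);
}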
*/ mtu -= sizeof(struct ip6_hdr) - sizeof(struct ip); break; case ICMP6_TIME_EXCEEDED: type = ICMP_TIMXCEED; code = icmp6->icmp6_code; break; case ICMP6_PARAM_PROB: switch (icmp6->icmp6_code) { case ICMP6_PARAMPROB_HEADER: type = ICMP_PARAMPROB; code = ICMP_PARAMPROB_ERRATPTR; mtu = ntohl(icmp6->icmp6_pptr); switch (mtu) { case 0: /* Version/Traffic Class */ case 1: /* Traffic Class/Flow Label */ break; case 4: /* Payload Length */ case 5: mtu = 2; break; case 6: /* Next Header */ mtu = 9; break; case 7: /* Hop Limit */ mtu = 8; break; default: if (mtu >= 8 && mtu <= 23) { mtu = 12; /* Source address */ break; } if (mtu >= 24 && mtu <= 39) { mtu = 16; /* Destination address */ break; } DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d," " code %d, pptr %d", icmp6->icmp6_type, icmp6->icmp6_code, mtu); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } break; case ICMP6_PARAMPROB_NEXTHEADER: type = ICMP_UNREACH; code = ICMP_UNREACH_PROTOCOL; break; default: DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d," " code %d, pptr %d", icmp6->icmp6_type, icmp6->icmp6_code, ntohl(icmp6->icmp6_pptr)); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } break; default: DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d, code %d", icmp6->icmp6_type, icmp6->icmp6_code); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } hlen += sizeof(struct icmp6_hdr); if (m->m_pkthdr.len < hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN) { NAT64STAT_INC(&cfg->stats, dropped); DPRINTF(DP_DROPS, "Message is too short %d", m->m_pkthdr.len); return (NAT64MFREE); } /* * We need at least ICMP_MINLEN bytes of original datagram payload * to generate an ICMP message. It is nice that ICMP_MINLEN is equal * to sizeof(struct ip6_frag). So, if the embedded datagram had a * fragment header, we will not have to do m_pullup() again. * * What we have here: * Outer header: (IPv6iGW, v4mapPRefix+v4exthost) * Inner header: (v4mapPRefix+v4host, IPv6iHost) [sport, dport] * We need to translate it to: * * Outer header: (alias_host, v4exthost) * Inner header: (v4exthost, alias_host) [sport, alias_port] * * Assume the caller function has checked that v4mapPRefix+v4host * matches the configured prefix. * The only two things we should be provided with are the mapping between * IPv6iHost <> alias_host and between dport and alias_port.
*/ if (m->m_len < hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN) m = m_pullup(m, hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN); if (m == NULL) { NAT64STAT_INC(&cfg->stats, nomem); return (NAT64RETURN); } ip6 = mtod(m, struct ip6_hdr *); ip6i = mtodo(m, hlen); ip6f = NULL; proto = ip6i->ip6_nxt; plen = ntohs(ip6i->ip6_plen); hlen += sizeof(struct ip6_hdr); if (proto == IPPROTO_FRAGMENT) { if (m->m_pkthdr.len < hlen + sizeof(struct ip6_frag) + ICMP_MINLEN) goto fail; ip6f = mtodo(m, hlen); proto = ip6f->ip6f_nxt; plen -= sizeof(struct ip6_frag); hlen += sizeof(struct ip6_frag); /* Adjust MTU to reflect the frag header size */ if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) mtu -= sizeof(struct ip6_frag); } if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) { DPRINTF(DP_DROPS, "Unsupported proto %d in the inner header", proto); goto fail; } if (nat64_check_ip6(&ip6i->ip6_src) != 0 || nat64_check_ip6(&ip6i->ip6_dst) != 0) { DPRINTF(DP_DROPS, "Inner addresses do not pass the check"); goto fail; } /* Check if the outer dst is the same as the inner src */ if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6i->ip6_src)) { DPRINTF(DP_DROPS, "Inner src doesn't match outer dst"); goto fail; } /* Now we need to make a fake IPv4 packet to generate the ICMP message */ ip.ip_dst.s_addr = aaddr; ip.ip_src.s_addr = nat64_extract_ip4(&ip6i->ip6_src, cfg->plat_plen); if (ip.ip_src.s_addr == 0) goto fail; /* XXX: Make a fake ULP header */ if (V_nat64out == &nat64_direct) /* init_ip4hdr will decrement it */ ip6i->ip6_hlim += IPV6_HLIMDEC; nat64_init_ip4hdr(ip6i, ip6f, plen, proto, &ip); m_adj(m, hlen - sizeof(struct ip)); bcopy(&ip, mtod(m, void *), sizeof(ip)); nat64_icmp_reflect(m, type, code, (uint16_t)mtu, &cfg->stats, logdata); return (NAT64RETURN); fail: /* * We must call m_freem() here because the mbuf pointer could * have been changed by m_pullup(). */ m_freem(m); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64RETURN); } int nat64_do_handle_ip6(struct mbuf *m, uint32_t aaddr, uint16_t aport, struct nat64_config *cfg, void *logdata) { struct ip ip; struct nhop_object *nh; struct sockaddr_in dst; struct ip6_frag *frag; struct ip6_hdr *ip6; struct icmp6_hdr *icmp6; uint16_t *csum; int plen, hlen, proto; /* * XXX: we expect that ipfw_chk() did m_pullup() up to the upper level * protocol's headers. Also we skip some checks that ip6_input(), * ip6_forward(), ip6_fastfwd() and ipfw_chk() already did.
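/*
 * Illustration (not from the FreeBSD sources): the inverse of the
 * embedding sketch shown earlier, matching the nat64_extract_ip4() call
 * used just below.  For a /96 prefix the IPv4 address is simply the last
 * 32 bits of the IPv6 destination: 64:ff9b::c000:221 carries 192.0.2.33.
 * Userland only; main() is hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int
main(void)
{
	struct in6_addr d;
	struct in_addr a;
	char buf[INET_ADDRSTRLEN];

	inet_pton(AF_INET6, "64:ff9b::c000:221", &d);
	memcpy(&a, &d.s6_addr[12], sizeof(a));	/* bits 96..127 */
	printf("%s\n", inet_ntop(AF_INET, &a, buf, sizeof(buf)));
	/* prints: 192.0.2.33 */
	return (0);
}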
*/ ip6 = mtod(m, struct ip6_hdr *); if (nat64_check_ip6(&ip6->ip6_src) != 0 || nat64_check_ip6(&ip6->ip6_dst) != 0) { return (NAT64SKIP); } /* Starting from this point we must not return zero */ ip.ip_src.s_addr = aaddr; if (nat64_check_ip4(ip.ip_src.s_addr) != 0) { DPRINTF(DP_GENERIC | DP_DROPS, "invalid source address: %08x", ip.ip_src.s_addr); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } ip.ip_dst.s_addr = nat64_extract_ip4(&ip6->ip6_dst, cfg->plat_plen); if (ip.ip_dst.s_addr == 0) { NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } if (*V_nat64ip6stealth == 0 && ip6->ip6_hlim <= IPV6_HLIMDEC) { nat64_icmp6_reflect(m, ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_TRANSIT, 0, &cfg->stats, logdata); return (NAT64RETURN); } hlen = 0; plen = ntohs(ip6->ip6_plen); proto = nat64_getlasthdr(m, &hlen); if (proto < 0) { DPRINTF(DP_DROPS, "dropped due to non-contiguous mbuf"); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } frag = NULL; if (proto == IPPROTO_FRAGMENT) { /* ipfw_chk should have done m_pullup() up to the frag header */ if (m->m_len < hlen + sizeof(*frag)) { DPRINTF(DP_DROPS, "dropped due to non-contiguous mbuf"); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } frag = mtodo(m, hlen); proto = frag->ip6f_nxt; hlen += sizeof(*frag); /* Fragmented ICMPv6 is unsupported */ if (proto == IPPROTO_ICMPV6) { DPRINTF(DP_DROPS, "dropped due to fragmented ICMPv6"); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } /* Fragment length must be multiple of 8 octets */ if ((frag->ip6f_offlg & IP6F_MORE_FRAG) != 0 && ((plen + sizeof(struct ip6_hdr) - hlen) & 0x7) != 0) { nat64_icmp6_reflect(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offsetof(struct ip6_hdr, ip6_plen), &cfg->stats, logdata); return (NAT64RETURN); } } plen -= hlen - sizeof(struct ip6_hdr); if (plen < 0 || m->m_pkthdr.len < plen + hlen) { DPRINTF(DP_DROPS, "plen %d, pkthdr.len %d, hlen %d", plen, m->m_pkthdr.len, hlen); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } icmp6 = NULL; /* Make gcc happy */ if (proto == IPPROTO_ICMPV6) { icmp6 = mtodo(m, hlen); if (icmp6->icmp6_type != ICMP6_ECHO_REQUEST && icmp6->icmp6_type != ICMP6_ECHO_REPLY) return (nat64_handle_icmp6(m, hlen, aaddr, aport, cfg, logdata)); } dst.sin_addr.s_addr = ip.ip_dst.s_addr; nh = nat64_find_route4(&dst, m); if (nh == NULL) { NAT64STAT_INC(&cfg->stats, noroute4); nat64_icmp6_reflect(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOROUTE, 0, &cfg->stats, logdata); return (NAT64RETURN); } if (nh->nh_mtu < plen + sizeof(ip)) { nat64_icmp6_reflect(m, ICMP6_PACKET_TOO_BIG, 0, nh->nh_mtu, &cfg->stats, logdata); return (NAT64RETURN); } nat64_init_ip4hdr(ip6, frag, plen, proto, &ip); /* Handle delayed checksums if needed. */ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6) { + m = mb_unmapped_to_ext(m); + if (m == NULL) { + NAT64STAT_INC(&cfg->stats, nomem); + return (NAT64RETURN); + } in6_delayed_cksum(m, plen, hlen); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6; } /* Convert checksums.
*/ switch (proto) { case IPPROTO_TCP: csum = &TCP(mtodo(m, hlen))->th_sum; if (aport != 0) { struct tcphdr *tcp = TCP(mtodo(m, hlen)); *csum = cksum_adjust(*csum, tcp->th_sport, aport); tcp->th_sport = aport; } *csum = cksum_add(*csum, nat64_cksum_convert(ip6, &ip)); break; case IPPROTO_UDP: csum = &UDP(mtodo(m, hlen))->uh_sum; if (aport != 0) { struct udphdr *udp = UDP(mtodo(m, hlen)); *csum = cksum_adjust(*csum, udp->uh_sport, aport); udp->uh_sport = aport; } *csum = cksum_add(*csum, nat64_cksum_convert(ip6, &ip)); break; case IPPROTO_ICMPV6: /* Checksum in ICMPv6 covers pseudo header */ csum = &icmp6->icmp6_cksum; *csum = cksum_add(*csum, in6_cksum_pseudo(ip6, plen, IPPROTO_ICMPV6, 0)); /* Convert ICMPv6 types to ICMP */ proto = *(uint16_t *)icmp6; /* save old word for cksum_adjust */ if (icmp6->icmp6_type == ICMP6_ECHO_REQUEST) icmp6->icmp6_type = ICMP_ECHO; else /* ICMP6_ECHO_REPLY */ icmp6->icmp6_type = ICMP_ECHOREPLY; *csum = cksum_adjust(*csum, (uint16_t)proto, *(uint16_t *)icmp6); if (aport != 0) { uint16_t old_id = icmp6->icmp6_id; icmp6->icmp6_id = aport; *csum = cksum_adjust(*csum, old_id, aport); } break; }; m_adj(m, hlen - sizeof(ip)); bcopy(&ip, mtod(m, void *), sizeof(ip)); if (V_nat64out->output(nh->nh_ifp, m, (struct sockaddr *)&dst, &cfg->stats, logdata) == 0) NAT64STAT_INC(&cfg->stats, opcnt64); return (NAT64RETURN); } diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h index 52d000fea5fd..729653fa1e55 100644 --- a/sys/sys/mbuf.h +++ b/sys/sys/mbuf.h @@ -1,1632 +1,1643 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)mbuf.h 8.5 (Berkeley) 2/19/95 * $FreeBSD$ */ #ifndef _SYS_MBUF_H_ #define _SYS_MBUF_H_ /* XXX: These includes suck. Sorry! 
*/ #include #ifdef _KERNEL #include #include #include #ifdef WITNESS #include #endif #endif #ifdef _KERNEL #include #define MBUF_PROBE1(probe, arg0) \ SDT_PROBE1(sdt, , , probe, arg0) #define MBUF_PROBE2(probe, arg0, arg1) \ SDT_PROBE2(sdt, , , probe, arg0, arg1) #define MBUF_PROBE3(probe, arg0, arg1, arg2) \ SDT_PROBE3(sdt, , , probe, arg0, arg1, arg2) #define MBUF_PROBE4(probe, arg0, arg1, arg2, arg3) \ SDT_PROBE4(sdt, , , probe, arg0, arg1, arg2, arg3) #define MBUF_PROBE5(probe, arg0, arg1, arg2, arg3, arg4) \ SDT_PROBE5(sdt, , , probe, arg0, arg1, arg2, arg3, arg4) SDT_PROBE_DECLARE(sdt, , , m__init); SDT_PROBE_DECLARE(sdt, , , m__gethdr); SDT_PROBE_DECLARE(sdt, , , m__get); SDT_PROBE_DECLARE(sdt, , , m__getcl); SDT_PROBE_DECLARE(sdt, , , m__getjcl); SDT_PROBE_DECLARE(sdt, , , m__clget); SDT_PROBE_DECLARE(sdt, , , m__cljget); SDT_PROBE_DECLARE(sdt, , , m__cljset); SDT_PROBE_DECLARE(sdt, , , m__free); SDT_PROBE_DECLARE(sdt, , , m__freem); #endif /* _KERNEL */ /* * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead. * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in * sys/param.h), which has no additional overhead and is used instead of the * internal data area; this is done when at least MINCLSIZE of data must be * stored. Additionally, it is possible to allocate a separate buffer * externally and attach it to the mbuf in a way similar to that of mbuf * clusters. * * NB: These calculations do not take actual compiler-induced alignment and * padding inside the complete struct mbuf into account. Appropriate * attention is required when changing members of struct mbuf. * * MLEN is the data length in a normal mbuf. * MHLEN is the data length in an mbuf with a packet header. * MINCLSIZE is the smallest amount of data that should be put into a cluster. * * Compile-time assertions in uipc_mbuf.c test these values to ensure that * they are sensible. */ struct mbuf; #define MHSIZE offsetof(struct mbuf, m_dat) #define MPKTHSIZE offsetof(struct mbuf, m_pktdat) #define MLEN ((int)(MSIZE - MHSIZE)) #define MHLEN ((int)(MSIZE - MPKTHSIZE)) #define MINCLSIZE (MHLEN + 1) #define M_NODOM 255 #ifdef _KERNEL /*- * Macro for type conversion: convert mbuf pointer to data pointer of correct * type: * * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type. * mtodo(m, o) -- Same as above but with offset 'o' into data. */ #define mtod(m, t) ((t)((m)->m_data)) #define mtodo(m, o) ((void *)(((m)->m_data) + (o))) /* * Argument structure passed to UMA routines during mbuf and packet * allocations. */ struct mb_args { int flags; /* Flags for mbuf being allocated */ short type; /* Type of mbuf being allocated */ }; #endif /* _KERNEL */ /* * Packet tag structure (see below for details). */ struct m_tag { SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ u_int16_t m_tag_id; /* Tag ID */ u_int16_t m_tag_len; /* Length of data */ u_int32_t m_tag_cookie; /* ABI/Module ID */ void (*m_tag_free)(struct m_tag *); }; /* * Static network interface owned tag. * Allocated through ifp->if_snd_tag_alloc(). */ struct m_snd_tag { struct ifnet *ifp; /* network interface tag belongs to */ volatile u_int refcount; u_int type; /* One of IF_SND_TAG_TYPE_*. */ }; /* * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set. * Size ILP32: 48 * LP64: 56 * Compile-time assertions in uipc_mbuf.c test these values to ensure that * they are correct.
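/*
 * Illustration (not from this header): typical mtod()/mtodo() use in an
 * input path, assuming the netinet headers are included and the chain has
 * been made contiguous first.  Kernel-context sketch, not compilable
 * standalone; example_peek_udp() is a hypothetical name.
 */
static int
example_peek_udp(struct mbuf *m)
{
	struct ip *ip;
	struct udphdr *uh;
	int hlen;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return (ENOBUFS);
	ip = mtod(m, struct ip *);	/* data pointer, typed */
	hlen = ip->ip_hl << 2;
	if (m->m_len < hlen + sizeof(struct udphdr) &&
	    (m = m_pullup(m, hlen + sizeof(struct udphdr))) == NULL)
		return (ENOBUFS);
	uh = mtodo(m, hlen);		/* data pointer at byte offset hlen */
	return (uh->uh_dport != 0 ? 0 : EINVAL);
}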
*/ struct pkthdr { union { struct m_snd_tag *snd_tag; /* send tag, if any */ struct ifnet *rcvif; /* rcv interface */ }; SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */ int32_t len; /* total packet length */ /* Layer crossing persistent information. */ uint32_t flowid; /* packet's 4-tuple system */ uint32_t csum_flags; /* checksum and offload features */ uint16_t fibnum; /* this packet should use this fib */ uint8_t numa_domain; /* NUMA domain of recvd pkt */ uint8_t rsstype; /* hash type */ union { uint64_t rcv_tstmp; /* timestamp in ns */ struct { uint8_t l2hlen; /* layer 2 hdr len */ uint8_t l3hlen; /* layer 3 hdr len */ uint8_t l4hlen; /* layer 4 hdr len */ uint8_t l5hlen; /* layer 5 hdr len */ uint8_t inner_l2hlen; uint8_t inner_l3hlen; uint8_t inner_l4hlen; uint8_t inner_l5hlen; }; }; union { uint8_t eight[8]; uint16_t sixteen[4]; uint32_t thirtytwo[2]; uint64_t sixtyfour[1]; uintptr_t unintptr[1]; void *ptr; } PH_per; /* Layer specific non-persistent local storage for reassembly, etc. */ union { uint8_t eight[8]; uint16_t sixteen[4]; uint32_t thirtytwo[2]; uint64_t sixtyfour[1]; uintptr_t unintptr[1]; void *ptr; } PH_loc; }; #define ether_vtag PH_per.sixteen[0] #define tcp_tun_port PH_per.sixteen[0] /* outbound */ #define PH_vt PH_per #define vt_nrecs sixteen[0] /* mld and v6-ND */ #define tso_segsz PH_per.sixteen[1] /* inbound after LRO */ #define lro_nsegs tso_segsz /* inbound after LRO */ #define csum_data PH_per.thirtytwo[1] /* inbound from hardware up */ #define lro_tcp_d_len PH_loc.sixteen[0] /* inbound during LRO (no reassembly) */ #define lro_tcp_d_csum PH_loc.sixteen[1] /* inbound during LRO (no reassembly) */ #define lro_tcp_h_off PH_loc.sixteen[2] /* inbound during LRO (no reassembly) */ #define lro_etype PH_loc.sixteen[3] /* inbound during LRO (no reassembly) */ /* Note PH_loc is used during IP reassembly (all 8 bytes as a ptr) */ /* * TLS records for TLS 1.0-1.2 can have the following header lengths: * - 5 (AES-CBC with implicit IV) * - 21 (AES-CBC with explicit IV) * - 13 (AES-GCM with 8 byte explicit IV) */ #define MBUF_PEXT_HDR_LEN 23 /* * TLS records for TLS 1.0-1.2 can have the following maximum trailer * lengths: * - 16 (AES-GCM) * - 36 (AES-CBC with SHA1 and up to 16 bytes of padding) * - 48 (AES-CBC with SHA2-256 and up to 16 bytes of padding) * - 64 (AES-CBC with SHA2-384 and up to 16 bytes of padding) */ #define MBUF_PEXT_TRAIL_LEN 64 #if defined(__LP64__) #define MBUF_PEXT_MAX_PGS (40 / sizeof(vm_paddr_t)) #else #define MBUF_PEXT_MAX_PGS (72 / sizeof(vm_paddr_t)) #endif #define MBUF_PEXT_MAX_BYTES \ (MBUF_PEXT_MAX_PGS * PAGE_SIZE + MBUF_PEXT_HDR_LEN + MBUF_PEXT_TRAIL_LEN) struct ktls_session; struct socket; /* * Description of external storage mapped into mbuf; valid only if M_EXT is * set. * Size ILP32: 28 * LP64: 48 * Compile-time assertions in uipc_mbuf.c test these values to ensure that * they are correct. */ typedef void m_ext_free_t(struct mbuf *); struct m_ext { union { /* * If EXT_FLAG_EMBREF is set, then we use refcount in the * mbuf, the 'ext_count' member. Otherwise, we have a * shadow copy and we use pointer 'ext_cnt'. The original * mbuf is responsible to carry the pointer to free routine * and its arguments. They aren't copied into shadows in * mb_dupcl() to avoid dereferencing next cachelines. 
*/ volatile u_int ext_count; volatile u_int *ext_cnt; }; uint32_t ext_size; /* size of buffer, for ext_free */ uint32_t ext_type:8, /* type of external storage */ ext_flags:24; /* external storage mbuf flags */ union { struct { /* * Regular M_EXT mbuf: * o ext_buf always points to the external buffer. * o ext_free (below) and two optional arguments * ext_arg1 and ext_arg2 store the free context for * the external storage. They are set only in the * refcount carrying mbuf, the one with * EXT_FLAG_EMBREF flag, with exclusion for * EXT_EXTREF type, where the free context is copied * into all mbufs that use same external storage. */ char *ext_buf; /* start of buffer */ #define m_ext_copylen offsetof(struct m_ext, ext_arg2) void *ext_arg2; }; struct { /* * Multi-page M_EXTPG mbuf: * o extpg_pa - page vector. * o extpg_trail and extpg_hdr - TLS trailer and * header. * Uses ext_free and may also use ext_arg1. */ vm_paddr_t extpg_pa[MBUF_PEXT_MAX_PGS]; char extpg_trail[MBUF_PEXT_TRAIL_LEN]; char extpg_hdr[MBUF_PEXT_HDR_LEN]; /* Pretend these 3 fields are part of mbuf itself. */ #define m_epg_pa m_ext.extpg_pa #define m_epg_trail m_ext.extpg_trail #define m_epg_hdr m_ext.extpg_hdr #define m_epg_ext_copylen offsetof(struct m_ext, ext_free) }; }; /* * Free method and optional argument pointer, both * used by M_EXT and M_EXTPG. */ m_ext_free_t *ext_free; void *ext_arg1; }; /* * The core of the mbuf object along with some shortcut defines for practical * purposes. */ struct mbuf { /* * Header present at the beginning of every mbuf. * Size ILP32: 24 * LP64: 32 * Compile-time assertions in uipc_mbuf.c test these values to ensure * that they are correct. */ union { /* next buffer in chain */ struct mbuf *m_next; SLIST_ENTRY(mbuf) m_slist; STAILQ_ENTRY(mbuf) m_stailq; }; union { /* next chain in queue/record */ struct mbuf *m_nextpkt; SLIST_ENTRY(mbuf) m_slistpkt; STAILQ_ENTRY(mbuf) m_stailqpkt; }; caddr_t m_data; /* location of data */ int32_t m_len; /* amount of data in this mbuf */ uint32_t m_type:8, /* type of data in this mbuf */ m_flags:24; /* flags; see below */ #if !defined(__LP64__) uint32_t m_pad; /* pad for 64bit alignment */ #endif /* * A set of optional headers (packet header, external storage header) * and internal data storage. Historically, these arrays were sized * to MHLEN (space left after a packet header) and MLEN (space left * after only a regular mbuf header); they are now variable size in * order to support future work on variable-size mbufs. */ union { struct { union { /* M_PKTHDR set. */ struct pkthdr m_pkthdr; /* M_EXTPG set. * Multi-page M_EXTPG mbuf has its meta data * split between the below anonymous structure * and m_ext. It carries vector of pages, * optional header and trailer char vectors * and pointers to socket/TLS data. */ #define m_epg_startcopy m_epg_npgs #define m_epg_endcopy m_epg_stailq struct { /* Overall count of pages and count of * pages with I/O pending. */ uint8_t m_epg_npgs; uint8_t m_epg_nrdy; /* TLS header and trailer lengths. * The data itself resides in m_ext. */ uint8_t m_epg_hdrlen; uint8_t m_epg_trllen; /* Offset into 1st page and length of * data in the last page. */ uint16_t m_epg_1st_off; uint16_t m_epg_last_len; uint8_t m_epg_flags; #define EPG_FLAG_ANON 0x1 /* Data can be encrypted in place. */ #define EPG_FLAG_2FREE 0x2 /* Scheduled for free. 
*/ uint8_t m_epg_record_type; uint8_t __spare[2]; int m_epg_enc_cnt; struct ktls_session *m_epg_tls; struct socket *m_epg_so; uint64_t m_epg_seqno; STAILQ_ENTRY(mbuf) m_epg_stailq; }; }; union { /* M_EXT or M_EXTPG set. */ struct m_ext m_ext; /* M_PKTHDR set, neither M_EXT nor M_EXTPG. */ char m_pktdat[0]; }; }; char m_dat[0]; /* !M_PKTHDR, !M_EXT */ }; }; #ifdef _KERNEL static inline int m_epg_pagelen(const struct mbuf *m, int pidx, int pgoff) { KASSERT(pgoff == 0 || pidx == 0, ("page %d with non-zero offset %d in %p", pidx, pgoff, m)); if (pidx == m->m_epg_npgs - 1) { return (m->m_epg_last_len); } else { return (PAGE_SIZE - pgoff); } } #ifdef INVARIANTS #define MCHECK(ex, msg) KASSERT((ex), \ ("Multi page mbuf %p with " #msg " at %s:%d", \ m, __FILE__, __LINE__)) /* * NB: This expects a non-empty buffer (npgs > 0 and * last_pg_len > 0). */ #define MBUF_EXT_PGS_ASSERT_SANITY(m) do { \ MCHECK(m->m_epg_npgs > 0, "no valid pages"); \ MCHECK(m->m_epg_npgs <= nitems(m->m_epg_pa), \ "too many pages"); \ MCHECK(m->m_epg_nrdy <= m->m_epg_npgs, \ "too many ready pages"); \ MCHECK(m->m_epg_1st_off < PAGE_SIZE, \ "too large page offset"); \ MCHECK(m->m_epg_last_len > 0, "zero last page length"); \ MCHECK(m->m_epg_last_len <= PAGE_SIZE, \ "too large last page length"); \ if (m->m_epg_npgs == 1) \ MCHECK(m->m_epg_1st_off + \ m->m_epg_last_len <= PAGE_SIZE, \ "single page too large"); \ MCHECK(m->m_epg_hdrlen <= sizeof(m->m_epg_hdr), \ "too large header length"); \ MCHECK(m->m_epg_trllen <= sizeof(m->m_epg_trail), \ "too large trailer length"); \ } while (0) #else #define MBUF_EXT_PGS_ASSERT_SANITY(m) do {} while (0); #endif #endif /* * mbuf flags of global significance and layer crossing. * Those of only protocol/layer specific significance are to be mapped * to M_PROTO[1-11] and cleared at layer handoff boundaries. * NB: Limited to the lower 24 bits. */ #define M_EXT 0x00000001 /* has associated external storage */ #define M_PKTHDR 0x00000002 /* start of record */ #define M_EOR 0x00000004 /* end of record */ #define M_RDONLY 0x00000008 /* associated data is marked read-only */ #define M_BCAST 0x00000010 /* send/received as link-level broadcast */ #define M_MCAST 0x00000020 /* send/received as link-level multicast */ #define M_PROMISC 0x00000040 /* packet was not for us */ #define M_VLANTAG 0x00000080 /* ether_vtag is valid */ #define M_EXTPG 0x00000100 /* has array of unmapped pages and TLS */ #define M_NOFREE 0x00000200 /* do not free mbuf, embedded in cluster */ #define M_TSTMP 0x00000400 /* rcv_tstmp field is valid */ #define M_TSTMP_HPREC 0x00000800 /* rcv_tstmp is high-prec, typically hw-stamped on port (useful for IEEE 1588 and 802.1AS) */ #define M_TSTMP_LRO 0x00001000 /* Time LRO pushed in pkt is valid in (PH_loc) */ #define M_PROTO1 0x00002000 /* protocol-specific */ #define M_PROTO2 0x00004000 /* protocol-specific */ #define M_PROTO3 0x00008000 /* protocol-specific */ #define M_PROTO4 0x00010000 /* protocol-specific */ #define M_PROTO5 0x00020000 /* protocol-specific */ #define M_PROTO6 0x00040000 /* protocol-specific */ #define M_PROTO7 0x00080000 /* protocol-specific */ #define M_PROTO8 0x00100000 /* protocol-specific */ #define M_PROTO9 0x00200000 /* protocol-specific */ #define M_PROTO10 0x00400000 /* protocol-specific */ #define M_PROTO11 0x00800000 /* protocol-specific */ #define MB_DTOR_SKIP 0x1 /* don't pollute the cache by touching a freed mbuf */ /* * Flags to purge when crossing layers.
*/ #define M_PROTOFLAGS \ (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8|\ M_PROTO9|M_PROTO10|M_PROTO11) /* * Flags preserved when copying m_pkthdr. */ #define M_COPYFLAGS \ (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG|M_TSTMP| \ M_TSTMP_HPREC|M_TSTMP_LRO|M_PROTOFLAGS) /* * Mbuf flag description for use with printf(9) %b identifier. */ #define M_FLAG_BITS \ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY\5M_BCAST\6M_MCAST" \ "\7M_PROMISC\10M_VLANTAG\11M_EXTPG\12M_NOFREE\13M_TSTMP\14M_TSTMP_HPREC\15M_TSTMP_LRO" #define M_FLAG_PROTOBITS \ "\16M_PROTO1\17M_PROTO2\20M_PROTO3\21M_PROTO4" \ "\22M_PROTO5\23M_PROTO6\24M_PROTO7\25M_PROTO8\26M_PROTO9" \ "\27M_PROTO10\30M_PROTO11" #define M_FLAG_PRINTF (M_FLAG_BITS M_FLAG_PROTOBITS) /* * Network interface cards are able to hash protocol fields (such as IPv4 * addresses and TCP port numbers) to classify packets into flows. These flows * can then be used to maintain ordering while delivering packets to the OS * via parallel input queues, as well as to provide a stateless affinity * model. NIC drivers can pass up the hash via m->m_pkthdr.flowid, and set * m_flag fields to indicate how the hash should be interpreted by the * network stack. * * Most NICs support RSS, which provides ordering and explicit affinity, and * use the hash m_flag bits to indicate what header fields were covered by * the hash. M_HASHTYPE_OPAQUE and M_HASHTYPE_OPAQUE_HASH can be set by non- * RSS cards or configurations that provide an opaque flow identifier, allowing * for ordering and distribution without explicit affinity. Additionally, * M_HASHTYPE_OPAQUE_HASH indicates that the flow identifier has hash * properties. * * The meaning of the IPV6_EX suffix: * "o Home address from the home address option in the IPv6 destination * options header. If the extension header is not present, use the Source * IPv6 Address. * o IPv6 address that is contained in the Routing-Header-Type-2 from the * associated extension header. If the extension header is not present, * use the Destination IPv6 Address."
* Quoted from: * https://docs.microsoft.com/en-us/windows-hardware/drivers/network/rss-hashing-types#ndishashipv6ex */ #define M_HASHTYPE_HASHPROP 0x80 /* has hash properties */ #define M_HASHTYPE_INNER 0x40 /* calculated from inner headers */ #define M_HASHTYPE_HASH(t) (M_HASHTYPE_HASHPROP | (t)) /* Microsoft RSS standard hash types */ #define M_HASHTYPE_NONE 0 #define M_HASHTYPE_RSS_IPV4 M_HASHTYPE_HASH(1) /* IPv4 2-tuple */ #define M_HASHTYPE_RSS_TCP_IPV4 M_HASHTYPE_HASH(2) /* TCPv4 4-tuple */ #define M_HASHTYPE_RSS_IPV6 M_HASHTYPE_HASH(3) /* IPv6 2-tuple */ #define M_HASHTYPE_RSS_TCP_IPV6 M_HASHTYPE_HASH(4) /* TCPv6 4-tuple */ #define M_HASHTYPE_RSS_IPV6_EX M_HASHTYPE_HASH(5) /* IPv6 2-tuple + * ext hdrs */ #define M_HASHTYPE_RSS_TCP_IPV6_EX M_HASHTYPE_HASH(6) /* TCPv6 4-tuple + * ext hdrs */ #define M_HASHTYPE_RSS_UDP_IPV4 M_HASHTYPE_HASH(7) /* IPv4 UDP 4-tuple*/ #define M_HASHTYPE_RSS_UDP_IPV6 M_HASHTYPE_HASH(9) /* IPv6 UDP 4-tuple*/ #define M_HASHTYPE_RSS_UDP_IPV6_EX M_HASHTYPE_HASH(10)/* IPv6 UDP 4-tuple + * ext hdrs */ #define M_HASHTYPE_OPAQUE 0x3f /* ordering, not affinity */ #define M_HASHTYPE_OPAQUE_HASH M_HASHTYPE_HASH(M_HASHTYPE_OPAQUE) /* ordering+hash, not affinity*/ #define M_HASHTYPE_CLEAR(m) ((m)->m_pkthdr.rsstype = 0) #define M_HASHTYPE_GET(m) ((m)->m_pkthdr.rsstype & ~M_HASHTYPE_INNER) #define M_HASHTYPE_SET(m, v) ((m)->m_pkthdr.rsstype = (v)) #define M_HASHTYPE_TEST(m, v) (M_HASHTYPE_GET(m) == (v)) #define M_HASHTYPE_ISHASH(m) \ (((m)->m_pkthdr.rsstype & M_HASHTYPE_HASHPROP) != 0) #define M_HASHTYPE_SETINNER(m) do { \ (m)->m_pkthdr.rsstype |= M_HASHTYPE_INNER; \ } while (0) /* * External mbuf storage buffer types. */ #define EXT_CLUSTER 1 /* mbuf cluster */ #define EXT_SFBUF 2 /* sendfile(2)'s sf_buf */ #define EXT_JUMBOP 3 /* jumbo cluster page sized */ #define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */ #define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */ #define EXT_PACKET 6 /* mbuf+cluster from packet zone */ #define EXT_MBUF 7 /* external mbuf reference */ #define EXT_RXRING 8 /* data in NIC receive ring */ #define EXT_VENDOR1 224 /* for vendor-internal use */ #define EXT_VENDOR2 225 /* for vendor-internal use */ #define EXT_VENDOR3 226 /* for vendor-internal use */ #define EXT_VENDOR4 227 /* for vendor-internal use */ #define EXT_EXP1 244 /* for experimental use */ #define EXT_EXP2 245 /* for experimental use */ #define EXT_EXP3 246 /* for experimental use */ #define EXT_EXP4 247 /* for experimental use */ #define EXT_NET_DRV 252 /* custom ext_buf provided by net driver(s) */ #define EXT_MOD_TYPE 253 /* custom module's ext_buf type */ #define EXT_DISPOSABLE 254 /* can throw this buffer away w/page flipping */ #define EXT_EXTREF 255 /* has externally maintained ext_cnt ptr */ /* * Flags for external mbuf buffer types. * NB: limited to the lower 24 bits. */ #define EXT_FLAG_EMBREF 0x000001 /* embedded ext_count */ #define EXT_FLAG_EXTREF 0x000002 /* external ext_cnt, notyet */ #define EXT_FLAG_NOFREE 0x000010 /* don't free mbuf to pool, notyet */ #define EXT_FLAG_VENDOR1 0x010000 /* These flags are vendor */ #define EXT_FLAG_VENDOR2 0x020000 /* or submodule specific, */ #define EXT_FLAG_VENDOR3 0x040000 /* not used by mbuf code. */ #define EXT_FLAG_VENDOR4 0x080000 /* Set/read by submodule. 
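/*
 * Illustration (not from this header): how a receive path typically uses
 * the hash macros above -- a driver records the RSS hash and its type, and
 * the stack later tests whether the hash has known properties.  Kernel-
 * context sketch, not compilable standalone; the example_* function names
 * are hypothetical.
 */
static void
example_rx_hash(struct mbuf *m, uint32_t hash, bool is_tcpv4)
{
	m->m_pkthdr.flowid = hash;
	M_HASHTYPE_SET(m, is_tcpv4 ? M_HASHTYPE_RSS_TCP_IPV4 :
	    M_HASHTYPE_OPAQUE);
}

static bool
example_can_affinitize(const struct mbuf *m)
{
	/* Only hashes with hash properties give explicit CPU affinity. */
	return (M_HASHTYPE_ISHASH(m));
}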
*/ #define EXT_FLAG_EXP1 0x100000 /* for experimental use */ #define EXT_FLAG_EXP2 0x200000 /* for experimental use */ #define EXT_FLAG_EXP3 0x400000 /* for experimental use */ #define EXT_FLAG_EXP4 0x800000 /* for experimental use */ /* * EXT flag description for use with printf(9) %b identifier. */ #define EXT_FLAG_BITS \ "\20\1EXT_FLAG_EMBREF\2EXT_FLAG_EXTREF\5EXT_FLAG_NOFREE" \ "\21EXT_FLAG_VENDOR1\22EXT_FLAG_VENDOR2\23EXT_FLAG_VENDOR3" \ "\24EXT_FLAG_VENDOR4\25EXT_FLAG_EXP1\26EXT_FLAG_EXP2\27EXT_FLAG_EXP3" \ "\30EXT_FLAG_EXP4" /* * Flags indicating checksum, segmentation and other offload work to be * done, or already done, by hardware or lower layers. It is split into * separate inbound and outbound flags. * * Outbound flags that are set by upper protocol layers requesting lower * layers, or ideally the hardware, to perform these offloading tasks. * For outbound packets this field and its flags can be directly tested * against ifnet if_hwassist. Note that the outbound and the inbound flags do * not collide right now but they could be allowed to (as long as the flags are * scrubbed appropriately when the direction of an mbuf changes). CSUM_BITS * would also have to split into CSUM_BITS_TX and CSUM_BITS_RX. * * CSUM_INNER_ is the same as CSUM_ but it applies to the inner frame. * The CSUM_ENCAP_ bits identify the outer encapsulation. */ #define CSUM_IP 0x00000001 /* IP header checksum offload */ #define CSUM_IP_UDP 0x00000002 /* UDP checksum offload */ #define CSUM_IP_TCP 0x00000004 /* TCP checksum offload */ #define CSUM_IP_SCTP 0x00000008 /* SCTP checksum offload */ #define CSUM_IP_TSO 0x00000010 /* TCP segmentation offload */ #define CSUM_IP_ISCSI 0x00000020 /* iSCSI checksum offload */ #define CSUM_INNER_IP6_UDP 0x00000040 #define CSUM_INNER_IP6_TCP 0x00000080 #define CSUM_INNER_IP6_TSO 0x00000100 #define CSUM_IP6_UDP 0x00000200 /* UDP checksum offload */ #define CSUM_IP6_TCP 0x00000400 /* TCP checksum offload */ #define CSUM_IP6_SCTP 0x00000800 /* SCTP checksum offload */ #define CSUM_IP6_TSO 0x00001000 /* TCP segmentation offload */ #define CSUM_IP6_ISCSI 0x00002000 /* iSCSI checksum offload */ #define CSUM_INNER_IP 0x00004000 #define CSUM_INNER_IP_UDP 0x00008000 #define CSUM_INNER_IP_TCP 0x00010000 #define CSUM_INNER_IP_TSO 0x00020000 #define CSUM_ENCAP_VXLAN 0x00040000 /* VXLAN outer encapsulation */ #define CSUM_ENCAP_RSVD1 0x00080000 /* Inbound checksum support where the checksum was verified by hardware. 
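 *
 * For example (a sketch, not a driver requirement): a NIC that verified
 * the IPv4 header and TCP checksums of a received frame could report
 *
 *	m->m_pkthdr.csum_flags = CSUM_L3_CALC | CSUM_L3_VALID |
 *	    CSUM_L4_CALC | CSUM_L4_VALID;
 *	m->m_pkthdr.csum_data = 0xffff;
 *
 * so that the IP and TCP layers can skip software verification (see the
 * CSUM_IP_CHECKED/CSUM_DATA_VALID compatibility names below).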
*/ #define CSUM_INNER_L3_CALC 0x00100000 #define CSUM_INNER_L3_VALID 0x00200000 #define CSUM_INNER_L4_CALC 0x00400000 #define CSUM_INNER_L4_VALID 0x00800000 #define CSUM_L3_CALC 0x01000000 /* calculated layer 3 csum */ #define CSUM_L3_VALID 0x02000000 /* checksum is correct */ #define CSUM_L4_CALC 0x04000000 /* calculated layer 4 csum */ #define CSUM_L4_VALID 0x08000000 /* checksum is correct */ #define CSUM_L5_CALC 0x10000000 /* calculated layer 5 csum */ #define CSUM_L5_VALID 0x20000000 /* checksum is correct */ #define CSUM_COALESCED 0x40000000 /* contains merged segments */ #define CSUM_SND_TAG 0x80000000 /* Packet header has send tag */ #define CSUM_FLAGS_TX (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_SCTP | \ CSUM_IP_TSO | CSUM_IP_ISCSI | CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | \ CSUM_INNER_IP6_TSO | CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_SCTP | \ CSUM_IP6_TSO | CSUM_IP6_ISCSI | CSUM_INNER_IP | CSUM_INNER_IP_UDP | \ CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN | \ CSUM_ENCAP_RSVD1 | CSUM_SND_TAG) #define CSUM_FLAGS_RX (CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID | \ CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID | CSUM_L3_CALC | CSUM_L3_VALID | \ CSUM_L4_CALC | CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \ CSUM_COALESCED) /* * CSUM flag description for use with printf(9) %b identifier. */ #define CSUM_BITS \ "\20\1CSUM_IP\2CSUM_IP_UDP\3CSUM_IP_TCP\4CSUM_IP_SCTP\5CSUM_IP_TSO" \ "\6CSUM_IP_ISCSI\7CSUM_INNER_IP6_UDP\10CSUM_INNER_IP6_TCP" \ "\11CSUM_INNER_IP6_TSO\12CSUM_IP6_UDP\13CSUM_IP6_TCP\14CSUM_IP6_SCTP" \ "\15CSUM_IP6_TSO\16CSUM_IP6_ISCSI\17CSUM_INNER_IP\20CSUM_INNER_IP_UDP" \ "\21CSUM_INNER_IP_TCP\22CSUM_INNER_IP_TSO\23CSUM_ENCAP_VXLAN" \ "\24CSUM_ENCAP_RSVD1\25CSUM_INNER_L3_CALC\26CSUM_INNER_L3_VALID" \ "\27CSUM_INNER_L4_CALC\30CSUM_INNER_L4_VALID\31CSUM_L3_CALC" \ "\32CSUM_L3_VALID\33CSUM_L4_CALC\34CSUM_L4_VALID\35CSUM_L5_CALC" \ "\36CSUM_L5_VALID\37CSUM_COALESCED\40CSUM_SND_TAG" /* CSUM flags compatibility mappings. */ #define CSUM_IP_CHECKED CSUM_L3_CALC #define CSUM_IP_VALID CSUM_L3_VALID #define CSUM_DATA_VALID CSUM_L4_VALID #define CSUM_PSEUDO_HDR CSUM_L4_CALC #define CSUM_SCTP_VALID CSUM_L4_VALID #define CSUM_DELAY_DATA (CSUM_TCP|CSUM_UDP) #define CSUM_DELAY_IP CSUM_IP /* Only v4, no v6 IP hdr csum */ #define CSUM_DELAY_DATA_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6) #define CSUM_DATA_VALID_IPV6 CSUM_DATA_VALID #define CSUM_TCP CSUM_IP_TCP #define CSUM_UDP CSUM_IP_UDP #define CSUM_SCTP CSUM_IP_SCTP #define CSUM_TSO (CSUM_IP_TSO|CSUM_IP6_TSO) #define CSUM_INNER_TSO (CSUM_INNER_IP_TSO|CSUM_INNER_IP6_TSO) #define CSUM_UDP_IPV6 CSUM_IP6_UDP #define CSUM_TCP_IPV6 CSUM_IP6_TCP #define CSUM_SCTP_IPV6 CSUM_IP6_SCTP /* * mbuf types describing the content of the mbuf (including external storage). */ #define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! 
Object is not mbuf */ #define MT_DATA 1 /* dynamic (data) allocation */ #define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */ #define MT_VENDOR1 4 /* for vendor-internal use */ #define MT_VENDOR2 5 /* for vendor-internal use */ #define MT_VENDOR3 6 /* for vendor-internal use */ #define MT_VENDOR4 7 /* for vendor-internal use */ #define MT_SONAME 8 /* socket name */ #define MT_EXP1 9 /* for experimental use */ #define MT_EXP2 10 /* for experimental use */ #define MT_EXP3 11 /* for experimental use */ #define MT_EXP4 12 /* for experimental use */ #define MT_CONTROL 14 /* extra-data protocol message */ #define MT_EXTCONTROL 15 /* control message with externalized contents */ #define MT_OOBDATA 16 /* expedited data */ #define MT_NOINIT 255 /* Not a type but a flag to allocate a non-initialized mbuf */ /* * String names of mbuf-related UMA(9) and malloc(9) types. Exposed to * !_KERNEL so that monitoring tools can look up the zones with * libmemstat(3). */ #define MBUF_MEM_NAME "mbuf" #define MBUF_CLUSTER_MEM_NAME "mbuf_cluster" #define MBUF_PACKET_MEM_NAME "mbuf_packet" #define MBUF_JUMBOP_MEM_NAME "mbuf_jumbo_page" #define MBUF_JUMBO9_MEM_NAME "mbuf_jumbo_9k" #define MBUF_JUMBO16_MEM_NAME "mbuf_jumbo_16k" #define MBUF_TAG_MEM_NAME "mbuf_tag" #define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt" #define MBUF_EXTPGS_MEM_NAME "mbuf_extpgs" #ifdef _KERNEL union if_snd_tag_alloc_params; #ifdef WITNESS #define MBUF_CHECKSLEEP(how) do { \ if (how == M_WAITOK) \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \ "Sleeping in \"%s\"", __func__); \ } while (0) #else #define MBUF_CHECKSLEEP(how) #endif /* * Network buffer allocation API * * The rest of it is defined in kern/kern_mbuf.c */ extern uma_zone_t zone_mbuf; extern uma_zone_t zone_clust; extern uma_zone_t zone_pack; extern uma_zone_t zone_jumbop; extern uma_zone_t zone_jumbo9; extern uma_zone_t zone_jumbo16; extern uma_zone_t zone_extpgs; void mb_dupcl(struct mbuf *, struct mbuf *); void mb_free_ext(struct mbuf *); void mb_free_extpg(struct mbuf *); void mb_free_mext_pgs(struct mbuf *); struct mbuf *mb_alloc_ext_pgs(int, m_ext_free_t); struct mbuf *mb_alloc_ext_plus_pages(int, int); struct mbuf *mb_mapped_to_unmapped(struct mbuf *, int, int, int, struct mbuf **); int mb_unmapped_compress(struct mbuf *m); struct mbuf *mb_unmapped_to_ext(struct mbuf *m); void mb_free_notready(struct mbuf *m, int count); void m_adj(struct mbuf *, int); void m_adj_decap(struct mbuf *, int); int m_apply(struct mbuf *, int, int, int (*)(void *, void *, u_int), void *); int m_append(struct mbuf *, int, c_caddr_t); void m_cat(struct mbuf *, struct mbuf *); void m_catpkt(struct mbuf *, struct mbuf *); int m_clget(struct mbuf *m, int how); void *m_cljget(struct mbuf *m, int how, int size); struct mbuf *m_collapse(struct mbuf *, int, int); void m_copyback(struct mbuf *, int, int, c_caddr_t); void m_copydata(const struct mbuf *, int, int, caddr_t); struct mbuf *m_copym(struct mbuf *, int, int, int); struct mbuf *m_copypacket(struct mbuf *, int); void m_copy_pkthdr(struct mbuf *, struct mbuf *); struct mbuf *m_copyup(struct mbuf *, int, int); struct mbuf *m_defrag(struct mbuf *, int); void m_demote_pkthdr(struct mbuf *); void m_demote(struct mbuf *, int, int); struct mbuf *m_devget(char *, int, int, struct ifnet *, void (*)(char *, caddr_t, u_int)); void m_dispose_extcontrolm(struct mbuf *m); struct mbuf *m_dup(const struct mbuf *, int); int m_dup_pkthdr(struct mbuf *, const struct mbuf *, int); void m_extadd(struct mbuf *, char *, u_int, m_ext_free_t, void *, void 
		    *, int, int);
u_int		 m_fixhdr(struct mbuf *);
struct mbuf	*m_fragment(struct mbuf *, int, int);
void		 m_freem(struct mbuf *);
struct mbuf	*m_get2(int, int, short, int);
struct mbuf	*m_getjcl(int, short, int, int);
struct mbuf	*m_getm2(struct mbuf *, int, int, short, int);
struct mbuf	*m_getptr(struct mbuf *, int, int *);
u_int		 m_length(struct mbuf *, struct mbuf **);
int		 m_mbuftouio(struct uio *, const struct mbuf *, int);
int		 m_unmappedtouio(const struct mbuf *, int, struct uio *, int);
void		 m_move_pkthdr(struct mbuf *, struct mbuf *);
int		 m_pkthdr_init(struct mbuf *, int);
struct mbuf	*m_prepend(struct mbuf *, int, int);
void		 m_print(const struct mbuf *, int);
struct mbuf	*m_pulldown(struct mbuf *, int, int, int *);
struct mbuf	*m_pullup(struct mbuf *, int);
int		 m_sanity(struct mbuf *, int);
struct mbuf	*m_split(struct mbuf *, int, int);
struct mbuf	*m_uiotombuf(struct uio *, int, int, int, int);
struct mbuf	*m_unshare(struct mbuf *, int);
int		 m_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *, struct m_snd_tag **);
void		 m_snd_tag_init(struct m_snd_tag *, struct ifnet *, u_int);
void		 m_snd_tag_destroy(struct m_snd_tag *);

static __inline int
m_gettype(int size)
{
	int type;

	switch (size) {
	case MSIZE:
		type = EXT_MBUF;
		break;
	case MCLBYTES:
		type = EXT_CLUSTER;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		break;
	default:
		panic("%s: invalid cluster size %d", __func__, size);
	}

	return (type);
}

/*
 * Associate an external reference counted buffer with an mbuf.
 */
static __inline void
m_extaddref(struct mbuf *m, char *buf, u_int size, u_int *ref_cnt,
    m_ext_free_t freef, void *arg1, void *arg2)
{

	KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));

	atomic_add_int(ref_cnt, 1);
	m->m_flags |= M_EXT;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_cnt = ref_cnt;
	m->m_data = m->m_ext.ext_buf;
	m->m_ext.ext_size = size;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_arg1 = arg1;
	m->m_ext.ext_arg2 = arg2;
	m->m_ext.ext_type = EXT_EXTREF;
	m->m_ext.ext_flags = 0;
}

static __inline uma_zone_t
m_getzone(int size)
{
	uma_zone_t zone;

	switch (size) {
	case MCLBYTES:
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		zone = zone_jumbo16;
		break;
	default:
		panic("%s: invalid cluster size %d", __func__, size);
	}

	return (zone);
}

/*
 * Initialize an mbuf with linear storage.
 *
 * Inline because the consumer text overhead will be roughly the same to
 * initialize or call a function with this many parameters and M_PKTHDR
 * should go away with constant propagation for !MGETHDR.
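 *
 * A minimal allocation sketch (error handling is caller-specific):
 * consumers normally reach m_init() through m_get()/m_gethdr() below
 * rather than calling it directly, e.g.:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);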
*/ static __inline int m_init(struct mbuf *m, int how, short type, int flags) { int error; m->m_next = NULL; m->m_nextpkt = NULL; m->m_data = m->m_dat; m->m_len = 0; m->m_flags = flags; m->m_type = type; if (flags & M_PKTHDR) error = m_pkthdr_init(m, how); else error = 0; MBUF_PROBE5(m__init, m, how, type, flags, error); return (error); } static __inline struct mbuf * m_get(int how, short type) { struct mbuf *m; struct mb_args args; args.flags = 0; args.type = type; m = uma_zalloc_arg(zone_mbuf, &args, how); MBUF_PROBE3(m__get, how, type, m); return (m); } static __inline struct mbuf * m_gethdr(int how, short type) { struct mbuf *m; struct mb_args args; args.flags = M_PKTHDR; args.type = type; m = uma_zalloc_arg(zone_mbuf, &args, how); MBUF_PROBE3(m__gethdr, how, type, m); return (m); } static __inline struct mbuf * m_getcl(int how, short type, int flags) { struct mbuf *m; struct mb_args args; args.flags = flags; args.type = type; m = uma_zalloc_arg(zone_pack, &args, how); MBUF_PROBE4(m__getcl, how, type, flags, m); return (m); } /* * XXX: m_cljset() is a dangerous API. One must attach only a new, * unreferenced cluster to an mbuf(9). It is not possible to assert * that, so care can be taken only by users of the API. */ static __inline void m_cljset(struct mbuf *m, void *cl, int type) { int size; switch (type) { case EXT_CLUSTER: size = MCLBYTES; break; #if MJUMPAGESIZE != MCLBYTES case EXT_JUMBOP: size = MJUMPAGESIZE; break; #endif case EXT_JUMBO9: size = MJUM9BYTES; break; case EXT_JUMBO16: size = MJUM16BYTES; break; default: panic("%s: unknown cluster type %d", __func__, type); break; } m->m_data = m->m_ext.ext_buf = cl; m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL; m->m_ext.ext_size = size; m->m_ext.ext_type = type; m->m_ext.ext_flags = EXT_FLAG_EMBREF; m->m_ext.ext_count = 1; m->m_flags |= M_EXT; MBUF_PROBE3(m__cljset, m, cl, type); } static __inline void m_chtype(struct mbuf *m, short new_type) { m->m_type = new_type; } static __inline void m_clrprotoflags(struct mbuf *m) { while (m) { m->m_flags &= ~M_PROTOFLAGS; m = m->m_next; } } static __inline struct mbuf * m_last(struct mbuf *m) { while (m->m_next) m = m->m_next; return (m); } static inline u_int m_extrefcnt(struct mbuf *m) { KASSERT(m->m_flags & M_EXT, ("%s: M_EXT missing", __func__)); return ((m->m_ext.ext_flags & EXT_FLAG_EMBREF) ? m->m_ext.ext_count : *m->m_ext.ext_cnt); } /* * mbuf, cluster, and external object allocation macros (for compatibility * purposes). */ #define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from)) #define MGET(m, how, type) ((m) = m_get((how), (type))) #define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type))) #define MCLGET(m, how) m_clget((m), (how)) #define MEXTADD(m, buf, size, free, arg1, arg2, flags, type) \ m_extadd((m), (char *)(buf), (size), (free), (arg1), (arg2), \ (flags), (type)) #define m_getm(m, len, how, type) \ m_getm2((m), (len), (how), (type), M_PKTHDR) /* * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can * be both the local data payload, or an external buffer area, depending on * whether M_EXT is set). */ #define M_WRITABLE(m) (((m)->m_flags & (M_RDONLY | M_EXTPG)) == 0 && \ (!(((m)->m_flags & M_EXT)) || \ (m_extrefcnt(m) == 1))) /* Check if the supplied mbuf has a packet header, or else panic. */ #define M_ASSERTPKTHDR(m) \ KASSERT((m) != NULL && (m)->m_flags & M_PKTHDR, \ ("%s: no mbuf packet header!", __func__)) /* Check if mbuf is multipage. 
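 * (That is, assert that M_EXTPG is set and M_PKTHDR is not.  As a usage
 * sketch, code about to touch the m_epg_* layout of an unmapped mbuf
 * might guard itself first:
 *
 *	M_ASSERTEXTPG(m);
 *	if (m->m_epg_tls != NULL)
 *		...
 *
 * where the body is caller-specific.)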
*/ #define M_ASSERTEXTPG(m) \ KASSERT(((m)->m_flags & (M_EXTPG|M_PKTHDR)) == M_EXTPG, \ ("%s: m %p is not multipage!", __func__, m)) /* * Ensure that the supplied mbuf is a valid, non-free mbuf. * * XXX: Broken at the moment. Need some UMA magic to make it work again. */ #define M_ASSERTVALID(m) \ KASSERT((((struct mbuf *)m)->m_flags & 0) == 0, \ ("%s: attempted use of a free mbuf!", __func__)) +/* Check whether any mbuf in the chain is unmapped. */ +#ifdef INVARIANTS +#define M_ASSERTMAPPED(m) do { \ + for (struct mbuf *__m = (m); __m != NULL; __m = __m->m_next) \ + KASSERT((__m->m_flags & M_EXTPG) == 0, \ + ("%s: chain %p contains an unmapped mbuf", __func__, (m)));\ +} while (0) +#else +#define M_ASSERTMAPPED(m) +#endif + /* * Return the address of the start of the buffer associated with an mbuf, * handling external storage, packet-header mbufs, and regular data mbufs. */ #define M_START(m) \ (((m)->m_flags & M_EXTPG) ? NULL : \ ((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf : \ ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] : \ &(m)->m_dat[0]) /* * Return the size of the buffer associated with an mbuf, handling external * storage, packet-header mbufs, and regular data mbufs. */ #define M_SIZE(m) \ (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \ ((m)->m_flags & M_PKTHDR) ? MHLEN : \ MLEN) /* * Set the m_data pointer of a newly allocated mbuf to place an object of the * specified size at the end of the mbuf, longword aligned. * * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as * separate macros, each asserting that it was called at the proper moment. * This required callers to themselves test the storage type and call the * right one. Rather than require callers to be aware of those layout * decisions, we centralize here. */ static __inline void m_align(struct mbuf *m, int len) { #ifdef INVARIANTS const char *msg = "%s: not a virgin mbuf"; #endif int adjust; KASSERT(m->m_data == M_START(m), (msg, __func__)); adjust = M_SIZE(m) - len; m->m_data += adjust &~ (sizeof(long)-1); } #define M_ALIGN(m, len) m_align(m, len) #define MH_ALIGN(m, len) m_align(m, len) #define MEXT_ALIGN(m, len) m_align(m, len) /* * Compute the amount of space available before the current start of data in * an mbuf. * * The M_WRITABLE() is a temporary, conservative safety measure: the burden * of checking writability of the mbuf data area rests solely with the caller. * * NB: In previous versions, M_LEADINGSPACE() would only check M_WRITABLE() * for mbufs with external storage. We now allow mbuf-embedded data to be * read-only as well. */ #define M_LEADINGSPACE(m) \ (M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0) /* * Compute the amount of space available after the end of data in an mbuf. * * The M_WRITABLE() is a temporary, conservative safety measure: the burden * of checking writability of the mbuf data area rests solely with the caller. * * NB: In previous versions, M_TRAILINGSPACE() would only check M_WRITABLE() * for mbufs with external storage. We now allow mbuf-embedded data to be * read-only as well. */ #define M_TRAILINGSPACE(m) \ (M_WRITABLE(m) ? \ ((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0) /* * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be * allocated, how specifies whether to wait. If the allocation fails, the * original mbuf chain is freed and m is set to NULL. 
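 *
 * A typical use, sketched (ip and the error value are caller context):
 *
 *	M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	ip = mtod(m, struct ip *);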
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	MBUF_CHECKSLEEP(how);						\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)

/*
 * Change mbuf to new type.  This is a relatively expensive operation and
 * should be avoided.
 */
#define	MCHTYPE(m, t)	m_chtype((m), (t))

/* Return the rcvif of a packet header. */
static __inline struct ifnet *
m_rcvif(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		return (NULL);
	return (m->m_pkthdr.rcvif);
}

/* Length to m_copy to copy all. */
#define	M_COPYALL	1000000000

extern int max_datalen;	/* MHLEN - max_hdr */
extern int max_hdr;	/* Largest link + protocol header */
extern int max_linkhdr;	/* Largest link-level header */
extern int max_protohdr; /* Largest protocol header */
extern int nmbclusters;	/* Maximum number of clusters */
extern bool mb_use_ext_pgs; /* Use ext_pgs for sendfile */

/*-
 * Network packets may have annotations attached by affixing a list of
 * "packet tags" to the pkthdr structure.  Packet tags are dynamically
 * allocated semi-opaque data structures that have a fixed header
 * (struct m_tag) that specifies the size of the memory block and a
 * (cookie, type) pair that identifies it.  The cookie is a 32-bit unique
 * unsigned value used to identify a module or ABI.  By convention this value
 * is chosen as the date+time that the module is created, expressed as the
 * number of seconds since the epoch (e.g., using date -u +'%s').  The type
 * value is an ABI/module-specific value that identifies a particular
 * annotation and is private to the module.  For compatibility with systems
 * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
 * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
 * compatibility shim functions and several tag types are defined below.
 * Users that do not require compatibility should use a private cookie value
 * so that packet tag-related definitions can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default memory
 * alignment implemented by malloc.  To reference private data one can use a
 * construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = (struct foo *)(mtag+1);
 *
 * if the alignment of struct m_tag is sufficient for referencing members of
 * struct foo.  Otherwise it is necessary to embed struct m_tag within the
 * private data structure to ensure proper alignment; e.g.,
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *) m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */

/*
 * Persistent tags stay with an mbuf until the mbuf is reclaimed.  Otherwise
 * tags are expected to ``vanish'' when they pass through a network
 * interface.  For most interfaces this happens normally as the tags are
 * reclaimed when the mbuf is free'd.  However in some special cases
 * reclaiming must be done manually.  An example is packets that pass through
 * the loopback interface.  Also, one must be careful to do this when
 * ``turning around'' packets (e.g., icmp_reflect).
 *
 * To mark a tag persistent, bit-or this flag in when defining the tag id.
 * The tag will then be treated as described above.
 */
#define	MTAG_PERSISTENT		0x800

#define	PACKET_TAG_NONE		0  /* Nadda */

/* Packet tags for use with PACKET_ABI_COMPAT.
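 *
 * As a sketch of the private-cookie convention described above (the
 * MTAG_MYMOD* names, struct mystate and the state variable are all
 * hypothetical; the cookie value is the module's creation date from
 * date -u +'%s'):
 *
 *	#define MTAG_MYMOD		1609459200
 *	#define MTAG_MYMOD_STATE	0
 *
 *	struct m_tag *t = m_tag_alloc(MTAG_MYMOD, MTAG_MYMOD_STATE,
 *	    sizeof(struct mystate), M_NOWAIT);
 *	if (t != NULL) {
 *		bcopy(&state, t + 1, sizeof(struct mystate));
 *		m_tag_prepend(m, t);
 *	}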
*/ #define PACKET_TAG_IPSEC_IN_DONE 1 /* IPsec applied, in */ #define PACKET_TAG_IPSEC_OUT_DONE 2 /* IPsec applied, out */ #define PACKET_TAG_IPSEC_IN_CRYPTO_DONE 3 /* NIC IPsec crypto done */ #define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED 4 /* NIC IPsec crypto req'ed */ #define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO 5 /* NIC notifies IPsec */ #define PACKET_TAG_IPSEC_PENDING_TDB 6 /* Reminder to do IPsec */ #define PACKET_TAG_BRIDGE 7 /* Bridge processing done */ #define PACKET_TAG_GIF 8 /* GIF processing done */ #define PACKET_TAG_GRE 9 /* GRE processing done */ #define PACKET_TAG_IN_PACKET_CHECKSUM 10 /* NIC checksumming done */ #define PACKET_TAG_ENCAP 11 /* Encap. processing */ #define PACKET_TAG_IPSEC_SOCKET 12 /* IPSEC socket ref */ #define PACKET_TAG_IPSEC_HISTORY 13 /* IPSEC history */ #define PACKET_TAG_IPV6_INPUT 14 /* IPV6 input processing */ #define PACKET_TAG_DUMMYNET 15 /* dummynet info */ #define PACKET_TAG_DIVERT 17 /* divert info */ #define PACKET_TAG_IPFORWARD 18 /* ipforward info */ #define PACKET_TAG_MACLABEL (19 | MTAG_PERSISTENT) /* MAC label */ #define PACKET_TAG_PF (21 | MTAG_PERSISTENT) /* PF/ALTQ information */ #define PACKET_TAG_RTSOCKFAM 25 /* rtsock sa family */ #define PACKET_TAG_IPOPTIONS 27 /* Saved IP options */ #define PACKET_TAG_CARP 28 /* CARP info */ #define PACKET_TAG_IPSEC_NAT_T_PORTS 29 /* two uint16_t */ #define PACKET_TAG_ND_OUTGOING 30 /* ND outgoing */ /* Specific cookies and tags. */ /* Packet tag routines. */ struct m_tag *m_tag_alloc(u_int32_t, int, int, int); void m_tag_delete(struct mbuf *, struct m_tag *); void m_tag_delete_chain(struct mbuf *, struct m_tag *); void m_tag_free_default(struct m_tag *); struct m_tag *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *); struct m_tag *m_tag_copy(struct m_tag *, int); int m_tag_copy_chain(struct mbuf *, const struct mbuf *, int); void m_tag_delete_nonpersistent(struct mbuf *); /* * Initialize the list of tags associated with an mbuf. */ static __inline void m_tag_init(struct mbuf *m) { SLIST_INIT(&m->m_pkthdr.tags); } /* * Set up the contents of a tag. Note that this does not fill in the free * method; the caller is expected to do that. * * XXX probably should be called m_tag_init, but that was already taken. */ static __inline void m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len) { t->m_tag_id = type; t->m_tag_len = len; t->m_tag_cookie = cookie; } /* * Reclaim resources associated with a tag. */ static __inline void m_tag_free(struct m_tag *t) { (*t->m_tag_free)(t); } /* * Return the first tag associated with an mbuf. */ static __inline struct m_tag * m_tag_first(struct mbuf *m) { return (SLIST_FIRST(&m->m_pkthdr.tags)); } /* * Return the next tag in the list of tags associated with an mbuf. */ static __inline struct m_tag * m_tag_next(struct mbuf *m __unused, struct m_tag *t) { return (SLIST_NEXT(t, m_tag_link)); } /* * Prepend a tag to the list of tags associated with an mbuf. */ static __inline void m_tag_prepend(struct mbuf *m, struct m_tag *t) { SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link); } /* * Unlink a tag from the list of tags associated with an mbuf. */ static __inline void m_tag_unlink(struct mbuf *m, struct m_tag *t) { SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link); } /* These are for OpenBSD compatibility. 
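 *
 * For example (a sketch; the key variable is hypothetical), OpenBSD-style
 * code attaches and later finds a tag by type alone:
 *
 *	struct m_tag *t = m_tag_get(PACKET_TAG_GRE, sizeof(uint32_t),
 *	    M_NOWAIT);
 *	if (t != NULL) {
 *		*(uint32_t *)(t + 1) = key;
 *		m_tag_prepend(m, t);
 *	}
 *	...
 *	t = m_tag_find(m, PACKET_TAG_GRE, NULL);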
*/ #define MTAG_ABI_COMPAT 0 /* compatibility ABI */ static __inline struct m_tag * m_tag_get(int type, int length, int wait) { return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait)); } static __inline struct m_tag * m_tag_find(struct mbuf *m, int type, struct m_tag *start) { return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL : m_tag_locate(m, MTAG_ABI_COMPAT, type, start)); } static inline struct m_snd_tag * m_snd_tag_ref(struct m_snd_tag *mst) { refcount_acquire(&mst->refcount); return (mst); } static inline void m_snd_tag_rele(struct m_snd_tag *mst) { if (refcount_release(&mst->refcount)) m_snd_tag_destroy(mst); } static __inline struct mbuf * m_free(struct mbuf *m) { struct mbuf *n = m->m_next; MBUF_PROBE1(m__free, m); if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE)) m_tag_delete_chain(m, NULL); if (m->m_flags & M_PKTHDR && m->m_pkthdr.csum_flags & CSUM_SND_TAG) m_snd_tag_rele(m->m_pkthdr.snd_tag); if (m->m_flags & M_EXTPG) mb_free_extpg(m); else if (m->m_flags & M_EXT) mb_free_ext(m); else if ((m->m_flags & M_NOFREE) == 0) uma_zfree(zone_mbuf, m); return (n); } static __inline int rt_m_getfib(struct mbuf *m) { KASSERT(m->m_flags & M_PKTHDR , ("Attempt to get FIB from non header mbuf.")); return (m->m_pkthdr.fibnum); } #define M_GETFIB(_m) rt_m_getfib(_m) #define M_SETFIB(_m, _fib) do { \ KASSERT((_m)->m_flags & M_PKTHDR, ("Attempt to set FIB on non header mbuf.")); \ ((_m)->m_pkthdr.fibnum) = (_fib); \ } while (0) /* flags passed as first argument for "m_xxx_tcpip_hash()" */ #define MBUF_HASHFLAG_L2 (1 << 2) #define MBUF_HASHFLAG_L3 (1 << 3) #define MBUF_HASHFLAG_L4 (1 << 4) /* mbuf hashing helper routines */ uint32_t m_ether_tcpip_hash_init(void); uint32_t m_ether_tcpip_hash(const uint32_t, const struct mbuf *, uint32_t); uint32_t m_infiniband_tcpip_hash_init(void); uint32_t m_infiniband_tcpip_hash(const uint32_t, const struct mbuf *, uint32_t); #ifdef MBUF_PROFILING void m_profile(struct mbuf *m); #define M_PROFILE(m) m_profile(m) #else #define M_PROFILE(m) #endif struct mbufq { STAILQ_HEAD(, mbuf) mq_head; int mq_len; int mq_maxlen; }; static inline void mbufq_init(struct mbufq *mq, int maxlen) { STAILQ_INIT(&mq->mq_head); mq->mq_maxlen = maxlen; mq->mq_len = 0; } static inline struct mbuf * mbufq_flush(struct mbufq *mq) { struct mbuf *m; m = STAILQ_FIRST(&mq->mq_head); STAILQ_INIT(&mq->mq_head); mq->mq_len = 0; return (m); } static inline void mbufq_drain(struct mbufq *mq) { struct mbuf *m, *n; n = mbufq_flush(mq); while ((m = n) != NULL) { n = STAILQ_NEXT(m, m_stailqpkt); m_freem(m); } } static inline struct mbuf * mbufq_first(const struct mbufq *mq) { return (STAILQ_FIRST(&mq->mq_head)); } static inline struct mbuf * mbufq_last(const struct mbufq *mq) { return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt)); } static inline int mbufq_full(const struct mbufq *mq) { return (mq->mq_maxlen > 0 && mq->mq_len >= mq->mq_maxlen); } static inline int mbufq_len(const struct mbufq *mq) { return (mq->mq_len); } static inline int mbufq_enqueue(struct mbufq *mq, struct mbuf *m) { if (mbufq_full(mq)) return (ENOBUFS); STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt); mq->mq_len++; return (0); } static inline struct mbuf * mbufq_dequeue(struct mbufq *mq) { struct mbuf *m; m = STAILQ_FIRST(&mq->mq_head); if (m) { STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt); m->m_nextpkt = NULL; mq->mq_len--; } return (m); } static inline void mbufq_prepend(struct mbufq *mq, struct mbuf *m) { STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt); mq->mq_len++; } /* * Note: this doesn't 
 * enforce the maximum list size for dst.
 */
static inline void
mbufq_concat(struct mbufq *mq_dst, struct mbufq *mq_src)
{

	mq_dst->mq_len += mq_src->mq_len;
	STAILQ_CONCAT(&mq_dst->mq_head, &mq_src->mq_head);
	mq_src->mq_len = 0;
}

#ifdef _SYS_TIMESPEC_H_
static inline void
mbuf_tstmp2timespec(struct mbuf *m, struct timespec *ts)
{

	KASSERT((m->m_flags & M_PKTHDR) != 0, ("mbuf %p no M_PKTHDR", m));
	KASSERT((m->m_flags & (M_TSTMP|M_TSTMP_LRO)) != 0,
	    ("mbuf %p no M_TSTMP or M_TSTMP_LRO", m));
	ts->tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
	ts->tv_nsec = m->m_pkthdr.rcv_tstmp % 1000000000;
}
#endif

#ifdef DEBUGNET
/* Invoked from the debugnet client code. */
void	debugnet_mbuf_drain(void);
void	debugnet_mbuf_start(void);
void	debugnet_mbuf_finish(void);
void	debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize);
#endif

static inline bool
mbuf_has_tls_session(struct mbuf *m)
{

	if (m->m_flags & M_EXTPG) {
		if (m->m_epg_tls != NULL) {
			return (true);
		}
	}
	return (false);
}

#endif /* _KERNEL */
#endif /* !_SYS_MBUF_H_ */