diff --git a/sys/netinet/ip_icmp.c b/sys/netinet/ip_icmp.c index 125cdd39579d..e9221112b9e5 100644 --- a/sys/netinet/ip_icmp.c +++ b/sys/netinet/ip_icmp.c @@ -1,923 +1,923 @@ /* * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ip_icmp.c 8.2 (Berkeley) 1/4/94 * $FreeBSD$ */ #include "opt_ipsec.h" #include "opt_mac.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/mac.h> #include <sys/mbuf.h> #include <sys/protosw.h> #include <sys/socket.h> #include <sys/time.h> #include <sys/kernel.h> #include <sys/sysctl.h> #include <sys/syslog.h> #include <net/if.h> #include <net/if_types.h> #include <net/route.h> #include <netinet/in.h> #include <netinet/in_systm.h> #include <netinet/in_var.h> #include <netinet/ip.h> #include <netinet/ip_icmp.h> #include <netinet/ip_var.h> #include <netinet/icmp_var.h> #include <netinet/tcp.h> #include <netinet/tcp_var.h> #include <netinet/tcpip.h> #ifdef IPSEC #include <netinet6/ipsec.h> #include <netkey/key.h> #endif #ifdef FAST_IPSEC #include <netipsec/ipsec.h> #include <netipsec/key.h> #define IPSEC #endif #include <machine/in_cksum.h> /* * ICMP routines: error generation, receive packet processing, and * routines to turn around packets back to the originator, and * host table maintenance routines.
*/ struct icmpstat icmpstat; SYSCTL_STRUCT(_net_inet_icmp, ICMPCTL_STATS, stats, CTLFLAG_RW, &icmpstat, icmpstat, ""); static int icmpmaskrepl = 0; SYSCTL_INT(_net_inet_icmp, ICMPCTL_MASKREPL, maskrepl, CTLFLAG_RW, &icmpmaskrepl, 0, "Reply to ICMP Address Mask Request packets."); static u_int icmpmaskfake = 0; SYSCTL_UINT(_net_inet_icmp, OID_AUTO, maskfake, CTLFLAG_RW, &icmpmaskfake, 0, "Fake reply to ICMP Address Mask Request packets."); static int drop_redirect = 0; SYSCTL_INT(_net_inet_icmp, OID_AUTO, drop_redirect, CTLFLAG_RW, &drop_redirect, 0, ""); static int log_redirect = 0; SYSCTL_INT(_net_inet_icmp, OID_AUTO, log_redirect, CTLFLAG_RW, &log_redirect, 0, ""); static int icmplim = 200; SYSCTL_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RW, &icmplim, 0, ""); static int icmplim_output = 1; SYSCTL_INT(_net_inet_icmp, OID_AUTO, icmplim_output, CTLFLAG_RW, &icmplim_output, 0, ""); static char reply_src[IFNAMSIZ]; SYSCTL_STRING(_net_inet_icmp, OID_AUTO, reply_src, CTLFLAG_RW, &reply_src, IFNAMSIZ, "icmp reply source for non-local packets."); /* * ICMP broadcast echo sysctl */ static int icmpbmcastecho = 0; SYSCTL_INT(_net_inet_icmp, OID_AUTO, bmcastecho, CTLFLAG_RW, &icmpbmcastecho, 0, ""); #ifdef ICMPPRINTFS int icmpprintfs = 0; #endif static void icmp_reflect(struct mbuf *); static void icmp_send(struct mbuf *, struct mbuf *); static int ip_next_mtu(int, int); extern struct protosw inetsw[]; /* * Generate an error packet of type error * in response to bad packet ip. */ void icmp_error(n, type, code, dest, destifp) struct mbuf *n; int type, code; n_long dest; struct ifnet *destifp; { register struct ip *oip = mtod(n, struct ip *), *nip; register unsigned oiplen = oip->ip_hl << 2; register struct icmp *icp; register struct mbuf *m; unsigned icmplen; #ifdef ICMPPRINTFS if (icmpprintfs) printf("icmp_error(%p, %x, %d)\n", oip, type, code); #endif if (type != ICMP_REDIRECT) icmpstat.icps_error++; /* * Don't send error if the original packet was encrypted. * Don't send error if not the first fragment of message. * Don't error if the old packet protocol was ICMP * error message, only known informational types. */ if (n->m_flags & M_DECRYPTED) goto freeit; if (oip->ip_off &~ (IP_MF|IP_DF)) goto freeit; if (oip->ip_p == IPPROTO_ICMP && type != ICMP_REDIRECT && n->m_len >= oiplen + ICMP_MINLEN && !ICMP_INFOTYPE(((struct icmp *)((caddr_t)oip + oiplen))->icmp_type)) { icmpstat.icps_oldicmp++; goto freeit; } /* Don't send error in response to a multicast or broadcast packet */ if (n->m_flags & (M_BCAST|M_MCAST)) goto freeit; /* * First, formulate icmp message */ m = m_gethdr(M_DONTWAIT, MT_HEADER); if (m == NULL) goto freeit; #ifdef MAC mac_create_mbuf_netlayer(n, m); #endif icmplen = min(oiplen + 8, oip->ip_len); if (icmplen < sizeof(struct ip)) panic("icmp_error: bad length"); m->m_len = icmplen + ICMP_MINLEN; MH_ALIGN(m, m->m_len); icp = mtod(m, struct icmp *); if ((u_int)type > ICMP_MAXTYPE) panic("icmp_error"); icmpstat.icps_outhist[type]++; icp->icmp_type = type; if (type == ICMP_REDIRECT) icp->icmp_gwaddr.s_addr = dest; else { icp->icmp_void = 0; /* * The following assignments assume an overlay with the * zeroed icmp_void field. */ if (type == ICMP_PARAMPROB) { icp->icmp_pptr = code; code = 0; } else if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG && destifp) { icp->icmp_nextmtu = htons(destifp->if_mtu); } } icp->icmp_code = code; m_copydata(n, 0, icmplen, (caddr_t)&icp->icmp_ip); nip = &icp->icmp_ip; /* * Convert fields to network representation. 
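 * (A standalone sketch of the quoting rule applied above follows.)
 */

/*
 * Illustration only, not part of the patch: the rule icmp_error()
 * implements -- an ICMP error quotes the offending IP header plus the
 * first 8 bytes of its payload.  Minimal userland sketch; the buffer
 * layout and the ex_ names are assumptions for the example.
 */
#include <string.h>
#include <sys/types.h>

#define EX_ICMP_MINLEN	8	/* type, code, cksum, 4 unused bytes */

/* Build an ICMP error body into 'buf'; returns the bytes written. */
static size_t
ex_icmp_quote(u_char *buf, u_char type, u_char code,
    const u_char *orig_pkt, size_t orig_len, size_t orig_hlen)
{
	size_t quote = orig_hlen + 8;	/* header + first 8 payload bytes */

	if (quote > orig_len)
		quote = orig_len;	/* short datagram: quote what exists */
	buf[0] = type;
	buf[1] = code;
	buf[2] = buf[3] = 0;		/* checksum, filled in by the caller */
	memset(buf + 4, 0, 4);		/* icmp_void */
	memcpy(buf + EX_ICMP_MINLEN, orig_pkt, quote);
	return (EX_ICMP_MINLEN + quote);
}

/*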
*/ nip->ip_len = htons(nip->ip_len); nip->ip_off = htons(nip->ip_off); /* * Now, copy old ip header (without options) * in front of icmp message. */ if (m->m_data - sizeof(struct ip) < m->m_pktdat) panic("icmp len"); /* * If the original mbuf was meant to bypass the firewall, the error * reply should bypass as well. */ m->m_flags |= n->m_flags & M_SKIP_FIREWALL; m->m_data -= sizeof(struct ip); m->m_len += sizeof(struct ip); m->m_pkthdr.len = m->m_len; m->m_pkthdr.rcvif = n->m_pkthdr.rcvif; nip = mtod(m, struct ip *); bcopy((caddr_t)oip, (caddr_t)nip, sizeof(struct ip)); nip->ip_len = m->m_len; nip->ip_v = IPVERSION; nip->ip_hl = 5; nip->ip_p = IPPROTO_ICMP; nip->ip_tos = 0; icmp_reflect(m); freeit: m_freem(n); } /* * Process a received ICMP message. */ void icmp_input(m, off) struct mbuf *m; int off; { struct icmp *icp; struct in_ifaddr *ia; struct ip *ip = mtod(m, struct ip *); struct sockaddr_in icmpsrc, icmpdst, icmpgw; int hlen = off; int icmplen = ip->ip_len; int i, code; void (*ctlfunc)(int, struct sockaddr *, void *); /* * Locate icmp structure in mbuf, and check * that not corrupted and of at least minimum length. */ #ifdef ICMPPRINTFS if (icmpprintfs) { char buf[4 * sizeof "123"]; strcpy(buf, inet_ntoa(ip->ip_src)); printf("icmp_input from %s to %s, len %d\n", buf, inet_ntoa(ip->ip_dst), icmplen); } #endif if (icmplen < ICMP_MINLEN) { icmpstat.icps_tooshort++; goto freeit; } i = hlen + min(icmplen, ICMP_ADVLENMIN); if (m->m_len < i && (m = m_pullup(m, i)) == 0) { icmpstat.icps_tooshort++; return; } ip = mtod(m, struct ip *); m->m_len -= hlen; m->m_data += hlen; icp = mtod(m, struct icmp *); if (in_cksum(m, icmplen)) { icmpstat.icps_checksum++; goto freeit; } m->m_len += hlen; m->m_data -= hlen; if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) { /* * Deliver very specific ICMP type only. */ switch (icp->icmp_type) { case ICMP_UNREACH: case ICMP_TIMXCEED: break; default: goto freeit; } } #ifdef ICMPPRINTFS if (icmpprintfs) printf("icmp_input, type %d code %d\n", icp->icmp_type, icp->icmp_code); #endif /* * Message type specific processing. */ if (icp->icmp_type > ICMP_MAXTYPE) goto raw; /* Initialize */ bzero(&icmpsrc, sizeof(icmpsrc)); icmpsrc.sin_len = sizeof(struct sockaddr_in); icmpsrc.sin_family = AF_INET; bzero(&icmpdst, sizeof(icmpdst)); icmpdst.sin_len = sizeof(struct sockaddr_in); icmpdst.sin_family = AF_INET; bzero(&icmpgw, sizeof(icmpgw)); icmpgw.sin_len = sizeof(struct sockaddr_in); icmpgw.sin_family = AF_INET; icmpstat.icps_inhist[icp->icmp_type]++; code = icp->icmp_code; switch (icp->icmp_type) { case ICMP_UNREACH: switch (code) { case ICMP_UNREACH_NET: case ICMP_UNREACH_HOST: case ICMP_UNREACH_SRCFAIL: case ICMP_UNREACH_NET_UNKNOWN: case ICMP_UNREACH_HOST_UNKNOWN: case ICMP_UNREACH_ISOLATED: case ICMP_UNREACH_TOSNET: case ICMP_UNREACH_TOSHOST: case ICMP_UNREACH_HOST_PRECEDENCE: case ICMP_UNREACH_PRECEDENCE_CUTOFF: code = PRC_UNREACH_NET; break; case ICMP_UNREACH_NEEDFRAG: code = PRC_MSGSIZE; break; /* * RFC 1122, Sections 3.2.2.1 and 4.2.3.9. 
* Treat subcodes 2,3 as immediate RST */ case ICMP_UNREACH_PROTOCOL: case ICMP_UNREACH_PORT: code = PRC_UNREACH_PORT; break; case ICMP_UNREACH_NET_PROHIB: case ICMP_UNREACH_HOST_PROHIB: case ICMP_UNREACH_FILTER_PROHIB: code = PRC_UNREACH_ADMIN_PROHIB; break; default: goto badcode; } goto deliver; case ICMP_TIMXCEED: if (code > 1) goto badcode; code += PRC_TIMXCEED_INTRANS; goto deliver; case ICMP_PARAMPROB: if (code > 1) goto badcode; code = PRC_PARAMPROB; goto deliver; case ICMP_SOURCEQUENCH: if (code) goto badcode; code = PRC_QUENCH; deliver: /* * Problem with datagram; advise higher level routines. */ if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) || icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) { icmpstat.icps_badlen++; goto freeit; } icp->icmp_ip.ip_len = ntohs(icp->icmp_ip.ip_len); /* Discard ICMP's in response to multicast packets */ if (IN_MULTICAST(ntohl(icp->icmp_ip.ip_dst.s_addr))) goto badcode; #ifdef ICMPPRINTFS if (icmpprintfs) printf("deliver to protocol %d\n", icp->icmp_ip.ip_p); #endif icmpsrc.sin_addr = icp->icmp_ip.ip_dst; /* * MTU discovery: * If we got a needfrag and there is a host route to the * original destination, and the MTU is not locked, then * set the MTU in the route to the suggested new value * (if given) and then notify as usual. The ULPs will * notice that the MTU has changed and adapt accordingly. * If no new MTU was suggested, then we guess a new one * less than the current value. If the new MTU is * unreasonably small (defined by sysctl tcp_minmss), then * we don't update the MTU value. * * XXX: All this should be done in tcp_mtudisc() because * the way we do it now, everyone can send us bogus ICMP * MSGSIZE packets for any destination. By doing this far * higher in the chain we have a matching tcp connection. * Thus spoofing is much harder. However there is no easy * non-hackish way to pass the new MTU up to tcp_mtudisc(). * Also see next XXX regarding IPv4 AH TCP. */ if (code == PRC_MSGSIZE) { int mtu; struct in_conninfo inc; bzero(&inc, sizeof(inc)); inc.inc_flags = 0; /* IPv4 */ inc.inc_faddr = icmpsrc.sin_addr; mtu = ntohs(icp->icmp_nextmtu); if (!mtu) mtu = ip_next_mtu(mtu, 1); if (mtu >= max(296, (tcp_minmss + sizeof(struct tcpiphdr)))) tcp_hc_updatemtu(&inc, mtu); #ifdef DEBUG_MTUDISC printf("MTU for %s reduced to %d\n", inet_ntoa(icmpsrc.sin_addr), mtu); #endif } /* * XXX if the packet contains [IPv4 AH TCP], we can't make a * notification to TCP layer. */ ctlfunc = inetsw[ip_protox[icp->icmp_ip.ip_p]].pr_ctlinput; if (ctlfunc) (*ctlfunc)(code, (struct sockaddr *)&icmpsrc, (void *)&icp->icmp_ip); break; badcode: icmpstat.icps_badcode++; break; case ICMP_ECHO: if (!icmpbmcastecho && (m->m_flags & (M_MCAST | M_BCAST)) != 0) { icmpstat.icps_bmcastecho++; break; } icp->icmp_type = ICMP_ECHOREPLY; if (badport_bandlim(BANDLIM_ICMP_ECHO) < 0) goto freeit; else goto reflect; case ICMP_TSTAMP: if (!icmpbmcastecho && (m->m_flags & (M_MCAST | M_BCAST)) != 0) { icmpstat.icps_bmcasttstamp++; break; } if (icmplen < ICMP_TSLEN) { icmpstat.icps_badlen++; break; } icp->icmp_type = ICMP_TSTAMPREPLY; icp->icmp_rtime = iptime(); icp->icmp_ttime = icp->icmp_rtime; /* bogus, do later! */ if (badport_bandlim(BANDLIM_ICMP_TSTAMP) < 0) goto freeit; else goto reflect; case ICMP_MASKREQ: if (icmpmaskrepl == 0) break; /* * We are not able to respond with all ones broadcast * unless we receive it over a point-to-point interface. 
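 * (Below: a standalone sketch of the RFC 1191 plateau walk that the MTU
 * discovery comment above refers to.)
 */

/*
 * Illustration only, not part of the patch: when a needfrag message
 * suggests no MTU, the kernel guesses the next smaller plateau from the
 * RFC 1191 table (see ip_next_mtu() later in this file).  Runnable
 * userland sketch with simplified direction handling; ex_ names are
 * assumptions.
 */
#include <stdio.h>

static const int ex_mtutab[] = {
	65535, 32000, 17914, 8166, 4352, 2002, 1492, 1006, 508, 296, 68, 0
};

/* Return the next plateau strictly below 'mtu', or 0 when none is left. */
static int
ex_next_lower_mtu(int mtu)
{
	int i;

	for (i = 0; ex_mtutab[i] != 0; i++)
		if (ex_mtutab[i] < mtu)
			return (ex_mtutab[i]);
	return (0);
}

int
main(void)
{
	int mtu;

	/* Walk a path-MTU search down from an Ethernet-sized packet. */
	for (mtu = 1500; mtu > 0; mtu = ex_next_lower_mtu(mtu))
		printf("trying mtu %d\n", mtu);
	return (0);
}

/*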
*/ if (icmplen < ICMP_MASKLEN) break; switch (ip->ip_dst.s_addr) { case INADDR_BROADCAST: case INADDR_ANY: icmpdst.sin_addr = ip->ip_src; break; default: icmpdst.sin_addr = ip->ip_dst; } ia = (struct in_ifaddr *)ifaof_ifpforaddr( (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif); if (ia == 0) break; if (ia->ia_ifp == 0) break; icp->icmp_type = ICMP_MASKREPLY; if (icmpmaskfake == 0) icp->icmp_mask = ia->ia_sockmask.sin_addr.s_addr; else icp->icmp_mask = icmpmaskfake; if (ip->ip_src.s_addr == 0) { if (ia->ia_ifp->if_flags & IFF_BROADCAST) ip->ip_src = satosin(&ia->ia_broadaddr)->sin_addr; else if (ia->ia_ifp->if_flags & IFF_POINTOPOINT) ip->ip_src = satosin(&ia->ia_dstaddr)->sin_addr; } reflect: ip->ip_len += hlen; /* since ip_input deducts this */ icmpstat.icps_reflect++; icmpstat.icps_outhist[icp->icmp_type]++; icmp_reflect(m); return; case ICMP_REDIRECT: if (log_redirect) { u_long src, dst, gw; src = ntohl(ip->ip_src.s_addr); dst = ntohl(icp->icmp_ip.ip_dst.s_addr); gw = ntohl(icp->icmp_gwaddr.s_addr); printf("icmp redirect from %d.%d.%d.%d: " "%d.%d.%d.%d => %d.%d.%d.%d\n", (int)(src >> 24), (int)((src >> 16) & 0xff), (int)((src >> 8) & 0xff), (int)(src & 0xff), (int)(dst >> 24), (int)((dst >> 16) & 0xff), (int)((dst >> 8) & 0xff), (int)(dst & 0xff), (int)(gw >> 24), (int)((gw >> 16) & 0xff), (int)((gw >> 8) & 0xff), (int)(gw & 0xff)); } /* * RFC1812 says we must ignore ICMP redirects if we * are acting as router. */ if (drop_redirect || ipforwarding) break; if (code > 3) goto badcode; if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) || icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) { icmpstat.icps_badlen++; break; } /* * Short circuit routing redirects to force * immediate change in the kernel's routing * tables. The message is also handed to anyone * listening on a raw socket (e.g. the routing * daemon for use in updating its tables). */ icmpgw.sin_addr = ip->ip_src; icmpdst.sin_addr = icp->icmp_gwaddr; #ifdef ICMPPRINTFS if (icmpprintfs) { char buf[4 * sizeof "123"]; strcpy(buf, inet_ntoa(icp->icmp_ip.ip_dst)); printf("redirect dst %s to %s\n", buf, inet_ntoa(icp->icmp_gwaddr)); } #endif icmpsrc.sin_addr = icp->icmp_ip.ip_dst; rtredirect((struct sockaddr *)&icmpsrc, (struct sockaddr *)&icmpdst, (struct sockaddr *)0, RTF_GATEWAY | RTF_HOST, (struct sockaddr *)&icmpgw); pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&icmpsrc); #ifdef IPSEC key_sa_routechange((struct sockaddr *)&icmpsrc); #endif break; /* * No kernel processing for the following; * just fall through to send to raw listener. */ case ICMP_ECHOREPLY: case ICMP_ROUTERADVERT: case ICMP_ROUTERSOLICIT: case ICMP_TSTAMPREPLY: case ICMP_IREQREPLY: case ICMP_MASKREPLY: default: break; } raw: rip_input(m, off); return; freeit: m_freem(m); } /* * Reflect the ip packet back to the source */ static void icmp_reflect(m) struct mbuf *m; { struct ip *ip = mtod(m, struct ip *); struct ifaddr *ifa; struct ifnet *ifn; struct in_ifaddr *ia; struct in_addr t; struct mbuf *opts = 0; int optlen = (ip->ip_hl << 2) - sizeof(struct ip); if (!in_canforward(ip->ip_src) && ((ntohl(ip->ip_src.s_addr) & IN_CLASSA_NET) != (IN_LOOPBACKNET << IN_CLASSA_NSHIFT))) { m_freem(m); /* Bad return address */ icmpstat.icps_badaddr++; goto done; /* Ip_output() will check for broadcast */ } t = ip->ip_dst; ip->ip_dst = ip->ip_src; /* * Source selection for ICMP replies: * * If the incoming packet was addressed directly to one of our * own addresses, use dst as the src for the reply. 
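 * (A simplified sketch of the hash-bucket lookup used below follows.)
 */

/*
 * Illustration only, not part of the patch: the INADDR_HASH pattern used
 * just below.  Local addresses hang off hash buckets keyed by the
 * address, so an exact-match probe walks one short chain.  Userland
 * sketch; the bucket count and ex_ names are assumptions.
 */
#include <netinet/in.h>

#define EX_NHASH	64
#define EX_HASH(a)	(ntohl(a) % EX_NHASH)

struct ex_laddr {
	struct in_addr	addr;
	struct ex_laddr	*next;		/* bucket chain */
};

static struct ex_laddr *ex_hashtbl[EX_NHASH];

/* Return the entry matching a local address exactly, else NULL. */
static struct ex_laddr *
ex_lookup(struct in_addr a)
{
	struct ex_laddr *p;

	for (p = ex_hashtbl[EX_HASH(a.s_addr)]; p != NULL; p = p->next)
		if (p->addr.s_addr == a.s_addr)
			return (p);
	return (NULL);
}

/*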
*/ LIST_FOREACH(ia, INADDR_HASH(t.s_addr), ia_hash) if (t.s_addr == IA_SIN(ia)->sin_addr.s_addr) goto match; /* * If the incoming packet was addressed to one of our broadcast * addresses, use the first non-broadcast address which corresponds * to the incoming interface. */ if (m->m_pkthdr.rcvif != NULL && m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) { TAILQ_FOREACH(ifa, &m->m_pkthdr.rcvif->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET) continue; ia = ifatoia(ifa); if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == t.s_addr) goto match; } } /* * If the incoming packet was not addressed directly to us, use * designated interface for icmp replies specified by sysctl * net.inet.icmp.reply_src (default not set). Otherwise continue * with normal source selection. */ if (reply_src[0] != '\0' && (ifn = ifunit(reply_src))) { TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET) continue; ia = ifatoia(ifa); goto match; } } /* * If the packet was transiting through us, use the address of * the interface that is the closest to the packet source. * When we don't have a route back to the packet source, stop here * and drop the packet. */ ia = ip_rtaddr(ip->ip_dst); if (ia == NULL) { m_freem(m); icmpstat.icps_noroute++; goto done; } match: #ifdef MAC mac_reflect_mbuf_icmp(m); #endif t = IA_SIN(ia)->sin_addr; ip->ip_src = t; ip->ip_ttl = ip_defttl; if (optlen > 0) { register u_char *cp; int opt, cnt; u_int len; /* * Retrieve any source routing from the incoming packet; * add on any record-route or timestamp options. */ cp = (u_char *) (ip + 1); - if ((opts = ip_srcroute()) == 0 && + if ((opts = ip_srcroute(m)) == 0 && (opts = m_gethdr(M_DONTWAIT, MT_HEADER))) { opts->m_len = sizeof(struct in_addr); mtod(opts, struct in_addr *)->s_addr = 0; } if (opts) { #ifdef ICMPPRINTFS if (icmpprintfs) printf("icmp_reflect optlen %d rt %d => ", optlen, opts->m_len); #endif for (cnt = optlen; cnt > 0; cnt -= len, cp += len) { opt = cp[IPOPT_OPTVAL]; if (opt == IPOPT_EOL) break; if (opt == IPOPT_NOP) len = 1; else { if (cnt < IPOPT_OLEN + sizeof(*cp)) break; len = cp[IPOPT_OLEN]; if (len < IPOPT_OLEN + sizeof(*cp) || len > cnt) break; } /* * Should check for overflow, but it "can't happen" */ if (opt == IPOPT_RR || opt == IPOPT_TS || opt == IPOPT_SECURITY) { bcopy((caddr_t)cp, mtod(opts, caddr_t) + opts->m_len, len); opts->m_len += len; } } /* Terminate & pad, if necessary */ cnt = opts->m_len % 4; if (cnt) { for (; cnt < 4; cnt++) { *(mtod(opts, caddr_t) + opts->m_len) = IPOPT_EOL; opts->m_len++; } } #ifdef ICMPPRINTFS if (icmpprintfs) printf("%d\n", opts->m_len); #endif } /* * Now strip out original options by copying rest of first * mbuf's data back, and adjust the IP length. */ ip->ip_len -= optlen; ip->ip_v = IPVERSION; ip->ip_hl = 5; m->m_len -= optlen; if (m->m_flags & M_PKTHDR) m->m_pkthdr.len -= optlen; optlen += sizeof(struct ip); bcopy((caddr_t)ip + optlen, (caddr_t)(ip + 1), (unsigned)(m->m_len - sizeof(struct ip))); } m_tag_delete_nonpersistent(m); m->m_flags &= ~(M_BCAST|M_MCAST); icmp_send(m, opts); done: if (opts) (void)m_free(opts); } /* * Send an icmp packet back to the ip level, * after supplying a checksum. 
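 * (A flat-buffer sketch of that checksum follows.)
 */

/*
 * Illustration only, not part of the patch: the RFC 1071 Internet
 * checksum that icmp_send() delegates to in_cksum().  The kernel version
 * walks mbuf chains; this userland sketch assumes contiguous data and
 * returns the value in host order (the kernel stores it back into the
 * packet in network order).
 */
#include <sys/types.h>

static u_short
ex_in_cksum(const u_char *buf, int len)
{
	u_long sum = 0;

	while (len > 1) {		/* sum 16-bit big-endian words */
		sum += (u_short)((buf[0] << 8) | buf[1]);
		buf += 2;
		len -= 2;
	}
	if (len == 1)			/* pad a trailing odd byte */
		sum += (u_short)(buf[0] << 8);
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return ((u_short)~sum);		/* ones' complement of the sum */
}

/*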
*/ static void icmp_send(m, opts) register struct mbuf *m; struct mbuf *opts; { register struct ip *ip = mtod(m, struct ip *); register int hlen; register struct icmp *icp; hlen = ip->ip_hl << 2; m->m_data += hlen; m->m_len -= hlen; icp = mtod(m, struct icmp *); icp->icmp_cksum = 0; icp->icmp_cksum = in_cksum(m, ip->ip_len - hlen); m->m_data -= hlen; m->m_len += hlen; m->m_pkthdr.rcvif = (struct ifnet *)0; #ifdef ICMPPRINTFS if (icmpprintfs) { char buf[4 * sizeof "123"]; strcpy(buf, inet_ntoa(ip->ip_dst)); printf("icmp_send dst %s src %s\n", buf, inet_ntoa(ip->ip_src)); } #endif (void) ip_output(m, opts, NULL, 0, NULL, NULL); } n_time iptime() { struct timeval atv; u_long t; getmicrotime(&atv); t = (atv.tv_sec % (24*60*60)) * 1000 + atv.tv_usec / 1000; return (htonl(t)); } /* * Return the next larger or smaller MTU plateau (table from RFC 1191) * given current value MTU. If DIR is less than zero, a larger plateau * is returned; otherwise, a smaller value is returned. */ static int ip_next_mtu(mtu, dir) int mtu; int dir; { static int mtutab[] = { 65535, 32000, 17914, 8166, 4352, 2002, 1492, 1006, 508, 296, 68, 0 }; int i; for (i = 0; i < (sizeof mtutab) / (sizeof mtutab[0]); i++) { if (mtu >= mtutab[i]) break; } if (dir < 0) { if (i == 0) { return 0; } else { return mtutab[i - 1]; } } else { if (mtutab[i] == 0) { return 0; } else if(mtu > mtutab[i]) { return mtutab[i]; } else { return mtutab[i + 1]; } } } /* * badport_bandlim() - check for ICMP bandwidth limit * * Return 0 if it is ok to send an ICMP error response, -1 if we have * hit our bandwidth limit and it is not ok. * * If icmplim is <= 0, the feature is disabled and 0 is returned. * * For now we separate the TCP and UDP subsystems w/ different 'which' * values. We may eventually remove this separation (and simplify the * code further). * * Note that the printing of the error message is delayed so we can * properly print the icmp error rate that the system was trying to do * (i.e. 22000/100 pps, etc...). This can cause long delays in printing * the 'final' error, but it doesn't make sense to solve the printing * delay with more complex code. */ int badport_bandlim(int which) { #define N(a) (sizeof (a) / sizeof (a[0])) static struct rate { const char *type; struct timeval lasttime; int curpps; } rates[BANDLIM_MAX+1] = { { "icmp unreach response" }, { "icmp ping response" }, { "icmp tstamp response" }, { "closed port RST response" }, { "open port RST response" } }; /* * Return ok status if feature disabled or argument out of range. */ if (icmplim > 0 && (u_int) which < N(rates)) { struct rate *r = &rates[which]; int opps = r->curpps; if (!ppsratecheck(&r->lasttime, &r->curpps, icmplim)) return -1; /* discard packet */ /* * If we've dropped below the threshold after having * rate-limited traffic print the message. This preserves * the previous behaviour at the expense of added complexity. */ if (icmplim_output && opps > icmplim) printf("Limiting %s from %d to %d packets/sec\n", r->type, opps, icmplim); } return 0; /* okay to send packet */ #undef N } diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c index 4d8c813fbc8a..fac2fc939241 100644 --- a/sys/netinet/ip_input.c +++ b/sys/netinet/ip_input.c @@ -1,2029 +1,2038 @@ /* * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 * $FreeBSD$ */ #include "opt_bootp.h" #include "opt_ipfw.h" #include "opt_ipstealth.h" #include "opt_ipsec.h" #include "opt_mac.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/callout.h> #include <sys/mac.h> #include <sys/mbuf.h> #include <sys/malloc.h> #include <sys/domain.h> #include <sys/protosw.h> #include <sys/socket.h> #include <sys/time.h> #include <sys/kernel.h> #include <sys/syslog.h> #include <sys/sysctl.h> #include <net/pfil.h> #include <net/if.h> #include <net/if_types.h> #include <net/if_var.h> #include <net/if_dl.h> #include <net/route.h> #include <net/netisr.h> #include <netinet/in.h> #include <netinet/in_systm.h> #include <netinet/in_var.h> #include <netinet/ip.h> #include <netinet/in_pcb.h> #include <netinet/ip_var.h> #include <netinet/ip_icmp.h> #include <machine/in_cksum.h> /* XXX: Temporary until ipfw_ether and ipfw_bridge are converted.
*/ #include <netinet/ip_fw.h> #include <netinet/ip_dummynet.h> #ifdef IPSEC #include <netinet6/ipsec.h> #include <netkey/key.h> #endif #ifdef FAST_IPSEC #include <netipsec/ipsec.h> #include <netipsec/key.h> #endif int rsvp_on = 0; int ipforwarding = 0; SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW, &ipforwarding, 0, "Enable IP forwarding between interfaces"); static int ipsendredirects = 1; /* XXX */ SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW, &ipsendredirects, 0, "Enable sending IP redirects"); int ip_defttl = IPDEFTTL; SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW, &ip_defttl, 0, "Maximum TTL on IP packets"); static int ip_dosourceroute = 0; SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW, &ip_dosourceroute, 0, "Enable forwarding source routed IP packets"); static int ip_acceptsourceroute = 0; SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute, CTLFLAG_RW, &ip_acceptsourceroute, 0, "Enable accepting source routed IP packets"); int ip_doopts = 1; /* 0 = ignore, 1 = process, 2 = reject */ SYSCTL_INT(_net_inet_ip, OID_AUTO, process_options, CTLFLAG_RW, &ip_doopts, 0, "Enable IP options processing ([LS]SRR, RR, TS)"); static int ip_keepfaith = 0; SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW, &ip_keepfaith, 0, "Enable packet capture for FAITH IPv4->IPv6 translator daemon"); static int nipq = 0; /* total # of reass queues */ static int maxnipq; SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW, &maxnipq, 0, "Maximum number of IPv4 fragment reassembly queue entries"); static int maxfragsperpacket; SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW, &maxfragsperpacket, 0, "Maximum number of IPv4 fragments allowed per packet"); static int ip_sendsourcequench = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW, &ip_sendsourcequench, 0, "Enable the transmission of source quench packets"); int ip_do_randomid = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW, &ip_do_randomid, 0, "Assign random ip_id values"); /* * XXX - Setting ip_checkinterface mostly implements the receive side of * the Strong ES model described in RFC 1122, but since the routing table * and transmit implementation do not implement the Strong ES model, * setting this to 1 results in an odd hybrid. * * XXX - ip_checkinterface currently must be disabled if you use ipnat * to translate the destination address to another local interface. * * XXX - ip_checkinterface must be disabled if you add IP aliases * to the loopback interface instead of the interface where the * packets for those addresses are received.
*/ static int ip_checkinterface = 1; SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW, &ip_checkinterface, 0, "Verify packet arrives on correct interface"); #ifdef DIAGNOSTIC static int ipprintfs = 0; #endif struct pfil_head inet_pfil_hook; /* Packet filter hooks */ static struct ifqueue ipintrq; static int ipqmaxlen = IFQ_MAXLEN; extern struct domain inetdomain; extern struct protosw inetsw[]; u_char ip_protox[IPPROTO_MAX]; struct in_ifaddrhead in_ifaddrhead; /* first inet address */ struct in_ifaddrhashhead *in_ifaddrhashtbl; /* inet addr hash table */ u_long in_ifaddrhmask; /* mask for hash table */ SYSCTL_INT(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, CTLFLAG_RW, &ipintrq.ifq_maxlen, 0, "Maximum size of the IP input queue"); SYSCTL_INT(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, CTLFLAG_RD, &ipintrq.ifq_drops, 0, "Number of packets dropped from the IP input queue"); struct ipstat ipstat; SYSCTL_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW, &ipstat, ipstat, "IP statistics (struct ipstat, netinet/ip_var.h)"); /* Packet reassembly stuff */ #define IPREASS_NHASH_LOG2 6 #define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) #define IPREASS_HMASK (IPREASS_NHASH - 1) #define IPREASS_HASH(x,y) \ (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK) static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH]; struct mtx ipqlock; #define IPQ_LOCK() mtx_lock(&ipqlock) #define IPQ_UNLOCK() mtx_unlock(&ipqlock) #define IPQ_LOCK_INIT() mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF) #define IPQ_LOCK_ASSERT() mtx_assert(&ipqlock, MA_OWNED) #ifdef IPCTL_DEFMTU SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW, &ip_mtu, 0, "Default MTU"); #endif #ifdef IPSTEALTH int ipstealth = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW, &ipstealth, 0, ""); #endif /* * ipfw_ether and ipfw_bridge hooks. * XXX: Temporary until those are converted to pfil_hooks as well. */ ip_fw_chk_t *ip_fw_chk_ptr = NULL; ip_dn_io_t *ip_dn_io_ptr = NULL; int fw_enable = 1; int fw_one_pass = 1; /* - * XXX this is ugly -- the following two global variables are - * used to store packet state while it travels through the stack. - * Note that the code even makes assumptions on the size and - * alignment of fields inside struct ip_srcrt so e.g. adding some - * fields will break the code. This needs to be fixed. - * - * We need to save the IP options in case a protocol wants to respond - * to an incoming packet over the same route if the packet got here - * using IP source routing. This allows connection establishment and - * maintenance when the remote end is on a network that is not known - * to us. - * XXX: Broken on SMP and possibly preemption! + * XXX this is ugly. IP options source routing magic. */ -static int ip_nhops = 0; -static struct ip_srcrt { +struct ipoptrt { struct in_addr dst; /* final destination */ char nop; /* one NOP to align */ char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */ struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)]; -} ip_srcrt; +}; + +struct ipopt_tag { + struct m_tag tag; + int ip_nhops; + struct ipoptrt ip_srcrt; +}; -static void save_rte(u_char *, struct in_addr); +static void save_rte(struct mbuf *, u_char *, struct in_addr); static int ip_dooptions(struct mbuf *m, int); static void ip_forward(struct mbuf *m, int srcrt); static void ip_freef(struct ipqhead *, struct ipq *); /* * IP initialization: fill in IP protocol switch table. * All protocols not implemented in kernel go to raw IP protocol handler. 
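 * (First, below: the mbuf-tag pattern that the hunk above introduces,
 * shown in isolation.)
 */

/*
 * Kernel-context sketch, not compilable standalone: the pattern this
 * patch adopts.  Per-packet state rides on the mbuf as an m_tag instead
 * of living in globals, so concurrent packets can no longer clobber each
 * other.  The m_tag_* calls are the ones the patch itself uses;
 * 'struct ex_state' stands in for ipopt_tag.
 */
struct ex_state {
	struct m_tag	tag;		/* must be first: the KPI's header */
	int		value;		/* private per-packet payload */
};

/* Producer: allocate, fill, and attach the state to the packet. */
static void
ex_attach(struct mbuf *m, int value)
{
	struct ex_state *s;

	s = (struct ex_state *)m_tag_get(PACKET_TAG_IPOPTIONS,
	    sizeof(struct ex_state), M_NOWAIT);
	if (s == NULL)
		return;			/* allocation failed; state dropped */
	s->value = value;
	m_tag_prepend(m, &s->tag);
}

/* Consumer: find the state later, use it, then unlink and free it. */
static int
ex_consume(struct mbuf *m)
{
	struct ex_state *s;
	int value;

	s = (struct ex_state *)m_tag_find(m, PACKET_TAG_IPOPTIONS, NULL);
	if (s == NULL)
		return (-1);		/* no state attached */
	value = s->value;
	m_tag_delete(m, &s->tag);
	return (value);
}

/*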
*/ void ip_init() { register struct protosw *pr; register int i; TAILQ_INIT(&in_ifaddrhead); in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask); pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); if (pr == 0) panic("ip_init"); for (i = 0; i < IPPROTO_MAX; i++) ip_protox[i] = pr - inetsw; for (pr = inetdomain.dom_protosw; pr < inetdomain.dom_protoswNPROTOSW; pr++) if (pr->pr_domain->dom_family == PF_INET && pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) ip_protox[pr->pr_protocol] = pr - inetsw; /* Initialize packet filter hooks. */ inet_pfil_hook.ph_type = PFIL_TYPE_AF; inet_pfil_hook.ph_af = AF_INET; if ((i = pfil_head_register(&inet_pfil_hook)) != 0) printf("%s: WARNING: unable to register pfil hook, " "error %d\n", __func__, i); IPQ_LOCK_INIT(); for (i = 0; i < IPREASS_NHASH; i++) TAILQ_INIT(&ipq[i]); maxnipq = nmbclusters / 32; maxfragsperpacket = 16; ip_id = time_second & 0xffff; ipintrq.ifq_maxlen = ipqmaxlen; mtx_init(&ipintrq.ifq_mtx, "ip_inq", NULL, MTX_DEF); netisr_register(NETISR_IP, ip_input, &ipintrq, NETISR_MPSAFE); } /* * IP input routine. Checksum and byte swap header. If fragmented * try to reassemble. Process options. Pass to next level. */ void ip_input(struct mbuf *m) { struct ip *ip = NULL; struct in_ifaddr *ia = NULL; struct ifaddr *ifa; int checkif, hlen = 0; u_short sum; int dchg = 0; /* dest changed after fw */ struct in_addr odst; /* original dst address */ #ifdef FAST_IPSEC struct m_tag *mtag; struct tdb_ident *tdbi; struct secpolicy *sp; int s, error; #endif /* FAST_IPSEC */ M_ASSERTPKTHDR(m); if (m->m_flags & M_FASTFWD_OURS) { /* * ip_fastforward firewall changed dest to local. * We expect ip_len and ip_off in host byte order. */ m->m_flags &= ~M_FASTFWD_OURS; /* for reflected mbufs */ /* Set up some basic stuff */ ip = mtod(m, struct ip *); hlen = ip->ip_hl << 2; goto ours; } ipstat.ips_total++; if (m->m_pkthdr.len < sizeof(struct ip)) goto tooshort; if (m->m_len < sizeof (struct ip) && (m = m_pullup(m, sizeof (struct ip))) == NULL) { ipstat.ips_toosmall++; return; } ip = mtod(m, struct ip *); if (ip->ip_v != IPVERSION) { ipstat.ips_badvers++; goto bad; } hlen = ip->ip_hl << 2; if (hlen < sizeof(struct ip)) { /* minimum header length */ ipstat.ips_badhlen++; goto bad; } if (hlen > m->m_len) { if ((m = m_pullup(m, hlen)) == NULL) { ipstat.ips_badhlen++; return; } ip = mtod(m, struct ip *); } /* 127/8 must not appear on wire - RFC1122 */ if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) { ipstat.ips_badaddr++; goto bad; } } if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); } else { if (hlen == sizeof(struct ip)) { sum = in_cksum_hdr(ip); } else { sum = in_cksum(m, hlen); } } if (sum) { ipstat.ips_badsum++; goto bad; } #ifdef ALTQ if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) /* packet is dropped by traffic conditioner */ return; #endif /* * Convert fields to host representation. */ ip->ip_len = ntohs(ip->ip_len); if (ip->ip_len < hlen) { ipstat.ips_badlen++; goto bad; } ip->ip_off = ntohs(ip->ip_off); /* * Check that the amount of data in the buffers * is at least as much as the IP header would have us expect. * Trim mbufs if longer than we expect. * Drop packet if shorter than we expect.
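 * (A flat-buffer restatement of the header checks above follows.)
 */

/*
 * Illustration only, not part of the patch: the same header sanity
 * checks ip_input() just performed, restated over a contiguous buffer.
 * Returns 0 when the datagram is plausible.  Checksum verification is
 * omitted; ex_ names are assumptions.
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

static int
ex_check_ipv4(const u_char *pkt, size_t caplen)
{
	const struct ip *ip = (const struct ip *)pkt;
	size_t hlen, len;

	if (caplen < sizeof(struct ip))
		return (-1);		/* truncated header */
	if (ip->ip_v != IPVERSION)
		return (-1);		/* not IPv4 */
	hlen = (size_t)ip->ip_hl << 2;
	if (hlen < sizeof(struct ip) || hlen > caplen)
		return (-1);		/* bad header length */
	len = ntohs(ip->ip_len);
	if (len < hlen || len > caplen)
		return (-1);		/* total length inconsistent */
	return (0);
}

/*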
*/ if (m->m_pkthdr.len < ip->ip_len) { tooshort: ipstat.ips_tooshort++; goto bad; } if (m->m_pkthdr.len > ip->ip_len) { if (m->m_len == m->m_pkthdr.len) { m->m_len = ip->ip_len; m->m_pkthdr.len = ip->ip_len; } else m_adj(m, ip->ip_len - m->m_pkthdr.len); } #if defined(IPSEC) && !defined(IPSEC_FILTERGIF) /* * Bypass packet filtering for packets from a tunnel (gif). */ if (ipsec_getnhist(m)) goto passin; #endif #if defined(FAST_IPSEC) && !defined(IPSEC_FILTERGIF) /* * Bypass packet filtering for packets from a tunnel (gif). */ if (m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL) goto passin; #endif /* * Run through list of hooks for input packets. * * NB: Beware of the destination address changing (e.g. * by NAT rewriting). When this happens, tell * ip_forward to do the right thing. */ /* Jump over all PFIL processing if hooks are not active. */ if (inet_pfil_hook.ph_busy_count == -1) goto passin; odst = ip->ip_dst; if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN) != 0) return; if (m == NULL) /* consumed by filter */ return; ip = mtod(m, struct ip *); dchg = (odst.s_addr != ip->ip_dst.s_addr); #ifdef IPFIREWALL_FORWARD if (m->m_flags & M_FASTFWD_OURS) { m->m_flags &= ~M_FASTFWD_OURS; goto ours; } dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL); #endif /* IPFIREWALL_FORWARD */ passin: /* * Process options and, if not destined for us, * ship it on. ip_dooptions returns 1 when an * error was detected (causing an icmp message * to be sent and the original packet to be freed). */ if (hlen > sizeof (struct ip) && ip_dooptions(m, 0)) return; /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no * matter if it is destined to another node, or whether it is * a multicast one, RSVP wants it! and prevents it from being forwarded * anywhere else. Also checks if the rsvp daemon is running before * grabbing the packet. */ if (rsvp_on && ip->ip_p==IPPROTO_RSVP) goto ours; /* * Check our list of addresses, to see if the packet is for us. * If we don't have any addresses, assume any unicast packet * we receive might be for us (and let the upper layers deal * with it). */ if (TAILQ_EMPTY(&in_ifaddrhead) && (m->m_flags & (M_MCAST|M_BCAST)) == 0) goto ours; /* * Enable a consistency check between the destination address * and the arrival interface for a unicast packet (the RFC 1122 * strong ES model) if IP forwarding is disabled and the packet * is not locally generated and the packet is not subject to * 'ipfw fwd'. * * XXX - Checking also should be disabled if the destination * address is ipnat'ed to a different interface. * * XXX - Checking is incompatible with IP aliases added * to the loopback interface instead of the interface where * the packets are received. */ checkif = ip_checkinterface && (ipforwarding == 0) && m->m_pkthdr.rcvif != NULL && ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) && (dchg == 0); /* * Check for exact addresses in the hash bucket. */ LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) { /* * If the address matches, verify that the packet * arrived via the correct interface if checking is * enabled. */ if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr && (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif)) goto ours; } /* * Check for broadcast addresses. * * Only accept broadcast packets that arrive via the matching * interface. Reception of forwarded directed broadcasts would * be handled via ip_forward() and ether_output() with the loopback * into the stack for SIMPLEX interfaces handled by ether_output(). 
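 * (A minimal restatement of the 'checkif' test used above follows.)
 */

/*
 * Illustration only, not part of the patch: the RFC 1122 strong end
 * system test that ip_input() folds into 'checkif' -- a unicast packet
 * is ours only if it arrived on the interface owning the destination
 * address.  Standalone predicate over assumed minimal types.
 */
struct ex_if;				/* opaque interface handle */

struct ex_addr {
	unsigned long	addr;		/* local address, network order */
	struct ex_if	*ifp;		/* interface owning the address */
};

static int
ex_addr_is_ours(const struct ex_addr *ia, unsigned long dst,
    const struct ex_if *rcvif, int checkif)
{
	if (ia->addr != dst)
		return (0);		/* not this address at all */
	if (checkif && ia->ifp != rcvif)
		return (0);		/* strong ES: wrong interface */
	return (1);
}

/*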
*/ if (m->m_pkthdr.rcvif != NULL && m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) { TAILQ_FOREACH(ifa, &m->m_pkthdr.rcvif->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET) continue; ia = ifatoia(ifa); if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == ip->ip_dst.s_addr) goto ours; if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) goto ours; #ifdef BOOTP_COMPAT if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) goto ours; #endif } } if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { struct in_multi *inm; if (ip_mrouter) { /* * If we are acting as a multicast router, all * incoming multicast packets are passed to the * kernel-level multicast forwarding function. * The packet is returned (relatively) intact; if * ip_mforward() returns a non-zero value, the packet * must be discarded, else it may be accepted below. */ if (ip_mforward && ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) { ipstat.ips_cantforward++; m_freem(m); return; } /* * The process-level routing daemon needs to receive * all multicast IGMP packets, whether or not this * host belongs to their destination groups. */ if (ip->ip_p == IPPROTO_IGMP) goto ours; ipstat.ips_forward++; } /* * See if we belong to the destination multicast group on the * arrival interface. */ IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm); if (inm == NULL) { ipstat.ips_notmember++; m_freem(m); return; } goto ours; } if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST) goto ours; if (ip->ip_dst.s_addr == INADDR_ANY) goto ours; /* * FAITH(Firewall Aided Internet Translator) */ if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) { if (ip_keepfaith) { if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP) goto ours; } m_freem(m); return; } /* * Not for us; forward if possible and desirable. */ if (ipforwarding == 0) { ipstat.ips_cantforward++; m_freem(m); } else { #ifdef IPSEC /* * Enforce inbound IPsec SPD. */ if (ipsec4_in_reject(m, NULL)) { ipsecstat.in_polvio++; goto bad; } #endif /* IPSEC */ #ifdef FAST_IPSEC mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); s = splnet(); if (mtag != NULL) { tdbi = (struct tdb_ident *)(mtag + 1); sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); } else { sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error); } if (sp == NULL) { /* NB: can happen if error */ splx(s); /*XXX error stat???*/ DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/ goto bad; } /* * Check security policy against packet attributes. */ error = ipsec_in_reject(sp, m); KEY_FREESP(&sp); splx(s); if (error) { ipstat.ips_cantforward++; goto bad; } #endif /* FAST_IPSEC */ ip_forward(m, dchg); } return; ours: #ifdef IPSTEALTH /* * IPSTEALTH: Process non-routing options only * if the packet is destined for us. */ if (ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) return; #endif /* IPSTEALTH */ /* Count the packet in the ip address stats */ if (ia != NULL) { ia->ia_ifa.if_ipackets++; ia->ia_ifa.if_ibytes += m->m_pkthdr.len; } /* * Attempt reassembly; if it succeeds, proceed. * ip_reass() will return a different mbuf. */ if (ip->ip_off & (IP_MF | IP_OFFMASK)) { m = ip_reass(m); if (m == NULL) return; ip = mtod(m, struct ip *); /* Get the header length of the reassembled packet */ hlen = ip->ip_hl << 2; } /* * Further protocols expect the packet length to be w/o the * IP header. */ ip->ip_len -= hlen; #ifdef IPSEC /* * enforce IPsec policy checking if we are seeing last header. * note that we do not visit this with protocols with pcb layer * code - like udp/tcp/raw ip. 
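 * (Below: a toy version of the ip_protox[]/inetsw[] dispatch performed
 * at the end of this function.)
 */

/*
 * Illustration only, not part of the patch: the indirection behind the
 * final "switch out to protocol's input routine".  Every protocol number
 * maps through ip_protox[] to a protocol-switch slot; unknown protocols
 * all map to the default (raw) handler.  Runnable sketch; ex_ names are
 * assumptions.
 */
#include <stdio.h>

#define EX_IPPROTO_MAX	256

struct ex_protosw {
	const char	*name;
	void		(*input)(const char *);
};

static void
ex_deliver(const char *name)
{
	printf("delivered to %s\n", name);
}

static struct ex_protosw ex_inetsw[] = {
	{ "raw", ex_deliver },		/* slot 0: default handler */
	{ "icmp", ex_deliver },
	{ "tcp", ex_deliver },
};

static unsigned char ex_protox[EX_IPPROTO_MAX];	/* protocol -> slot */

int
main(void)
{
	struct ex_protosw *pr;

	ex_protox[1] = 1;		/* IPPROTO_ICMP */
	ex_protox[6] = 2;		/* IPPROTO_TCP */

	pr = &ex_inetsw[ex_protox[89]];	/* OSPF: unregistered, slot 0 */
	(*pr->input)(pr->name);		/* prints "delivered to raw" */
	return (0);
}

/*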
*/ if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0 && ipsec4_in_reject(m, NULL)) { ipsecstat.in_polvio++; goto bad; } #endif #if FAST_IPSEC /* * enforce IPsec policy checking if we are seeing last header. * note that we do not visit this with protocols with pcb layer * code - like udp/tcp/raw ip. */ if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0) { /* * Check if the packet has already had IPsec processing * done. If so, then just pass it along. This tag gets * set during AH, ESP, etc. input handling, before the * packet is returned to the ip input queue for delivery. */ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); s = splnet(); if (mtag != NULL) { tdbi = (struct tdb_ident *)(mtag + 1); sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); } else { sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error); } if (sp != NULL) { /* * Check security policy against packet attributes. */ error = ipsec_in_reject(sp, m); KEY_FREESP(&sp); } else { /* XXX error stat??? */ error = EINVAL; DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/ goto bad; } splx(s); if (error) goto bad; } #endif /* FAST_IPSEC */ /* * Switch out to protocol's input routine. */ ipstat.ips_delivered++; (*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen); return; bad: m_freem(m); } /* * Take incoming datagram fragment and try to reassemble it into * whole datagram. If the argument is the first fragment or one * in between the function will return NULL and store the mbuf * in the fragment chain. If the argument is the last fragment * the packet will be reassembled and the pointer to the new * mbuf returned for further processing. Only m_tags attached * to the first packet/fragment are preserved. * The IP header is *NOT* adjusted out of iplen. */ struct mbuf * ip_reass(struct mbuf *m) { struct ip *ip; struct mbuf *p, *q, *nq, *t; struct ipq *fp = NULL; struct ipqhead *head; int i, hlen, next; u_int8_t ecn, ecn0; u_short hash; /* If maxnipq is 0, never accept fragments. */ if (maxnipq == 0) { ipstat.ips_fragments++; ipstat.ips_fragdropped++; m_freem(m); return (NULL); } ip = mtod(m, struct ip *); hlen = ip->ip_hl << 2; hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); head = &ipq[hash]; IPQ_LOCK(); /* * Look for queue of fragments * of this datagram. */ TAILQ_FOREACH(fp, head, ipq_list) if (ip->ip_id == fp->ipq_id && ip->ip_src.s_addr == fp->ipq_src.s_addr && ip->ip_dst.s_addr == fp->ipq_dst.s_addr && #ifdef MAC mac_fragment_match(m, fp) && #endif ip->ip_p == fp->ipq_p) goto found; fp = NULL; /* * Enforce upper bound on number of fragmented packets * for which we attempt reassembly; * If maxnipq is -1, accept all fragments without limitation. */ if ((nipq > maxnipq) && (maxnipq > 0)) { /* * drop something from the tail of the current queue * before proceeding further */ struct ipq *q = TAILQ_LAST(head, ipqhead); if (q == NULL) { /* gak */ for (i = 0; i < IPREASS_NHASH; i++) { struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead); if (r) { ipstat.ips_fragtimeout += r->ipq_nfrags; ip_freef(&ipq[i], r); break; } } } else { ipstat.ips_fragtimeout += q->ipq_nfrags; ip_freef(head, q); } } found: /* * Adjust ip_len to not reflect header, * convert offset of this to bytes. */ ip->ip_len -= hlen; if (ip->ip_off & IP_MF) { /* * Make sure that fragments have a data length * that's a non-zero multiple of 8 bytes. 
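 * (A two-line demonstration of the 8-byte offset units follows.)
 */

/*
 * Illustration only, not part of the patch: fragment offset bookkeeping.
 * On the wire ip_off counts 8-byte units, so every non-final fragment
 * must carry a multiple of 8 data bytes; ip_reass() shifts the offset
 * into bytes ("<< 3") before splicing fragments together.
 */
#include <stdio.h>

int
main(void)
{
	unsigned short ip_off = 185;	/* wire value, in 8-byte units */
	unsigned short ip_len = 1480;	/* data bytes, header removed */

	if (ip_len == 0 || (ip_len & 0x7) != 0)
		printf("bad non-final fragment length\n");
	printf("fragment covers bytes %u..%u\n",
	    ip_off << 3, (ip_off << 3) + ip_len - 1);	/* 1480..2959 */
	return (0);
}

/*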
*/ if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) { ipstat.ips_toosmall++; /* XXX */ goto dropfrag; } m->m_flags |= M_FRAG; } else m->m_flags &= ~M_FRAG; ip->ip_off <<= 3; /* * Attempt reassembly; if it succeeds, proceed. * ip_reass() will return a different mbuf. */ ipstat.ips_fragments++; m->m_pkthdr.header = ip; /* Previous ip_reass() started here. */ /* * Presence of header sizes in mbufs * would confuse code below. */ m->m_data += hlen; m->m_len -= hlen; /* * If first fragment to arrive, create a reassembly queue. */ if (fp == NULL) { if ((t = m_get(M_DONTWAIT, MT_FTABLE)) == NULL) goto dropfrag; fp = mtod(t, struct ipq *); #ifdef MAC if (mac_init_ipq(fp, M_NOWAIT) != 0) { m_free(t); goto dropfrag; } mac_create_ipq(m, fp); #endif TAILQ_INSERT_HEAD(head, fp, ipq_list); nipq++; fp->ipq_nfrags = 1; fp->ipq_ttl = IPFRAGTTL; fp->ipq_p = ip->ip_p; fp->ipq_id = ip->ip_id; fp->ipq_src = ip->ip_src; fp->ipq_dst = ip->ip_dst; fp->ipq_frags = m; m->m_nextpkt = NULL; goto inserted; } else { fp->ipq_nfrags++; #ifdef MAC mac_update_ipq(m, fp); #endif } #define GETIP(m) ((struct ip*)((m)->m_pkthdr.header)) /* * Handle ECN by comparing this segment with the first one; * if CE is set, do not lose CE. * drop if CE and not-ECT are mixed for the same packet. */ ecn = ip->ip_tos & IPTOS_ECN_MASK; ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK; if (ecn == IPTOS_ECN_CE) { if (ecn0 == IPTOS_ECN_NOTECT) goto dropfrag; if (ecn0 != IPTOS_ECN_CE) GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE; } if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) goto dropfrag; /* * Find a segment which begins after this one does. */ for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) if (GETIP(q)->ip_off > ip->ip_off) break; /* * If there is a preceding segment, it may provide some of * our data already. If so, drop the data from the incoming * segment. If it provides all of our data, drop us, otherwise * stick new segment in the proper place. * * If some of the data is dropped from the preceding * segment, then its checksum is invalidated. */ if (p) { i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off; if (i > 0) { if (i >= ip->ip_len) goto dropfrag; m_adj(m, i); m->m_pkthdr.csum_flags = 0; ip->ip_off += i; ip->ip_len -= i; } m->m_nextpkt = p->m_nextpkt; p->m_nextpkt = m; } else { m->m_nextpkt = fp->ipq_frags; fp->ipq_frags = m; } /* * While we overlap succeeding segments trim them or, * if they are completely covered, dequeue them. */ for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off; q = nq) { i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off; if (i < GETIP(q)->ip_len) { GETIP(q)->ip_len -= i; GETIP(q)->ip_off += i; m_adj(q, i); q->m_pkthdr.csum_flags = 0; break; } nq = q->m_nextpkt; m->m_nextpkt = nq; ipstat.ips_fragdropped++; fp->ipq_nfrags--; m_freem(q); } inserted: /* * Check for complete reassembly and perform frag per packet * limiting. * * Frag limiting is performed here so that the nth frag has * a chance to complete the packet before we drop the packet. * As a result, n+1 frags are actually allowed per packet, but * only n will ever be stored. (n = maxfragsperpacket.)
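 * (Below: the preceding-fragment trim arithmetic from above, standalone.)
 */

/*
 * Illustration only, not part of the patch: the overlap-trimming
 * arithmetic used above.  If the preceding fragment already covers the
 * first 'i' bytes of the new one, those bytes are cut from the new
 * fragment and its offset advanced; a fully covered fragment is dropped.
 */
#include <stdio.h>

struct ex_frag {
	int off;	/* byte offset of the first data byte */
	int len;	/* data bytes carried */
};

int
main(void)
{
	struct ex_frag prev = { 0, 1000 };	/* covers bytes 0..999 */
	struct ex_frag cur = { 800, 1000 };	/* covers bytes 800..1799 */
	int i = prev.off + prev.len - cur.off;	/* overlap: 200 bytes */

	if (i > 0) {
		if (i >= cur.len) {
			printf("fragment fully duplicated, drop\n");
			return (0);
		}
		cur.off += i;			/* keep only the new bytes */
		cur.len -= i;
	}
	printf("kept bytes %d..%d\n", cur.off, cur.off + cur.len - 1);
	return (0);
}

/*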
* */ next = 0; for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { if (GETIP(q)->ip_off != next) { if (fp->ipq_nfrags > maxfragsperpacket) { ipstat.ips_fragdropped += fp->ipq_nfrags; ip_freef(head, fp); } goto done; } next += GETIP(q)->ip_len; } /* Make sure the last packet didn't have the IP_MF flag */ if (p->m_flags & M_FRAG) { if (fp->ipq_nfrags > maxfragsperpacket) { ipstat.ips_fragdropped += fp->ipq_nfrags; ip_freef(head, fp); } goto done; } /* * Reassembly is complete. Make sure the packet is a sane size. */ q = fp->ipq_frags; ip = GETIP(q); if (next + (ip->ip_hl << 2) > IP_MAXPACKET) { ipstat.ips_toolong++; ipstat.ips_fragdropped += fp->ipq_nfrags; ip_freef(head, fp); goto done; } /* * Concatenate fragments. */ m = q; t = m->m_next; m->m_next = 0; m_cat(m, t); nq = q->m_nextpkt; q->m_nextpkt = 0; for (q = nq; q != NULL; q = nq) { nq = q->m_nextpkt; q->m_nextpkt = NULL; m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags; m->m_pkthdr.csum_data += q->m_pkthdr.csum_data; m_cat(m, q); } #ifdef MAC mac_create_datagram_from_ipq(fp, m); mac_destroy_ipq(fp); #endif /* * Create header for new ip packet by modifying header of first * packet; dequeue and discard fragment reassembly header. * Make header visible. */ ip->ip_len = (ip->ip_hl << 2) + next; ip->ip_src = fp->ipq_src; ip->ip_dst = fp->ipq_dst; TAILQ_REMOVE(head, fp, ipq_list); nipq--; (void) m_free(dtom(fp)); m->m_len += (ip->ip_hl << 2); m->m_data -= (ip->ip_hl << 2); /* some debugging cruft by sklower, below, will go away soon */ if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */ m_fixhdr(m); ipstat.ips_reassembled++; IPQ_UNLOCK(); return (m); dropfrag: ipstat.ips_fragdropped++; if (fp != NULL) fp->ipq_nfrags--; m_freem(m); done: IPQ_UNLOCK(); return (NULL); #undef GETIP } /* * Free a fragment reassembly header and all * associated datagrams. */ static void ip_freef(fhp, fp) struct ipqhead *fhp; struct ipq *fp; { register struct mbuf *q; IPQ_LOCK_ASSERT(); while (fp->ipq_frags) { q = fp->ipq_frags; fp->ipq_frags = q->m_nextpkt; m_freem(q); } TAILQ_REMOVE(fhp, fp, ipq_list); (void) m_free(dtom(fp)); nipq--; } /* * IP timer processing; * if a timer expires on a reassembly * queue, discard it. */ void ip_slowtimo() { register struct ipq *fp; int s = splnet(); int i; IPQ_LOCK(); for (i = 0; i < IPREASS_NHASH; i++) { for(fp = TAILQ_FIRST(&ipq[i]); fp;) { struct ipq *fpp; fpp = fp; fp = TAILQ_NEXT(fp, ipq_list); if(--fpp->ipq_ttl == 0) { ipstat.ips_fragtimeout += fpp->ipq_nfrags; ip_freef(&ipq[i], fpp); } } } /* * If we are over the maximum number of fragments * (due to the limit being lowered), drain off * enough to get down to the new limit. */ if (maxnipq >= 0 && nipq > maxnipq) { for (i = 0; i < IPREASS_NHASH; i++) { while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i])) { ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags; ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i])); } } } IPQ_UNLOCK(); splx(s); } /* * Drain off all datagram fragments. */ void ip_drain() { int i; IPQ_LOCK(); for (i = 0; i < IPREASS_NHASH; i++) { while(!TAILQ_EMPTY(&ipq[i])) { ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags; ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i])); } } IPQ_UNLOCK(); in_rtqdrain(); } /* * Do option processing on a datagram, * possibly discarding it if bad options are encountered, * or forwarding it if source-routed. * The pass argument is used when operating in the IPSTEALTH * mode to tell what options to process: * [LS]SRR (pass 0) or the others (pass 1). 
* The reason for as many as two passes is that when doing IPSTEALTH, * non-routing options should be processed only if the packet is for us. * Returns 1 if packet has been forwarded/freed, * 0 if the packet should be processed further. */ static int ip_dooptions(struct mbuf *m, int pass) { struct ip *ip = mtod(m, struct ip *); u_char *cp; struct in_ifaddr *ia; int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; struct in_addr *sin, dst; n_time ntime; struct sockaddr_in ipaddr = { sizeof(ipaddr), AF_INET }; /* ignore or reject packets with IP options */ if (ip_doopts == 0) return 0; else if (ip_doopts == 2) { type = ICMP_UNREACH; code = ICMP_UNREACH_FILTER_PROHIB; goto bad; } dst = ip->ip_dst; cp = (u_char *)(ip + 1); cnt = (ip->ip_hl << 2) - sizeof (struct ip); for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[IPOPT_OPTVAL]; if (opt == IPOPT_EOL) break; if (opt == IPOPT_NOP) optlen = 1; else { if (cnt < IPOPT_OLEN + sizeof(*cp)) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } optlen = cp[IPOPT_OLEN]; if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } } switch (opt) { default: break; /* * Source routing with record. * Find interface with current destination address. * If none on this machine then drop if strictly routed, * or do nothing if loosely routed. * Record interface address and bring up next address * component. If strictly routed make sure next * address is on directly accessible net. */ case IPOPT_LSRR: case IPOPT_SSRR: #ifdef IPSTEALTH if (ipstealth && pass > 0) break; #endif if (optlen < IPOPT_OFFSET + sizeof(*cp)) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } ipaddr.sin_addr = ip->ip_dst; ia = (struct in_ifaddr *) ifa_ifwithaddr((struct sockaddr *)&ipaddr); if (ia == NULL) { if (opt == IPOPT_SSRR) { type = ICMP_UNREACH; code = ICMP_UNREACH_SRCFAIL; goto bad; } if (!ip_dosourceroute) goto nosourcerouting; /* * Loose routing, and not at next destination * yet; nothing to do except forward. */ break; } off--; /* 0 origin */ if (off > optlen - (int)sizeof(struct in_addr)) { /* * End of source route. Should be for us. */ if (!ip_acceptsourceroute) goto nosourcerouting; - save_rte(cp, ip->ip_src); + save_rte(m, cp, ip->ip_src); break; } #ifdef IPSTEALTH if (ipstealth) goto dropit; #endif if (!ip_dosourceroute) { if (ipforwarding) { char buf[16]; /* aaa.bbb.ccc.ddd\0 */ /* * Acting as a router, so generate ICMP */ nosourcerouting: strcpy(buf, inet_ntoa(ip->ip_dst)); log(LOG_WARNING, "attempted source route from %s to %s\n", inet_ntoa(ip->ip_src), buf); type = ICMP_UNREACH; code = ICMP_UNREACH_SRCFAIL; goto bad; } else { /* * Not acting as a router, so silently drop. 
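 * (A standalone sketch of consuming one source-route hop follows.)
 */

/*
 * Illustration only, not part of the patch: how the LSRR/SSRR handling
 * above consumes a hop.  The option's offset field (1-origin) selects
 * the next hop address; the packet is re-aimed at it and the offset
 * advances by one address.  The kernel additionally writes the outgoing
 * interface address back into the slot, which is omitted here.
 */
#include <string.h>
#include <netinet/in.h>
#include <netinet/ip.h>		/* IPOPT_OLEN, IPOPT_OFFSET */

/* Fill *next and return 0 while hops remain, -1 at end of route. */
static int
ex_srr_next_hop(unsigned char *cp, struct in_addr *next)
{
	int off = cp[IPOPT_OFFSET] - 1;		/* convert to 0-origin */
	int optlen = cp[IPOPT_OLEN];

	if (off > optlen - (int)sizeof(struct in_addr))
		return (-1);			/* route exhausted: for us */
	memcpy(next, cp + off, sizeof(struct in_addr));
	cp[IPOPT_OFFSET] += sizeof(struct in_addr);
	return (0);
}

/*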
*/ #ifdef IPSTEALTH dropit: #endif ipstat.ips_cantforward++; m_freem(m); return (1); } } /* * locate outgoing interface */ (void)memcpy(&ipaddr.sin_addr, cp + off, sizeof(ipaddr.sin_addr)); if (opt == IPOPT_SSRR) { #define INA struct in_ifaddr * #define SA struct sockaddr * if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == NULL) ia = (INA)ifa_ifwithnet((SA)&ipaddr); } else ia = ip_rtaddr(ipaddr.sin_addr); if (ia == NULL) { type = ICMP_UNREACH; code = ICMP_UNREACH_SRCFAIL; goto bad; } ip->ip_dst = ipaddr.sin_addr; (void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr), sizeof(struct in_addr)); cp[IPOPT_OFFSET] += sizeof(struct in_addr); /* * Let ip_intr's mcast routing check handle mcast pkts */ forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr)); break; case IPOPT_RR: #ifdef IPSTEALTH if (ipstealth && pass == 0) break; #endif if (optlen < IPOPT_OFFSET + sizeof(*cp)) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } /* * If no space remains, ignore. */ off--; /* 0 origin */ if (off > optlen - (int)sizeof(struct in_addr)) break; (void)memcpy(&ipaddr.sin_addr, &ip->ip_dst, sizeof(ipaddr.sin_addr)); /* * locate outgoing interface; if we're the destination, * use the incoming interface (should be same). */ if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == NULL && (ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) { type = ICMP_UNREACH; code = ICMP_UNREACH_HOST; goto bad; } (void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr), sizeof(struct in_addr)); cp[IPOPT_OFFSET] += sizeof(struct in_addr); break; case IPOPT_TS: #ifdef IPSTEALTH if (ipstealth && pass == 0) break; #endif code = cp - (u_char *)ip; if (optlen < 4 || optlen > 40) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } if ((off = cp[IPOPT_OFFSET]) < 5) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } if (off > optlen - (int)sizeof(int32_t)) { cp[IPOPT_OFFSET + 1] += (1 << 4); if ((cp[IPOPT_OFFSET + 1] & 0xf0) == 0) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } break; } off--; /* 0 origin */ sin = (struct in_addr *)(cp + off); switch (cp[IPOPT_OFFSET + 1] & 0x0f) { case IPOPT_TS_TSONLY: break; case IPOPT_TS_TSANDADDR: if (off + sizeof(n_time) + sizeof(struct in_addr) > optlen) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } ipaddr.sin_addr = dst; ia = (INA)ifaof_ifpforaddr((SA)&ipaddr, m->m_pkthdr.rcvif); if (ia == NULL) continue; (void)memcpy(sin, &IA_SIN(ia)->sin_addr, sizeof(struct in_addr)); cp[IPOPT_OFFSET] += sizeof(struct in_addr); off += sizeof(struct in_addr); break; case IPOPT_TS_PRESPEC: if (off + sizeof(n_time) + sizeof(struct in_addr) > optlen) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } (void)memcpy(&ipaddr.sin_addr, sin, sizeof(struct in_addr)); if (ifa_ifwithaddr((SA)&ipaddr) == NULL) continue; cp[IPOPT_OFFSET] += sizeof(struct in_addr); off += sizeof(struct in_addr); break; default: code = &cp[IPOPT_OFFSET + 1] - (u_char *)ip; goto bad; } ntime = iptime(); (void)memcpy(cp + off, &ntime, sizeof(n_time)); cp[IPOPT_OFFSET] += sizeof(n_time); } } if (forward && ipforwarding) { ip_forward(m, 1); return (1); } return (0); bad: icmp_error(m, type, code, 0, 0); ipstat.ips_badoptions++; return (1); } /* * Given address of next destination (final or next hop), * return internet address info of interface to be used to get there. 
*/ struct in_ifaddr * ip_rtaddr(dst) struct in_addr dst; { struct route sro; struct sockaddr_in *sin; struct in_ifaddr *ifa; bzero(&sro, sizeof(sro)); sin = (struct sockaddr_in *)&sro.ro_dst; sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_addr = dst; rtalloc_ign(&sro, RTF_CLONING); if (sro.ro_rt == NULL) return ((struct in_ifaddr *)0); ifa = ifatoia(sro.ro_rt->rt_ifa); RTFREE(sro.ro_rt); return ifa; } /* * Save incoming source route for use in replies, * to be picked up later by ip_srcroute if the receiver is interested. */ static void -save_rte(option, dst) +save_rte(m, option, dst) + struct mbuf *m; u_char *option; struct in_addr dst; { unsigned olen; + struct ipopt_tag *opts; + + opts = (struct ipopt_tag *)m_tag_get(PACKET_TAG_IPOPTIONS, + sizeof(struct ipopt_tag), M_NOWAIT); + if (opts == NULL) + return; olen = option[IPOPT_OLEN]; #ifdef DIAGNOSTIC if (ipprintfs) printf("save_rte: olen %d\n", olen); #endif - if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) + if (olen > sizeof(opts->ip_srcrt) - (1 + sizeof(dst))) return; - bcopy(option, ip_srcrt.srcopt, olen); - ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); - ip_srcrt.dst = dst; + bcopy(option, opts->ip_srcrt.srcopt, olen); + opts->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); + opts->ip_srcrt.dst = dst; + m_tag_prepend(m, (struct m_tag *)opts); } /* * Retrieve incoming source route for use in replies, * in the same form used by setsockopt. * The first hop is placed before the options, will be removed later. */ struct mbuf * -ip_srcroute() +ip_srcroute(m0) + struct mbuf *m0; { register struct in_addr *p, *q; register struct mbuf *m; + struct ipopt_tag *opts; + + opts = (struct ipopt_tag *)m_tag_find(m0, PACKET_TAG_IPOPTIONS, NULL); + if (opts == NULL) + return ((struct mbuf *)0); - if (ip_nhops == 0) + if (opts->ip_nhops == 0) return ((struct mbuf *)0); m = m_get(M_DONTWAIT, MT_HEADER); if (m == NULL) return ((struct mbuf *)0); -#define OPTSIZ (sizeof(ip_srcrt.nop) + sizeof(ip_srcrt.srcopt)) +#define OPTSIZ (sizeof(opts->ip_srcrt.nop) + sizeof(opts->ip_srcrt.srcopt)) /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */ - m->m_len = ip_nhops * sizeof(struct in_addr) + sizeof(struct in_addr) + - OPTSIZ; + m->m_len = opts->ip_nhops * sizeof(struct in_addr) + + sizeof(struct in_addr) + OPTSIZ; #ifdef DIAGNOSTIC if (ipprintfs) - printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len); + printf("ip_srcroute: nhops %d mlen %d", opts->ip_nhops, m->m_len); #endif /* * First save first hop for return route */ - p = &ip_srcrt.route[ip_nhops - 1]; + p = &(opts->ip_srcrt.route[opts->ip_nhops - 1]); *(mtod(m, struct in_addr *)) = *p--; #ifdef DIAGNOSTIC if (ipprintfs) printf(" hops %lx", (u_long)ntohl(mtod(m, struct in_addr *)->s_addr)); #endif /* * Copy option fields and padding (nop) to mbuf. */ - ip_srcrt.nop = IPOPT_NOP; - ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF; + opts->ip_srcrt.nop = IPOPT_NOP; + opts->ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF; (void)memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), - &ip_srcrt.nop, OPTSIZ); + &(opts->ip_srcrt.nop), OPTSIZ); q = (struct in_addr *)(mtod(m, caddr_t) + sizeof(struct in_addr) + OPTSIZ); #undef OPTSIZ /* * Record return path as an IP source route, * reversing the path (pointers are now aligned). */ - while (p >= ip_srcrt.route) { + while (p >= opts->ip_srcrt.route) { #ifdef DIAGNOSTIC if (ipprintfs) printf(" %lx", (u_long)ntohl(q->s_addr)); #endif *q++ = *p--; } /* * Last hop goes to final destination. 
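 *
 * The point of this diff: the pre-change code kept the recorded route
 * in file-scope globals (ip_srcrt, ip_nhops), which cannot be safe
 * once input processing runs concurrently; the change moves that
 * state into an m_tag carried by the packet itself.  The ipopt_tag
 * layout is not shown in this hunk; the sketch below is a plausible
 * shape consistent with every use here, an assumption rather than
 * the committed definition.
 */
#if 0	/* Illustrative sketch only; not part of this change. */
struct ipopt_tag {
	struct	m_tag tag;		/* must be first: m_tag_get()
					 * returns storage overlaying
					 * the m_tag header */
	int	ip_nhops;
	struct {
		struct	in_addr dst;	/* final destination */
		char	nop;		/* one NOP to align */
		char	srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN, OFFSET */
		struct	in_addr route[MAX_IPOPTLEN /
			    sizeof(struct in_addr)];
	} ip_srcrt;
};

/* Lifecycle: save_rte() allocates and prepends the tag on input;
 * ip_srcroute() looks it up with m_tag_find(), builds the reversed
 * route, and m_tag_delete()s it so the state dies with the packet. */
#endif
/*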
*/ - *q = ip_srcrt.dst; + *q = opts->ip_srcrt.dst; #ifdef DIAGNOSTIC if (ipprintfs) printf(" %lx\n", (u_long)ntohl(q->s_addr)); #endif + m_tag_delete(m0, (struct m_tag *)opts); return (m); } /* * Strip out IP options, at higher * level protocol in the kernel. * Second argument is buffer to which options * will be moved, and return value is their length. * XXX should be deleted; last arg currently ignored. */ void ip_stripoptions(m, mopt) register struct mbuf *m; struct mbuf *mopt; { register int i; struct ip *ip = mtod(m, struct ip *); register caddr_t opts; int olen; olen = (ip->ip_hl << 2) - sizeof (struct ip); opts = (caddr_t)(ip + 1); i = m->m_len - (sizeof (struct ip) + olen); bcopy(opts + olen, opts, (unsigned)i); m->m_len -= olen; if (m->m_flags & M_PKTHDR) m->m_pkthdr.len -= olen; ip->ip_v = IPVERSION; ip->ip_hl = sizeof(struct ip) >> 2; } u_char inetctlerrmap[PRC_NCMDS] = { 0, 0, 0, 0, 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, EMSGSIZE, EHOSTUNREACH, 0, 0, 0, 0, EHOSTUNREACH, 0, ENOPROTOOPT, ECONNREFUSED }; /* * Forward a packet. If some error occurs return the sender * an icmp packet. Note we can't always generate a meaningful * icmp message because icmp doesn't have a large enough repertoire * of codes and types. * * If not forwarding, just drop the packet. This could be confusing * if ipforwarding was zero but some routing protocol was advancing * us as a gateway to somewhere. However, we must let the routing * protocol deal with that. * * The srcrt parameter indicates whether the packet is being forwarded * via a source route. */ void ip_forward(struct mbuf *m, int srcrt) { struct ip *ip = mtod(m, struct ip *); struct in_ifaddr *ia = NULL; int error, type = 0, code = 0; struct mbuf *mcopy; struct in_addr dest; struct ifnet *destifp, dummyifp; #ifdef DIAGNOSTIC if (ipprintfs) printf("forward: src %lx dst %lx ttl %x\n", (u_long)ip->ip_src.s_addr, (u_long)ip->ip_dst.s_addr, ip->ip_ttl); #endif if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { ipstat.ips_cantforward++; m_freem(m); return; } #ifdef IPSTEALTH if (!ipstealth) { #endif if (ip->ip_ttl <= IPTTLDEC) { icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, 0); return; } #ifdef IPSTEALTH } #endif if (!srcrt && (ia = ip_rtaddr(ip->ip_dst)) == NULL) { icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0); return; } /* * Save the IP header and at most 8 bytes of the payload, * in case we need to generate an ICMP message to the src. * * XXX this can be optimized a lot by saving the data in a local * buffer on the stack (72 bytes at most), and only allocating the * mbuf if really necessary. The vast majority of the packets * are forwarded without having to send an ICMP back (either * because unnecessary, or because rate limited), so we are * really wasting a lot of work here. * * We don't use m_copy() because it might return a reference * to a shared cluster. Both this function and ip_output() * assume exclusive access to the IP header in `m', so any * data in a cluster may change before we reach icmp_error(). */ MGET(mcopy, M_DONTWAIT, m->m_type); if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) { /* * It's probably ok if the pkthdr dup fails (because * the deep copy of the tag chain failed), but for now * be conservative and just discard the copy since * code below may some day want the tags.
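 *
 * An aside: m_dup_pkthdr() deep-copies the packet header, including
 * the m_tag chain, which after this change may carry the
 * PACKET_TAG_IPOPTIONS tag attached by save_rte().  That is why a
 * failed dup discards mcopy outright instead of sending on a copy
 * with silently missing tags; the defensive shape, restated:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
	MGET(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
		m_free(mcopy);		/* lose the copy, keep going */
		mcopy = NULL;		/* no ICMP quote available later */
	}
#endif
/*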
*/ m_free(mcopy); mcopy = NULL; } if (mcopy != NULL) { mcopy->m_len = imin((ip->ip_hl << 2) + 8, (int)ip->ip_len); mcopy->m_pkthdr.len = mcopy->m_len; m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t)); } #ifdef IPSTEALTH if (!ipstealth) { #endif ip->ip_ttl -= IPTTLDEC; #ifdef IPSTEALTH } #endif /* * If forwarding packet using same interface that it came in on, * perhaps should send a redirect to sender to shortcut a hop. * Only send redirect if source is sending directly to us, * and if packet was not source routed (or has any options). * Also, don't send redirect if forwarding using a default route * or a route modified by a redirect. */ dest.s_addr = 0; if (!srcrt && ipsendredirects && ia->ia_ifp == m->m_pkthdr.rcvif) { struct sockaddr_in *sin; struct route ro; struct rtentry *rt; bzero(&ro, sizeof(ro)); sin = (struct sockaddr_in *)&ro.ro_dst; sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_addr = ip->ip_dst; rtalloc_ign(&ro, RTF_CLONING); rt = ro.ro_rt; if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && satosin(rt_key(rt))->sin_addr.s_addr != 0) { #define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa)) u_long src = ntohl(ip->ip_src.s_addr); if (RTA(rt) && (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) { if (rt->rt_flags & RTF_GATEWAY) dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr; else dest.s_addr = ip->ip_dst.s_addr; /* Router requirements says to only send host redirects */ type = ICMP_REDIRECT; code = ICMP_REDIRECT_HOST; #ifdef DIAGNOSTIC if (ipprintfs) printf("redirect (%d) to %lx\n", code, (u_long)dest.s_addr); #endif } } if (rt) RTFREE(rt); } error = ip_output(m, (struct mbuf *)0, NULL, IP_FORWARDING, 0, NULL); if (error) ipstat.ips_cantforward++; else { ipstat.ips_forward++; if (type) ipstat.ips_redirectsent++; else { if (mcopy) m_freem(mcopy); return; } } if (mcopy == NULL) return; destifp = NULL; switch (error) { case 0: /* forwarded, but need redirect */ /* type, code set above */ break; case ENETUNREACH: /* shouldn't happen, checked above */ case EHOSTUNREACH: case ENETDOWN: case EHOSTDOWN: default: type = ICMP_UNREACH; code = ICMP_UNREACH_HOST; break; case EMSGSIZE: type = ICMP_UNREACH; code = ICMP_UNREACH_NEEDFRAG; #if defined(IPSEC) || defined(FAST_IPSEC) /* * If the packet is routed over IPsec tunnel, tell the * originator the tunnel MTU. * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz * XXX quickhack!!! */ { struct secpolicy *sp = NULL; int ipsecerror; int ipsechdr; struct route *ro; #ifdef IPSEC sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND, IP_FORWARDING, &ipsecerror); #else /* FAST_IPSEC */ sp = ipsec_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND, IP_FORWARDING, &ipsecerror); #endif if (sp != NULL) { /* count IPsec header size */ ipsechdr = ipsec4_hdrsiz(mcopy, IPSEC_DIR_OUTBOUND, NULL); /* * find the correct route for outer IPv4 * header, compute tunnel MTU. * * XXX BUG ALERT * The "dummyifp" code relies upon the fact * that icmp_error() touches only ifp->if_mtu. */ /*XXX*/ destifp = NULL; if (sp->req != NULL && sp->req->sav != NULL && sp->req->sav->sah != NULL) { ro = &sp->req->sav->sah->sa_route; if (ro->ro_rt && ro->ro_rt->rt_ifp) { dummyifp.if_mtu = ro->ro_rt->rt_rmx.rmx_mtu ? ro->ro_rt->rt_rmx.rmx_mtu : ro->ro_rt->rt_ifp->if_mtu; dummyifp.if_mtu -= ipsechdr; destifp = &dummyifp; } } #ifdef IPSEC key_freesp(sp); #else /* FAST_IPSEC */ KEY_FREESP(&sp); #endif ipstat.ips_cantfrag++; break; } else #endif /*IPSEC || FAST_IPSEC*/ /* * When doing source routing 'ia' can be NULL. 
Fall back * to the minimum guaranteed routeable packet size and use * the same hack as IPSEC to setup a dummyifp for icmp. */ if (ia == NULL) { dummyifp.if_mtu = IP_MSS; destifp = &dummyifp; } else destifp = ia->ia_ifp; #if defined(IPSEC) || defined(FAST_IPSEC) } #endif /*IPSEC || FAST_IPSEC*/ ipstat.ips_cantfrag++; break; case ENOBUFS: /* * A router should not generate ICMP_SOURCEQUENCH as * required in RFC1812 Requirements for IP Version 4 Routers. * Source quench could be a big problem under DoS attacks, * or if the underlying interface is rate-limited. * Those who need source quench packets may re-enable them * via the net.inet.ip.sendsourcequench sysctl. */ if (ip_sendsourcequench == 0) { m_freem(mcopy); return; } else { type = ICMP_SOURCEQUENCH; code = 0; } break; case EACCES: /* ipfw denied packet */ m_freem(mcopy); return; } icmp_error(mcopy, type, code, dest.s_addr, destifp); } void ip_savecontrol(inp, mp, ip, m) register struct inpcb *inp; register struct mbuf **mp; register struct ip *ip; register struct mbuf *m; { if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) { struct bintime bt; bintime(&bt); if (inp->inp_socket->so_options & SO_BINTIME) { *mp = sbcreatecontrol((caddr_t) &bt, sizeof(bt), SCM_BINTIME, SOL_SOCKET); if (*mp) mp = &(*mp)->m_next; } if (inp->inp_socket->so_options & SO_TIMESTAMP) { struct timeval tv; bintime2timeval(&bt, &tv); *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv), SCM_TIMESTAMP, SOL_SOCKET); if (*mp) mp = &(*mp)->m_next; } } if (inp->inp_flags & INP_RECVDSTADDR) { *mp = sbcreatecontrol((caddr_t) &ip->ip_dst, sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } if (inp->inp_flags & INP_RECVTTL) { *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl, sizeof(u_char), IP_RECVTTL, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } #ifdef notyet /* XXX * Moving these out of udp_input() made them even more broken * than they already were. */ /* options were tossed already */ if (inp->inp_flags & INP_RECVOPTS) { *mp = sbcreatecontrol((caddr_t) opts_deleted_above, sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } /* ip_srcroute doesn't do what we want here, need to fix */ if (inp->inp_flags & INP_RECVRETOPTS) { - *mp = sbcreatecontrol((caddr_t) ip_srcroute(), + *mp = sbcreatecontrol((caddr_t) ip_srcroute(m), sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } #endif if (inp->inp_flags & INP_RECVIF) { struct ifnet *ifp; struct sdlbuf { struct sockaddr_dl sdl; u_char pad[32]; } sdlbuf; struct sockaddr_dl *sdp; struct sockaddr_dl *sdl2 = &sdlbuf.sdl; if (((ifp = m->m_pkthdr.rcvif)) && ( ifp->if_index && (ifp->if_index <= if_index))) { sdp = (struct sockaddr_dl *) (ifaddr_byindex(ifp->if_index)->ifa_addr); /* * Change our mind and don't try copy. */ if ((sdp->sdl_family != AF_LINK) || (sdp->sdl_len > sizeof(sdlbuf))) { goto makedummy; } bcopy(sdp, sdl2, sdp->sdl_len); } else { makedummy: sdl2->sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]); sdl2->sdl_family = AF_LINK; sdl2->sdl_index = 0; sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0; } *mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len, IP_RECVIF, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } } /* * XXX these routines are called from the upper part of the kernel. * They need to be locked when we remove Giant. * * They could also be moved to ip_mroute.c, since all the RSVP * handling is done there already. 
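 *
 * An aside on ip_savecontrol() above: each control record is appended
 * through a pointer-to-pointer tail, so there is no special case for
 * the first element, and a failed sbcreatecontrol() simply leaves the
 * tail where it was.  A self-contained sketch of the idiom, with
 * illustrative names only:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
struct node { struct node *m_next; };

static void
append(struct node ***mpp, struct node *n)	/* n may be NULL */
{
	**mpp = n;			/* *mp = sbcreatecontrol(...) */
	if (n != NULL)
		*mpp = &n->m_next;	/* mp = &(*mp)->m_next */
}
#endif
/*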
*/ static int ip_rsvp_on; struct socket *ip_rsvpd; int ip_rsvp_init(struct socket *so) { if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP) return EOPNOTSUPP; if (ip_rsvpd != NULL) return EADDRINUSE; ip_rsvpd = so; /* * This may seem silly, but we need to be sure we don't over-increment * the RSVP counter, in case something slips up. */ if (!ip_rsvp_on) { ip_rsvp_on = 1; rsvp_on++; } return 0; } int ip_rsvp_done(void) { ip_rsvpd = NULL; /* * This may seem silly, but we need to be sure we don't over-decrement * the RSVP counter, in case something slips up. */ if (ip_rsvp_on) { ip_rsvp_on = 0; rsvp_on--; } return 0; } void rsvp_input(struct mbuf *m, int off) /* XXX must fixup manually */ { if (rsvp_input_p) { /* call the real one if loaded */ rsvp_input_p(m, off); return; } /* Can still get packets with rsvp_on = 0 if there is a local member * of the group to which the RSVP packet is addressed. But in this * case we want to throw the packet away. */ if (!rsvp_on) { m_freem(m); return; } if (ip_rsvpd != NULL) { rip_input(m, off); return; } /* Drop the packet */ m_freem(m); } diff --git a/sys/netinet/ip_var.h b/sys/netinet/ip_var.h index 46bd596f2895..2fa80d356151 100644 --- a/sys/netinet/ip_var.h +++ b/sys/netinet/ip_var.h @@ -1,213 +1,213 @@ /* * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ip_var.h 8.2 (Berkeley) 1/9/95 * $FreeBSD$ */ #ifndef _NETINET_IP_VAR_H_ #define _NETINET_IP_VAR_H_ #include /* * Overlay for ip header used by other protocols (tcp, udp). */ struct ipovly { u_char ih_x1[9]; /* (unused) */ u_char ih_pr; /* protocol */ u_short ih_len; /* protocol length */ struct in_addr ih_src; /* source internet address */ struct in_addr ih_dst; /* destination internet address */ }; #ifdef _KERNEL /* * Ip reassembly queue structure. Each fragment * being reassembled is attached to one of these structures. * They are timed out after ipq_ttl drops to 0, and may also * be reclaimed if memory becomes tight. 
*/ struct ipq { TAILQ_ENTRY(ipq) ipq_list; /* to other reass headers */ u_char ipq_ttl; /* time for reass q to live */ u_char ipq_p; /* protocol of this fragment */ u_short ipq_id; /* sequence id for reassembly */ struct mbuf *ipq_frags; /* to ip headers of fragments */ struct in_addr ipq_src,ipq_dst; u_char ipq_nfrags; /* # frags in this packet */ struct label *ipq_label; /* MAC label */ }; #endif /* _KERNEL */ /* * Structure stored in mbuf in inpcb.ip_options * and passed to ip_output when ip options are in use. * The actual length of the options (including ipopt_dst) * is in m_len. */ #define MAX_IPOPTLEN 40 struct ipoption { struct in_addr ipopt_dst; /* first-hop dst if source routed */ char ipopt_list[MAX_IPOPTLEN]; /* options proper */ }; /* * Structure attached to inpcb.ip_moptions and * passed to ip_output when IP multicast options are in use. */ struct ip_moptions { struct ifnet *imo_multicast_ifp; /* ifp for outgoing multicasts */ struct in_addr imo_multicast_addr; /* ifindex/addr on MULTICAST_IF */ u_char imo_multicast_ttl; /* TTL for outgoing multicasts */ u_char imo_multicast_loop; /* 1 => hear sends if a member */ u_short imo_num_memberships; /* no. memberships this socket */ struct in_multi *imo_membership[IP_MAX_MEMBERSHIPS]; u_long imo_multicast_vif; /* vif num outgoing multicasts */ }; struct ipstat { u_long ips_total; /* total packets received */ u_long ips_badsum; /* checksum bad */ u_long ips_tooshort; /* packet too short */ u_long ips_toosmall; /* not enough data */ u_long ips_badhlen; /* ip header length < data size */ u_long ips_badlen; /* ip length < ip header length */ u_long ips_fragments; /* fragments received */ u_long ips_fragdropped; /* frags dropped (dups, out of space) */ u_long ips_fragtimeout; /* fragments timed out */ u_long ips_forward; /* packets forwarded */ u_long ips_fastforward; /* packets fast forwarded */ u_long ips_cantforward; /* packets rcvd for unreachable dest */ u_long ips_redirectsent; /* packets forwarded on same net */ u_long ips_noproto; /* unknown or unsupported protocol */ u_long ips_delivered; /* datagrams delivered to upper level*/ u_long ips_localout; /* total ip packets generated here */ u_long ips_odropped; /* lost packets due to nobufs, etc. */ u_long ips_reassembled; /* total packets reassembled ok */ u_long ips_fragmented; /* datagrams successfully fragmented */ u_long ips_ofragments; /* output fragments created */ u_long ips_cantfrag; /* don't fragment flag was set, etc. 
*/ u_long ips_badoptions; /* error in option processing */ u_long ips_noroute; /* packets discarded due to no route */ u_long ips_badvers; /* ip version != 4 */ u_long ips_rawout; /* total raw ip packets generated */ u_long ips_toolong; /* ip length > max ip packet size */ u_long ips_notmember; /* multicasts for unregistered grps */ u_long ips_nogif; /* no match gif found */ u_long ips_badaddr; /* invalid address on header */ }; #ifdef _KERNEL /* flags passed to ip_output as last parameter */ #define IP_FORWARDING 0x1 /* most of ip header exists */ #define IP_RAWOUTPUT 0x2 /* raw ip header exists */ #define IP_SENDONES 0x4 /* send all-ones broadcast */ #define IP_ROUTETOIF SO_DONTROUTE /* bypass routing tables */ #define IP_ALLOWBROADCAST SO_BROADCAST /* can send broadcast packets */ /* mbuf flag used by ip_fastfwd */ #define M_FASTFWD_OURS M_PROTO1 /* changed dst to local */ struct ip; struct inpcb; struct route; struct sockopt; extern struct ipstat ipstat; extern u_short ip_id; /* ip packet ctr, for ids */ extern int ip_defttl; /* default IP ttl */ extern int ipforwarding; /* ip forwarding */ extern int ip_doopts; /* process or ignore IP options */ #ifdef IPSTEALTH extern int ipstealth; /* stealth forwarding */ #endif extern u_char ip_protox[]; extern struct socket *ip_rsvpd; /* reservation protocol daemon */ extern struct socket *ip_mrouter; /* multicast routing daemon */ extern int (*legal_vif_num)(int); extern u_long (*ip_mcast_src)(int); extern int rsvp_on; extern struct pr_usrreqs rip_usrreqs; int ip_ctloutput(struct socket *, struct sockopt *sopt); void ip_drain(void); int ip_fragment(struct ip *ip, struct mbuf **m_frag, int mtu, u_long if_hwassist_flags, int sw_csum); void ip_freemoptions(struct ip_moptions *); void ip_init(void); extern int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *, struct ip_moptions *); int ip_output(struct mbuf *, struct mbuf *, struct route *, int, struct ip_moptions *, struct inpcb *); struct mbuf * ip_reass(struct mbuf *); struct in_ifaddr * ip_rtaddr(struct in_addr); void ip_savecontrol(struct inpcb *, struct mbuf **, struct ip *, struct mbuf *); void ip_slowtimo(void); struct mbuf * - ip_srcroute(void); + ip_srcroute(struct mbuf *); void ip_stripoptions(struct mbuf *, struct mbuf *); u_int16_t ip_randomid(void); int rip_ctloutput(struct socket *, struct sockopt *); void rip_ctlinput(int, struct sockaddr *, void *); void rip_init(void); void rip_input(struct mbuf *, int); int rip_output(struct mbuf *, struct socket *, u_long); void ipip_input(struct mbuf *, int); void rsvp_input(struct mbuf *, int); int ip_rsvp_init(struct socket *); int ip_rsvp_done(void); extern int (*ip_rsvp_vif)(struct socket *, struct sockopt *); extern void (*ip_rsvp_force_done)(struct socket *); extern void (*rsvp_input_p)(struct mbuf *m, int off); extern struct pfil_head inet_pfil_hook; /* packet filter hooks */ void in_delayed_cksum(struct mbuf *m); static __inline uint16_t ip_newid(void); extern int ip_do_randomid; static __inline uint16_t ip_newid(void) { if (ip_do_randomid) return ip_randomid(); return htons(ip_id++); } #endif /* _KERNEL */ #endif /* !_NETINET_IP_VAR_H_ */ diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c index 91252354bd98..e648fc91e3e3 100644 --- a/sys/netinet/tcp_syncache.c +++ b/sys/netinet/tcp_syncache.c @@ -1,1492 +1,1492 @@ /*- * Copyright (c) 2001 Networks Associates Technology, Inc. * All rights reserved. 
* * This software was developed for the FreeBSD Project by Jonathan Lemon * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include "opt_mac.h" #include "opt_tcpdebug.h" #include "opt_tcp_sack.h" #include #include #include #include #include #include #include #include #include /* for proc0 declaration */ #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #include #include #endif #include #ifdef TCPDEBUG #include #endif #include #include #include #include #ifdef TCPDEBUG #include #endif #ifdef INET6 #include #endif #ifdef IPSEC #include #ifdef INET6 #include #endif #endif /*IPSEC*/ #ifdef FAST_IPSEC #include #ifdef INET6 #include #endif #include #endif /*FAST_IPSEC*/ #include #include static int tcp_syncookies = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW, &tcp_syncookies, 0, "Use TCP SYN cookies if the syncache overflows"); static void syncache_drop(struct syncache *, struct syncache_head *); static void syncache_free(struct syncache *); static void syncache_insert(struct syncache *, struct syncache_head *); struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **); #ifdef TCPDEBUG static int syncache_respond(struct syncache *, struct mbuf *, struct socket *); #else static int syncache_respond(struct syncache *, struct mbuf *); #endif static struct socket *syncache_socket(struct syncache *, struct socket *, struct mbuf *m); static void syncache_timer(void *); static u_int32_t syncookie_generate(struct syncache *, u_int32_t *); static struct syncache *syncookie_lookup(struct in_conninfo *, struct tcphdr *, struct socket *); /* * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies. * 3 retransmits corresponds to a timeout of (1 + 2 + 4 + 8 == 15) seconds, * the odds are that the user has given up attempting to connect by then. 
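 *
 * Checking the arithmetic: with the doubling backoff applied by
 * SYNCACHE_TIMEOUT below and an assumed one-second base retransmit
 * timer (TCPTV_RTOBASE), the successive waits are 1, 2, 4 and 8
 * seconds, so an entry lives 1 + 2 + 4 + 8 = 15 seconds before the
 * final timeout fires.  A standalone restatement:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
#include <stdio.h>

int
main(void)
{
	int backoff[] = { 1, 2, 4, 8 };		/* tcp_backoff[0..3] */
	int slot, total = 0;

	for (slot = 0; slot <= 3; slot++)	/* initial send + 3 rexmits */
		total += backoff[slot];
	printf("syncache entry lifetime: %d s\n", total);	/* 15 */
	return (0);
}
#endif
/*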
*/ #define SYNCACHE_MAXREXMTS 3 /* Arbitrary values */ #define TCP_SYNCACHE_HASHSIZE 512 #define TCP_SYNCACHE_BUCKETLIMIT 30 struct tcp_syncache { struct syncache_head *hashbase; uma_zone_t zone; u_int hashsize; u_int hashmask; u_int bucket_limit; u_int cache_count; u_int cache_limit; u_int rexmt_limit; u_int hash_secret; TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1]; struct callout tt_timerq[SYNCACHE_MAXREXMTS + 1]; }; static struct tcp_syncache tcp_syncache; SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache"); SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN, &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache"); SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN, &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache"); SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD, &tcp_syncache.cache_count, 0, "Current number of entries in syncache"); SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN, &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable"); SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW, &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions"); static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache"); #define SYNCACHE_HASH(inc, mask) \ ((tcp_syncache.hash_secret ^ \ (inc)->inc_faddr.s_addr ^ \ ((inc)->inc_faddr.s_addr >> 16) ^ \ (inc)->inc_fport ^ (inc)->inc_lport) & mask) #define SYNCACHE_HASH6(inc, mask) \ ((tcp_syncache.hash_secret ^ \ (inc)->inc6_faddr.s6_addr32[0] ^ \ (inc)->inc6_faddr.s6_addr32[3] ^ \ (inc)->inc_fport ^ (inc)->inc_lport) & mask) #define ENDPTS_EQ(a, b) ( \ (a)->ie_fport == (b)->ie_fport && \ (a)->ie_lport == (b)->ie_lport && \ (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \ (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \ ) #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0) #define SYNCACHE_TIMEOUT(sc, slot) do { \ sc->sc_rxtslot = (slot); \ sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)]; \ TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq); \ if (!callout_active(&tcp_syncache.tt_timerq[(slot)])) \ callout_reset(&tcp_syncache.tt_timerq[(slot)], \ TCPTV_RTOBASE * tcp_backoff[(slot)], \ syncache_timer, (void *)((intptr_t)(slot))); \ } while (0) static void syncache_free(struct syncache *sc) { if (sc->sc_ipopts) (void) m_free(sc->sc_ipopts); uma_zfree(tcp_syncache.zone, sc); } void syncache_init(void) { int i; tcp_syncache.cache_count = 0; tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE; tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT; tcp_syncache.cache_limit = tcp_syncache.hashsize * tcp_syncache.bucket_limit; tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS; tcp_syncache.hash_secret = arc4random(); TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize", &tcp_syncache.hashsize); TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit", &tcp_syncache.cache_limit); TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit", &tcp_syncache.bucket_limit); if (!powerof2(tcp_syncache.hashsize)) { printf("WARNING: syncache hash size is not a power of 2.\n"); tcp_syncache.hashsize = 512; /* safe default */ } tcp_syncache.hashmask = tcp_syncache.hashsize - 1; /* Allocate the hash table. */ MALLOC(tcp_syncache.hashbase, struct syncache_head *, tcp_syncache.hashsize * sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK); /* Initialize the hash buckets. 
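 *
 * An aside: hashsize is forced to a power of two above so that
 * SYNCACHE_HASH can reduce with `& hashmask' instead of a modulo,
 * and hash_secret (arc4random() at init) is presumably mixed in so a
 * remote peer cannot aim every SYN at one bucket.  The IPv4 bucket
 * computation, restated standalone:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
#include <stdint.h>

static unsigned int
syncache_bucket(uint32_t secret, uint32_t faddr, uint16_t fport,
    uint16_t lport, unsigned int hashmask)
{
	return ((secret ^ faddr ^ (faddr >> 16) ^ fport ^ lport) &
	    hashmask);			/* hashmask == hashsize - 1 */
}
#endif
/*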
*/ for (i = 0; i < tcp_syncache.hashsize; i++) { TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket); tcp_syncache.hashbase[i].sch_length = 0; } /* Initialize the timer queues. */ for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) { TAILQ_INIT(&tcp_syncache.timerq[i]); callout_init(&tcp_syncache.tt_timerq[i], debug_mpsafenet ? CALLOUT_MPSAFE : 0); } /* * Allocate the syncache entries. Allow the zone to allocate one * more entry than cache limit, so a new entry can bump out an * older one. */ tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit); tcp_syncache.cache_limit -= 1; } static void syncache_insert(sc, sch) struct syncache *sc; struct syncache_head *sch; { struct syncache *sc2; int i; INP_INFO_WLOCK_ASSERT(&tcbinfo); /* * Make sure that we don't overflow the per-bucket * limit or the total cache size limit. */ if (sch->sch_length >= tcp_syncache.bucket_limit) { /* * The bucket is full, toss the oldest element. */ sc2 = TAILQ_FIRST(&sch->sch_bucket); sc2->sc_tp->ts_recent = ticks; syncache_drop(sc2, sch); tcpstat.tcps_sc_bucketoverflow++; } else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) { /* * The cache is full. Toss the oldest entry in the * entire cache. This is the front entry in the * first non-empty timer queue with the largest * timeout value. */ for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) { sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]); if (sc2 != NULL) break; } sc2->sc_tp->ts_recent = ticks; syncache_drop(sc2, NULL); tcpstat.tcps_sc_cacheoverflow++; } /* Initialize the entry's timer. */ SYNCACHE_TIMEOUT(sc, 0); /* Put it into the bucket. */ TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash); sch->sch_length++; tcp_syncache.cache_count++; tcpstat.tcps_sc_added++; } static void syncache_drop(sc, sch) struct syncache *sc; struct syncache_head *sch; { INP_INFO_WLOCK_ASSERT(&tcbinfo); if (sch == NULL) { #ifdef INET6 if (sc->sc_inc.inc_isipv6) { sch = &tcp_syncache.hashbase[ SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)]; } else #endif { sch = &tcp_syncache.hashbase[ SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)]; } } TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash); sch->sch_length--; tcp_syncache.cache_count--; TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq); if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot])) callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]); syncache_free(sc); } /* * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted. * If we have retransmitted an entry the maximum number of times, expire it. */ static void syncache_timer(xslot) void *xslot; { intptr_t slot = (intptr_t)xslot; struct syncache *sc, *nsc; struct inpcb *inp; INP_INFO_WLOCK(&tcbinfo); if (callout_pending(&tcp_syncache.tt_timerq[slot]) || !callout_active(&tcp_syncache.tt_timerq[slot])) { /* XXX can this happen? */ INP_INFO_WUNLOCK(&tcbinfo); return; } callout_deactivate(&tcp_syncache.tt_timerq[slot]); nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]); while (nsc != NULL) { if (ticks < nsc->sc_rxttime) break; sc = nsc; inp = sc->sc_tp->t_inpcb; if (slot == SYNCACHE_MAXREXMTS || slot >= tcp_syncache.rexmt_limit || inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) { nsc = TAILQ_NEXT(sc, sc_timerq); syncache_drop(sc, NULL); tcpstat.tcps_sc_stale++; continue; } /* * syncache_respond() may call back into the syncache to * modify another entry, so do not obtain the next * entry on the timer chain until it has completed.
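 *
 * Note the two fetch points for `nsc' in this loop: when an entry is
 * dropped, the next pointer is taken *before* syncache_drop() frees
 * it, but when an entry is retransmitted, the next pointer is taken
 * *after* syncache_respond(), because the entry itself survives the
 * callback while its neighbours on the timer queue may not.  A
 * generic restatement of the pattern (names are illustrative):
 */
#if 0	/* Illustrative sketch only; not part of this change. */
	for (e = TAILQ_FIRST(&q); e != NULL; e = next) {
		if (expired(e)) {
			next = TAILQ_NEXT(e, link);	/* before the free */
			destroy(e);
			continue;
		}
		callback(e);		/* may unlink e's neighbours */
		next = TAILQ_NEXT(e, link);	/* after the callback */
	}
#endif
/*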
*/ #ifdef TCPDEBUG (void) syncache_respond(sc, NULL, NULL); #else (void) syncache_respond(sc, NULL); #endif nsc = TAILQ_NEXT(sc, sc_timerq); tcpstat.tcps_sc_retransmitted++; TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq); SYNCACHE_TIMEOUT(sc, slot + 1); } if (nsc != NULL) callout_reset(&tcp_syncache.tt_timerq[slot], nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot)); INP_INFO_WUNLOCK(&tcbinfo); } /* * Find an entry in the syncache. */ struct syncache * syncache_lookup(inc, schp) struct in_conninfo *inc; struct syncache_head **schp; { struct syncache *sc; struct syncache_head *sch; INP_INFO_WLOCK_ASSERT(&tcbinfo); #ifdef INET6 if (inc->inc_isipv6) { sch = &tcp_syncache.hashbase[ SYNCACHE_HASH6(inc, tcp_syncache.hashmask)]; *schp = sch; TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) { if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) return (sc); } } else #endif { sch = &tcp_syncache.hashbase[ SYNCACHE_HASH(inc, tcp_syncache.hashmask)]; *schp = sch; TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) { #ifdef INET6 if (sc->sc_inc.inc_isipv6) continue; #endif if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) return (sc); } } return (NULL); } /* * This function is called when we get a RST for a * non-existent connection, so that we can see if the * connection is in the syn cache. If it is, zap it. */ void syncache_chkrst(inc, th) struct in_conninfo *inc; struct tcphdr *th; { struct syncache *sc; struct syncache_head *sch; INP_INFO_WLOCK_ASSERT(&tcbinfo); sc = syncache_lookup(inc, &sch); if (sc == NULL) return; /* * If the RST bit is set, check the sequence number to see * if this is a valid reset segment. * RFC 793 page 37: * In all states except SYN-SENT, all reset (RST) segments * are validated by checking their SEQ-fields. A reset is * valid if its sequence number is in the window. * * The sequence number in the reset segment is normally an * echo of our outgoing acknowledgement numbers, but some hosts * send a reset with the sequence number at the rightmost edge * of our receive window, and we have to handle this case. */ if (SEQ_GEQ(th->th_seq, sc->sc_irs) && SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) { syncache_drop(sc, sch); tcpstat.tcps_sc_reset++; } } void syncache_badack(inc) struct in_conninfo *inc; { struct syncache *sc; struct syncache_head *sch; INP_INFO_WLOCK_ASSERT(&tcbinfo); sc = syncache_lookup(inc, &sch); if (sc != NULL) { syncache_drop(sc, sch); tcpstat.tcps_sc_badack++; } } void syncache_unreach(inc, th) struct in_conninfo *inc; struct tcphdr *th; { struct syncache *sc; struct syncache_head *sch; INP_INFO_WLOCK_ASSERT(&tcbinfo); /* we are called at splnet() here */ sc = syncache_lookup(inc, &sch); if (sc == NULL) return; /* If the sequence number != sc_iss, then it's a bogus ICMP msg */ if (ntohl(th->th_seq) != sc->sc_iss) return; /* * If we've retransmitted 3 times and this is our second error, * we remove the entry. Otherwise, we allow it to continue on. * This prevents us from incorrectly nuking an entry during a * spurious network outage. * * See tcp_notify(). */ if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) { sc->sc_flags |= SCF_UNREACH; return; } syncache_drop(sc, sch); tcpstat.tcps_sc_unreach++; } /* * Build a new TCP socket structure from a syncache entry.
*/ static struct socket * syncache_socket(sc, lso, m) struct syncache *sc; struct socket *lso; struct mbuf *m; { struct inpcb *inp = NULL; struct socket *so; struct tcpcb *tp; NET_ASSERT_GIANT(); INP_INFO_WLOCK_ASSERT(&tcbinfo); /* * Ok, create the full blown connection, and set things up * as they would have been set up if we had created the * connection when the SYN arrived. If we can't create * the connection, abort it. */ so = sonewconn(lso, SS_ISCONNECTED); if (so == NULL) { /* * Drop the connection; we will send a RST if the peer * retransmits the ACK, */ tcpstat.tcps_listendrop++; goto abort2; } #ifdef MAC SOCK_LOCK(so); mac_set_socket_peer_from_mbuf(m, so); SOCK_UNLOCK(so); #endif inp = sotoinpcb(so); INP_LOCK(inp); /* * Insert new socket into hash list. */ inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6; #ifdef INET6 if (sc->sc_inc.inc_isipv6) { inp->in6p_laddr = sc->sc_inc.inc6_laddr; } else { inp->inp_vflag &= ~INP_IPV6; inp->inp_vflag |= INP_IPV4; #endif inp->inp_laddr = sc->sc_inc.inc_laddr; #ifdef INET6 } #endif inp->inp_lport = sc->sc_inc.inc_lport; if (in_pcbinshash(inp) != 0) { /* * Undo the assignments above if we failed to * put the PCB on the hash lists. */ #ifdef INET6 if (sc->sc_inc.inc_isipv6) inp->in6p_laddr = in6addr_any; else #endif inp->inp_laddr.s_addr = INADDR_ANY; inp->inp_lport = 0; goto abort; } #ifdef IPSEC /* copy old policy into new socket's */ if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp)) printf("syncache_expand: could not copy policy\n"); #endif #ifdef FAST_IPSEC /* copy old policy into new socket's */ if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp)) printf("syncache_expand: could not copy policy\n"); #endif #ifdef INET6 if (sc->sc_inc.inc_isipv6) { struct inpcb *oinp = sotoinpcb(lso); struct in6_addr laddr6; struct sockaddr_in6 sin6; /* * Inherit socket options from the listening socket. * Note that in6p_inputopts are not (and should not be) * copied, since it stores previously received options and is * used to detect if each new option is different than the * previous one and hence should be passed to a user. * If we copied in6p_inputopts, a user would not be able to * receive options just after calling the accept system call. */ inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS; if (oinp->in6p_outputopts) inp->in6p_outputopts = ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT); sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(sin6); sin6.sin6_addr = sc->sc_inc.inc6_faddr; sin6.sin6_port = sc->sc_inc.inc_fport; sin6.sin6_flowinfo = sin6.sin6_scope_id = 0; laddr6 = inp->in6p_laddr; if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) inp->in6p_laddr = sc->sc_inc.inc6_laddr; if (in6_pcbconnect(inp, (struct sockaddr *)&sin6, thread0.td_ucred)) { inp->in6p_laddr = laddr6; goto abort; } /* Override flowlabel from in6_pcbconnect. 
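 *
 * The IPv4 arm just below is the reply half of the m_tag change:
 * ip_srcroute(m) now reverses the route recorded in the completing
 * ACK's PACKET_TAG_IPOPTIONS tag (deleting the tag as it goes), and
 * only if the ACK carried no source route do we fall back to
 * sc_ipopts, the options remembered from the original SYN.  Note the
 * ownership hand-off; restated with comments:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
	inp->inp_options = ip_srcroute(m);	/* consumes the m_tag */
	if (inp->inp_options == NULL) {
		inp->inp_options = sc->sc_ipopts;
		sc->sc_ipopts = NULL;	/* pcb owns the mbuf now, so
					 * syncache_free() won't free it */
	}
#endif
/*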
*/ inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK; inp->in6p_flowinfo |= sc->sc_flowlabel; } else #endif { struct in_addr laddr; struct sockaddr_in sin; - inp->inp_options = ip_srcroute(); + inp->inp_options = ip_srcroute(m); if (inp->inp_options == NULL) { inp->inp_options = sc->sc_ipopts; sc->sc_ipopts = NULL; } sin.sin_family = AF_INET; sin.sin_len = sizeof(sin); sin.sin_addr = sc->sc_inc.inc_faddr; sin.sin_port = sc->sc_inc.inc_fport; bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero)); laddr = inp->inp_laddr; if (inp->inp_laddr.s_addr == INADDR_ANY) inp->inp_laddr = sc->sc_inc.inc_laddr; if (in_pcbconnect(inp, (struct sockaddr *)&sin, thread0.td_ucred)) { inp->inp_laddr = laddr; goto abort; } } tp = intotcpcb(inp); tp->t_state = TCPS_SYN_RECEIVED; tp->iss = sc->sc_iss; tp->irs = sc->sc_irs; tcp_rcvseqinit(tp); tcp_sendseqinit(tp); tp->snd_wl1 = sc->sc_irs; tp->rcv_up = sc->sc_irs + 1; tp->rcv_wnd = sc->sc_wnd; tp->rcv_adv += tp->rcv_wnd; tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY); if (sc->sc_flags & SCF_NOOPT) tp->t_flags |= TF_NOOPT; if (sc->sc_flags & SCF_WINSCALE) { tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE; tp->requested_s_scale = sc->sc_requested_s_scale; tp->request_r_scale = sc->sc_request_r_scale; } if (sc->sc_flags & SCF_TIMESTAMP) { tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP; tp->ts_recent = sc->sc_tsrecent; tp->ts_recent_age = ticks; } if (sc->sc_flags & SCF_CC) { /* * Initialization of the tcpcb for transaction; * set SND.WND = SEG.WND, * initialize CCsend and CCrecv. */ tp->t_flags |= TF_REQ_CC|TF_RCVD_CC; tp->cc_send = sc->sc_cc_send; tp->cc_recv = sc->sc_cc_recv; } #ifdef TCP_SIGNATURE if (sc->sc_flags & SCF_SIGNATURE) tp->t_flags |= TF_SIGNATURE; #endif if (sc->sc_flags & SCF_SACK) { tp->sack_enable = 1; tp->t_flags |= TF_SACK_PERMIT; } /* * Set up MSS and get cached values from tcp_hostcache. * This might overwrite some of the defaults we just set. */ tcp_mss(tp, sc->sc_peer_mss); /* * If the SYN,ACK was retransmitted, reset cwnd to 1 segment. */ if (sc->sc_rxtslot != 0) tp->snd_cwnd = tp->t_maxseg; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); INP_UNLOCK(inp); tcpstat.tcps_accepts++; return (so); abort: INP_UNLOCK(inp); abort2: if (so != NULL) (void) soabort(so); return (NULL); } /* * This function gets called when we receive an ACK for a * socket in the LISTEN state. We look up the connection * in the syncache, and if it's there, we pull it out of * the cache and turn it into a full-blown connection in * the SYN-RECEIVED state. */ int syncache_expand(inc, th, sop, m) struct in_conninfo *inc; struct tcphdr *th; struct socket **sop; struct mbuf *m; { struct syncache *sc; struct syncache_head *sch; struct socket *so; INP_INFO_WLOCK_ASSERT(&tcbinfo); sc = syncache_lookup(inc, &sch); if (sc == NULL) { /* * There is no syncache entry, so see if this ACK is * a returning syncookie. To do this, first: * A. See if this socket has had a syncache entry dropped in * the past. We don't want to accept a bogus syncookie * if we've never received a SYN. * B. Check that the syncookie is valid. If it is, then * cobble up a fake syncache entry, and return. */ if (!tcp_syncookies) return (0); sc = syncookie_lookup(inc, th, *sop); if (sc == NULL) return (0); sch = NULL; tcpstat.tcps_sc_recvcookie++; } /* * If seg contains an ACK, but not for our SYN/ACK, send a RST. */ if (th->th_ack != sc->sc_iss + 1) return (0); so = syncache_socket(sc, *sop, m); if (so == NULL) { #if 0 resetandabort: /* XXXjlemon check this - is this correct?
*/ (void) tcp_respond(NULL, m, m, th, th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK); #endif m_freem(m); /* XXX only needed for above */ tcpstat.tcps_sc_aborted++; } else tcpstat.tcps_sc_completed++; if (sch == NULL) syncache_free(sc); else syncache_drop(sc, sch); *sop = so; return (1); } /* * Given a LISTEN socket and an inbound SYN request, add * this to the syn cache, and send back a segment: * * to the source. * * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN. * Doing so would require that we hold onto the data and deliver it * to the application. However, if we are the target of a SYN-flood * DoS attack, an attacker could send data which would eventually * consume all available buffer space if it were ACKed. By not ACKing * the data, we avoid this DoS scenario. */ int syncache_add(inc, to, th, sop, m) struct in_conninfo *inc; struct tcpopt *to; struct tcphdr *th; struct socket **sop; struct mbuf *m; { struct tcpcb *tp; struct socket *so; struct syncache *sc = NULL; struct syncache_head *sch; struct mbuf *ipopts = NULL; struct rmxp_tao tao; u_int32_t flowtmp; int i, win; INP_INFO_WLOCK_ASSERT(&tcbinfo); so = *sop; tp = sototcpcb(so); bzero(&tao, sizeof(tao)); /* * Remember the IP options, if any. */ #ifdef INET6 if (!inc->inc_isipv6) #endif - ipopts = ip_srcroute(); + ipopts = ip_srcroute(m); /* * See if we already have an entry for this connection. * If we do, resend the SYN,ACK, and reset the retransmit timer. * * XXX * should the syncache be re-initialized with the contents * of the new SYN here (which may have different options?) */ sc = syncache_lookup(inc, &sch); if (sc != NULL) { tcpstat.tcps_sc_dupsyn++; if (ipopts) { /* * If we were remembering a previous source route, * forget it and use the new one we've been given. */ if (sc->sc_ipopts) (void) m_free(sc->sc_ipopts); sc->sc_ipopts = ipopts; } /* * Update timestamp if present. */ if (sc->sc_flags & SCF_TIMESTAMP) sc->sc_tsrecent = to->to_tsval; /* * PCB may have changed, pick up new values. */ sc->sc_tp = tp; sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt; #ifdef TCPDEBUG if (syncache_respond(sc, m, so) == 0) { #else if (syncache_respond(sc, m) == 0) { #endif /* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */ TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq); SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot); tcpstat.tcps_sndacks++; tcpstat.tcps_sndtotal++; } *sop = NULL; return (1); } sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT); if (sc == NULL) { /* * The zone allocator couldn't provide more entries. * Treat this as if the cache was full; drop the oldest * entry and insert the new one. */ /* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */ for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) { sc = TAILQ_FIRST(&tcp_syncache.timerq[i]); if (sc != NULL) break; } sc->sc_tp->ts_recent = ticks; syncache_drop(sc, NULL); tcpstat.tcps_sc_zonefail++; sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT); if (sc == NULL) { if (ipopts) (void) m_free(ipopts); return (0); } } /* * Fill in the syncache values. */ bzero(sc, sizeof(*sc)); sc->sc_tp = tp; sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt; sc->sc_ipopts = ipopts; sc->sc_inc.inc_fport = inc->inc_fport; sc->sc_inc.inc_lport = inc->inc_lport; #ifdef INET6 sc->sc_inc.inc_isipv6 = inc->inc_isipv6; if (inc->inc_isipv6) { sc->sc_inc.inc6_faddr = inc->inc6_faddr; sc->sc_inc.inc6_laddr = inc->inc6_laddr; } else #endif { sc->sc_inc.inc_faddr = inc->inc_faddr; sc->sc_inc.inc_laddr = inc->inc_laddr; } sc->sc_irs = th->th_seq; sc->sc_flags = 0; sc->sc_peer_mss = to->to_flags & TOF_MSS ? 
to->to_mss : 0; sc->sc_flowlabel = 0; if (tcp_syncookies) { sc->sc_iss = syncookie_generate(sc, &flowtmp); #ifdef INET6 if (inc->inc_isipv6 && (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) { sc->sc_flowlabel = flowtmp & IPV6_FLOWLABEL_MASK; } #endif } else { sc->sc_iss = arc4random(); #ifdef INET6 if (inc->inc_isipv6 && (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) { sc->sc_flowlabel = (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); } #endif } /* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */ win = sbspace(&so->so_rcv); win = imax(win, 0); win = imin(win, TCP_MAXWIN); sc->sc_wnd = win; if (tcp_do_rfc1323) { /* * A timestamp received in a SYN makes * it ok to send timestamp requests and replies. */ if (to->to_flags & TOF_TS) { sc->sc_tsrecent = to->to_tsval; sc->sc_flags |= SCF_TIMESTAMP; } if (to->to_flags & TOF_SCALE) { int wscale = 0; /* Compute proper scaling value from buffer space */ while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat) wscale++; sc->sc_request_r_scale = wscale; sc->sc_requested_s_scale = to->to_requested_s_scale; sc->sc_flags |= SCF_WINSCALE; } } if (tcp_do_rfc1644) { /* * A CC or CC.new option received in a SYN makes * it ok to send CC in subsequent segments. */ if (to->to_flags & (TOF_CC|TOF_CCNEW)) { sc->sc_cc_recv = to->to_cc; sc->sc_cc_send = CC_INC(tcp_ccgen); sc->sc_flags |= SCF_CC; } } if (tp->t_flags & TF_NOOPT) sc->sc_flags = SCF_NOOPT; #ifdef TCP_SIGNATURE /* * If listening socket requested TCP digests, and received SYN * contains the option, flag this in the syncache so that * syncache_respond() will do the right thing with the SYN+ACK. * XXX Currently we always record the option by default and will * attempt to use it in syncache_respond(). */ if (to->to_flags & TOF_SIGNATURE) sc->sc_flags |= SCF_SIGNATURE; #endif if (to->to_flags & TOF_SACK) sc->sc_flags |= SCF_SACK; /* * XXX * We have the option here of not doing TAO (even if the segment * qualifies) and instead fall back to a normal 3WHS via the syncache. * This allows us to apply synflood protection to TAO-qualifying SYNs * also. However, there should be a heuristic to determine when to * do this, and one is not present at the moment. */ /* * Perform TAO test on incoming CC (SEG.CC) option, if any. * - compare SEG.CC against cached CC from the same host, if any. * - if SEG.CC > cached value, SYN must be new and is accepted * immediately: save new CC in the cache, mark the socket * connected, enter ESTABLISHED state, turn on flag to * send a SYN in the next segment. * A virtual advertised window is set in rcv_adv to * initialize SWS prevention. Then enter normal segment * processing: drop SYN, process data and FIN. * - otherwise do a normal 3-way handshake. */ if (tcp_do_rfc1644) tcp_hc_gettao(&sc->sc_inc, &tao); if ((to->to_flags & TOF_CC) != 0) { if (((tp->t_flags & TF_NOPUSH) != 0) && sc->sc_flags & SCF_CC && tao.tao_cc != 0 && CC_GT(to->to_cc, tao.tao_cc)) { sc->sc_rxtslot = 0; so = syncache_socket(sc, *sop, m); if (so != NULL) { tao.tao_cc = to->to_cc; tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC, tao.tao_cc, 0); *sop = so; } syncache_free(sc); return (so != NULL); } } else { /* * No CC option, but maybe CC.NEW: invalidate cached value. */ if (tcp_do_rfc1644) { tao.tao_cc = 0; tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC, tao.tao_cc, 0); } } /* * TAO test failed or there was no CC option, * do a standard 3-way handshake.
*/ #ifdef TCPDEBUG if (syncache_respond(sc, m, so) == 0) { #else if (syncache_respond(sc, m) == 0) { #endif syncache_insert(sc, sch); tcpstat.tcps_sndacks++; tcpstat.tcps_sndtotal++; } else { syncache_free(sc); tcpstat.tcps_sc_dropped++; } *sop = NULL; return (1); } #ifdef TCPDEBUG static int syncache_respond(sc, m, so) struct syncache *sc; struct mbuf *m; struct socket *so; #else static int syncache_respond(sc, m) struct syncache *sc; struct mbuf *m; #endif { u_int8_t *optp; int optlen, error; u_int16_t tlen, hlen, mssopt; struct ip *ip = NULL; struct tcphdr *th; struct inpcb *inp; #ifdef INET6 struct ip6_hdr *ip6 = NULL; #endif hlen = #ifdef INET6 (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) : #endif sizeof(struct ip); KASSERT((&sc->sc_inc) != NULL, ("syncache_respond with NULL in_conninfo pointer")); /* Determine MSS we advertize to other end of connection */ mssopt = tcp_mssopt(&sc->sc_inc); /* Compute the size of the TCP options. */ if (sc->sc_flags & SCF_NOOPT) { optlen = 0; } else { optlen = TCPOLEN_MAXSEG + ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) + ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) + ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0); #ifdef TCP_SIGNATURE optlen += (sc->sc_flags & SCF_SIGNATURE) ? TCPOLEN_SIGNATURE + 2 : 0; #endif optlen += ((sc->sc_flags & SCF_SACK) ? 4 : 0); } tlen = hlen + sizeof(struct tcphdr) + optlen; /* * XXX * assume that the entire packet will fit in a header mbuf */ KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small")); /* * XXX shouldn't this reuse the mbuf if possible ? * Create the IP+TCP header from scratch. */ if (m) m_freem(m); m = m_gethdr(M_DONTWAIT, MT_HEADER); if (m == NULL) return (ENOBUFS); m->m_data += max_linkhdr; m->m_len = tlen; m->m_pkthdr.len = tlen; m->m_pkthdr.rcvif = NULL; inp = sc->sc_tp->t_inpcb; INP_LOCK(inp); #ifdef MAC mac_create_mbuf_from_inpcb(inp, m); #endif #ifdef INET6 if (sc->sc_inc.inc_isipv6) { ip6 = mtod(m, struct ip6_hdr *); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_nxt = IPPROTO_TCP; ip6->ip6_src = sc->sc_inc.inc6_laddr; ip6->ip6_dst = sc->sc_inc.inc6_faddr; ip6->ip6_plen = htons(tlen - hlen); /* ip6_hlim is set after checksum */ ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK; ip6->ip6_flow |= sc->sc_flowlabel; th = (struct tcphdr *)(ip6 + 1); } else #endif { ip = mtod(m, struct ip *); ip->ip_v = IPVERSION; ip->ip_hl = sizeof(struct ip) >> 2; ip->ip_len = tlen; ip->ip_id = 0; ip->ip_off = 0; ip->ip_sum = 0; ip->ip_p = IPPROTO_TCP; ip->ip_src = sc->sc_inc.inc_laddr; ip->ip_dst = sc->sc_inc.inc_faddr; ip->ip_ttl = inp->inp_ip_ttl; /* XXX */ ip->ip_tos = inp->inp_ip_tos; /* XXX */ /* * See if we should do MTU discovery. Route lookups are * expensive, so we will only unset the DF bit if: * * 1) path_mtu_discovery is disabled * 2) the SCF_UNREACH flag has been set */ if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0)) ip->ip_off |= IP_DF; th = (struct tcphdr *)(ip + 1); } th->th_sport = sc->sc_inc.inc_lport; th->th_dport = sc->sc_inc.inc_fport; th->th_seq = htonl(sc->sc_iss); th->th_ack = htonl(sc->sc_irs + 1); th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; th->th_x2 = 0; th->th_flags = TH_SYN|TH_ACK; th->th_win = htons(sc->sc_wnd); th->th_urp = 0; /* Tack on the TCP options. 
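 *
 * Every option below is hand-packed a byte at a time.  The first one,
 * MSS, is the canonical kind/len/value TLV with the 16-bit value in
 * network byte order; restated standalone (1460 encodes as the bytes
 * 02 04 05 b4, and the helper name is illustrative):
 */
#if 0	/* Illustrative sketch only; not part of this change. */
#include <stdint.h>

#define TCPOPT_MAXSEG	2
#define TCPOLEN_MAXSEG	4

static uint8_t *
tcp_put_mss(uint8_t *optp, uint16_t mss)
{
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mss >> 8) & 0xff;	/* high byte first */
	*optp++ = mss & 0xff;
	return (optp);
}
#endif
/*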
*/ if (optlen != 0) { optp = (u_int8_t *)(th + 1); *optp++ = TCPOPT_MAXSEG; *optp++ = TCPOLEN_MAXSEG; *optp++ = (mssopt >> 8) & 0xff; *optp++ = mssopt & 0xff; if (sc->sc_flags & SCF_WINSCALE) { *((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 | TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 | sc->sc_request_r_scale); optp += 4; } if (sc->sc_flags & SCF_TIMESTAMP) { u_int32_t *lp = (u_int32_t *)(optp); /* Form timestamp option per appendix A of RFC 1323. */ *lp++ = htonl(TCPOPT_TSTAMP_HDR); *lp++ = htonl(ticks); *lp = htonl(sc->sc_tsrecent); optp += TCPOLEN_TSTAMP_APPA; } /* * Send CC and CC.echo if we received CC from our peer. */ if (sc->sc_flags & SCF_CC) { u_int32_t *lp = (u_int32_t *)(optp); *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC)); *lp++ = htonl(sc->sc_cc_send); *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO)); *lp = htonl(sc->sc_cc_recv); optp += TCPOLEN_CC_APPA * 2; } #ifdef TCP_SIGNATURE /* * Handle TCP-MD5 passive opener response. */ if (sc->sc_flags & SCF_SIGNATURE) { u_int8_t *bp = optp; int i; *bp++ = TCPOPT_SIGNATURE; *bp++ = TCPOLEN_SIGNATURE; for (i = 0; i < TCP_SIGLEN; i++) *bp++ = 0; tcp_signature_compute(m, sizeof(struct ip), 0, optlen, optp + 2, IPSEC_DIR_OUTBOUND); *bp++ = TCPOPT_NOP; *bp++ = TCPOPT_EOL; optp += TCPOLEN_SIGNATURE + 2; } #endif /* TCP_SIGNATURE */ if (sc->sc_flags & SCF_SACK) { *(u_int32_t *)optp = htonl(TCPOPT_SACK_PERMIT_HDR); optp += 4; } } #ifdef INET6 if (sc->sc_inc.inc_isipv6) { th->th_sum = 0; th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen); ip6->ip6_hlim = in6_selecthlim(NULL, NULL); error = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp); } else #endif { th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(tlen - hlen + IPPROTO_TCP)); m->m_pkthdr.csum_flags = CSUM_TCP; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); #ifdef TCPDEBUG /* * Trace. */ if (so != NULL && so->so_options & SO_DEBUG) { struct tcpcb *tp = sototcpcb(so); tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); } #endif error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, inp); } INP_UNLOCK(inp); return (error); } /* * cookie layers: * * |. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .| * | peer iss | * | MD5(laddr,faddr,secret,lport,fport) |. . . . . . .| * | 0 |(A)| | * (A): peer mss index */ /* * The values below are chosen to minimize the size of the tcp_secret * table, as well as providing roughly a 16 second lifetime for the cookie. */ #define SYNCOOKIE_WNDBITS 5 /* exposed bits for window indexing */ #define SYNCOOKIE_TIMESHIFT 1 /* scale ticks to window time units */ #define SYNCOOKIE_WNDMASK ((1 << SYNCOOKIE_WNDBITS) - 1) #define SYNCOOKIE_NSECRETS (1 << SYNCOOKIE_WNDBITS) #define SYNCOOKIE_TIMEOUT \ (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT)) #define SYNCOOKIE_DATAMASK ((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK) static struct { u_int32_t ts_secbits[4]; u_int ts_expire; } tcp_secret[SYNCOOKIE_NSECRETS]; static int tcp_msstab[] = { 0, 536, 1460, 8960 }; static MD5_CTX syn_ctx; #define MD5Add(v) MD5Update(&syn_ctx, (u_char *)&v, sizeof(v)) struct md5_add { u_int32_t laddr, faddr; u_int32_t secbits[4]; u_int16_t lport, fport; }; #ifdef CTASSERT CTASSERT(sizeof(struct md5_add) == 28); #endif /* * Consider the problem of a recreated (and retransmitted) cookie. If the * original SYN was accepted, the connection is established. The second * SYN is inflight, and if it arrives with an ISN that falls within the * receive window, the connection is killed. 
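 *
 * An aside on the layered cookie above: the low SYNCOOKIE_WNDBITS (5)
 * bits select which rotating secret signed the cookie, the next 2
 * bits index tcp_msstab[], the whole word is XORed with the peer's
 * ISS, and the bits above the window index are further XORed with the
 * MD5 output, so syncookie_lookup() can strip the hash and recover
 * both indices.  The bare packing, restated standalone:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
#include <stdint.h>

#define WNDBITS	5			/* SYNCOOKIE_WNDBITS */
#define WNDMASK	((1u << WNDBITS) - 1)

static uint32_t
cookie_pack(uint32_t secret_idx, uint32_t mss_idx)
{
	return ((mss_idx << WNDBITS) | secret_idx);
}

static void
cookie_unpack(uint32_t data, uint32_t *secret_idx, uint32_t *mss_idx)
{
	*secret_idx = data & WNDMASK;
	*mss_idx = (data >> WNDBITS) & 3;	/* tcp_msstab has 4 slots */
}
#endif
/*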
/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */
static u_int32_t
syncookie_generate(struct syncache *sc, u_int32_t *flowid)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;
	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	*flowid = md5_buffer[1];
	return (data);
}

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;
	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
	sc->sc_tp = sototcpcb(so);
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		if (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index ff799bc9aaf8..0c65b8eb1e8c 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -1,742 +1,743 @@
/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)mbuf.h	8.5 (Berkeley) 2/19/95
 * $FreeBSD$
 */

#ifndef _SYS_MBUF_H_
#define	_SYS_MBUF_H_

/* XXX: These includes suck. Sorry! */
#include <sys/queue.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/uma.h>
#ifdef WITNESS
#include <sys/lock.h>
#endif
#endif

/*
 * Mbufs are of a single size, MSIZE (sys/param.h), which
 * includes overhead.  An mbuf may add a single "mbuf cluster" of size
 * MCLBYTES (also in sys/param.h), which has no additional overhead
 * and is used instead of the internal data area; this is done when
 * at least MINCLSIZE of data must be stored.  Additionally, it is possible
 * to allocate a separate buffer externally and attach it to the mbuf in
 * a way similar to that of mbuf clusters.
 */
#define	MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define	MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */
#define	MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define	M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */
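/*
 * Editorial sketch (not in the original header): concrete numbers for the
 * size macros above, assuming a 32-bit platform where MSIZE is 256 and
 * struct m_hdr and struct pkthdr each occupy 24 bytes (all three figures
 * are platform-dependent):
 *
 *	MLEN      = 256 - 24 = 232	bytes of data in an ordinary mbuf
 *	MHLEN     = 232 - 24 = 208	bytes of data alongside a pkthdr
 *	MINCLSIZE = 209			payloads this large go to a cluster
 */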
#ifdef _KERNEL
/*-
 * Macros for type conversion:
 * mtod(m, t)	-- Convert mbuf pointer to data pointer of correct type.
 * dtom(x)	-- Convert data pointer within mbuf to mbuf pointer (XXX).
 */
#define	mtod(m, t)	((t)((m)->m_data))
#define	dtom(x)		((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))

/*
 * Argument structure passed to UMA routines during mbuf and packet
 * allocations.
 */
struct mb_args {
	int	flags;	/* Flags for mbuf being allocated */
	short	type;	/* Type of mbuf being allocated */
};
#endif /* _KERNEL */

/*
 * Header present at the beginning of every mbuf.
 */
struct m_hdr {
	struct mbuf	*mh_next;	/* next buffer in chain */
	struct mbuf	*mh_nextpkt;	/* next chain in queue/record */
	caddr_t		 mh_data;	/* location of data */
	int		 mh_len;	/* amount of data in this mbuf */
	int		 mh_flags;	/* flags; see below */
	short		 mh_type;	/* type of data in this mbuf */
};

/*
 * Packet tag structure (see below for details).
 */
struct m_tag {
	SLIST_ENTRY(m_tag)	m_tag_link;	/* List of packet tags */
	u_int16_t		m_tag_id;	/* Tag ID */
	u_int16_t		m_tag_len;	/* Length of data */
	u_int32_t		m_tag_cookie;	/* ABI/Module ID */
	void			(*m_tag_free)(struct m_tag *);
};

/*
 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
 */
struct pkthdr {
	struct ifnet	*rcvif;		/* rcv interface */
	int		 len;		/* total packet length */
	/* variables for ip and tcp reassembly */
	void		*header;	/* pointer to packet header */
	/* variables for hardware checksum */
	int		 csum_flags;	/* flags regarding checksum */
	int		 csum_data;	/* data field used by csum routines */
	SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
};

/*
 * Description of external storage mapped into mbuf; valid only if M_EXT is set.
 */
struct m_ext {
	caddr_t		 ext_buf;	/* start of buffer */
	void		(*ext_free)	/* free routine if not the usual */
			    (void *, void *);
	void		*ext_args;	/* optional argument pointer */
	u_int		 ext_size;	/* size of buffer, for ext_free */
	u_int		*ref_cnt;	/* pointer to ref count info */
	int		 ext_type;	/* type of external storage */
};

/*
 * The core of the mbuf object along with some shortcut defines for
 * practical purposes.
 */
struct mbuf {
	struct m_hdr	m_hdr;
	union {
		struct {
			struct pkthdr	MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct m_ext	MH_ext;	/* M_EXT set */
				char		MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];	/* !M_PKTHDR, !M_EXT */
	} M_dat;
};
#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_act		m_nextpkt
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext		M_dat.MH.MH_dat.MH_ext
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf

/*
 * mbuf flags.
 */
#define	M_EXT		0x0001	/* has associated external storage */
#define	M_PKTHDR	0x0002	/* start of record */
#define	M_EOR		0x0004	/* end of record */
#define	M_RDONLY	0x0008	/* associated data is marked read-only */
#define	M_PROTO1	0x0010	/* protocol-specific */
#define	M_PROTO2	0x0020	/* protocol-specific */
#define	M_PROTO3	0x0040	/* protocol-specific */
#define	M_PROTO4	0x0080	/* protocol-specific */
#define	M_PROTO5	0x0100	/* protocol-specific */
#define	M_SKIP_FIREWALL	0x4000	/* skip firewall processing */
#define	M_FREELIST	0x8000	/* mbuf is on the free list */
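/*
 * Editorial sketch (not in the original header): which data area is active.
 * m_dat serves a plain mbuf, m_pktdat one with M_PKTHDR set, and the
 * external buffer one with M_EXT set; m_data always points into whichever
 * area applies, so consumers only ever go through mtod(), e.g.:
 *
 *	struct ip *ip;
 *
 *	if (m->m_len < sizeof(struct ip))
 *		m = m_pullup(m, sizeof(struct ip));
 *	if (m == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */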
/*
 * mbuf pkthdr flags (also stored in m_flags).
 */
#define	M_BCAST		0x0200	/* send/received as link-level broadcast */
#define	M_MCAST		0x0400	/* send/received as link-level multicast */
#define	M_FRAG		0x0800	/* packet is a fragment of a larger packet */
#define	M_FIRSTFRAG	0x1000	/* packet is first fragment */
#define	M_LASTFRAG	0x2000	/* packet is last fragment */

/*
 * External buffer types: identify ext_buf type.
 */
#define	EXT_CLUSTER	1	/* mbuf cluster */
#define	EXT_SFBUF	2	/* sendfile(2)'s sf_bufs */
#define	EXT_PACKET	3	/* came out of Packet zone */
#define	EXT_NET_DRV	100	/* custom ext_buf provided by net driver(s) */
#define	EXT_MOD_TYPE	200	/* custom module's ext_buf type */
#define	EXT_DISPOSABLE	300	/* can throw this buffer away w/page flipping */
#define	EXT_EXTREF	400	/* has externally maintained ref_cnt ptr */

/*
 * Flags copied when copying m_pkthdr.
 */
#define	M_COPYFLAGS	(M_PKTHDR|M_EOR|M_RDONLY|M_PROTO1|M_PROTO2|\
			    M_PROTO3|M_PROTO4|M_PROTO5|M_SKIP_FIREWALL|\
			    M_BCAST|M_MCAST|M_FRAG|M_FIRSTFRAG|M_LASTFRAG)

/*
 * Flags indicating hw checksum support and sw checksum requirements.
 */
#define	CSUM_IP		0x0001	/* will csum IP */
#define	CSUM_TCP	0x0002	/* will csum TCP */
#define	CSUM_UDP	0x0004	/* will csum UDP */
#define	CSUM_IP_FRAGS	0x0008	/* will csum IP fragments */
#define	CSUM_FRAGMENT	0x0010	/* will do IP fragmentation */

#define	CSUM_IP_CHECKED	0x0100	/* did csum IP */
#define	CSUM_IP_VALID	0x0200	/*   ... the csum is valid */
#define	CSUM_DATA_VALID	0x0400	/* csum_data field is valid */
#define	CSUM_PSEUDO_HDR	0x0800	/* csum_data has pseudo hdr */

#define	CSUM_DELAY_DATA	(CSUM_TCP | CSUM_UDP)
#define	CSUM_DELAY_IP	(CSUM_IP)	/* XXX add ipv6 here too? */

/*
 * mbuf types.
 */
#define	MT_NOTMBUF	0	/* USED INTERNALLY ONLY! Object is not mbuf */
#define	MT_DATA		1	/* dynamic (data) allocation */
#define	MT_HEADER	2	/* packet header */
#if 0
#define	MT_SOCKET	3	/* socket structure */
#define	MT_PCB		4	/* protocol control block */
#define	MT_RTABLE	5	/* routing tables */
#define	MT_HTABLE	6	/* IMP host tables */
#define	MT_ATABLE	7	/* address resolution tables */
#endif
#define	MT_SONAME	8	/* socket name */
#if 0
#define	MT_SOOPTS	10	/* socket options */
#endif
#define	MT_FTABLE	11	/* fragment reassembly header */
#if 0
#define	MT_RIGHTS	12	/* access rights */
#define	MT_IFADDR	13	/* interface address */
#endif
#define	MT_CONTROL	14	/* extra-data protocol message */
#define	MT_OOBDATA	15	/* expedited data  */
#define	MT_NTYPES	16	/* number of mbuf types for mbtypes[] */

/*
 * General mbuf allocator statistics structure.
 */
struct mbstat {
	u_long	m_mbufs;	/* XXX */
	u_long	m_mclusts;	/* XXX */

	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* XXX: times m_copym failed */
	u_long	m_mpfail;	/* XXX: times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */

	/* Number of mbtypes (gives # elems in mbtypes[] array) */
	short	m_numtypes;

	/* XXX: Sendfile stats should eventually move to their own struct. */
	u_long	sf_iocnt;	/* times sendfile had to do disk I/O */
	u_long	sf_allocfail;	/* times sfbuf allocation failed */
	u_long	sf_allocwait;	/* times sfbuf allocation had to wait */
};
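/*
 * Editorial sketch (not in the original header): how the two halves of the
 * checksum flags meet.  A protocol requests offload on output by setting
 * the CSUM_DELAY_* bits, while a driver reports a verified inbound
 * checksum roughly like this:
 *
 *	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *	m->m_pkthdr.csum_data = 0xffff;
 *
 * after which TCP/UDP input can skip the software in_cksum() entirely.
 */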
/*
 * Flags specifying how an allocation should be made.
 *
 * The flag to use is as follows:
 * - M_DONTWAIT or M_NOWAIT from an interrupt handler to not block allocation.
 * - M_WAIT or M_WAITOK or M_TRYWAIT from wherever it is safe to block.
 *
 * M_DONTWAIT/M_NOWAIT means that we will not block the thread explicitly
 * and if we cannot allocate immediately we may return NULL,
 * whereas M_WAIT/M_WAITOK/M_TRYWAIT means that if we cannot allocate
 * resources we will block until they are available, and thus never
 * return NULL.
 *
 * XXX Eventually just phase this out to use M_WAITOK/M_NOWAIT.
 */
#define	MBTOM(how)	(how)
#define	M_DONTWAIT	M_NOWAIT
#define	M_TRYWAIT	M_WAITOK
#define	M_WAIT		M_WAITOK

#ifdef _KERNEL
/*-
 * mbuf external reference count management macros.
 *
 * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing
 *     the external buffer ext_buf.
 *
 * MEXT_REM_REF(m): remove reference to m_ext object.
 *
 * MEXT_ADD_REF(m): add reference to m_ext object already
 *     referred to by (m).
 */
#define	MEXT_IS_REF(m)	(*((m)->m_ext.ref_cnt) > 1)

#define	MEXT_REM_REF(m) do {					\
	KASSERT(*((m)->m_ext.ref_cnt) > 0, ("m_ext refcnt <= 0")); \
	atomic_subtract_int((m)->m_ext.ref_cnt, 1);		\
} while(0)

#define	MEXT_ADD_REF(m)	atomic_add_int((m)->m_ext.ref_cnt, 1)

#ifdef WITNESS
#define	MBUF_CHECKSLEEP(how) do {				\
	if (how == M_WAITOK)					\
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,	\
		    "Sleeping in \"%s\"", __func__);		\
} while(0)
#else
#define	MBUF_CHECKSLEEP(how)
#endif

/*
 * Network buffer allocation API
 *
 * The rest of it is defined in kern/subr_mbuf.c
 */
extern uma_zone_t	zone_mbuf;
extern uma_zone_t	zone_clust;
extern uma_zone_t	zone_pack;

static __inline struct mbuf	*m_get(int how, short type);
static __inline struct mbuf	*m_gethdr(int how, short type);
static __inline struct mbuf	*m_getcl(int how, short type, int flags);
static __inline struct mbuf	*m_getclr(int how, short type);	/* XXX */
static __inline struct mbuf	*m_free(struct mbuf *m);
static __inline void		 m_clget(struct mbuf *m, int how);
static __inline void		 m_chtype(struct mbuf *m, short new_type);
void				 mb_free_ext(struct mbuf *);

static __inline struct mbuf *
m_get(int how, short type)
{
	struct mb_args args;

	args.flags = 0;
	args.type = type;
	return (uma_zalloc_arg(zone_mbuf, &args, how));
}

/* XXX This should be deprecated, very little use. */
static __inline struct mbuf *
m_getclr(int how, short type)
{
	struct mbuf *m;
	struct mb_args args;

	args.flags = 0;
	args.type = type;
	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

static __inline struct mbuf *
m_gethdr(int how, short type)
{
	struct mb_args args;

	args.flags = M_PKTHDR;
	args.type = type;
	return (uma_zalloc_arg(zone_mbuf, &args, how));
}

static __inline struct mbuf *
m_getcl(int how, short type, int flags)
{
	struct mb_args args;

	args.flags = flags;
	args.type = type;
	return (uma_zalloc_arg(zone_pack, &args, how));
}

static __inline struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

#ifdef INVARIANTS
	m->m_flags |= M_FREELIST;
#endif
	if (m->m_flags & M_EXT)
		mb_free_ext(m);
	else
		uma_zfree(zone_mbuf, m);
	return (n);
}

static __inline void
m_clget(struct mbuf *m, int how)
{
	m->m_ext.ext_buf = NULL;
	uma_zalloc_arg(zone_clust, m, how);
}

static __inline void
m_chtype(struct mbuf *m, short new_type)
{
	m->m_type = new_type;
}
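/*
 * Editorial sketch (not in the original header): the common allocation
 * pattern built from the inlines above.  Grab a packet header mbuf, then
 * attach a cluster when the payload will not fit in MHLEN bytes:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	if (len > MHLEN) {
 *		m_clget(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_free(m);
 *			return (ENOBUFS);
 *		}
 *	}
 *
 * m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR) performs both steps with a
 * single allocation from the packet zone.
 */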
/*
 * mbuf, cluster, and external object allocation macros
 * (for compatibility purposes).
 */
/* NB: M_COPY_PKTHDR is deprecated.  Use M_MOVE_PKTHDR or m_dup_pkthdr. */
#define	M_MOVE_PKTHDR(to, from)	m_move_pkthdr((to), (from))
#define	MGET(m, how, type)	((m) = m_get((how), (type)))
#define	MGETHDR(m, how, type)	((m) = m_gethdr((how), (type)))
#define	MCLGET(m, how)		m_clget((m), (how))
#define	MEXTADD(m, buf, size, free, args, flags, type)		\
	m_extadd((m), (caddr_t)(buf), (size), (free), (args), (flags), (type))

/*
 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this
 * can be both the local data payload, or an external buffer area,
 * depending on whether M_EXT is set).
 */
#define	M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) && (!((m)->m_flags \
			    & M_EXT) || !MEXT_IS_REF(m)))

/* Check if the supplied mbuf has a packet header, or else panic. */
#define	M_ASSERTPKTHDR(m)					\
	KASSERT(m != NULL && m->m_flags & M_PKTHDR,		\
	    ("%s: no mbuf packet header!", __func__))

/* Ensure that the supplied mbuf is a valid, non-free mbuf. */
#define	M_ASSERTVALID(m)					\
	KASSERT((((struct mbuf *)m)->m_flags & M_FREELIST) == 0, \
	    ("%s: attempted use of a free mbuf!", __func__))

/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define	M_ALIGN(m, len) do {					\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);	\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define	MH_ALIGN(m, len) do {					\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);	\
} while (0)

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 *
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define	M_LEADINGSPACE(m)					\
	((m)->m_flags & M_EXT ?					\
	    (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 *
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define	M_TRAILINGSPACE(m)					\
	((m)->m_flags & M_EXT ?					\
	    (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size \
		- ((m)->m_data + (m)->m_len) : 0) :		\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If the allocation fails, the original mbuf chain is freed and m is
 * set to NULL.
 */
#define	M_PREPEND(m, plen, how) do {				\
	struct mbuf **_mmp = &(m);				\
	struct mbuf *_mm = *_mmp;				\
	int _mplen = (plen);					\
	int __mhow = (how);					\
								\
	MBUF_CHECKSLEEP(how);					\
	if (M_LEADINGSPACE(_mm) >= _mplen) {			\
		_mm->m_data -= _mplen;				\
		_mm->m_len += _mplen;				\
	} else							\
		_mm = m_prepend(_mm, _mplen, __mhow);		\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)		\
		_mm->m_pkthdr.len += _mplen;			\
	*_mmp = _mm;						\
} while (0)

/*
 * Change mbuf to new type.
 * This is a relatively expensive operation and should be avoided.
 */
#define	MCHTYPE(m, t)	m_chtype((m), (t))

/* Length to m_copy to copy all. */
#define	M_COPYALL	1000000000
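/*
 * Editorial sketch (not in the original header): typical M_PREPEND use
 * when pushing a protocol header onto an outbound chain.  The macro
 * reuses leading space when the data area is writable, and otherwise
 * falls back to m_prepend(), which may allocate a new mbuf:
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	ip = mtod(m, struct ip *);
 */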
/* Compatibility with 4.3. */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), M_DONTWAIT)

extern int		max_datalen;	/* MHLEN - max_hdr */
extern int		max_hdr;	/* Largest link + protocol header */
extern int		max_linkhdr;	/* Largest link-level header */
extern int		max_protohdr;	/* Largest protocol header */
extern struct mbstat	mbstat;		/* General mbuf stats/infos */
extern int		nmbclusters;	/* Maximum number of clusters */

struct uio;

void		 m_adj(struct mbuf *, int);
int		 m_apply(struct mbuf *, int, int,
		    int (*)(void *, void *, u_int), void *);
void		 m_cat(struct mbuf *, struct mbuf *);
void		 m_extadd(struct mbuf *, caddr_t, u_int,
		    void (*)(void *, void *), void *, int, int);
void		 m_copyback(struct mbuf *, int, int, c_caddr_t);
void		 m_copydata(const struct mbuf *, int, int, caddr_t);
struct mbuf	*m_copym(struct mbuf *, int, int, int);
struct mbuf	*m_copypacket(struct mbuf *, int);
void		 m_copy_pkthdr(struct mbuf *, struct mbuf *);
struct mbuf	*m_defrag(struct mbuf *, int);
struct mbuf	*m_devget(char *, int, int, struct ifnet *,
		    void (*)(char *, caddr_t, u_int));
struct mbuf	*m_dup(struct mbuf *, int);
int		 m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
u_int		 m_fixhdr(struct mbuf *);
struct mbuf	*m_fragment(struct mbuf *, int, int);
void		 m_freem(struct mbuf *);
struct mbuf	*m_getm(struct mbuf *, int, int, short);
struct mbuf	*m_getptr(struct mbuf *, int, int *);
u_int		 m_length(struct mbuf *, struct mbuf **);
void		 m_move_pkthdr(struct mbuf *, struct mbuf *);
struct mbuf	*m_prepend(struct mbuf *, int, int);
void		 m_print(const struct mbuf *);
struct mbuf	*m_pulldown(struct mbuf *, int, int, int *);
struct mbuf	*m_pullup(struct mbuf *, int);
struct mbuf	*m_split(struct mbuf *, int, int);
struct mbuf	*m_uiotombuf(struct uio *, int, int);

/*-
 * Network packets may have annotations attached by affixing a list
 * of "packet tags" to the pkthdr structure.  Packet tags are
 * dynamically allocated semi-opaque data structures that have
 * a fixed header (struct m_tag) that specifies the size of the
 * memory block and a (cookie, type) pair that identifies it.
 * The cookie is a 32-bit unique unsigned value used to identify
 * a module or ABI.  By convention this value is chosen as the
 * date+time that the module is created, expressed as the number of
 * seconds since the epoch (e.g., using date -u +'%s').  The type value
 * is an ABI/module-specific value that identifies a particular annotation
 * and is private to the module.  For compatibility with systems
 * like OpenBSD that define packet tags w/o an ABI/module cookie,
 * the value PACKET_ABI_COMPAT is used to implement m_tag_get and
 * m_tag_find compatibility shim functions and several tag types are
 * defined below.  Users that do not require compatibility should use
 * a private cookie value so that packet tag-related definitions
 * can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default
 * memory alignment implemented by malloc.  To reference private data
 * one can use a construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = (struct foo *)(mtag + 1);
 *
 * if the alignment of struct m_tag is sufficient for referencing members
 * of struct foo.  Otherwise it is necessary to embed struct m_tag within
 * the private data structure to ensure proper alignment; e.g.,
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *)m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */
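/*
 * Editorial sketch (not in the original header): defining and attaching a
 * module-private tag.  The cookie value and tag layout are made up for
 * illustration:
 *
 *	#define FOO_MODULE_COOKIE 1094922570	(hypothetical, date -u +'%s')
 *	#define FOO_TAG_STATE     1
 *
 *	struct m_tag *mtag;
 *
 *	mtag = m_tag_alloc(FOO_MODULE_COOKIE, FOO_TAG_STATE,
 *	    sizeof(u_int32_t), M_NOWAIT);
 *	if (mtag != NULL) {
 *		*(u_int32_t *)(mtag + 1) = state;
 *		m_tag_prepend(m, mtag);
 *	}
 *
 * The receiver finds it again with
 * m_tag_locate(m, FOO_MODULE_COOKIE, FOO_TAG_STATE, NULL).
 */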
/*
 * Persistent tags stay with an mbuf until the mbuf is reclaimed.
 * Otherwise tags are expected to ``vanish'' when they pass through
 * a network interface.  For most interfaces this happens normally
 * as the tags are reclaimed when the mbuf is free'd.  However in
 * some special cases reclaiming must be done manually.  An example
 * is packets that pass through the loopback interface.  Also, one
 * must be careful to do this when ``turning around'' packets (e.g.,
 * icmp_reflect).
 *
 * To mark a tag persistent, bit-or this flag into the tag id when
 * defining it.  The tag will then be treated as described above.
 */
#define	MTAG_PERSISTENT				0x800

#define	PACKET_TAG_NONE				0  /* Nada */

/* Packet tags for use with PACKET_ABI_COMPAT. */
#define	PACKET_TAG_IPSEC_IN_DONE		1  /* IPsec applied, in */
#define	PACKET_TAG_IPSEC_OUT_DONE		2  /* IPsec applied, out */
#define	PACKET_TAG_IPSEC_IN_CRYPTO_DONE		3  /* NIC IPsec crypto done */
#define	PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	4  /* NIC IPsec crypto req'ed */
#define	PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO	5  /* NIC notifies IPsec */
#define	PACKET_TAG_IPSEC_PENDING_TDB		6  /* Reminder to do IPsec */
#define	PACKET_TAG_BRIDGE			7  /* Bridge processing done */
#define	PACKET_TAG_GIF				8  /* GIF processing done */
#define	PACKET_TAG_GRE				9  /* GRE processing done */
#define	PACKET_TAG_IN_PACKET_CHECKSUM		10 /* NIC checksumming done */
#define	PACKET_TAG_ENCAP			11 /* Encap. processing */
#define	PACKET_TAG_IPSEC_SOCKET			12 /* IPSEC socket ref */
#define	PACKET_TAG_IPSEC_HISTORY		13 /* IPSEC history */
#define	PACKET_TAG_IPV6_INPUT			14 /* IPV6 input processing */
#define	PACKET_TAG_DUMMYNET			15 /* dummynet info */
#define	PACKET_TAG_DIVERT			17 /* divert info */
#define	PACKET_TAG_IPFORWARD			18 /* ipforward info */
#define	PACKET_TAG_MACLABEL	(19 | MTAG_PERSISTENT) /* MAC label */
#define	PACKET_TAG_PF_ROUTED			21 /* PF routed, avoid loops */
#define	PACKET_TAG_PF_FRAGCACHE			22 /* PF fragment cached */
#define	PACKET_TAG_PF_QID			23 /* PF ALTQ queue id */
#define	PACKET_TAG_PF_TAG			24 /* PF tagged */
#define	PACKET_TAG_RTSOCKFAM			25 /* rtsock sa family */
#define	PACKET_TAG_PF_TRANSLATE_LOCALHOST	26 /* PF translate localhost */
+#define	PACKET_TAG_IPOPTIONS			27 /* Saved IP options */

/* Packet tag routines. */
struct m_tag	*m_tag_alloc(u_int32_t, int, int, int);
void		 m_tag_delete(struct mbuf *, struct m_tag *);
void		 m_tag_delete_chain(struct mbuf *, struct m_tag *);
struct m_tag	*m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
struct m_tag	*m_tag_copy(struct m_tag *, int);
int		 m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
void		 m_tag_delete_nonpersistent(struct mbuf *);

/*
 * Initialize the list of tags associated with an mbuf.
 */
static __inline void
m_tag_init(struct mbuf *m)
{
	SLIST_INIT(&m->m_pkthdr.tags);
}

/*
 * Set up the contents of a tag.  Note that this does not
 * fill in the free method; the caller is expected to do that.
 *
 * XXX probably should be called m_tag_init, but that was
 * already taken.
 */
static __inline void
m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
{
	t->m_tag_id = type;
	t->m_tag_len = len;
	t->m_tag_cookie = cookie;
}

/*
 * Reclaim resources associated with a tag.
 */
static __inline void
m_tag_free(struct m_tag *t)
{
	(*t->m_tag_free)(t);
}

/*
 * Return the first tag associated with an mbuf.
 */
static __inline struct m_tag *
m_tag_first(struct mbuf *m)
{
	return (SLIST_FIRST(&m->m_pkthdr.tags));
}

/*
 * Return the next tag in the list of tags associated with an mbuf.
 */
static __inline struct m_tag *
m_tag_next(struct mbuf *m, struct m_tag *t)
{
	return (SLIST_NEXT(t, m_tag_link));
}
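/*
 * Editorial sketch (not in the original header): walking every tag on an
 * mbuf with the accessors above, deleting those owned by one module.
 * m_tag_delete() both unlinks and frees, so the next pointer must be
 * fetched before the current tag is deleted:
 *
 *	struct m_tag *t, *next;
 *
 *	for (t = m_tag_first(m); t != NULL; t = next) {
 *		next = m_tag_next(m, t);
 *		if (t->m_tag_cookie == FOO_MODULE_COOKIE)
 *			m_tag_delete(m, t);
 *	}
 *
 * (FOO_MODULE_COOKIE is the hypothetical cookie from the earlier sketch.)
 */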
/*
 * Prepend a tag to the list of tags associated with an mbuf.
 */
static __inline void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{
	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}

/*
 * Unlink a tag from the list of tags associated with an mbuf.
 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{
	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/* These are for OpenBSD compatibility. */
#define	MTAG_ABI_COMPAT	0		/* compatibility ABI */

static __inline struct m_tag *
m_tag_get(int type, int length, int wait)
{
	return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait));
}

static __inline struct m_tag *
m_tag_find(struct mbuf *m, int type, struct m_tag *start)
{
	return (SLIST_EMPTY(&m->m_pkthdr.tags) ? NULL :
	    m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
}
#endif /* _KERNEL */

#endif /* !_SYS_MBUF_H_ */