diff --git a/share/man/man4/ipsec.4 b/share/man/man4/ipsec.4
index 96a10dfb7700..9fd6207c2f14 100644
--- a/share/man/man4/ipsec.4
+++ b/share/man/man4/ipsec.4
@@ -1,444 +1,448 @@
.\" $KAME: ipsec.4,v 1.17 2001/06/27 15:25:10 itojun Exp $
.\"
.\" Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 3. Neither the name of the project nor the names of its contributors
.\" may be used to endorse or promote products derived from this software
.\" without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd February 6, 2017
+.Dd March 4, 2025
.Dt IPSEC 4
.Os
.Sh NAME
.Nm ipsec
.Nd Internet Protocol Security protocol
.Sh SYNOPSIS
.Cd "options IPSEC"
.Cd "options IPSEC_SUPPORT"
.Cd "device crypto"
.Pp
.In sys/types.h
.In netinet/in.h
.In netipsec/ipsec.h
.In netipsec/ipsec6.h
.Sh DESCRIPTION
.Nm
is a security protocol implemented within the Internet Protocol layer of the
networking stack.
.Nm
is defined for both IPv4 and IPv6
.Xr ( inet 4
and
.Xr inet6 4 ) .
.Nm
is a set of protocols,
.Tn ESP
(for Encapsulating Security Payload),
.Tn AH
(for Authentication Header),
and
.Tn IPComp
(for IP Payload Compression Protocol)
that provide security services for IP datagrams.
AH both authenticates and guarantees the integrity of an IP packet by
attaching a cryptographic checksum computed using one-way hash functions.
ESP, in addition, prevents unauthorized parties from reading the payload of
an IP packet by also encrypting it.
IPComp tries to increase communication performance by compressing the IP
payload, thus reducing the amount of data sent.
This helps nodes on slow links that have sufficient computing power.
.Nm
operates in one of two modes: transport mode or tunnel mode.
Transport mode is used to protect peer-to-peer communication between end
nodes.
Tunnel mode encapsulates IP packets within other IP packets and is designed
for security gateways such as VPN endpoints.
.Pp
System configuration requires the
.Xr crypto 4
subsystem.
.Pp
Packets can be passed to a virtual
.Xr enc 4
interface to perform packet filtering before outbound encryption and after
inbound decapsulation.
.Pp
To properly filter on the inner packets of an
.Nm
tunnel with firewalls, you can change the values of the following sysctls:
.Bl -column net.inet6.ipsec6.filtertunnel default enable
.It Sy Name Ta Sy Default Ta Sy Enable
.It net.inet.ipsec.filtertunnel Ta 0 Ta 1
.It net.inet6.ipsec6.filtertunnel Ta 0 Ta 1
.El
.\"
.Ss Kernel interface
.Nm
is controlled by a key management engine and a policy engine, both of which
reside in the operating system kernel.
Key management is the process of associating keys with security associations,
also known as SAs.
Policy management dictates when new security associations are created or
destroyed.
.Pp
The key management engine can be accessed from userland by using
.Dv PF_KEY
sockets.
The
.Dv PF_KEY
socket API is defined in RFC2367.
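.Pp
For example, a key management daemon would obtain a
.Dv PF_KEY
socket as follows (a minimal sketch, error handling omitted):
.Bd -literal -offset indent
#include <sys/types.h>
#include <sys/socket.h>
#include <net/pfkeyv2.h>

int s;

/* Version 2 of the PF_KEY API, as defined in RFC2367. */
s = socket(PF_KEY, SOCK_RAW, PF_KEY_V2);
.Ed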
.Pp
The policy engine is controlled by an extension to the
.Dv PF_KEY
API,
.Xr setsockopt 2
operations, and the
.Xr sysctl 3
interface.
The kernel implements an extended version of the
.Dv PF_KEY
interface and allows the programmer to define IPsec policies which are
similar to per-packet filters.
The
.Xr setsockopt 2
interface is used to define per-socket behavior, and the
.Xr sysctl 3
interface is used to define host-wide default behavior.
.Pp
The kernel code does not implement a dynamic encryption key exchange
protocol such as IKE (Internet Key Exchange).
Key exchange protocols are beyond what is necessary in the kernel and should
be implemented as daemon processes which call the
.Nm
APIs.
.\"
.Ss Policy management
IPsec policies can be managed in one of two ways: either by configuring
per-socket policies using the
.Xr setsockopt 2
system call, or by configuring kernel level packet filter-based policies
using the
.Dv PF_KEY
interface.
With the
.Xr setkey 8
utility you can define IPsec policies against packets using rules similar to
packet filtering rules.
Refer to
.Xr setkey 8
on how to use it.
.Pp
Depending on the socket's address family, the
.Dv IPPROTO_IP
or
.Dv IPPROTO_IPV6
transport level and the
.Dv IP_IPSEC_POLICY
or
.Dv IPV6_IPSEC_POLICY
socket options may be used to configure per-socket security policies.
A properly-formed IPsec policy specification structure can be created using
the
.Xr ipsec_set_policy 3
function and used as the socket option value for the
.Xr setsockopt 2
call.
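.Pp
For example, the following minimal sketch (error handling mostly omitted)
requires ESP in transport mode for packets sent on an IPv4 socket; the
policy string follows the syntax described in
.Xr ipsec_set_policy 3 :
.Bd -literal -offset indent
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netipsec/ipsec.h>
#include <string.h>
#include <stdlib.h>

char policy[] = "out ipsec esp/transport//require";
char *buf;
int s;

s = socket(AF_INET, SOCK_DGRAM, 0);
/* Compile the text policy into a policy specification buffer. */
buf = ipsec_set_policy(policy, strlen(policy));
if (s != -1 && buf != NULL)
	(void)setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY,
	    buf, ipsec_get_policylen(buf));
free(buf);
.Ed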
.Pp
When setting policies using the
.Xr setkey 8
command, the
.Dq Li default
option instructs the system to use its default policy, as explained below,
for processing packets.
The following sysctl variables are available for configuring the system's
IPsec behavior.
The variables can have one of two values.
A
.Li 1
means
.Dq Li use :
if a security association exists it is used, but if none exists the packets
are not processed by IPsec.
The value
.Li 2
is synonymous with
.Dq Li require :
a security association must exist for the packets to pass; otherwise they
are dropped.
These terms are defined in
.Xr ipsec_set_policy 3 .
.Bl -column net.inet6.ipsec6.esp_trans_deflev integerxxx
.It Sy Name Ta Sy Type Ta Sy Changeable
.It net.inet.ipsec.esp_trans_deflev Ta integer Ta yes
.It net.inet.ipsec.esp_net_deflev Ta integer Ta yes
.It net.inet.ipsec.ah_trans_deflev Ta integer Ta yes
.It net.inet.ipsec.ah_net_deflev Ta integer Ta yes
.It net.inet6.ipsec6.esp_trans_deflev Ta integer Ta yes
.It net.inet6.ipsec6.esp_net_deflev Ta integer Ta yes
.It net.inet6.ipsec6.ah_trans_deflev Ta integer Ta yes
.It net.inet6.ipsec6.ah_net_deflev Ta integer Ta yes
.El
.Pp
If the kernel does not find a matching system-wide policy, the default value
is applied.
The system-wide default policy is specified by the following
.Xr sysctl 8
variables.
.Li 0
means
.Dq Li discard ,
which asks the kernel to drop the packet.
.Li 1
means
.Dq Li none .
.Bl -column net.inet6.ipsec6.def_policy integerxxx
.It Sy Name Ta Sy Type Ta Sy Changeable
.It net.inet.ipsec.def_policy Ta integer Ta yes
.It net.inet6.ipsec6.def_policy Ta integer Ta yes
.El
.\"
.Ss Miscellaneous sysctl variables
When the
.Nm
protocols are configured for use, all protocols are included in the system.
To selectively enable/disable protocols, use
.Xr sysctl 8 .
.Bl -column net.inet.ipcomp.ipcomp_enable
.It Sy Name Ta Sy Default
.It net.inet.esp.esp_enable Ta On
.It net.inet.ah.ah_enable Ta On
.It net.inet.ipcomp.ipcomp_enable Ta On
.El
.Pp
In addition, the following variables are accessible via
.Xr sysctl 8
for tweaking the kernel's IPsec behavior:
.Bl -column net.inet6.ipsec6.inbound_call_ike integerxxx
.It Sy Name Ta Sy Type Ta Sy Changeable
.It net.inet.ipsec.ah_cleartos Ta integer Ta yes
.It net.inet.ipsec.ah_offsetmask Ta integer Ta yes
.It net.inet.ipsec.dfbit Ta integer Ta yes
.It net.inet.ipsec.ecn Ta integer Ta yes
.It net.inet.ipsec.debug Ta integer Ta yes
.It net.inet.ipsec.natt_cksum_policy Ta integer Ta yes
.It net.inet.ipsec.check_policy_history Ta integer Ta yes
+.It net.inet.ipsec.random_id Ta integer Ta yes
.It net.inet6.ipsec6.ecn Ta integer Ta yes
.It net.inet6.ipsec6.debug Ta integer Ta yes
.El
.Pp
The variables are interpreted as follows:
.Bl -tag -width 6n
.It Li ipsec.ah_cleartos
If set to non-zero, the kernel clears the type-of-service field in the IPv4
header during AH authentication data computation.
This variable is used to get current systems to inter-operate with devices
that implement RFC1826 AH.
It should be set to non-zero (clear the type-of-service field) for RFC2402
conformance.
.It Li ipsec.ah_offsetmask
During AH authentication data computation, the kernel includes the 16-bit
fragment offset field (including flag bits) of the IPv4 header after
computing a logical AND with this variable.
The variable is used for inter-operating with devices that implement RFC1826
AH.
It should be set to zero (clear the fragment offset field during computation)
for RFC2402 conformance.
.It Li ipsec.dfbit
This variable configures the kernel behavior on IPv4 IPsec tunnel
encapsulation.
If set to 0, the DF bit on the outer IPv4 header is cleared.
If set to 1, the outer DF bit is set regardless of the inner DF bit.
If set to 2, the DF bit is copied from the inner header to the outer one.
The variable is supplied to conform to RFC2401 section 6.1.
.It Li ipsec.ecn
If set to non-zero, IPv4 IPsec tunnel encapsulation/decapsulation behavior
will be friendly to ECN (explicit congestion notification), as documented in
.Li draft-ietf-ipsec-ecn-02.txt .
See
.Xr gif 4
for more about this behavior.
.It Li ipsec.debug
If set to non-zero, debug messages will be generated via
.Xr syslog 3 .
.It Li ipsec.natt_cksum_policy
Controls how the kernel handles TCP and UDP checksums when ESP in UDP
encapsulation is used for IPsec transport mode.
If set to a non-zero value, the kernel fully recomputes checksums for inbound
TCP segments and UDP datagrams after they are decapsulated and decrypted.
If set to 0 and original addresses were configured for the corresponding SA
by the IKE daemon, the kernel incrementally recomputes checksums for inbound
TCP segments and UDP datagrams.
If addresses were not configured, the checksums are ignored.
.It Li ipsec.check_policy_history
Enables strict policy checking for inbound packets.
By default, inbound security policies check that packets handled by IPsec
have been decrypted and authenticated.
If this variable is set to a non-zero value, each packet handled by IPsec is
checked against the history of IPsec security associations.
The IPsec security protocol, mode, and SA addresses must match.
+.It Li ipsec.random_id
+Enables randomization of the IP ID field of encapsulated IPv4 packets.
+By default, ID randomization is not enabled.
.El
.Pp
Variables under the
.Li net.inet6.ipsec6
tree have similar meanings to those described above.
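.Pp
These variables can also be set programmatically; for example, a minimal
sketch that enables IPsec debug output via
.Xr sysctlbyname 3 :
.Bd -literal -offset indent
#include <sys/types.h>
#include <sys/sysctl.h>

int on = 1;

/* Equivalent to: sysctl net.inet.ipsec.debug=1 */
(void)sysctlbyname("net.inet.ipsec.debug", NULL, NULL,
    &on, sizeof(on));
.Ed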
.\"
.Sh PROTOCOLS
The
.Nm
protocol acts as a plug-in to the
.Xr inet 4
and
.Xr inet6 4
protocols and therefore supports most of the protocols defined upon those
IP-layer protocols.
The
.Xr icmp 4
and
.Xr icmp6 4
protocols may behave differently with
.Nm
because
.Nm
can prevent
.Xr icmp 4
or
.Xr icmp6 4
routines from looking into the IP payload.
.\"
.Sh SEE ALSO
.Xr ioctl 2 ,
.Xr socket 2 ,
.Xr ipsec_set_policy 3 ,
.Xr crypto 4 ,
.Xr enc 4 ,
.Xr icmp6 4 ,
.Xr if_ipsec 4 ,
.Xr intro 4 ,
.Xr ip6 4 ,
.Xr setkey 8 ,
.Xr sysctl 8
.\".Xr racoon 8
.Rs
.%A "S. Kent"
.%A "R. Atkinson"
.%T "IP Authentication Header"
.%O "RFC 2402"
.Re
.Rs
.%A "S. Kent"
.%A "R. Atkinson"
.%T "IP Encapsulating Security Payload (ESP)"
.%O "RFC 2406"
.Re
.Sh STANDARDS
.Rs
.%A Daniel L. McDonald
.%A Craig Metz
.%A Bao G. Phan
.%T "PF_KEY Key Management API, Version 2"
.%R RFC
.%N 2367
.Re
.Pp
.Rs
.%A "D. L. McDonald"
.%T "A Simple IP Security API Extension to BSD Sockets"
.%R internet draft
.%N "draft-mcdonald-simple-ipsec-api-03.txt"
.%O work in progress material
.Re
.Sh HISTORY
The original
.Nm
implementation appeared in the WIDE/KAME IPv6/IPsec stack.
.Pp
For
.Fx 5.0 ,
a fully locked IPsec implementation called fast_ipsec was brought in.
Its protocols drew heavily on the
.Ox
implementation of the
.Tn IPsec
protocols.
The policy management code was derived from the
.Tn KAME
implementation found in their
.Tn IPsec
protocols.
The fast_ipsec implementation lacked
.Xr ip6 4
support but made use of the
.Xr crypto 4
subsystem.
.Pp
For
.Fx 7.0 ,
.Xr ip6 4
support was added to fast_ipsec.
After this, the old KAME IPsec implementation was dropped, and fast_ipsec
became what is now the only
.Nm
implementation in
.Fx .
.Sh BUGS
There is no single standard for the policy engine API, so the policy engine
API described herein is specific to this implementation.
.Pp
AH and tunnel mode encapsulation may not work as you might expect.
If you configure an inbound
.Dq require
policy with an AH tunnel or any IPsec encapsulating policy with AH (like
.Dq Li esp/tunnel/A-B/use ah/transport/A-B/require ) ,
tunnelled packets will be rejected.
This is because the policy check is enforced on the inner packet on
reception, and AH authenticates the encapsulating (outer) packet, not the
encapsulated (inner) packet (so for the receiving kernel there is no sign of
authenticity).
The issue will be solved when we revamp our policy engine to keep all the
packet decapsulation history.
.Pp
When a large database of security associations or policies is present in the
kernel, the
.Dv SADB_DUMP
and
.Dv SADB_SPDDUMP
operations on
.Dv PF_KEY
sockets may fail due to lack of space.
Increasing the socket buffer size may alleviate this problem.
.Pp
The
.Tn IPcomp
protocol may occasionally fail because of
.Xr zlib 3
problems.
.Pp
This documentation needs more review.
diff --git a/sys/netinet/ip_carp.c b/sys/netinet/ip_carp.c
index 6fde7bd70c6b..c01d1bdd8cff 100644
--- a/sys/netinet/ip_carp.c
+++ b/sys/netinet/ip_carp.c
@@ -1,3104 +1,3104 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Michael Shalayeff.
 * Copyright (c) 2003 Ryan McBride.
* Copyright (c) 2011 Gleb Smirnoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_bpf.h" #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #include #include #include #include #include #endif #ifdef INET #include #include #endif #ifdef INET6 #include #include #include #include #include #include #endif #include #include #include #include #include static MALLOC_DEFINE(M_CARP, "CARP", "CARP addresses"); struct carp_softc { struct ifnet *sc_carpdev; /* Pointer to parent ifnet. */ struct ifaddr **sc_ifas; /* Our ifaddrs. */ carp_version_t sc_version; /* carp or VRRPv3 */ uint8_t sc_addr[ETHER_ADDR_LEN]; /* Our link level address. */ struct callout sc_ad_tmo; /* Advertising timeout. */ #ifdef INET struct callout sc_md_tmo; /* Master down timeout. */ #endif #ifdef INET6 struct callout sc_md6_tmo; /* XXX: Master down timeout. */ #endif struct mtx sc_mtx; int sc_vhid; union { struct { /* sc_version == CARP_VERSION_CARP */ int sc_advskew; int sc_advbase; struct in_addr sc_carpaddr; struct in6_addr sc_carpaddr6; uint64_t sc_counter; bool sc_init_counter; #define CARP_HMAC_PAD 64 unsigned char sc_key[CARP_KEY_LEN]; unsigned char sc_pad[CARP_HMAC_PAD]; SHA1_CTX sc_sha1; }; struct { /* sc_version == CARP_VERSION_VRRPv3 */ uint8_t sc_vrrp_prio; uint16_t sc_vrrp_adv_inter; uint16_t sc_vrrp_master_inter; }; }; int sc_naddrs; int sc_naddrs6; int sc_ifasiz; enum { INIT = 0, BACKUP, MASTER } sc_state; int sc_suppress; int sc_sendad_errors; #define CARP_SENDAD_MAX_ERRORS 3 int sc_sendad_success; #define CARP_SENDAD_MIN_SUCCESS 3 TAILQ_ENTRY(carp_softc) sc_list; /* On the carp_if list. */ LIST_ENTRY(carp_softc) sc_next; /* On the global list. 
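 * The list head is carp_list; the global list is protected by
 * carp_mtx (see the locking notes below).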
*/ }; struct carp_if { #ifdef INET int cif_naddrs; #endif #ifdef INET6 int cif_naddrs6; #endif TAILQ_HEAD(, carp_softc) cif_vrs; #ifdef INET struct ip_moptions cif_imo; #endif #ifdef INET6 struct ip6_moptions cif_im6o; #endif struct ifnet *cif_ifp; struct mtx cif_mtx; uint32_t cif_flags; #define CIF_PROMISC 0x00000001 }; /* Kernel equivalent of struct carpreq, but with more fields for new features. * */ struct carpkreq { int carpr_count; int carpr_vhid; int carpr_state; int carpr_advskew; int carpr_advbase; unsigned char carpr_key[CARP_KEY_LEN]; /* Everything above this is identical to carpreq */ struct in_addr carpr_addr; struct in6_addr carpr_addr6; carp_version_t carpr_version; uint8_t carpr_vrrp_priority; uint16_t carpr_vrrp_adv_inter; }; /* * Brief design of carp(4). * * Any carp-capable ifnet may have a list of carp softcs hanging off * its ifp->if_carp pointer. Each softc represents one unique virtual * host id, or vhid. The softc has a back pointer to the ifnet. All * softcs are joined in a global list, which has quite limited use. * * Any interface address that takes part in CARP negotiation has a * pointer to the softc of its vhid, ifa->ifa_carp. That could be either * AF_INET or AF_INET6 address. * * Although, one can get the softc's backpointer to ifnet and traverse * through its ifp->if_addrhead queue to find all interface addresses * involved in CARP, we keep a growable array of ifaddr pointers. This * allows us to avoid grabbing the IF_ADDR_LOCK() in many traversals that * do calls into the network stack, thus avoiding LORs. * * Locking: * * Each softc has a lock sc_mtx. It is used to synchronise carp_input_c(), * callout-driven events and ioctl()s. * * To traverse the list of softcs on an ifnet we use CIF_LOCK() or carp_sx. * To traverse the global list we use the mutex carp_mtx. * * Known issues with locking: * * - Sending ad, we put the pointer to the softc in an mtag, and no reference * counting is done on the softc. * - On module unload we may race (?) with packet processing thread * dereferencing our function pointers. */ /* Accept incoming CARP packets. */ VNET_DEFINE_STATIC(int, carp_allow) = 1; #define V_carp_allow VNET(carp_allow) /* Set DSCP in outgoing CARP packets. */ VNET_DEFINE_STATIC(int, carp_dscp) = 56; #define V_carp_dscp VNET(carp_dscp) /* Preempt slower nodes. */ VNET_DEFINE_STATIC(int, carp_preempt) = 0; #define V_carp_preempt VNET(carp_preempt) /* Log level. */ VNET_DEFINE_STATIC(int, carp_log) = 1; #define V_carp_log VNET(carp_log) /* Global advskew demotion. */ VNET_DEFINE_STATIC(int, carp_demotion) = 0; #define V_carp_demotion VNET(carp_demotion) /* Send error demotion factor. */ VNET_DEFINE_STATIC(int, carp_senderr_adj) = CARP_MAXSKEW; #define V_carp_senderr_adj VNET(carp_senderr_adj) /* Iface down demotion factor. 
*/ VNET_DEFINE_STATIC(int, carp_ifdown_adj) = CARP_MAXSKEW; #define V_carp_ifdown_adj VNET(carp_ifdown_adj) static int carp_allow_sysctl(SYSCTL_HANDLER_ARGS); static int carp_dscp_sysctl(SYSCTL_HANDLER_ARGS); static int carp_demote_adj_sysctl(SYSCTL_HANDLER_ARGS); SYSCTL_NODE(_net_inet, IPPROTO_CARP, carp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "CARP"); SYSCTL_PROC(_net_inet_carp, OID_AUTO, allow, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, &VNET_NAME(carp_allow), 0, carp_allow_sysctl, "I", "Accept incoming CARP packets"); SYSCTL_PROC(_net_inet_carp, OID_AUTO, dscp, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, carp_dscp_sysctl, "I", "DSCP value for carp packets"); SYSCTL_INT(_net_inet_carp, OID_AUTO, preempt, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(carp_preempt), 0, "High-priority backup preemption mode"); SYSCTL_INT(_net_inet_carp, OID_AUTO, log, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(carp_log), 0, "CARP log level"); SYSCTL_PROC(_net_inet_carp, OID_AUTO, demotion, CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, carp_demote_adj_sysctl, "I", "Adjust demotion factor (skew of advskew)"); SYSCTL_INT(_net_inet_carp, OID_AUTO, senderr_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(carp_senderr_adj), 0, "Send error demotion factor adjustment"); SYSCTL_INT(_net_inet_carp, OID_AUTO, ifdown_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(carp_ifdown_adj), 0, "Interface down demotion factor adjustment"); VNET_PCPUSTAT_DEFINE(struct carpstats, carpstats); VNET_PCPUSTAT_SYSINIT(carpstats); VNET_PCPUSTAT_SYSUNINIT(carpstats); #define CARPSTATS_ADD(name, val) \ counter_u64_add(VNET(carpstats)[offsetof(struct carpstats, name) / \ sizeof(uint64_t)], (val)) #define CARPSTATS_INC(name) CARPSTATS_ADD(name, 1) SYSCTL_VNET_PCPUSTAT(_net_inet_carp, OID_AUTO, stats, struct carpstats, carpstats, "CARP statistics (struct carpstats, netinet/ip_carp.h)"); #define CARP_LOCK_INIT(sc) mtx_init(&(sc)->sc_mtx, "carp_softc", \ NULL, MTX_DEF) #define CARP_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define CARP_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define CARP_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define CARP_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define CIF_LOCK_INIT(cif) mtx_init(&(cif)->cif_mtx, "carp_if", \ NULL, MTX_DEF) #define CIF_LOCK_DESTROY(cif) mtx_destroy(&(cif)->cif_mtx) #define CIF_LOCK_ASSERT(cif) mtx_assert(&(cif)->cif_mtx, MA_OWNED) #define CIF_LOCK(cif) mtx_lock(&(cif)->cif_mtx) #define CIF_UNLOCK(cif) mtx_unlock(&(cif)->cif_mtx) #define CIF_FREE(cif) do { \ CIF_LOCK(cif); \ if (TAILQ_EMPTY(&(cif)->cif_vrs)) \ carp_free_if(cif); \ else \ CIF_UNLOCK(cif); \ } while (0) #define CARP_LOG(...) do { \ if (V_carp_log > 0) \ log(LOG_INFO, "carp: " __VA_ARGS__); \ } while (0) #define CARP_DEBUG(...) do { \ if (V_carp_log > 1) \ log(LOG_DEBUG, __VA_ARGS__); \ } while (0) #define IFNET_FOREACH_IFA(ifp, ifa) \ CK_STAILQ_FOREACH((ifa), &(ifp)->if_addrhead, ifa_link) \ if ((ifa)->ifa_carp != NULL) #define CARP_FOREACH_IFA(sc, ifa) \ CARP_LOCK_ASSERT(sc); \ for (int _i = 0; \ _i < (sc)->sc_naddrs + (sc)->sc_naddrs6 && \ ((ifa) = sc->sc_ifas[_i]) != NULL; \ ++_i) #define IFNET_FOREACH_CARP(ifp, sc) \ KASSERT(mtx_owned(&ifp->if_carp->cif_mtx) || \ sx_xlocked(&carp_sx), ("cif_vrs not locked")); \ TAILQ_FOREACH((sc), &(ifp)->if_carp->cif_vrs, sc_list) #define DEMOTE_ADVSKEW(sc) \ (((sc)->sc_advskew + V_carp_demotion > CARP_MAXSKEW) ? \ CARP_MAXSKEW : \ (((sc)->sc_advskew + V_carp_demotion < 0) ? 
\ 0 : ((sc)->sc_advskew + V_carp_demotion))) static void carp_input_c(struct mbuf *, struct carp_header *, sa_family_t, int); static void vrrp_input_c(struct mbuf *, int, sa_family_t, int, int, uint16_t); static struct carp_softc *carp_alloc(struct ifnet *, carp_version_t, int); static void carp_destroy(struct carp_softc *); static struct carp_if *carp_alloc_if(struct ifnet *); static void carp_free_if(struct carp_if *); static void carp_set_state(struct carp_softc *, int, const char* reason); static void carp_sc_state(struct carp_softc *); static void carp_setrun(struct carp_softc *, sa_family_t); static void carp_master_down(void *); static void carp_master_down_locked(struct carp_softc *, const char* reason); static void carp_send_ad_locked(struct carp_softc *); static void vrrp_send_ad_locked(struct carp_softc *); static void carp_addroute(struct carp_softc *); static void carp_ifa_addroute(struct ifaddr *); static void carp_delroute(struct carp_softc *); static void carp_ifa_delroute(struct ifaddr *); static void carp_send_ad_all(void *, int); static void carp_demote_adj(int, char *); static LIST_HEAD(, carp_softc) carp_list; static struct mtx carp_mtx; static struct sx carp_sx; static struct task carp_sendall_task = TASK_INITIALIZER(0, carp_send_ad_all, NULL); static int carp_is_supported_if(if_t ifp) { if (ifp == NULL) return (ENXIO); switch (ifp->if_type) { case IFT_ETHER: case IFT_L2VLAN: case IFT_BRIDGE: break; default: return (EOPNOTSUPP); } return (0); } static void carp_hmac_prepare(struct carp_softc *sc) { uint8_t version = CARP_VERSION_CARP, type = CARP_ADVERTISEMENT; uint8_t vhid = sc->sc_vhid & 0xff; struct ifaddr *ifa; int i, found; #ifdef INET struct in_addr last, cur, in; #endif #ifdef INET6 struct in6_addr last6, cur6, in6; #endif CARP_LOCK_ASSERT(sc); MPASS(sc->sc_version == CARP_VERSION_CARP); /* Compute ipad from key. */ bzero(sc->sc_pad, sizeof(sc->sc_pad)); bcopy(sc->sc_key, sc->sc_pad, sizeof(sc->sc_key)); for (i = 0; i < sizeof(sc->sc_pad); i++) sc->sc_pad[i] ^= 0x36; /* Precompute first part of inner hash. 
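 * This is the standard HMAC construction: the inner hash covers
 * (key ^ ipad) || message and the outer hash covers (key ^ opad) ||
 * inner digest.  The leading part of the message (version, type, vhid
 * and the sorted address list) is the same for every advertisement,
 * so it is hashed once here and the partial SHA1 state is cached in
 * sc_sha1 for carp_hmac_generate() to reuse.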
*/ SHA1Init(&sc->sc_sha1); SHA1Update(&sc->sc_sha1, sc->sc_pad, sizeof(sc->sc_pad)); SHA1Update(&sc->sc_sha1, (void *)&version, sizeof(version)); SHA1Update(&sc->sc_sha1, (void *)&type, sizeof(type)); SHA1Update(&sc->sc_sha1, (void *)&vhid, sizeof(vhid)); #ifdef INET cur.s_addr = 0; do { found = 0; last = cur; cur.s_addr = 0xffffffff; CARP_FOREACH_IFA(sc, ifa) { in.s_addr = ifatoia(ifa)->ia_addr.sin_addr.s_addr; if (ifa->ifa_addr->sa_family == AF_INET && ntohl(in.s_addr) > ntohl(last.s_addr) && ntohl(in.s_addr) < ntohl(cur.s_addr)) { cur.s_addr = in.s_addr; found++; } } if (found) SHA1Update(&sc->sc_sha1, (void *)&cur, sizeof(cur)); } while (found); #endif /* INET */ #ifdef INET6 memset(&cur6, 0, sizeof(cur6)); do { found = 0; last6 = cur6; memset(&cur6, 0xff, sizeof(cur6)); CARP_FOREACH_IFA(sc, ifa) { in6 = ifatoia6(ifa)->ia_addr.sin6_addr; if (IN6_IS_SCOPE_EMBED(&in6)) in6.s6_addr16[1] = 0; if (ifa->ifa_addr->sa_family == AF_INET6 && memcmp(&in6, &last6, sizeof(in6)) > 0 && memcmp(&in6, &cur6, sizeof(in6)) < 0) { cur6 = in6; found++; } } if (found) SHA1Update(&sc->sc_sha1, (void *)&cur6, sizeof(cur6)); } while (found); #endif /* INET6 */ /* convert ipad to opad */ for (i = 0; i < sizeof(sc->sc_pad); i++) sc->sc_pad[i] ^= 0x36 ^ 0x5c; } static void carp_hmac_generate(struct carp_softc *sc, uint32_t counter[2], unsigned char md[20]) { SHA1_CTX sha1ctx; CARP_LOCK_ASSERT(sc); /* fetch first half of inner hash */ bcopy(&sc->sc_sha1, &sha1ctx, sizeof(sha1ctx)); SHA1Update(&sha1ctx, (void *)counter, sizeof(sc->sc_counter)); SHA1Final(md, &sha1ctx); /* outer hash */ SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, sc->sc_pad, sizeof(sc->sc_pad)); SHA1Update(&sha1ctx, md, 20); SHA1Final(md, &sha1ctx); } static int carp_hmac_verify(struct carp_softc *sc, uint32_t counter[2], unsigned char md[20]) { unsigned char md2[20]; CARP_LOCK_ASSERT(sc); carp_hmac_generate(sc, counter, md2); return (bcmp(md, md2, sizeof(md2))); } static int vrrp_checksum_verify(struct mbuf *m, int off, int len, uint16_t phdrcksum) { uint16_t cksum; /* * Note that VRRPv3 checksums are different from CARP checksums. * Carp just calculates the checksum over the packet. * VRRPv3 includes the pseudo-header checksum as well. */ cksum = in_cksum_skip(m, off + len, off); cksum -= phdrcksum; return (cksum); } /* * process input packet. * we have rearranged checks order compared to the rfc, * but it seems more efficient this way or not possible otherwise. */ #ifdef INET static int carp_input(struct mbuf **mp, int *offp, int proto) { struct mbuf *m = *mp; struct ip *ip; struct vrrpv3_header *vh; int iplen; int minlen; int totlen; iplen = *offp; *mp = NULL; CARPSTATS_INC(carps_ipackets); if (!V_carp_allow) { m_freem(m); return (IPPROTO_DONE); } /* Ensure we have enough header to figure out the version. 
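 * CARP and VRRPv3 advertisements share the layout of the leading
 * octets, so a struct vrrpv3_header is enough to read vrrp_version
 * and dispatch on it below.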
*/ if (m->m_pkthdr.len < iplen + sizeof(*vh)) { CARPSTATS_INC(carps_badlen); CARP_DEBUG("%s: received len %zd < sizeof(struct vrrpv3_header) " "on %s\n", __func__, m->m_len - sizeof(struct ip), if_name(m->m_pkthdr.rcvif)); m_freem(m); return (IPPROTO_DONE); } if (m->m_len < iplen + sizeof(*vh)) { if ((m = m_pullup(m, iplen + sizeof(*vh))) == NULL) { CARPSTATS_INC(carps_hdrops); CARP_DEBUG("%s():%d: pullup failed\n", __func__, __LINE__); return (IPPROTO_DONE); } } ip = mtod(m, struct ip *); totlen = ntohs(ip->ip_len); vh = (struct vrrpv3_header *)((char *)ip + iplen); switch (vh->vrrp_version) { case CARP_VERSION_CARP: minlen = sizeof(struct carp_header); break; case CARP_VERSION_VRRPv3: minlen = sizeof(struct vrrpv3_header); break; default: CARPSTATS_INC(carps_badver); CARP_DEBUG("%s: unsupported version %d on %s\n", __func__, vh->vrrp_version, if_name(m->m_pkthdr.rcvif)); m_freem(m); return (IPPROTO_DONE); } /* And now check the length again but with the real minimal length. */ if (m->m_pkthdr.len < iplen + minlen) { CARPSTATS_INC(carps_badlen); CARP_DEBUG("%s: received len %zd < %d " "on %s\n", __func__, m->m_len - sizeof(struct ip), iplen + minlen, if_name(m->m_pkthdr.rcvif)); m_freem(m); return (IPPROTO_DONE); } if (m->m_len < iplen + minlen) { if ((m = m_pullup(m, iplen + minlen)) == NULL) { CARPSTATS_INC(carps_hdrops); CARP_DEBUG("%s():%d: pullup failed\n", __func__, __LINE__); return (IPPROTO_DONE); } ip = mtod(m, struct ip *); vh = (struct vrrpv3_header *)((char *)ip + iplen); } switch (vh->vrrp_version) { case CARP_VERSION_CARP: { struct carp_header *ch; /* verify the CARP checksum */ if (in_cksum_skip(m, totlen, iplen)) { CARPSTATS_INC(carps_badsum); CARP_DEBUG("%s: checksum failed on %s\n", __func__, if_name(m->m_pkthdr.rcvif)); m_freem(m); break; } ch = (struct carp_header *)((char *)ip + iplen); carp_input_c(m, ch, AF_INET, ip->ip_ttl); break; } case CARP_VERSION_VRRPv3: { uint16_t phdrcksum; phdrcksum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl((u_short)(totlen - iplen) + ip->ip_p)); vrrp_input_c(m, iplen, AF_INET, ip->ip_ttl, totlen - iplen, phdrcksum); break; } default: KASSERT(false, ("Unsupported version %d", vh->vrrp_version)); } return (IPPROTO_DONE); } #endif #ifdef INET6 static int carp6_input(struct mbuf **mp, int *offp, int proto) { struct mbuf *m = *mp; struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); struct vrrpv3_header *vh; u_int len, minlen; CARPSTATS_INC(carps_ipackets6); if (!V_carp_allow) { m_freem(m); return (IPPROTO_DONE); } /* check if received on a valid carp interface */ if (m->m_pkthdr.rcvif->if_carp == NULL) { CARPSTATS_INC(carps_badif); CARP_DEBUG("%s: packet received on non-carp interface: %s\n", __func__, if_name(m->m_pkthdr.rcvif)); m_freem(m); return (IPPROTO_DONE); } if (m->m_len < *offp + sizeof(*vh)) { len = m->m_len; m = m_pullup(m, *offp + sizeof(*vh)); if (m == NULL) { CARPSTATS_INC(carps_badlen); CARP_DEBUG("%s: packet size %u too small\n", __func__, len); return (IPPROTO_DONE); } ip6 = mtod(m, struct ip6_hdr *); } vh = (struct vrrpv3_header *)(mtod(m, char *) + *offp); switch (vh->vrrp_version) { case CARP_VERSION_CARP: minlen = sizeof(struct carp_header); break; case CARP_VERSION_VRRPv3: minlen = sizeof(struct vrrpv3_header); break; default: CARPSTATS_INC(carps_badver); CARP_DEBUG("%s: unsupported version %d on %s\n", __func__, vh->vrrp_version, if_name(m->m_pkthdr.rcvif)); m_freem(m); return (IPPROTO_DONE); } /* And now check the length again but with the real minimal length. 
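 * minlen was chosen above based on vrrp_version: a full struct
 * carp_header for CARP, or the fixed-size struct vrrpv3_header for
 * VRRPv3.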
*/ if (m->m_pkthdr.len < sizeof(*ip6) + minlen) { CARPSTATS_INC(carps_badlen); CARP_DEBUG("%s: received len %zd < %zd " "on %s\n", __func__, m->m_len - sizeof(struct ip), sizeof(*ip6) + minlen, if_name(m->m_pkthdr.rcvif)); m_freem(m); return (IPPROTO_DONE); } if (m->m_len < sizeof(*ip6) + minlen) { if ((m = m_pullup(m, sizeof(*ip6) + minlen)) == NULL) { CARPSTATS_INC(carps_hdrops); CARP_DEBUG("%s():%d: pullup failed\n", __func__, __LINE__); return (IPPROTO_DONE); } ip6 = mtod(m, struct ip6_hdr *); vh = (struct vrrpv3_header *)mtodo(m, sizeof(*ip6)); } switch (vh->vrrp_version) { case CARP_VERSION_CARP: { struct carp_header *ch; /* verify the CARP checksum */ if (in_cksum_skip(m, *offp + sizeof(struct carp_header), *offp)) { CARPSTATS_INC(carps_badsum); CARP_DEBUG("%s: checksum failed, on %s\n", __func__, if_name(m->m_pkthdr.rcvif)); m_freem(m); break; } ch = (struct carp_header *)((char *)ip6 + sizeof(*ip6)); carp_input_c(m, ch, AF_INET6, ip6->ip6_hlim); break; } case CARP_VERSION_VRRPv3: { uint16_t phdrcksum; phdrcksum = in6_cksum_pseudo(ip6, ntohs(ip6->ip6_plen), ip6->ip6_nxt, 0); vrrp_input_c(m, sizeof(*ip6), AF_INET6, ip6->ip6_hlim, ntohs(ip6->ip6_plen), phdrcksum); break; } default: KASSERT(false, ("Unsupported version %d", vh->vrrp_version)); } return (IPPROTO_DONE); } #endif /* INET6 */ /* * This routine should not be necessary at all, but some switches * (VMWare ESX vswitches) can echo our own packets back at us, * and we must ignore them or they will cause us to drop out of * MASTER mode. * * We cannot catch all cases of network loops. Instead, what we * do here is catch any packet that arrives with a carp header * with a VHID of 0, that comes from an address that is our own. * These packets are by definition "from us" (even if they are from * a misconfigured host that is pretending to be us). * * The VHID test is outside this mini-function. */ static int carp_source_is_self(const struct mbuf *m, struct ifaddr *ifa, sa_family_t af) { #ifdef INET struct ip *ip4; struct in_addr in4; #endif #ifdef INET6 struct ip6_hdr *ip6; struct in6_addr in6; #endif switch (af) { #ifdef INET case AF_INET: ip4 = mtod(m, struct ip *); in4 = ifatoia(ifa)->ia_addr.sin_addr; return (in4.s_addr == ip4->ip_src.s_addr); #endif #ifdef INET6 case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); in6 = ifatoia6(ifa)->ia_addr.sin6_addr; return (memcmp(&in6, &ip6->ip6_src, sizeof(in6)) == 0); #endif default: break; } return (0); } static struct ifaddr * carp_find_ifa(const struct mbuf *m, sa_family_t af, uint8_t vhid) { struct ifnet *ifp = m->m_pkthdr.rcvif; struct ifaddr *ifa, *match; int error; NET_EPOCH_ASSERT(); /* * Verify that the VHID is valid on the receiving interface. * * There should be just one match. If there are none * the VHID is not valid and we drop the packet. If * there are multiple VHID matches, take just the first * one, for compatibility with previous code. While we're * scanning, check for obvious loops in the network topology * (these should never happen, and as noted above, we may * miss real loops; this is just a double-check). */ error = 0; match = NULL; IFNET_FOREACH_IFA(ifp, ifa) { if (match == NULL && ifa->ifa_carp != NULL && ifa->ifa_addr->sa_family == af && ifa->ifa_carp->sc_vhid == vhid) match = ifa; if (vhid == 0 && carp_source_is_self(m, ifa, af)) error = ELOOP; } ifa = error ? NULL : match; if (ifa != NULL) ifa_ref(ifa); if (ifa == NULL) { if (error == ELOOP) { CARP_DEBUG("dropping looped packet on interface %s\n", if_name(ifp)); CARPSTATS_INC(carps_badif); /* ??? 
*/ } else { CARPSTATS_INC(carps_badvhid); } } return (ifa); } static void carp_input_c(struct mbuf *m, struct carp_header *ch, sa_family_t af, int ttl) { struct ifnet *ifp = m->m_pkthdr.rcvif; struct ifaddr *ifa; struct carp_softc *sc; uint64_t tmp_counter; struct timeval sc_tv, ch_tv; bool multicast = false; NET_EPOCH_ASSERT(); MPASS(ch->carp_version == CARP_VERSION_CARP); ifa = carp_find_ifa(m, af, ch->carp_vhid); if (ifa == NULL) { m_freem(m); return; } sc = ifa->ifa_carp; CARP_LOCK(sc); /* verify the CARP version. */ if (sc->sc_version != CARP_VERSION_CARP) { CARP_UNLOCK(sc); CARPSTATS_INC(carps_badver); CARP_DEBUG("%s: invalid version %d\n", if_name(ifp), ch->carp_version); ifa_free(ifa); m_freem(m); return; } if (ifa->ifa_addr->sa_family == AF_INET) { multicast = IN_MULTICAST(ntohl(sc->sc_carpaddr.s_addr)); } else { multicast = IN6_IS_ADDR_MULTICAST(&sc->sc_carpaddr6); } ifa_free(ifa); /* verify that the IP TTL is 255, but only if we're not in unicast mode. */ if (multicast && ttl != CARP_DFLTTL) { CARPSTATS_INC(carps_badttl); CARP_DEBUG("%s: received ttl %d != 255 on %s\n", __func__, ttl, if_name(m->m_pkthdr.rcvif)); goto out; } if (carp_hmac_verify(sc, ch->carp_counter, ch->carp_md)) { CARPSTATS_INC(carps_badauth); CARP_DEBUG("%s: incorrect hash for VHID %u@%s\n", __func__, sc->sc_vhid, if_name(ifp)); goto out; } tmp_counter = ntohl(ch->carp_counter[0]); tmp_counter = tmp_counter<<32; tmp_counter += ntohl(ch->carp_counter[1]); /* XXX Replay protection goes here */ sc->sc_init_counter = false; sc->sc_counter = tmp_counter; sc_tv.tv_sec = sc->sc_advbase; sc_tv.tv_usec = DEMOTE_ADVSKEW(sc) * 1000000 / 256; ch_tv.tv_sec = ch->carp_advbase; ch_tv.tv_usec = ch->carp_advskew * 1000000 / 256; switch (sc->sc_state) { case INIT: break; case MASTER: /* * If we receive an advertisement from a master who's going to * be more frequent than us, go into BACKUP state. */ if (timevalcmp(&sc_tv, &ch_tv, >) || timevalcmp(&sc_tv, &ch_tv, ==)) { callout_stop(&sc->sc_ad_tmo); carp_set_state(sc, BACKUP, "more frequent advertisement received"); carp_setrun(sc, 0); carp_delroute(sc); } break; case BACKUP: /* * If we're pre-empting masters who advertise slower than us, * and this one claims to be slower, treat him as down. */ if (V_carp_preempt && timevalcmp(&sc_tv, &ch_tv, <)) { carp_master_down_locked(sc, "preempting a slower master"); break; } /* * If the master is going to advertise at such a low frequency * that he's guaranteed to time out, we'd might as well just * treat him as timed out now. */ sc_tv.tv_sec = sc->sc_advbase * 3; if (timevalcmp(&sc_tv, &ch_tv, <)) { carp_master_down_locked(sc, "master will time out"); break; } /* * Otherwise, we reset the counter and wait for the next * advertisement. */ carp_setrun(sc, af); break; } out: CARP_UNLOCK(sc); m_freem(m); } static void vrrp_input_c(struct mbuf *m, int off, sa_family_t af, int ttl, int len, uint16_t phdrcksum) { struct vrrpv3_header *vh = mtodo(m, off); struct ifnet *ifp = m->m_pkthdr.rcvif; struct ifaddr *ifa; struct carp_softc *sc; NET_EPOCH_ASSERT(); MPASS(vh->vrrp_version == CARP_VERSION_VRRPv3); ifa = carp_find_ifa(m, af, vh->vrrp_vrtid); if (ifa == NULL) { m_freem(m); return; } sc = ifa->ifa_carp; CARP_LOCK(sc); ifa_free(ifa); /* verify the CARP version. */ if (sc->sc_version != CARP_VERSION_VRRPv3) { CARP_UNLOCK(sc); CARPSTATS_INC(carps_badver); CARP_DEBUG("%s: invalid version %d\n", if_name(ifp), vh->vrrp_version); m_freem(m); return; } /* verify that the IP TTL is 255. 
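 * Unlike the CARP input path above, which skips this check for
 * unicast peers, VRRPv3 advertisements are always sent with a
 * TTL/hop limit of 255, so any other value means the packet crossed
 * a router and must be dropped.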
*/ if (ttl != CARP_DFLTTL) { CARPSTATS_INC(carps_badttl); CARP_DEBUG("%s: received ttl %d != 255 on %s\n", __func__, ttl, if_name(m->m_pkthdr.rcvif)); goto out; } if (vrrp_checksum_verify(m, off, len, phdrcksum)) { CARPSTATS_INC(carps_badsum); CARP_DEBUG("%s: incorrect checksum for VRID %u@%s\n", __func__, sc->sc_vhid, if_name(ifp)); goto out; } /* RFC9568, 7.1 Receiving VRRP packets. */ if (sc->sc_vrrp_prio == 255) { CARP_DEBUG("%s: our priority is 255. Ignore peer announcement.\n", __func__); goto out; } /* XXX TODO Check IP address payload. */ sc->sc_vrrp_master_inter = ntohs(vh->vrrp_max_adver_int); switch (sc->sc_state) { case INIT: break; case MASTER: /* * If we receive an advertisement from a master who's going to * be more frequent than us, go into BACKUP state. * Same if the peer has a higher priority than us. */ if (ntohs(vh->vrrp_max_adver_int) < sc->sc_vrrp_adv_inter || vh->vrrp_priority > sc->sc_vrrp_prio) { callout_stop(&sc->sc_ad_tmo); carp_set_state(sc, BACKUP, "more frequent advertisement received"); carp_setrun(sc, 0); carp_delroute(sc); } break; case BACKUP: /* * If we're pre-empting masters who advertise slower than us, * and this one claims to be slower, treat him as down. */ if (V_carp_preempt && (ntohs(vh->vrrp_max_adver_int) > sc->sc_vrrp_adv_inter || vh->vrrp_priority < sc->sc_vrrp_prio)) { carp_master_down_locked(sc, "preempting a slower master"); break; } /* * Otherwise, we reset the counter and wait for the next * advertisement. */ carp_setrun(sc, af); break; } out: CARP_UNLOCK(sc); m_freem(m); } static int carp_tag(struct carp_softc *sc, struct mbuf *m) { struct m_tag *mtag; /* Tag packet for carp_output */ if ((mtag = m_tag_get(PACKET_TAG_CARP, sizeof(sc->sc_vhid), M_NOWAIT)) == NULL) { m_freem(m); CARPSTATS_INC(carps_onomem); return (ENOMEM); } bcopy(&sc->sc_vhid, mtag + 1, sizeof(sc->sc_vhid)); m_tag_prepend(m, mtag); return (0); } static void carp_prepare_ad(struct mbuf *m, struct carp_softc *sc, struct carp_header *ch) { MPASS(sc->sc_version == CARP_VERSION_CARP); if (sc->sc_init_counter) { /* this could also be seconds since unix epoch */ sc->sc_counter = arc4random(); sc->sc_counter = sc->sc_counter << 32; sc->sc_counter += arc4random(); } else sc->sc_counter++; ch->carp_counter[0] = htonl((sc->sc_counter>>32)&0xffffffff); ch->carp_counter[1] = htonl(sc->sc_counter&0xffffffff); carp_hmac_generate(sc, ch->carp_counter, ch->carp_md); } static inline void send_ad_locked(struct carp_softc *sc) { switch (sc->sc_version) { case CARP_VERSION_CARP: carp_send_ad_locked(sc); break; case CARP_VERSION_VRRPv3: vrrp_send_ad_locked(sc); break; } } /* * To avoid LORs and possible recursions this function shouldn't * be called directly, but scheduled via taskqueue. */ static void carp_send_ad_all(void *ctx __unused, int pending __unused) { struct carp_softc *sc; struct epoch_tracker et; NET_EPOCH_ENTER(et); mtx_lock(&carp_mtx); LIST_FOREACH(sc, &carp_list, sc_next) if (sc->sc_state == MASTER) { CARP_LOCK(sc); CURVNET_SET(sc->sc_carpdev->if_vnet); send_ad_locked(sc); CURVNET_RESTORE(); CARP_UNLOCK(sc); } mtx_unlock(&carp_mtx); NET_EPOCH_EXIT(et); } /* Send a periodic advertisement, executed in callout context. 
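 * For CARP the interval is advbase seconds plus the
 * demotion-adjusted advskew/256 seconds; for VRRPv3 it is adv_inter
 * centiseconds.  The callout is rescheduled at the end of
 * carp_send_ad_locked() and vrrp_send_ad_locked() respectively.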
*/ static void carp_callout(void *v) { struct carp_softc *sc = v; struct epoch_tracker et; NET_EPOCH_ENTER(et); CARP_LOCK_ASSERT(sc); CURVNET_SET(sc->sc_carpdev->if_vnet); send_ad_locked(sc); CURVNET_RESTORE(); CARP_UNLOCK(sc); NET_EPOCH_EXIT(et); } static void carp_send_ad_error(struct carp_softc *sc, int error) { /* * We track errors and successful sends with this logic: * - Any error resets success counter to 0. * - MAX_ERRORS triggers demotion. * - MIN_SUCCESS successes resets error counter to 0. * - MIN_SUCCESS reverts demotion, if it was triggered before. */ if (error) { if (sc->sc_sendad_errors < INT_MAX) sc->sc_sendad_errors++; if (sc->sc_sendad_errors == CARP_SENDAD_MAX_ERRORS) { static const char fmt[] = "send error %d on %s"; char msg[sizeof(fmt) + IFNAMSIZ]; sprintf(msg, fmt, error, if_name(sc->sc_carpdev)); carp_demote_adj(V_carp_senderr_adj, msg); } sc->sc_sendad_success = 0; } else if (sc->sc_sendad_errors > 0) { if (++sc->sc_sendad_success >= CARP_SENDAD_MIN_SUCCESS) { if (sc->sc_sendad_errors >= CARP_SENDAD_MAX_ERRORS) { static const char fmt[] = "send ok on %s"; char msg[sizeof(fmt) + IFNAMSIZ]; sprintf(msg, fmt, if_name(sc->sc_carpdev)); carp_demote_adj(-V_carp_senderr_adj, msg); } sc->sc_sendad_errors = 0; } } } /* * Pick the best ifaddr on the given ifp for sending CARP * advertisements. * * "Best" here is defined by ifa_preferred(). This function is much * much like ifaof_ifpforaddr() except that we just use ifa_preferred(). * * (This could be simplified to return the actual address, except that * it has a different format in AF_INET and AF_INET6.) */ static struct ifaddr * carp_best_ifa(int af, struct ifnet *ifp) { struct ifaddr *ifa, *best; NET_EPOCH_ASSERT(); if (af >= AF_MAX) return (NULL); best = NULL; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == af && (best == NULL || ifa_preferred(best, ifa))) best = ifa; } if (best != NULL) ifa_ref(best); return (best); } static void carp_send_ad_locked(struct carp_softc *sc) { struct carp_header ch; struct timeval tv; struct ifaddr *ifa; struct carp_header *ch_ptr; struct mbuf *m; int len, advskew; NET_EPOCH_ASSERT(); CARP_LOCK_ASSERT(sc); MPASS(sc->sc_version == CARP_VERSION_CARP); advskew = DEMOTE_ADVSKEW(sc); tv.tv_sec = sc->sc_advbase; tv.tv_usec = advskew * 1000000 / 256; ch.carp_version = CARP_VERSION_CARP; ch.carp_type = CARP_ADVERTISEMENT; ch.carp_vhid = sc->sc_vhid; ch.carp_advbase = sc->sc_advbase; ch.carp_advskew = advskew; ch.carp_authlen = 7; /* XXX DEFINE */ ch.carp_pad1 = 0; /* must be zero */ ch.carp_cksum = 0; /* XXXGL: OpenBSD picks first ifaddr with needed family. 
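 * Here carp_best_ifa() (above) is used instead, which returns the
 * ifa_preferred() address of the requested family.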
*/ #ifdef INET if (sc->sc_naddrs) { struct ip *ip; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { CARPSTATS_INC(carps_onomem); goto resched; } len = sizeof(*ip) + sizeof(ch); m->m_pkthdr.len = len; m->m_pkthdr.rcvif = NULL; m->m_len = len; M_ALIGN(m, m->m_len); if (IN_MULTICAST(ntohl(sc->sc_carpaddr.s_addr))) m->m_flags |= M_MCAST; ip = mtod(m, struct ip *); ip->ip_v = IPVERSION; ip->ip_hl = sizeof(*ip) >> 2; ip->ip_tos = V_carp_dscp << IPTOS_DSCP_OFFSET; ip->ip_len = htons(len); ip->ip_off = htons(IP_DF); ip->ip_ttl = CARP_DFLTTL; ip->ip_p = IPPROTO_CARP; ip->ip_sum = 0; - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); ifa = carp_best_ifa(AF_INET, sc->sc_carpdev); if (ifa != NULL) { ip->ip_src.s_addr = ifatoia(ifa)->ia_addr.sin_addr.s_addr; ifa_free(ifa); } else ip->ip_src.s_addr = 0; ip->ip_dst = sc->sc_carpaddr; ch_ptr = (struct carp_header *)(&ip[1]); bcopy(&ch, ch_ptr, sizeof(ch)); carp_prepare_ad(m, sc, ch_ptr); if (IN_MULTICAST(ntohl(sc->sc_carpaddr.s_addr)) && carp_tag(sc, m) != 0) goto resched; m->m_data += sizeof(*ip); ch_ptr->carp_cksum = in_cksum(m, len - sizeof(*ip)); m->m_data -= sizeof(*ip); CARPSTATS_INC(carps_opackets); carp_send_ad_error(sc, ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_carpdev->if_carp->cif_imo, NULL)); } #endif /* INET */ #ifdef INET6 if (sc->sc_naddrs6) { struct ip6_hdr *ip6; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { CARPSTATS_INC(carps_onomem); goto resched; } len = sizeof(*ip6) + sizeof(ch); m->m_pkthdr.len = len; m->m_pkthdr.rcvif = NULL; m->m_len = len; M_ALIGN(m, m->m_len); ip6 = mtod(m, struct ip6_hdr *); bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc |= IPV6_VERSION; /* Traffic class isn't defined in ip6 struct instead * it gets offset into flowid field */ ip6->ip6_flow |= htonl(V_carp_dscp << (IPV6_FLOWLABEL_LEN + IPTOS_DSCP_OFFSET)); ip6->ip6_hlim = CARP_DFLTTL; ip6->ip6_nxt = IPPROTO_CARP; /* set the source address */ ifa = carp_best_ifa(AF_INET6, sc->sc_carpdev); if (ifa != NULL) { bcopy(IFA_IN6(ifa), &ip6->ip6_src, sizeof(struct in6_addr)); ifa_free(ifa); } else /* This should never happen with IPv6. */ bzero(&ip6->ip6_src, sizeof(struct in6_addr)); /* Set the multicast destination. 
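 * sc_carpaddr6 is normally the CARP multicast group, but may have
 * been configured as a unicast peer address; the in6_setscope()
 * call below embeds the zone id for multicast and link-local
 * destinations only.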
*/ memcpy(&ip6->ip6_dst, &sc->sc_carpaddr6, sizeof(ip6->ip6_dst)); if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst)) { if (in6_setscope(&ip6->ip6_dst, sc->sc_carpdev, NULL) != 0) { m_freem(m); CARP_DEBUG("%s: in6_setscope failed\n", __func__); goto resched; } } ch_ptr = (struct carp_header *)(&ip6[1]); bcopy(&ch, ch_ptr, sizeof(ch)); carp_prepare_ad(m, sc, ch_ptr); if (IN6_IS_ADDR_MULTICAST(&sc->sc_carpaddr6) && carp_tag(sc, m) != 0) goto resched; m->m_data += sizeof(*ip6); ch_ptr->carp_cksum = in_cksum(m, len - sizeof(*ip6)); m->m_data -= sizeof(*ip6); CARPSTATS_INC(carps_opackets6); carp_send_ad_error(sc, ip6_output(m, NULL, NULL, 0, &sc->sc_carpdev->if_carp->cif_im6o, NULL, NULL)); } #endif /* INET6 */ resched: callout_reset(&sc->sc_ad_tmo, tvtohz(&tv), carp_callout, sc); } static void vrrp_send_ad_locked(struct carp_softc *sc) { struct vrrpv3_header *vh_ptr; struct ifaddr *ifa; struct mbuf *m; int len; struct vrrpv3_header vh = { .vrrp_version = CARP_VERSION_VRRPv3, .vrrp_type = VRRP_TYPE_ADVERTISEMENT, .vrrp_vrtid = sc->sc_vhid, .vrrp_priority = sc->sc_vrrp_prio, .vrrp_count_addr = 0, .vrrp_max_adver_int = htons(sc->sc_vrrp_adv_inter), .vrrp_checksum = 0, }; NET_EPOCH_ASSERT(); CARP_LOCK_ASSERT(sc); MPASS(sc->sc_version == CARP_VERSION_VRRPv3); #ifdef INET if (sc->sc_naddrs) { struct ip *ip; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { CARPSTATS_INC(carps_onomem); goto resched; } len = sizeof(*ip) + sizeof(vh); m->m_pkthdr.len = len; m->m_pkthdr.rcvif = NULL; m->m_len = len; M_ALIGN(m, m->m_len); m->m_flags |= M_MCAST; ip = mtod(m, struct ip *); ip->ip_v = IPVERSION; ip->ip_hl = sizeof(*ip) >> 2; ip->ip_tos = V_carp_dscp << IPTOS_DSCP_OFFSET; ip->ip_off = htons(IP_DF); ip->ip_ttl = CARP_DFLTTL; ip->ip_p = IPPROTO_CARP; ip->ip_sum = 0; - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); ifa = carp_best_ifa(AF_INET, sc->sc_carpdev); if (ifa != NULL) { ip->ip_src.s_addr = ifatoia(ifa)->ia_addr.sin_addr.s_addr; ifa_free(ifa); } else ip->ip_src.s_addr = 0; ip->ip_dst.s_addr = htonl(INADDR_CARP_GROUP); /* Include the IP addresses in the announcement. 
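 * Unlike CARP, a VRRPv3 advertisement carries the list of protected
 * addresses after the header: each address appended below bumps
 * vrrp_count_addr and grows len accordingly.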
*/ for (int i = 0; i < (sc->sc_naddrs + sc->sc_naddrs6); i++) { struct sockaddr_in *in; MPASS(sc->sc_ifas[i] != NULL); if (sc->sc_ifas[i]->ifa_addr->sa_family != AF_INET) continue; in = (struct sockaddr_in *)sc->sc_ifas[i]->ifa_addr; if (m_append(m, sizeof(in->sin_addr), (caddr_t)&in->sin_addr) != 1) { m_freem(m); goto resched; } vh.vrrp_count_addr++; len += sizeof(in->sin_addr); } ip->ip_len = htons(len); vh_ptr = (struct vrrpv3_header *)mtodo(m, sizeof(*ip)); bcopy(&vh, vh_ptr, sizeof(vh)); vh_ptr->vrrp_checksum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl((uint16_t)(len - sizeof(*ip)) + ip->ip_p)); vh_ptr->vrrp_checksum = in_cksum_skip(m, len, sizeof(*ip)); if (carp_tag(sc, m)) goto resched; CARPSTATS_INC(carps_opackets); carp_send_ad_error(sc, ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_carpdev->if_carp->cif_imo, NULL)); } #endif #ifdef INET6 if (sc->sc_naddrs6) { struct ip6_hdr *ip6; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { CARPSTATS_INC(carps_onomem); goto resched; } len = sizeof(*ip6) + sizeof(vh); m->m_pkthdr.len = len; m->m_pkthdr.rcvif = NULL; m->m_len = len; M_ALIGN(m, m->m_len); m->m_flags |= M_MCAST; ip6 = mtod(m, struct ip6_hdr *); bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc |= IPV6_VERSION; /* Traffic class isn't defined in ip6 struct instead * it gets offset into flowid field */ ip6->ip6_flow |= htonl(V_carp_dscp << (IPV6_FLOWLABEL_LEN + IPTOS_DSCP_OFFSET)); ip6->ip6_hlim = CARP_DFLTTL; ip6->ip6_nxt = IPPROTO_CARP; /* set the source address */ ifa = carp_best_ifa(AF_INET6, sc->sc_carpdev); if (ifa != NULL) { bcopy(IFA_IN6(ifa), &ip6->ip6_src, sizeof(struct in6_addr)); ifa_free(ifa); } else /* This should never happen with IPv6. */ bzero(&ip6->ip6_src, sizeof(struct in6_addr)); /* Set the multicast destination. */ bzero(&ip6->ip6_dst, sizeof(ip6->ip6_dst)); ip6->ip6_dst.s6_addr16[0] = IPV6_ADDR_INT16_MLL; ip6->ip6_dst.s6_addr8[15] = 0x12; /* Include the IP addresses in the announcement. 
*/ len = sizeof(vh); for (int i = 0; i < (sc->sc_naddrs + sc->sc_naddrs6); i++) { struct sockaddr_in6 *in6; MPASS(sc->sc_ifas[i] != NULL); if (sc->sc_ifas[i]->ifa_addr->sa_family != AF_INET6) continue; in6 = (struct sockaddr_in6 *)sc->sc_ifas[i]->ifa_addr; if (m_append(m, sizeof(in6->sin6_addr), (char *)&in6->sin6_addr) != 1) { m_freem(m); goto resched; } vh.vrrp_count_addr++; len += sizeof(in6->sin6_addr); } ip6->ip6_plen = htonl(len); vh_ptr = (struct vrrpv3_header *)mtodo(m, sizeof(*ip6)); bcopy(&vh, vh_ptr, sizeof(vh)); vh_ptr->vrrp_checksum = in6_cksum_pseudo(ip6, len, ip6->ip6_nxt, 0); vh_ptr->vrrp_checksum = in_cksum_skip(m, len + sizeof(*ip6), sizeof(*ip6)); if (in6_setscope(&ip6->ip6_dst, sc->sc_carpdev, NULL) != 0) { m_freem(m); CARP_DEBUG("%s: in6_setscope failed\n", __func__); goto resched; } if (carp_tag(sc, m)) goto resched; CARPSTATS_INC(carps_opackets6); carp_send_ad_error(sc, ip6_output(m, NULL, NULL, 0, &sc->sc_carpdev->if_carp->cif_im6o, NULL, NULL)); } #endif resched: callout_reset(&sc->sc_ad_tmo, sc->sc_vrrp_adv_inter * hz / 100, carp_callout, sc); } static void carp_addroute(struct carp_softc *sc) { struct ifaddr *ifa; CARP_FOREACH_IFA(sc, ifa) carp_ifa_addroute(ifa); } static void carp_ifa_addroute(struct ifaddr *ifa) { switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: in_addprefix(ifatoia(ifa)); ifa_add_loopback_route(ifa, (struct sockaddr *)&ifatoia(ifa)->ia_addr); break; #endif #ifdef INET6 case AF_INET6: ifa_add_loopback_route(ifa, (struct sockaddr *)&ifatoia6(ifa)->ia_addr); nd6_add_ifa_lle(ifatoia6(ifa)); break; #endif } } static void carp_delroute(struct carp_softc *sc) { struct ifaddr *ifa; CARP_FOREACH_IFA(sc, ifa) carp_ifa_delroute(ifa); } static void carp_ifa_delroute(struct ifaddr *ifa) { switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: ifa_del_loopback_route(ifa, (struct sockaddr *)&ifatoia(ifa)->ia_addr); in_scrubprefix(ifatoia(ifa), LLE_STATIC); break; #endif #ifdef INET6 case AF_INET6: ifa_del_loopback_route(ifa, (struct sockaddr *)&ifatoia6(ifa)->ia_addr); nd6_rem_ifa_lle(ifatoia6(ifa), 1); break; #endif } } int carp_master(struct ifaddr *ifa) { struct carp_softc *sc = ifa->ifa_carp; return (sc->sc_state == MASTER); } #ifdef INET /* * Broadcast a gratuitous ARP request containing * the virtual router MAC address for each IP address * associated with the virtual router. */ static void carp_send_arp(struct carp_softc *sc) { struct ifaddr *ifa; struct in_addr addr; NET_EPOCH_ASSERT(); CARP_FOREACH_IFA(sc, ifa) { if (ifa->ifa_addr->sa_family != AF_INET) continue; addr = ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr; arp_announce_ifaddr(sc->sc_carpdev, addr, sc->sc_addr); } } int carp_iamatch(struct ifaddr *ifa, uint8_t **enaddr) { struct carp_softc *sc = ifa->ifa_carp; if (sc->sc_state == MASTER) { *enaddr = sc->sc_addr; return (1); } return (0); } #endif #ifdef INET6 static void carp_send_na(struct carp_softc *sc) { static struct in6_addr mcast = IN6ADDR_LINKLOCAL_ALLNODES_INIT; struct ifaddr *ifa; struct in6_addr *in6; CARP_FOREACH_IFA(sc, ifa) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; in6 = IFA_IN6(ifa); nd6_na_output(sc->sc_carpdev, &mcast, in6, ND_NA_FLAG_OVERRIDE, 1, NULL); DELAY(1000); /* XXX */ } } /* * Returns ifa in case it's a carp address and it is MASTER, or if the address * matches and is not a carp address. Returns NULL otherwise. 
*/ struct ifaddr * carp_iamatch6(struct ifnet *ifp, struct in6_addr *taddr) { struct ifaddr *ifa; NET_EPOCH_ASSERT(); ifa = NULL; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_INET6) continue; if (!IN6_ARE_ADDR_EQUAL(taddr, IFA_IN6(ifa))) continue; if (ifa->ifa_carp && ifa->ifa_carp->sc_state != MASTER) ifa = NULL; else ifa_ref(ifa); break; } return (ifa); } char * carp_macmatch6(struct ifnet *ifp, struct mbuf *m, const struct in6_addr *taddr) { struct ifaddr *ifa; NET_EPOCH_ASSERT(); IFNET_FOREACH_IFA(ifp, ifa) if (ifa->ifa_addr->sa_family == AF_INET6 && IN6_ARE_ADDR_EQUAL(taddr, IFA_IN6(ifa))) { struct carp_softc *sc = ifa->ifa_carp; struct m_tag *mtag; mtag = m_tag_get(PACKET_TAG_CARP, sizeof(struct carp_softc *), M_NOWAIT); if (mtag == NULL) /* Better a bit than nothing. */ return (sc->sc_addr); bcopy(&sc, mtag + 1, sizeof(sc)); m_tag_prepend(m, mtag); return (sc->sc_addr); } return (NULL); } #endif /* INET6 */ int carp_forus(struct ifnet *ifp, u_char *dhost) { struct carp_softc *sc; uint8_t *ena = dhost; if (ena[0] || ena[1] || ena[2] != 0x5e || ena[3] || ena[4] != 1) return (0); CIF_LOCK(ifp->if_carp); IFNET_FOREACH_CARP(ifp, sc) { /* * CARP_LOCK() is not here, since would protect nothing, but * cause deadlock with if_bridge, calling this under its lock. */ if (sc->sc_state == MASTER && !bcmp(dhost, sc->sc_addr, ETHER_ADDR_LEN)) { CIF_UNLOCK(ifp->if_carp); return (1); } } CIF_UNLOCK(ifp->if_carp); return (0); } /* Master down timeout event, executed in callout context. */ static void carp_master_down(void *v) { struct carp_softc *sc = v; struct epoch_tracker et; NET_EPOCH_ENTER(et); CARP_LOCK_ASSERT(sc); CURVNET_SET(sc->sc_carpdev->if_vnet); if (sc->sc_state == BACKUP) { carp_master_down_locked(sc, "master timed out"); } CURVNET_RESTORE(); CARP_UNLOCK(sc); NET_EPOCH_EXIT(et); } static void carp_master_down_locked(struct carp_softc *sc, const char *reason) { NET_EPOCH_ASSERT(); CARP_LOCK_ASSERT(sc); switch (sc->sc_state) { case BACKUP: carp_set_state(sc, MASTER, reason); send_ad_locked(sc); #ifdef INET carp_send_arp(sc); #endif #ifdef INET6 carp_send_na(sc); #endif carp_setrun(sc, 0); carp_addroute(sc); break; case INIT: case MASTER: #ifdef INVARIANTS panic("carp: VHID %u@%s: master_down event in %s state\n", sc->sc_vhid, if_name(sc->sc_carpdev), sc->sc_state ? "MASTER" : "INIT"); #endif break; } } /* * When in backup state, af indicates whether to reset the master down timer * for v4 or v6. If it's set to zero, reset the ones which are already pending. 
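 * The timeout computed below is 3 * advbase + advskew/256 seconds
 * for CARP; for VRRPv3 it is 3 * master_adver_interval plus the
 * skew time (256 - priority) * master_adver_interval / 256, counted
 * in centiseconds.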
*/ static void carp_setrun(struct carp_softc *sc, sa_family_t af) { struct timeval tv; int timeout; CARP_LOCK_ASSERT(sc); if ((sc->sc_carpdev->if_flags & IFF_UP) == 0 || sc->sc_carpdev->if_link_state != LINK_STATE_UP || (sc->sc_naddrs == 0 && sc->sc_naddrs6 == 0) || !V_carp_allow) return; switch (sc->sc_state) { case INIT: carp_set_state(sc, BACKUP, "initialization complete"); carp_setrun(sc, 0); break; case BACKUP: callout_stop(&sc->sc_ad_tmo); switch (sc->sc_version) { case CARP_VERSION_CARP: tv.tv_sec = 3 * sc->sc_advbase; tv.tv_usec = sc->sc_advskew * 1000000 / 256; timeout = tvtohz(&tv); break; case CARP_VERSION_VRRPv3: /* skew time */ timeout = (256 - sc->sc_vrrp_prio) * sc->sc_vrrp_master_inter / 256; timeout += (3 * sc->sc_vrrp_master_inter); timeout *= hz; timeout /= 100; /* master interval is in centiseconds */ break; } switch (af) { #ifdef INET case AF_INET: callout_reset(&sc->sc_md_tmo, timeout, carp_master_down, sc); break; #endif #ifdef INET6 case AF_INET6: callout_reset(&sc->sc_md6_tmo, timeout, carp_master_down, sc); break; #endif default: #ifdef INET if (sc->sc_naddrs) callout_reset(&sc->sc_md_tmo, timeout, carp_master_down, sc); #endif #ifdef INET6 if (sc->sc_naddrs6) callout_reset(&sc->sc_md6_tmo, timeout, carp_master_down, sc); #endif break; } break; case MASTER: switch (sc->sc_version) { case CARP_VERSION_CARP: tv.tv_sec = sc->sc_advbase; tv.tv_usec = sc->sc_advskew * 1000000 / 256; callout_reset(&sc->sc_ad_tmo, tvtohz(&tv), carp_callout, sc); break; case CARP_VERSION_VRRPv3: callout_reset(&sc->sc_ad_tmo, sc->sc_vrrp_adv_inter * hz / 100, carp_callout, sc); break; } break; } } /* * Setup multicast structures. */ static int carp_multicast_setup(struct carp_if *cif, sa_family_t sa) { struct ifnet *ifp = cif->cif_ifp; int error = 0; switch (sa) { #ifdef INET case AF_INET: { struct ip_moptions *imo = &cif->cif_imo; struct in_mfilter *imf; struct in_addr addr; if (ip_mfilter_first(&imo->imo_head) != NULL) return (0); imf = ip_mfilter_alloc(M_WAITOK, 0, 0); ip_mfilter_init(&imo->imo_head); imo->imo_multicast_vif = -1; addr.s_addr = htonl(INADDR_CARP_GROUP); if ((error = in_joingroup(ifp, &addr, NULL, &imf->imf_inm)) != 0) { ip_mfilter_free(imf); break; } ip_mfilter_insert(&imo->imo_head, imf); imo->imo_multicast_ifp = ifp; imo->imo_multicast_ttl = CARP_DFLTTL; imo->imo_multicast_loop = 0; break; } #endif #ifdef INET6 case AF_INET6: { struct ip6_moptions *im6o = &cif->cif_im6o; struct in6_mfilter *im6f[2]; struct in6_addr in6; if (ip6_mfilter_first(&im6o->im6o_head)) return (0); im6f[0] = ip6_mfilter_alloc(M_WAITOK, 0, 0); im6f[1] = ip6_mfilter_alloc(M_WAITOK, 0, 0); ip6_mfilter_init(&im6o->im6o_head); im6o->im6o_multicast_hlim = CARP_DFLTTL; im6o->im6o_multicast_ifp = ifp; /* Join IPv6 CARP multicast group. */ bzero(&in6, sizeof(in6)); in6.s6_addr16[0] = htons(0xff02); in6.s6_addr8[15] = 0x12; if ((error = in6_setscope(&in6, ifp, NULL)) != 0) { ip6_mfilter_free(im6f[0]); ip6_mfilter_free(im6f[1]); break; } if ((error = in6_joingroup(ifp, &in6, NULL, &im6f[0]->im6f_in6m, 0)) != 0) { ip6_mfilter_free(im6f[0]); ip6_mfilter_free(im6f[1]); break; } /* Join solicited multicast address. 
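 *
 * Solicited-node groups take the form ff02::1:ffXX:XXXX (RFC 4291);
 * the assignments below build that prefix byte by byte.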
*/ bzero(&in6, sizeof(in6)); in6.s6_addr16[0] = htons(0xff02); in6.s6_addr32[1] = 0; in6.s6_addr32[2] = htonl(1); in6.s6_addr32[3] = 0; in6.s6_addr8[12] = 0xff; if ((error = in6_setscope(&in6, ifp, NULL)) != 0) { ip6_mfilter_free(im6f[0]); ip6_mfilter_free(im6f[1]); break; } if ((error = in6_joingroup(ifp, &in6, NULL, &im6f[1]->im6f_in6m, 0)) != 0) { in6_leavegroup(im6f[0]->im6f_in6m, NULL); ip6_mfilter_free(im6f[0]); ip6_mfilter_free(im6f[1]); break; } ip6_mfilter_insert(&im6o->im6o_head, im6f[0]); ip6_mfilter_insert(&im6o->im6o_head, im6f[1]); break; } #endif } return (error); } /* * Free multicast structures. */ static void carp_multicast_cleanup(struct carp_if *cif, sa_family_t sa) { #ifdef INET struct ip_moptions *imo = &cif->cif_imo; struct in_mfilter *imf; #endif #ifdef INET6 struct ip6_moptions *im6o = &cif->cif_im6o; struct in6_mfilter *im6f; #endif sx_assert(&carp_sx, SA_XLOCKED); switch (sa) { #ifdef INET case AF_INET: if (cif->cif_naddrs != 0) break; while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) { ip_mfilter_remove(&imo->imo_head, imf); in_leavegroup(imf->imf_inm, NULL); ip_mfilter_free(imf); } break; #endif #ifdef INET6 case AF_INET6: if (cif->cif_naddrs6 != 0) break; while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) { ip6_mfilter_remove(&im6o->im6o_head, im6f); in6_leavegroup(im6f->im6f_in6m, NULL); ip6_mfilter_free(im6f); } break; #endif } } int carp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa) { struct m_tag *mtag; int vhid; if (!sa) return (0); switch (sa->sa_family) { #ifdef INET case AF_INET: break; #endif #ifdef INET6 case AF_INET6: break; #endif default: return (0); } mtag = m_tag_find(m, PACKET_TAG_CARP, NULL); if (mtag == NULL) return (0); bcopy(mtag + 1, &vhid, sizeof(vhid)); /* Set the source MAC address to the Virtual Router MAC Address. 
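 *
 * The virtual router MAC address is the IANA-reserved
 * 00:00:5e:00:01:XX, where XX is the VHID (RFC 5798); the bytes are
 * filled in below.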
*/ switch (ifp->if_type) { case IFT_ETHER: case IFT_BRIDGE: case IFT_L2VLAN: { struct ether_header *eh; eh = mtod(m, struct ether_header *); eh->ether_shost[0] = 0; eh->ether_shost[1] = 0; eh->ether_shost[2] = 0x5e; eh->ether_shost[3] = 0; eh->ether_shost[4] = 1; eh->ether_shost[5] = vhid; } break; default: printf("%s: carp is not supported for the %d interface type\n", if_name(ifp), ifp->if_type); return (EOPNOTSUPP); } return (0); } static struct carp_softc* carp_alloc(struct ifnet *ifp, carp_version_t version, int vhid) { struct carp_softc *sc; struct carp_if *cif; sx_assert(&carp_sx, SA_XLOCKED); if ((cif = ifp->if_carp) == NULL) cif = carp_alloc_if(ifp); sc = malloc(sizeof(*sc), M_CARP, M_WAITOK); *sc = (struct carp_softc ){ .sc_vhid = vhid, .sc_version = version, .sc_state = INIT, .sc_carpdev = ifp, .sc_ifasiz = sizeof(struct ifaddr *), .sc_addr = { 0, 0, 0x5e, 0, 1, vhid }, }; sc->sc_ifas = malloc(sc->sc_ifasiz, M_CARP, M_WAITOK|M_ZERO); switch (version) { case CARP_VERSION_CARP: sc->sc_advbase = CARP_DFLTINTV; sc->sc_init_counter = true; sc->sc_carpaddr.s_addr = htonl(INADDR_CARP_GROUP); sc->sc_carpaddr6.s6_addr16[0] = IPV6_ADDR_INT16_MLL; sc->sc_carpaddr6.s6_addr8[15] = 0x12; break; case CARP_VERSION_VRRPv3: sc->sc_vrrp_adv_inter = 100; sc->sc_vrrp_master_inter = sc->sc_vrrp_adv_inter; sc->sc_vrrp_prio = 100; break; } CARP_LOCK_INIT(sc); #ifdef INET callout_init_mtx(&sc->sc_md_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED); #endif #ifdef INET6 callout_init_mtx(&sc->sc_md6_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED); #endif callout_init_mtx(&sc->sc_ad_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED); CIF_LOCK(cif); TAILQ_INSERT_TAIL(&cif->cif_vrs, sc, sc_list); CIF_UNLOCK(cif); mtx_lock(&carp_mtx); LIST_INSERT_HEAD(&carp_list, sc, sc_next); mtx_unlock(&carp_mtx); return (sc); } static void carp_grow_ifas(struct carp_softc *sc) { struct ifaddr **new; new = malloc(sc->sc_ifasiz * 2, M_CARP, M_WAITOK | M_ZERO); CARP_LOCK(sc); bcopy(sc->sc_ifas, new, sc->sc_ifasiz); free(sc->sc_ifas, M_CARP); sc->sc_ifas = new; sc->sc_ifasiz *= 2; CARP_UNLOCK(sc); } static void carp_destroy(struct carp_softc *sc) { struct ifnet *ifp = sc->sc_carpdev; struct carp_if *cif = ifp->if_carp; sx_assert(&carp_sx, SA_XLOCKED); if (sc->sc_suppress) carp_demote_adj(-V_carp_ifdown_adj, "vhid removed"); CARP_UNLOCK(sc); CIF_LOCK(cif); TAILQ_REMOVE(&cif->cif_vrs, sc, sc_list); CIF_UNLOCK(cif); mtx_lock(&carp_mtx); LIST_REMOVE(sc, sc_next); mtx_unlock(&carp_mtx); callout_drain(&sc->sc_ad_tmo); #ifdef INET callout_drain(&sc->sc_md_tmo); #endif #ifdef INET6 callout_drain(&sc->sc_md6_tmo); #endif CARP_LOCK_DESTROY(sc); free(sc->sc_ifas, M_CARP); free(sc, M_CARP); } static struct carp_if* carp_alloc_if(struct ifnet *ifp) { struct carp_if *cif; int error; cif = malloc(sizeof(*cif), M_CARP, M_WAITOK|M_ZERO); if ((error = ifpromisc(ifp, 1)) != 0) printf("%s: ifpromisc(%s) failed: %d\n", __func__, if_name(ifp), error); else cif->cif_flags |= CIF_PROMISC; CIF_LOCK_INIT(cif); cif->cif_ifp = ifp; TAILQ_INIT(&cif->cif_vrs); IF_ADDR_WLOCK(ifp); ifp->if_carp = cif; if_ref(ifp); IF_ADDR_WUNLOCK(ifp); return (cif); } static void carp_free_if(struct carp_if *cif) { struct ifnet *ifp = cif->cif_ifp; CIF_LOCK_ASSERT(cif); KASSERT(TAILQ_EMPTY(&cif->cif_vrs), ("%s: softc list not empty", __func__)); IF_ADDR_WLOCK(ifp); ifp->if_carp = NULL; IF_ADDR_WUNLOCK(ifp); CIF_LOCK_DESTROY(cif); if (cif->cif_flags & CIF_PROMISC) ifpromisc(ifp, 0); if_rele(ifp); free(cif, M_CARP); } static bool carp_carprcp(void *arg, struct carp_softc *sc, int priv) { struct 
carpreq *carpr = arg; CARP_LOCK(sc); carpr->carpr_state = sc->sc_state; carpr->carpr_vhid = sc->sc_vhid; switch (sc->sc_version) { case CARP_VERSION_CARP: carpr->carpr_advbase = sc->sc_advbase; carpr->carpr_advskew = sc->sc_advskew; if (priv) bcopy(sc->sc_key, carpr->carpr_key, sizeof(carpr->carpr_key)); else bzero(carpr->carpr_key, sizeof(carpr->carpr_key)); break; case CARP_VERSION_VRRPv3: break; } CARP_UNLOCK(sc); return (true); } static int carp_ioctl_set(if_t ifp, struct carpkreq *carpr) { struct epoch_tracker et; struct carp_softc *sc = NULL; int error = 0; if (carpr->carpr_vhid <= 0 || carpr->carpr_vhid > CARP_MAXVHID) return (EINVAL); switch (carpr->carpr_version) { case CARP_VERSION_CARP: if (carpr->carpr_advbase != 0 && (carpr->carpr_advbase > 255 || carpr->carpr_advbase < CARP_DFLTINTV)) return (EINVAL); if (carpr->carpr_advskew < 0 || carpr->carpr_advskew >= 255) return (EINVAL); break; case CARP_VERSION_VRRPv3: /* XXXGL: shouldn't we check anything? */ break; default: return (EINVAL); } if (ifp->if_carp) { IFNET_FOREACH_CARP(ifp, sc) if (sc->sc_vhid == carpr->carpr_vhid) break; } if (sc == NULL) sc = carp_alloc(ifp, carpr->carpr_version, carpr->carpr_vhid); else if (sc->sc_version != carpr->carpr_version) return (EINVAL); CARP_LOCK(sc); switch (sc->sc_version) { case CARP_VERSION_CARP: if (carpr->carpr_advbase != 0) sc->sc_advbase = carpr->carpr_advbase; sc->sc_advskew = carpr->carpr_advskew; if (carpr->carpr_addr.s_addr != INADDR_ANY) sc->sc_carpaddr = carpr->carpr_addr; if (!IN6_IS_ADDR_UNSPECIFIED(&carpr->carpr_addr6)) { memcpy(&sc->sc_carpaddr6, &carpr->carpr_addr6, sizeof(sc->sc_carpaddr6)); } if (carpr->carpr_key[0] != '\0') { bcopy(carpr->carpr_key, sc->sc_key, sizeof(sc->sc_key)); carp_hmac_prepare(sc); } break; case CARP_VERSION_VRRPv3: if (carpr->carpr_vrrp_priority != 0) sc->sc_vrrp_prio = carpr->carpr_vrrp_priority; if (carpr->carpr_vrrp_adv_inter) sc->sc_vrrp_adv_inter = carpr->carpr_vrrp_adv_inter; break; } if (sc->sc_state != INIT && carpr->carpr_state != sc->sc_state) { switch (carpr->carpr_state) { case BACKUP: callout_stop(&sc->sc_ad_tmo); carp_set_state(sc, BACKUP, "user requested via ifconfig"); carp_setrun(sc, 0); carp_delroute(sc); break; case MASTER: NET_EPOCH_ENTER(et); carp_master_down_locked(sc, "user requested via ifconfig"); NET_EPOCH_EXIT(et); break; default: break; } } CARP_UNLOCK(sc); return (error); } static int carp_ioctl_get(if_t ifp, struct ucred *cred, struct carpreq *carpr, bool (*outfn)(void *, struct carp_softc *, int), void *arg) { int priveleged; struct carp_softc *sc; if (carpr->carpr_vhid < 0 || carpr->carpr_vhid > CARP_MAXVHID) return (EINVAL); if (carpr->carpr_count < 1) return (EMSGSIZE); if (ifp->if_carp == NULL) return (ENOENT); priveleged = (priv_check_cred(cred, PRIV_NETINET_CARP) == 0); if (carpr->carpr_vhid != 0) { IFNET_FOREACH_CARP(ifp, sc) if (sc->sc_vhid == carpr->carpr_vhid) break; if (sc == NULL) return (ENOENT); if (! outfn(arg, sc, priveleged)) return (ENOMEM); carpr->carpr_count = 1; } else { int count; count = 0; IFNET_FOREACH_CARP(ifp, sc) count++; if (count > carpr->carpr_count) return (EMSGSIZE); IFNET_FOREACH_CARP(ifp, sc) { if (! 
outfn(arg, sc, priveleged)) return (ENOMEM); carpr->carpr_count = count; } } return (0); } int carp_ioctl(struct ifreq *ifr, u_long cmd, struct thread *td) { struct carpreq carpr; struct carpkreq carprk = { .carpr_version = CARP_VERSION_CARP, }; struct ifnet *ifp; int error = 0; if ((error = copyin(ifr_data_get_ptr(ifr), &carpr, sizeof carpr))) return (error); ifp = ifunit_ref(ifr->ifr_name); if ((error = carp_is_supported_if(ifp)) != 0) goto out; if ((ifp->if_flags & IFF_MULTICAST) == 0) { error = EADDRNOTAVAIL; goto out; } sx_xlock(&carp_sx); switch (cmd) { case SIOCSVH: if ((error = priv_check(td, PRIV_NETINET_CARP))) break; memcpy(&carprk, &carpr, sizeof(carpr)); error = carp_ioctl_set(ifp, &carprk); break; case SIOCGVH: error = carp_ioctl_get(ifp, td->td_ucred, &carpr, carp_carprcp, &carpr); if (error == 0) { error = copyout(&carpr, (char *)ifr_data_get_ptr(ifr), carpr.carpr_count * sizeof(carpr)); } break; default: error = EINVAL; } sx_xunlock(&carp_sx); out: if (ifp != NULL) if_rele(ifp); return (error); } static int carp_get_vhid(struct ifaddr *ifa) { if (ifa == NULL || ifa->ifa_carp == NULL) return (0); return (ifa->ifa_carp->sc_vhid); } int carp_attach(struct ifaddr *ifa, int vhid) { struct ifnet *ifp = ifa->ifa_ifp; struct carp_if *cif = ifp->if_carp; struct carp_softc *sc; int index, error; KASSERT(ifa->ifa_carp == NULL, ("%s: ifa %p attached", __func__, ifa)); switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: #endif #ifdef INET6 case AF_INET6: #endif break; default: return (EPROTOTYPE); } sx_xlock(&carp_sx); if (ifp->if_carp == NULL) { sx_xunlock(&carp_sx); return (ENOPROTOOPT); } IFNET_FOREACH_CARP(ifp, sc) if (sc->sc_vhid == vhid) break; if (sc == NULL) { sx_xunlock(&carp_sx); return (ENOENT); } error = carp_multicast_setup(cif, ifa->ifa_addr->sa_family); if (error) { CIF_FREE(cif); sx_xunlock(&carp_sx); return (error); } index = sc->sc_naddrs + sc->sc_naddrs6 + 1; if (index > sc->sc_ifasiz / sizeof(struct ifaddr *)) carp_grow_ifas(sc); switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: cif->cif_naddrs++; sc->sc_naddrs++; break; #endif #ifdef INET6 case AF_INET6: cif->cif_naddrs6++; sc->sc_naddrs6++; break; #endif } ifa_ref(ifa); CARP_LOCK(sc); sc->sc_ifas[index - 1] = ifa; ifa->ifa_carp = sc; if (sc->sc_version == CARP_VERSION_CARP) carp_hmac_prepare(sc); carp_sc_state(sc); CARP_UNLOCK(sc); sx_xunlock(&carp_sx); return (0); } void carp_detach(struct ifaddr *ifa, bool keep_cif) { struct ifnet *ifp = ifa->ifa_ifp; struct carp_if *cif = ifp->if_carp; struct carp_softc *sc = ifa->ifa_carp; int i, index; KASSERT(sc != NULL, ("%s: %p not attached", __func__, ifa)); sx_xlock(&carp_sx); CARP_LOCK(sc); /* Shift array. 
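 *
 * Find the detached ifa in sc_ifas, then move every later entry one
 * slot to the left so the array stays dense; the vacated tail slot is
 * cleared.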
*/ index = sc->sc_naddrs + sc->sc_naddrs6; for (i = 0; i < index; i++) if (sc->sc_ifas[i] == ifa) break; KASSERT(i < index, ("%s: %p no backref", __func__, ifa)); for (; i < index - 1; i++) sc->sc_ifas[i] = sc->sc_ifas[i+1]; sc->sc_ifas[index - 1] = NULL; switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: cif->cif_naddrs--; sc->sc_naddrs--; break; #endif #ifdef INET6 case AF_INET6: cif->cif_naddrs6--; sc->sc_naddrs6--; break; #endif } carp_ifa_delroute(ifa); carp_multicast_cleanup(cif, ifa->ifa_addr->sa_family); ifa->ifa_carp = NULL; ifa_free(ifa); if (sc->sc_version == CARP_VERSION_CARP) carp_hmac_prepare(sc); carp_sc_state(sc); if (!keep_cif && sc->sc_naddrs == 0 && sc->sc_naddrs6 == 0) carp_destroy(sc); else CARP_UNLOCK(sc); if (!keep_cif) CIF_FREE(cif); sx_xunlock(&carp_sx); } static void carp_set_state(struct carp_softc *sc, int state, const char *reason) { CARP_LOCK_ASSERT(sc); if (sc->sc_state != state) { const char *carp_states[] = { CARP_STATES }; char subsys[IFNAMSIZ+5]; snprintf(subsys, IFNAMSIZ+5, "%u@%s", sc->sc_vhid, if_name(sc->sc_carpdev)); CARP_LOG("%s: %s -> %s (%s)\n", subsys, carp_states[sc->sc_state], carp_states[state], reason); sc->sc_state = state; devctl_notify("CARP", subsys, carp_states[state], NULL); } } static void carp_linkstate(struct ifnet *ifp) { struct carp_softc *sc; CIF_LOCK(ifp->if_carp); IFNET_FOREACH_CARP(ifp, sc) { CARP_LOCK(sc); carp_sc_state(sc); CARP_UNLOCK(sc); } CIF_UNLOCK(ifp->if_carp); } static void carp_sc_state(struct carp_softc *sc) { CARP_LOCK_ASSERT(sc); if (sc->sc_carpdev->if_link_state != LINK_STATE_UP || !(sc->sc_carpdev->if_flags & IFF_UP) || !V_carp_allow) { callout_stop(&sc->sc_ad_tmo); #ifdef INET callout_stop(&sc->sc_md_tmo); #endif #ifdef INET6 callout_stop(&sc->sc_md6_tmo); #endif carp_set_state(sc, INIT, "hardware interface down"); carp_setrun(sc, 0); carp_delroute(sc); if (!sc->sc_suppress) carp_demote_adj(V_carp_ifdown_adj, "interface down"); sc->sc_suppress = 1; } else { carp_set_state(sc, INIT, "hardware interface up"); carp_setrun(sc, 0); if (sc->sc_suppress) carp_demote_adj(-V_carp_ifdown_adj, "interface up"); sc->sc_suppress = 0; } } static void carp_demote_adj(int adj, char *reason) { atomic_add_int(&V_carp_demotion, adj); CARP_LOG("demoted by %d to %d (%s)\n", adj, V_carp_demotion, reason); taskqueue_enqueue(taskqueue_swi, &carp_sendall_task); } static int carp_allow_sysctl(SYSCTL_HANDLER_ARGS) { int new, error; struct carp_softc *sc; new = V_carp_allow; error = sysctl_handle_int(oidp, &new, 0, req); if (error || !req->newptr) return (error); if (V_carp_allow != new) { V_carp_allow = new; mtx_lock(&carp_mtx); LIST_FOREACH(sc, &carp_list, sc_next) { CARP_LOCK(sc); if (curvnet == sc->sc_carpdev->if_vnet) carp_sc_state(sc); CARP_UNLOCK(sc); } mtx_unlock(&carp_mtx); } return (0); } static int carp_dscp_sysctl(SYSCTL_HANDLER_ARGS) { int new, error; new = V_carp_dscp; error = sysctl_handle_int(oidp, &new, 0, req); if (error || !req->newptr) return (error); if (new < 0 || new > 63) return (EINVAL); V_carp_dscp = new; return (0); } static int carp_demote_adj_sysctl(SYSCTL_HANDLER_ARGS) { int new, error; new = V_carp_demotion; error = sysctl_handle_int(oidp, &new, 0, req); if (error || !req->newptr) return (error); carp_demote_adj(new, "sysctl"); return (0); } static int nlattr_get_carp_key(struct nlattr *nla, struct nl_pstate *npt, const void *arg, void *target) { if (__predict_false(NLA_DATA_LEN(nla) > CARP_KEY_LEN)) return (EINVAL); memcpy(target, NLA_DATA_CONST(nla), NLA_DATA_LEN(nla)); return (0); } struct 
carp_nl_send_args { struct nlmsghdr *hdr; struct nl_pstate *npt; }; static bool carp_nl_send(void *arg, struct carp_softc *sc, int priv) { struct carp_nl_send_args *nlsa = arg; struct nlmsghdr *hdr = nlsa->hdr; struct nl_pstate *npt = nlsa->npt; struct nl_writer *nw = npt->nw; struct genlmsghdr *ghdr_new; if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr))) { nlmsg_abort(nw); return (false); } ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr); if (ghdr_new == NULL) { nlmsg_abort(nw); return (false); } ghdr_new->cmd = CARP_NL_CMD_GET; ghdr_new->version = 0; ghdr_new->reserved = 0; CARP_LOCK(sc); nlattr_add_u32(nw, CARP_NL_VHID, sc->sc_vhid); nlattr_add_u32(nw, CARP_NL_STATE, sc->sc_state); nlattr_add_u8(nw, CARP_NL_VERSION, sc->sc_version); switch (sc->sc_version) { case CARP_VERSION_CARP: nlattr_add_s32(nw, CARP_NL_ADVBASE, sc->sc_advbase); nlattr_add_s32(nw, CARP_NL_ADVSKEW, sc->sc_advskew); nlattr_add_in_addr(nw, CARP_NL_ADDR, &sc->sc_carpaddr); nlattr_add_in6_addr(nw, CARP_NL_ADDR6, &sc->sc_carpaddr6); if (priv) nlattr_add(nw, CARP_NL_KEY, sizeof(sc->sc_key), sc->sc_key); break; case CARP_VERSION_VRRPv3: nlattr_add_u8(nw, CARP_NL_VRRP_PRIORITY, sc->sc_vrrp_prio); nlattr_add_u16(nw, CARP_NL_VRRP_ADV_INTER, sc->sc_vrrp_adv_inter); break; } CARP_UNLOCK(sc); if (! nlmsg_end(nw)) { nlmsg_abort(nw); return (false); } return (true); } struct nl_carp_parsed { unsigned int ifindex; char *ifname; uint32_t state; uint32_t vhid; int32_t advbase; int32_t advskew; char key[CARP_KEY_LEN]; struct in_addr addr; struct in6_addr addr6; carp_version_t version; uint8_t vrrp_prio; uint16_t vrrp_adv_inter; }; #define _OUT(_field) offsetof(struct nl_carp_parsed, _field) static const struct nlattr_parser nla_p_set[] = { { .type = CARP_NL_VHID, .off = _OUT(vhid), .cb = nlattr_get_uint32 }, { .type = CARP_NL_STATE, .off = _OUT(state), .cb = nlattr_get_uint32 }, { .type = CARP_NL_ADVBASE, .off = _OUT(advbase), .cb = nlattr_get_uint32 }, { .type = CARP_NL_ADVSKEW, .off = _OUT(advskew), .cb = nlattr_get_uint32 }, { .type = CARP_NL_KEY, .off = _OUT(key), .cb = nlattr_get_carp_key }, { .type = CARP_NL_IFINDEX, .off = _OUT(ifindex), .cb = nlattr_get_uint32 }, { .type = CARP_NL_ADDR, .off = _OUT(addr), .cb = nlattr_get_in_addr }, { .type = CARP_NL_ADDR6, .off = _OUT(addr6), .cb = nlattr_get_in6_addr }, { .type = CARP_NL_IFNAME, .off = _OUT(ifname), .cb = nlattr_get_string }, { .type = CARP_NL_VERSION, .off = _OUT(version), .cb = nlattr_get_uint8 }, { .type = CARP_NL_VRRP_PRIORITY, .off = _OUT(vrrp_prio), .cb = nlattr_get_uint8 }, { .type = CARP_NL_VRRP_ADV_INTER, .off = _OUT(vrrp_adv_inter), .cb = nlattr_get_uint16 }, }; NL_DECLARE_PARSER(carp_parser, struct genlmsghdr, nlf_p_empty, nla_p_set); #undef _OUT static int carp_nl_get(struct nlmsghdr *hdr, struct nl_pstate *npt) { struct nl_carp_parsed attrs = { }; struct carp_nl_send_args args; struct carpreq carpr = { }; struct epoch_tracker et; if_t ifp = NULL; int error; error = nl_parse_nlmsg(hdr, &carp_parser, npt, &attrs); if (error != 0) return (error); NET_EPOCH_ENTER(et); if (attrs.ifname != NULL) ifp = ifunit_ref(attrs.ifname); else if (attrs.ifindex != 0) ifp = ifnet_byindex_ref(attrs.ifindex); NET_EPOCH_EXIT(et); if ((error = carp_is_supported_if(ifp)) != 0) goto out; hdr->nlmsg_flags |= NLM_F_MULTI; args.hdr = hdr; args.npt = npt; carpr.carpr_vhid = attrs.vhid; carpr.carpr_count = CARP_MAXVHID; sx_xlock(&carp_sx); error = carp_ioctl_get(ifp, nlp_get_cred(npt->nlp), &carpr, carp_nl_send, &args); sx_xunlock(&carp_sx); if (! 
nlmsg_end_dump(npt->nw, error, hdr)) error = ENOMEM; out: if (ifp != NULL) if_rele(ifp); return (error); } static int carp_nl_set(struct nlmsghdr *hdr, struct nl_pstate *npt) { struct nl_carp_parsed attrs = { }; struct carpkreq carpr; struct epoch_tracker et; if_t ifp = NULL; int error; error = nl_parse_nlmsg(hdr, &carp_parser, npt, &attrs); if (error != 0) return (error); if (attrs.vhid <= 0 || attrs.vhid > CARP_MAXVHID) return (EINVAL); if (attrs.state > CARP_MAXSTATE) return (EINVAL); if (attrs.version == 0) /* compat with pre-VRRPv3 */ attrs.version = CARP_VERSION_CARP; switch (attrs.version) { case CARP_VERSION_CARP: if (attrs.advbase < 0 || attrs.advskew < 0) return (EINVAL); if (attrs.advbase > 255) return (EINVAL); if (attrs.advskew >= 255) return (EINVAL); break; case CARP_VERSION_VRRPv3: if (attrs.vrrp_adv_inter > VRRP_MAX_INTERVAL) return (EINVAL); break; default: return (EINVAL); } NET_EPOCH_ENTER(et); if (attrs.ifname != NULL) ifp = ifunit_ref(attrs.ifname); else if (attrs.ifindex != 0) ifp = ifnet_byindex_ref(attrs.ifindex); NET_EPOCH_EXIT(et); if ((error = carp_is_supported_if(ifp)) != 0) goto out; if ((ifp->if_flags & IFF_MULTICAST) == 0) { error = EADDRNOTAVAIL; goto out; } carpr.carpr_count = 1; carpr.carpr_vhid = attrs.vhid; carpr.carpr_state = attrs.state; carpr.carpr_version = attrs.version; switch (attrs.version) { case CARP_VERSION_CARP: carpr.carpr_advbase = attrs.advbase; carpr.carpr_advskew = attrs.advskew; carpr.carpr_addr = attrs.addr; carpr.carpr_addr6 = attrs.addr6; memcpy(&carpr.carpr_key, &attrs.key, sizeof(attrs.key)); break; case CARP_VERSION_VRRPv3: carpr.carpr_vrrp_priority = attrs.vrrp_prio; carpr.carpr_vrrp_adv_inter = attrs.vrrp_adv_inter; break; } sx_xlock(&carp_sx); error = carp_ioctl_set(ifp, &carpr); sx_xunlock(&carp_sx); out: if (ifp != NULL) if_rele(ifp); return (error); } static const struct nlhdr_parser *all_parsers[] = { &carp_parser }; static const struct genl_cmd carp_cmds[] = { { .cmd_num = CARP_NL_CMD_GET, .cmd_name = "SIOCGVH", .cmd_cb = carp_nl_get, .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP | GENL_CMD_CAP_HASPOL, }, { .cmd_num = CARP_NL_CMD_SET, .cmd_name = "SIOCSVH", .cmd_cb = carp_nl_set, .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL, .cmd_priv = PRIV_NETINET_CARP, }, }; static uint16_t carp_family_id; static void carp_nl_register(void) { bool ret __diagused; NL_VERIFY_PARSERS(all_parsers); carp_family_id = genl_register_family(CARP_NL_FAMILY_NAME, 0, 2, CARP_NL_CMD_MAX); MPASS(carp_family_id != 0); ret = genl_register_cmds(carp_family_id, carp_cmds, nitems(carp_cmds)); MPASS(ret); } static void carp_nl_unregister(void) { genl_unregister_family(carp_family_id); } static void carp_mod_cleanup(void) { carp_nl_unregister(); #ifdef INET (void)ipproto_unregister(IPPROTO_CARP); carp_iamatch_p = NULL; #endif #ifdef INET6 (void)ip6proto_unregister(IPPROTO_CARP); carp_iamatch6_p = NULL; carp_macmatch6_p = NULL; #endif carp_ioctl_p = NULL; carp_attach_p = NULL; carp_detach_p = NULL; carp_get_vhid_p = NULL; carp_linkstate_p = NULL; carp_forus_p = NULL; carp_output_p = NULL; carp_demote_adj_p = NULL; carp_master_p = NULL; mtx_unlock(&carp_mtx); taskqueue_drain(taskqueue_swi, &carp_sendall_task); mtx_destroy(&carp_mtx); sx_destroy(&carp_sx); } static void ipcarp_sysinit(void) { /* Load allow as tunable so to postpone carp start after module load */ TUNABLE_INT_FETCH("net.inet.carp.allow", &V_carp_allow); } VNET_SYSINIT(ip_carp, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipcarp_sysinit, NULL); static int carp_mod_load(void) { int err; 
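
	/* Global (non-VNET) carp state: the softc list and its locks. */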
mtx_init(&carp_mtx, "carp_mtx", NULL, MTX_DEF); sx_init(&carp_sx, "carp_sx"); LIST_INIT(&carp_list); carp_get_vhid_p = carp_get_vhid; carp_forus_p = carp_forus; carp_output_p = carp_output; carp_linkstate_p = carp_linkstate; carp_ioctl_p = carp_ioctl; carp_attach_p = carp_attach; carp_detach_p = carp_detach; carp_demote_adj_p = carp_demote_adj; carp_master_p = carp_master; #ifdef INET6 carp_iamatch6_p = carp_iamatch6; carp_macmatch6_p = carp_macmatch6; err = ip6proto_register(IPPROTO_CARP, carp6_input, NULL); if (err) { printf("carp: error %d registering with INET6\n", err); carp_mod_cleanup(); return (err); } #endif #ifdef INET carp_iamatch_p = carp_iamatch; err = ipproto_register(IPPROTO_CARP, carp_input, NULL); if (err) { printf("carp: error %d registering with INET\n", err); carp_mod_cleanup(); return (err); } #endif carp_nl_register(); return (0); } static int carp_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: return carp_mod_load(); /* NOTREACHED */ case MOD_UNLOAD: mtx_lock(&carp_mtx); if (LIST_EMPTY(&carp_list)) carp_mod_cleanup(); else { mtx_unlock(&carp_mtx); return (EBUSY); } break; default: return (EINVAL); } return (0); } static moduledata_t carp_mod = { "carp", carp_modevent, 0 }; DECLARE_MODULE(carp, carp_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); diff --git a/sys/netinet/ip_gre.c b/sys/netinet/ip_gre.c index c9356edb0608..01a6ef4cd670 100644 --- a/sys/netinet/ip_gre.c +++ b/sys/netinet/ip_gre.c @@ -1,582 +1,582 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998 The NetBSD Foundation, Inc. * Copyright (c) 2014, 2018 Andrey V. Elsukov * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Heiko W.Rupp * * IPv6-over-GRE contributed by Gert Doering * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $NetBSD: ip_gre.c,v 1.29 2003/09/05 23:02:43 itojun Exp $ */ #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif #include #include #define GRE_TTL 30 VNET_DEFINE(int, ip_gre_ttl) = GRE_TTL; #define V_ip_gre_ttl VNET(ip_gre_ttl) SYSCTL_INT(_net_inet_ip, OID_AUTO, grettl, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip_gre_ttl), 0, "Default TTL value for encapsulated packets"); struct in_gre_socket { struct gre_socket base; in_addr_t addr; }; VNET_DEFINE_STATIC(struct gre_sockets *, ipv4_sockets) = NULL; VNET_DEFINE_STATIC(struct gre_list *, ipv4_hashtbl) = NULL; VNET_DEFINE_STATIC(struct gre_list *, ipv4_srchashtbl) = NULL; #define V_ipv4_sockets VNET(ipv4_sockets) #define V_ipv4_hashtbl VNET(ipv4_hashtbl) #define V_ipv4_srchashtbl VNET(ipv4_srchashtbl) #define GRE_HASH(src, dst) (V_ipv4_hashtbl[\ in_gre_hashval((src), (dst)) & (GRE_HASH_SIZE - 1)]) #define GRE_SRCHASH(src) (V_ipv4_srchashtbl[\ fnv_32_buf(&(src), sizeof(src), FNV1_32_INIT) & (GRE_HASH_SIZE - 1)]) #define GRE_SOCKHASH(src) (V_ipv4_sockets[\ fnv_32_buf(&(src), sizeof(src), FNV1_32_INIT) & (GRE_HASH_SIZE - 1)]) #define GRE_HASH_SC(sc) GRE_HASH((sc)->gre_oip.ip_src.s_addr,\ (sc)->gre_oip.ip_dst.s_addr) static uint32_t in_gre_hashval(in_addr_t src, in_addr_t dst) { uint32_t ret; ret = fnv_32_buf(&src, sizeof(src), FNV1_32_INIT); return (fnv_32_buf(&dst, sizeof(dst), ret)); } static struct gre_socket* in_gre_lookup_socket(in_addr_t addr) { struct gre_socket *gs; struct in_gre_socket *s; CK_LIST_FOREACH(gs, &GRE_SOCKHASH(addr), chain) { s = __containerof(gs, struct in_gre_socket, base); if (s->addr == addr) break; } return (gs); } static int in_gre_checkdup(const struct gre_softc *sc, in_addr_t src, in_addr_t dst, uint32_t opts) { struct gre_list *head; struct gre_softc *tmp; struct gre_socket *gs; if (sc->gre_family == AF_INET && sc->gre_oip.ip_src.s_addr == src && sc->gre_oip.ip_dst.s_addr == dst && (sc->gre_options & GRE_UDPENCAP) == (opts & GRE_UDPENCAP)) return (EEXIST); if (opts & GRE_UDPENCAP) { gs = in_gre_lookup_socket(src); if (gs == NULL) return (0); head = &gs->list; } else head = &GRE_HASH(src, dst); CK_LIST_FOREACH(tmp, head, chain) { if (tmp == sc) continue; if (tmp->gre_oip.ip_src.s_addr == src && tmp->gre_oip.ip_dst.s_addr == dst) return (EADDRNOTAVAIL); } return (0); } static int in_gre_lookup(const struct mbuf *m, int off, int proto, void **arg) { const struct ip *ip; struct gre_softc *sc; if (V_ipv4_hashtbl == NULL) return (0); NET_EPOCH_ASSERT(); ip = mtod(m, const struct ip *); CK_LIST_FOREACH(sc, &GRE_HASH(ip->ip_dst.s_addr, ip->ip_src.s_addr), chain) { /* * This is an inbound packet, its ip_dst is source address * in softc. */ if (sc->gre_oip.ip_src.s_addr == ip->ip_dst.s_addr && sc->gre_oip.ip_dst.s_addr == ip->ip_src.s_addr) { if ((GRE2IFP(sc)->if_flags & IFF_UP) == 0) return (0); *arg = sc; return (ENCAP_DRV_LOOKUP); } } return (0); } /* * Check that ingress address belongs to local host. */ static void in_gre_set_running(struct gre_softc *sc) { if (in_localip(sc->gre_oip.ip_src)) GRE2IFP(sc)->if_drv_flags |= IFF_DRV_RUNNING; else GRE2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING; } /* * ifaddr_event handler. * Clear IFF_DRV_RUNNING flag when ingress address disappears to prevent * source address spoofing. 
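 *
 * Registered via ip_encap_register_srcaddr() in in_gre_init() below, so
 * this runs on every IPv4 address change and recomputes
 * in_gre_set_running() for each tunnel hashed on that source address.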
*/ static void in_gre_srcaddr(void *arg __unused, const struct sockaddr *sa, int event __unused) { const struct sockaddr_in *sin; struct gre_softc *sc; /* Check that VNET is ready */ if (V_ipv4_hashtbl == NULL) return; NET_EPOCH_ASSERT(); sin = (const struct sockaddr_in *)sa; CK_LIST_FOREACH(sc, &GRE_SRCHASH(sin->sin_addr.s_addr), srchash) { if (sc->gre_oip.ip_src.s_addr != sin->sin_addr.s_addr) continue; in_gre_set_running(sc); } } static bool in_gre_udp_input(struct mbuf *m, int off, struct inpcb *inp, const struct sockaddr *sa, void *ctx) { struct gre_socket *gs; struct gre_softc *sc; in_addr_t dst; NET_EPOCH_ASSERT(); gs = (struct gre_socket *)ctx; dst = ((const struct sockaddr_in *)sa)->sin_addr.s_addr; CK_LIST_FOREACH(sc, &gs->list, chain) { if (sc->gre_oip.ip_dst.s_addr == dst) break; } if (sc != NULL && (GRE2IFP(sc)->if_flags & IFF_UP) != 0){ gre_input(m, off + sizeof(struct udphdr), IPPROTO_UDP, sc); return (true); } m_freem(m); return (true); } static int in_gre_setup_socket(struct gre_softc *sc) { struct sockopt sopt; struct sockaddr_in sin; struct in_gre_socket *s; struct gre_socket *gs; in_addr_t addr; int error, value; /* * NOTE: we are protected with gre_ioctl_sx lock. * * First check that socket is already configured. * If so, check that source address was not changed. * If address is different, check that there are no other tunnels * and close socket. */ addr = sc->gre_oip.ip_src.s_addr; gs = sc->gre_so; if (gs != NULL) { s = __containerof(gs, struct in_gre_socket, base); if (s->addr != addr) { if (CK_LIST_EMPTY(&gs->list)) { CK_LIST_REMOVE(gs, chain); soclose(gs->so); NET_EPOCH_CALL(gre_sofree, &gs->epoch_ctx); } gs = sc->gre_so = NULL; } } if (gs == NULL) { /* * Check that socket for given address is already * configured. */ gs = in_gre_lookup_socket(addr); if (gs == NULL) { s = malloc(sizeof(*s), M_GRE, M_WAITOK | M_ZERO); s->addr = addr; gs = &s->base; error = socreate(sc->gre_family, &gs->so, SOCK_DGRAM, IPPROTO_UDP, curthread->td_ucred, curthread); if (error != 0) { if_printf(GRE2IFP(sc), "cannot create socket: %d\n", error); free(s, M_GRE); return (error); } error = udp_set_kernel_tunneling(gs->so, in_gre_udp_input, NULL, gs); if (error != 0) { if_printf(GRE2IFP(sc), "cannot set UDP tunneling: %d\n", error); goto fail; } memset(&sopt, 0, sizeof(sopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = IPPROTO_IP; sopt.sopt_name = IP_BINDANY; sopt.sopt_val = &value; sopt.sopt_valsize = sizeof(value); value = 1; error = sosetopt(gs->so, &sopt); if (error != 0) { if_printf(GRE2IFP(sc), "cannot set IP_BINDANY opt: %d\n", error); goto fail; } memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_len = sizeof(sin); sin.sin_addr.s_addr = addr; sin.sin_port = htons(GRE_UDPPORT); error = sobind(gs->so, (struct sockaddr *)&sin, curthread); if (error != 0) { if_printf(GRE2IFP(sc), "cannot bind socket: %d\n", error); goto fail; } /* Add socket to the chain */ CK_LIST_INSERT_HEAD(&GRE_SOCKHASH(addr), gs, chain); } } /* Add softc to the socket's list */ CK_LIST_INSERT_HEAD(&gs->list, sc, chain); sc->gre_so = gs; return (0); fail: soclose(gs->so); free(s, M_GRE); return (error); } static int in_gre_attach(struct gre_softc *sc) { struct epoch_tracker et; struct grehdr *gh; int error; if (sc->gre_options & GRE_UDPENCAP) { sc->gre_csumflags = CSUM_UDP; sc->gre_hlen = sizeof(struct greudp); sc->gre_oip.ip_p = IPPROTO_UDP; gh = &sc->gre_udphdr->gi_gre; gre_update_udphdr(sc, &sc->gre_udp, in_pseudo(sc->gre_oip.ip_src.s_addr, sc->gre_oip.ip_dst.s_addr, 0)); } else { sc->gre_hlen = 
sizeof(struct greip); sc->gre_oip.ip_p = IPPROTO_GRE; gh = &sc->gre_iphdr->gi_gre; } sc->gre_oip.ip_v = IPVERSION; sc->gre_oip.ip_hl = sizeof(struct ip) >> 2; gre_update_hdr(sc, gh); /* * If we return error, this means that sc is not linked, * and caller should reset gre_family and free(sc->gre_hdr). */ if (sc->gre_options & GRE_UDPENCAP) { error = in_gre_setup_socket(sc); if (error != 0) return (error); } else CK_LIST_INSERT_HEAD(&GRE_HASH_SC(sc), sc, chain); CK_LIST_INSERT_HEAD(&GRE_SRCHASH(sc->gre_oip.ip_src.s_addr), sc, srchash); /* Set IFF_DRV_RUNNING if interface is ready */ NET_EPOCH_ENTER(et); in_gre_set_running(sc); NET_EPOCH_EXIT(et); return (0); } int in_gre_setopts(struct gre_softc *sc, u_long cmd, uint32_t value) { int error; /* NOTE: we are protected with gre_ioctl_sx lock */ MPASS(cmd == GRESKEY || cmd == GRESOPTS || cmd == GRESPORT); MPASS(sc->gre_family == AF_INET); /* * If we are going to change encapsulation protocol, do check * for duplicate tunnels. Return EEXIST here to do not confuse * user. */ if (cmd == GRESOPTS && (sc->gre_options & GRE_UDPENCAP) != (value & GRE_UDPENCAP) && in_gre_checkdup(sc, sc->gre_oip.ip_src.s_addr, sc->gre_oip.ip_dst.s_addr, value) == EADDRNOTAVAIL) return (EEXIST); CK_LIST_REMOVE(sc, chain); CK_LIST_REMOVE(sc, srchash); GRE_WAIT(); switch (cmd) { case GRESKEY: sc->gre_key = value; break; case GRESOPTS: sc->gre_options = value; break; case GRESPORT: sc->gre_port = value; break; } error = in_gre_attach(sc); if (error != 0) { sc->gre_family = 0; free(sc->gre_hdr, M_GRE); } return (error); } int in_gre_ioctl(struct gre_softc *sc, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct sockaddr_in *dst, *src; struct ip *ip; int error; /* NOTE: we are protected with gre_ioctl_sx lock */ error = EINVAL; switch (cmd) { case SIOCSIFPHYADDR: src = &((struct in_aliasreq *)data)->ifra_addr; dst = &((struct in_aliasreq *)data)->ifra_dstaddr; /* sanity checks */ if (src->sin_family != dst->sin_family || src->sin_family != AF_INET || src->sin_len != dst->sin_len || src->sin_len != sizeof(*src)) break; if (src->sin_addr.s_addr == INADDR_ANY || dst->sin_addr.s_addr == INADDR_ANY) { error = EADDRNOTAVAIL; break; } if (V_ipv4_hashtbl == NULL) { V_ipv4_hashtbl = gre_hashinit(); V_ipv4_srchashtbl = gre_hashinit(); V_ipv4_sockets = (struct gre_sockets *)gre_hashinit(); } error = in_gre_checkdup(sc, src->sin_addr.s_addr, dst->sin_addr.s_addr, sc->gre_options); if (error == EADDRNOTAVAIL) break; if (error == EEXIST) { /* Addresses are the same. Just return. */ error = 0; break; } ip = malloc(sizeof(struct greudp) + 3 * sizeof(uint32_t), M_GRE, M_WAITOK | M_ZERO); ip->ip_src.s_addr = src->sin_addr.s_addr; ip->ip_dst.s_addr = dst->sin_addr.s_addr; if (sc->gre_family != 0) { /* Detach existing tunnel first */ CK_LIST_REMOVE(sc, chain); CK_LIST_REMOVE(sc, srchash); GRE_WAIT(); free(sc->gre_hdr, M_GRE); /* XXX: should we notify about link state change? */ } sc->gre_family = AF_INET; sc->gre_hdr = ip; sc->gre_oseq = 0; sc->gre_iseq = UINT32_MAX; error = in_gre_attach(sc); if (error != 0) { sc->gre_family = 0; free(sc->gre_hdr, M_GRE); } break; case SIOCGIFPSRCADDR: case SIOCGIFPDSTADDR: if (sc->gre_family != AF_INET) { error = EADDRNOTAVAIL; break; } src = (struct sockaddr_in *)&ifr->ifr_addr; memset(src, 0, sizeof(*src)); src->sin_family = AF_INET; src->sin_len = sizeof(*src); src->sin_addr = (cmd == SIOCGIFPSRCADDR) ? 
sc->gre_oip.ip_src: sc->gre_oip.ip_dst; error = prison_if(curthread->td_ucred, (struct sockaddr *)src); if (error != 0) memset(src, 0, sizeof(*src)); break; } return (error); } int in_gre_output(struct mbuf *m, int af, int hlen) { struct greip *gi; gi = mtod(m, struct greip *); switch (af) { case AF_INET: /* * gre_transmit() has used M_PREPEND() that doesn't guarantee * m_data is contiguous more than hlen bytes. Use m_copydata() * here to avoid m_pullup(). */ m_copydata(m, hlen + offsetof(struct ip, ip_tos), sizeof(u_char), &gi->gi_ip.ip_tos); m_copydata(m, hlen + offsetof(struct ip, ip_id), sizeof(u_short), (caddr_t)&gi->gi_ip.ip_id); break; #ifdef INET6 case AF_INET6: gi->gi_ip.ip_tos = 0; /* XXX */ - ip_fillid(&gi->gi_ip); + ip_fillid(&gi->gi_ip, V_ip_random_id); break; #endif } gi->gi_ip.ip_ttl = V_ip_gre_ttl; gi->gi_ip.ip_len = htons(m->m_pkthdr.len); return (ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL)); } static const struct srcaddrtab *ipv4_srcaddrtab = NULL; static const struct encaptab *ecookie = NULL; static const struct encap_config ipv4_encap_cfg = { .proto = IPPROTO_GRE, .min_length = sizeof(struct greip) + sizeof(struct ip), .exact_match = ENCAP_DRV_LOOKUP, .lookup = in_gre_lookup, .input = gre_input }; void in_gre_init(void) { if (!IS_DEFAULT_VNET(curvnet)) return; ipv4_srcaddrtab = ip_encap_register_srcaddr(in_gre_srcaddr, NULL, M_WAITOK); ecookie = ip_encap_attach(&ipv4_encap_cfg, NULL, M_WAITOK); } void in_gre_uninit(void) { if (IS_DEFAULT_VNET(curvnet)) { ip_encap_detach(ecookie); ip_encap_unregister_srcaddr(ipv4_srcaddrtab); } if (V_ipv4_hashtbl != NULL) { gre_hashdestroy(V_ipv4_hashtbl); V_ipv4_hashtbl = NULL; GRE_WAIT(); gre_hashdestroy(V_ipv4_srchashtbl); gre_hashdestroy((struct gre_list *)V_ipv4_sockets); } } diff --git a/sys/netinet/ip_id.c b/sys/netinet/ip_id.c index 12dd6c8bf972..738b7eceb448 100644 --- a/sys/netinet/ip_id.c +++ b/sys/netinet/ip_id.c @@ -1,298 +1,298 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Michael J. Silbersack. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include /* * IP ID generation is a fascinating topic. * * In order to avoid ID collisions during packet reassembly, common sense * dictates that the period between reuse of IDs be as large as possible. 
* This leads to the classic implementation of a system-wide counter, thereby * ensuring that IDs repeat only once every 2^16 packets. * * Subsequent security researchers have pointed out that using a global * counter makes ID values predictable. This predictability allows traffic * analysis, idle scanning, and even packet injection in specific cases. * These results suggest that IP IDs should be as random as possible. * * The "searchable queues" algorithm used in this IP ID implementation was * proposed by Amit Klein. It is a compromise between the above two * viewpoints that has provable behavior that can be tuned to the user's * requirements. * * The basic concept is that we supplement a standard random number generator * with a queue of the last L IDs that we have handed out to ensure that all * IDs have a period of at least L. * * To efficiently implement this idea, we keep two data structures: a * circular array of IDs of size L and a bitstring of 65536 bits. * * To start, we ask the RNG for a new ID. A quick index into the bitstring * is used to determine if this is a recently used value. The process is * repeated until a value is returned that is not in the bitstring. * * Having found a usable ID, we remove the ID stored at the current position * in the queue from the bitstring and replace it with our new ID. Our new * ID is then added to the bitstring and the queue pointer is incremented. * * The lower limit of 512 was chosen because there doesn't seem to be much * point to having a smaller value. The upper limit of 32768 was chosen for * two reasons. First, every step above 32768 decreases the entropy. Taken * to an extreme, 65533 would offer 1 bit of entropy. Second, the number of * attempts it takes the algorithm to find an unused ID drastically * increases, killing performance. The default value of 8192 was chosen * because it provides a good tradeoff between randomness and non-repetition. * * With L=8192, the queue will use 16K of memory. The bitstring always * uses 8K of memory. No memory is allocated until the use of random ids is * enabled. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * By default we generate IP ID only for non-atomic datagrams, as * suggested by RFC6864. We use per-CPU counter for that, or if * user wants to, we can turn on random ID generation. */ VNET_DEFINE_STATIC(int, ip_rfc6864) = 1; -VNET_DEFINE_STATIC(int, ip_do_randomid) = 0; #define V_ip_rfc6864 VNET(ip_rfc6864) -#define V_ip_do_randomid VNET(ip_do_randomid) + +VNET_DEFINE(int, ip_random_id) = 0; /* * Random ID state engine. */ static MALLOC_DEFINE(M_IPID, "ipid", "randomized ip id state"); VNET_DEFINE_STATIC(uint16_t *, id_array); VNET_DEFINE_STATIC(bitstr_t *, id_bits); VNET_DEFINE_STATIC(int, array_ptr); VNET_DEFINE_STATIC(int, array_size); VNET_DEFINE_STATIC(int, random_id_collisions); VNET_DEFINE_STATIC(int, random_id_total); VNET_DEFINE_STATIC(struct mtx, ip_id_mtx); #define V_id_array VNET(id_array) #define V_id_bits VNET(id_bits) #define V_array_ptr VNET(array_ptr) #define V_array_size VNET(array_size) #define V_random_id_collisions VNET(random_id_collisions) #define V_random_id_total VNET(random_id_total) #define V_ip_id_mtx VNET(ip_id_mtx) /* * Non-random ID state engine is simply a per-cpu counter. 
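 *
 * Each CPU increments its own 64-bit counter and ip_fillid() keeps only
 * the low sixteen bits, so the common case needs no lock and no atomic
 * operation.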
 */
VNET_DEFINE_STATIC(counter_u64_t, ip_id);
#define	V_ip_id		VNET(ip_id)

-static int	sysctl_ip_randomid(SYSCTL_HANDLER_ARGS);
+static int	sysctl_ip_random_id(SYSCTL_HANDLER_ARGS);
static int	sysctl_ip_id_change(SYSCTL_HANDLER_ARGS);
static void	ip_initid(int);
static uint16_t	ip_randomid(void);
static void	ipid_sysinit(void);
static void	ipid_sysuninit(void);

SYSCTL_DECL(_net_inet_ip);
SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id,
    CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_MPSAFE,
-    &VNET_NAME(ip_do_randomid), 0, sysctl_ip_randomid, "IU",
+    &VNET_NAME(ip_random_id), 0, sysctl_ip_random_id, "IU",
    "Assign random ip_id values");
SYSCTL_INT(_net_inet_ip, OID_AUTO, rfc6864, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_rfc6864), 0, "Use constant IP ID for atomic datagrams");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id_period,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_MPSAFE,
    &VNET_NAME(array_size), 0, sysctl_ip_id_change, "IU", "IP ID Array size");
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_collisions,
    CTLFLAG_RD | CTLFLAG_VNET,
    &VNET_NAME(random_id_collisions), 0, "Count of IP ID collisions");
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_total, CTLFLAG_RD | CTLFLAG_VNET,
    &VNET_NAME(random_id_total), 0, "Count of IP IDs created");

static int
-sysctl_ip_randomid(SYSCTL_HANDLER_ARGS)
+sysctl_ip_random_id(SYSCTL_HANDLER_ARGS)
{
	int error, new;

-	new = V_ip_do_randomid;
+	new = V_ip_random_id;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (new != 0 && new != 1)
		return (EINVAL);
-	if (new == V_ip_do_randomid)
+	if (new == V_ip_random_id)
		return (0);
-	if (new == 1 && V_ip_do_randomid == 0)
+	if (new == 1 && V_ip_random_id == 0)
		ip_initid(8192);
	/* We don't free memory when turning random ID off, due to race. */
-	V_ip_do_randomid = new;
+	V_ip_random_id = new;
	return (0);
}

static int
sysctl_ip_id_change(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_array_size;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new >= 512 && new <= 32768)
			ip_initid(new);
		else
			error = EINVAL;
	}
	return (error);
}

static void
ip_initid(int new_size)
{
	uint16_t *new_array;
	bitstr_t *new_bits;

	new_array = malloc(new_size * sizeof(uint16_t), M_IPID,
	    M_WAITOK | M_ZERO);
	new_bits = malloc(bitstr_size(65536), M_IPID, M_WAITOK | M_ZERO);

	mtx_lock(&V_ip_id_mtx);
	if (V_id_array != NULL) {
		free(V_id_array, M_IPID);
		free(V_id_bits, M_IPID);
	}
	V_id_array = new_array;
	V_id_bits = new_bits;
	V_array_size = new_size;
	V_array_ptr = 0;
	V_random_id_collisions = 0;
	V_random_id_total = 0;
	mtx_unlock(&V_ip_id_mtx);
}

static uint16_t
ip_randomid(void)
{
	uint16_t new_id;

	mtx_lock(&V_ip_id_mtx);
	/*
	 * To avoid a conflict with the zeros that the array is initially
	 * filled with, we never hand out an id of zero.
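	 *
	 * Each draw that hits zero or a value still marked in the
	 * bitstring is retried, and each retry is counted for the
	 * net.inet.ip.random_id_collisions sysctl.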
	 */
	new_id = 0;
	do {
		if (new_id != 0)
			V_random_id_collisions++;
		arc4rand(&new_id, sizeof(new_id), 0);
	} while (bit_test(V_id_bits, new_id) || new_id == 0);
	bit_clear(V_id_bits, V_id_array[V_array_ptr]);
	bit_set(V_id_bits, new_id);
	V_id_array[V_array_ptr] = new_id;
	V_array_ptr++;
	if (V_array_ptr == V_array_size)
		V_array_ptr = 0;
	V_random_id_total++;
	mtx_unlock(&V_ip_id_mtx);
	return (new_id);
}

void
-ip_fillid(struct ip *ip)
+ip_fillid(struct ip *ip, bool do_randomid)
{

	/*
	 * Per RFC6864 Section 4
	 *
	 * o  Atomic datagrams: (DF==1) && (MF==0) && (frag_offset==0)
	 * o  Non-atomic datagrams: (DF==0) || (MF==1) || (frag_offset>0)
	 */
	if (V_ip_rfc6864 && (ip->ip_off & htons(IP_DF)) == htons(IP_DF))
		ip->ip_id = 0;
-	else if (V_ip_do_randomid)
+	else if (do_randomid)
		ip->ip_id = ip_randomid();
	else {
		counter_u64_add(V_ip_id, 1);
		/*
		 * There are two issues with this trick, to be kept in mind.
		 * 1) We can migrate between counter_u64_add() and the next
		 *    line, and grab a counter from another CPU, resulting in
		 *    too quick ID reuse.  This is tolerable in our particular
		 *    case, since the probability of such an event is much
		 *    lower than reuse of an ID due to legitimate overflow,
		 *    which at modern Internet speeds happens all the time.
		 * 2) We are relying on the fact that counter(9) is based on
		 *    the UMA_ZONE_PCPU uma(9) zone.  We also take only the
		 *    last sixteen bits of a counter, so we don't care that
		 *    machines with a 32-bit word update their counters
		 *    non-atomically.
		 */
		ip->ip_id = htons((*(uint64_t *)zpcpu_get(V_ip_id)) & 0xffff);
	}
}

static void
ipid_sysinit(void)
{
	int i;

	mtx_init(&V_ip_id_mtx, "ip_id_mtx", NULL, MTX_DEF);
	V_ip_id = counter_u64_alloc(M_WAITOK);

	CPU_FOREACH(i)
		arc4rand(zpcpu_get_cpu(V_ip_id, i), sizeof(uint64_t), 0);
}
VNET_SYSINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipid_sysinit, NULL);

static void
ipid_sysuninit(void)
{

	if (V_id_array != NULL) {
		free(V_id_array, M_IPID);
		free(V_id_bits, M_IPID);
	}
	counter_u64_free(V_ip_id);
	mtx_destroy(&V_ip_id_mtx);
}
VNET_SYSUNINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ipid_sysuninit,
    NULL);

diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index e224d2e6daf5..d30bd42ec578 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -1,2920 +1,2920 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989 Stephen Deering
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IP multicast forwarding procedures * * Written by David Waitzman, BBN Labs, August 1988. * Modified by Steve Deering, Stanford, February 1989. * Modified by Mark J. Steiglitz, Stanford, May, 1991 * Modified by Van Jacobson, LBL, January 1993 * Modified by Ajit Thyagarajan, PARC, August 1993 * Modified by Bill Fenner, PARC, April 1995 * Modified by Ahmed Helmy, SGI, June 1996 * Modified by George Edmond Eddy (Rusty), ISI, February 1998 * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000 * Modified by Hitoshi Asaeda, WIDE, August 2000 * Modified by Pavlin Radoslavov, ICSI, October 2002 * Modified by Wojciech Macek, Semihalf, May 2021 * * MROUTING Revision: 3.5 * and PIM-SMv2 and PIM-DM support, advanced API support, * bandwidth metering and signaling */ /* * TODO: Prefix functions with ipmf_. * TODO: Maintain a refcount on if_allmulti() in ifnet or in the protocol * domain attachment (if_afdata) so we can track consumers of that service. * TODO: Deprecate routing socket path for SIOCGETSGCNT and SIOCGETVIFCNT, * move it to socket options. * TODO: Cleanup LSRR removal further. * TODO: Push RSVP stubs into raw_ip.c. * TODO: Use bitstring.h for vif set. * TODO: Fix mrt6_ioctl dangling ref when dynamically loaded. * TODO: Sync ip6_mroute.c with this file. */ #include #include "opt_inet.h" #include "opt_mrouting.h" #define _PIM_VT 1 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef KTR_IPMF #define KTR_IPMF KTR_INET #endif #define VIFI_INVALID ((vifi_t) -1) static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast forwarding cache"); /* * Locking. We use two locks: one for the virtual interface table and * one for the forwarding table. These locks may be nested in which case * the VIF lock must always be taken first. Note that each lock is used * to cover not only the specific data structure but also related data * structures. 
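 *
 * (In the code below both tables are in fact serialized by the single
 * mrouter_lock rwlock behind the MRW_* macros, with mrouter_teardown
 * guarding final shutdown.)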
*/ static struct sx __exclusive_cache_line mrouter_teardown; #define MRW_TEARDOWN_WLOCK() sx_xlock(&mrouter_teardown) #define MRW_TEARDOWN_WUNLOCK() sx_xunlock(&mrouter_teardown) #define MRW_TEARDOWN_LOCK_INIT() \ sx_init(&mrouter_teardown, "IPv4 multicast forwarding teardown") #define MRW_TEARDOWN_LOCK_DESTROY() sx_destroy(&mrouter_teardown) static struct rwlock mrouter_lock; #define MRW_RLOCK() rw_rlock(&mrouter_lock) #define MRW_WLOCK() rw_wlock(&mrouter_lock) #define MRW_RUNLOCK() rw_runlock(&mrouter_lock) #define MRW_WUNLOCK() rw_wunlock(&mrouter_lock) #define MRW_UNLOCK() rw_unlock(&mrouter_lock) #define MRW_LOCK_ASSERT() rw_assert(&mrouter_lock, RA_LOCKED) #define MRW_WLOCK_ASSERT() rw_assert(&mrouter_lock, RA_WLOCKED) #define MRW_LOCK_TRY_UPGRADE() rw_try_upgrade(&mrouter_lock) #define MRW_WOWNED() rw_wowned(&mrouter_lock) #define MRW_LOCK_INIT() \ rw_init(&mrouter_lock, "IPv4 multicast forwarding") #define MRW_LOCK_DESTROY() rw_destroy(&mrouter_lock) static int ip_mrouter_cnt; /* # of vnets with active mrouters */ static int ip_mrouter_unloading; /* Allow no more V_ip_mrouter sockets */ VNET_PCPUSTAT_DEFINE_STATIC(struct mrtstat, mrtstat); VNET_PCPUSTAT_SYSINIT(mrtstat); VNET_PCPUSTAT_SYSUNINIT(mrtstat); SYSCTL_VNET_PCPUSTAT(_net_inet_ip, OID_AUTO, mrtstat, struct mrtstat, mrtstat, "IPv4 Multicast Forwarding Statistics (struct mrtstat, " "netinet/ip_mroute.h)"); VNET_DEFINE_STATIC(u_long, mfchash); #define V_mfchash VNET(mfchash) #define MFCHASH(a, g) \ ((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^ \ ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & V_mfchash) #define MFCHASHSIZE 256 static u_long mfchashsize = MFCHASHSIZE; /* Hash size */ SYSCTL_ULONG(_net_inet_ip, OID_AUTO, mfchashsize, CTLFLAG_RDTUN, &mfchashsize, 0, "IPv4 Multicast Forwarding Table hash size"); VNET_DEFINE_STATIC(u_char *, nexpire); /* 0..mfchashsize-1 */ #define V_nexpire VNET(nexpire) VNET_DEFINE_STATIC(LIST_HEAD(mfchashhdr, mfc)*, mfchashtbl); #define V_mfchashtbl VNET(mfchashtbl) VNET_DEFINE_STATIC(struct taskqueue *, task_queue); #define V_task_queue VNET(task_queue) VNET_DEFINE_STATIC(struct task, task); #define V_task VNET(task) VNET_DEFINE_STATIC(vifi_t, numvifs); #define V_numvifs VNET(numvifs) VNET_DEFINE_STATIC(struct vif *, viftable); #define V_viftable VNET(viftable) static eventhandler_tag if_detach_event_tag = NULL; VNET_DEFINE_STATIC(struct callout, expire_upcalls_ch); #define V_expire_upcalls_ch VNET(expire_upcalls_ch) VNET_DEFINE_STATIC(struct mtx, buf_ring_mtx); #define V_buf_ring_mtx VNET(buf_ring_mtx) #define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */ #define UPCALL_EXPIRE 6 /* number of timeouts */ /* * Bandwidth meter variables and constants */ static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters"); /* * Pending upcalls are stored in a ring which is flushed when * full, or periodically */ VNET_DEFINE_STATIC(struct callout, bw_upcalls_ch); #define V_bw_upcalls_ch VNET(bw_upcalls_ch) VNET_DEFINE_STATIC(struct buf_ring *, bw_upcalls_ring); #define V_bw_upcalls_ring VNET(bw_upcalls_ring) VNET_DEFINE_STATIC(struct mtx, bw_upcalls_ring_mtx); #define V_bw_upcalls_ring_mtx VNET(bw_upcalls_ring_mtx) #define BW_UPCALLS_PERIOD (hz) /* periodical flush of bw upcalls */ VNET_PCPUSTAT_DEFINE_STATIC(struct pimstat, pimstat); VNET_PCPUSTAT_SYSINIT(pimstat); VNET_PCPUSTAT_SYSUNINIT(pimstat); SYSCTL_NODE(_net_inet, IPPROTO_PIM, pim, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "PIM"); SYSCTL_VNET_PCPUSTAT(_net_inet_pim, PIMCTL_STATS, stats, struct pimstat, pimstat, "PIM Statistics 
(struct pimstat, netinet/pim_var.h)"); static u_long pim_squelch_wholepkt = 0; SYSCTL_ULONG(_net_inet_pim, OID_AUTO, squelch_wholepkt, CTLFLAG_RWTUN, &pim_squelch_wholepkt, 0, "Disable IGMP_WHOLEPKT notifications if rendezvous point is unspecified"); static const struct encaptab *pim_encap_cookie; static int pim_encapcheck(const struct mbuf *, int, int, void *); static int pim_input(struct mbuf *, int, int, void *); extern int in_mcast_loop; static const struct encap_config ipv4_encap_cfg = { .proto = IPPROTO_PIM, .min_length = sizeof(struct ip) + PIM_MINLEN, .exact_match = 8, .check = pim_encapcheck, .input = pim_input }; /* * Note: the PIM Register encapsulation adds the following in front of a * data packet: * * struct pim_encap_hdr { * struct ip ip; * struct pim_encap_pimhdr pim; * } * */ struct pim_encap_pimhdr { struct pim pim; uint32_t flags; }; #define PIM_ENCAP_TTL 64 static struct ip pim_encap_iphdr = { #if BYTE_ORDER == LITTLE_ENDIAN sizeof(struct ip) >> 2, IPVERSION, #else IPVERSION, sizeof(struct ip) >> 2, #endif 0, /* tos */ sizeof(struct ip), /* total length */ 0, /* id */ 0, /* frag offset */ PIM_ENCAP_TTL, IPPROTO_PIM, 0, /* checksum */ }; static struct pim_encap_pimhdr pim_encap_pimhdr = { { PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */ 0, /* reserved */ 0, /* checksum */ }, 0 /* flags */ }; VNET_DEFINE_STATIC(vifi_t, reg_vif_num) = VIFI_INVALID; #define V_reg_vif_num VNET(reg_vif_num) VNET_DEFINE_STATIC(struct ifnet *, multicast_register_if); #define V_multicast_register_if VNET(multicast_register_if) /* * Private variables. */ static u_long X_ip_mcast_src(int); static int X_ip_mforward(struct ip *, struct ifnet *, struct mbuf *, struct ip_moptions *); static int X_ip_mrouter_done(void); static int X_ip_mrouter_get(struct socket *, struct sockopt *); static int X_ip_mrouter_set(struct socket *, struct sockopt *); static int X_legal_vif_num(int); static int X_mrt_ioctl(u_long, caddr_t, int); static int add_bw_upcall(struct bw_upcall *); static int add_mfc(struct mfcctl2 *); static int add_vif(struct vifctl *); static void bw_meter_prepare_upcall(struct bw_meter *, struct timeval *); static void bw_meter_geq_receive_packet(struct bw_meter *, int, struct timeval *); static void bw_upcalls_send(void); static int del_bw_upcall(struct bw_upcall *); static int del_mfc(struct mfcctl2 *); static int del_vif(vifi_t); static int del_vif_locked(vifi_t, struct ifnet **, struct ifnet **); static void expire_bw_upcalls_send(void *); static void expire_mfc(struct mfc *); static void expire_upcalls(void *); static void free_bw_list(struct bw_meter *); static int get_sg_cnt(struct sioc_sg_req *); static int get_vif_cnt(struct sioc_vif_req *); static void if_detached_event(void *, struct ifnet *); static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t); static int ip_mrouter_init(struct socket *, int); static __inline struct mfc * mfc_find(struct in_addr *, struct in_addr *); static void phyint_send(struct ip *, struct vif *, struct mbuf *); static struct mbuf * pim_register_prepare(struct ip *, struct mbuf *); static int pim_register_send(struct ip *, struct vif *, struct mbuf *, struct mfc *); static int pim_register_send_rp(struct ip *, struct vif *, struct mbuf *, struct mfc *); static int pim_register_send_upcall(struct ip *, struct vif *, struct mbuf *, struct mfc *); static void send_packet(struct vif *, struct mbuf *); static int set_api_config(uint32_t *); static int set_assert(int); static int socket_send(struct socket *, struct mbuf *, 
		    struct sockaddr_in *);

/*
 * Kernel multicast forwarding API capabilities and setup.
 * If more API capabilities are added to the kernel, they should be
 * recorded in `mrt_api_support'.
 */
#define MRT_API_VERSION		0x0305

static const int mrt_api_version = MRT_API_VERSION;
static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
					 MRT_MFC_FLAGS_BORDER_VIF |
					 MRT_MFC_RP |
					 MRT_MFC_BW_UPCALL);
VNET_DEFINE_STATIC(uint32_t, mrt_api_config);
#define V_mrt_api_config	VNET(mrt_api_config)
VNET_DEFINE_STATIC(int, pim_assert_enabled);
#define V_pim_assert_enabled	VNET(pim_assert_enabled)
static struct timeval pim_assert_interval = { 3, 0 };	/* Rate limit */

/*
 * Find a route for a given origin IP address and multicast group address.
 * Statistics must be updated by the caller.
 */
static __inline struct mfc *
mfc_find(struct in_addr *o, struct in_addr *g)
{
	struct mfc *rt;

	/*
	 * May be called with either the read or the write lock held;
	 * it is the caller's responsibility to acquire whichever one
	 * is appropriate.
	 */
	MRW_LOCK_ASSERT();

	LIST_FOREACH(rt, &V_mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
		if (in_hosteq(rt->mfc_origin, *o) &&
		    in_hosteq(rt->mfc_mcastgrp, *g) &&
		    buf_ring_empty(rt->mfc_stall_ring))
			break;
	}

	return (rt);
}

static __inline struct mfc *
mfc_alloc(void)
{
	struct mfc *rt;

	rt = malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT | M_ZERO);
	if (rt == NULL)
		return rt;

	rt->mfc_stall_ring = buf_ring_alloc(MAX_UPQ, M_MRTABLE, M_NOWAIT,
	    &V_buf_ring_mtx);
	if (rt->mfc_stall_ring == NULL) {
		free(rt, M_MRTABLE);
		return NULL;
	}

	return rt;
}

/*
 * Handle MRT setsockopt commands to modify the multicast forwarding tables.
 */
static int
X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	vifi_t vifi;
	struct vifctl vifc;
	struct mfcctl2 mfc;
	struct bw_upcall bw_upcall;
	uint32_t i;

	if (so != V_ip_mrouter && sopt->sopt_name != MRT_INIT)
		return EPERM;

	error = 0;
	switch (sopt->sopt_name) {
	case MRT_INIT:
		error = sooptcopyin(sopt, &optval, sizeof optval,
		    sizeof optval);
		if (error)
			break;
		error = ip_mrouter_init(so, optval);
		break;
	case MRT_DONE:
		error = ip_mrouter_done();
		break;
	case MRT_ADD_VIF:
		error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
		if (error)
			break;
		error = add_vif(&vifc);
		break;
	case MRT_DEL_VIF:
		error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
		if (error)
			break;
		error = del_vif(vifi);
		break;
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		/*
		 * Select data size depending on the API version.
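		 *
		 * A daemon that has enabled the advanced API through
		 * MRT_API_CONFIG passes the larger structure, e.g.
		 * (hypothetical userland sketch, not part of this file):
		 *
		 *	struct mfcctl2 mc = { .mfcc_origin = src,
		 *	    .mfcc_mcastgrp = grp };
		 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
		 *
		 * whereas a legacy daemon passes a struct mfcctl and the
		 * trailing mfcctl2-only fields are zeroed below.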
*/ if (sopt->sopt_name == MRT_ADD_MFC && V_mrt_api_config & MRT_API_FLAGS_ALL) { error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2), sizeof(struct mfcctl2)); } else { error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl), sizeof(struct mfcctl)); bzero((caddr_t)&mfc + sizeof(struct mfcctl), sizeof(mfc) - sizeof(struct mfcctl)); } if (error) break; if (sopt->sopt_name == MRT_ADD_MFC) error = add_mfc(&mfc); else error = del_mfc(&mfc); break; case MRT_ASSERT: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) break; set_assert(optval); break; case MRT_API_CONFIG: error = sooptcopyin(sopt, &i, sizeof i, sizeof i); if (!error) error = set_api_config(&i); if (!error) error = sooptcopyout(sopt, &i, sizeof i); break; case MRT_ADD_BW_UPCALL: case MRT_DEL_BW_UPCALL: error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall, sizeof bw_upcall); if (error) break; if (sopt->sopt_name == MRT_ADD_BW_UPCALL) error = add_bw_upcall(&bw_upcall); else error = del_bw_upcall(&bw_upcall); break; default: error = EOPNOTSUPP; break; } return error; } /* * Handle MRT getsockopt commands */ static int X_ip_mrouter_get(struct socket *so, struct sockopt *sopt) { int error; switch (sopt->sopt_name) { case MRT_VERSION: error = sooptcopyout(sopt, &mrt_api_version, sizeof mrt_api_version); break; case MRT_ASSERT: error = sooptcopyout(sopt, &V_pim_assert_enabled, sizeof V_pim_assert_enabled); break; case MRT_API_SUPPORT: error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support); break; case MRT_API_CONFIG: error = sooptcopyout(sopt, &V_mrt_api_config, sizeof V_mrt_api_config); break; default: error = EOPNOTSUPP; break; } return error; } /* * Handle ioctl commands to obtain information from the cache */ static int X_mrt_ioctl(u_long cmd, caddr_t data, int fibnum __unused) { int error; /* * Currently the only function calling this ioctl routine is rtioctl_fib(). 
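 * A hypothetical userland caller reaches it roughly like this
 * (illustrative only):
 *
 *	struct sioc_sg_req sg = { .src = src, .grp = grp };
 *	if (ioctl(rawsock, SIOCGETSGCNT, &sg) == 0)
 *		... sg.pktcnt, sg.bytecnt, sg.wrong_if are filled in ...
 *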
* Typically, only root can create the raw socket in order to execute * this ioctl method, however the request might be coming from a prison */ error = priv_check(curthread, PRIV_NETINET_MROUTE); if (error) return (error); switch (cmd) { case (SIOCGETVIFCNT): error = get_vif_cnt((struct sioc_vif_req *)data); break; case (SIOCGETSGCNT): error = get_sg_cnt((struct sioc_sg_req *)data); break; default: error = EINVAL; break; } return error; } /* * returns the packet, byte, rpf-failure count for the source group provided */ static int get_sg_cnt(struct sioc_sg_req *req) { struct mfc *rt; MRW_RLOCK(); rt = mfc_find(&req->src, &req->grp); if (rt == NULL) { MRW_RUNLOCK(); req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff; return EADDRNOTAVAIL; } req->pktcnt = rt->mfc_pkt_cnt; req->bytecnt = rt->mfc_byte_cnt; req->wrong_if = rt->mfc_wrong_if; MRW_RUNLOCK(); return 0; } /* * returns the input and output packet and byte counts on the vif provided */ static int get_vif_cnt(struct sioc_vif_req *req) { vifi_t vifi = req->vifi; MRW_RLOCK(); if (vifi >= V_numvifs) { MRW_RUNLOCK(); return EINVAL; } mtx_lock_spin(&V_viftable[vifi].v_spin); req->icount = V_viftable[vifi].v_pkt_in; req->ocount = V_viftable[vifi].v_pkt_out; req->ibytes = V_viftable[vifi].v_bytes_in; req->obytes = V_viftable[vifi].v_bytes_out; mtx_unlock_spin(&V_viftable[vifi].v_spin); MRW_RUNLOCK(); return 0; } static void if_detached_event(void *arg __unused, struct ifnet *ifp) { vifi_t vifi; u_long i, vifi_cnt = 0; struct ifnet *free_ptr, *multi_leave; MRW_WLOCK(); if (V_ip_mrouter == NULL) { MRW_WUNLOCK(); return; } /* * Tear down multicast forwarder state associated with this ifnet. * 1. Walk the vif list, matching vifs against this ifnet. * 2. Walk the multicast forwarding cache (mfc) looking for * inner matches with this vif's index. * 3. Expire any matching multicast forwarding cache entries. * 4. Free vif state. This should disable ALLMULTI on the interface. */ restart: for (vifi = 0; vifi < V_numvifs; vifi++) { if (V_viftable[vifi].v_ifp != ifp) continue; for (i = 0; i < mfchashsize; i++) { struct mfc *rt, *nrt; LIST_FOREACH_SAFE(rt, &V_mfchashtbl[i], mfc_hash, nrt) { if (rt->mfc_parent == vifi) { expire_mfc(rt); } } } del_vif_locked(vifi, &multi_leave, &free_ptr); if (free_ptr != NULL) vifi_cnt++; if (multi_leave) { MRW_WUNLOCK(); if_allmulti(multi_leave, 0); MRW_WLOCK(); goto restart; } } MRW_WUNLOCK(); /* * Free IFP. We don't have to use free_ptr here as it is the same * that ifp. Perform free as many times as required in case * refcount is greater than 1. */ for (i = 0; i < vifi_cnt; i++) if_free(ifp); } static void ip_mrouter_upcall_thread(void *arg, int pending __unused) { CURVNET_SET((struct vnet *) arg); MRW_WLOCK(); bw_upcalls_send(); MRW_WUNLOCK(); CURVNET_RESTORE(); } /* * Enable multicast forwarding. 
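 *
 * Reached via setsockopt(MRT_INIT) on the daemon's raw IGMP socket,
 * e.g. (hypothetical userland sketch):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int v = 1;		<- the only accepted version, see below
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &v, sizeof(v));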
 */
static int
ip_mrouter_init(struct socket *so, int version)
{

	CTR2(KTR_IPMF, "%s: so %p", __func__, so);

	if (version != 1)
		return ENOPROTOOPT;

	MRW_TEARDOWN_WLOCK();
	MRW_WLOCK();

	if (ip_mrouter_unloading) {
		MRW_WUNLOCK();
		MRW_TEARDOWN_WUNLOCK();
		return ENOPROTOOPT;
	}

	if (V_ip_mrouter != NULL) {
		MRW_WUNLOCK();
		MRW_TEARDOWN_WUNLOCK();
		return EADDRINUSE;
	}

	V_mfchashtbl = hashinit_flags(mfchashsize, M_MRTABLE, &V_mfchash,
	    HASH_NOWAIT);
	if (V_mfchashtbl == NULL) {
		MRW_WUNLOCK();
		MRW_TEARDOWN_WUNLOCK();
		return (ENOMEM);
	}

	/* Create upcall ring */
	mtx_init(&V_bw_upcalls_ring_mtx, "mroute upcall buf_ring mtx", NULL,
	    MTX_DEF);
	V_bw_upcalls_ring = buf_ring_alloc(BW_UPCALLS_MAX, M_MRTABLE,
	    M_NOWAIT, &V_bw_upcalls_ring_mtx);
	if (!V_bw_upcalls_ring) {
		MRW_WUNLOCK();
		MRW_TEARDOWN_WUNLOCK();
		return (ENOMEM);
	}

	TASK_INIT(&V_task, 0, ip_mrouter_upcall_thread, curvnet);
	taskqueue_cancel(V_task_queue, &V_task, NULL);
	taskqueue_unblock(V_task_queue);

	callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
	    curvnet);
	callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD,
	    expire_bw_upcalls_send, curvnet);

	V_ip_mrouter = so;
	atomic_add_int(&ip_mrouter_cnt, 1);

	/* This is a mutex required by buf_ring init, but not used internally */
	mtx_init(&V_buf_ring_mtx, "mroute buf_ring mtx", NULL, MTX_DEF);

	MRW_WUNLOCK();
	MRW_TEARDOWN_WUNLOCK();

	CTR1(KTR_IPMF, "%s: done", __func__);

	return 0;
}

/*
 * Disable multicast forwarding.
 */
static int
X_ip_mrouter_done(void)
{
	struct ifnet **ifps;
	int nifp;
	u_long i;
	vifi_t vifi;
	struct bw_upcall *bu;

	MRW_TEARDOWN_WLOCK();

	if (V_ip_mrouter == NULL) {
		MRW_TEARDOWN_WUNLOCK();
		return (EINVAL);
	}

	/*
	 * Detach/disable hooks to the rest of the system.
	 */
	V_ip_mrouter = NULL;
	atomic_subtract_int(&ip_mrouter_cnt, 1);
	V_mrt_api_config = 0;

	/*
	 * Wait for all epoch sections to complete to ensure
	 * V_ip_mrouter = NULL is visible to others.
	 */
	NET_EPOCH_WAIT();

	/* Stop and drain task queue */
	taskqueue_block(V_task_queue);
	while (taskqueue_cancel(V_task_queue, &V_task, NULL)) {
		taskqueue_drain(V_task_queue, &V_task);
	}

	ifps = malloc(MAXVIFS * sizeof(*ifps), M_TEMP, M_WAITOK);

	MRW_WLOCK();
	taskqueue_cancel(V_task_queue, &V_task, NULL);

	/* Destroy upcall ring */
	while ((bu = buf_ring_dequeue_mc(V_bw_upcalls_ring)) != NULL) {
		free(bu, M_MRTABLE);
	}
	buf_ring_free(V_bw_upcalls_ring, M_MRTABLE);
	mtx_destroy(&V_bw_upcalls_ring_mtx);

	/*
	 * For each phyint in use, prepare to disable promiscuous reception
	 * of all IP multicasts.  Defer the actual call until the lock is
	 * released; just record the list of interfaces while locked.  Some
	 * interfaces use sx locks in their ioctl routines, which is not
	 * allowed while holding a non-sleepable lock.
	 */
	KASSERT(V_numvifs <= MAXVIFS, ("More vifs than possible"));
	for (vifi = 0, nifp = 0; vifi < V_numvifs; vifi++) {
		if (!in_nullhost(V_viftable[vifi].v_lcl_addr) &&
		    !(V_viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
			ifps[nifp++] = V_viftable[vifi].v_ifp;
		}
	}

	bzero((caddr_t)V_viftable, sizeof(*V_viftable) * MAXVIFS);
	V_numvifs = 0;
	V_pim_assert_enabled = 0;

	callout_stop(&V_expire_upcalls_ch);
	callout_stop(&V_bw_upcalls_ch);

	/*
	 * Free all multicast forwarding cache entries.
	 * Do not use hashdestroy(), as we must perform other cleanup.
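	 * (expire_mfc() also drains each entry's stall ring and frees its
	 * bandwidth meter lists, which a bare hashdestroy() would leak.)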
*/ for (i = 0; i < mfchashsize; i++) { struct mfc *rt, *nrt; LIST_FOREACH_SAFE(rt, &V_mfchashtbl[i], mfc_hash, nrt) { expire_mfc(rt); } } free(V_mfchashtbl, M_MRTABLE); V_mfchashtbl = NULL; bzero(V_nexpire, sizeof(V_nexpire[0]) * mfchashsize); V_reg_vif_num = VIFI_INVALID; mtx_destroy(&V_buf_ring_mtx); MRW_WUNLOCK(); MRW_TEARDOWN_WUNLOCK(); /* * Now drop our claim on promiscuous multicast on the interfaces recorded * above. This is safe to do now because ALLMULTI is reference counted. */ for (vifi = 0; vifi < nifp; vifi++) if_allmulti(ifps[vifi], 0); free(ifps, M_TEMP); CTR1(KTR_IPMF, "%s: done", __func__); return 0; } /* * Set PIM assert processing global */ static int set_assert(int i) { if ((i != 1) && (i != 0)) return EINVAL; V_pim_assert_enabled = i; return 0; } /* * Configure API capabilities */ int set_api_config(uint32_t *apival) { u_long i; /* * We can set the API capabilities only if it is the first operation * after MRT_INIT. I.e.: * - there are no vifs installed * - pim_assert is not enabled * - the MFC table is empty */ if (V_numvifs > 0) { *apival = 0; return EPERM; } if (V_pim_assert_enabled) { *apival = 0; return EPERM; } MRW_RLOCK(); for (i = 0; i < mfchashsize; i++) { if (LIST_FIRST(&V_mfchashtbl[i]) != NULL) { MRW_RUNLOCK(); *apival = 0; return EPERM; } } MRW_RUNLOCK(); V_mrt_api_config = *apival & mrt_api_support; *apival = V_mrt_api_config; return 0; } /* * Add a vif to the vif table */ static int add_vif(struct vifctl *vifcp) { struct vif *vifp = V_viftable + vifcp->vifc_vifi; struct sockaddr_in sin = {sizeof sin, AF_INET}; struct ifaddr *ifa; struct ifnet *ifp; int error; if (vifcp->vifc_vifi >= MAXVIFS) return EINVAL; /* rate limiting is no longer supported by this code */ if (vifcp->vifc_rate_limit != 0) { log(LOG_ERR, "rate limiting is no longer supported\n"); return EINVAL; } if (in_nullhost(vifcp->vifc_lcl_addr)) return EADDRNOTAVAIL; /* Find the interface with an address in AF_INET family */ if (vifcp->vifc_flags & VIFF_REGISTER) { /* * XXX: Because VIFF_REGISTER does not really need a valid * local interface (e.g. it could be 127.0.0.2), we don't * check its address. */ ifp = NULL; } else { struct epoch_tracker et; sin.sin_addr = vifcp->vifc_lcl_addr; NET_EPOCH_ENTER(et); ifa = ifa_ifwithaddr((struct sockaddr *)&sin); if (ifa == NULL) { NET_EPOCH_EXIT(et); return EADDRNOTAVAIL; } ifp = ifa->ifa_ifp; /* XXX FIXME we need to take a ref on ifp and cleanup properly! 
*/ NET_EPOCH_EXIT(et); } if ((vifcp->vifc_flags & VIFF_TUNNEL) != 0) { CTR1(KTR_IPMF, "%s: tunnels are no longer supported", __func__); return EOPNOTSUPP; } else if (vifcp->vifc_flags & VIFF_REGISTER) { ifp = V_multicast_register_if = if_alloc(IFT_LOOP); CTR2(KTR_IPMF, "%s: add register vif for ifp %p", __func__, ifp); if (V_reg_vif_num == VIFI_INVALID) { if_initname(V_multicast_register_if, "register_vif", 0); V_reg_vif_num = vifcp->vifc_vifi; } } else { /* Make sure the interface supports multicast */ if ((ifp->if_flags & IFF_MULTICAST) == 0) return EOPNOTSUPP; /* Enable promiscuous reception of all IP multicasts from the if */ error = if_allmulti(ifp, 1); if (error) return error; } MRW_WLOCK(); if (!in_nullhost(vifp->v_lcl_addr)) { if (ifp) V_multicast_register_if = NULL; MRW_WUNLOCK(); if (ifp) if_free(ifp); return EADDRINUSE; } vifp->v_flags = vifcp->vifc_flags; vifp->v_threshold = vifcp->vifc_threshold; vifp->v_lcl_addr = vifcp->vifc_lcl_addr; vifp->v_rmt_addr = vifcp->vifc_rmt_addr; vifp->v_ifp = ifp; /* initialize per vif pkt counters */ vifp->v_pkt_in = 0; vifp->v_pkt_out = 0; vifp->v_bytes_in = 0; vifp->v_bytes_out = 0; sprintf(vifp->v_spin_name, "BM[%d] spin", vifcp->vifc_vifi); mtx_init(&vifp->v_spin, vifp->v_spin_name, NULL, MTX_SPIN); /* Adjust numvifs up if the vifi is higher than numvifs */ if (V_numvifs <= vifcp->vifc_vifi) V_numvifs = vifcp->vifc_vifi + 1; MRW_WUNLOCK(); CTR4(KTR_IPMF, "%s: add vif %d laddr 0x%08x thresh %x", __func__, (int)vifcp->vifc_vifi, ntohl(vifcp->vifc_lcl_addr.s_addr), (int)vifcp->vifc_threshold); return 0; } /* * Delete a vif from the vif table */ static int del_vif_locked(vifi_t vifi, struct ifnet **ifp_multi_leave, struct ifnet **ifp_free) { struct vif *vifp; *ifp_free = NULL; *ifp_multi_leave = NULL; MRW_WLOCK_ASSERT(); if (vifi >= V_numvifs) { return EINVAL; } vifp = &V_viftable[vifi]; if (in_nullhost(vifp->v_lcl_addr)) { return EADDRNOTAVAIL; } if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) *ifp_multi_leave = vifp->v_ifp; if (vifp->v_flags & VIFF_REGISTER) { V_reg_vif_num = VIFI_INVALID; if (vifp->v_ifp) { if (vifp->v_ifp == V_multicast_register_if) V_multicast_register_if = NULL; *ifp_free = vifp->v_ifp; } } mtx_destroy(&vifp->v_spin); bzero((caddr_t)vifp, sizeof (*vifp)); CTR2(KTR_IPMF, "%s: delete vif %d", __func__, (int)vifi); /* Adjust numvifs down */ for (vifi = V_numvifs; vifi > 0; vifi--) if (!in_nullhost(V_viftable[vifi-1].v_lcl_addr)) break; V_numvifs = vifi; return 0; } static int del_vif(vifi_t vifi) { int cc; struct ifnet *free_ptr, *multi_leave; MRW_WLOCK(); cc = del_vif_locked(vifi, &multi_leave, &free_ptr); MRW_WUNLOCK(); if (multi_leave) if_allmulti(multi_leave, 0); if (free_ptr) { if_free(free_ptr); } return cc; } /* * update an mfc entry without resetting counters and S,G addresses. */ static void update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp) { int i; rt->mfc_parent = mfccp->mfcc_parent; for (i = 0; i < V_numvifs; i++) { rt->mfc_ttls[i] = mfccp->mfcc_ttls[i]; rt->mfc_flags[i] = mfccp->mfcc_flags[i] & V_mrt_api_config & MRT_MFC_FLAGS_ALL; } /* set the RP address */ if (V_mrt_api_config & MRT_MFC_RP) rt->mfc_rp = mfccp->mfcc_rp; else rt->mfc_rp.s_addr = INADDR_ANY; } /* * fully initialize an mfc entry from the parameter. 
*/ static void init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp) { rt->mfc_origin = mfccp->mfcc_origin; rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp; update_mfc_params(rt, mfccp); /* initialize pkt counters per src-grp */ rt->mfc_pkt_cnt = 0; rt->mfc_byte_cnt = 0; rt->mfc_wrong_if = 0; timevalclear(&rt->mfc_last_assert); } static void expire_mfc(struct mfc *rt) { struct rtdetq *rte; MRW_WLOCK_ASSERT(); free_bw_list(rt->mfc_bw_meter_leq); free_bw_list(rt->mfc_bw_meter_geq); while (!buf_ring_empty(rt->mfc_stall_ring)) { rte = buf_ring_dequeue_mc(rt->mfc_stall_ring); if (rte) { m_freem(rte->m); free(rte, M_MRTABLE); } } buf_ring_free(rt->mfc_stall_ring, M_MRTABLE); LIST_REMOVE(rt, mfc_hash); free(rt, M_MRTABLE); } /* * Add an mfc entry */ static int add_mfc(struct mfcctl2 *mfccp) { struct mfc *rt; struct rtdetq *rte; u_long hash = 0; u_short nstl; struct epoch_tracker et; MRW_WLOCK(); rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp); /* If an entry already exists, just update the fields */ if (rt) { CTR4(KTR_IPMF, "%s: update mfc orig 0x%08x group %lx parent %x", __func__, ntohl(mfccp->mfcc_origin.s_addr), (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr), mfccp->mfcc_parent); update_mfc_params(rt, mfccp); MRW_WUNLOCK(); return (0); } /* * Find the entry for which the upcall was made and update */ nstl = 0; hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp); NET_EPOCH_ENTER(et); LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) { if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) && in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) && !buf_ring_empty(rt->mfc_stall_ring)) { CTR5(KTR_IPMF, "%s: add mfc orig 0x%08x group %lx parent %x qh %p", __func__, ntohl(mfccp->mfcc_origin.s_addr), (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr), mfccp->mfcc_parent, rt->mfc_stall_ring); if (nstl++) CTR1(KTR_IPMF, "%s: multiple matches", __func__); init_mfc_params(rt, mfccp); rt->mfc_expire = 0; /* Don't clean this guy up */ V_nexpire[hash]--; /* Free queued packets, but attempt to forward them first. 
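 * (These are data packets parked on the entry's stall ring while the
 * IGMPMSG_NOCACHE upcall was outstanding; now that the daemon has
 * supplied the route, they are run through ip_mdq() and released.)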
*/ while (!buf_ring_empty(rt->mfc_stall_ring)) { rte = buf_ring_dequeue_mc(rt->mfc_stall_ring); if (rte->ifp != NULL) ip_mdq(rte->m, rte->ifp, rt, -1); m_freem(rte->m); free(rte, M_MRTABLE); } } } NET_EPOCH_EXIT(et); /* * It is possible that an entry is being inserted without an upcall */ if (nstl == 0) { CTR1(KTR_IPMF, "%s: adding mfc w/o upcall", __func__); LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) { if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) && in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) { init_mfc_params(rt, mfccp); if (rt->mfc_expire) V_nexpire[hash]--; rt->mfc_expire = 0; break; /* XXX */ } } if (rt == NULL) { /* no upcall, so make a new entry */ rt = mfc_alloc(); if (rt == NULL) { MRW_WUNLOCK(); return (ENOBUFS); } init_mfc_params(rt, mfccp); rt->mfc_expire = 0; rt->mfc_bw_meter_leq = NULL; rt->mfc_bw_meter_geq = NULL; /* insert new entry at head of hash chain */ LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash); } } MRW_WUNLOCK(); return (0); } /* * Delete an mfc entry */ static int del_mfc(struct mfcctl2 *mfccp) { struct in_addr origin; struct in_addr mcastgrp; struct mfc *rt; origin = mfccp->mfcc_origin; mcastgrp = mfccp->mfcc_mcastgrp; CTR3(KTR_IPMF, "%s: delete mfc orig 0x%08x group %lx", __func__, ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr)); MRW_WLOCK(); LIST_FOREACH(rt, &V_mfchashtbl[MFCHASH(origin, mcastgrp)], mfc_hash) { if (in_hosteq(rt->mfc_origin, origin) && in_hosteq(rt->mfc_mcastgrp, mcastgrp)) break; } if (rt == NULL) { MRW_WUNLOCK(); return EADDRNOTAVAIL; } expire_mfc(rt); MRW_WUNLOCK(); return (0); } /* * Send a message to the routing daemon on the multicast routing socket. */ static int socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src) { if (s) { SOCKBUF_LOCK(&s->so_rcv); if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm, NULL) != 0) { sorwakeup_locked(s); return 0; } soroverflow_locked(s); } m_freem(mm); return -1; } /* * IP multicast forwarding function. This function assumes that the packet * pointed to by "ip" has arrived on (or is about to be sent to) the interface * pointed to by "ifp", and the packet is to be relayed to other networks * that have members of the packet's destination IP multicast group. * * The packet is returned unscathed to the caller, unless it is * erroneous, in which case a non-zero return value tells the caller to * discard it. */ #define TUNNEL_LEN 12 /* # bytes of IP option for tunnel encapsulation */ static int X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m, struct ip_moptions *imo) { struct mfc *rt; int error; vifi_t vifi; struct mbuf *mb0; struct rtdetq *rte; u_long hash; int hlen; M_ASSERTMAPPED(m); CTR3(KTR_IPMF, "ip_mforward: delete mfc orig 0x%08x group %lx ifp %p", ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr), ifp); if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 || ((u_char *)(ip + 1))[1] != IPOPT_LSRR) { /* * Packet arrived via a physical interface or * an encapsulated tunnel or a register_vif. */ } else { /* * Packet arrived through a source-route tunnel. * Source-route tunnels are no longer supported. */ return (1); } /* * BEGIN: MCAST ROUTING HOT PATH */ MRW_RLOCK(); if (imo && ((vifi = imo->imo_multicast_vif) < V_numvifs)) { if (ip->ip_ttl < MAXTTL) ip->ip_ttl++; /* compensate for -1 in *_send routines */ error = ip_mdq(m, ifp, NULL, vifi); MRW_RUNLOCK(); return error; } /* * Don't forward a packet with time-to-live of zero or one, * or a packet destined to a local-only group. 
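 * (IN_LOCAL_GROUP() matches the link-local block 224.0.0.0/24,
 * e.g. 224.0.0.1 all-hosts; such groups are never forwarded off-link.)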
*/ if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ntohl(ip->ip_dst.s_addr))) { MRW_RUNLOCK(); return 0; } mfc_find_retry: /* * Determine forwarding vifs from the forwarding cache table */ MRTSTAT_INC(mrts_mfc_lookups); rt = mfc_find(&ip->ip_src, &ip->ip_dst); /* Entry exists, so forward if necessary */ if (rt != NULL) { error = ip_mdq(m, ifp, rt, -1); /* Generic unlock here as we might release R or W lock */ MRW_UNLOCK(); return error; } /* * END: MCAST ROUTING HOT PATH */ /* Further processing must be done with WLOCK taken */ if ((MRW_WOWNED() == 0) && (MRW_LOCK_TRY_UPGRADE() == 0)) { MRW_RUNLOCK(); MRW_WLOCK(); goto mfc_find_retry; } /* * If we don't have a route for packet's origin, * Make a copy of the packet & send message to routing daemon */ hlen = ip->ip_hl << 2; MRTSTAT_INC(mrts_mfc_misses); MRTSTAT_INC(mrts_no_route); CTR2(KTR_IPMF, "ip_mforward: no mfc for (0x%08x,%lx)", ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr)); /* * Allocate mbufs early so that we don't do extra work if we are * just going to fail anyway. Make sure to pullup the header so * that other people can't step on it. */ rte = malloc((sizeof *rte), M_MRTABLE, M_NOWAIT|M_ZERO); if (rte == NULL) { MRW_WUNLOCK(); return ENOBUFS; } mb0 = m_copypacket(m, M_NOWAIT); if (mb0 && (!M_WRITABLE(mb0) || mb0->m_len < hlen)) mb0 = m_pullup(mb0, hlen); if (mb0 == NULL) { free(rte, M_MRTABLE); MRW_WUNLOCK(); return ENOBUFS; } /* is there an upcall waiting for this flow ? */ hash = MFCHASH(ip->ip_src, ip->ip_dst); LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) { if (in_hosteq(ip->ip_src, rt->mfc_origin) && in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) && !buf_ring_empty(rt->mfc_stall_ring)) break; } if (rt == NULL) { int i; struct igmpmsg *im; struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; struct mbuf *mm; /* * Locate the vifi for the incoming interface for this packet. * If none found, drop packet. 
*/ for (vifi = 0; vifi < V_numvifs && V_viftable[vifi].v_ifp != ifp; vifi++) ; if (vifi >= V_numvifs) /* vif not found, drop packet */ goto non_fatal; /* no upcall, so make a new entry */ rt = mfc_alloc(); if (rt == NULL) goto fail; /* Make a copy of the header to send to the user level process */ mm = m_copym(mb0, 0, hlen, M_NOWAIT); if (mm == NULL) goto fail1; /* * Send message to routing daemon to install * a route into the kernel table */ im = mtod(mm, struct igmpmsg*); im->im_msgtype = IGMPMSG_NOCACHE; im->im_mbz = 0; im->im_vif = vifi; MRTSTAT_INC(mrts_upcalls); k_igmpsrc.sin_addr = ip->ip_src; if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) { CTR0(KTR_IPMF, "ip_mforward: socket queue full"); MRTSTAT_INC(mrts_upq_sockfull); fail1: free(rt, M_MRTABLE); fail: free(rte, M_MRTABLE); m_freem(mb0); MRW_WUNLOCK(); return ENOBUFS; } /* insert new entry at head of hash chain */ rt->mfc_origin.s_addr = ip->ip_src.s_addr; rt->mfc_mcastgrp.s_addr = ip->ip_dst.s_addr; rt->mfc_expire = UPCALL_EXPIRE; V_nexpire[hash]++; for (i = 0; i < V_numvifs; i++) { rt->mfc_ttls[i] = 0; rt->mfc_flags[i] = 0; } rt->mfc_parent = -1; /* clear the RP address */ rt->mfc_rp.s_addr = INADDR_ANY; rt->mfc_bw_meter_leq = NULL; rt->mfc_bw_meter_geq = NULL; /* initialize pkt counters per src-grp */ rt->mfc_pkt_cnt = 0; rt->mfc_byte_cnt = 0; rt->mfc_wrong_if = 0; timevalclear(&rt->mfc_last_assert); buf_ring_enqueue(rt->mfc_stall_ring, rte); /* Add RT to hashtable as it didn't exist before */ LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash); } else { /* determine if queue has overflowed */ if (buf_ring_full(rt->mfc_stall_ring)) { MRTSTAT_INC(mrts_upq_ovflw); non_fatal: free(rte, M_MRTABLE); m_freem(mb0); MRW_WUNLOCK(); return (0); } buf_ring_enqueue(rt->mfc_stall_ring, rte); } rte->m = mb0; rte->ifp = ifp; MRW_WUNLOCK(); return 0; } /* * Clean up the cache entry if upcall is not serviced */ static void expire_upcalls(void *arg) { u_long i; CURVNET_SET((struct vnet *) arg); /*This callout is always run with MRW_WLOCK taken. */ for (i = 0; i < mfchashsize; i++) { struct mfc *rt, *nrt; if (V_nexpire[i] == 0) continue; LIST_FOREACH_SAFE(rt, &V_mfchashtbl[i], mfc_hash, nrt) { if (buf_ring_empty(rt->mfc_stall_ring)) continue; if (rt->mfc_expire == 0 || --rt->mfc_expire > 0) continue; MRTSTAT_INC(mrts_cache_cleanups); CTR3(KTR_IPMF, "%s: expire (%lx, %lx)", __func__, (u_long)ntohl(rt->mfc_origin.s_addr), (u_long)ntohl(rt->mfc_mcastgrp.s_addr)); expire_mfc(rt); } } callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, curvnet); CURVNET_RESTORE(); } /* * Packet forwarding routine once entry in the cache is made */ static int ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif) { struct ip *ip = mtod(m, struct ip *); vifi_t vifi; int plen = ntohs(ip->ip_len); M_ASSERTMAPPED(m); MRW_LOCK_ASSERT(); NET_EPOCH_ASSERT(); /* * If xmt_vif is not -1, send on only the requested vif. * * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.) */ if (xmt_vif < V_numvifs) { if (V_viftable[xmt_vif].v_flags & VIFF_REGISTER) pim_register_send(ip, V_viftable + xmt_vif, m, rt); else phyint_send(ip, V_viftable + xmt_vif, m); return 1; } /* * Don't forward if it didn't arrive from the parent vif for its origin. 
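 * (This is the reverse-path check: rt->mfc_parent names the expected
 * upstream vif for this (S,G) entry; arrival on any other interface
 * counts as a wrong-iif event and may trigger a PIM assert upcall
 * below.)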
*/ vifi = rt->mfc_parent; if ((vifi >= V_numvifs) || (V_viftable[vifi].v_ifp != ifp)) { CTR4(KTR_IPMF, "%s: rx on wrong ifp %p (vifi %d, v_ifp %p)", __func__, ifp, (int)vifi, V_viftable[vifi].v_ifp); MRTSTAT_INC(mrts_wrong_if); ++rt->mfc_wrong_if; /* * If we are doing PIM assert processing, send a message * to the routing daemon. * * XXX: A PIM-SM router needs the WRONGVIF detection so it * can complete the SPT switch, regardless of the type * of the iif (broadcast media, GRE tunnel, etc). */ if (V_pim_assert_enabled && (vifi < V_numvifs) && V_viftable[vifi].v_ifp) { if (ifp == V_multicast_register_if) PIMSTAT_INC(pims_rcv_registers_wrongiif); /* Get vifi for the incoming packet */ for (vifi = 0; vifi < V_numvifs && V_viftable[vifi].v_ifp != ifp; vifi++) ; if (vifi >= V_numvifs) return 0; /* The iif is not found: ignore the packet. */ if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF) return 0; /* WRONGVIF disabled: ignore the packet */ if (ratecheck(&rt->mfc_last_assert, &pim_assert_interval)) { struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; struct igmpmsg *im; int hlen = ip->ip_hl << 2; struct mbuf *mm = m_copym(m, 0, hlen, M_NOWAIT); if (mm && (!M_WRITABLE(mm) || mm->m_len < hlen)) mm = m_pullup(mm, hlen); if (mm == NULL) return ENOBUFS; im = mtod(mm, struct igmpmsg *); im->im_msgtype = IGMPMSG_WRONGVIF; im->im_mbz = 0; im->im_vif = vifi; MRTSTAT_INC(mrts_upcalls); k_igmpsrc.sin_addr = im->im_src; if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) { CTR1(KTR_IPMF, "%s: socket queue full", __func__); MRTSTAT_INC(mrts_upq_sockfull); return ENOBUFS; } } } return 0; } /* If I sourced this packet, it counts as output, else it was input. */ mtx_lock_spin(&V_viftable[vifi].v_spin); if (in_hosteq(ip->ip_src, V_viftable[vifi].v_lcl_addr)) { V_viftable[vifi].v_pkt_out++; V_viftable[vifi].v_bytes_out += plen; } else { V_viftable[vifi].v_pkt_in++; V_viftable[vifi].v_bytes_in += plen; } mtx_unlock_spin(&V_viftable[vifi].v_spin); rt->mfc_pkt_cnt++; rt->mfc_byte_cnt += plen; /* * For each vif, decide if a copy of the packet should be forwarded. * Forward if: * - the ttl exceeds the vif's threshold * - there are group members downstream on interface */ for (vifi = 0; vifi < V_numvifs; vifi++) if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) { V_viftable[vifi].v_pkt_out++; V_viftable[vifi].v_bytes_out += plen; if (V_viftable[vifi].v_flags & VIFF_REGISTER) pim_register_send(ip, V_viftable + vifi, m, rt); else phyint_send(ip, V_viftable + vifi, m); } /* * Perform upcall-related bw measuring. */ if ((rt->mfc_bw_meter_geq != NULL) || (rt->mfc_bw_meter_leq != NULL)) { struct bw_meter *x; struct timeval now; microtime(&now); /* Process meters for Greater-or-EQual case */ for (x = rt->mfc_bw_meter_geq; x != NULL; x = x->bm_mfc_next) bw_meter_geq_receive_packet(x, plen, &now); /* Process meters for Lower-or-EQual case */ for (x = rt->mfc_bw_meter_leq; x != NULL; x = x->bm_mfc_next) { /* * Record that a packet is received. * Spin lock has to be taken as callout context * (expire_bw_meter_leq) might modify these fields * as well */ mtx_lock_spin(&x->bm_spin); x->bm_measured.b_packets++; x->bm_measured.b_bytes += plen; mtx_unlock_spin(&x->bm_spin); } } return 0; } /* * Check if a vif number is legal/ok. This is used by in_mcast.c. 
*/ static int X_legal_vif_num(int vif) { int ret; ret = 0; if (vif < 0) return (ret); MRW_RLOCK(); if (vif < V_numvifs) ret = 1; MRW_RUNLOCK(); return (ret); } /* * Return the local address used by this vif */ static u_long X_ip_mcast_src(int vifi) { in_addr_t addr; addr = INADDR_ANY; if (vifi < 0) return (addr); MRW_RLOCK(); if (vifi < V_numvifs) addr = V_viftable[vifi].v_lcl_addr.s_addr; MRW_RUNLOCK(); return (addr); } static void phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m) { struct mbuf *mb_copy; int hlen = ip->ip_hl << 2; MRW_LOCK_ASSERT(); M_ASSERTMAPPED(m); /* * Make a new reference to the packet; make sure that * the IP header is actually copied, not just referenced, * so that ip_output() only scribbles on the copy. */ mb_copy = m_copypacket(m, M_NOWAIT); if (mb_copy && (!M_WRITABLE(mb_copy) || mb_copy->m_len < hlen)) mb_copy = m_pullup(mb_copy, hlen); if (mb_copy == NULL) return; send_packet(vifp, mb_copy); } static void send_packet(struct vif *vifp, struct mbuf *m) { struct ip_moptions imo; int error __unused; MRW_LOCK_ASSERT(); NET_EPOCH_ASSERT(); imo.imo_multicast_ifp = vifp->v_ifp; imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1; imo.imo_multicast_loop = !!in_mcast_loop; imo.imo_multicast_vif = -1; STAILQ_INIT(&imo.imo_head); /* * Re-entrancy should not be a problem here, because * the packets that we send out and are looped back at us * should get rejected because they appear to come from * the loopback interface, thus preventing looping. */ error = ip_output(m, NULL, NULL, IP_FORWARDING, &imo, NULL); CTR3(KTR_IPMF, "%s: vif %td err %d", __func__, (ptrdiff_t)(vifp - V_viftable), error); } /* * Stubs for old RSVP socket shim implementation. */ static int X_ip_rsvp_vif(struct socket *so __unused, struct sockopt *sopt __unused) { return (EOPNOTSUPP); } static void X_ip_rsvp_force_done(struct socket *so __unused) { } static int X_rsvp_input(struct mbuf **mp, int *offp, int proto) { struct mbuf *m; m = *mp; *mp = NULL; if (!V_rsvp_on) m_freem(m); return (IPPROTO_DONE); } /* * Code for bandwidth monitors */ /* * Define common interface for timeval-related methods */ #define BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp) #define BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp)) #define BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp)) static uint32_t compute_bw_meter_flags(struct bw_upcall *req) { uint32_t flags = 0; if (req->bu_flags & BW_UPCALL_UNIT_PACKETS) flags |= BW_METER_UNIT_PACKETS; if (req->bu_flags & BW_UPCALL_UNIT_BYTES) flags |= BW_METER_UNIT_BYTES; if (req->bu_flags & BW_UPCALL_GEQ) flags |= BW_METER_GEQ; if (req->bu_flags & BW_UPCALL_LEQ) flags |= BW_METER_LEQ; return flags; } static void expire_bw_meter_leq(void *arg) { struct bw_meter *x = arg; struct timeval now; /* * INFO: * callout is always executed with MRW_WLOCK taken */ CURVNET_SET((struct vnet *)x->arg); microtime(&now); /* * Test if we should deliver an upcall */ if (((x->bm_flags & BW_METER_UNIT_PACKETS) && (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) || ((x->bm_flags & BW_METER_UNIT_BYTES) && (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) { /* Prepare an upcall for delivery */ bw_meter_prepare_upcall(x, &now); } /* Send all upcalls that are pending delivery */ taskqueue_enqueue(V_task_queue, &V_task); /* Reset counters */ x->bm_start_time = now; /* Spin lock has to be taken as ip_forward context * might modify these fields as well */ mtx_lock_spin(&x->bm_spin); x->bm_measured.b_bytes = 0; x->bm_measured.b_packets = 0; mtx_unlock_spin(&x->bm_spin); 
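	/* Re-arm the callout for this meter's next measuring interval. */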
callout_schedule(&x->bm_meter_callout, tvtohz(&x->bm_threshold.b_time)); CURVNET_RESTORE(); } /* * Add a bw_meter entry */ static int add_bw_upcall(struct bw_upcall *req) { struct mfc *mfc; struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC, BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC }; struct timeval now; struct bw_meter *x, **bwm_ptr; uint32_t flags; if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL)) return EOPNOTSUPP; /* Test if the flags are valid */ if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES))) return EINVAL; if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))) return EINVAL; if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)) == (BW_UPCALL_GEQ | BW_UPCALL_LEQ)) return EINVAL; /* Test if the threshold time interval is valid */ if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <)) return EINVAL; flags = compute_bw_meter_flags(req); /* * Find if we have already same bw_meter entry */ MRW_WLOCK(); mfc = mfc_find(&req->bu_src, &req->bu_dst); if (mfc == NULL) { MRW_WUNLOCK(); return EADDRNOTAVAIL; } /* Choose an appropriate bw_meter list */ if (req->bu_flags & BW_UPCALL_GEQ) bwm_ptr = &mfc->mfc_bw_meter_geq; else bwm_ptr = &mfc->mfc_bw_meter_leq; for (x = *bwm_ptr; x != NULL; x = x->bm_mfc_next) { if ((BW_TIMEVALCMP(&x->bm_threshold.b_time, &req->bu_threshold.b_time, ==)) && (x->bm_threshold.b_packets == req->bu_threshold.b_packets) && (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) && (x->bm_flags & BW_METER_USER_FLAGS) == flags) { MRW_WUNLOCK(); return 0; /* XXX Already installed */ } } /* Allocate the new bw_meter entry */ x = malloc(sizeof(*x), M_BWMETER, M_ZERO | M_NOWAIT); if (x == NULL) { MRW_WUNLOCK(); return ENOBUFS; } /* Set the new bw_meter entry */ x->bm_threshold.b_time = req->bu_threshold.b_time; microtime(&now); x->bm_start_time = now; x->bm_threshold.b_packets = req->bu_threshold.b_packets; x->bm_threshold.b_bytes = req->bu_threshold.b_bytes; x->bm_measured.b_packets = 0; x->bm_measured.b_bytes = 0; x->bm_flags = flags; x->bm_time_next = NULL; x->bm_mfc = mfc; x->arg = curvnet; sprintf(x->bm_spin_name, "BM spin %p", x); mtx_init(&x->bm_spin, x->bm_spin_name, NULL, MTX_SPIN); /* For LEQ case create periodic callout */ if (req->bu_flags & BW_UPCALL_LEQ) { callout_init_rw(&x->bm_meter_callout, &mrouter_lock, CALLOUT_SHAREDLOCK); callout_reset(&x->bm_meter_callout, tvtohz(&x->bm_threshold.b_time), expire_bw_meter_leq, x); } /* Add the new bw_meter entry to the front of entries for this MFC */ x->bm_mfc_next = *bwm_ptr; *bwm_ptr = x; MRW_WUNLOCK(); return 0; } static void free_bw_list(struct bw_meter *list) { while (list != NULL) { struct bw_meter *x = list; /* MRW_WLOCK must be held here */ if (x->bm_flags & BW_METER_LEQ) { callout_drain(&x->bm_meter_callout); mtx_destroy(&x->bm_spin); } list = list->bm_mfc_next; free(x, M_BWMETER); } } /* * Delete one or multiple bw_meter entries */ static int del_bw_upcall(struct bw_upcall *req) { struct mfc *mfc; struct bw_meter *x, **bwm_ptr; if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL)) return EOPNOTSUPP; MRW_WLOCK(); /* Find the corresponding MFC entry */ mfc = mfc_find(&req->bu_src, &req->bu_dst); if (mfc == NULL) { MRW_WUNLOCK(); return EADDRNOTAVAIL; } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) { /* * Delete all bw_meter entries for this mfc */ struct bw_meter *list; /* Free LEQ list */ list = mfc->mfc_bw_meter_leq; mfc->mfc_bw_meter_leq = NULL; free_bw_list(list); /* Free GEQ list */ list = mfc->mfc_bw_meter_geq; mfc->mfc_bw_meter_geq = NULL; free_bw_list(list); MRW_WUNLOCK(); return 0; } 
else { /* Delete a single bw_meter entry */ struct bw_meter *prev; uint32_t flags = 0; flags = compute_bw_meter_flags(req); /* Choose an appropriate bw_meter list */ if (req->bu_flags & BW_UPCALL_GEQ) bwm_ptr = &mfc->mfc_bw_meter_geq; else bwm_ptr = &mfc->mfc_bw_meter_leq; /* Find the bw_meter entry to delete */ for (prev = NULL, x = *bwm_ptr; x != NULL; prev = x, x = x->bm_mfc_next) { if ((BW_TIMEVALCMP(&x->bm_threshold.b_time, &req->bu_threshold.b_time, ==)) && (x->bm_threshold.b_packets == req->bu_threshold.b_packets) && (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) && (x->bm_flags & BW_METER_USER_FLAGS) == flags) break; } if (x != NULL) { /* Delete entry from the list for this MFC */ if (prev != NULL) prev->bm_mfc_next = x->bm_mfc_next; /* remove from middle*/ else *bwm_ptr = x->bm_mfc_next;/* new head of list */ if (req->bu_flags & BW_UPCALL_LEQ) callout_stop(&x->bm_meter_callout); MRW_WUNLOCK(); /* Free the bw_meter entry */ free(x, M_BWMETER); return 0; } else { MRW_WUNLOCK(); return EINVAL; } } __assert_unreachable(); } /* * Perform bandwidth measurement processing that may result in an upcall */ static void bw_meter_geq_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp) { struct timeval delta; MRW_LOCK_ASSERT(); delta = *nowp; BW_TIMEVALDECR(&delta, &x->bm_start_time); /* * Processing for ">=" type of bw_meter entry. * bm_spin does not have to be hold here as in GEQ * case this is the only context accessing bm_measured. */ if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) { /* Reset the bw_meter entry */ x->bm_start_time = *nowp; x->bm_measured.b_packets = 0; x->bm_measured.b_bytes = 0; x->bm_flags &= ~BW_METER_UPCALL_DELIVERED; } /* Record that a packet is received */ x->bm_measured.b_packets++; x->bm_measured.b_bytes += plen; /* * Test if we should deliver an upcall */ if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) { if (((x->bm_flags & BW_METER_UNIT_PACKETS) && (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) || ((x->bm_flags & BW_METER_UNIT_BYTES) && (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) { /* Prepare an upcall for delivery */ bw_meter_prepare_upcall(x, nowp); x->bm_flags |= BW_METER_UPCALL_DELIVERED; } } } /* * Prepare a bandwidth-related upcall */ static void bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp) { struct timeval delta; struct bw_upcall *u; MRW_LOCK_ASSERT(); /* * Compute the measured time interval */ delta = *nowp; BW_TIMEVALDECR(&delta, &x->bm_start_time); /* * Set the bw_upcall entry */ u = malloc(sizeof(struct bw_upcall), M_MRTABLE, M_NOWAIT | M_ZERO); if (!u) { log(LOG_WARNING, "bw_meter_prepare_upcall: cannot allocate entry\n"); return; } u->bu_src = x->bm_mfc->mfc_origin; u->bu_dst = x->bm_mfc->mfc_mcastgrp; u->bu_threshold.b_time = x->bm_threshold.b_time; u->bu_threshold.b_packets = x->bm_threshold.b_packets; u->bu_threshold.b_bytes = x->bm_threshold.b_bytes; u->bu_measured.b_time = delta; u->bu_measured.b_packets = x->bm_measured.b_packets; u->bu_measured.b_bytes = x->bm_measured.b_bytes; u->bu_flags = 0; if (x->bm_flags & BW_METER_UNIT_PACKETS) u->bu_flags |= BW_UPCALL_UNIT_PACKETS; if (x->bm_flags & BW_METER_UNIT_BYTES) u->bu_flags |= BW_UPCALL_UNIT_BYTES; if (x->bm_flags & BW_METER_GEQ) u->bu_flags |= BW_UPCALL_GEQ; if (x->bm_flags & BW_METER_LEQ) u->bu_flags |= BW_UPCALL_LEQ; if (buf_ring_enqueue(V_bw_upcalls_ring, u)) log(LOG_WARNING, "bw_meter_prepare_upcall: cannot enqueue upcall\n"); if (buf_ring_count(V_bw_upcalls_ring) > (BW_UPCALLS_MAX / 2)) { taskqueue_enqueue(V_task_queue, 
&V_task); } } /* * Send the pending bandwidth-related upcalls */ static void bw_upcalls_send(void) { struct mbuf *m; int len = 0; struct bw_upcall *bu; struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; static struct igmpmsg igmpmsg = { 0, /* unused1 */ 0, /* unused2 */ IGMPMSG_BW_UPCALL,/* im_msgtype */ 0, /* im_mbz */ 0, /* im_vif */ 0, /* unused3 */ { 0 }, /* im_src */ { 0 } /* im_dst */ }; MRW_LOCK_ASSERT(); if (buf_ring_empty(V_bw_upcalls_ring)) return; /* * Allocate a new mbuf, initialize it with the header and * the payload for the pending calls. */ m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n"); return; } m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg); len += sizeof(struct igmpmsg); while ((bu = buf_ring_dequeue_mc(V_bw_upcalls_ring)) != NULL) { m_copyback(m, len, sizeof(struct bw_upcall), (caddr_t)bu); len += sizeof(struct bw_upcall); free(bu, M_MRTABLE); } /* * Send the upcalls * XXX do we need to set the address in k_igmpsrc ? */ MRTSTAT_INC(mrts_upcalls); if (socket_send(V_ip_mrouter, m, &k_igmpsrc) < 0) { log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n"); MRTSTAT_INC(mrts_upq_sockfull); } } /* * A periodic function for sending all upcalls that are pending delivery */ static void expire_bw_upcalls_send(void *arg) { CURVNET_SET((struct vnet *) arg); /* This callout is run with MRW_RLOCK taken */ bw_upcalls_send(); callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send, curvnet); CURVNET_RESTORE(); } /* * End of bandwidth monitoring code */ /* * Send the packet up to the user daemon, or eventually do kernel encapsulation * */ static int pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m, struct mfc *rt) { struct mbuf *mb_copy, *mm; /* * Do not send IGMP_WHOLEPKT notifications to userland, if the * rendezvous point was unspecified, and we were told not to. */ if (pim_squelch_wholepkt != 0 && (V_mrt_api_config & MRT_MFC_RP) && in_nullhost(rt->mfc_rp)) return 0; mb_copy = pim_register_prepare(ip, m); if (mb_copy == NULL) return ENOBUFS; /* * Send all the fragments. Note that the mbuf for each fragment * is freed by the sending machinery. */ for (mm = mb_copy; mm; mm = mb_copy) { mb_copy = mm->m_nextpkt; mm->m_nextpkt = 0; mm = m_pullup(mm, sizeof(struct ip)); if (mm != NULL) { ip = mtod(mm, struct ip *); if ((V_mrt_api_config & MRT_MFC_RP) && !in_nullhost(rt->mfc_rp)) { pim_register_send_rp(ip, vifp, mm, rt); } else { pim_register_send_upcall(ip, vifp, mm, rt); } } } return 0; } /* * Return a copy of the data packet that is ready for PIM Register * encapsulation. * XXX: Note that in the returned copy the IP header is a valid one. */ static struct mbuf * pim_register_prepare(struct ip *ip, struct mbuf *m) { struct mbuf *mb_copy = NULL; int mtu; /* Take care of delayed checksums */ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } /* * Copy the old packet & pullup its IP header into the * new mbuf so we can modify it. 
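 * (The copy must be contiguous and writable for at least the IP
 * header, since the TTL is decremented and the checksum recomputed
 * in place just below.)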
*/ mb_copy = m_copypacket(m, M_NOWAIT); if (mb_copy == NULL) return NULL; mb_copy = m_pullup(mb_copy, ip->ip_hl << 2); if (mb_copy == NULL) return NULL; /* take care of the TTL */ ip = mtod(mb_copy, struct ip *); --ip->ip_ttl; /* Compute the MTU after the PIM Register encapsulation */ mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr); if (ntohs(ip->ip_len) <= mtu) { /* Turn the IP header into a valid one */ ip->ip_sum = 0; ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2); } else { /* Fragment the packet */ mb_copy->m_pkthdr.csum_flags |= CSUM_IP; if (ip_fragment(ip, &mb_copy, mtu, 0) != 0) { m_freem(mb_copy); return NULL; } } return mb_copy; } /* * Send an upcall with the data packet to the user-level process. */ static int pim_register_send_upcall(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy, struct mfc *rt) { struct mbuf *mb_first; int len = ntohs(ip->ip_len); struct igmpmsg *im; struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; MRW_LOCK_ASSERT(); /* * Add a new mbuf with an upcall header */ mb_first = m_gethdr(M_NOWAIT, MT_DATA); if (mb_first == NULL) { m_freem(mb_copy); return ENOBUFS; } mb_first->m_data += max_linkhdr; mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg); mb_first->m_len = sizeof(struct igmpmsg); mb_first->m_next = mb_copy; /* Send message to routing daemon */ im = mtod(mb_first, struct igmpmsg *); im->im_msgtype = IGMPMSG_WHOLEPKT; im->im_mbz = 0; im->im_vif = vifp - V_viftable; im->im_src = ip->ip_src; im->im_dst = ip->ip_dst; k_igmpsrc.sin_addr = ip->ip_src; MRTSTAT_INC(mrts_upcalls); if (socket_send(V_ip_mrouter, mb_first, &k_igmpsrc) < 0) { CTR1(KTR_IPMF, "%s: socket queue full", __func__); MRTSTAT_INC(mrts_upq_sockfull); return ENOBUFS; } /* Keep statistics */ PIMSTAT_INC(pims_snd_registers_msgs); PIMSTAT_ADD(pims_snd_registers_bytes, len); return 0; } /* * Encapsulate the data packet in PIM Register message and send it to the RP. */ static int pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy, struct mfc *rt) { struct mbuf *mb_first; struct ip *ip_outer; struct pim_encap_pimhdr *pimhdr; int len = ntohs(ip->ip_len); vifi_t vifi = rt->mfc_parent; MRW_LOCK_ASSERT(); if ((vifi >= V_numvifs) || in_nullhost(V_viftable[vifi].v_lcl_addr)) { m_freem(mb_copy); return EADDRNOTAVAIL; /* The iif vif is invalid */ } /* * Add a new mbuf with the encapsulating header */ mb_first = m_gethdr(M_NOWAIT, MT_DATA); if (mb_first == NULL) { m_freem(mb_copy); return ENOBUFS; } mb_first->m_data += max_linkhdr; mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr); mb_first->m_next = mb_copy; mb_first->m_pkthdr.len = len + mb_first->m_len; /* * Fill in the encapsulating IP and PIM header */ ip_outer = mtod(mb_first, struct ip *); *ip_outer = pim_encap_iphdr; ip_outer->ip_len = htons(len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr)); ip_outer->ip_src = V_viftable[vifi].v_lcl_addr; ip_outer->ip_dst = rt->mfc_rp; /* * Copy the inner header TOS to the outer header, and take care of the * IP_DF bit. 
 */
	ip_outer->ip_tos = ip->ip_tos;
	if (ip->ip_off & htons(IP_DF))
		ip_outer->ip_off |= htons(IP_DF);
-	ip_fillid(ip_outer);
+	ip_fillid(ip_outer, V_ip_random_id);
	pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer +
	    sizeof(pim_encap_iphdr));
	*pimhdr = pim_encap_pimhdr;

	/* If the iif crosses a border, set the Border-bit */
	if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & V_mrt_api_config)
		pimhdr->flags |= htonl(PIM_BORDER_REGISTER);

	mb_first->m_data += sizeof(pim_encap_iphdr);
	pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
	mb_first->m_data -= sizeof(pim_encap_iphdr);

	send_packet(vifp, mb_first);

	/* Keep statistics */
	PIMSTAT_INC(pims_snd_registers_msgs);
	PIMSTAT_ADD(pims_snd_registers_bytes, len);

	return 0;
}

/*
 * pim_encapcheck() is called by the encap4_input() path at runtime to
 * determine if a packet is for PIM, allowing PIM to be dynamically loaded
 * into the kernel.
 */
static int
pim_encapcheck(const struct mbuf *m __unused, int off __unused,
    int proto __unused, void *arg __unused)
{

	KASSERT(proto == IPPROTO_PIM, ("not for IPPROTO_PIM"));
	return (8);		/* claim the datagram. */
}

/*
 * PIM-SMv2 and PIM-DM message processing.
 * Receives and verifies the PIM control messages, and passes them
 * up to the listening socket, using rip_input().
 * The only message with special processing is the PIM_REGISTER message
 * (used by PIM-SM): the PIM header is stripped off, and the inner packet
 * is passed to if_simloop().
 */
static int
pim_input(struct mbuf *m, int off, int proto, void *arg __unused)
{
	struct ip *ip = mtod(m, struct ip *);
	struct pim *pim;
	int iphlen = off;
	int minlen;
	int datalen = ntohs(ip->ip_len) - iphlen;
	int ip_tos;

	/* Keep statistics */
	PIMSTAT_INC(pims_rcv_total_msgs);
	PIMSTAT_ADD(pims_rcv_total_bytes, datalen);

	/*
	 * Validate lengths
	 */
	if (datalen < PIM_MINLEN) {
		PIMSTAT_INC(pims_rcv_tooshort);
		CTR3(KTR_IPMF, "%s: short packet (%d) from 0x%08x",
		    __func__, datalen, ntohl(ip->ip_src.s_addr));
		m_freem(m);
		return (IPPROTO_DONE);
	}

	/*
	 * If the packet is at least as big as a REGISTER, go ahead
	 * and grab the PIM REGISTER header size, to avoid another
	 * possible m_pullup() later.
	 *
	 * PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
	 * PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
	 */
	minlen = iphlen + (datalen >= PIM_REG_MINLEN ?
	    PIM_REG_MINLEN : PIM_MINLEN);

	/*
	 * Get the IP and PIM headers in contiguous memory, and
	 * possibly the PIM REGISTER header.
	 */
	if (m->m_len < minlen && (m = m_pullup(m, minlen)) == NULL) {
		CTR1(KTR_IPMF, "%s: m_pullup() failed", __func__);
		return (IPPROTO_DONE);
	}

	/* m_pullup() may have given us a new mbuf so reset ip. */
	ip = mtod(m, struct ip *);
	ip_tos = ip->ip_tos;

	/* adjust mbuf to point to the PIM header */
	m->m_data += iphlen;
	m->m_len -= iphlen;
	pim = mtod(m, struct pim *);

	/*
	 * Validate checksum.  If PIM REGISTER, exclude the data packet.
	 *
	 * XXX: some older PIMv2 implementations don't make this distinction,
	 * so for compatibility reasons perform the checksum over part of
	 * the message, and if error, then over the whole message.
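	 *
	 * (PIM-SM, e.g. RFC 7761 sec. 4.9, computes the Register checksum
	 * over the first 8 bytes only, excluding the encapsulated data
	 * packet -- hence the in_cksum(m, PIM_MINLEN) fast path tried
	 * first below.)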
*/ if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) { /* do nothing, checksum okay */ } else if (in_cksum(m, datalen)) { PIMSTAT_INC(pims_rcv_badsum); CTR1(KTR_IPMF, "%s: invalid checksum", __func__); m_freem(m); return (IPPROTO_DONE); } /* PIM version check */ if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) { PIMSTAT_INC(pims_rcv_badversion); CTR3(KTR_IPMF, "%s: bad version %d expect %d", __func__, (int)PIM_VT_V(pim->pim_vt), PIM_VERSION); m_freem(m); return (IPPROTO_DONE); } /* restore mbuf back to the outer IP */ m->m_data -= iphlen; m->m_len += iphlen; if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) { /* * Since this is a REGISTER, we'll make a copy of the register * headers ip + pim + u_int32 + encap_ip, to be passed up to the * routing daemon. */ struct sockaddr_in dst = { sizeof(dst), AF_INET }; struct mbuf *mcp; struct ip *encap_ip; u_int32_t *reghdr; struct ifnet *vifp; MRW_RLOCK(); if ((V_reg_vif_num >= V_numvifs) || (V_reg_vif_num == VIFI_INVALID)) { MRW_RUNLOCK(); CTR2(KTR_IPMF, "%s: register vif not set: %d", __func__, (int)V_reg_vif_num); m_freem(m); return (IPPROTO_DONE); } /* XXX need refcnt? */ vifp = V_viftable[V_reg_vif_num].v_ifp; MRW_RUNLOCK(); /* * Validate length */ if (datalen < PIM_REG_MINLEN) { PIMSTAT_INC(pims_rcv_tooshort); PIMSTAT_INC(pims_rcv_badregisters); CTR1(KTR_IPMF, "%s: register packet size too small", __func__); m_freem(m); return (IPPROTO_DONE); } reghdr = (u_int32_t *)(pim + 1); encap_ip = (struct ip *)(reghdr + 1); CTR3(KTR_IPMF, "%s: register: encap ip src 0x%08x len %d", __func__, ntohl(encap_ip->ip_src.s_addr), ntohs(encap_ip->ip_len)); /* verify the version number of the inner packet */ if (encap_ip->ip_v != IPVERSION) { PIMSTAT_INC(pims_rcv_badregisters); CTR1(KTR_IPMF, "%s: bad encap ip version", __func__); m_freem(m); return (IPPROTO_DONE); } /* verify the inner packet is destined to a mcast group */ if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) { PIMSTAT_INC(pims_rcv_badregisters); CTR2(KTR_IPMF, "%s: bad encap ip dest 0x%08x", __func__, ntohl(encap_ip->ip_dst.s_addr)); m_freem(m); return (IPPROTO_DONE); } /* If a NULL_REGISTER, pass it to the daemon */ if ((ntohl(*reghdr) & PIM_NULL_REGISTER)) goto pim_input_to_daemon; /* * Copy the TOS from the outer IP header to the inner IP header. */ if (encap_ip->ip_tos != ip_tos) { /* Outer TOS -> inner TOS */ encap_ip->ip_tos = ip_tos; /* Recompute the inner header checksum. Sigh... */ /* adjust mbuf to point to the inner IP header */ m->m_data += (iphlen + PIM_MINLEN); m->m_len -= (iphlen + PIM_MINLEN); encap_ip->ip_sum = 0; encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2); /* restore mbuf to point back to the outer IP header */ m->m_data -= (iphlen + PIM_MINLEN); m->m_len += (iphlen + PIM_MINLEN); } /* * Decapsulate the inner IP packet and loopback to forward it * as a normal multicast packet. Also, make a copy of the * outer_iphdr + pimhdr + reghdr + encap_iphdr * to pass to the daemon later, so it can take the appropriate * actions (e.g., send back PIM_REGISTER_STOP). * XXX: here m->m_data points to the outer IP header. */ mcp = m_copym(m, 0, iphlen + PIM_REG_MINLEN, M_NOWAIT); if (mcp == NULL) { CTR1(KTR_IPMF, "%s: m_copym() failed", __func__); m_freem(m); return (IPPROTO_DONE); } /* Keep statistics */ /* XXX: registers_bytes include only the encap. mcast pkt */ PIMSTAT_INC(pims_rcv_registers_msgs); PIMSTAT_ADD(pims_rcv_registers_bytes, ntohs(encap_ip->ip_len)); /* * forward the inner ip packet; point m_data at the inner ip. 
*/ m_adj(m, iphlen + PIM_MINLEN); CTR4(KTR_IPMF, "%s: forward decap'd REGISTER: src %lx dst %lx vif %d", __func__, (u_long)ntohl(encap_ip->ip_src.s_addr), (u_long)ntohl(encap_ip->ip_dst.s_addr), (int)V_reg_vif_num); /* NB: vifp was collected above; can it change on us? */ if_simloop(vifp, m, dst.sin_family, 0); /* prepare the register head to send to the mrouting daemon */ m = mcp; } pim_input_to_daemon: /* * Pass the PIM message up to the daemon; if it is a Register message, * pass only the 'head'. This includes the * outer IP header, PIM header, PIM-Register header and the * inner IP header. * XXX: the outer IP header pkt size of a Register is not adjusted to * reflect the fact that the inner multicast data is truncated. */ return (rip_input(&m, &off, proto)); } static int sysctl_mfctable(SYSCTL_HANDLER_ARGS) { struct mfc *rt; int error, i; if (req->newptr) return (EPERM); if (V_mfchashtbl == NULL) /* XXX unlocked */ return (0); error = sysctl_wire_old_buffer(req, 0); if (error) return (error); MRW_RLOCK(); if (V_mfchashtbl == NULL) goto out_locked; for (i = 0; i < mfchashsize; i++) { LIST_FOREACH(rt, &V_mfchashtbl[i], mfc_hash) { error = SYSCTL_OUT(req, rt, sizeof(struct mfc)); if (error) goto out_locked; } } out_locked: MRW_RUNLOCK(); return (error); } static SYSCTL_NODE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mfctable, "IPv4 Multicast Forwarding Table " "(struct *mfc[mfchashsize], netinet/ip_mroute.h)"); static int sysctl_viflist(SYSCTL_HANDLER_ARGS) { int error, i; if (req->newptr) return (EPERM); if (V_viftable == NULL) /* XXX unlocked */ return (0); error = sysctl_wire_old_buffer(req, MROUTE_VIF_SYSCTL_LEN * MAXVIFS); if (error) return (error); MRW_RLOCK(); /* Copy out user-visible portion of vif entry.
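/*
 * Userland reads these opaque tables through sysctl(3). A hedged sketch
 * for the viftable oid defined just below (the record layout tracks the
 * running kernel, so real consumers compile against its headers):
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len = 0;
	void *buf;

	/* First call sizes the buffer; the second fetches the data. */
	if (sysctlbyname("net.inet.ip.viftable", NULL, &len, NULL, 0) == -1)
		return (1);
	if ((buf = malloc(len)) == NULL)
		return (1);
	if (sysctlbyname("net.inet.ip.viftable", buf, &len, NULL, 0) == -1) {
		free(buf);
		return (1);
	}
	printf("viftable: %zu bytes of struct vif records\n", len);
	free(buf);
	return (0);
}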
*/ for (i = 0; i < MAXVIFS; i++) { error = SYSCTL_OUT(req, &V_viftable[i], MROUTE_VIF_SYSCTL_LEN); if (error) break; } MRW_RUNLOCK(); return (error); } SYSCTL_PROC(_net_inet_ip, OID_AUTO, viftable, CTLTYPE_OPAQUE | CTLFLAG_VNET | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_viflist, "S,vif[MAXVIFS]", "IPv4 Multicast Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)"); static void vnet_mroute_init(const void *unused __unused) { V_nexpire = malloc(mfchashsize, M_MRTABLE, M_WAITOK|M_ZERO); V_viftable = mallocarray(MAXVIFS, sizeof(*V_viftable), M_MRTABLE, M_WAITOK|M_ZERO); callout_init_rw(&V_expire_upcalls_ch, &mrouter_lock, 0); callout_init_rw(&V_bw_upcalls_ch, &mrouter_lock, 0); /* Prepare taskqueue */ V_task_queue = taskqueue_create_fast("ip_mroute_tskq", M_NOWAIT, taskqueue_thread_enqueue, &V_task_queue); taskqueue_start_threads(&V_task_queue, 1, PI_NET, "ip_mroute_tskq task"); } VNET_SYSINIT(vnet_mroute_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mroute_init, NULL); static void vnet_mroute_uninit(const void *unused __unused) { /* Taskqueue should be cancelled and drained before freeing */ taskqueue_free(V_task_queue); free(V_viftable, M_MRTABLE); free(V_nexpire, M_MRTABLE); V_nexpire = NULL; } VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, vnet_mroute_uninit, NULL); static int ip_mroute_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: MRW_TEARDOWN_LOCK_INIT(); MRW_LOCK_INIT(); if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, if_detached_event, NULL, EVENTHANDLER_PRI_ANY); if (if_detach_event_tag == NULL) { printf("ip_mroute: unable to register " "ifnet_departure_event handler\n"); MRW_LOCK_DESTROY(); return (EINVAL); } if (!powerof2(mfchashsize)) { printf("WARNING: %s not a power of 2; using default\n", "net.inet.ip.mfchashsize"); mfchashsize = MFCHASHSIZE; } pim_encap_cookie = ip_encap_attach(&ipv4_encap_cfg, NULL, M_WAITOK); ip_mcast_src = X_ip_mcast_src; ip_mforward = X_ip_mforward; ip_mrouter_done = X_ip_mrouter_done; ip_mrouter_get = X_ip_mrouter_get; ip_mrouter_set = X_ip_mrouter_set; ip_rsvp_force_done = X_ip_rsvp_force_done; ip_rsvp_vif = X_ip_rsvp_vif; legal_vif_num = X_legal_vif_num; mrt_ioctl = X_mrt_ioctl; rsvp_input_p = X_rsvp_input; break; case MOD_UNLOAD: /* * Typically module unload happens after the user-level * process has shut down the kernel services (the check * below ensures someone can't just yank the module out * from under a running process). But if the module is * just loaded and then unloaded w/o starting up a user * process we still need to clean up.
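/*
 * The load/unload sequence above follows the standard modevent pattern.
 * A minimal, hedged skeleton of that pattern (generic names, not this
 * module's):
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>

static int
example_modevent(module_t mod __unused, int type, void *arg __unused)
{
	switch (type) {
	case MOD_LOAD:
		/* Register hooks here; a non-zero return fails the load. */
		return (0);
	case MOD_UNLOAD:
		/*
		 * Refuse to unload while consumers remain (cf. the
		 * ip_mrouter_cnt check below).
		 */
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_mod = {
	"example",
	example_modevent,
	NULL
};
DECLARE_MODULE(example, example_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);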
*/ MRW_WLOCK(); if (ip_mrouter_cnt != 0) { MRW_WUNLOCK(); return (EINVAL); } ip_mrouter_unloading = 1; MRW_WUNLOCK(); EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag); if (pim_encap_cookie) { ip_encap_detach(pim_encap_cookie); pim_encap_cookie = NULL; } ip_mcast_src = NULL; ip_mforward = NULL; ip_mrouter_done = NULL; ip_mrouter_get = NULL; ip_mrouter_set = NULL; ip_rsvp_force_done = NULL; ip_rsvp_vif = NULL; legal_vif_num = NULL; mrt_ioctl = NULL; rsvp_input_p = NULL; MRW_LOCK_DESTROY(); MRW_TEARDOWN_LOCK_DESTROY(); break; default: return EOPNOTSUPP; } return 0; } static moduledata_t ip_mroutemod = { "ip_mroute", ip_mroute_modevent, 0 }; DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE); diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c index 35aaf85d6a4e..9d72300e8b68 100644 --- a/sys/netinet/ip_output.c +++ b/sys/netinet/ip_output.c @@ -1,1581 +1,1581 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_inet.h" #include "opt_ipsec.h" #include "opt_kern_tls.h" #include "opt_mbuf_stress_test.h" #include "opt_ratelimit.h" #include "opt_route.h" #include "opt_rss.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SCTP) || defined(SCTP_SUPPORT) #include #include #endif #include #include #include #ifdef MBUF_STRESS_TEST static int mbuf_frag_size = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, mbuf_frag_size, CTLFLAG_RW, &mbuf_frag_size, 0, "Fragment outgoing mbufs to this size"); #endif static void ip_mloopback(struct ifnet *, const struct mbuf *, int); extern int in_mcast_loop; static inline int ip_output_pfil(struct mbuf **mp, struct ifnet *ifp, int flags, struct inpcb *inp, struct sockaddr_in *dst, int *fibnum, int *error) { struct m_tag *fwd_tag = NULL; struct mbuf *m; struct in_addr odst; struct ip *ip; int ret; m = *mp; ip = mtod(m, struct ip *); /* Run through list of hooks for output packets. */ odst.s_addr = ip->ip_dst.s_addr; if (flags & IP_FORWARDING) ret = pfil_mbuf_fwd(V_inet_pfil_head, mp, ifp, inp); else ret = pfil_mbuf_out(V_inet_pfil_head, mp, ifp, inp); switch (ret) { case PFIL_DROPPED: *error = EACCES; /* FALLTHROUGH */ case PFIL_CONSUMED: return 1; /* Finished */ case PFIL_PASS: *error = 0; } m = *mp; ip = mtod(m, struct ip *); /* See if destination IP address was changed by packet filter. */ if (odst.s_addr != ip->ip_dst.s_addr) { m->m_flags |= M_SKIP_FIREWALL; /* If destination is now ourself drop to ip_input(). */ if (in_localip(ip->ip_dst)) { m->m_flags |= M_FASTFWD_OURS; if (m->m_pkthdr.rcvif == NULL) m->m_pkthdr.rcvif = V_loif; if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; #if defined(SCTP) || defined(SCTP_SUPPORT) if (m->m_pkthdr.csum_flags & CSUM_SCTP) m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; #endif *error = netisr_queue(NETISR_IP, m); return 1; /* Finished */ } bzero(dst, sizeof(*dst)); dst->sin_family = AF_INET; dst->sin_len = sizeof(*dst); dst->sin_addr = ip->ip_dst; return -1; /* Reloop */ } /* See if fib was changed by packet filter. */ if ((*fibnum) != M_GETFIB(m)) { m->m_flags |= M_SKIP_FIREWALL; *fibnum = M_GETFIB(m); return -1; /* Reloop for FIB change */ } /* See if local, if yes, send it to netisr with IP_FASTFWD_OURS. */ if (m->m_flags & M_FASTFWD_OURS) { if (m->m_pkthdr.rcvif == NULL) m->m_pkthdr.rcvif = V_loif; if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } #if defined(SCTP) || defined(SCTP_SUPPORT) if (m->m_pkthdr.csum_flags & CSUM_SCTP) m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; #endif m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; *error = netisr_queue(NETISR_IP, m); return 1; /* Finished */ } /* Or forward to some other address? 
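/*
 * A note on this helper's contract, as consumed by the switch in
 * ip_output() below (hedged summary of the return comments above):
 *
 *	 1 -> finished: the packet was dropped, consumed, or re-queued
 *	 0 -> pass: continue output with the (possibly rewritten) mbuf
 *	-1 -> reloop: destination or FIB changed; redo the route lookup
 */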
*/ if ((m->m_flags & M_IP_NEXTHOP) && ((fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL)) { bcopy((fwd_tag+1), dst, sizeof(struct sockaddr_in)); m->m_flags |= M_SKIP_FIREWALL; m->m_flags &= ~M_IP_NEXTHOP; m_tag_delete(m, fwd_tag); return -1; /* Reloop for CHANGE of dst */ } return 0; } static int ip_output_send(struct inpcb *inp, struct ifnet *ifp, struct mbuf *m, const struct sockaddr *gw, struct route *ro, bool stamp_tag) { #ifdef KERN_TLS struct ktls_session *tls = NULL; #endif struct m_snd_tag *mst; int error; MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); mst = NULL; #ifdef KERN_TLS /* * If this is an unencrypted TLS record, save a reference to * the record. This local reference is used to call * ktls_output_eagain after the mbuf has been freed (thus * dropping the mbuf's reference) in if_output. */ if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) { tls = ktls_hold(m->m_next->m_epg_tls); mst = tls->snd_tag; /* * If a TLS session doesn't have a valid tag, it must * have had an earlier ifp mismatch, so drop this * packet. */ if (mst == NULL) { m_freem(m); error = EAGAIN; goto done; } /* * Always stamp tags that include NIC ktls. */ stamp_tag = true; } #endif #ifdef RATELIMIT if (inp != NULL && mst == NULL) { if ((inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) != 0 || (inp->inp_snd_tag != NULL && inp->inp_snd_tag->ifp != ifp)) in_pcboutput_txrtlmt(inp, ifp, m); if (inp->inp_snd_tag != NULL) mst = inp->inp_snd_tag; } #endif if (stamp_tag && mst != NULL) { KASSERT(m->m_pkthdr.rcvif == NULL, ("trying to add a send tag to a forwarded packet")); if (mst->ifp != ifp) { m_freem(m); error = EAGAIN; goto done; } /* stamp send tag on mbuf */ m->m_pkthdr.snd_tag = m_snd_tag_ref(mst); m->m_pkthdr.csum_flags |= CSUM_SND_TAG; } error = (*ifp->if_output)(ifp, m, gw, ro); done: /* Check for route change invalidating send tags. */ #ifdef KERN_TLS if (tls != NULL) { if (error == EAGAIN) error = ktls_output_eagain(inp, tls); ktls_free(tls); } #endif #ifdef RATELIMIT if (error == EAGAIN) in_pcboutput_eagain(inp); #endif return (error); } /* rte<>ro_flags translation */ static inline void rt_update_ro_flags(struct route *ro, const struct nhop_object *nh) { int nh_flags = nh->nh_flags; ro->ro_flags &= ~ (RT_REJECT|RT_BLACKHOLE|RT_HAS_GW); ro->ro_flags |= (nh_flags & NHF_REJECT) ? RT_REJECT : 0; ro->ro_flags |= (nh_flags & NHF_BLACKHOLE) ? RT_BLACKHOLE : 0; ro->ro_flags |= (nh_flags & NHF_GATEWAY) ? RT_HAS_GW : 0; } /* * IP output. The packet in mbuf chain m contains a skeletal IP * header (with len, off, ttl, proto, tos, src, dst). * The mbuf chain containing the packet will be freed. * The mbuf opt, if present, will not be freed. * If route ro is present and has ro_nh initialized, the route lookup will be * skipped and ro->ro_nh will be used. If ro is present but ro->ro_nh is NULL, * then the result of the route lookup is stored in ro->ro_nh. * * In the IP forwarding case, the packet will arrive with options already * inserted, so it must have a NULL opt pointer.
*/ int ip_output(struct mbuf *m, struct mbuf *opt, struct route *ro, int flags, struct ip_moptions *imo, struct inpcb *inp) { struct ip *ip; struct ifnet *ifp = NULL; /* keep compiler happy */ struct mbuf *m0; int hlen = sizeof (struct ip); int mtu = 0; int error = 0; int vlan_pcp = -1; struct sockaddr_in *dst; const struct sockaddr *gw; struct in_ifaddr *ia = NULL; struct in_addr src; bool isbroadcast; uint16_t ip_len, ip_off; struct route iproute; uint32_t fibnum; #if defined(IPSEC) || defined(IPSEC_SUPPORT) int no_route_but_check_spd = 0; #endif M_ASSERTPKTHDR(m); NET_EPOCH_ASSERT(); if (inp != NULL) { INP_LOCK_ASSERT(inp); M_SETFIB(m, inp->inp_inc.inc_fibnum); if ((flags & IP_NODEFAULTFLOWID) == 0) { m->m_pkthdr.flowid = inp->inp_flowid; M_HASHTYPE_SET(m, inp->inp_flowtype); } if ((inp->inp_flags2 & INP_2PCP_SET) != 0) vlan_pcp = (inp->inp_flags2 & INP_2PCP_MASK) >> INP_2PCP_SHIFT; #ifdef NUMA m->m_pkthdr.numa_domain = inp->inp_numa_domain; #endif } if (opt) { int len = 0; m = ip_insertoptions(m, opt, &len); if (len != 0) hlen = len; /* ip->ip_hl is updated above */ } ip = mtod(m, struct ip *); ip_len = ntohs(ip->ip_len); ip_off = ntohs(ip->ip_off); if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) { ip->ip_v = IPVERSION; ip->ip_hl = hlen >> 2; - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); } else { /* Header already set, fetch hlen from there */ hlen = ip->ip_hl << 2; } if ((flags & IP_FORWARDING) == 0) IPSTAT_INC(ips_localout); /* * dst/gw handling: * * gw is readonly but can point either to dst OR rt_gateway, * therefore we need to restore gw if we're redoing the lookup. */ fibnum = (inp != NULL) ? inp->inp_inc.inc_fibnum : M_GETFIB(m); if (ro == NULL) { ro = &iproute; bzero(ro, sizeof (*ro)); } dst = (struct sockaddr_in *)&ro->ro_dst; if (ro->ro_nh == NULL) { dst->sin_family = AF_INET; dst->sin_len = sizeof(*dst); dst->sin_addr = ip->ip_dst; } gw = (const struct sockaddr *)dst; again: /* * Validate route against routing table additions; * a better/more specific route might have been added. */ if (inp != NULL && ro->ro_nh != NULL) NH_VALIDATE(ro, &inp->inp_rt_cookie, fibnum); /* * If there is a cached route, * check that it is to the same destination * and is still up. If not, free it and try again. * The address family should also be checked in case of sharing the * cache with IPv6. * Also check whether routing cache needs invalidation. */ if (ro->ro_nh != NULL && ((!NH_IS_VALID(ro->ro_nh)) || dst->sin_family != AF_INET || dst->sin_addr.s_addr != ip->ip_dst.s_addr)) RO_INVALIDATE_CACHE(ro); ia = NULL; /* * If routing to interface only, short circuit routing lookup. * The use of an all-ones broadcast address implies this; an * interface is specified by the broadcast address of an interface, * or the destination address of a ptp interface. */ if (flags & IP_SENDONES) { if ((ia = ifatoia(ifa_ifwithbroadaddr(sintosa(dst), M_GETFIB(m)))) == NULL && (ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst), M_GETFIB(m)))) == NULL) { IPSTAT_INC(ips_noroute); error = ENETUNREACH; goto bad; } ip->ip_dst.s_addr = INADDR_BROADCAST; dst->sin_addr = ip->ip_dst; ifp = ia->ia_ifp; mtu = ifp->if_mtu; ip->ip_ttl = 1; isbroadcast = true; src = IA_SIN(ia)->sin_addr; } else if (flags & IP_ROUTETOIF) { if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst), M_GETFIB(m)))) == NULL && (ia = ifatoia(ifa_ifwithnet(sintosa(dst), 0, M_GETFIB(m)))) == NULL) { IPSTAT_INC(ips_noroute); error = ENETUNREACH; goto bad; } ifp = ia->ia_ifp; mtu = ifp->if_mtu; ip->ip_ttl = 1; isbroadcast = ifp->if_flags & IFF_BROADCAST ?
(in_broadcast(ip->ip_dst) || in_ifaddr_broadcast(dst->sin_addr, ia)) : 0; src = IA_SIN(ia)->sin_addr; } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) && imo != NULL && imo->imo_multicast_ifp != NULL) { /* * Bypass the normal routing lookup for multicast * packets if the interface is specified. */ ifp = imo->imo_multicast_ifp; mtu = ifp->if_mtu; IFP_TO_IA(ifp, ia); isbroadcast = false; /* Interface may have no addresses. */ if (ia != NULL) src = IA_SIN(ia)->sin_addr; else src.s_addr = INADDR_ANY; } else if (ro != &iproute) { if (ro->ro_nh == NULL) { /* * We want to do any cloning requested by the link * layer, as this is probably required in all cases * for correct operation (as it is for ARP). */ uint32_t flowid; flowid = m->m_pkthdr.flowid; ro->ro_nh = fib4_lookup(fibnum, dst->sin_addr, 0, NHR_REF, flowid); if (ro->ro_nh == NULL || (!NH_IS_VALID(ro->ro_nh))) { #if defined(IPSEC) || defined(IPSEC_SUPPORT) /* * There is no route for this packet, but it is * possible that a matching SPD entry exists. */ no_route_but_check_spd = 1; goto sendit; #endif IPSTAT_INC(ips_noroute); error = EHOSTUNREACH; goto bad; } } struct nhop_object *nh = ro->ro_nh; ia = ifatoia(nh->nh_ifa); ifp = nh->nh_ifp; counter_u64_add(nh->nh_pksent, 1); rt_update_ro_flags(ro, nh); if (nh->nh_flags & NHF_GATEWAY) gw = &nh->gw_sa; if (nh->nh_flags & NHF_HOST) isbroadcast = (nh->nh_flags & NHF_BROADCAST); else if ((ifp->if_flags & IFF_BROADCAST) && (gw->sa_family == AF_INET)) isbroadcast = in_broadcast(ip->ip_dst) || in_ifaddr_broadcast( ((const struct sockaddr_in *)gw)->sin_addr, ia); else isbroadcast = false; mtu = nh->nh_mtu; src = IA_SIN(ia)->sin_addr; } else { struct nhop_object *nh; nh = fib4_lookup(M_GETFIB(m), dst->sin_addr, 0, NHR_NONE, m->m_pkthdr.flowid); if (nh == NULL) { #if defined(IPSEC) || defined(IPSEC_SUPPORT) /* * There is no route for this packet, but it is * possible that a matching SPD entry exists. */ no_route_but_check_spd = 1; goto sendit; #endif IPSTAT_INC(ips_noroute); error = EHOSTUNREACH; goto bad; } ifp = nh->nh_ifp; mtu = nh->nh_mtu; rt_update_ro_flags(ro, nh); if (nh->nh_flags & NHF_GATEWAY) gw = &nh->gw_sa; ia = ifatoia(nh->nh_ifa); src = IA_SIN(ia)->sin_addr; isbroadcast = ((nh->nh_flags & (NHF_HOST | NHF_BROADCAST)) == (NHF_HOST | NHF_BROADCAST)) || ((ifp->if_flags & IFF_BROADCAST) && (gw->sa_family == AF_INET) && (in_broadcast(ip->ip_dst) || in_ifaddr_broadcast( ((const struct sockaddr_in *)gw)->sin_addr, ia))); } /* Catch a possible divide by zero later. */ KASSERT(mtu > 0, ("%s: mtu %d <= 0, ro=%p (nh_flags=0x%08x) ifp=%p", __func__, mtu, ro, (ro != NULL && ro->ro_nh != NULL) ? ro->ro_nh->nh_flags : 0, ifp)); if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { m->m_flags |= M_MCAST; /* * IP destination address is multicast. Make sure "gw" * still points to the address in "ro". (It may have been * changed to point to a gateway address, above.) */ gw = (const struct sockaddr *)dst; /* * See if the caller provided any multicast options */ if (imo != NULL) { ip->ip_ttl = imo->imo_multicast_ttl; if (imo->imo_multicast_vif != -1) ip->ip_src.s_addr = ip_mcast_src ? ip_mcast_src(imo->imo_multicast_vif) : INADDR_ANY; } else ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL; /* * Confirm that the outgoing interface supports multicast. */ if ((imo == NULL) || (imo->imo_multicast_vif == -1)) { if ((ifp->if_flags & IFF_MULTICAST) == 0) { IPSTAT_INC(ips_noroute); error = ENETUNREACH; goto bad; } } /* * If source address not specified yet, use address * of outgoing interface. 
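/*
 * On the ip_fillid(ip, V_ip_random_id) calls in this patch: the new
 * boolean argument selects randomized versus sequential IP IDs per
 * packet. A hedged sketch of the distinction (illustrative only; the
 * real generator lives in ip_id.c and is more careful about reusing
 * recent IDs):
 */
#include <stdint.h>
#include <stdlib.h>	/* arc4random() on FreeBSD */

static uint16_t example_id_counter;

static uint16_t
example_fillid(int random_id)
{
	if (random_id)
		return ((uint16_t)arc4random());
	return (example_id_counter++);	/* byte-swapped on the wire */
}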
*/ if (ip->ip_src.s_addr == INADDR_ANY) ip->ip_src = src; if ((imo == NULL && in_mcast_loop) || (imo && imo->imo_multicast_loop)) { /* * Loop back multicast datagram if not expressly * forbidden to do so, even if we are not a member * of the group; ip_input() will filter it later, * thus deferring a hash lookup and mutex acquisition * at the expense of a cheap copy using m_copym(). */ ip_mloopback(ifp, m, hlen); } else { /* * If we are acting as a multicast router, perform * multicast forwarding as if the packet had just * arrived on the interface to which we are about * to send. The multicast forwarding function * recursively calls this function, using the * IP_FORWARDING flag to prevent infinite recursion. * * Multicasts that are looped back by ip_mloopback(), * above, will be forwarded by the ip_input() routine, * if necessary. */ if (V_ip_mrouter && (flags & IP_FORWARDING) == 0) { /* * If rsvp daemon is not running, do not * set ip_moptions. This ensures that the packet * is multicast and not just sent down one link * as prescribed by rsvpd. */ if (!V_rsvp_on) imo = NULL; if (ip_mforward && ip_mforward(ip, ifp, m, imo) != 0) { m_freem(m); goto done; } } } /* * Multicasts with a time-to-live of zero may be looped * back, above, but must not be transmitted on a network. * Also, multicasts addressed to the loopback interface * are not sent -- the above call to ip_mloopback() will * loop back a copy. ip_input() will drop the copy if * this host does not belong to the destination group on * the loopback interface. */ if (ip->ip_ttl == 0 || ifp->if_flags & IFF_LOOPBACK) { m_freem(m); goto done; } goto sendit; } /* * If the source address is not specified yet, use the address * of the outgoing interface. */ if (ip->ip_src.s_addr == INADDR_ANY) ip->ip_src = src; /* * Look for broadcast address and * verify user is allowed to send * such a packet. */ if (isbroadcast) { if ((ifp->if_flags & IFF_BROADCAST) == 0) { error = EADDRNOTAVAIL; goto bad; } if ((flags & IP_ALLOWBROADCAST) == 0) { error = EACCES; goto bad; } /* don't allow broadcast messages to be fragmented */ if (ip_len > mtu) { error = EMSGSIZE; goto bad; } m->m_flags |= M_BCAST; } else { m->m_flags &= ~M_BCAST; } sendit: #if defined(IPSEC) || defined(IPSEC_SUPPORT) if (IPSEC_ENABLED(ipv4)) { struct ip ip_hdr; if ((error = IPSEC_OUTPUT(ipv4, ifp, m, inp, mtu)) != 0) { if (error == EINPROGRESS) error = 0; goto done; } /* Update variables that are affected by ipsec4_output(). */ m_copydata(m, 0, sizeof(ip_hdr), (char *)&ip_hdr); hlen = ip_hdr.ip_hl << 2; } /* * Check if there was a route for this packet; return error if not. */ if (no_route_but_check_spd) { IPSTAT_INC(ips_noroute); error = EHOSTUNREACH; goto bad; } #endif /* IPSEC */ /* Jump over all PFIL processing if hooks are not active. */ if (PFIL_HOOKED_OUT(V_inet_pfil_head)) { switch (ip_output_pfil(&m, ifp, flags, inp, dst, &fibnum, &error)) { case 1: /* Finished */ goto done; case 0: /* Continue normally */ ip = mtod(m, struct ip *); ip_len = ntohs(ip->ip_len); break; case -1: /* Need to try again */ /* Reset everything for a new round */ if (ro != NULL) { RO_NHFREE(ro); ro->ro_prepend = NULL; } gw = (const struct sockaddr *)dst; ip = mtod(m, struct ip *); goto again; } } if (vlan_pcp > -1) EVL_APPLY_PRI(m, vlan_pcp); /* IN_LOOPBACK must not appear on the wire - RFC1122.
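/*
 * The IP_ALLOWBROADCAST check above is what userland trips over when it
 * forgets setsockopt(SO_BROADCAST): the send fails with EACCES. A hedged
 * sketch (port and helper name are examples):
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int
send_broadcast(int s, const void *msg, size_t len)
{
	struct sockaddr_in dst;
	int one = 1;

	/* Without this, ip_output() rejects the broadcast with EACCES. */
	if (setsockopt(s, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one)) == -1)
		return (-1);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_port = htons(12345);		/* example port */
	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
	return (sendto(s, msg, len, 0, (struct sockaddr *)&dst,
	    sizeof(dst)) == -1 ? -1 : 0);
}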
*/ if (IN_LOOPBACK(ntohl(ip->ip_dst.s_addr)) || IN_LOOPBACK(ntohl(ip->ip_src.s_addr))) { if ((ifp->if_flags & IFF_LOOPBACK) == 0) { IPSTAT_INC(ips_badaddr); error = EADDRNOTAVAIL; goto bad; } } /* Ensure the packet data is mapped if the interface requires it. */ if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) { struct mbuf *m1; error = mb_unmapped_to_ext(m, &m1); if (error != 0) { if (error == EINVAL) { if_printf(ifp, "TLS packet\n"); /* XXXKIB */ } else if (error == ENOMEM) { error = ENOBUFS; } IPSTAT_INC(ips_odropped); goto bad; } else { m = m1; } } m->m_pkthdr.csum_flags |= CSUM_IP; if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) { in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } #if defined(SCTP) || defined(SCTP_SUPPORT) if (m->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) { sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2)); m->m_pkthdr.csum_flags &= ~CSUM_SCTP; } #endif /* * If small enough for interface, or the interface will take * care of the fragmentation for us, we can just send directly. * Note that if_vxlan could have requested TSO even though the outer * frame is UDP. It is correct to not fragment such datagrams and * instead just pass them on to the driver. */ if (ip_len <= mtu || (m->m_pkthdr.csum_flags & ifp->if_hwassist & (CSUM_TSO | CSUM_INNER_TSO)) != 0) { ip->ip_sum = 0; if (m->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) { ip->ip_sum = in_cksum(m, hlen); m->m_pkthdr.csum_flags &= ~CSUM_IP; } /* * Record statistics for this interface address. * With CSUM_TSO the byte/packet count will be slightly * incorrect because we count the IP+TCP headers only * once instead of for every generated packet. */ if (!(flags & IP_FORWARDING) && ia) { if (m->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_INNER_TSO)) counter_u64_add(ia->ia_ifa.ifa_opackets, m->m_pkthdr.len / m->m_pkthdr.tso_segsz); else counter_u64_add(ia->ia_ifa.ifa_opackets, 1); counter_u64_add(ia->ia_ifa.ifa_obytes, m->m_pkthdr.len); } #ifdef MBUF_STRESS_TEST if (mbuf_frag_size && m->m_pkthdr.len > mbuf_frag_size) m = m_fragment(m, M_NOWAIT, mbuf_frag_size); #endif /* * Reset layer specific mbuf flags * to avoid confusing lower layers. */ m_clrprotoflags(m); IP_PROBE(send, NULL, NULL, ip, ifp, ip, NULL); error = ip_output_send(inp, ifp, m, gw, ro, (flags & IP_NO_SND_TAG_RL) ? false : true); goto done; } /* Balk when DF bit is set or the interface didn't support TSO. */ if ((ip_off & IP_DF) || (m->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_INNER_TSO))) { error = EMSGSIZE; IPSTAT_INC(ips_cantfrag); goto bad; } /* * Too large for interface; fragment if possible. If successful, * on return, m will point to a list of packets to be sent. */ error = ip_fragment(ip, &m, mtu, ifp->if_hwassist); if (error) goto bad; for (; m; m = m0) { m0 = m->m_nextpkt; m->m_nextpkt = 0; if (error == 0) { /* Record statistics for this interface address. */ if (ia != NULL) { counter_u64_add(ia->ia_ifa.ifa_opackets, 1); counter_u64_add(ia->ia_ifa.ifa_obytes, m->m_pkthdr.len); } /* * Reset layer specific mbuf flags * to avoid confusing upper layers. */ m_clrprotoflags(m); IP_PROBE(send, NULL, NULL, mtod(m, struct ip *), ifp, mtod(m, struct ip *), NULL); error = ip_output_send(inp, ifp, m, gw, ro, true); } else m_freem(m); } if (error == 0) IPSTAT_INC(ips_fragmented); done: return (error); bad: m_freem(m); goto done; } /* * Create a chain of fragments which fit the given mtu. m_frag points to the * mbuf to be fragmented; on return it points to the chain with the fragments. * Return 0 if no error. 
If error, m_frag may contain a partially built * chain of fragments that should be freed by the caller. * * if_hwassist_flags is the hw offload capabilities (see if_data.ifi_hwassist) */ int ip_fragment(struct ip *ip, struct mbuf **m_frag, int mtu, u_long if_hwassist_flags) { int error = 0; int hlen = ip->ip_hl << 2; int len = (mtu - hlen) & ~7; /* size of payload in each fragment */ int off; struct mbuf *m0 = *m_frag; /* the original packet */ int firstlen; struct mbuf **mnext; int nfrags; uint16_t ip_len, ip_off; ip_len = ntohs(ip->ip_len); ip_off = ntohs(ip->ip_off); /* * The packet must not have the "Don't Fragment" flag set, and each * fragment must be able to carry at least 8 bytes of payload. */ if (__predict_false((ip_off & IP_DF) || len < 8)) { IPSTAT_INC(ips_cantfrag); return (EMSGSIZE); } /* * If the interface will not calculate checksums on * fragmented packets, then do it here. */ if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(m0); m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } #if defined(SCTP) || defined(SCTP_SUPPORT) if (m0->m_pkthdr.csum_flags & CSUM_SCTP) { sctp_delayed_cksum(m0, hlen); m0->m_pkthdr.csum_flags &= ~CSUM_SCTP; } #endif if (len > PAGE_SIZE) { /* * Fragment large datagrams such that each segment * contains a multiple of PAGE_SIZE amount of data, * plus headers. This enables a receiver to perform * page-flipping zero-copy optimizations. * * XXX When does this help given that sender and receiver * could have different page sizes, and also mtu could * be less than the receiver's page size? */ int newlen; off = MIN(mtu, m0->m_pkthdr.len); /* * firstlen (off - hlen) must be aligned on an * 8-byte boundary */ if (off < hlen) goto smart_frag_failure; off = ((off - hlen) & ~7) + hlen; newlen = (~PAGE_MASK) & mtu; if ((newlen + sizeof (struct ip)) > mtu) { /* we failed, go back to the default */ smart_frag_failure: newlen = len; off = hlen + len; } len = newlen; } else { off = hlen + len; } firstlen = off - hlen; mnext = &m0->m_nextpkt; /* pointer to next packet */ /* * Loop through length of segment after first fragment, * make new header and copy data of each part and link onto chain. * Here, m0 is the original packet, m is the fragment being created. * The fragments are linked off the m_nextpkt of the original * packet, which after processing serves as the first fragment. */ for (nfrags = 1; off < ip_len; off += len, nfrags++) { struct ip *mhip; /* ip header on the fragment */ struct mbuf *m; int mhlen = sizeof (struct ip); m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { error = ENOBUFS; IPSTAT_INC(ips_odropped); goto done; } /* * Make sure the complete packet header gets copied * from the originating mbuf to the newly created * mbuf. This also ensures that existing firewall * classification(s), VLAN tags and so on get copied * to the resulting fragmented packet(s): */ if (m_dup_pkthdr(m, m0, M_NOWAIT) == 0) { m_free(m); error = ENOBUFS; IPSTAT_INC(ips_odropped); goto done; } /* * In the first mbuf, leave room for the link header, then * copy the original IP header including options. The payload * goes into an additional mbuf chain returned by m_copym(). */ m->m_data += max_linkhdr; mhip = mtod(m, struct ip *); *mhip = *ip; if (hlen > sizeof (struct ip)) { mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); mhip->ip_v = IPVERSION; mhip->ip_hl = mhlen >> 2; } m->m_len = mhlen; /* XXX do we need to add ip_off below ?
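/*
 * Worked example of the offset math below (hedged, typical Ethernet
 * numbers): with mtu 1500 and hlen 20, len = (1500 - 20) & ~7 = 1480
 * payload bytes per fragment. The second fragment then starts at
 * off = hlen + len = 1500, and its header carries
 * ip_off = (off - hlen) >> 3 = 185 eight-byte units, with IP_MF set on
 * every fragment except the last.
 */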
*/ mhip->ip_off = ((off - hlen) >> 3) + ip_off; if (off + len >= ip_len) len = ip_len - off; else mhip->ip_off |= IP_MF; mhip->ip_len = htons((u_short)(len + mhlen)); m->m_next = m_copym(m0, off, len, M_NOWAIT); if (m->m_next == NULL) { /* copy failed */ m_free(m); error = ENOBUFS; /* ??? */ IPSTAT_INC(ips_odropped); goto done; } m->m_pkthdr.len = mhlen + len; #ifdef MAC mac_netinet_fragment(m0, m); #endif mhip->ip_off = htons(mhip->ip_off); mhip->ip_sum = 0; if (m->m_pkthdr.csum_flags & CSUM_IP & ~if_hwassist_flags) { mhip->ip_sum = in_cksum(m, mhlen); m->m_pkthdr.csum_flags &= ~CSUM_IP; } *mnext = m; mnext = &m->m_nextpkt; } IPSTAT_ADD(ips_ofragments, nfrags); /* * Update first fragment by trimming what's been copied out * and updating header. */ m_adj(m0, hlen + firstlen - ip_len); m0->m_pkthdr.len = hlen + firstlen; ip->ip_len = htons((u_short)m0->m_pkthdr.len); ip->ip_off = htons(ip_off | IP_MF); ip->ip_sum = 0; if (m0->m_pkthdr.csum_flags & CSUM_IP & ~if_hwassist_flags) { ip->ip_sum = in_cksum(m0, hlen); m0->m_pkthdr.csum_flags &= ~CSUM_IP; } done: *m_frag = m0; return error; } void in_delayed_cksum(struct mbuf *m) { struct ip *ip; struct udphdr *uh; uint16_t cklen, csum, offset; ip = mtod(m, struct ip *); offset = ip->ip_hl << 2 ; if (m->m_pkthdr.csum_flags & CSUM_UDP) { /* if udp header is not in the first mbuf copy udplen */ if (offset + sizeof(struct udphdr) > m->m_len) { m_copydata(m, offset + offsetof(struct udphdr, uh_ulen), sizeof(cklen), (caddr_t)&cklen); cklen = ntohs(cklen); } else { uh = (struct udphdr *)mtodo(m, offset); cklen = ntohs(uh->uh_ulen); } csum = in_cksum_skip(m, cklen + offset, offset); if (csum == 0) csum = 0xffff; } else { cklen = ntohs(ip->ip_len); csum = in_cksum_skip(m, cklen, offset); } offset += m->m_pkthdr.csum_data; /* checksum offset */ if (offset + sizeof(csum) > m->m_len) m_copyback(m, offset, sizeof(csum), (caddr_t)&csum); else *(u_short *)mtodo(m, offset) = csum; } /* * IP socket option processing. */ int ip_ctloutput(struct socket *so, struct sockopt *sopt) { struct inpcb *inp = sotoinpcb(so); int error, optval; #ifdef RSS uint32_t rss_bucket; int retval; #endif error = optval = 0; if (sopt->sopt_level != IPPROTO_IP) { error = EINVAL; if (sopt->sopt_level == SOL_SOCKET && sopt->sopt_dir == SOPT_SET) { switch (sopt->sopt_name) { case SO_SETFIB: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); if (error != 0) break; INP_WLOCK(inp); if ((inp->inp_flags & INP_BOUNDFIB) != 0 && optval != so->so_fibnum) { INP_WUNLOCK(inp); error = EISCONN; break; } error = sosetfib(inp->inp_socket, optval); if (error == 0) inp->inp_inc.inc_fibnum = optval; INP_WUNLOCK(inp); break; case SO_MAX_PACING_RATE: #ifdef RATELIMIT INP_WLOCK(inp); inp->inp_flags2 |= INP_RATE_LIMIT_CHANGED; INP_WUNLOCK(inp); error = 0; #else error = EOPNOTSUPP; #endif break; default: break; } } return (error); } switch (sopt->sopt_dir) { case SOPT_SET: switch (sopt->sopt_name) { case IP_OPTIONS: #ifdef notyet case IP_RETOPTS: #endif { struct mbuf *m; if (sopt->sopt_valsize > MLEN) { error = EMSGSIZE; break; } m = m_get(sopt->sopt_td ? 
M_WAITOK : M_NOWAIT, MT_DATA); if (m == NULL) { error = ENOBUFS; break; } m->m_len = sopt->sopt_valsize; error = sooptcopyin(sopt, mtod(m, char *), m->m_len, m->m_len); if (error) { m_free(m); break; } INP_WLOCK(inp); error = ip_pcbopts(inp, sopt->sopt_name, m); INP_WUNLOCK(inp); return (error); } case IP_BINDANY: if (sopt->sopt_td != NULL) { error = priv_check(sopt->sopt_td, PRIV_NETINET_BINDANY); if (error) break; } /* FALLTHROUGH */ case IP_TOS: case IP_TTL: case IP_MINTTL: case IP_RECVOPTS: case IP_RECVRETOPTS: case IP_ORIGDSTADDR: case IP_RECVDSTADDR: case IP_RECVTTL: case IP_RECVIF: case IP_ONESBCAST: case IP_DONTFRAG: case IP_RECVTOS: case IP_RECVFLOWID: #ifdef RSS case IP_RECVRSSBUCKETID: #endif case IP_VLAN_PCP: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) break; switch (sopt->sopt_name) { case IP_TOS: inp->inp_ip_tos = optval; break; case IP_TTL: inp->inp_ip_ttl = optval; break; case IP_MINTTL: if (optval >= 0 && optval <= MAXTTL) inp->inp_ip_minttl = optval; else error = EINVAL; break; #define OPTSET(bit) do { \ INP_WLOCK(inp); \ if (optval) \ inp->inp_flags |= bit; \ else \ inp->inp_flags &= ~bit; \ INP_WUNLOCK(inp); \ } while (0) #define OPTSET2(bit, val) do { \ INP_WLOCK(inp); \ if (val) \ inp->inp_flags2 |= bit; \ else \ inp->inp_flags2 &= ~bit; \ INP_WUNLOCK(inp); \ } while (0) case IP_RECVOPTS: OPTSET(INP_RECVOPTS); break; case IP_RECVRETOPTS: OPTSET(INP_RECVRETOPTS); break; case IP_RECVDSTADDR: OPTSET(INP_RECVDSTADDR); break; case IP_ORIGDSTADDR: OPTSET2(INP_ORIGDSTADDR, optval); break; case IP_RECVTTL: OPTSET(INP_RECVTTL); break; case IP_RECVIF: OPTSET(INP_RECVIF); break; case IP_ONESBCAST: OPTSET(INP_ONESBCAST); break; case IP_DONTFRAG: OPTSET(INP_DONTFRAG); break; case IP_BINDANY: OPTSET(INP_BINDANY); break; case IP_RECVTOS: OPTSET(INP_RECVTOS); break; case IP_RECVFLOWID: OPTSET2(INP_RECVFLOWID, optval); break; #ifdef RSS case IP_RECVRSSBUCKETID: OPTSET2(INP_RECVRSSBUCKETID, optval); break; #endif case IP_VLAN_PCP: if ((optval >= -1) && (optval <= (INP_2PCP_MASK >> INP_2PCP_SHIFT))) { if (optval == -1) { INP_WLOCK(inp); inp->inp_flags2 &= ~(INP_2PCP_SET | INP_2PCP_MASK); INP_WUNLOCK(inp); } else { INP_WLOCK(inp); inp->inp_flags2 |= INP_2PCP_SET; inp->inp_flags2 &= ~INP_2PCP_MASK; inp->inp_flags2 |= optval << INP_2PCP_SHIFT; INP_WUNLOCK(inp); } } else error = EINVAL; break; } break; #undef OPTSET #undef OPTSET2 /* * Multicast socket options are processed by the in_mcast * module. 
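/*
 * From userland, the OPTSET-backed options handled above are plain
 * booleans (and small integers) passed to setsockopt(2). A hedged
 * sketch with an example TOS value (enable_recvttl() is an illustrative
 * name):
 */
#include <sys/socket.h>
#include <netinet/in.h>

static int
enable_recvttl(int s)
{
	int on = 1, tos = 0x10;	/* example type-of-service byte */

	/* Ask for each datagram's TTL as ancillary data. */
	if (setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on)) == -1)
		return (-1);
	/* And set the outgoing type-of-service byte. */
	return (setsockopt(s, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)));
}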
*/ case IP_MULTICAST_IF: case IP_MULTICAST_VIF: case IP_MULTICAST_TTL: case IP_MULTICAST_LOOP: case IP_ADD_MEMBERSHIP: case IP_DROP_MEMBERSHIP: case IP_ADD_SOURCE_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: case IP_MSFILTER: case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: error = inp_setmoptions(inp, sopt); break; case IP_PORTRANGE: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) break; INP_WLOCK(inp); switch (optval) { case IP_PORTRANGE_DEFAULT: inp->inp_flags &= ~(INP_LOWPORT); inp->inp_flags &= ~(INP_HIGHPORT); break; case IP_PORTRANGE_HIGH: inp->inp_flags &= ~(INP_LOWPORT); inp->inp_flags |= INP_HIGHPORT; break; case IP_PORTRANGE_LOW: inp->inp_flags &= ~(INP_HIGHPORT); inp->inp_flags |= INP_LOWPORT; break; default: error = EINVAL; break; } INP_WUNLOCK(inp); break; #if defined(IPSEC) || defined(IPSEC_SUPPORT) case IP_IPSEC_POLICY: if (IPSEC_ENABLED(ipv4)) { error = IPSEC_PCBCTL(ipv4, inp, sopt); break; } /* FALLTHROUGH */ #endif /* IPSEC */ default: error = ENOPROTOOPT; break; } break; case SOPT_GET: switch (sopt->sopt_name) { case IP_OPTIONS: case IP_RETOPTS: INP_RLOCK(inp); if (inp->inp_options) { struct mbuf *options; options = m_copym(inp->inp_options, 0, M_COPYALL, M_NOWAIT); INP_RUNLOCK(inp); if (options != NULL) { error = sooptcopyout(sopt, mtod(options, char *), options->m_len); m_freem(options); } else error = ENOMEM; } else { INP_RUNLOCK(inp); sopt->sopt_valsize = 0; } break; case IP_TOS: case IP_TTL: case IP_MINTTL: case IP_RECVOPTS: case IP_RECVRETOPTS: case IP_ORIGDSTADDR: case IP_RECVDSTADDR: case IP_RECVTTL: case IP_RECVIF: case IP_PORTRANGE: case IP_ONESBCAST: case IP_DONTFRAG: case IP_BINDANY: case IP_RECVTOS: case IP_FLOWID: case IP_FLOWTYPE: case IP_RECVFLOWID: #ifdef RSS case IP_RSSBUCKETID: case IP_RECVRSSBUCKETID: #endif case IP_VLAN_PCP: switch (sopt->sopt_name) { case IP_TOS: optval = inp->inp_ip_tos; break; case IP_TTL: optval = inp->inp_ip_ttl; break; case IP_MINTTL: optval = inp->inp_ip_minttl; break; #define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0) #define OPTBIT2(bit) (inp->inp_flags2 & bit ? 
1 : 0) case IP_RECVOPTS: optval = OPTBIT(INP_RECVOPTS); break; case IP_RECVRETOPTS: optval = OPTBIT(INP_RECVRETOPTS); break; case IP_RECVDSTADDR: optval = OPTBIT(INP_RECVDSTADDR); break; case IP_ORIGDSTADDR: optval = OPTBIT2(INP_ORIGDSTADDR); break; case IP_RECVTTL: optval = OPTBIT(INP_RECVTTL); break; case IP_RECVIF: optval = OPTBIT(INP_RECVIF); break; case IP_PORTRANGE: if (inp->inp_flags & INP_HIGHPORT) optval = IP_PORTRANGE_HIGH; else if (inp->inp_flags & INP_LOWPORT) optval = IP_PORTRANGE_LOW; else optval = 0; break; case IP_ONESBCAST: optval = OPTBIT(INP_ONESBCAST); break; case IP_DONTFRAG: optval = OPTBIT(INP_DONTFRAG); break; case IP_BINDANY: optval = OPTBIT(INP_BINDANY); break; case IP_RECVTOS: optval = OPTBIT(INP_RECVTOS); break; case IP_FLOWID: optval = inp->inp_flowid; break; case IP_FLOWTYPE: optval = inp->inp_flowtype; break; case IP_RECVFLOWID: optval = OPTBIT2(INP_RECVFLOWID); break; #ifdef RSS case IP_RSSBUCKETID: retval = rss_hash2bucket(inp->inp_flowid, inp->inp_flowtype, &rss_bucket); if (retval == 0) optval = rss_bucket; else error = EINVAL; break; case IP_RECVRSSBUCKETID: optval = OPTBIT2(INP_RECVRSSBUCKETID); break; #endif case IP_VLAN_PCP: if (OPTBIT2(INP_2PCP_SET)) { optval = (inp->inp_flags2 & INP_2PCP_MASK) >> INP_2PCP_SHIFT; } else { optval = -1; } break; } error = sooptcopyout(sopt, &optval, sizeof optval); break; /* * Multicast socket options are processed by the in_mcast * module. */ case IP_MULTICAST_IF: case IP_MULTICAST_VIF: case IP_MULTICAST_TTL: case IP_MULTICAST_LOOP: case IP_MSFILTER: error = inp_getmoptions(inp, sopt); break; #if defined(IPSEC) || defined(IPSEC_SUPPORT) case IP_IPSEC_POLICY: if (IPSEC_ENABLED(ipv4)) { error = IPSEC_PCBCTL(ipv4, inp, sopt); break; } /* FALLTHROUGH */ #endif /* IPSEC */ default: error = ENOPROTOOPT; break; } break; } return (error); } /* * Routine called from ip_output() to loop back a copy of an IP multicast * packet to the input queue of a specified interface. Note that this * calls the output routine of the loopback "driver", but with an interface * pointer that might NOT be a loopback interface -- evil, but easier than * replicating that code here. */ static void ip_mloopback(struct ifnet *ifp, const struct mbuf *m, int hlen) { struct ip *ip; struct mbuf *copym; /* * Make a deep copy of the packet because we're going to * modify the packet in order to generate checksums. */ copym = m_dup(m, M_NOWAIT); if (copym != NULL && (!M_WRITABLE(copym) || copym->m_len < hlen)) copym = m_pullup(copym, hlen); if (copym != NULL) { /* If needed, compute the checksum and mark it as valid. */ if (copym->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(copym); copym->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; copym->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; copym->m_pkthdr.csum_data = 0xffff; } /* * We don't bother to fragment if the IP length is greater * than the interface's MTU. Can this possibly matter? */ ip = mtod(copym, struct ip *); ip->ip_sum = 0; ip->ip_sum = in_cksum(copym, hlen); if_simloop(ifp, copym, AF_INET, 0); } } diff --git a/sys/netinet/ip_var.h b/sys/netinet/ip_var.h index 0f2ed8c43e64..18ca5861a40e 100644 --- a/sys/netinet/ip_var.h +++ b/sys/netinet/ip_var.h @@ -1,356 +1,358 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _NETINET_IP_VAR_H_ #define _NETINET_IP_VAR_H_ #include #include #include #include /* * Overlay for ip header used by other protocols (tcp, udp). */ struct ipovly { u_char ih_x1[9]; /* (unused) */ u_char ih_pr; /* protocol */ u_short ih_len; /* protocol length */ struct in_addr ih_src; /* source internet address */ struct in_addr ih_dst; /* destination internet address */ }; #ifdef _KERNEL /* * Ip reassembly queue structure. Each fragment * being reassembled is attached to one of these structures. * They are timed out after net.inet.ip.fragttl seconds, and may also be * reclaimed if memory becomes tight. */ struct ipq { TAILQ_ENTRY(ipq) ipq_list; /* to other reass headers */ time_t ipq_expire; /* time_uptime when ipq expires */ u_char ipq_nfrags; /* # frags in this packet */ u_char ipq_p; /* protocol of this fragment */ u_short ipq_id; /* sequence id for reassembly */ int ipq_maxoff; /* total length of packet */ struct mbuf *ipq_frags; /* to ip headers of fragments */ struct in_addr ipq_src,ipq_dst; struct label *ipq_label; /* MAC label */ }; #endif /* _KERNEL */ /* * Structure stored in mbuf in inpcb.ip_options * and passed to ip_output when ip options are in use. * The actual length of the options (including ipopt_dst) * is in m_len. */ #define MAX_IPOPTLEN 40 struct ipoption { struct in_addr ipopt_dst; /* first-hop dst if source routed */ char ipopt_list[MAX_IPOPTLEN]; /* options proper */ }; #if defined(_NETINET_IN_VAR_H_) && defined(_KERNEL) /* * Structure attached to inpcb.ip_moptions and * passed to ip_output when IP multicast options are in use. * This structure is lazy-allocated. 
*/ struct ip_moptions { struct ifnet *imo_multicast_ifp; /* ifp for outgoing multicasts */ struct in_addr imo_multicast_addr; /* ifindex/addr on MULTICAST_IF */ u_long imo_multicast_vif; /* vif num outgoing multicasts */ u_char imo_multicast_ttl; /* TTL for outgoing multicasts */ u_char imo_multicast_loop; /* 1 => hear sends if a member */ struct ip_mfilter_head imo_head; /* group membership list */ }; #else struct ip_moptions; #endif struct ipstat { uint64_t ips_total; /* total packets received */ uint64_t ips_badsum; /* checksum bad */ uint64_t ips_tooshort; /* packet too short */ uint64_t ips_toosmall; /* not enough data */ uint64_t ips_badhlen; /* ip header length < data size */ uint64_t ips_badlen; /* ip length < ip header length */ uint64_t ips_fragments; /* fragments received */ uint64_t ips_fragdropped; /* frags dropped (dups, out of space) */ uint64_t ips_fragtimeout; /* fragments timed out */ uint64_t ips_forward; /* packets forwarded */ uint64_t ips_fastforward; /* packets fast forwarded */ uint64_t ips_cantforward; /* packets rcvd for unreachable dest */ uint64_t ips_redirectsent; /* packets forwarded on same net */ uint64_t ips_noproto; /* unknown or unsupported protocol */ uint64_t ips_delivered; /* datagrams delivered to upper level*/ uint64_t ips_localout; /* total ip packets generated here */ uint64_t ips_odropped; /* lost packets due to nobufs, etc. */ uint64_t ips_reassembled; /* total packets reassembled ok */ uint64_t ips_fragmented; /* datagrams successfully fragmented */ uint64_t ips_ofragments; /* output fragments created */ uint64_t ips_cantfrag; /* don't fragment flag was set, etc. */ uint64_t ips_badoptions; /* error in option processing */ uint64_t ips_noroute; /* packets discarded due to no route */ uint64_t ips_badvers; /* ip version != 4 */ uint64_t ips_rawout; /* total raw ip packets generated */ uint64_t ips_toolong; /* ip length > max ip packet size */ uint64_t ips_notmember; /* multicasts for unregistered grps */ uint64_t ips_nogif; /* no match gif found */ uint64_t ips_badaddr; /* invalid address on header */ }; #ifdef _KERNEL #include #include #include VNET_PCPUSTAT_DECLARE(struct ipstat, ipstat); /* * In-kernel consumers can use these accessor macros directly to update * stats. */ #define IPSTAT_ADD(name, val) \ do { \ MIB_SDT_PROBE1(ip, count, name, (val)); \ VNET_PCPUSTAT_ADD(struct ipstat, ipstat, name, (val)); \ } while (0) #define IPSTAT_SUB(name, val) IPSTAT_ADD(name, -(val)) #define IPSTAT_INC(name) IPSTAT_ADD(name, 1) #define IPSTAT_DEC(name) IPSTAT_SUB(name, 1) /* * Kernel module consumers must use this accessor macro. 
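/*
 * The aggregated counters land in the net.inet.ip.stats sysctl, which
 * netstat -s renders. A hedged direct read (struct ipstat must match
 * the running kernel, so compile against its headers):
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct ipstat ips;
	size_t len = sizeof(ips);

	if (sysctlbyname("net.inet.ip.stats", &ips, &len, NULL, 0) == -1)
		return (1);
	printf("output fragments created: %ju\n",
	    (uintmax_t)ips.ips_ofragments);
	return (0);
}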
*/ void kmod_ipstat_inc(int statnum); #define KMOD_IPSTAT_INC(name) \ do { \ MIB_SDT_PROBE1(ip, count, name, 1); \ kmod_ipstat_inc( \ offsetof(struct ipstat, name) / sizeof(uint64_t)); \ } while (0) void kmod_ipstat_dec(int statnum); #define KMOD_IPSTAT_DEC(name) \ do { \ MIB_SDT_PROBE1(ip, count, name, -1); \ kmod_ipstat_dec( \ offsetof(struct ipstat, name) / sizeof(uint64_t)); \ } while (0) /* flags passed to ip_output as last parameter */ #define IP_FORWARDING 0x1 /* most of ip header exists */ #define IP_RAWOUTPUT 0x2 /* raw ip header exists */ #define IP_SENDONES 0x4 /* send all-ones broadcast */ #define IP_SENDTOIF 0x8 /* send on specific ifnet */ #define IP_ROUTETOIF SO_DONTROUTE /* 0x10 bypass routing tables */ #define IP_ALLOWBROADCAST SO_BROADCAST /* 0x20 can send broadcast packets */ #define IP_NODEFAULTFLOWID 0x40 /* Don't set the flowid from inp */ #define IP_NO_SND_TAG_RL 0x80 /* Don't send down the ratelimit tag */ #ifdef __NO_STRICT_ALIGNMENT #define IP_HDR_ALIGNED_P(ip) 1 #else #define IP_HDR_ALIGNED_P(ip) ((((intptr_t) (ip)) & 3) == 0) #endif struct ip; struct inpcb; struct route; struct sockopt; struct inpcbinfo; VNET_DECLARE(int, ip_defttl); /* default IP ttl */ VNET_DECLARE(int, ipforwarding); /* ip forwarding */ VNET_DECLARE(int, ipsendredirects); #ifdef IPSTEALTH VNET_DECLARE(int, ipstealth); /* stealth forwarding */ #endif VNET_DECLARE(struct socket *, ip_rsvpd); /* reservation protocol daemon*/ VNET_DECLARE(struct socket *, ip_mrouter); /* multicast routing daemon */ extern int (*legal_vif_num)(int); extern u_long (*ip_mcast_src)(int); VNET_DECLARE(int, rsvp_on); VNET_DECLARE(int, drop_redirect); +VNET_DECLARE(int, ip_random_id); #define V_ip_id VNET(ip_id) #define V_ip_defttl VNET(ip_defttl) #define V_ipforwarding VNET(ipforwarding) #define V_ipsendredirects VNET(ipsendredirects) #ifdef IPSTEALTH #define V_ipstealth VNET(ipstealth) #endif #define V_ip_rsvpd VNET(ip_rsvpd) #define V_ip_mrouter VNET(ip_mrouter) #define V_rsvp_on VNET(rsvp_on) #define V_drop_redirect VNET(drop_redirect) +#define V_ip_random_id VNET(ip_random_id) void inp_freemoptions(struct ip_moptions *); int inp_getmoptions(struct inpcb *, struct sockopt *); int inp_setmoptions(struct inpcb *, struct sockopt *); int ip_ctloutput(struct socket *, struct sockopt *sopt); int ip_fragment(struct ip *ip, struct mbuf **m_frag, int mtu, u_long if_hwassist_flags); void ip_forward(struct mbuf *m, int srcrt); extern int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *, struct ip_moptions *); int ip_output(struct mbuf *, struct mbuf *, struct route *, int, struct ip_moptions *, struct inpcb *); struct mbuf * ip_reass(struct mbuf *); void ip_savecontrol(struct inpcb *, struct mbuf **, struct ip *, struct mbuf *); -void ip_fillid(struct ip *); +void ip_fillid(struct ip *, bool); int rip_ctloutput(struct socket *, struct sockopt *); int ipip_input(struct mbuf **, int *, int); int rsvp_input(struct mbuf **, int *, int); int ip_rsvp_init(struct socket *); int ip_rsvp_done(void); extern int (*ip_rsvp_vif)(struct socket *, struct sockopt *); extern void (*ip_rsvp_force_done)(struct socket *); extern int (*rsvp_input_p)(struct mbuf **, int *, int); typedef int ipproto_input_t(struct mbuf **, int *, int); struct icmp; typedef void ipproto_ctlinput_t(struct icmp *); int ipproto_register(uint8_t, ipproto_input_t, ipproto_ctlinput_t); int ipproto_unregister(uint8_t); #define IPPROTO_REGISTER(prot, input, ctl) do { \ int error __diagused; \ error = ipproto_register(prot, input, ctl); \ MPASS(error == 0); \ } while 
(0) ipproto_input_t rip_input; ipproto_ctlinput_t rip_ctlinput; VNET_DECLARE(struct pfil_head *, inet_pfil_head); #define V_inet_pfil_head VNET(inet_pfil_head) #define PFIL_INET_NAME "inet" VNET_DECLARE(struct pfil_head *, inet_local_pfil_head); #define V_inet_local_pfil_head VNET(inet_local_pfil_head) #define PFIL_INET_LOCAL_NAME "inet-local" void in_delayed_cksum(struct mbuf *m); /* Hooks for ipfw, dummynet, divert etc. Most are declared in raw_ip.c */ /* * Reference to an ipfw or packet filter rule that can be carried * outside critical sections. * A rule is identified by rulenum:rule_id which is ordered. * In version chain_id the rule can be found in slot 'slot', so * we don't need a lookup if chain_id == chain->id. * * On exit from the firewall this structure refers to the rule after * the matching one (slot points to the new rule; rulenum:rule_id-1 * is the matching rule), and additional info (e.g. info often contains * the insn argument or tablearg in the low 16 bits, in host format). * On entry, the structure is valid if slot>0, and refers to the starting * rules. 'info' contains the reason for reinject, e.g. divert port, * divert direction, and so on. * * Packet Mark is an analogue to ipfw tags with O(1) lookup from mbuf while * regular tags require a singly-linked list traversal. Mark is a 32-bit * number that can be looked up in a table [with 'number' table-type], matched * or compared with a number with optional mask applied before comparison. * Being generic in nature, Mark can be used for a variety of needs. * For example, it could be used as a security group: mark will hold a * security group id and represent a group of packet flows that share the same * access control policy. * The O_MASK opcode can match the mark value bitwise, so one can build a * hierarchical model designating different meanings for bit range(s). */ struct ipfw_rule_ref { /* struct m_tag spans 24 bytes above this point, see mbuf_tags(9) */ /* spare space just to be safe in case struct m_tag grows */ /* -- 32 bytes -- */ uint32_t slot; /* slot for matching rule */ uint32_t rulenum; /* matching rule number */ uint32_t rule_id; /* matching rule id */ uint32_t chain_id; /* ruleset id */ uint32_t info; /* see below */ uint32_t pkt_mark; /* packet mark */ uint32_t spare[2]; /* -- 64 bytes -- */ }; enum { IPFW_INFO_MASK = 0x0000ffff, IPFW_INFO_OUT = 0x00000000, /* outgoing, just for convenience */ IPFW_INFO_IN = 0x80000000, /* incoming, overloads dir */ IPFW_ONEPASS = 0x40000000, /* One-pass, do not reinject */ IPFW_IS_MASK = 0x30000000, /* which source ? */ IPFW_IS_DIVERT = 0x20000000, IPFW_IS_DUMMYNET =0x10000000, IPFW_IS_PIPE = 0x08000000, /* pipe=1, queue = 0 */ }; #define MTAG_IPFW 1148380143 /* IPFW-tagged cookie */ #define MTAG_IPFW_RULE 1262273568 /* rule reference */ #define MTAG_IPFW_CALL 1308397630 /* call stack */ struct ip_fw_args; typedef int (*ip_fw_ctl_ptr_t)(struct sockopt *); VNET_DECLARE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr); #define V_ip_fw_ctl_ptr VNET(ip_fw_ctl_ptr) /* Divert hooks.
*/ extern void (*ip_divert_ptr)(struct mbuf *m, bool incoming); /* ng_ipfw hooks -- XXX make it the same as divert and dummynet */ extern int (*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool); extern int (*ip_dn_ctl_ptr)(struct sockopt *); extern int (*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *); /* pf specific mtag for divert(4) support */ __enum_uint8_decl(pf_mtag_dir) { PF_DIVERT_MTAG_DIR_IN = 1, PF_DIVERT_MTAG_DIR_OUT = 2 }; struct pf_divert_mtag { __enum_uint8(pf_mtag_dir) idir; /* initial pkt direction */ union { __enum_uint8(pf_mtag_dir) ndir; /* new dir after re-enter */ uint16_t port; /* initial divert(4) port */ }; }; #define MTAG_PF_DIVERT 1262273569 #endif /* _KERNEL */ #endif /* !_NETINET_IP_VAR_H_ */ diff --git a/sys/netinet/raw_ip.c b/sys/netinet/raw_ip.c index 0d677d954f11..7b6104da5402 100644 --- a/sys/netinet/raw_ip.c +++ b/sys/netinet/raw_ip.c @@ -1,1113 +1,1113 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include "opt_route.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern ipproto_input_t *ip_protox[]; VNET_DEFINE(int, ip_defttl) = IPDEFTTL; SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip_defttl), 0, "Maximum TTL on IP packets"); VNET_DEFINE(struct inpcbinfo, ripcbinfo); #define V_ripcbinfo VNET(ripcbinfo) /* * Control and data hooks for ipfw, dummynet, divert and so on. * The data hooks are not used here but it is convenient * to keep them all in one place. 
*/ VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL; int (*ip_dn_ctl_ptr)(struct sockopt *); int (*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *); void (*ip_divert_ptr)(struct mbuf *, bool); int (*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool); #ifdef INET /* * Hooks for multicast routing. They all default to NULL, so leave them not * initialized and rely on BSS being set to 0. */ /* * The socket used to communicate with the multicast routing daemon. */ VNET_DEFINE(struct socket *, ip_mrouter); /* * The various mrouter and rsvp functions. */ int (*ip_mrouter_set)(struct socket *, struct sockopt *); int (*ip_mrouter_get)(struct socket *, struct sockopt *); int (*ip_mrouter_done)(void); int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *, struct ip_moptions *); int (*mrt_ioctl)(u_long, caddr_t, int); int (*legal_vif_num)(int); u_long (*ip_mcast_src)(int); int (*rsvp_input_p)(struct mbuf **, int *, int); int (*ip_rsvp_vif)(struct socket *, struct sockopt *); void (*ip_rsvp_force_done)(struct socket *); #endif /* INET */ #define V_rip_bind_all_fibs VNET(rip_bind_all_fibs) VNET_DEFINE(int, rip_bind_all_fibs) = 1; SYSCTL_INT(_net_inet_raw, OID_AUTO, bind_all_fibs, CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(rip_bind_all_fibs), 0, "Bound sockets receive traffic from all FIBs"); u_long rip_sendspace = 9216; SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW, &rip_sendspace, 0, "Maximum outgoing raw IP datagram size"); u_long rip_recvspace = 9216; SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW, &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams"); /* * Hash functions */ #define INP_PCBHASH_RAW_SIZE 256 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \ (((proto) + (laddr) + (faddr)) % (mask) + 1) #ifdef INET static void rip_inshash(struct inpcb *inp) { struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; struct inpcbhead *pcbhash; int hash; INP_HASH_WLOCK_ASSERT(pcbinfo); INP_WLOCK_ASSERT(inp); if (inp->inp_ip_p != 0 && inp->inp_laddr.s_addr != INADDR_ANY && inp->inp_faddr.s_addr != INADDR_ANY) { hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr, inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask); } else hash = 0; pcbhash = &pcbinfo->ipi_hash_exact[hash]; CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash_exact); } static void rip_delhash(struct inpcb *inp) { INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); INP_WLOCK_ASSERT(inp); CK_LIST_REMOVE(inp, inp_hash_exact); } #endif /* INET */ INPCBSTORAGE_DEFINE(ripcbstor, inpcb, "rawinp", "ripcb", "rip", "riphash"); static void rip_init(void *arg __unused) { in_pcbinfo_init(&V_ripcbinfo, &ripcbstor, INP_PCBHASH_RAW_SIZE, 1); } VNET_SYSINIT(rip_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, rip_init, NULL); #ifdef VIMAGE static void rip_destroy(void *unused __unused) { in_pcbinfo_destroy(&V_ripcbinfo); } VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL); #endif #ifdef INET static int rip_append(struct inpcb *inp, struct ip *ip, struct mbuf *m, struct sockaddr_in *ripsrc) { struct socket *so = inp->inp_socket; struct mbuf *n, *opts = NULL; INP_LOCK_ASSERT(inp); #if defined(IPSEC) || defined(IPSEC_SUPPORT) /* check AH/ESP integrity. */ if (IPSEC_ENABLED(ipv4) && IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) return (0); #endif /* IPSEC */ #ifdef MAC if (mac_inpcb_check_deliver(inp, m) != 0) return (0); #endif /* Check the minimum TTL for socket. 
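 * (inp_ip_minttl is set with the IP_MINTTL socket option; packets whose
 * TTL is below it are silently dropped, in the spirit of the RFC 5082
 * generalized TTL security mechanism.)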
*/ if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) return (0); if ((n = m_copym(m, 0, M_COPYALL, M_NOWAIT)) == NULL) return (0); if ((inp->inp_flags & INP_CONTROLOPTS) || (so->so_options & (SO_TIMESTAMP | SO_BINTIME))) ip_savecontrol(inp, &opts, ip, n); SOCKBUF_LOCK(&so->so_rcv); if (sbappendaddr_locked(&so->so_rcv, (struct sockaddr *)ripsrc, n, opts) == 0) { soroverflow_locked(so); m_freem(n); if (opts) m_freem(opts); return (0); } sorwakeup_locked(so); return (1); } struct rip_inp_match_ctx { struct ip *ip; int proto; }; static bool rip_inp_match1(const struct inpcb *inp, void *v) { struct rip_inp_match_ctx *ctx = v; if (inp->inp_ip_p != ctx->proto) return (false); #ifdef INET6 /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV4) == 0) return (false); #endif if (inp->inp_laddr.s_addr != ctx->ip->ip_dst.s_addr) return (false); if (inp->inp_faddr.s_addr != ctx->ip->ip_src.s_addr) return (false); return (true); } static bool rip_inp_match2(const struct inpcb *inp, void *v) { struct rip_inp_match_ctx *ctx = v; if (inp->inp_ip_p && inp->inp_ip_p != ctx->proto) return (false); #ifdef INET6 /* XXX inp locking */ if ((inp->inp_vflag & INP_IPV4) == 0) return (false); #endif if (!in_nullhost(inp->inp_laddr) && !in_hosteq(inp->inp_laddr, ctx->ip->ip_dst)) return (false); if (!in_nullhost(inp->inp_faddr) && !in_hosteq(inp->inp_faddr, ctx->ip->ip_src)) return (false); return (true); } /* * Setup generic address and protocol structures for raw_input routine, then * pass them along with mbuf chain. */ int rip_input(struct mbuf **mp, int *offp, int proto) { struct rip_inp_match_ctx ctx = { .ip = mtod(*mp, struct ip *), .proto = proto, }; struct inpcb_iterator inpi = INP_ITERATOR(&V_ripcbinfo, INPLOOKUP_RLOCKPCB, rip_inp_match1, &ctx); struct ifnet *ifp; struct mbuf *m = *mp; struct inpcb *inp; struct sockaddr_in ripsrc; int appended, fib; M_ASSERTPKTHDR(m); *mp = NULL; appended = 0; bzero(&ripsrc, sizeof(ripsrc)); ripsrc.sin_len = sizeof(ripsrc); ripsrc.sin_family = AF_INET; ripsrc.sin_addr = ctx.ip->ip_src; fib = M_GETFIB(m); ifp = m->m_pkthdr.rcvif; inpi.hash = INP_PCBHASH_RAW(proto, ctx.ip->ip_src.s_addr, ctx.ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask); while ((inp = inp_next(&inpi)) != NULL) { INP_RLOCK_ASSERT(inp); if (jailed_without_vnet(inp->inp_cred) && prison_check_ip4(inp->inp_cred, &ctx.ip->ip_dst) != 0) { /* * XXX: If faddr was bound to multicast group, * jailed raw socket will drop datagram. */ continue; } if (V_rip_bind_all_fibs == 0 && fib != inp->inp_inc.inc_fibnum) /* * Sockets bound to a specific FIB can only receive * packets from that FIB. */ continue; appended += rip_append(inp, ctx.ip, m, &ripsrc); } inpi.hash = 0; inpi.match = rip_inp_match2; MPASS(inpi.inp == NULL); while ((inp = inp_next(&inpi)) != NULL) { INP_RLOCK_ASSERT(inp); if (jailed_without_vnet(inp->inp_cred) && !IN_MULTICAST(ntohl(ctx.ip->ip_dst.s_addr)) && prison_check_ip4(inp->inp_cred, &ctx.ip->ip_dst) != 0) /* * Allow raw socket in jail to receive multicast; * assume process had PRIV_NETINET_RAW at attach, * and fall through into normal filter path if so. */ continue; if (V_rip_bind_all_fibs == 0 && fib != inp->inp_inc.inc_fibnum) continue; /* * If this raw socket has multicast state, and we * have received a multicast, check if this socket * should receive it, as multicast filtering is now * the responsibility of the transport layer. 
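 *
 * For reference, the two delivery passes above reduce to (sketch):
 *
 *	pass 1 (rip_inp_match1, hashed bucket): inp_ip_p == proto &&
 *	    inp_laddr == ip_dst && inp_faddr == ip_src, i.e. fully
 *	    specified PCBs only;
 *	pass 2 (rip_inp_match2, bucket 0): protocol, local and foreign
 *	    address may each also be unbound (zero), in which case they
 *	    match any packet.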
*/ if (inp->inp_moptions != NULL && IN_MULTICAST(ntohl(ctx.ip->ip_dst.s_addr))) { /* * If the incoming datagram is for IGMP, allow it * through unconditionally to the raw socket. * * In the case of IGMPv2, we may not have explicitly * joined the group, and may have set IFF_ALLMULTI * on the interface. imo_multi_filter() may discard * control traffic we actually need to see. * * Userland multicast routing daemons should continue * filter the control traffic appropriately. */ int blocked; blocked = MCAST_PASS; if (proto != IPPROTO_IGMP) { struct sockaddr_in group; bzero(&group, sizeof(struct sockaddr_in)); group.sin_len = sizeof(struct sockaddr_in); group.sin_family = AF_INET; group.sin_addr = ctx.ip->ip_dst; blocked = imo_multi_filter(inp->inp_moptions, ifp, (struct sockaddr *)&group, (struct sockaddr *)&ripsrc); } if (blocked != MCAST_PASS) { IPSTAT_INC(ips_notmember); continue; } } appended += rip_append(inp, ctx.ip, m, &ripsrc); } if (appended == 0 && ip_protox[ctx.ip->ip_p] == rip_input) { IPSTAT_INC(ips_noproto); IPSTAT_DEC(ips_delivered); icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0); } else m_freem(m); return (IPPROTO_DONE); } /* * Generate IP header and pass packet to ip_output. Tack on options user may * have setup with control call. */ static int rip_send(struct socket *so, int pruflags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { struct epoch_tracker et; struct ip *ip; struct inpcb *inp; in_addr_t *dst; int error, flags, cnt, hlen; u_char opttype, optlen, *cp; inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip_send: inp == NULL")); if (control != NULL) { m_freem(control); control = NULL; } if (so->so_state & SS_ISCONNECTED) { if (nam) { error = EISCONN; m_freem(m); return (error); } dst = &inp->inp_faddr.s_addr; } else { if (nam == NULL) error = ENOTCONN; else if (nam->sa_family != AF_INET) error = EAFNOSUPPORT; else if (nam->sa_len != sizeof(struct sockaddr_in)) error = EINVAL; else error = 0; if (error != 0) { m_freem(m); return (error); } dst = &((struct sockaddr_in *)nam)->sin_addr.s_addr; } flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) | IP_ALLOWBROADCAST; /* * If the user handed us a complete IP packet, use it. Otherwise, * allocate an mbuf for a header and fill it in. */ if ((inp->inp_flags & INP_HDRINCL) == 0) { if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) { m_freem(m); return(EMSGSIZE); } M_PREPEND(m, sizeof(struct ip), M_NOWAIT); if (m == NULL) return(ENOBUFS); INP_RLOCK(inp); ip = mtod(m, struct ip *); ip->ip_tos = inp->inp_ip_tos; if (inp->inp_flags & INP_DONTFRAG) ip->ip_off = htons(IP_DF); else ip->ip_off = htons(0); ip->ip_p = inp->inp_ip_p; ip->ip_len = htons(m->m_pkthdr.len); ip->ip_src = inp->inp_laddr; ip->ip_dst.s_addr = *dst; #ifdef ROUTE_MPATH if (CALC_FLOWID_OUTBOUND) { uint32_t hash_type, hash_val; hash_val = fib4_calc_software_hash(ip->ip_src, ip->ip_dst, 0, 0, ip->ip_p, &hash_type); m->m_pkthdr.flowid = hash_val; M_HASHTYPE_SET(m, hash_type); flags |= IP_NODEFAULTFLOWID; } #endif if (jailed(inp->inp_cred)) { /* * prison_local_ip4() would be good enough but would * let a source of INADDR_ANY pass, which we do not * want to see from jails. 
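 * Hence an unspecified source is resolved with in_pcbladdr() first,
 * which performs jail-aware source selection, while an explicit source
 * is validated directly with prison_local_ip4(), as done below.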
*/ if (ip->ip_src.s_addr == INADDR_ANY) { NET_EPOCH_ENTER(et); error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src, inp->inp_cred); NET_EPOCH_EXIT(et); } else { error = prison_local_ip4(inp->inp_cred, &ip->ip_src); } if (error != 0) { INP_RUNLOCK(inp); m_freem(m); return (error); } } ip->ip_ttl = inp->inp_ip_ttl; } else { if (m->m_pkthdr.len > IP_MAXPACKET) { m_freem(m); return (EMSGSIZE); } if (m->m_pkthdr.len < sizeof(*ip)) { m_freem(m); return (EINVAL); } m = m_pullup(m, sizeof(*ip)); if (m == NULL) return (ENOMEM); ip = mtod(m, struct ip *); hlen = ip->ip_hl << 2; if (m->m_len < hlen) { m = m_pullup(m, hlen); if (m == NULL) return (EINVAL); ip = mtod(m, struct ip *); } #ifdef ROUTE_MPATH if (CALC_FLOWID_OUTBOUND) { uint32_t hash_type, hash_val; hash_val = fib4_calc_software_hash(ip->ip_dst, ip->ip_src, 0, 0, ip->ip_p, &hash_type); m->m_pkthdr.flowid = hash_val; M_HASHTYPE_SET(m, hash_type); flags |= IP_NODEFAULTFLOWID; } #endif INP_RLOCK(inp); /* * Don't allow both user specified and setsockopt options, * and don't allow packet length sizes that will crash. */ if ((hlen < sizeof (*ip)) || ((hlen > sizeof (*ip)) && inp->inp_options) || (ntohs(ip->ip_len) != m->m_pkthdr.len)) { INP_RUNLOCK(inp); m_freem(m); return (EINVAL); } error = prison_check_ip4(inp->inp_cred, &ip->ip_src); if (error != 0) { INP_RUNLOCK(inp); m_freem(m); return (error); } /* * Don't allow IP options which do not have the required * structure as specified in section 3.1 of RFC 791 on * pages 15-23. */ cp = (u_char *)(ip + 1); cnt = hlen - sizeof (struct ip); for (; cnt > 0; cnt -= optlen, cp += optlen) { opttype = cp[IPOPT_OPTVAL]; if (opttype == IPOPT_EOL) break; if (opttype == IPOPT_NOP) { optlen = 1; continue; } if (cnt < IPOPT_OLEN + sizeof(u_char)) { INP_RUNLOCK(inp); m_freem(m); return (EINVAL); } optlen = cp[IPOPT_OLEN]; if (optlen < IPOPT_OLEN + sizeof(u_char) || optlen > cnt) { INP_RUNLOCK(inp); m_freem(m); return (EINVAL); } } /* * This doesn't allow application to specify ID of zero, * but we got this limitation from the beginning of history. */ if (ip->ip_id == 0) - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); /* * XXX prevent ip_output from overwriting header fields. */ flags |= IP_RAWOUTPUT; IPSTAT_INC(ips_rawout); } if (inp->inp_flags & INP_ONESBCAST) flags |= IP_SENDONES; #ifdef MAC mac_inpcb_create_mbuf(inp, m); #endif NET_EPOCH_ENTER(et); error = ip_output(m, inp->inp_options, NULL, flags, inp->inp_moptions, inp); NET_EPOCH_EXIT(et); INP_RUNLOCK(inp); return (error); } /* * Raw IP socket option processing. * * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could * only be created by a privileged process, and as such, socket option * operations to manage system properties on any raw socket were allowed to * take place without explicit additional access control checks. However, * raw sockets can now also be created in jail(), and therefore explicit * checks are now required. Likewise, raw sockets can be used by a process * after it gives up privilege, so some caution is required. For options * passed down to the IP layer via ip_ctloutput(), checks are assumed to be * performed in ip_ctloutput() and therefore no check occurs here. * Unilaterally checking priv_check() here breaks normal IP socket option * operations on raw sockets. * * When adding new socket options here, make sure to add access control * checks here as necessary. 
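 *
 * Userland sketch (illustrative, not part of this file): a process that
 * created a raw socket while it still held PRIV_NETINET_RAW can keep
 * using it after giving up privilege:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
 *	... drop privilege ...
 *	sendto(s, pkt, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * With IP_HDRINCL the buffer must start with a complete struct ip whose
 * ip_len matches the datagram length; an ip_id of zero is filled in by
 * the kernel (see rip_send() above).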
*/ int rip_ctloutput(struct socket *so, struct sockopt *sopt) { struct inpcb *inp = sotoinpcb(so); int error, optval; if (sopt->sopt_level != IPPROTO_IP) { if (sopt->sopt_dir == SOPT_SET && sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_SETFIB) return (ip_ctloutput(so, sopt)); return (EINVAL); } error = 0; switch (sopt->sopt_dir) { case SOPT_GET: switch (sopt->sopt_name) { case IP_HDRINCL: optval = inp->inp_flags & INP_HDRINCL; error = sooptcopyout(sopt, &optval, sizeof optval); break; case IP_FW3: /* generic ipfw v.3 functions */ case IP_FW_ADD: /* ADD actually returns the body... */ case IP_FW_GET: case IP_FW_TABLE_GETSIZE: case IP_FW_TABLE_LIST: case IP_FW_NAT_GET_CONFIG: case IP_FW_NAT_GET_LOG: if (V_ip_fw_ctl_ptr != NULL) error = V_ip_fw_ctl_ptr(sopt); else error = ENOPROTOOPT; break; case IP_DUMMYNET3: /* generic dummynet v.3 functions */ case IP_DUMMYNET_GET: if (ip_dn_ctl_ptr != NULL) error = ip_dn_ctl_ptr(sopt); else error = ENOPROTOOPT; break ; case MRT_INIT: case MRT_DONE: case MRT_ADD_VIF: case MRT_DEL_VIF: case MRT_ADD_MFC: case MRT_DEL_MFC: case MRT_VERSION: case MRT_ASSERT: case MRT_API_SUPPORT: case MRT_API_CONFIG: case MRT_ADD_BW_UPCALL: case MRT_DEL_BW_UPCALL: error = priv_check(curthread, PRIV_NETINET_MROUTE); if (error != 0) return (error); if (inp->inp_ip_p != IPPROTO_IGMP) return (EOPNOTSUPP); error = ip_mrouter_get ? ip_mrouter_get(so, sopt) : EOPNOTSUPP; break; default: error = ip_ctloutput(so, sopt); break; } break; case SOPT_SET: switch (sopt->sopt_name) { case IP_HDRINCL: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) break; INP_WLOCK(inp); if (optval) inp->inp_flags |= INP_HDRINCL; else inp->inp_flags &= ~INP_HDRINCL; INP_WUNLOCK(inp); break; case IP_FW3: /* generic ipfw v.3 functions */ case IP_FW_ADD: case IP_FW_DEL: case IP_FW_FLUSH: case IP_FW_ZERO: case IP_FW_RESETLOG: case IP_FW_TABLE_ADD: case IP_FW_TABLE_DEL: case IP_FW_TABLE_FLUSH: case IP_FW_NAT_CFG: case IP_FW_NAT_DEL: if (V_ip_fw_ctl_ptr != NULL) error = V_ip_fw_ctl_ptr(sopt); else error = ENOPROTOOPT; break; case IP_DUMMYNET3: /* generic dummynet v.3 functions */ case IP_DUMMYNET_CONFIGURE: case IP_DUMMYNET_DEL: case IP_DUMMYNET_FLUSH: if (ip_dn_ctl_ptr != NULL) error = ip_dn_ctl_ptr(sopt); else error = ENOPROTOOPT ; break ; case IP_RSVP_ON: error = priv_check(curthread, PRIV_NETINET_MROUTE); if (error != 0) return (error); if (inp->inp_ip_p != IPPROTO_RSVP) return (EOPNOTSUPP); error = ip_rsvp_init(so); break; case IP_RSVP_OFF: error = priv_check(curthread, PRIV_NETINET_MROUTE); if (error != 0) return (error); error = ip_rsvp_done(); break; case IP_RSVP_VIF_ON: case IP_RSVP_VIF_OFF: error = priv_check(curthread, PRIV_NETINET_MROUTE); if (error != 0) return (error); if (inp->inp_ip_p != IPPROTO_RSVP) return (EOPNOTSUPP); error = ip_rsvp_vif ? ip_rsvp_vif(so, sopt) : EINVAL; break; case MRT_INIT: case MRT_DONE: case MRT_ADD_VIF: case MRT_DEL_VIF: case MRT_ADD_MFC: case MRT_DEL_MFC: case MRT_VERSION: case MRT_ASSERT: case MRT_API_SUPPORT: case MRT_API_CONFIG: case MRT_ADD_BW_UPCALL: case MRT_DEL_BW_UPCALL: error = priv_check(curthread, PRIV_NETINET_MROUTE); if (error != 0) return (error); if (inp->inp_ip_p != IPPROTO_IGMP) return (EOPNOTSUPP); error = ip_mrouter_set ? 
ip_mrouter_set(so, sopt) : EOPNOTSUPP; break; default: error = ip_ctloutput(so, sopt); break; } break; } return (error); } void rip_ctlinput(struct icmp *icmp) { #if defined(IPSEC) || defined(IPSEC_SUPPORT) if (IPSEC_ENABLED(ipv4)) IPSEC_CTLINPUT(ipv4, icmp); #endif } static int rip_attach(struct socket *so, int proto, struct thread *td) { struct inpcb *inp; int error; inp = sotoinpcb(so); KASSERT(inp == NULL, ("rip_attach: inp != NULL")); error = priv_check(td, PRIV_NETINET_RAW); if (error) return (error); if (proto >= IPPROTO_MAX || proto < 0) return EPROTONOSUPPORT; error = soreserve(so, rip_sendspace, rip_recvspace); if (error) return (error); error = in_pcballoc(so, &V_ripcbinfo); if (error) return (error); inp = (struct inpcb *)so->so_pcb; inp->inp_ip_p = proto; inp->inp_ip_ttl = V_ip_defttl; INP_HASH_WLOCK(&V_ripcbinfo); rip_inshash(inp); INP_HASH_WUNLOCK(&V_ripcbinfo); INP_WUNLOCK(inp); return (0); } static void rip_detach(struct socket *so) { struct inpcb *inp; inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip_detach: inp == NULL")); KASSERT(inp->inp_faddr.s_addr == INADDR_ANY, ("rip_detach: not closed")); /* Disable mrouter first */ if (so == V_ip_mrouter && ip_mrouter_done) ip_mrouter_done(); INP_WLOCK(inp); INP_HASH_WLOCK(&V_ripcbinfo); rip_delhash(inp); INP_HASH_WUNLOCK(&V_ripcbinfo); if (ip_rsvp_force_done) ip_rsvp_force_done(so); if (so == V_ip_rsvpd) ip_rsvp_done(); in_pcbfree(inp); } static void rip_dodisconnect(struct socket *so, struct inpcb *inp) { struct inpcbinfo *pcbinfo; pcbinfo = inp->inp_pcbinfo; INP_WLOCK(inp); INP_HASH_WLOCK(pcbinfo); rip_delhash(inp); inp->inp_faddr.s_addr = INADDR_ANY; rip_inshash(inp); INP_HASH_WUNLOCK(pcbinfo); SOCK_LOCK(so); so->so_state &= ~SS_ISCONNECTED; SOCK_UNLOCK(so); INP_WUNLOCK(inp); } static void rip_abort(struct socket *so) { struct inpcb *inp; inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip_abort: inp == NULL")); rip_dodisconnect(so, inp); } static void rip_close(struct socket *so) { struct inpcb *inp; inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip_close: inp == NULL")); rip_dodisconnect(so, inp); } static int rip_disconnect(struct socket *so) { struct inpcb *inp; if ((so->so_state & SS_ISCONNECTED) == 0) return (ENOTCONN); inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip_disconnect: inp == NULL")); rip_dodisconnect(so, inp); return (0); } static int rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { struct sockaddr_in *addr = (struct sockaddr_in *)nam; struct inpcb *inp; int error; if (nam->sa_family != AF_INET) return (EAFNOSUPPORT); if (nam->sa_len != sizeof(*addr)) return (EINVAL); error = prison_check_ip4(td->td_ucred, &addr->sin_addr); if (error != 0) return (error); inp = sotoinpcb(so); KASSERT(inp != NULL, ("rip_bind: inp == NULL")); if (CK_STAILQ_EMPTY(&V_ifnet) || (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) || (addr->sin_addr.s_addr && (inp->inp_flags & INP_BINDANY) == 0 && ifa_ifwithaddr_check((struct sockaddr *)addr) == 0)) return (EADDRNOTAVAIL); INP_WLOCK(inp); INP_HASH_WLOCK(&V_ripcbinfo); rip_delhash(inp); inp->inp_laddr = addr->sin_addr; rip_inshash(inp); INP_HASH_WUNLOCK(&V_ripcbinfo); INP_WUNLOCK(inp); return (0); } static int rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { struct sockaddr_in *addr = (struct sockaddr_in *)nam; struct inpcb *inp; if (nam->sa_len != sizeof(*addr)) return (EINVAL); if (CK_STAILQ_EMPTY(&V_ifnet)) return (EADDRNOTAVAIL); if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) return (EAFNOSUPPORT); inp = 
sotoinpcb(so); KASSERT(inp != NULL, ("rip_connect: inp == NULL")); INP_WLOCK(inp); INP_HASH_WLOCK(&V_ripcbinfo); rip_delhash(inp); inp->inp_faddr = addr->sin_addr; rip_inshash(inp); INP_HASH_WUNLOCK(&V_ripcbinfo); soisconnected(so); INP_WUNLOCK(inp); return (0); } static int rip_shutdown(struct socket *so, enum shutdown_how how) { SOCK_LOCK(so); if (!(so->so_state & SS_ISCONNECTED)) { SOCK_UNLOCK(so); return (ENOTCONN); } SOCK_UNLOCK(so); switch (how) { case SHUT_RD: sorflush(so); break; case SHUT_RDWR: sorflush(so); /* FALLTHROUGH */ case SHUT_WR: socantsendmore(so); } return (0); } #endif /* INET */ static int rip_pcblist(SYSCTL_HANDLER_ARGS) { struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_ripcbinfo, INPLOOKUP_RLOCKPCB); struct xinpgen xig; struct inpcb *inp; int error; if (req->newptr != 0) return (EPERM); if (req->oldptr == 0) { int n; n = V_ripcbinfo.ipi_count; n += imax(n / 8, 10); req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb); return (0); } if ((error = sysctl_wire_old_buffer(req, 0)) != 0) return (error); bzero(&xig, sizeof(xig)); xig.xig_len = sizeof xig; xig.xig_count = V_ripcbinfo.ipi_count; xig.xig_gen = V_ripcbinfo.ipi_gencnt; xig.xig_sogen = so_gencnt; error = SYSCTL_OUT(req, &xig, sizeof xig); if (error) return (error); while ((inp = inp_next(&inpi)) != NULL) { if (inp->inp_gencnt <= xig.xig_gen && cr_canseeinpcb(req->td->td_ucred, inp) == 0) { struct xinpcb xi; in_pcbtoxinpcb(inp, &xi); error = SYSCTL_OUT(req, &xi, sizeof xi); if (error) { INP_RUNLOCK(inp); break; } } } if (!error) { /* * Give the user an updated idea of our state. If the * generation differs from what we told her before, she knows * that something happened while we were processing this * request, and it might be necessary to retry. */ xig.xig_gen = V_ripcbinfo.ipi_gencnt; xig.xig_sogen = so_gencnt; xig.xig_count = V_ripcbinfo.ipi_count; error = SYSCTL_OUT(req, &xig, sizeof xig); } return (error); } SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, rip_pcblist, "S,xinpcb", "List of active raw IP sockets"); #ifdef INET struct protosw rip_protosw = { .pr_type = SOCK_RAW, .pr_flags = PR_ATOMIC|PR_ADDR, .pr_ctloutput = rip_ctloutput, .pr_abort = rip_abort, .pr_attach = rip_attach, .pr_bind = rip_bind, .pr_connect = rip_connect, .pr_control = in_control, .pr_detach = rip_detach, .pr_disconnect = rip_disconnect, .pr_peeraddr = in_getpeeraddr, .pr_send = rip_send, .pr_shutdown = rip_shutdown, .pr_sockaddr = in_getsockaddr, .pr_sosetlabel = in_pcbsosetlabel, .pr_close = rip_close }; #endif /* INET */ diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c index 02ad901259f4..e4bdb4291972 100644 --- a/sys/netinet/sctp_output.c +++ b/sys/netinet/sctp_output.c @@ -1,13928 +1,13928 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. 
* * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #endif #include #include #define SCTP_MAX_GAPS_INARRAY 4 struct sack_track { uint8_t right_edge; /* mergable on the right edge */ uint8_t left_edge; /* mergable on the left edge */ uint8_t num_entries; uint8_t spare; struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; }; const struct sack_track sack_array[256] = { {0, 0, 0, 0, /* 0x00 */ {{0, 0}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x01 */ {{0, 0}, {0, 0}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x02 */ {{1, 1}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x03 */ {{0, 1}, {0, 0}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x04 */ {{2, 2}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x05 */ {{0, 0}, {2, 2}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x06 */ {{1, 2}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x07 */ {{0, 2}, {0, 0}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x08 */ {{3, 3}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x09 */ {{0, 0}, {3, 3}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x0a */ {{1, 1}, {3, 3}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x0b */ {{0, 1}, {3, 3}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x0c */ {{2, 3}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x0d */ {{0, 0}, {2, 3}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x0e */ {{1, 3}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x0f */ {{0, 3}, {0, 0}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x10 */ {{4, 4}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x11 */ {{0, 0}, {4, 4}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x12 */ {{1, 1}, {4, 4}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x13 */ {{0, 1}, {4, 4}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x14 */ {{2, 2}, {4, 4}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x15 */ {{0, 0}, {2, 2}, {4, 4}, {0, 0} } }, {0, 0, 2, 0, /* 0x16 */ {{1, 2}, {4, 4}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x17 */ {{0, 2}, {4, 4}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x18 */ {{3, 4}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x19 */ {{0, 0}, {3, 4}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x1a */ {{1, 1}, {3, 4}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x1b */ {{0, 1}, {3, 4}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x1c */ {{2, 4}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x1d */ {{0, 0}, {2, 4}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x1e */ {{1, 4}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x1f */ {{0, 4}, {0, 0}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x20 */ {{5, 5}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x21 */ {{0, 0}, 
{5, 5}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x22 */ {{1, 1}, {5, 5}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x23 */ {{0, 1}, {5, 5}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x24 */ {{2, 2}, {5, 5}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x25 */ {{0, 0}, {2, 2}, {5, 5}, {0, 0} } }, {0, 0, 2, 0, /* 0x26 */ {{1, 2}, {5, 5}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x27 */ {{0, 2}, {5, 5}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x28 */ {{3, 3}, {5, 5}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x29 */ {{0, 0}, {3, 3}, {5, 5}, {0, 0} } }, {0, 0, 3, 0, /* 0x2a */ {{1, 1}, {3, 3}, {5, 5}, {0, 0} } }, {1, 0, 3, 0, /* 0x2b */ {{0, 1}, {3, 3}, {5, 5}, {0, 0} } }, {0, 0, 2, 0, /* 0x2c */ {{2, 3}, {5, 5}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x2d */ {{0, 0}, {2, 3}, {5, 5}, {0, 0} } }, {0, 0, 2, 0, /* 0x2e */ {{1, 3}, {5, 5}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x2f */ {{0, 3}, {5, 5}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x30 */ {{4, 5}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x31 */ {{0, 0}, {4, 5}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x32 */ {{1, 1}, {4, 5}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x33 */ {{0, 1}, {4, 5}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x34 */ {{2, 2}, {4, 5}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x35 */ {{0, 0}, {2, 2}, {4, 5}, {0, 0} } }, {0, 0, 2, 0, /* 0x36 */ {{1, 2}, {4, 5}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x37 */ {{0, 2}, {4, 5}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x38 */ {{3, 5}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x39 */ {{0, 0}, {3, 5}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x3a */ {{1, 1}, {3, 5}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x3b */ {{0, 1}, {3, 5}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x3c */ {{2, 5}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x3d */ {{0, 0}, {2, 5}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x3e */ {{1, 5}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x3f */ {{0, 5}, {0, 0}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x40 */ {{6, 6}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x41 */ {{0, 0}, {6, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x42 */ {{1, 1}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x43 */ {{0, 1}, {6, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x44 */ {{2, 2}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x45 */ {{0, 0}, {2, 2}, {6, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x46 */ {{1, 2}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x47 */ {{0, 2}, {6, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x48 */ {{3, 3}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x49 */ {{0, 0}, {3, 3}, {6, 6}, {0, 0} } }, {0, 0, 3, 0, /* 0x4a */ {{1, 1}, {3, 3}, {6, 6}, {0, 0} } }, {1, 0, 3, 0, /* 0x4b */ {{0, 1}, {3, 3}, {6, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x4c */ {{2, 3}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x4d */ {{0, 0}, {2, 3}, {6, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x4e */ {{1, 3}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x4f */ {{0, 3}, {6, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x50 */ {{4, 4}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x51 */ {{0, 0}, {4, 4}, {6, 6}, {0, 0} } }, {0, 0, 3, 0, /* 0x52 */ {{1, 1}, {4, 4}, {6, 6}, {0, 0} } }, {1, 0, 3, 0, /* 0x53 */ {{0, 1}, {4, 4}, {6, 6}, {0, 0} } }, {0, 0, 3, 0, /* 0x54 */ {{2, 2}, {4, 4}, {6, 6}, {0, 0} } }, {1, 0, 4, 0, /* 0x55 */ {{0, 0}, {2, 2}, {4, 4}, {6, 6} } }, {0, 0, 3, 0, /* 0x56 */ {{1, 2}, {4, 4}, {6, 6}, {0, 0} } }, {1, 0, 3, 0, /* 0x57 */ {{0, 2}, {4, 4}, {6, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x58 */ {{3, 4}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x59 */ {{0, 0}, {3, 4}, {6, 6}, {0, 0} } }, {0, 0, 3, 0, /* 0x5a */ {{1, 1}, {3, 4}, {6, 6}, {0, 0} } }, {1, 0, 3, 0, /* 0x5b */ {{0, 1}, {3, 4}, {6, 6}, 
{0, 0} } }, {0, 0, 2, 0, /* 0x5c */ {{2, 4}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x5d */ {{0, 0}, {2, 4}, {6, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x5e */ {{1, 4}, {6, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x5f */ {{0, 4}, {6, 6}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x60 */ {{5, 6}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x61 */ {{0, 0}, {5, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x62 */ {{1, 1}, {5, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x63 */ {{0, 1}, {5, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x64 */ {{2, 2}, {5, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x65 */ {{0, 0}, {2, 2}, {5, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x66 */ {{1, 2}, {5, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x67 */ {{0, 2}, {5, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x68 */ {{3, 3}, {5, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x69 */ {{0, 0}, {3, 3}, {5, 6}, {0, 0} } }, {0, 0, 3, 0, /* 0x6a */ {{1, 1}, {3, 3}, {5, 6}, {0, 0} } }, {1, 0, 3, 0, /* 0x6b */ {{0, 1}, {3, 3}, {5, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x6c */ {{2, 3}, {5, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x6d */ {{0, 0}, {2, 3}, {5, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x6e */ {{1, 3}, {5, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x6f */ {{0, 3}, {5, 6}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x70 */ {{4, 6}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x71 */ {{0, 0}, {4, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x72 */ {{1, 1}, {4, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x73 */ {{0, 1}, {4, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x74 */ {{2, 2}, {4, 6}, {0, 0}, {0, 0} } }, {1, 0, 3, 0, /* 0x75 */ {{0, 0}, {2, 2}, {4, 6}, {0, 0} } }, {0, 0, 2, 0, /* 0x76 */ {{1, 2}, {4, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x77 */ {{0, 2}, {4, 6}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x78 */ {{3, 6}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x79 */ {{0, 0}, {3, 6}, {0, 0}, {0, 0} } }, {0, 0, 2, 0, /* 0x7a */ {{1, 1}, {3, 6}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x7b */ {{0, 1}, {3, 6}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x7c */ {{2, 6}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 2, 0, /* 0x7d */ {{0, 0}, {2, 6}, {0, 0}, {0, 0} } }, {0, 0, 1, 0, /* 0x7e */ {{1, 6}, {0, 0}, {0, 0}, {0, 0} } }, {1, 0, 1, 0, /* 0x7f */ {{0, 6}, {0, 0}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0x80 */ {{7, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0x81 */ {{0, 0}, {7, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0x82 */ {{1, 1}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0x83 */ {{0, 1}, {7, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0x84 */ {{2, 2}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0x85 */ {{0, 0}, {2, 2}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0x86 */ {{1, 2}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0x87 */ {{0, 2}, {7, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0x88 */ {{3, 3}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0x89 */ {{0, 0}, {3, 3}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0x8a */ {{1, 1}, {3, 3}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0x8b */ {{0, 1}, {3, 3}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0x8c */ {{2, 3}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0x8d */ {{0, 0}, {2, 3}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0x8e */ {{1, 3}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0x8f */ {{0, 3}, {7, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0x90 */ {{4, 4}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0x91 */ {{0, 0}, {4, 4}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0x92 */ {{1, 1}, {4, 4}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0x93 */ {{0, 1}, {4, 4}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0x94 */ {{2, 2}, {4, 4}, {7, 7}, {0, 0} } }, {1, 1, 4, 0, /* 0x95 */ {{0, 0}, {2, 2}, {4, 4}, {7, 7} } }, {0, 
1, 3, 0, /* 0x96 */ {{1, 2}, {4, 4}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0x97 */ {{0, 2}, {4, 4}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0x98 */ {{3, 4}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0x99 */ {{0, 0}, {3, 4}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0x9a */ {{1, 1}, {3, 4}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0x9b */ {{0, 1}, {3, 4}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0x9c */ {{2, 4}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0x9d */ {{0, 0}, {2, 4}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0x9e */ {{1, 4}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0x9f */ {{0, 4}, {7, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xa0 */ {{5, 5}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xa1 */ {{0, 0}, {5, 5}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xa2 */ {{1, 1}, {5, 5}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xa3 */ {{0, 1}, {5, 5}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xa4 */ {{2, 2}, {5, 5}, {7, 7}, {0, 0} } }, {1, 1, 4, 0, /* 0xa5 */ {{0, 0}, {2, 2}, {5, 5}, {7, 7} } }, {0, 1, 3, 0, /* 0xa6 */ {{1, 2}, {5, 5}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xa7 */ {{0, 2}, {5, 5}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xa8 */ {{3, 3}, {5, 5}, {7, 7}, {0, 0} } }, {1, 1, 4, 0, /* 0xa9 */ {{0, 0}, {3, 3}, {5, 5}, {7, 7} } }, {0, 1, 4, 0, /* 0xaa */ {{1, 1}, {3, 3}, {5, 5}, {7, 7} } }, {1, 1, 4, 0, /* 0xab */ {{0, 1}, {3, 3}, {5, 5}, {7, 7} } }, {0, 1, 3, 0, /* 0xac */ {{2, 3}, {5, 5}, {7, 7}, {0, 0} } }, {1, 1, 4, 0, /* 0xad */ {{0, 0}, {2, 3}, {5, 5}, {7, 7} } }, {0, 1, 3, 0, /* 0xae */ {{1, 3}, {5, 5}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xaf */ {{0, 3}, {5, 5}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xb0 */ {{4, 5}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xb1 */ {{0, 0}, {4, 5}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xb2 */ {{1, 1}, {4, 5}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xb3 */ {{0, 1}, {4, 5}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xb4 */ {{2, 2}, {4, 5}, {7, 7}, {0, 0} } }, {1, 1, 4, 0, /* 0xb5 */ {{0, 0}, {2, 2}, {4, 5}, {7, 7} } }, {0, 1, 3, 0, /* 0xb6 */ {{1, 2}, {4, 5}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xb7 */ {{0, 2}, {4, 5}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xb8 */ {{3, 5}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xb9 */ {{0, 0}, {3, 5}, {7, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xba */ {{1, 1}, {3, 5}, {7, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xbb */ {{0, 1}, {3, 5}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xbc */ {{2, 5}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xbd */ {{0, 0}, {2, 5}, {7, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xbe */ {{1, 5}, {7, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xbf */ {{0, 5}, {7, 7}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0xc0 */ {{6, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xc1 */ {{0, 0}, {6, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xc2 */ {{1, 1}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xc3 */ {{0, 1}, {6, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xc4 */ {{2, 2}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xc5 */ {{0, 0}, {2, 2}, {6, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xc6 */ {{1, 2}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xc7 */ {{0, 2}, {6, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xc8 */ {{3, 3}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xc9 */ {{0, 0}, {3, 3}, {6, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xca */ {{1, 1}, {3, 3}, {6, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xcb */ {{0, 1}, {3, 3}, {6, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xcc */ {{2, 3}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xcd */ {{0, 0}, {2, 3}, {6, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xce */ {{1, 3}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xcf */ {{0, 3}, {6, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xd0 
*/ {{4, 4}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xd1 */ {{0, 0}, {4, 4}, {6, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xd2 */ {{1, 1}, {4, 4}, {6, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xd3 */ {{0, 1}, {4, 4}, {6, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xd4 */ {{2, 2}, {4, 4}, {6, 7}, {0, 0} } }, {1, 1, 4, 0, /* 0xd5 */ {{0, 0}, {2, 2}, {4, 4}, {6, 7} } }, {0, 1, 3, 0, /* 0xd6 */ {{1, 2}, {4, 4}, {6, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xd7 */ {{0, 2}, {4, 4}, {6, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xd8 */ {{3, 4}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xd9 */ {{0, 0}, {3, 4}, {6, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xda */ {{1, 1}, {3, 4}, {6, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xdb */ {{0, 1}, {3, 4}, {6, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xdc */ {{2, 4}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xdd */ {{0, 0}, {2, 4}, {6, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xde */ {{1, 4}, {6, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xdf */ {{0, 4}, {6, 7}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0xe0 */ {{5, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xe1 */ {{0, 0}, {5, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xe2 */ {{1, 1}, {5, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xe3 */ {{0, 1}, {5, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xe4 */ {{2, 2}, {5, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xe5 */ {{0, 0}, {2, 2}, {5, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xe6 */ {{1, 2}, {5, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xe7 */ {{0, 2}, {5, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xe8 */ {{3, 3}, {5, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xe9 */ {{0, 0}, {3, 3}, {5, 7}, {0, 0} } }, {0, 1, 3, 0, /* 0xea */ {{1, 1}, {3, 3}, {5, 7}, {0, 0} } }, {1, 1, 3, 0, /* 0xeb */ {{0, 1}, {3, 3}, {5, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xec */ {{2, 3}, {5, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xed */ {{0, 0}, {2, 3}, {5, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xee */ {{1, 3}, {5, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xef */ {{0, 3}, {5, 7}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0xf0 */ {{4, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xf1 */ {{0, 0}, {4, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xf2 */ {{1, 1}, {4, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xf3 */ {{0, 1}, {4, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xf4 */ {{2, 2}, {4, 7}, {0, 0}, {0, 0} } }, {1, 1, 3, 0, /* 0xf5 */ {{0, 0}, {2, 2}, {4, 7}, {0, 0} } }, {0, 1, 2, 0, /* 0xf6 */ {{1, 2}, {4, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xf7 */ {{0, 2}, {4, 7}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0xf8 */ {{3, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xf9 */ {{0, 0}, {3, 7}, {0, 0}, {0, 0} } }, {0, 1, 2, 0, /* 0xfa */ {{1, 1}, {3, 7}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xfb */ {{0, 1}, {3, 7}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0xfc */ {{2, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 2, 0, /* 0xfd */ {{0, 0}, {2, 7}, {0, 0}, {0, 0} } }, {0, 1, 1, 0, /* 0xfe */ {{1, 7}, {0, 0}, {0, 0}, {0, 0} } }, {1, 1, 1, 0, /* 0xff */ {{0, 7}, {0, 0}, {0, 0}, {0, 0} } } }; int sctp_is_address_in_scope(struct sctp_ifa *ifa, struct sctp_scoping *scope, int do_update) { if ((scope->loopback_scope == 0) && (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) { /* * skip loopback if not in scope * */ return (0); } switch (ifa->address.sa.sa_family) { #ifdef INET case AF_INET: if (scope->ipv4_addr_legal) { struct sockaddr_in *sin; sin = &ifa->address.sin; if (sin->sin_addr.s_addr == 0) { /* not in scope , unspecified */ return (0); } if ((scope->ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { /* private address not in scope */ return (0); } } else { return (0); } break; #endif #ifdef INET6 case 
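/*
 * Illustrative sketch (not part of this change): sack_array above maps
 * each byte of the SACK mapping array to its gap-ack runs, so building
 * a SACK scans bytes rather than bits. For one byte 'b' whose first bit
 * corresponds to TSN offset 'base', the table would be consumed roughly
 * as follows (emit_gap() is a hypothetical callback):
 *
 *	const struct sack_track *t = &sack_array[b];
 *	for (int i = 0; i < t->num_entries; i++)
 *		emit_gap(base + t->gaps[i].start, base + t->gaps[i].end);
 *
 * right_edge and left_edge flag whether the last/first run touches the
 * byte boundary, letting runs that span adjacent bytes be merged into a
 * single gap-ack block.
 */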
AF_INET6: if (scope->ipv6_addr_legal) { struct sockaddr_in6 *sin6; /* * Must update the flags, bummer, which means any * IFA locks must now be applied HERE <-> */ if (do_update) { sctp_gather_internal_ifa_flags(ifa); } if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { return (0); } /* ok to use deprecated addresses? */ sin6 = &ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* skip unspecified addresses */ return (0); } if ( /* (local_scope == 0) && */ (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) { return (0); } if ((scope->site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { return (0); } } else { return (0); } break; #endif default: return (0); } return (1); } static struct mbuf * sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len) { #if defined(INET) || defined(INET6) struct sctp_paramhdr *paramh; struct mbuf *mret; uint16_t plen; #endif switch (ifa->address.sa.sa_family) { #ifdef INET case AF_INET: plen = (uint16_t)sizeof(struct sctp_ipv4addr_param); break; #endif #ifdef INET6 case AF_INET6: plen = (uint16_t)sizeof(struct sctp_ipv6addr_param); break; #endif default: return (m); } #if defined(INET) || defined(INET6) if (M_TRAILINGSPACE(m) >= plen) { /* easy side we just drop it on the end */ paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m))); mret = m; } else { /* Need more space */ mret = m; while (SCTP_BUF_NEXT(mret) != NULL) { mret = SCTP_BUF_NEXT(mret); } SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA); if (SCTP_BUF_NEXT(mret) == NULL) { /* We are hosed, can't add more addresses */ return (m); } mret = SCTP_BUF_NEXT(mret); paramh = mtod(mret, struct sctp_paramhdr *); } /* now add the parameter */ switch (ifa->address.sa.sa_family) { #ifdef INET case AF_INET: { struct sctp_ipv4addr_param *ipv4p; struct sockaddr_in *sin; sin = &ifa->address.sin; ipv4p = (struct sctp_ipv4addr_param *)paramh; paramh->param_type = htons(SCTP_IPV4_ADDRESS); paramh->param_length = htons(plen); ipv4p->addr = sin->sin_addr.s_addr; SCTP_BUF_LEN(mret) += plen; break; } #endif #ifdef INET6 case AF_INET6: { struct sctp_ipv6addr_param *ipv6p; struct sockaddr_in6 *sin6; sin6 = &ifa->address.sin6; ipv6p = (struct sctp_ipv6addr_param *)paramh; paramh->param_type = htons(SCTP_IPV6_ADDRESS); paramh->param_length = htons(plen); memcpy(ipv6p->addr, &sin6->sin6_addr, sizeof(ipv6p->addr)); /* clear embedded scope in the address */ in6_clearscope((struct in6_addr *)ipv6p->addr); SCTP_BUF_LEN(mret) += plen; break; } #endif default: return (m); } if (len != NULL) { *len += plen; } return (mret); #endif } struct mbuf * sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_scoping *scope, struct mbuf *m_at, int cnt_inits_to, uint16_t *padding_len, uint16_t *chunk_len) { struct sctp_vrf *vrf = NULL; int cnt, limit_out = 0, total_count; uint32_t vrf_id; vrf_id = inp->def_vrf_id; SCTP_IPI_ADDR_RLOCK(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { SCTP_IPI_ADDR_RUNLOCK(); return (m_at); } if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct sctp_ifa *sctp_ifap; struct sctp_ifn *sctp_ifnp; cnt = cnt_inits_to; if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) { limit_out = 1; cnt = SCTP_ADDRESS_LIMIT; goto skip_count; } LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) { if ((scope->loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) { /* * Skip loopback devices if loopback_scope * not set */ continue; } LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { #ifdef INET if 
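/*
 * For reference (RFC 4960, section 3.3.2.1): sctp_add_addr_to_mbuf()
 * above appends one TLV parameter per address, either SCTP_IPV4_ADDRESS
 * (length 8, 4-byte address) or SCTP_IPV6_ADDRESS (length 20, 16-byte
 * address with any embedded scope cleared).
 */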
((sctp_ifap->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifap->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifap->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifap->address.sin6.sin6_addr) != 0)) { continue; } #endif if (sctp_is_addr_restricted(stcb, sctp_ifap)) { continue; } if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) { continue; } cnt++; if (cnt > SCTP_ADDRESS_LIMIT) { break; } } if (cnt > SCTP_ADDRESS_LIMIT) { break; } } skip_count: if (cnt > 1) { total_count = 0; LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) { cnt = 0; if ((scope->loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) { /* * Skip loopback devices if * loopback_scope not set */ continue; } LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { #ifdef INET if ((sctp_ifap->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifap->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifap->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifap->address.sin6.sin6_addr) != 0)) { continue; } #endif if (sctp_is_addr_restricted(stcb, sctp_ifap)) { continue; } if (sctp_is_address_in_scope(sctp_ifap, scope, 0) == 0) { continue; } if ((chunk_len != NULL) && (padding_len != NULL) && (*padding_len > 0)) { memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len); SCTP_BUF_LEN(m_at) += *padding_len; *chunk_len += *padding_len; *padding_len = 0; } m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len); if (limit_out) { cnt++; total_count++; if (cnt >= 2) { /* * two from each * address */ break; } if (total_count > SCTP_ADDRESS_LIMIT) { /* No more addresses */ break; } } } } } } else { struct sctp_laddr *laddr; cnt = cnt_inits_to; /* First, how many ? */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { continue; } if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) /* * Address being deleted by the system, dont * list. */ continue; if (laddr->action == SCTP_DEL_IP_ADDRESS) { /* * Address being deleted on this ep don't * list. */ continue; } if (sctp_is_address_in_scope(laddr->ifa, scope, 1) == 0) { continue; } cnt++; } /* * To get through a NAT we only list addresses if we have * more than one. That way if you just bind a single address * we let the source of the init dictate our address. */ if (cnt > 1) { cnt = cnt_inits_to; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { continue; } if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { continue; } if (sctp_is_address_in_scope(laddr->ifa, scope, 0) == 0) { continue; } if ((chunk_len != NULL) && (padding_len != NULL) && (*padding_len > 0)) { memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len); SCTP_BUF_LEN(m_at) += *padding_len; *chunk_len += *padding_len; *padding_len = 0; } m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len); cnt++; if (cnt >= SCTP_ADDRESS_LIMIT) { break; } } } } SCTP_IPI_ADDR_RUNLOCK(); return (m_at); } static struct sctp_ifa * sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa, uint8_t dest_is_loop, uint8_t dest_is_priv, sa_family_t fam) { uint8_t dest_is_global = 0; /* dest_is_priv is true if destination is a private address */ /* dest_is_loop is true if destination is a loopback addresses */ /** * Here we determine if its a preferred address. A preferred address * means it is the same scope or higher scope then the destination. 
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *  src    |  dest  | result
 * -----------------------------------------
 *   L     |   L    |  yes
 * -----------------------------------------
 *   P     |   L    |  yes-v4 no-v6
 * -----------------------------------------
 *   G     |   L    |  yes-v4 no-v6
 * -----------------------------------------
 *   L     |   P    |  no
 * -----------------------------------------
 *   P     |   P    |  yes
 * -----------------------------------------
 *   G     |   P    |  no
 * -----------------------------------------
 *   L     |   G    |  no
 * -----------------------------------------
 *   P     |   G    |  no
 * -----------------------------------------
 *   G     |   G    |  yes
 * -----------------------------------------
 */
	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
#ifdef INET6
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no, let's not! */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		if (ifa->src_is_priv && !ifa->src_is_loop) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
#endif
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
	    ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
	    dest_is_loop, dest_is_priv, dest_is_global);
	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* it's a preferred address */
	return (ifa);
}

static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/**
	 * Here we determine if it's an acceptable address. An acceptable
	 * address means it is the same scope or a higher scope, but we can
	 * allow for NAT, which means it's ok to have a global dest and a
	 * private src.
	 *
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src    |  dest  | result
	 * -----------------------------------------
	 *   L     |   L    |  yes
	 * -----------------------------------------
	 *   P     |   L    |  yes-v4 no-v6
	 * -----------------------------------------
	 *   G     |   L    |  yes
	 * -----------------------------------------
	 *   L     |   P    |  no
	 * -----------------------------------------
	 *   P     |   P    |  yes
	 * -----------------------------------------
	 *   G     |   P    |  yes - May not work
	 * -----------------------------------------
	 *   L     |   G    |  no
	 * -----------------------------------------
	 *   P     |   G    |  yes - May not work
	 * -----------------------------------------
	 *   G     |   G    |  yes
	 * -----------------------------------------
	 */
	if (ifa->address.sa.sa_family != fam) {
		/* forget non-matching family */
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
		    ifa->address.sa.sa_family, fam);
		return (NULL);
	}
	/* Ok the address may be ok */
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
	    dest_is_loop, dest_is_priv);
	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
		dest_is_global = 1;
	}
#ifdef INET6
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* Special case, linklocal to loop */
			if (dest_is_loop)
				return (NULL);
		}
	}
#endif
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
	    ifa->src_is_loop, dest_is_priv);
	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
	    ifa->src_is_loop, dest_is_global);
	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
	/* it's an acceptable address */
	return (ifa);
}

int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (stcb == NULL) {
		/* There are no restrictions, no TCB :-) */
		return (0);
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __func__);
			continue;
		}
		if (laddr->ifa == ifa) {
			/* Yes it is on the list */
			return (1);
		}
	}
	return (0);
}

int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (ifa == NULL)
		return (0);
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __func__);
			continue;
		}
		if ((laddr->ifa == ifa) && laddr->action == 0)
			/* same pointer */
			return (1);
	}
	return (0);
}

static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t *ro, uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * First question: is the ifn we will emit on in our list? If so, we
	 * want such an address. Note that we first look for a preferred
	 * address.
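	 *
	 * Condensed view of the two classification tables above (sketch):
	 *
	 *	preferred : source scope must be at least the destination
	 *	    scope (no loopback or private source for a global
	 *	    destination);
	 *	acceptable: preferred, plus the NAT-friendly private<->global
	 *	    pairs, which the tables flag as "May not work".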
*/ if (sctp_ifn) { /* is a preferred one on the interface we route out? */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) continue; sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; if (sctp_is_addr_in_ep(inp, sifa)) { atomic_add_int(&sifa->refcount, 1); return (sifa); } } } /* * ok, now we now need to find one on the list of the addresses. We * can't get one on the emitting interface so let's find first a * preferred one. If not that an acceptable one otherwise... we * return NULL. */ starting_point = inp->next_addr_touse; once_again: if (inp->next_addr_touse == NULL) { inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); resettotop = 1; } for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { if (laddr->ifa == NULL) { /* address has been removed */ continue; } if (laddr->action == SCTP_DEL_IP_ADDRESS) { /* address is being deleted */ continue; } sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; atomic_add_int(&sifa->refcount, 1); return (sifa); } if (resettotop == 0) { inp->next_addr_touse = NULL; goto once_again; } inp->next_addr_touse = starting_point; resettotop = 0; once_again_too: if (inp->next_addr_touse == NULL) { inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); resettotop = 1; } /* ok, what about an acceptable address in the inp */ for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { if (laddr->ifa == NULL) { /* address has been removed */ continue; } if (laddr->action == SCTP_DEL_IP_ADDRESS) { /* address is being deleted */ continue; } sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; atomic_add_int(&sifa->refcount, 1); return (sifa); } if (resettotop == 0) { inp->next_addr_touse = NULL; goto once_again_too; } /* * no address bound can be a source for the destination we are in * trouble */ return (NULL); } static struct sctp_ifa * sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, sctp_route_t *ro, uint32_t vrf_id, uint8_t dest_is_priv, uint8_t dest_is_loop, int non_asoc_addr_ok, sa_family_t fam) { struct sctp_laddr *laddr, *starting_point; void *ifn; struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa, *sifa; uint8_t start_at_beginning = 0; struct sctp_vrf *vrf; uint32_t ifn_index; /* * first question, is the ifn we will emit on in our list, if so, we * want that one. */ vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) return (NULL); ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); sctp_ifn = sctp_find_ifn(ifn, ifn_index); /* * first question, is the ifn we will emit on in our list? If so, * we want that one. First we look for a preferred. Second, we go * for an acceptable. 
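 * Failing that, the search below rotates through the endpoint's bound
 * address list starting at asoc.last_used_address, wrapping around
 * once, and runs the same two-tier search: a preferred address first,
 * then a merely acceptable one.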
*/ if (sctp_ifn) { /* first try for a preferred address on the ep */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) continue; if (sctp_is_addr_in_ep(inp, sctp_ifa)) { sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* on the no-no list */ continue; } atomic_add_int(&sifa->refcount, 1); return (sifa); } } /* next try for an acceptable address on the ep */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) continue; if (sctp_is_addr_in_ep(inp, sctp_ifa)) { sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* on the no-no list */ continue; } atomic_add_int(&sifa->refcount, 1); return (sifa); } } } /* * if we can't find one like that then we must look at all addresses * bound to pick one at first preferable then secondly acceptable. 
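 *
 * The scan below is a rotation: it resumes at asoc.last_used_address,
 * wraps once to the head of inp->sctp_addr_list, and records the winner
 * back into last_used_address, so successive calls spread the choice
 * across all bound addresses.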
*/ starting_point = stcb->asoc.last_used_address; sctp_from_the_top: if (stcb->asoc.last_used_address == NULL) { start_at_beginning = 1; stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); } /* search beginning with the last used address */ for (laddr = stcb->asoc.last_used_address; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { if (laddr->ifa == NULL) { /* address has been removed */ continue; } if (laddr->action == SCTP_DEL_IP_ADDRESS) { /* address is being deleted */ continue; } sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* on the no-no list */ continue; } stcb->asoc.last_used_address = laddr; atomic_add_int(&sifa->refcount, 1); return (sifa); } if (start_at_beginning == 0) { stcb->asoc.last_used_address = NULL; goto sctp_from_the_top; } /* now try for any higher scope than the destination */ stcb->asoc.last_used_address = starting_point; start_at_beginning = 0; sctp_from_the_top2: if (stcb->asoc.last_used_address == NULL) { start_at_beginning = 1; stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); } /* search beginning with the last used address */ for (laddr = stcb->asoc.last_used_address; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { if (laddr->ifa == NULL) { /* address has been removed */ continue; } if (laddr->action == SCTP_DEL_IP_ADDRESS) { /* address is being deleted */ continue; } sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* on the no-no list */ continue; } stcb->asoc.last_used_address = laddr; atomic_add_int(&sifa->refcount, 1); return (sifa); } if (start_at_beginning == 0) { stcb->asoc.last_used_address = NULL; goto sctp_from_the_top2; } return (NULL); } static struct sctp_ifa * sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, struct sctp_inpcb *inp, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t dest_is_loop, uint8_t dest_is_priv, int addr_wanted, sa_family_t fam, sctp_route_t *ro) { struct sctp_ifa *ifa, *sifa; int num_eligible_addr = 0; #ifdef INET6 struct sockaddr_in6 sin6, lsa6; if (fam == AF_INET6) { memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6)); (void)sa6_recoverscope(&sin6); } #endif /* INET6 */ LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { #ifdef INET if ((ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) continue; sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; #ifdef INET6 if (fam == AF_INET6 && dest_is_loop && sifa->src_is_loop && sifa->src_is_priv) { /* * don't allow fe80::1 to be a src on loop ::1, we * don't list it to the peer so we will get an * abort. 
*/ continue; } if (fam == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) && IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { /* * link-local <-> link-local must belong to the same * scope. */ memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6)); (void)sa6_recoverscope(&lsa6); if (sin6.sin6_scope_id != lsa6.sin6_scope_id) { continue; } } #endif /* INET6 */ /* * Check if the IPv6 address matches to next-hop. In the * mobile case, old IPv6 address may be not deleted from the * interface. Then, the interface has previous and new * addresses. We should use one corresponding to the * next-hop. (by micchie) */ #ifdef INET6 if (stcb && fam == AF_INET6 && sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro) == 0) { continue; } } #endif #ifdef INET /* Avoid topologically incorrect IPv4 address */ if (stcb && fam == AF_INET && sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { if (sctp_v4src_match_nexthop(sifa, ro) == 0) { continue; } } #endif if (stcb) { if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { continue; } if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* * It is restricted for some reason.. * probably not yet added. */ continue; } } if (num_eligible_addr >= addr_wanted) { return (sifa); } num_eligible_addr++; } return (NULL); } static int sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, struct sctp_inpcb *inp, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t dest_is_loop, uint8_t dest_is_priv, sa_family_t fam) { struct sctp_ifa *ifa, *sifa; int num_eligible_addr = 0; LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { #ifdef INET if ((ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((ifa->address.sa.sa_family == AF_INET6) && (stcb != NULL) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) { continue; } sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) { continue; } if (stcb) { if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { continue; } if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* * It is restricted for some reason.. * probably not yet added. */ continue; } } num_eligible_addr++; } return (num_eligible_addr); } static struct sctp_ifa * sctp_choose_boundall(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, sctp_route_t *ro, uint32_t vrf_id, uint8_t dest_is_priv, uint8_t dest_is_loop, int non_asoc_addr_ok, sa_family_t fam) { int cur_addr_num = 0, num_preferred = 0; void *ifn; struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; struct sctp_ifa *sctp_ifa, *sifa; uint32_t ifn_index; struct sctp_vrf *vrf; #ifdef INET int retried = 0; #endif /*- * For boundall we can use any address in the association. * If non_asoc_addr_ok is set we can use any address (at least in * theory). So we look for preferred addresses first. If we find one, * we use it. Otherwise we next try to get an address on the * interface, which we should be able to do (unless non_asoc_addr_ok * is false and we are routed out that way). 
In these cases where we * can't use the address of the interface we go through all the * ifn's looking for an address we can use and fill that in. Punting * means we send back address 0, which will probably cause problems * actually since then IP will fill in the address of the route ifn, * which means we probably already rejected it.. i.e. here comes an * abort :-<. */ vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) return (NULL); ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index); emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index); if (sctp_ifn == NULL) { /* ?? We don't have this guy ?? */ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n"); goto bound_all_plan_b; } SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n", ifn_index, sctp_ifn->ifn_name); if (net) { cur_addr_num = net->indx_of_eligible_next_to_use; } num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, dest_is_priv, fam); SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n", num_preferred, sctp_ifn->ifn_name); if (num_preferred == 0) { /* * no eligible addresses, we must use some other interface * address if we can find one. */ goto bound_all_plan_b; } /* * Ok we have num_eligible_addr set with how many we can use, this * may vary from call to call due to addresses being deprecated * etc.. */ if (cur_addr_num >= num_preferred) { cur_addr_num = 0; } /* * select the nth address from the list (where cur_addr_num is the * nth) and 0 is the first one, 1 is the second one etc... */ SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num); sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, dest_is_priv, cur_addr_num, fam, ro); /* if sctp_ifa is NULL something changed??, fall to plan b. */ if (sctp_ifa) { atomic_add_int(&sctp_ifa->refcount, 1); if (net) { /* save off where the next one we will want */ net->indx_of_eligible_next_to_use = cur_addr_num + 1; } return (sctp_ifa); } /* * plan_b: Look at all interfaces and find a preferred address. If * no preferred fall through to plan_c. */ bound_all_plan_b: SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n"); LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n", sctp_ifn->ifn_name); if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* wrong base scope */ SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n"); continue; } if ((sctp_ifn == looked_at) && looked_at) { /* already looked at this guy */ SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n"); continue; } num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, dest_is_priv, fam); SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found ifn:%p %d preferred source addresses\n", ifn, num_preferred); if (num_preferred == 0) { /* None on this interface. */ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n"); continue; } SCTPDBG(SCTP_DEBUG_OUTPUT2, "num preferred:%d on interface:%p cur_addr_num:%d\n", num_preferred, (void *)sctp_ifn, cur_addr_num); /* * Ok we have num_eligible_addr set with how many we can * use, this may vary from call to call due to addresses * being deprecated etc.. 
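 *
 * The rotation is simple modular arithmetic: cur_addr_num is reset to 0
 * whenever it reaches num_preferred, so with three preferred addresses
 * the picks cycle 0, 1, 2, 0, ... and the position is saved in
 * net->indx_of_eligible_next_to_use between calls. In shorthand (nth()
 * standing in for sctp_select_nth_preferred_addr_from_ifn_boundall()):
 *
 *     if (cur_addr_num >= num_preferred)
 *         cur_addr_num = 0;
 *     sifa = nth(cur_addr_num);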
*/ if (cur_addr_num >= num_preferred) { cur_addr_num = 0; } sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, dest_is_priv, cur_addr_num, fam, ro); if (sifa == NULL) continue; if (net) { net->indx_of_eligible_next_to_use = cur_addr_num + 1; SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n", cur_addr_num); SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:"); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:"); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa); } atomic_add_int(&sifa->refcount, 1); return (sifa); } #ifdef INET again_with_private_addresses_allowed: #endif /* plan_c: do we have an acceptable address on the emit interface */ sifa = NULL; SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n"); if (emit_ifn == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n"); goto plan_d; } LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa); #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin.sin_addr) != 0)) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n"); continue; } #endif #ifdef INET6 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin6.sin6_addr) != 0)) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n"); continue; } #endif if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n"); continue; } sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n"); continue; } if (stcb) { if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n"); sifa = NULL; continue; } if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* * It is restricted for some reason.. * probably not yet added. */ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n"); sifa = NULL; continue; } } atomic_add_int(&sifa->refcount, 1); goto out; } plan_d: /* * plan_d: We are in trouble. No preferred address on the emit * interface. And not even a preferred address on all interfaces. Go * out and see if we can find an acceptable address somewhere * amongst all interfaces. 
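 *
 * To recap the whole ladder: plan A tries preferred addresses on the
 * emit interface, plan B preferred addresses on the other interfaces,
 * plan C acceptable addresses on the emit interface, and plan D
 * acceptable addresses anywhere. If even plan D fails, the IPv4 path
 * below retries plans C and D once with private (RFC 1918) scope
 * temporarily enabled.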
*/ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at); LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* wrong base scope */ continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) continue; sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); if (sifa == NULL) continue; if (stcb) { if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { sifa = NULL; continue; } if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, sifa)) && (!sctp_is_addr_pending(stcb, sifa)))) { /* * It is restricted for some * reason.. probably not yet added. */ sifa = NULL; continue; } } goto out; } } #ifdef INET if (stcb) { if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { stcb->asoc.scope.ipv4_local_scope = 1; retried = 1; goto again_with_private_addresses_allowed; } else if (retried == 1) { stcb->asoc.scope.ipv4_local_scope = 0; } } #endif out: #ifdef INET if (sifa) { if (retried == 1) { LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* wrong base scope */ continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { struct sctp_ifa *tmp_sifa; #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin.sin_addr) != 0)) { continue; } #endif #ifdef INET6 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sctp_ifa->address.sin6.sin6_addr) != 0)) { continue; } #endif if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) continue; tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); if (tmp_sifa == NULL) { continue; } if (tmp_sifa == sifa) { continue; } if (stcb) { if (sctp_is_address_in_scope(tmp_sifa, &stcb->asoc.scope, 0) == 0) { continue; } if (((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, tmp_sifa))) || (non_asoc_addr_ok && (sctp_is_addr_restricted(stcb, tmp_sifa)) && (!sctp_is_addr_pending(stcb, tmp_sifa)))) { /* * It is restricted * for some reason.. * probably not yet * added. */ continue; } } if ((tmp_sifa->address.sin.sin_family == AF_INET) && (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) { sctp_add_local_addr_restricted(stcb, tmp_sifa); } } } } atomic_add_int(&sifa->refcount, 1); } #endif return (sifa); } /* tcb may be NULL */ struct sctp_ifa * sctp_source_address_selection(struct sctp_inpcb *inp, struct sctp_tcb *stcb, sctp_route_t *ro, struct sctp_nets *net, int non_asoc_addr_ok, uint32_t vrf_id) { struct sctp_ifa *answer; uint8_t dest_is_priv, dest_is_loop; sa_family_t fam; #ifdef INET struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst; #endif #ifdef INET6 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst; #endif /** * Rules: * - Find the route if needed, cache if I can. * - Look at interface address in route, Is it in the bound list. If so we * have the best source. 
     * - If not we must rotate amongst the addresses.
     *
     * Caveats and issues:
     *
     * Do we need to pay attention to scope? We can have a private
     * address or a global address we are sourcing or sending to. So, if
     * we draw it out:
     *
     * For V4
     * ------------------------------------------
     *   source   |   dest    |  result
     * ------------------------------------------
     *   Private  |  Global   |  NAT
     * ------------------------------------------
     *   Private  |  Private  |  No problem
     * ------------------------------------------
     *   Global   |  Private  |  Huh, how will this work?
     * ------------------------------------------
     *   Global   |  Global   |  No problem
     * ------------------------------------------
     *
     * For V6
     * ------------------------------------------
     *   source    |   dest      |  result
     * ------------------------------------------
     *   Linklocal |  Global     |
     * ------------------------------------------
     *   Linklocal |  Linklocal  |  No problem
     * ------------------------------------------
     *   Global    |  Linklocal  |  Huh, how will this work?
     * ------------------------------------------
     *   Global    |  Global     |  No problem
     * ------------------------------------------
     *
     * And then we add to that what happens if there are multiple
     * addresses assigned to an interface. Remember, the ifa on an ifn is
     * a linked list of addresses, so one interface can have more than
     * one IP address. What happens if we have both a private and a
     * global address? Do we then use the context of the destination to
     * sort out which one is best? And what about NATs: sending P->G may
     * get you a NAT translation, or should you select the G that's on
     * the interface in preference?
     *
     * Decisions:
     *
     * - Count the number of addresses on the interface.
     * - If it is one, no problem except the Private -> Global case, for
     *   which we will assume a NAT out there.
     * - If there are more than one, then we need to worry about scope P
     *   or G. We should prefer G -> G and P -> P if possible; then, as a
     *   secondary, fall back to mixed types, G -> P being a last-ditch
     *   one.
     * - The above all works for bound-all, but for bound-specific we
     *   need to use the same concept, only considering the bound
     *   addresses. If the bound set is NOT assigned to the interface
     *   then we must use rotation amongst the bound addresses.
     */
    if (ro->ro_nh == NULL) {
        /*
         * Need a route to cache.
         */
        SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
    }
    if (ro->ro_nh == NULL) {
        return (NULL);
    }
    fam = ro->ro_dst.sa_family;
    dest_is_priv = dest_is_loop = 0;
    /* Setup our scopes for the destination */
    switch (fam) {
#ifdef INET
    case AF_INET:
        /* Scope based on outbound address */
        if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
            dest_is_priv = 1;
        }
        break;
#endif
#ifdef INET6
    case AF_INET6:
        /* Scope based on outbound address */
        if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
            SCTP_ROUTE_IS_REAL_LOOP(ro)) {
            /*
             * If the address is a loopback address, which consists of
             * "::1" OR "fe80::1%lo0", we are loopback scope. But we
             * don't use dest_is_priv (link-local addresses).
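             * Concretely (informative examples only): ::1 and
             * fe80::1%lo0 classify as dest_is_loop; any other fe80::/10
             * destination as dest_is_priv; and a global unicast address
             * such as 2001:db8::1 sets neither flag.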
             */
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
            dest_is_priv = 1;
        }
        break;
#endif
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
    SCTP_IPI_ADDR_RLOCK();
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /*
         * Bound all case
         */
        answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
            dest_is_priv, dest_is_loop,
            non_asoc_addr_ok, fam);
        SCTP_IPI_ADDR_RUNLOCK();
        return (answer);
    }
    /*
     * Subset bound case
     */
    if (stcb) {
        answer = sctp_choose_boundspecific_stcb(inp, stcb, ro, vrf_id,
            dest_is_priv, dest_is_loop,
            non_asoc_addr_ok, fam);
    } else {
        answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
            non_asoc_addr_ok,
            dest_is_priv,
            dest_is_loop, fam);
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (answer);
}

static bool
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
    struct cmsghdr cmh;
    struct sctp_sndinfo sndinfo;
    struct sctp_prinfo prinfo;
    struct sctp_authinfo authinfo;
    int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
    bool found;

    /*
     * Independent of how many mbufs, find the c_type inside the control
     * structure and copy out the data.
     */
    found = false;
    tot_len = SCTP_BUF_LEN(control);
    for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
        rem_len = tot_len - off;
        if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            return (found);
        }
        m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
            /* We don't have a complete CMSG header. */
            return (found);
        }
        if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
            /* We don't have the complete CMSG. */
            return (found);
        }
        cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
        cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
        if ((cmh.cmsg_level == IPPROTO_SCTP) &&
            ((c_type == cmh.cmsg_type) ||
            ((c_type == SCTP_SNDRCV) &&
            ((cmh.cmsg_type == SCTP_SNDINFO) ||
            (cmh.cmsg_type == SCTP_PRINFO) ||
            (cmh.cmsg_type == SCTP_AUTHINFO))))) {
            if (c_type == cmh.cmsg_type) {
                if (cpsize > INT_MAX) {
                    return (found);
                }
                if (cmsg_data_len < (int)cpsize) {
                    return (found);
                }
                /*
                 * It is exactly what we want. Copy it out.
                 */
                m_copydata(control, cmsg_data_off, (int)cpsize,
                    (caddr_t)data);
                return (1);
            } else {
                struct sctp_sndrcvinfo *sndrcvinfo;

                sndrcvinfo = (struct sctp_sndrcvinfo *)data;
                if (!found) {
                    if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
                        return (found);
                    }
                    memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
                }
                switch (cmh.cmsg_type) {
                case SCTP_SNDINFO:
                    if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
                        return (found);
                    }
                    m_copydata(control, cmsg_data_off,
                        sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
                    sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
                    sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
                    sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
                    sndrcvinfo->sinfo_context = sndinfo.snd_context;
                    sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
                    break;
                case SCTP_PRINFO:
                    if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
                        return (found);
                    }
                    m_copydata(control, cmsg_data_off,
                        sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
                    if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
                        sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
                    } else {
                        sndrcvinfo->sinfo_timetolive = 0;
                    }
                    sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
                    break;
                case SCTP_AUTHINFO:
                    if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
                        return (found);
                    }
                    m_copydata(control, cmsg_data_off,
                        sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
                    sndrcvinfo->sinfo_keynumber_valid = 1;
                    sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
                    break;
                default:
                    return (found);
                }
                found = true;
            }
        }
    }
    return (found);
}

static int
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control,
    int *error)
{
    struct cmsghdr cmh;
    struct sctp_initmsg initmsg;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;

    tot_len = SCTP_BUF_LEN(control);
    for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
        rem_len = tot_len - off;
        if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            *error = EINVAL;
            return (1);
        }
        m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
            /* We don't have a complete CMSG header. */
            *error = EINVAL;
            return (1);
        }
        if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
            /*
             * We don't have the complete CMSG.
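             * That is, cmsg_len promises more bytes (header plus data)
             * than remain in the control mbuf, so the chain is malformed
             * and we bail out rather than read past its end.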
*/ *error = EINVAL; return (1); } cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh)); cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh)); if (cmh.cmsg_level == IPPROTO_SCTP) { switch (cmh.cmsg_type) { case SCTP_INIT: if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) { *error = EINVAL; return (1); } m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg); if (initmsg.sinit_max_attempts) stcb->asoc.max_init_times = initmsg.sinit_max_attempts; if (initmsg.sinit_num_ostreams) stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams; if (initmsg.sinit_max_instreams) stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams; if (initmsg.sinit_max_init_timeo) stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo; if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) { struct sctp_stream_out *tmp_str; unsigned int i; #if defined(SCTP_DETAILED_STR_STATS) int j; #endif /* Default is NOT correct */ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n", stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams); SCTP_TCB_UNLOCK(stcb); SCTP_MALLOC(tmp_str, struct sctp_stream_out *, (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)), SCTP_M_STRMO); SCTP_TCB_LOCK(stcb); if (tmp_str != NULL) { SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO); stcb->asoc.strmout = tmp_str; stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams; } else { stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt; } for (i = 0; i < stcb->asoc.streamoutcnt; i++) { TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL); stcb->asoc.strmout[i].chunks_on_queues = 0; #if defined(SCTP_DETAILED_STR_STATS) for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { stcb->asoc.strmout[i].abandoned_sent[j] = 0; stcb->asoc.strmout[i].abandoned_unsent[j] = 0; } #else stcb->asoc.strmout[i].abandoned_sent[0] = 0; stcb->asoc.strmout[i].abandoned_unsent[0] = 0; #endif stcb->asoc.strmout[i].next_mid_ordered = 0; stcb->asoc.strmout[i].next_mid_unordered = 0; stcb->asoc.strmout[i].sid = i; stcb->asoc.strmout[i].last_msg_incomplete = 0; stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING; } } break; #ifdef INET case SCTP_DSTADDRV4: if (cmsg_data_len < (int)sizeof(struct in_addr)) { *error = EINVAL; return (1); } memset(&sin, 0, sizeof(struct sockaddr_in)); sin.sin_family = AF_INET; sin.sin_len = sizeof(struct sockaddr_in); sin.sin_port = stcb->rport; m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr); if (in_broadcast(sin.sin_addr) || IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { *error = EINVAL; return (1); } if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { *error = ENOBUFS; return (1); } break; #endif #ifdef INET6 case SCTP_DSTADDRV6: if (cmsg_data_len < (int)sizeof(struct in6_addr)) { *error = EINVAL; return (1); } memset(&sin6, 0, sizeof(struct sockaddr_in6)); sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_port = stcb->rport; m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr); if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) || IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { *error = EINVAL; return (1); } #ifdef INET if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) { in6_sin6_2_sin(&sin, &sin6); if (in_broadcast(sin.sin_addr) || IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { *error = EINVAL; return (1); } if (sctp_add_remote_addr(stcb, (struct 
sockaddr *)&sin,
                        NULL, stcb->asoc.port,
                        SCTP_DONOT_SETSCOPE,
                        SCTP_ADDR_IS_CONFIRMED)) {
                        *error = ENOBUFS;
                        return (1);
                    }
                } else
#endif
                    if (sctp_add_remote_addr(stcb,
                        (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
                        SCTP_DONOT_SETSCOPE,
                        SCTP_ADDR_IS_CONFIRMED)) {
                        *error = ENOBUFS;
                        return (1);
                    }
                break;
#endif
            default:
                break;
            }
        }
    }
    return (0);
}

#if defined(INET) || defined(INET6)
static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
    uint16_t port,
    struct mbuf *control,
    struct sctp_nets **net_p,
    int *error)
{
    struct cmsghdr cmh;
    struct sctp_tcb *stcb;
    struct sockaddr *addr;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;

    tot_len = SCTP_BUF_LEN(control);
    for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
        rem_len = tot_len - off;
        if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            *error = EINVAL;
            return (NULL);
        }
        m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
            /* We don't have a complete CMSG header. */
            *error = EINVAL;
            return (NULL);
        }
        if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
            /* We don't have the complete CMSG. */
            *error = EINVAL;
            return (NULL);
        }
        cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
        cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
        if (cmh.cmsg_level == IPPROTO_SCTP) {
            switch (cmh.cmsg_type) {
#ifdef INET
            case SCTP_DSTADDRV4:
                if (cmsg_data_len < (int)sizeof(struct in_addr)) {
                    *error = EINVAL;
                    return (NULL);
                }
                memset(&sin, 0, sizeof(struct sockaddr_in));
                sin.sin_family = AF_INET;
                sin.sin_len = sizeof(struct sockaddr_in);
                sin.sin_port = port;
                m_copydata(control, cmsg_data_off,
                    sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
                addr = (struct sockaddr *)&sin;
                break;
#endif
#ifdef INET6
            case SCTP_DSTADDRV6:
                if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
                    *error = EINVAL;
                    return (NULL);
                }
                memset(&sin6, 0, sizeof(struct sockaddr_in6));
                sin6.sin6_family = AF_INET6;
                sin6.sin6_len = sizeof(struct sockaddr_in6);
                sin6.sin6_port = port;
                m_copydata(control, cmsg_data_off,
                    sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
#ifdef INET
                if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
                    in6_sin6_2_sin(&sin, &sin6);
                    addr = (struct sockaddr *)&sin;
                } else
#endif
                    addr = (struct sockaddr *)&sin6;
                break;
#endif
            default:
                addr = NULL;
                break;
            }
            if (addr) {
                stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p,
                    NULL, NULL);
                if (stcb != NULL) {
                    return (stcb);
                }
            }
        }
    }
    return (NULL);
}
#endif

static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset,
    struct sctp_state_cookie *stc_in, uint8_t **signature)
{
    struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
    struct sctp_state_cookie *stc;
    struct sctp_paramhdr *ph;
    uint16_t cookie_sz;

    mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr)), 0, M_NOWAIT, 1, MT_DATA);
    if (mret == NULL) {
        return (NULL);
    }
    copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
    if (copy_init == NULL) {
        sctp_m_freem(mret);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
    }
#endif
    copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
        M_NOWAIT);
    if (copy_initack == NULL) {
        sctp_m_freem(mret);
        sctp_m_freem(copy_init);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) &
SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY); } #endif /* easy side we just drop it on the end */ ph = mtod(mret, struct sctp_paramhdr *); SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) + sizeof(struct sctp_paramhdr); stc = (struct sctp_state_cookie *)((caddr_t)ph + sizeof(struct sctp_paramhdr)); ph->param_type = htons(SCTP_STATE_COOKIE); ph->param_length = 0; /* fill in at the end */ /* Fill in the stc cookie data */ memcpy(stc, stc_in, sizeof(struct sctp_state_cookie)); /* tack the INIT and then the INIT-ACK onto the chain */ cookie_sz = 0; for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) { cookie_sz += SCTP_BUF_LEN(m_at); if (SCTP_BUF_NEXT(m_at) == NULL) { SCTP_BUF_NEXT(m_at) = copy_init; break; } } for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) { cookie_sz += SCTP_BUF_LEN(m_at); if (SCTP_BUF_NEXT(m_at) == NULL) { SCTP_BUF_NEXT(m_at) = copy_initack; break; } } for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) { cookie_sz += SCTP_BUF_LEN(m_at); if (SCTP_BUF_NEXT(m_at) == NULL) { break; } } sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA); if (sig == NULL) { /* no space, so free the entire chain */ sctp_m_freem(mret); return (NULL); } SCTP_BUF_NEXT(m_at) = sig; SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE; cookie_sz += SCTP_SIGNATURE_SIZE; ph->param_length = htons(cookie_sz); *signature = (uint8_t *)mtod(sig, caddr_t); memset(*signature, 0, SCTP_SIGNATURE_SIZE); return (mret); } static uint8_t sctp_get_ect(struct sctp_tcb *stcb) { if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) { return (SCTP_ECT0_BIT); } else { return (0); } } #if defined(INET) || defined(INET6) static void sctp_handle_no_route(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n"); if (net) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa); if (net->dest_state & SCTP_ADDR_CONFIRMED) { if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net); sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, (void *)net, so_locked); net->dest_state &= ~SCTP_ADDR_REACHABLE; net->dest_state &= ~SCTP_ADDR_PF; } } if (stcb) { if (net == stcb->asoc.primary_destination) { /* need a new primary */ struct sctp_nets *alt; alt = sctp_find_alternate_net(stcb, net, 0); if (alt != net) { if (stcb->asoc.alternate) { sctp_free_remote_addr(stcb->asoc.alternate); } stcb->asoc.alternate = alt; atomic_add_int(&stcb->asoc.alternate->ref_count, 1); if (net->ro._s_addr) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; } net->src_addr_selected = 0; } } } } } #endif static int sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, /* may be NULL */ struct sctp_nets *net, struct sockaddr *to, struct mbuf *m, uint32_t auth_offset, struct sctp_auth_chunk *auth, uint16_t auth_keyid, int nofragment_flag, int ecn_ok, int out_of_asoc_ok, uint16_t src_port, uint16_t dest_port, uint32_t v_tag, uint16_t port, union sctp_sockstore *over_addr, uint8_t mflowtype, uint32_t mflowid, bool use_zero_crc, int so_locked) { /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ /** * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header * WITH an SCTPHDR but no IP header, endpoint inp and sa structure: * - fill in the HMAC digest of any AUTH chunk in the packet. * - calculate and fill in the SCTP checksum. 
* - prepend an IP address header. * - if boundall use INADDR_ANY. * - if boundspecific do source address selection. * - set fragmentation option for ipV4. * - On return from IP output, check/adjust mtu size of output * interface and smallest_mtu size as well. */ /* Will need ifdefs around this */ struct mbuf *newm; struct sctphdr *sctphdr; int packet_length; int ret; #if defined(INET) || defined(INET6) uint32_t vrf_id; #endif #if defined(INET) || defined(INET6) struct mbuf *o_pak; sctp_route_t *ro = NULL; struct udphdr *udp = NULL; #endif uint8_t tos_value; if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); sctp_m_freem(m); return (EFAULT); } #if defined(INET) || defined(INET6) if (stcb) { vrf_id = stcb->asoc.vrf_id; } else { vrf_id = inp->def_vrf_id; } #endif /* fill in the HMAC digest for any AUTH chunk in the packet */ if ((auth != NULL) && (stcb != NULL)) { sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid); } if (net) { tos_value = net->dscp; } else if (stcb) { tos_value = stcb->asoc.default_dscp; } else { tos_value = inp->sctp_ep.default_dscp; } switch (to->sa_family) { #ifdef INET case AF_INET: { struct ip *ip = NULL; sctp_route_t iproute; int len; len = SCTP_MIN_V4_OVERHEAD; if (port) { len += sizeof(struct udphdr); } newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); if (newm == NULL) { sctp_m_freem(m); SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_ALIGN_TO_END(newm, len); SCTP_BUF_LEN(newm) = len; SCTP_BUF_NEXT(newm) = m; m = newm; if (net != NULL) { m->m_pkthdr.flowid = net->flowid; M_HASHTYPE_SET(m, net->flowtype); } else { m->m_pkthdr.flowid = mflowid; M_HASHTYPE_SET(m, mflowtype); } packet_length = sctp_calculate_len(m); ip = mtod(m, struct ip *); ip->ip_v = IPVERSION; ip->ip_hl = (sizeof(struct ip) >> 2); if (tos_value == 0) { /* * This means especially, that it is not set * at the SCTP layer. So use the value from * the IP layer. 
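 *
 * DSCP precedence note: tos_value comes from net->dscp if set, else the
 * association default, else the endpoint default, and finally (here)
 * the inp's IP-level TOS. The two low-order ECN bits are masked off
 * below (tos_value &= 0xfc) before the ECT bit is ORed back in when ECN
 * is enabled.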
*/ tos_value = inp->ip_inp.inp.inp_ip_tos; } tos_value &= 0xfc; if (ecn_ok) { tos_value |= sctp_get_ect(stcb); } if ((nofragment_flag) && (port == 0)) { ip->ip_off = htons(IP_DF); } else { ip->ip_off = htons(0); } /* FreeBSD has a function for ip_id's */ - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; ip->ip_len = htons(packet_length); ip->ip_tos = tos_value; if (port) { ip->ip_p = IPPROTO_UDP; } else { ip->ip_p = IPPROTO_SCTP; } ip->ip_sum = 0; if (net == NULL) { ro = &iproute; memset(&iproute, 0, sizeof(iproute)); memcpy(&ro->ro_dst, to, to->sa_len); } else { ro = (sctp_route_t *)&net->ro; } /* Now the address selection part */ ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; /* call the routine to select the src address */ if (net && out_of_asoc_ok == 0) { if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; RO_NHFREE(ro); } if (net->src_addr_selected == 0) { /* Cache the source address */ net->ro._s_addr = sctp_source_address_selection(inp, stcb, ro, net, 0, vrf_id); net->src_addr_selected = 1; } if (net->ro._s_addr == NULL) { /* No route to host */ net->src_addr_selected = 0; sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } ip->ip_src = net->ro._s_addr->address.sin.sin_addr; } else { if (over_addr == NULL) { struct sctp_ifa *_lsrc; _lsrc = sctp_source_address_selection(inp, stcb, ro, net, out_of_asoc_ok, vrf_id); if (_lsrc == NULL) { sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } ip->ip_src = _lsrc->address.sin.sin_addr; sctp_free_ifa(_lsrc); } else { ip->ip_src = over_addr->sin.sin_addr; SCTP_RTALLOC(ro, vrf_id, inp->fibnum); } } if (port) { if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); udp->uh_dport = port; udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip))); if (V_udp_cksum) { udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); } else { udp->uh_sum = 0; } sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); } else { sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip)); } sctphdr->src_port = src_port; sctphdr->dest_port = dest_port; sctphdr->v_tag = v_tag; sctphdr->checksum = 0; /* * If source address selection fails and we find no * route then the ip_output should fail as well with * a NO_ROUTE_TO_HOST type error. We probably should * catch that somewhere and abort the association * right away (assuming this is an INIT being sent). */ if (ro->ro_nh == NULL) { /* * src addr selection failed to find a route * (or valid source addr), so we can't get * there from here (yet)! 
*/ sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } if (ro != &iproute) { memcpy(&iproute, ro, sizeof(*ro)); } SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n", (uint32_t)(ntohl(ip->ip_src.s_addr))); SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n", (uint32_t)(ntohl(ip->ip_dst.s_addr))); SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", (void *)ro->ro_nh); if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { /* failed to prepend data, give up */ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); sctp_m_freem(m); return (ENOMEM); } SCTP_ATTACH_CHAIN(o_pak, m, packet_length); if (port) { if (use_zero_crc) { SCTP_STAT_INCR(sctps_sendzerocrc); } else { sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); } if (V_udp_cksum) { SCTP_ENABLE_UDP_CSUM(o_pak); } } else { if (use_zero_crc) { SCTP_STAT_INCR(sctps_sendzerocrc); } else { m->m_pkthdr.csum_flags = CSUM_SCTP; m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); } } #ifdef SCTP_PACKET_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) sctp_packet_log(o_pak); #endif /* send it out. table id is taken from stcb */ SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr); SCTP_IP_OUTPUT(ret, o_pak, ro, inp, vrf_id); if (port) { UDPSTAT_INC(udps_opackets); } SCTP_STAT_INCR(sctps_sendpackets); SCTP_STAT_INCR_COUNTER64(sctps_outpackets); if (ret) SCTP_STAT_INCR(sctps_senderrors); SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret); if (net == NULL) { /* free tempy routes */ RO_NHFREE(ro); } else { if ((ro->ro_nh != NULL) && (net->ro._s_addr) && ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) { uint32_t mtu; mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh); if (mtu > 0) { if (net->port) { mtu -= sizeof(struct udphdr); } if (mtu < net->mtu) { net->mtu = mtu; if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) { sctp_pathmtu_adjustment(stcb, mtu, true); } } } } else if (ro->ro_nh == NULL) { /* route was freed */ if (net->ro._s_addr && net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; } net->src_addr_selected = 0; } } return (ret); } #endif #ifdef INET6 case AF_INET6: { uint32_t flowlabel, flowinfo; struct ip6_hdr *ip6h; struct route_in6 ip6route; struct ifnet *ifp; struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; int prev_scope = 0; struct sockaddr_in6 lsa6_storage; int error; u_short prev_port = 0; int len; if (net) { flowlabel = net->flowlabel; } else if (stcb) { flowlabel = stcb->asoc.default_flowlabel; } else { flowlabel = inp->sctp_ep.default_flowlabel; } if (flowlabel == 0) { /* * This means especially, that it is not set * at the SCTP layer. So use the value from * the IP layer. 
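 *
 * The label is then folded into the 32-bit flow word as
 * (6 << 28) | (traffic class << 20) | (20-bit flow label), which is
 * exactly what the shift sequence below constructs before the htonl().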
*/ flowlabel = ntohl(((struct inpcb *)inp)->inp_flow); } flowlabel &= 0x000fffff; len = SCTP_MIN_OVERHEAD; if (port) { len += sizeof(struct udphdr); } newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); if (newm == NULL) { sctp_m_freem(m); SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_ALIGN_TO_END(newm, len); SCTP_BUF_LEN(newm) = len; SCTP_BUF_NEXT(newm) = m; m = newm; if (net != NULL) { m->m_pkthdr.flowid = net->flowid; M_HASHTYPE_SET(m, net->flowtype); } else { m->m_pkthdr.flowid = mflowid; M_HASHTYPE_SET(m, mflowtype); } packet_length = sctp_calculate_len(m); ip6h = mtod(m, struct ip6_hdr *); /* protect *sin6 from overwrite */ sin6 = (struct sockaddr_in6 *)to; tmp = *sin6; sin6 = &tmp; /* KAME hack: embed scopeid */ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); sctp_m_freem(m); return (EINVAL); } if (net == NULL) { memset(&ip6route, 0, sizeof(ip6route)); ro = (sctp_route_t *)&ip6route; memcpy(&ro->ro_dst, sin6, sin6->sin6_len); } else { ro = (sctp_route_t *)&net->ro; } /* * We assume here that inp_flow is in host byte * order within the TCB! */ if (tos_value == 0) { /* * This means especially, that it is not set * at the SCTP layer. So use the value from * the IP layer. */ tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff; } tos_value &= 0xfc; if (ecn_ok) { tos_value |= sctp_get_ect(stcb); } flowinfo = 0x06; flowinfo <<= 8; flowinfo |= tos_value; flowinfo <<= 20; flowinfo |= flowlabel; ip6h->ip6_flow = htonl(flowinfo); if (port) { ip6h->ip6_nxt = IPPROTO_UDP; } else { ip6h->ip6_nxt = IPPROTO_SCTP; } ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr))); ip6h->ip6_dst = sin6->sin6_addr; /* * Add SRC address selection here: we can only reuse * to a limited degree the kame src-addr-sel, since * we can try their selection but it may not be * bound. 
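 *
 * Note the KAME convention used throughout this block: the scope id is
 * embedded into link-local addresses with sa6_embedscope() before any
 * lookup and stripped again with sa6_recoverscope() afterwards, so
 * fe80::/10 addresses stay unambiguous while routing.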
*/ memset(&lsa6_tmp, 0, sizeof(lsa6_tmp)); lsa6_tmp.sin6_family = AF_INET6; lsa6_tmp.sin6_len = sizeof(lsa6_tmp); lsa6 = &lsa6_tmp; if (net && out_of_asoc_ok == 0) { if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; RO_NHFREE(ro); } if (net->src_addr_selected == 0) { sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; /* KAME hack: embed scopeid */ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); sctp_m_freem(m); return (EINVAL); } /* Cache the source address */ net->ro._s_addr = sctp_source_address_selection(inp, stcb, ro, net, 0, vrf_id); (void)sa6_recoverscope(sin6); net->src_addr_selected = 1; } if (net->ro._s_addr == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n"); net->src_addr_selected = 0; sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr; } else { sin6 = (struct sockaddr_in6 *)&ro->ro_dst; /* KAME hack: embed scopeid */ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); sctp_m_freem(m); return (EINVAL); } if (over_addr == NULL) { struct sctp_ifa *_lsrc; _lsrc = sctp_source_address_selection(inp, stcb, ro, net, out_of_asoc_ok, vrf_id); if (_lsrc == NULL) { sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr; sctp_free_ifa(_lsrc); } else { lsa6->sin6_addr = over_addr->sin6.sin6_addr; SCTP_RTALLOC(ro, vrf_id, inp->fibnum); } (void)sa6_recoverscope(sin6); } lsa6->sin6_port = inp->sctp_lport; if (ro->ro_nh == NULL) { /* * src addr selection failed to find a route * (or valid source addr), so we can't get * there from here! */ sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } /* * XXX: sa6 may not have a valid sin6_scope_id in * the non-SCOPEDROUTING case. 
*/ memset(&lsa6_storage, 0, sizeof(lsa6_storage)); lsa6_storage.sin6_family = AF_INET6; lsa6_storage.sin6_len = sizeof(lsa6_storage); lsa6_storage.sin6_addr = lsa6->sin6_addr; if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error); sctp_m_freem(m); return (error); } /* XXX */ lsa6_storage.sin6_addr = lsa6->sin6_addr; lsa6_storage.sin6_port = inp->sctp_lport; lsa6 = &lsa6_storage; ip6h->ip6_src = lsa6->sin6_addr; if (port) { if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { sctp_handle_no_route(stcb, net, so_locked); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); sctp_m_freem(m); return (EHOSTUNREACH); } udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); udp->uh_dport = port; udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr))); udp->uh_sum = 0; sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); } else { sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); } sctphdr->src_port = src_port; sctphdr->dest_port = dest_port; sctphdr->v_tag = v_tag; sctphdr->checksum = 0; /* * We set the hop limit now since there is a good * chance that our ro pointer is now filled */ ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro); ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); #ifdef SCTP_DEBUG /* Copy to be sure something bad is not happening */ sin6->sin6_addr = ip6h->ip6_dst; lsa6->sin6_addr = ip6h->ip6_src; #endif SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n"); SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6); SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6); if (net) { sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; /* * preserve the port and scope for link * local send */ prev_scope = sin6->sin6_scope_id; prev_port = sin6->sin6_port; } if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { /* failed to prepend data, give up */ sctp_m_freem(m); SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_ATTACH_CHAIN(o_pak, m, packet_length); if (port) { sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) { udp->uh_sum = 0xffff; } } else { m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); } /* send it out. 
table id is taken from stcb */ #ifdef SCTP_PACKET_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) sctp_packet_log(o_pak); #endif SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr); SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, inp, vrf_id); if (net) { /* for link local this must be done */ sin6->sin6_scope_id = prev_scope; sin6->sin6_port = prev_port; } SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); if (port) { UDPSTAT_INC(udps_opackets); } SCTP_STAT_INCR(sctps_sendpackets); SCTP_STAT_INCR_COUNTER64(sctps_outpackets); if (ret) { SCTP_STAT_INCR(sctps_senderrors); } if (net == NULL) { /* Now if we had a temp route free it */ RO_NHFREE(ro); } else { /* * PMTU check versus smallest asoc MTU goes * here */ if (ro->ro_nh == NULL) { /* Route was freed */ if (net->ro._s_addr && net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; } net->src_addr_selected = 0; } if ((ro->ro_nh != NULL) && (net->ro._s_addr) && ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) { uint32_t mtu; mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh); if (mtu > 0) { if (net->port) { mtu -= sizeof(struct udphdr); } if (mtu < net->mtu) { net->mtu = mtu; if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) { sctp_pathmtu_adjustment(stcb, mtu, false); } } } } else if (ifp != NULL) { if ((ND_IFINFO(ifp)->linkmtu > 0) && (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) { sctp_pathmtu_adjustment(stcb, ND_IFINFO(ifp)->linkmtu, false); } } } return (ret); } #endif default: SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family); sctp_m_freem(m); SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); return (EFAULT); } } void sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked) { struct mbuf *m, *m_last; struct sctp_nets *net; struct sctp_init_chunk *init; struct sctp_supported_addr_param *sup_addr; struct sctp_adaptation_layer_indication *ali; struct sctp_zero_checksum_acceptable *zero_chksum; struct sctp_supported_chunk_types_param *pr_supported; struct sctp_paramhdr *ph; int cnt_inits_to = 0; int error; uint16_t num_ext, chunk_len, padding_len, parameter_len; /* INIT's always go to the primary (and usually ONLY address) */ net = stcb->asoc.primary_destination; if (net == NULL) { net = TAILQ_FIRST(&stcb->asoc.nets); if (net == NULL) { /* TSNH */ return; } /* we confirm any address we send an INIT to */ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; (void)sctp_set_primary_addr(stcb, NULL, net); } else { /* we confirm any address we send an INIT to */ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; } SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n"); #ifdef INET6 if (net->ro._l_addr.sa.sa_family == AF_INET6) { /* * special hook, if we are sending to link local it will not * show up in our private address count. */ if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr)) cnt_inits_to = 1; } #endif if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { /* This case should not happen */ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n"); return; } /* start the INIT timer */ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA); if (m == NULL) { /* No memory, INIT timer will re-attempt. 
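 * The timer was armed just above via sctp_timer_start(), so when it
 * fires this function is re-entered and the allocation retried.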
*/ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n"); return; } chunk_len = (uint16_t)sizeof(struct sctp_init_chunk); padding_len = 0; /* Now lets put the chunk header in place */ init = mtod(m, struct sctp_init_chunk *); /* now the chunk header */ init->ch.chunk_type = SCTP_INITIATION; init->ch.chunk_flags = 0; /* fill in later from mbuf we build */ init->ch.chunk_length = 0; /* place in my tag */ init->init.initiate_tag = htonl(stcb->asoc.my_vtag); /* set up some of the credits. */ init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0, SCTP_MINIMAL_RWND)); init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); init->init.initial_tsn = htonl(stcb->asoc.init_seq_number); /* Adaptation layer indication parameter */ if (inp->sctp_ep.adaptation_layer_indicator_provided) { parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len); ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); ali->ph.param_length = htons(parameter_len); ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator); chunk_len += parameter_len; } /* ECN parameter */ if (stcb->asoc.ecn_supported == 1) { parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len); ph->param_type = htons(SCTP_ECN_CAPABLE); ph->param_length = htons(parameter_len); chunk_len += parameter_len; } /* PR-SCTP supported parameter */ if (stcb->asoc.prsctp_supported == 1) { parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len); ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); ph->param_length = htons(parameter_len); chunk_len += parameter_len; } /* Zero checksum acceptable parameter */ if (stcb->asoc.rcv_edmid != SCTP_EDMID_NONE) { parameter_len = (uint16_t)sizeof(struct sctp_zero_checksum_acceptable); zero_chksum = (struct sctp_zero_checksum_acceptable *)(mtod(m, caddr_t)+chunk_len); zero_chksum->ph.param_type = htons(SCTP_ZERO_CHECKSUM_ACCEPTABLE); zero_chksum->ph.param_length = htons(parameter_len); zero_chksum->edmid = htonl(stcb->asoc.rcv_edmid); chunk_len += parameter_len; } /* Add NAT friendly parameter. 
*/ if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) { parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len); ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); ph->param_length = htons(parameter_len); chunk_len += parameter_len; } /* And now tell the peer which extensions we support */ num_ext = 0; pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len); if (stcb->asoc.prsctp_supported == 1) { pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; if (stcb->asoc.idata_supported) { pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN; } } if (stcb->asoc.auth_supported == 1) { pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; } if (stcb->asoc.asconf_supported == 1) { pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; } if (stcb->asoc.reconfig_supported == 1) { pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; } if (stcb->asoc.idata_supported) { pr_supported->chunk_types[num_ext++] = SCTP_IDATA; } if (stcb->asoc.nrsack_supported == 1) { pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; } if (stcb->asoc.pktdrop_supported == 1) { pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; } if (num_ext > 0) { parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); pr_supported->ph.param_length = htons(parameter_len); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; } /* add authentication parameters */ if (stcb->asoc.auth_supported) { /* attach RANDOM parameter, if available */ if (stcb->asoc.authinfo.random != NULL) { struct sctp_auth_random *randp; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len); parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len; /* random key already contains the header */ memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; } /* add HMAC_ALGO parameter */ if (stcb->asoc.local_hmacs != NULL) { struct sctp_auth_hmac_algo *hmacs; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len); parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) + stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t)); hmacs->ph.param_type = htons(SCTP_HMAC_LIST); hmacs->ph.param_length = htons(parameter_len); sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; } /* add CHUNKS parameter */ if (stcb->asoc.local_auth_chunks != NULL) { struct sctp_auth_chunk_list *chunks; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len); parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) + sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks)); chunks->ph.param_type = htons(SCTP_CHUNK_LIST); chunks->ph.param_length = htons(parameter_len); sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types); padding_len = 
SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; } } /* now any cookie time extensions */ if (stcb->asoc.cookie_preserve_req > 0) { struct sctp_cookie_perserve_param *cookie_preserve; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param); cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len); cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); cookie_preserve->ph.param_length = htons(parameter_len); cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); stcb->asoc.cookie_preserve_req = 0; chunk_len += parameter_len; } if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) { uint8_t i; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); if (stcb->asoc.scope.ipv4_addr_legal) { parameter_len += (uint16_t)sizeof(uint16_t); } if (stcb->asoc.scope.ipv6_addr_legal) { parameter_len += (uint16_t)sizeof(uint16_t); } sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len); sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); sup_addr->ph.param_length = htons(parameter_len); i = 0; if (stcb->asoc.scope.ipv4_addr_legal) { sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS); } if (stcb->asoc.scope.ipv6_addr_legal) { sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS); } padding_len = 4 - 2 * i; chunk_len += parameter_len; } SCTP_BUF_LEN(m) = chunk_len; /* now the addresses */ /* * To optimize this we could put the scoping stuff into a structure * and remove the individual uint8's from the assoc structure. Then * we could just sifa in the address within the stcb. But for now * this is a quick hack to get the address stuff teased apart. */ m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len); init->ch.chunk_length = htons(chunk_len); if (padding_len > 0) { if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) { sctp_m_freem(m); return; } } SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n"); if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, (struct sockaddr *)&net->ro._l_addr, m, 0, NULL, 0, 0, 0, 0, inp->sctp_lport, stcb->rport, htonl(0), net->port, NULL, 0, 0, false, so_locked))) { SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error); if (error == ENOBUFS) { stcb->asoc.ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } } else { stcb->asoc.ifp_had_enobuf = 0; } SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); } struct mbuf * sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly, int *cookie_found, uint32_t *edmid) { /* * Given a mbuf containing an INIT or INIT-ACK with the param_offset * being equal to the beginning of the params i.e. (iphlen + * sizeof(struct sctp_init_msg) parse through the parameters to the * end of the mbuf verifying that all parameters are known. * * For unknown parameters build and return a mbuf with * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop * processing this chunk stop, and set *abort_processing to 1. * * By having param_offset be pre-set to where parameters begin it is * hoped that this routine may be reused in the future by new * features. 
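* Wire format reminder: every parameter is a 32-bit aligned TLV, a 16-bit type followed by a 16-bit length covering type, length and value, with the value zero-padded out to the next 4-byte boundary; SCTP_SIZE32() below computes that padded size.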
*/ struct sctp_zero_checksum_acceptable zero_chksum, *zero_chksum_p; struct sctp_paramhdr *phdr, params; struct mbuf *mat, *m_tmp, *op_err, *op_err_last; int at, limit, pad_needed; uint16_t ptype, plen, padded_size; *abort_processing = 0; if (cookie_found != NULL) { *cookie_found = 0; } if (edmid != NULL) { *edmid = SCTP_EDMID_NONE; } mat = in_initpkt; limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); at = param_offset; op_err = NULL; op_err_last = NULL; pad_needed = 0; SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); phdr = sctp_get_next_param(mat, at, &params, sizeof(params)); while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { ptype = ntohs(phdr->param_type); plen = ntohs(phdr->param_length); if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { /* wacked parameter */ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); goto invalid_size; } limit -= SCTP_SIZE32(plen); /*- * All parameters for all chunks that we know/understand are * listed here. We process them other places and make * appropriate stop actions per the upper bits. However this * is the generic routine processor's can call to get back * an operr.. to either incorporate (init-ack) or send. */ padded_size = SCTP_SIZE32(plen); switch (ptype) { /* Param's with variable size */ case SCTP_HEARTBEAT_INFO: case SCTP_UNRECOG_PARAM: case SCTP_ERROR_CAUSE_IND: /* ok skip fwd */ at += padded_size; break; case SCTP_STATE_COOKIE: if (cookie_found != NULL) { *cookie_found = 1; } at += padded_size; break; /* Param's with variable size within a range */ case SCTP_CHUNK_LIST: case SCTP_SUPPORTED_CHUNK_EXT: if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_SUPPORTED_ADDRTYPE: if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_ZERO_CHECKSUM_ACCEPTABLE: if (padded_size != sizeof(struct sctp_zero_checksum_acceptable)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error checksum acceptable %d\n", plen); goto invalid_size; } if (edmid != NULL) { phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)&zero_chksum, sizeof(struct sctp_zero_checksum_acceptable)); if (phdr != NULL) { zero_chksum_p = (struct sctp_zero_checksum_acceptable *)phdr; *edmid = ntohl(zero_chksum_p->edmid); } } at += padded_size; break; case SCTP_RANDOM: if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_SET_PRIM_ADDR: case SCTP_DEL_IP_ADDRESS: case SCTP_ADD_IP_ADDRESS: if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && (padded_size != sizeof(struct sctp_asconf_addr_param))) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); goto invalid_size; } at += padded_size; break; /* Param's with a fixed size */ case SCTP_IPV4_ADDRESS: if (padded_size != sizeof(struct sctp_ipv4addr_param)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_IPV6_ADDRESS: if (padded_size != sizeof(struct sctp_ipv6addr_param)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen); goto invalid_size; } at += padded_size;
break; case SCTP_COOKIE_PRESERVE: if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_HAS_NAT_SUPPORT: if (padded_size != sizeof(struct sctp_paramhdr)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error nat support %d\n", plen); goto invalid_size; } *nat_friendly = 1; at += padded_size; break; case SCTP_PRSCTP_SUPPORTED: if (padded_size != sizeof(struct sctp_paramhdr)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_ECN_CAPABLE: if (padded_size != sizeof(struct sctp_paramhdr)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_ULP_ADAPTATION: if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_SUCCESS_REPORT: if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); goto invalid_size; } at += padded_size; break; case SCTP_HOSTNAME_ADDRESS: { /* Hostname parameters are deprecated. */ struct sctp_gen_error_cause *cause; int l_len; SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n"); *abort_processing = 1; sctp_m_freem(op_err); op_err = NULL; op_err_last = NULL; #ifdef INET6 l_len = SCTP_MIN_OVERHEAD; #else l_len = SCTP_MIN_V4_OVERHEAD; #endif l_len += sizeof(struct sctp_chunkhdr); l_len += sizeof(struct sctp_gen_error_cause); op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); if (op_err != NULL) { /* * Pre-reserve space for IP, SCTP, * and chunk header. */ #ifdef INET6 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); #else SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); #endif SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); cause = mtod(op_err, struct sctp_gen_error_cause *); cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen)); SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT); if (SCTP_BUF_NEXT(op_err) == NULL) { sctp_m_freem(op_err); op_err = NULL; op_err_last = NULL; } } return (op_err); } default: /* * we do not recognize the parameter figure out what * we do. */ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); if ((ptype & 0x4000) == 0x4000) { /* Report bit is set?? 
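The top two bits of the parameter type encode the unrecognized-parameter action (RFC 4960, section 3.2.1): 0x4000 set means report it back in an Unrecognized Parameter cause, built below, and 0x8000 (tested at more_processing) decides whether to keep parsing or stop.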
*/ SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); if (op_err == NULL) { int l_len; /* Ok need to try to get an mbuf */ #ifdef INET6 l_len = SCTP_MIN_OVERHEAD; #else l_len = SCTP_MIN_V4_OVERHEAD; #endif l_len += sizeof(struct sctp_chunkhdr); l_len += sizeof(struct sctp_paramhdr); op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); if (op_err) { SCTP_BUF_LEN(op_err) = 0; #ifdef INET6 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); #else SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); #endif SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); op_err_last = op_err; } } if (op_err != NULL) { /* If we have space */ struct sctp_paramhdr *param; if (pad_needed > 0) { op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed); } if (op_err_last == NULL) { sctp_m_freem(op_err); op_err = NULL; op_err_last = NULL; goto more_processing; } if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) { m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); if (m_tmp == NULL) { sctp_m_freem(op_err); op_err = NULL; op_err_last = NULL; goto more_processing; } SCTP_BUF_LEN(m_tmp) = 0; SCTP_BUF_NEXT(m_tmp) = NULL; SCTP_BUF_NEXT(op_err_last) = m_tmp; op_err_last = m_tmp; } param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t)+SCTP_BUF_LEN(op_err_last)); param->param_type = htons(SCTP_UNRECOG_PARAM); param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen); SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr); SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT); if (SCTP_BUF_NEXT(op_err_last) == NULL) { sctp_m_freem(op_err); op_err = NULL; op_err_last = NULL; goto more_processing; } else { while (SCTP_BUF_NEXT(op_err_last) != NULL) { op_err_last = SCTP_BUF_NEXT(op_err_last); } } if (plen % 4 != 0) { pad_needed = 4 - (plen % 4); } else { pad_needed = 0; } } } more_processing: if ((ptype & 0x8000) == 0x0000) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); return (op_err); } else { /* skip this chunk and continue processing */ SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); at += SCTP_SIZE32(plen); } break; } phdr = sctp_get_next_param(mat, at, &params, sizeof(params)); } return (op_err); invalid_size: SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); *abort_processing = 1; sctp_m_freem(op_err); op_err = NULL; op_err_last = NULL; if (phdr != NULL) { struct sctp_paramhdr *param; int l_len; #ifdef INET6 l_len = SCTP_MIN_OVERHEAD; #else l_len = SCTP_MIN_V4_OVERHEAD; #endif l_len += sizeof(struct sctp_chunkhdr); l_len += (2 * sizeof(struct sctp_paramhdr)); op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); if (op_err) { SCTP_BUF_LEN(op_err) = 0; #ifdef INET6 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); #else SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); #endif SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr); param = mtod(op_err, struct sctp_paramhdr *); param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); param->param_length = htons(2 * sizeof(struct sctp_paramhdr)); param++; param->param_type = htons(ptype); param->param_length = htons(plen); } } return (op_err); } /* * Given a INIT chunk, look through the parameters to verify that there * are no new addresses. * Return true, if there is a new address or there is a problem parsing the parameters. Provide an optional error cause used when sending an ABORT.
* Return false, if there are no new addresses and there is no problem in parameter processing. */ static bool sctp_are_there_new_addresses(struct sctp_association *asoc, struct mbuf *in_initpkt, int offset, int limit, struct sockaddr *src, struct mbuf **op_err) { struct sockaddr *sa_touse; struct sockaddr *sa; struct sctp_paramhdr *phdr, params; struct sctp_nets *net; #ifdef INET struct sockaddr_in sin4, *sa4; #endif #ifdef INET6 struct sockaddr_in6 sin6, *sa6; #endif uint16_t ptype, plen; bool fnd, check_src; *op_err = NULL; #ifdef INET memset(&sin4, 0, sizeof(sin4)); sin4.sin_family = AF_INET; sin4.sin_len = sizeof(sin4); #endif #ifdef INET6 memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(sin6); #endif /* First what about the src address of the pkt ? */ check_src = false; switch (src->sa_family) { #ifdef INET case AF_INET: if (asoc->scope.ipv4_addr_legal) { check_src = true; } break; #endif #ifdef INET6 case AF_INET6: if (asoc->scope.ipv6_addr_legal) { check_src = true; } break; #endif default: /* TSNH */ break; } if (check_src) { fnd = false; TAILQ_FOREACH(net, &asoc->nets, sctp_next) { sa = (struct sockaddr *)&net->ro._l_addr; if (sa->sa_family == src->sa_family) { #ifdef INET if (sa->sa_family == AF_INET) { struct sockaddr_in *src4; sa4 = (struct sockaddr_in *)sa; src4 = (struct sockaddr_in *)src; if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { fnd = true; break; } } #endif #ifdef INET6 if (sa->sa_family == AF_INET6) { struct sockaddr_in6 *src6; sa6 = (struct sockaddr_in6 *)sa; src6 = (struct sockaddr_in6 *)src; if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { fnd = true; break; } } #endif } } if (!fnd) { /* * If sending an ABORT in case of an additional * address, don't use the new address error cause. * This looks no different than if no listener was * present. 
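* The generic diagnostic cause used for the "new address" case is deliberate: a distinct error would let a sender probe which addresses an association already contains.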
*/ *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added"); return (true); } } /* Ok so far lets munge through the rest of the packet */ offset += sizeof(struct sctp_init_chunk); phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params)); while (phdr) { sa_touse = NULL; ptype = ntohs(phdr->param_type); plen = ntohs(phdr->param_length); if (offset + plen > limit) { *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Partial parameter"); return (true); } if (plen < sizeof(struct sctp_paramhdr)) { *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length too small"); return (true); } switch (ptype) { #ifdef INET case SCTP_IPV4_ADDRESS: { struct sctp_ipv4addr_param *p4, p4_buf; if (plen != sizeof(struct sctp_ipv4addr_param)) { *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal"); return (true); } phdr = sctp_get_next_param(in_initpkt, offset, (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); if (phdr == NULL) { *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, ""); return (true); } if (asoc->scope.ipv4_addr_legal) { p4 = (struct sctp_ipv4addr_param *)phdr; sin4.sin_addr.s_addr = p4->addr; sa_touse = (struct sockaddr *)&sin4; } break; } #endif #ifdef INET6 case SCTP_IPV6_ADDRESS: { struct sctp_ipv6addr_param *p6, p6_buf; if (plen != sizeof(struct sctp_ipv6addr_param)) { *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal"); return (true); } phdr = sctp_get_next_param(in_initpkt, offset, (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); if (phdr == NULL) { *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, ""); return (true); } if (asoc->scope.ipv6_addr_legal) { p6 = (struct sctp_ipv6addr_param *)phdr; memcpy((caddr_t)&sin6.sin6_addr, p6->addr, sizeof(p6->addr)); sa_touse = (struct sockaddr *)&sin6; } break; } #endif default: sa_touse = NULL; break; } if (sa_touse) { /* ok, sa_touse points to one to check */ fnd = false; TAILQ_FOREACH(net, &asoc->nets, sctp_next) { sa = (struct sockaddr *)&net->ro._l_addr; if (sa->sa_family != sa_touse->sa_family) { continue; } #ifdef INET if (sa->sa_family == AF_INET) { sa4 = (struct sockaddr_in *)sa; if (sa4->sin_addr.s_addr == sin4.sin_addr.s_addr) { fnd = true; break; } } #endif #ifdef INET6 if (sa->sa_family == AF_INET6) { sa6 = (struct sockaddr_in6 *)sa; if (SCTP6_ARE_ADDR_EQUAL( sa6, &sin6)) { fnd = true; break; } } #endif } if (!fnd) { /* * If sending an ABORT in case of an * additional address, don't use the new * address error cause. This looks no * different than if no listener was * present. */ *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added"); return (true); } } offset += SCTP_SIZE32(plen); if (offset >= limit) { break; } phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params)); } return (false); } /* * Given a MBUF chain that was sent into us containing an INIT. Build a * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done * a pullup to include IPv6/4header, SCTP header and initial part of INIT * message (i.e. the struct sctp_init_msg).
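* Note that no TCB is allocated here: everything needed to create the association later is packed into the state cookie below, HMAC-signed, and only acted upon when a valid COOKIE-ECHO comes back, which keeps this side stateless against INIT floods.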
*/ void sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *src_net, struct mbuf *init_pkt, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_chunk *init_chk, uint8_t mflowtype, uint32_t mflowid, uint32_t vrf_id, uint16_t port) { struct sctp_association *asoc; struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err; struct sctp_init_ack_chunk *initack; struct sctp_adaptation_layer_indication *ali; struct sctp_zero_checksum_acceptable *zero_chksum; struct sctp_supported_chunk_types_param *pr_supported; struct sctp_paramhdr *ph; union sctp_sockstore *over_addr; struct sctp_scoping scp; struct timeval now; #ifdef INET struct sockaddr_in *dst4 = (struct sockaddr_in *)dst; struct sockaddr_in *src4 = (struct sockaddr_in *)src; struct sockaddr_in *sin; #endif #ifdef INET6 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst; struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src; struct sockaddr_in6 *sin6; #endif struct sockaddr *to; struct sctp_state_cookie stc; struct sctp_nets *net = NULL; uint8_t *signature = NULL; int cnt_inits_to = 0; uint16_t his_limit, i_want; int abort_flag; int nat_friendly = 0; int error; struct socket *so; uint32_t edmid; uint16_t num_ext, chunk_len, padding_len, parameter_len; bool use_zero_crc; if (stcb) { asoc = &stcb->asoc; } else { asoc = NULL; } if ((asoc != NULL) && (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) { if (sctp_are_there_new_addresses(asoc, init_pkt, offset, offset + ntohs(init_chk->ch.chunk_length), src, &op_err)) { /* * new addresses, out of here in non-cookie-wait * states */ sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, mflowtype, mflowid, inp->fibnum, vrf_id, port); return; } if (src_net != NULL && (src_net->port != port)) { /* * change of remote encapsulation port, out of here * in non-cookie-wait states * * Send an ABORT, without an specific error cause. * This looks no different than if no listener was * present. */ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Remote encapsulation port changed"); sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, mflowtype, mflowid, inp->fibnum, vrf_id, port); return; } } abort_flag = 0; op_err = sctp_arethere_unrecognized_parameters(init_pkt, (offset + sizeof(struct sctp_init_chunk)), &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly, NULL, &edmid); if (abort_flag) { do_a_abort: if (op_err == NULL) { char msg[SCTP_DIAG_INFO_LEN]; SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); } sctp_send_abort(init_pkt, iphlen, src, dst, sh, init_chk->init.initiate_tag, op_err, mflowtype, mflowid, inp->fibnum, vrf_id, port); return; } m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (m == NULL) { /* No memory, INIT timer will re-attempt. */ sctp_m_freem(op_err); return; } chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk); padding_len = 0; /* * We might not overwrite the identification[] completely and on * some platforms time_entered will contain some padding. Therefore * zero out the cookie to avoid putting uninitialized memory on the * wire. 
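* The cookie is sent in cleartext, so any uninitialized padding would leak kernel memory to the network.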
*/ memset(&stc, 0, sizeof(struct sctp_state_cookie)); /* the time I built cookie */ (void)SCTP_GETTIME_TIMEVAL(&now); stc.time_entered.tv_sec = now.tv_sec; stc.time_entered.tv_usec = now.tv_usec; /* populate any tie tags */ if (asoc != NULL) { /* unlock before tag selections */ stc.tie_tag_my_vtag = asoc->my_vtag_nonce; stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; stc.cookie_life = asoc->cookie_life; net = asoc->primary_destination; } else { stc.tie_tag_my_vtag = 0; stc.tie_tag_peer_vtag = 0; /* life I will award this cookie */ stc.cookie_life = inp->sctp_ep.def_cookie_life; } /* copy in the ports for later check */ stc.myport = sh->dest_port; stc.peerport = sh->src_port; /* * If we wanted to honor cookie life extensions, we would add to * stc.cookie_life. For now we should NOT honor any extension */ stc.site_scope = stc.local_scope = stc.loopback_scope = 0; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { stc.ipv6_addr_legal = 1; if (SCTP_IPV6_V6ONLY(inp)) { stc.ipv4_addr_legal = 0; } else { stc.ipv4_addr_legal = 1; } } else { stc.ipv6_addr_legal = 0; stc.ipv4_addr_legal = 1; } stc.ipv4_scope = 0; if (net == NULL) { to = src; switch (dst->sa_family) { #ifdef INET case AF_INET: { /* lookup address */ stc.address[0] = src4->sin_addr.s_addr; stc.address[1] = 0; stc.address[2] = 0; stc.address[3] = 0; stc.addr_type = SCTP_IPV4_ADDRESS; /* local from address */ stc.laddress[0] = dst4->sin_addr.s_addr; stc.laddress[1] = 0; stc.laddress[2] = 0; stc.laddress[3] = 0; stc.laddr_type = SCTP_IPV4_ADDRESS; /* scope_id is only for v6 */ stc.scope_id = 0; if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) || (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) { stc.ipv4_scope = 1; } /* Must use the address in this case */ if (sctp_is_address_on_local_host(src, vrf_id)) { stc.loopback_scope = 1; stc.ipv4_scope = 1; stc.site_scope = 1; stc.local_scope = 0; } break; } #endif #ifdef INET6 case AF_INET6: { stc.addr_type = SCTP_IPV6_ADDRESS; memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr)); stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr)); if (sctp_is_address_on_local_host(src, vrf_id)) { stc.loopback_scope = 1; stc.local_scope = 0; stc.site_scope = 1; stc.ipv4_scope = 1; } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) || IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) { /* * If the new destination or source * is a LINK_LOCAL we must have * common both site and local scope. * Don't set local scope though * since we must depend on the * source to be added implicitly. We * cannot assure just because we * share one link that all links are * common. */ stc.local_scope = 0; stc.site_scope = 1; stc.ipv4_scope = 1; /* * we start counting for the private * address stuff at 1. since the * link local we source from won't * show up in our scoped count. */ cnt_inits_to = 1; /* * pull out the scope_id from * incoming pkt */ } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) || IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) { /* * If the new destination or source * is SITE_LOCAL then we must have * site scope in common. 
*/ stc.site_scope = 1; } memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr)); stc.laddr_type = SCTP_IPV6_ADDRESS; break; } #endif default: /* TSNH */ goto do_a_abort; break; } } else { /* set the scope per the existing tcb */ #ifdef INET6 struct sctp_nets *lnet; #endif stc.loopback_scope = asoc->scope.loopback_scope; stc.ipv4_scope = asoc->scope.ipv4_local_scope; stc.site_scope = asoc->scope.site_scope; stc.local_scope = asoc->scope.local_scope; #ifdef INET6 /* Why do we not consider IPv4 LL addresses? */ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { /* * if we have a LL address, start * counting at 1. */ cnt_inits_to = 1; } } } #endif /* use the net pointer */ to = (struct sockaddr *)&net->ro._l_addr; switch (to->sa_family) { #ifdef INET case AF_INET: sin = (struct sockaddr_in *)to; stc.address[0] = sin->sin_addr.s_addr; stc.address[1] = 0; stc.address[2] = 0; stc.address[3] = 0; stc.addr_type = SCTP_IPV4_ADDRESS; if (net->src_addr_selected == 0) { /* * strange case here, the INIT should have * did the selection. */ net->ro._s_addr = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id); if (net->ro._s_addr == NULL) { sctp_m_freem(op_err); sctp_m_freem(m); return; } net->src_addr_selected = 1; } stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; stc.laddress[1] = 0; stc.laddress[2] = 0; stc.laddress[3] = 0; stc.laddr_type = SCTP_IPV4_ADDRESS; /* scope_id is only for v6 */ stc.scope_id = 0; break; #endif #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)to; memcpy(&stc.address, &sin6->sin6_addr, sizeof(struct in6_addr)); stc.addr_type = SCTP_IPV6_ADDRESS; stc.scope_id = sin6->sin6_scope_id; if (net->src_addr_selected == 0) { /* * strange case here, the INIT should have * done the selection. 
*/ net->ro._s_addr = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id); if (net->ro._s_addr == NULL) { sctp_m_freem(op_err); sctp_m_freem(m); return; } net->src_addr_selected = 1; } memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, sizeof(struct in6_addr)); stc.laddr_type = SCTP_IPV6_ADDRESS; break; #endif } } if (asoc != NULL) { stc.rcv_edmid = asoc->rcv_edmid; } else { stc.rcv_edmid = inp->rcv_edmid; } /* Now lets put the SCTP header in place */ initack = mtod(m, struct sctp_init_ack_chunk *); /* Save it off for quick ref */ stc.peers_vtag = ntohl(init_chk->init.initiate_tag); /* who are we */ memcpy(stc.identification, SCTP_VERSION_STRING, min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); memset(stc.reserved, 0, SCTP_RESERVE_SPACE); /* now the chunk header */ initack->ch.chunk_type = SCTP_INITIATION_ACK; initack->ch.chunk_flags = 0; /* fill in later from mbuf we build */ initack->ch.chunk_length = 0; /* place in my tag */ if ((asoc != NULL) && ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) { /* re-use the v-tags and init-seq here */ initack->init.initiate_tag = htonl(asoc->my_vtag); initack->init.initial_tsn = htonl(asoc->init_seq_number); } else { uint32_t vtag, itsn; if (asoc) { atomic_add_int(&asoc->refcnt, 1); SCTP_TCB_UNLOCK(stcb); new_tag: SCTP_INP_INFO_RLOCK(); vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); SCTP_INP_INFO_RUNLOCK(); if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { /* * Got a duplicate vtag on some guy behind a * nat make sure we don't use it. */ goto new_tag; } initack->init.initiate_tag = htonl(vtag); /* get a TSN to use too */ itsn = sctp_select_initial_TSN(&inp->sctp_ep); initack->init.initial_tsn = htonl(itsn); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&asoc->refcnt, 1); } else { SCTP_INP_INCR_REF(inp); SCTP_INP_RUNLOCK(inp); SCTP_INP_INFO_RLOCK(); vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); SCTP_INP_INFO_RUNLOCK(); initack->init.initiate_tag = htonl(vtag); /* get a TSN to use too */ initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); SCTP_INP_RLOCK(inp); SCTP_INP_DECR_REF(inp); } } /* save away my tag to */ stc.my_vtag = initack->init.initiate_tag; /* set up some of the credits. */ so = inp->sctp_socket; if (so == NULL) { /* memory problem */ sctp_m_freem(op_err); sctp_m_freem(m); return; } else { initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); } /* set what I want */ his_limit = ntohs(init_chk->init.num_inbound_streams); /* choose what I want */ if (asoc != NULL) { if (asoc->streamoutcnt > asoc->pre_open_streams) { i_want = asoc->streamoutcnt; } else { i_want = asoc->pre_open_streams; } } else { i_want = inp->sctp_ep.pre_open_stream_count; } if (his_limit < i_want) { /* I Want more :< */ initack->init.num_outbound_streams = init_chk->init.num_inbound_streams; } else { /* I can have what I want :> */ initack->init.num_outbound_streams = htons(i_want); } /* tell him his limit. 
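Stream counts only negotiate downward: our outbound count above is the minimum of what we want and the peer's advertised inbound limit, and the peer applies the same rule to the inbound limit we state here.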
*/ initack->init.num_inbound_streams = htons(inp->sctp_ep.max_open_streams_intome); /* adaptation layer indication parameter */ if (inp->sctp_ep.adaptation_layer_indicator_provided) { parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len); ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); ali->ph.param_length = htons(parameter_len); ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator); chunk_len += parameter_len; } /* ECN parameter */ if (((asoc != NULL) && (asoc->ecn_supported == 1)) || ((asoc == NULL) && (inp->ecn_supported == 1))) { parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len); ph->param_type = htons(SCTP_ECN_CAPABLE); ph->param_length = htons(parameter_len); chunk_len += parameter_len; } /* PR-SCTP supported parameter */ if (((asoc != NULL) && (asoc->prsctp_supported == 1)) || ((asoc == NULL) && (inp->prsctp_supported == 1))) { parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len); ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); ph->param_length = htons(parameter_len); chunk_len += parameter_len; } /* Zero checksum acceptable parameter */ if (((asoc != NULL) && (asoc->rcv_edmid != SCTP_EDMID_NONE)) || ((asoc == NULL) && (inp->rcv_edmid != SCTP_EDMID_NONE))) { parameter_len = (uint16_t)sizeof(struct sctp_zero_checksum_acceptable); zero_chksum = (struct sctp_zero_checksum_acceptable *)(mtod(m, caddr_t)+chunk_len); zero_chksum->ph.param_type = htons(SCTP_ZERO_CHECKSUM_ACCEPTABLE); zero_chksum->ph.param_length = htons(parameter_len); if (asoc != NULL) { zero_chksum->edmid = htonl(asoc->rcv_edmid); } else { zero_chksum->edmid = htonl(inp->rcv_edmid); } chunk_len += parameter_len; } /* Add NAT friendly parameter */ if (nat_friendly) { parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len); ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); ph->param_length = htons(parameter_len); chunk_len += parameter_len; } /* And now tell the peer which extensions we support */ num_ext = 0; pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len); if (((asoc != NULL) && (asoc->prsctp_supported == 1)) || ((asoc == NULL) && (inp->prsctp_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; if (((asoc != NULL) && (asoc->idata_supported == 1)) || ((asoc == NULL) && (inp->idata_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN; } } if (((asoc != NULL) && (asoc->auth_supported == 1)) || ((asoc == NULL) && (inp->auth_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; } if (((asoc != NULL) && (asoc->asconf_supported == 1)) || ((asoc == NULL) && (inp->asconf_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; } if (((asoc != NULL) && (asoc->reconfig_supported == 1)) || ((asoc == NULL) && (inp->reconfig_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; } if (((asoc != NULL) && (asoc->idata_supported == 1)) || ((asoc == NULL) && (inp->idata_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_IDATA; } if (((asoc != NULL) && (asoc->nrsack_supported == 1)) || ((asoc == NULL) && (inp->nrsack_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; } if (((asoc != NULL) && 
(asoc->pktdrop_supported == 1)) || ((asoc == NULL) && (inp->pktdrop_supported == 1))) { pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; } if (num_ext > 0) { parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); pr_supported->ph.param_length = htons(parameter_len); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; } /* add authentication parameters */ if (((asoc != NULL) && (asoc->auth_supported == 1)) || ((asoc == NULL) && (inp->auth_supported == 1))) { struct sctp_auth_random *randp; struct sctp_auth_hmac_algo *hmacs; struct sctp_auth_chunk_list *chunks; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } /* generate and add RANDOM parameter */ randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len); parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + SCTP_AUTH_RANDOM_SIZE_DEFAULT; randp->ph.param_type = htons(SCTP_RANDOM); randp->ph.param_length = htons(parameter_len); SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } /* add HMAC_ALGO parameter */ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len); parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) + sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, (uint8_t *)hmacs->hmac_ids); hmacs->ph.param_type = htons(SCTP_HMAC_LIST); hmacs->ph.param_length = htons(parameter_len); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; padding_len = 0; } /* add CHUNKS parameter */ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len); parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) + sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, chunks->chunk_types); chunks->ph.param_type = htons(SCTP_CHUNK_LIST); chunks->ph.param_length = htons(parameter_len); padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; } SCTP_BUF_LEN(m) = chunk_len; m_last = m; /* now the addresses */ /* * To optimize this we could put the scoping stuff into a structure * and remove the individual uint8's from the stc structure. Then we * could just sifa in the address within the stc.. but for now this * is a quick hack to get the address stuff teased apart. 
*/ scp.ipv4_addr_legal = stc.ipv4_addr_legal; scp.ipv6_addr_legal = stc.ipv6_addr_legal; scp.loopback_scope = stc.loopback_scope; scp.ipv4_local_scope = stc.ipv4_scope; scp.local_scope = stc.local_scope; scp.site_scope = stc.site_scope; m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last, cnt_inits_to, &padding_len, &chunk_len); /* padding_len can only be positive, if no addresses have been added */ if (padding_len > 0) { memset(mtod(m, caddr_t)+chunk_len, 0, padding_len); chunk_len += padding_len; SCTP_BUF_LEN(m) += padding_len; padding_len = 0; } /* tack on the operational error if present */ if (op_err) { parameter_len = 0; for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) { parameter_len += SCTP_BUF_LEN(m_tmp); } padding_len = SCTP_SIZE32(parameter_len) - parameter_len; SCTP_BUF_NEXT(m_last) = op_err; while (SCTP_BUF_NEXT(m_last) != NULL) { m_last = SCTP_BUF_NEXT(m_last); } chunk_len += parameter_len; } if (padding_len > 0) { m_last = sctp_add_pad_tombuf(m_last, padding_len); if (m_last == NULL) { /* Houston we have a problem, no space */ sctp_m_freem(m); return; } chunk_len += padding_len; padding_len = 0; } /* Now we must build a cookie */ m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature); if (m_cookie == NULL) { /* memory problem */ sctp_m_freem(m); return; } /* Now append the cookie to the end and update the space/size */ SCTP_BUF_NEXT(m_last) = m_cookie; parameter_len = 0; for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) { parameter_len += SCTP_BUF_LEN(m_tmp); if (SCTP_BUF_NEXT(m_tmp) == NULL) { m_last = m_tmp; } } padding_len = SCTP_SIZE32(parameter_len) - parameter_len; chunk_len += parameter_len; /* * Place in the size, but we don't include the last pad (if any) in * the INIT-ACK. */ initack->ch.chunk_length = htons(chunk_len); /* * Time to sign the cookie, we don't sign over the cookie signature * though thus we set trailer. */ (void)sctp_hmac_m(SCTP_HMAC, (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), (uint8_t *)signature, SCTP_SIGNATURE_SIZE); /* * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return * here since the timer will drive a retranmission. 
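* (Having no TCB, the retransmission is really the peer's doing: its INIT timer expires, the INIT arrives again, and this INIT-ACK gets rebuilt.)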
*/ if (padding_len > 0) { if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) { sctp_m_freem(m); return; } } if (stc.loopback_scope) { over_addr = (union sctp_sockstore *)dst; } else { over_addr = NULL; } if (asoc != NULL) { use_zero_crc = (asoc->rcv_edmid != SCTP_EDMID_NONE) && (asoc->rcv_edmid == edmid); } else { use_zero_crc = (inp->rcv_edmid != SCTP_EDMID_NONE) && (inp->rcv_edmid == edmid); } if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 0, 0, inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag, port, over_addr, mflowtype, mflowid, use_zero_crc, SCTP_SO_NOT_LOCKED))) { SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error); if (error == ENOBUFS) { if (asoc != NULL) { asoc->ifp_had_enobuf = 1; } SCTP_STAT_INCR(sctps_lowlevelerr); } } else { if (asoc != NULL) { asoc->ifp_had_enobuf = 0; } } SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } static void sctp_prune_prsctp(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_nonpad_sndrcvinfo *srcv, int dataout) { int freed_spc = 0; struct sctp_tmit_chunk *chk, *nchk; SCTP_TCB_LOCK_ASSERT(stcb); if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) { TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { /* * Look for chunks marked with the PR_SCTP flag AND * the buffer space flag. If the one being sent is * equal or greater priority then purge the old one * and free some space. */ if (PR_SCTP_BUF_ENABLED(chk->flags)) { /* * This one is PR-SCTP AND buffer space * limited type */ if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) { /* * Lower numbers equates to higher * priority. So if the one we are * looking at has a larger priority, * we want to drop the data and NOT * retransmit it. */ if (chk->data) { /* * We release the book_size * if the mbuf is here */ int ret_spc; uint8_t sent; if (chk->sent > SCTP_DATAGRAM_UNSENT) sent = 1; else sent = 0; ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, sent, SCTP_SO_LOCKED); freed_spc += ret_spc; if (freed_spc >= dataout) { return; } } /* if chunk was present */ } /* if of sufficient priority */ } /* if chunk has enabled */ } /* tailqforeach */ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { /* Here we must move to the sent queue and mark */ if (PR_SCTP_BUF_ENABLED(chk->flags)) { if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) { if (chk->data) { /* * We release the book_size * if the mbuf is here */ int ret_spc; ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 0, SCTP_SO_LOCKED); freed_spc += ret_spc; if (freed_spc >= dataout) { return; } } /* end if chk->data */ } /* end if right class */ } /* end if chk pr-sctp */ } /* tailqforeachsafe (chk) */ } /* if enabled in asoc */ } uint32_t sctp_get_frag_point(struct sctp_tcb *stcb) { struct sctp_association *asoc; uint32_t frag_point, overhead; asoc = &stcb->asoc; /* Consider IP header and SCTP common header. */ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { overhead = SCTP_MIN_OVERHEAD; } else { overhead = SCTP_MIN_V4_OVERHEAD; } /* Consider DATA/IDATA chunk header and AUTH header, if needed. 
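For example, an IPv6 association using I-DATA with no AUTH requirement on a 1500-byte smallest MTU gives 40 (IPv6 header) + 12 (SCTP common header) + 20 (I-DATA chunk header) = 72 bytes of overhead, hence a frag point of 1428.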
*/ if (asoc->idata_supported) { overhead += sizeof(struct sctp_idata_chunk); if (sctp_auth_is_required_chunk(SCTP_IDATA, asoc->peer_auth_chunks)) { overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id); } } else { overhead += sizeof(struct sctp_data_chunk); if (sctp_auth_is_required_chunk(SCTP_DATA, asoc->peer_auth_chunks)) { overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id); } } KASSERT(overhead % 4 == 0, ("overhead (%u) not a multiple of 4", overhead)); /* Consider padding. */ if (asoc->smallest_mtu % 4 > 0) { overhead += (asoc->smallest_mtu % 4); } KASSERT(asoc->smallest_mtu > overhead, ("Association MTU (%u) too small for overhead (%u)", asoc->smallest_mtu, overhead)); frag_point = asoc->smallest_mtu - overhead; KASSERT(frag_point % 4 == 0, ("frag_point (%u) not a multiple of 4", frag_point)); /* Honor MAXSEG socket option. */ if ((asoc->sctp_frag_point > 0) && (asoc->sctp_frag_point < frag_point)) { frag_point = asoc->sctp_frag_point; } return (frag_point); } static void sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp) { /* * We assume that the user wants PR_SCTP_TTL if the user provides a * positive lifetime but does not specify any PR_SCTP policy. */ if (PR_SCTP_ENABLED(sp->sinfo_flags)) { sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags); } else if (sp->timetolive > 0) { sp->sinfo_flags |= SCTP_PR_SCTP_TTL; sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags); } else { return; } switch (PR_SCTP_POLICY(sp->sinfo_flags)) { case CHUNK_FLAGS_PR_SCTP_BUF: /* * Time to live is a priority stored in tv_sec when doing * the buffer drop thing. */ sp->ts.tv_sec = sp->timetolive; sp->ts.tv_usec = 0; break; case CHUNK_FLAGS_PR_SCTP_TTL: { struct timeval tv; (void)SCTP_GETTIME_TIMEVAL(&sp->ts); tv.tv_sec = sp->timetolive / 1000; tv.tv_usec = (sp->timetolive * 1000) % 1000000; /* * TODO sctp_constants.h needs alternative time * macros when _KERNEL is undefined. */ timevaladd(&sp->ts, &tv); } break; case CHUNK_FLAGS_PR_SCTP_RTX: /* * Time to live is a the number or retransmissions stored in * tv_sec. */ sp->ts.tv_sec = sp->timetolive; sp->ts.tv_usec = 0; break; default: SCTPDBG(SCTP_DEBUG_USRREQ1, "Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags)); break; } } static int sctp_msg_append(struct sctp_tcb *stcb, struct sctp_nets *net, struct mbuf *m, struct sctp_nonpad_sndrcvinfo *srcv) { int error = 0; struct mbuf *at; struct sctp_stream_queue_pending *sp = NULL; struct sctp_stream_out *strm; SCTP_TCB_LOCK_ASSERT(stcb); /* * Given an mbuf chain, put it into the association send queue and * place it on the wheel */ if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) { /* Invalid stream number */ SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); error = EINVAL; goto out_now; } if ((stcb->asoc.stream_locked) && (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) { SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); error = EINVAL; goto out_now; } if ((stcb->asoc.strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) && (stcb->asoc.strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) { /* * Can't queue any data while stream reset is underway. */ if (stcb->asoc.strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) { error = EAGAIN; } else { error = EINVAL; } goto out_now; } /* Now can we send this? 
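Once a graceful shutdown is in progress (SHUTDOWN sent or received, or pending), the association may only drain, so new user data is rejected with EPIPE.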
*/ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) || (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) { /* got data while shutting down */ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EPIPE); error = EPIPE; goto out_now; } sctp_alloc_a_strmoq(stcb, sp); if (sp == NULL) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); error = ENOMEM; goto out_now; } sp->sinfo_flags = srcv->sinfo_flags; sp->timetolive = srcv->sinfo_timetolive; sp->ppid = srcv->sinfo_ppid; sp->context = srcv->sinfo_context; sp->fsn = 0; if (sp->sinfo_flags & SCTP_ADDR_OVER) { sp->net = net; atomic_add_int(&sp->net->ref_count, 1); } else { sp->net = NULL; } (void)SCTP_GETTIME_TIMEVAL(&sp->ts); sp->sid = srcv->sinfo_stream; sp->msg_is_complete = 1; sp->sender_all_done = 1; sp->some_taken = 0; sp->data = m; sp->tail_mbuf = NULL; sctp_set_prsctp_policy(sp); /* * We could in theory (for sendall) sifa the length in, but we would * still have to hunt through the chain since we need to setup the * tail_mbuf */ sp->length = 0; for (at = m; at; at = SCTP_BUF_NEXT(at)) { if (SCTP_BUF_NEXT(at) == NULL) sp->tail_mbuf = at; sp->length += SCTP_BUF_LEN(at); } if (srcv->sinfo_keynumber_valid) { sp->auth_keyid = srcv->sinfo_keynumber; } else { sp->auth_keyid = stcb->asoc.authinfo.active_keyid; } if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { sctp_auth_key_acquire(stcb, sp->auth_keyid); sp->holds_key_ref = 1; } strm = &stcb->asoc.strmout[srcv->sinfo_stream]; sctp_snd_sb_alloc(stcb, sp->length); atomic_add_int(&stcb->asoc.stream_queue_cnt, 1); TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp); m = NULL; out_now: if (m) { sctp_m_freem(m); } return (error); } static struct mbuf * sctp_copy_mbufchain(struct mbuf *clonechain, struct mbuf *outchain, struct mbuf **endofchain, int can_take_mbuf, int sizeofcpy, uint8_t copy_by_ref) { struct mbuf *m; struct mbuf *appendchain; caddr_t cp; int len; if (endofchain == NULL) { /* error */ error_out: if (outchain) sctp_m_freem(outchain); return (NULL); } if (can_take_mbuf) { appendchain = clonechain; } else { if (!copy_by_ref && (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) { /* Its not in a cluster */ if (*endofchain == NULL) { /* lets get a mbuf cluster */ if (outchain == NULL) { /* This is the general case */ new_mbuf: outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER); if (outchain == NULL) { goto error_out; } SCTP_BUF_LEN(outchain) = 0; *endofchain = outchain; /* get the prepend space */ SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4)); } else { /* * We really should not get a NULL * in endofchain */ /* find end */ m = outchain; while (m) { if (SCTP_BUF_NEXT(m) == NULL) { *endofchain = m; break; } m = SCTP_BUF_NEXT(m); } /* sanity */ if (*endofchain == NULL) { /* * huh, TSNH XXX maybe we * should panic */ sctp_m_freem(outchain); goto new_mbuf; } } /* get the new end of length */ len = (int)M_TRAILINGSPACE(*endofchain); } else { /* how much is left at the end? 
*/ len = (int)M_TRAILINGSPACE(*endofchain); } /* Find the end of the data, for appending */ cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain))); /* Now lets copy it out */ if (len >= sizeofcpy) { /* It all fits, copy it in */ m_copydata(clonechain, 0, sizeofcpy, cp); SCTP_BUF_LEN((*endofchain)) += sizeofcpy; } else { /* fill up the end of the chain */ if (len > 0) { m_copydata(clonechain, 0, len, cp); SCTP_BUF_LEN((*endofchain)) += len; /* now we need another one */ sizeofcpy -= len; } m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER); if (m == NULL) { /* We failed */ goto error_out; } SCTP_BUF_NEXT((*endofchain)) = m; *endofchain = m; cp = mtod((*endofchain), caddr_t); m_copydata(clonechain, len, sizeofcpy, cp); SCTP_BUF_LEN((*endofchain)) += sizeofcpy; } return (outchain); } else { /* copy the old fashion way */ appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT); #ifdef SCTP_MBUF_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY); } #endif } } if (appendchain == NULL) { /* error */ if (outchain) sctp_m_freem(outchain); return (NULL); } if (outchain) { /* tack on to the end */ if (*endofchain != NULL) { SCTP_BUF_NEXT(((*endofchain))) = appendchain; } else { m = outchain; while (m) { if (SCTP_BUF_NEXT(m) == NULL) { SCTP_BUF_NEXT(m) = appendchain; break; } m = SCTP_BUF_NEXT(m); } } /* * save off the end and update the end-chain position */ m = appendchain; while (m) { if (SCTP_BUF_NEXT(m) == NULL) { *endofchain = m; break; } m = SCTP_BUF_NEXT(m); } return (outchain); } else { /* save off the end and update the end-chain position */ m = appendchain; while (m) { if (SCTP_BUF_NEXT(m) == NULL) { *endofchain = m; break; } m = SCTP_BUF_NEXT(m); } return (appendchain); } } static int sctp_med_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc, int *num_out, int *reason_code, int control_only, int from_where, struct timeval *now, int *now_filled, uint32_t frag_point, int so_locked); static void sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, uint32_t val SCTP_UNUSED) { struct sctp_copy_all *ca; struct mbuf *m; int ret = 0; int added_control = 0; int un_sent, do_chunk_output = 1; struct sctp_association *asoc; struct sctp_nets *net; ca = (struct sctp_copy_all *)ptr; if (ca->m == NULL) { return; } if (ca->inp != inp) { /* TSNH */ return; } if (ca->sndlen > 0) { m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT); if (m == NULL) { /* can't copy so we are done */ ca->cnt_failed++; return; } #ifdef SCTP_MBUF_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(m, SCTP_MBUF_ICOPY); } #endif } else { m = NULL; } SCTP_TCB_LOCK_ASSERT(stcb); if (stcb->asoc.alternate) { net = stcb->asoc.alternate; } else { net = stcb->asoc.primary_destination; } if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { /* Abort this assoc with m as the user defined reason */ if (m != NULL) { SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT); } else { m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); if (m != NULL) { SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); } } if (m != NULL) { struct sctp_paramhdr *ph; ph = mtod(m, struct sctp_paramhdr *); ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen)); } /* * We add one here to keep the assoc from dis-appearing on * us. 
*/ atomic_add_int(&stcb->asoc.refcnt, 1); sctp_abort_an_association(inp, stcb, m, false, SCTP_SO_NOT_LOCKED); /* * sctp_abort_an_association calls sctp_free_asoc() free * association will NOT free it since we incremented the * refcnt .. we do this to prevent it being freed and things * getting tricky since we could end up (from free_asoc) * calling inpcb_free which would get a recursive lock call * to the iterator lock.. But as a consequence of that the * stcb will return to us un-locked.. since free_asoc * returns with either no TCB or the TCB unlocked, we must * relock.. to unlock in the iterator timer :-0 */ SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); goto no_chunk_output; } else { if (m != NULL) { ret = sctp_msg_append(stcb, net, m, &ca->sndrcv); } asoc = &stcb->asoc; if (ca->sndrcv.sinfo_flags & SCTP_EOF) { /* shutdown this assoc */ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { goto abort_anyway; } /* * there is nothing queued to send, so I'm * done... */ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { /* * only send SHUTDOWN the first time * through */ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); sctp_send_shutdown(stcb, net); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); added_control = 1; do_chunk_output = 0; } } else { /* * we still got (or just got) data to send, * so set SHUTDOWN_PENDING */ /* * XXX sockets draft says that SCTP_EOF * should be sent with no data. 
currently, * we will allow user data to be sent first * and move to SHUTDOWN-PENDING */ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; abort_anyway: SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); atomic_add_int(&stcb->asoc.refcnt, 1); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); atomic_subtract_int(&stcb->asoc.refcnt, 1); goto no_chunk_output; } } } } } un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb))); if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && (stcb->asoc.total_flight > 0) && (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { do_chunk_output = 0; } if (do_chunk_output) sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED); else if (added_control) { struct timeval now; int num_out, reason, now_filled = 0; (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, &reason, 1, 1, &now, &now_filled, sctp_get_frag_point(stcb), SCTP_SO_NOT_LOCKED); } no_chunk_output: if (ret) { ca->cnt_failed++; } else { ca->cnt_sent++; } } static void sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED) { struct sctp_copy_all *ca; ca = (struct sctp_copy_all *)ptr; /* * Do a notify here? Kacheong suggests that the notify be done at * the send time.. so you would push up a notification if any send * failed. Don't know if this is feasible since the only failures we * have is "memory" related and if you cannot get an mbuf to send * the data you surely can't get an mbuf to send up to notify the * user you can't send the data :-> */ /* now free everything */ if (ca->inp) { /* Lets clear the flag to allow others to run. 
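SCTP_PCB_FLAGS_SND_ITERATOR_UP is the per-endpoint lockout taken in sctp_sendall(); dropping it here lets the next SCTP_SENDALL request start its own iterator.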
*/ SCTP_INP_WLOCK(ca->inp); ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP; SCTP_INP_WUNLOCK(ca->inp); } sctp_m_freem(ca->m); SCTP_FREE(ca, SCTP_M_COPYAL); } static struct mbuf * sctp_copy_out_all(struct uio *uio, ssize_t len) { struct mbuf *ret, *at; ssize_t left, willcpy, cancpy, error; ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA); if (ret == NULL) { /* TSNH */ return (NULL); } left = len; SCTP_BUF_LEN(ret) = 0; /* save space for the data chunk header */ cancpy = (int)M_TRAILINGSPACE(ret); willcpy = min(cancpy, left); at = ret; while (left > 0) { /* Align data to the end */ error = uiomove(mtod(at, caddr_t), (int)willcpy, uio); if (error) { err_out_now: sctp_m_freem(at); return (NULL); } SCTP_BUF_LEN(at) = (int)willcpy; SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; left -= willcpy; if (left > 0) { SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA); if (SCTP_BUF_NEXT(at) == NULL) { goto err_out_now; } at = SCTP_BUF_NEXT(at); SCTP_BUF_LEN(at) = 0; cancpy = (int)M_TRAILINGSPACE(at); willcpy = min(cancpy, left); } } return (ret); } static int sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, struct sctp_nonpad_sndrcvinfo *srcv) { struct sctp_copy_all *ca; struct mbuf *mat; ssize_t sndlen; int ret; if (uio != NULL) { sndlen = uio->uio_resid; } else { sndlen = 0; for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { sndlen += SCTP_BUF_LEN(mat); } } if (sndlen > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) { /* You must not be larger than the limit! */ return (EMSGSIZE); } SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), SCTP_M_COPYAL); if (ca == NULL) { sctp_m_freem(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } memset(ca, 0, sizeof(struct sctp_copy_all)); ca->inp = inp; if (srcv != NULL) { memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo)); } /* Serialize. 
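 * Only one send-all pass may be outstanding per endpoint, so
 * test-and-set SCTP_PCB_FLAGS_SND_ITERATOR_UP under the INP write
 * lock; a concurrent SCTP_SENDALL request sees the flag and gets
 * EBUSY.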
*/ SCTP_INP_WLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) != 0) { SCTP_INP_WUNLOCK(inp); sctp_m_freem(m); SCTP_FREE(ca, SCTP_M_COPYAL); return (EBUSY); } inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP; SCTP_INP_WUNLOCK(inp); /* * take off the sendall flag, it would be bad if we failed to do * this :-0 */ ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; /* get length and mbuf chain */ ca->sndlen = sndlen; if (uio != NULL) { ca->m = sctp_copy_out_all(uio, ca->sndlen); if (ca->m == NULL) { SCTP_FREE(ca, SCTP_M_COPYAL); sctp_m_freem(m); SCTP_INP_WLOCK(inp); inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP; SCTP_INP_WUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } } else { ca->m = m; } ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL, SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, (void *)ca, 0, sctp_sendall_completes, inp, 1); if (ret != 0) { SCTP_INP_WLOCK(inp); inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP; SCTP_INP_WUNLOCK(inp); SCTP_FREE(ca, SCTP_M_COPYAL); SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); return (EFAULT); } return (0); } void sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) { struct sctp_tmit_chunk *chk, *nchk; TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt--; if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); } } } void sctp_toss_old_asconf(struct sctp_tcb *stcb) { struct sctp_association *asoc; struct sctp_tmit_chunk *chk, *nchk; struct sctp_asconf_chunk *acp; asoc = &stcb->asoc; TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { /* find SCTP_ASCONF chunk in queue */ if (chk->rec.chunk_id.id == SCTP_ASCONF) { if (chk->data) { acp = mtod(chk->data, struct sctp_asconf_chunk *); if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) { /* Not Acked yet */ break; } } TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt--; if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); } } } static void sctp_clean_up_datalist(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_tmit_chunk **data_list, int bundle_at, struct sctp_nets *net) { int i; struct sctp_tmit_chunk *tp1; for (i = 0; i < bundle_at; i++) { /* off of the send queue */ TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next); asoc->send_queue_cnt--; if (i > 0) { /* * Any chunk NOT 0 you zap the time chunk 0 gets * zapped or set based on if a RTO measurement is * needed. 
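 * Only data_list[0], the first chunk bundled into a packet, may carry
 * the RTT-measurement flag: sctp_med_chunk_output() sets its do_rtt
 * when net->rto_needed is set, and every later chunk in the bundle
 * has do_rtt cleared here.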
*/ data_list[i]->do_rtt = 0; } /* record time */ data_list[i]->sent_rcv_time = net->last_sent_time; data_list[i]->rec.data.cwnd_at_send = net->cwnd; data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn; if (data_list[i]->whoTo == NULL) { data_list[i]->whoTo = net; atomic_add_int(&net->ref_count, 1); } /* on to the sent queue */ tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead); if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) { struct sctp_tmit_chunk *tpp; /* need to move back */ back_up_more: tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next); if (tpp == NULL) { TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next); goto all_done; } tp1 = tpp; if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) { goto back_up_more; } TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next); } else { TAILQ_INSERT_TAIL(&asoc->sent_queue, data_list[i], sctp_next); } all_done: /* This does not lower until the cum-ack passes it */ asoc->sent_queue_cnt++; if ((asoc->peers_rwnd <= 0) && (asoc->total_flight == 0) && (bundle_at == 1)) { /* Mark the chunk as being a window probe */ SCTP_STAT_INCR(sctps_windowprobed); } #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xC2, 3); #endif data_list[i]->sent = SCTP_DATAGRAM_SENT; data_list[i]->snd_count = 1; data_list[i]->rec.data.chunk_was_revoked = 0; if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { sctp_misc_ints(SCTP_FLIGHT_LOG_UP, data_list[i]->whoTo->flight_size, data_list[i]->book_size, (uint32_t)(uintptr_t)data_list[i]->whoTo, data_list[i]->rec.data.tsn); } sctp_flight_size_increase(data_list[i]); sctp_total_flight_increase(stcb, data_list[i]); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); } asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { /* SWS sender side engages */ asoc->peers_rwnd = 0; } } if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) { (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net); } } static void sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked) { struct sctp_tmit_chunk *chk, *nchk; TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) || (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || (chk->rec.chunk_id.id == SCTP_ECN_CWR) || (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { /* Stray chunks must be cleaned up */ clean_up_anyway: TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt--; if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { asoc->fwd_tsn_cnt--; } sctp_free_a_chunk(stcb, chk, so_locked); } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { /* special handling, we must look into the param */ if (chk != asoc->str_reset) { goto clean_up_anyway; } } } } static uint32_t 
sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length, uint32_t space_left, uint32_t frag_point, int eeor_on) { /* * Make a decision on whether I should split a msg into multiple * parts. This is only asked of incomplete messages. */ if (eeor_on) { /* * If we are doing EEOR we need to always send it if it's the * entire thing, since it might be all the guy is putting in * the hopper. */ if (space_left >= length) { /*- * If we have data outstanding, * we get another chance when the sack * arrives to transmit - wait for more data */ if (stcb->asoc.total_flight == 0) { /* * If nothing is in flight, we zero the * packet counter. */ return (length); } return (0); } else { /* You can fill the rest */ return (space_left); } } /*- * For those strange folk that make the send buffer * smaller than our fragmentation point, we can't * get a full msg in so we have to allow splitting. */ if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) { return (length); } if ((length <= space_left) || ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) { /* Sub-optimal residual: don't split in non-eeor mode. */ return (0); } /* * If we reach here length is larger than the space_left. Do we wish * to split it for the sake of putting the packet together? */ if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) { /* It's ok to split it */ return (min(space_left, frag_point)); } /* Nope, can't split */ return (0); } static uint32_t sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, struct sctp_stream_out *strq, uint32_t space_left, uint32_t frag_point, int *giveup, int eeor_mode, int *bail, int so_locked) { /* Move from the stream to the send_queue keeping track of the total */ struct sctp_association *asoc; struct sctp_stream_queue_pending *sp; struct sctp_tmit_chunk *chk; struct sctp_data_chunk *dchkh = NULL; struct sctp_idata_chunk *ndchkh = NULL; uint32_t to_move, length; int leading; uint8_t rcv_flags = 0; uint8_t some_taken; SCTP_TCB_LOCK_ASSERT(stcb); asoc = &stcb->asoc; one_more_time: /* sa_ignore FREED_MEMORY */ sp = TAILQ_FIRST(&strq->outqueue); if (sp == NULL) { sp = TAILQ_FIRST(&strq->outqueue); if (sp) { goto one_more_time; } if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) && (stcb->asoc.idata_supported == 0) && (strq->last_msg_incomplete)) { SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n", strq->sid, strq->last_msg_incomplete); strq->last_msg_incomplete = 0; } to_move = 0; goto out_of; } if ((sp->msg_is_complete) && (sp->length == 0)) { if (sp->sender_all_done) { /* * We are doing deferred cleanup. Last time through * when we took all the data the sender_all_done was * not set.
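 * That is, the user completed the message only after we had already
 * moved its final bytes, so the now-empty stream queue entry is
 * unlinked and freed on this pass instead.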
*/ if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) { SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", sp->sender_all_done, sp->length, sp->msg_is_complete, sp->put_last_out); } atomic_subtract_int(&asoc->stream_queue_cnt, 1); TAILQ_REMOVE(&strq->outqueue, sp, next); stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp); if ((strq->state == SCTP_STREAM_RESET_PENDING) && (strq->chunks_on_queues == 0) && TAILQ_EMPTY(&strq->outqueue)) { stcb->asoc.trigger_reset = 1; } if (sp->net) { sctp_free_remote_addr(sp->net); sp->net = NULL; } if (sp->data) { sctp_m_freem(sp->data); sp->data = NULL; } sctp_free_a_strmoq(stcb, sp, so_locked); /* back to get the next msg */ goto one_more_time; } else { /* * sender just finished this but still holds a * reference */ *giveup = 1; to_move = 0; goto out_of; } } else { /* is there some to get */ if (sp->length == 0) { /* no */ *giveup = 1; to_move = 0; goto out_of; } else if (sp->discard_rest) { /* Whack down the size */ atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length); if ((stcb->sctp_socket != NULL) && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { SCTP_SB_DECR(&stcb->sctp_socket->so_snd, sp->length); } if (sp->data) { sctp_m_freem(sp->data); sp->data = NULL; sp->tail_mbuf = NULL; } sp->length = 0; sp->some_taken = 1; *giveup = 1; to_move = 0; goto out_of; } } some_taken = sp->some_taken; length = sp->length; if (sp->msg_is_complete) { /* The message is complete */ to_move = min(length, frag_point); if (to_move == length) { /* All of it fits in the MTU */ if (sp->some_taken) { rcv_flags |= SCTP_DATA_LAST_FRAG; } else { rcv_flags |= SCTP_DATA_NOT_FRAG; } sp->put_last_out = 1; if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) { rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY; } } else { /* Not all of it fits, we fragment */ if (sp->some_taken == 0) { rcv_flags |= SCTP_DATA_FIRST_FRAG; } sp->some_taken = 1; } } else { to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode); if (to_move > 0) { if (to_move >= length) { to_move = length; } if (sp->some_taken == 0) { rcv_flags |= SCTP_DATA_FIRST_FRAG; sp->some_taken = 1; } } else { /* Nothing to take. */ *giveup = 1; to_move = 0; goto out_of; } } /* If we reach here, we can copy out a chunk */ sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* No chunk memory */ *giveup = 1; to_move = 0; goto out_of; } /* * Setup for unordered if needed by looking at the user sent info * flags. 
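 * SCTP_UNORDERED in sinfo_flags becomes the U bit on the DATA/I-DATA
 * chunk (SCTP_DATA_UNORDERED). If the sender also set SCTP_EOF and
 * sctp_enable_sack_immediately is on, the I bit
 * (SCTP_DATA_SACK_IMMEDIATELY) is set too, asking the peer to SACK
 * the final fragment without delay.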
*/ if (sp->sinfo_flags & SCTP_UNORDERED) { rcv_flags |= SCTP_DATA_UNORDERED; } if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) { rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY; } /* clear out the chunk before setting up */ memset(chk, 0, sizeof(*chk)); chk->rec.data.rcv_flags = rcv_flags; if (to_move >= length) { /* we think we can steal the whole thing */ if (to_move < sp->length) { /* bail, it changed */ goto dont_do_it; } chk->data = sp->data; chk->last_mbuf = sp->tail_mbuf; /* register the stealing */ sp->data = sp->tail_mbuf = NULL; } else { struct mbuf *m; dont_do_it: chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT); chk->last_mbuf = NULL; if (chk->data == NULL) { sp->some_taken = some_taken; sctp_free_a_chunk(stcb, chk, so_locked); *bail = 1; to_move = 0; goto out_of; } #ifdef SCTP_MBUF_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY); } #endif /* Pull off the data */ m_adj(sp->data, to_move); /* Now lets work our way down and compact it */ m = sp->data; while (m && (SCTP_BUF_LEN(m) == 0)) { sp->data = SCTP_BUF_NEXT(m); SCTP_BUF_NEXT(m) = NULL; if (sp->tail_mbuf == m) { /*- * Freeing tail? TSNH since * we supposedly were taking less * than the sp->length. */ #ifdef INVARIANTS panic("Huh, freeing tail? - TSNH"); #else SCTP_PRINTF("Huh, freeing tail? - TSNH\n"); sp->tail_mbuf = sp->data = NULL; sp->length = 0; #endif } sctp_m_free(m); m = sp->data; } } if (SCTP_BUF_IS_EXTENDED(chk->data)) { chk->copy_by_ref = 1; } else { chk->copy_by_ref = 0; } /* * get last_mbuf and counts of mb usage This is ugly but hopefully * its only one mbuf. */ if (chk->last_mbuf == NULL) { chk->last_mbuf = chk->data; while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); } } if (to_move > length) { /*- This should not happen either * since we always lower to_move to the size * of sp->length if its larger. */ #ifdef INVARIANTS panic("Huh, how can to_move be larger?"); #else SCTP_PRINTF("Huh, how can to_move be larger?\n"); sp->length = 0; #endif } else { atomic_subtract_int(&sp->length, to_move); } leading = SCTP_DATA_CHUNK_OVERHEAD(stcb); if (M_LEADINGSPACE(chk->data) < leading) { /* Not enough room for a chunk header, get some */ struct mbuf *m; m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA); if (m == NULL) { /* * we're in trouble here. _PREPEND below will free * all the data if there is no leading space, so we * must put the data back and restore. */ if (sp->data == NULL) { /* unsteal the data */ sp->data = chk->data; sp->tail_mbuf = chk->last_mbuf; } else { struct mbuf *m_tmp; /* reassemble the data */ m_tmp = sp->data; sp->data = chk->data; SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp; } sp->some_taken = some_taken; atomic_add_int(&sp->length, to_move); chk->data = NULL; *bail = 1; sctp_free_a_chunk(stcb, chk, so_locked); to_move = 0; goto out_of; } else { SCTP_BUF_LEN(m) = 0; SCTP_BUF_NEXT(m) = chk->data; chk->data = m; M_ALIGN(chk->data, 4); } } SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT); if (chk->data == NULL) { /* HELP, TSNH since we assured it would not above? 
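 * We either verified that M_LEADINGSPACE() was sufficient or
 * prepended a fresh mbuf just above, so this SCTP_BUF_PREPEND should
 * not have needed to allocate, let alone fail.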
*/ #ifdef INVARIANTS panic("prepend fails HELP?"); #else SCTP_PRINTF("prepend fails HELP?\n"); sctp_free_a_chunk(stcb, chk, so_locked); #endif *bail = 1; to_move = 0; goto out_of; } sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb)); chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb)); chk->book_size_scale = 0; chk->sent = SCTP_DATAGRAM_UNSENT; chk->flags = 0; chk->asoc = &stcb->asoc; chk->pad_inplace = 0; chk->no_fr_allowed = 0; if (stcb->asoc.idata_supported == 0) { if (rcv_flags & SCTP_DATA_UNORDERED) { /* Just use 0. The receiver ignores the values. */ chk->rec.data.mid = 0; } else { chk->rec.data.mid = strq->next_mid_ordered; if (rcv_flags & SCTP_DATA_LAST_FRAG) { strq->next_mid_ordered++; } } } else { if (rcv_flags & SCTP_DATA_UNORDERED) { chk->rec.data.mid = strq->next_mid_unordered; if (rcv_flags & SCTP_DATA_LAST_FRAG) { strq->next_mid_unordered++; } } else { chk->rec.data.mid = strq->next_mid_ordered; if (rcv_flags & SCTP_DATA_LAST_FRAG) { strq->next_mid_ordered++; } } } chk->rec.data.sid = sp->sid; chk->rec.data.ppid = sp->ppid; chk->rec.data.context = sp->context; chk->rec.data.doing_fast_retransmit = 0; chk->rec.data.timetodrop = sp->ts; chk->flags = sp->act_flags; if (sp->net) { chk->whoTo = sp->net; atomic_add_int(&chk->whoTo->ref_count, 1); } else chk->whoTo = NULL; if (sp->holds_key_ref) { chk->auth_keyid = sp->auth_keyid; sctp_auth_key_acquire(stcb, chk->auth_keyid); chk->holds_key_ref = 1; } stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, to_move); chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) { sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, (uint32_t)(uintptr_t)stcb, sp->length, (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)), chk->rec.data.tsn); } if (stcb->asoc.idata_supported == 0) { dchkh = mtod(chk->data, struct sctp_data_chunk *); } else { ndchkh = mtod(chk->data, struct sctp_idata_chunk *); } /* * Put the rest of the things in place now. Size was done earlier in * previous loop prior to padding. 
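 * The header layout differs by chunk type: plain DATA carries a
 * 16-bit SSN (the low bits of the MID), while I-DATA carries the full
 * 32-bit MID plus a PPID/FSN union holding the PPID on the first
 * fragment (fsn == 0) and the FSN on every later fragment.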
*/ SCTP_TCB_LOCK_ASSERT(stcb); #ifdef SCTP_ASOCLOG_OF_TSNS if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { asoc->tsn_out_at = 0; asoc->tsn_out_wrapped = 1; } asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn; asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid; asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid; asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size; asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags; asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb; asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at; asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2; asoc->tsn_out_at++; #endif if (stcb->asoc.idata_supported == 0) { dchkh->ch.chunk_type = SCTP_DATA; dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; dchkh->dp.tsn = htonl(chk->rec.data.tsn); dchkh->dp.sid = htons(strq->sid); dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid); dchkh->dp.ppid = chk->rec.data.ppid; dchkh->ch.chunk_length = htons(chk->send_size); } else { ndchkh->ch.chunk_type = SCTP_IDATA; ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags; ndchkh->dp.tsn = htonl(chk->rec.data.tsn); ndchkh->dp.sid = htons(strq->sid); ndchkh->dp.reserved = htons(0); ndchkh->dp.mid = htonl(chk->rec.data.mid); if (sp->fsn == 0) ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid; else ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn); sp->fsn++; ndchkh->ch.chunk_length = htons(chk->send_size); } /* Now advance the chk->send_size by the actual pad needed. */ if (chk->send_size < SCTP_SIZE32(chk->book_size)) { /* need a pad */ struct mbuf *lm; int pads; pads = SCTP_SIZE32(chk->book_size) - chk->send_size; lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf); if (lm != NULL) { chk->last_mbuf = lm; chk->pad_inplace = 1; } chk->send_size += pads; } if (PR_SCTP_ENABLED(chk->flags)) { asoc->pr_sctp_cnt++; } if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) { /* All done pull and kill the message */ if (sp->put_last_out == 0) { SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n"); SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", sp->sender_all_done, sp->length, sp->msg_is_complete, sp->put_last_out); } atomic_subtract_int(&asoc->stream_queue_cnt, 1); TAILQ_REMOVE(&strq->outqueue, sp, next); stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp); if ((strq->state == SCTP_STREAM_RESET_PENDING) && (strq->chunks_on_queues == 0) && TAILQ_EMPTY(&strq->outqueue)) { stcb->asoc.trigger_reset = 1; } if (sp->net) { sctp_free_remote_addr(sp->net); sp->net = NULL; } if (sp->data) { sctp_m_freem(sp->data); sp->data = NULL; } sctp_free_a_strmoq(stcb, sp, so_locked); } asoc->chunks_on_out_queue++; strq->chunks_on_queues++; TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); asoc->send_queue_cnt++; out_of: return (to_move); } static void sctp_fill_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t frag_point, int eeor_mode, int *quit_now, int so_locked) { struct sctp_association *asoc; struct sctp_stream_out *strq; uint32_t space_left, moved, total_moved; int bail, giveup; SCTP_TCB_LOCK_ASSERT(stcb); asoc = &stcb->asoc; total_moved = 0; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: space_left = net->mtu - SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: space_left = net->mtu - SCTP_MIN_OVERHEAD; break; #endif default: /* TSNH */ space_left = net->mtu; break; } /* Need an allowance for the data chunk header too */ space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb); /* must make even word boundary */ space_left &= 
0xfffffffc; strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); giveup = 0; bail = 0; while ((space_left > 0) && (strq != NULL)) { moved = sctp_move_to_outqueue(stcb, net, strq, space_left, frag_point, &giveup, eeor_mode, &bail, so_locked); if ((giveup != 0) || (bail != 0)) { break; } strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); total_moved += moved; if (space_left >= moved) { space_left -= moved; } else { space_left = 0; } if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) { space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb); } else { space_left = 0; } space_left &= 0xfffffffc; } if (bail != 0) *quit_now = 1; stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc); if (total_moved == 0) { if ((stcb->asoc.sctp_cmt_on_off == 0) && (net == stcb->asoc.primary_destination)) { /* ran dry for primary network net */ SCTP_STAT_INCR(sctps_primary_randry); } else if (stcb->asoc.sctp_cmt_on_off > 0) { /* ran dry with CMT on */ SCTP_STAT_INCR(sctps_cmt_randry); } } } void sctp_fix_ecn_echo(struct sctp_association *asoc) { struct sctp_tmit_chunk *chk; TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { chk->sent = SCTP_DATAGRAM_UNSENT; } } } void sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net) { struct sctp_association *asoc; struct sctp_tmit_chunk *chk; struct sctp_stream_queue_pending *sp; unsigned int i; if (net == NULL) { return; } asoc = &stcb->asoc; for (i = 0; i < stcb->asoc.streamoutcnt; i++) { TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { if (sp->net == net) { sctp_free_remote_addr(sp->net); sp->net = NULL; } } } TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { if (chk->whoTo == net) { sctp_free_remote_addr(chk->whoTo); chk->whoTo = NULL; } } } int sctp_med_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc, int *num_out, int *reason_code, int control_only, int from_where, struct timeval *now, int *now_filled, uint32_t frag_point, int so_locked) { /** * Ok this is the generic chunk service queue. we must do the * following: * - Service the stream queue that is next, moving any * message (note I must get a complete message i.e. FIRST/MIDDLE and * LAST to the out queue in one pass) and assigning TSN's. This * only applies though if the peer does not support NDATA. For NDATA * chunks its ok to not send the entire message ;-) * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and * formulate and send the low level chunks. Making sure to combine * any control in the control chunk queue also. */ struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL; struct mbuf *outchain, *endoutchain; struct sctp_tmit_chunk *chk, *nchk; /* temp arrays for unlinking */ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; int no_fragmentflg, error; unsigned int max_rwnd_per_dest, max_send_per_dest; int one_chunk, hbflag, skip_data_for_this_net; int asconf, cookie, no_out_cnt; int bundle_at, ctl_cnt, no_data_chunks, eeor_mode; unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; int tsns_sent = 0; uint32_t auth_offset; struct sctp_auth_chunk *auth; uint16_t auth_keyid; int override_ok = 1; int skip_fill_up = 0; int data_auth_reqd = 0; /* * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the * destination. 
*/ int quit_now = 0; bool use_zero_crc; *num_out = 0; *reason_code = 0; auth_keyid = stcb->asoc.authinfo.active_keyid; if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) || (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { eeor_mode = 1; } else { eeor_mode = 0; } ctl_cnt = no_out_cnt = asconf = cookie = 0; /* * First let's prime the pump. For each destination, if there is room * in the flight size, attempt to pull an MTU's worth out of the * stream queues into the general send_queue */ #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xC2, 2); #endif SCTP_TCB_LOCK_ASSERT(stcb); hbflag = 0; if (control_only) no_data_chunks = 1; else no_data_chunks = 0; /* Nothing possible to send? */ if ((TAILQ_EMPTY(&asoc->control_send_queue) || (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) && TAILQ_EMPTY(&asoc->asconf_send_queue) && TAILQ_EMPTY(&asoc->send_queue) && sctp_is_there_unsent_data(stcb, so_locked) == 0) { nothing_to_send: *reason_code = 9; return (0); } if (asoc->peers_rwnd == 0) { /* No room in peers rwnd */ *reason_code = 1; if (asoc->total_flight > 0) { /* we are allowed one chunk in flight */ no_data_chunks = 1; } } if (stcb->asoc.ecn_echo_cnt_onq) { /* Record where a sack goes, if any */ if (no_data_chunks && (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) { /* Nothing but ECNe to send - we don't do that */ goto nothing_to_send; } TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { sack_goes_to = chk->whoTo; break; } } } max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets); if (stcb->sctp_socket) max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets; else max_send_per_dest = 0; if (no_data_chunks == 0) { /* How many non-directed chunks are there? */ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { if (chk->whoTo == NULL) { /* * We already have non-directed chunks on * the queue, no need to do a fill-up. */ skip_fill_up = 1; break; } } } if ((no_data_chunks == 0) && (skip_fill_up == 0) && (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) { TAILQ_FOREACH(net, &asoc->nets, sctp_next) { /* * This for loop takes in each net: if it's got space * in cwnd and has data sent to it (when CMT is off) * then it calls sctp_fill_outqueue for the net. This * gets data on the send queue for that network. * * In sctp_fill_outqueue TSN's are assigned and data * is copied out of the stream buffers. Note mostly * copy by reference (we hope).
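 * Nets that are in the PF state, unreachable, or unconfirmed are
 * skipped below (unless the net is the current alternate), as are
 * nets whose flight_size already fills their cwnd.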
*/ net->window_probe = 0; if ((net != stcb->asoc.alternate) && ((net->dest_state & SCTP_ADDR_PF) || ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) || (net->dest_state & SCTP_ADDR_UNCONFIRMED))) { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, 1, SCTP_CWND_LOG_FILL_OUTQ_CALLED); } continue; } if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && (net->flight_size == 0)) { (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net); } if (net->flight_size >= net->cwnd) { /* skip this network, no room - can't fill */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, 3, SCTP_CWND_LOG_FILL_OUTQ_CALLED); } continue; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED); } sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked); if (quit_now) { /* memory alloc failure */ no_data_chunks = 1; break; } } } /* now service each destination and send out what we can for it */ /* Nothing to send? */ if (TAILQ_EMPTY(&asoc->control_send_queue) && TAILQ_EMPTY(&asoc->asconf_send_queue) && TAILQ_EMPTY(&asoc->send_queue)) { *reason_code = 8; return (0); } if (asoc->sctp_cmt_on_off > 0) { /* get the last start point */ start_at = asoc->last_net_cmt_send_started; if (start_at == NULL) { /* null so to beginning */ start_at = TAILQ_FIRST(&asoc->nets); } else { start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next); if (start_at == NULL) { start_at = TAILQ_FIRST(&asoc->nets); } } asoc->last_net_cmt_send_started = start_at; } else { start_at = TAILQ_FIRST(&asoc->nets); } TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if (chk->whoTo == NULL) { if (asoc->alternate) { chk->whoTo = asoc->alternate; } else { chk->whoTo = asoc->primary_destination; } atomic_add_int(&chk->whoTo->ref_count, 1); } } old_start_at = NULL; again_one_more_time: for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { /* how much can we send? */ /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ if (old_start_at && (old_start_at == net)) { /* through list completely. */ break; } tsns_sent = 0xa; if (TAILQ_EMPTY(&asoc->control_send_queue) && TAILQ_EMPTY(&asoc->asconf_send_queue) && (net->flight_size >= net->cwnd)) { /* * Nothing on control or asconf and flight is full, * we can skip even in the CMT case. 
*/ continue; } bundle_at = 0; endoutchain = outchain = NULL; auth = NULL; auth_offset = 0; no_fragmentflg = 1; one_chunk = 0; if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { skip_data_for_this_net = 1; } else { skip_data_for_this_net = 0; } switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { #ifdef INET case AF_INET: mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: mtu = net->mtu - SCTP_MIN_OVERHEAD; break; #endif default: /* TSNH */ mtu = net->mtu; break; } mx_mtu = mtu; to_out = 0; if (mtu > asoc->peers_rwnd) { if (asoc->total_flight > 0) { /* We have a packet in flight somewhere */ r_mtu = asoc->peers_rwnd; } else { /* We are always allowed to send one MTU out */ one_chunk = 1; r_mtu = mtu; } } else { r_mtu = mtu; } error = 0; /************************/ /* ASCONF transmission */ /************************/ /* Now first lets go through the asconf queue */ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { if (chk->rec.chunk_id.id != SCTP_ASCONF) { continue; } if (chk->whoTo == NULL) { if (asoc->alternate == NULL) { if (asoc->primary_destination != net) { break; } } else { if (asoc->alternate != net) { break; } } } else { if (chk->whoTo != net) { break; } } if (chk->data == NULL) { break; } if (chk->sent != SCTP_DATAGRAM_UNSENT && chk->sent != SCTP_DATAGRAM_RESEND) { break; } /* * if no AUTH is yet included and this chunk * requires it, make sure to account for it. We * don't apply the size until the AUTH chunk is * actually added below in case there is no room for * this chunk. NOTE: we overload the use of "omtu" * here */ if ((auth == NULL) && sctp_auth_is_required_chunk(chk->rec.chunk_id.id, stcb->asoc.peer_auth_chunks)) { omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); } else omtu = 0; /* Here we do NOT factor the r_mtu */ if ((chk->send_size < (int)(mtu - omtu)) || (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { /* * We probably should glom the mbuf chain * from the chk->data for control but the * problem is it becomes yet one more level * of tracking to do if for some reason * output fails. Then I have got to * reconstruct the merged control chain.. el * yucko.. for now we take the easy way and * do the copy */ /* * Add an AUTH chunk, if chunk requires it * save the offset into the chain for AUTH */ if ((auth == NULL) && (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, stcb->asoc.peer_auth_chunks))) { outchain = sctp_add_auth_chunk(outchain, &endoutchain, &auth, &auth_offset, stcb, chk->rec.chunk_id.id); SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, (int)chk->rec.chunk_id.can_take_data, chk->send_size, chk->copy_by_ref); if (outchain == NULL) { *reason_code = 8; SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); /* update our MTU size */ if (mtu > (chk->send_size + omtu)) mtu -= (chk->send_size + omtu); else mtu = 0; to_out += (chk->send_size + omtu); /* Do clear IP_DF ? 
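 * A chunk flagged CHUNK_FLAGS_FRAGMENT_OK clears no_fragmentflg, so
 * sctp_lowlevel_chunk_output() can send the packet without IP_DF and
 * leave any needed fragmentation to the IP layer.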
*/ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { no_fragmentflg = 0; } if (chk->rec.chunk_id.can_take_data) chk->data = NULL; /* * set hb flag since we can use these for * RTO */ hbflag = 1; asconf = 1; /* * should sysctl this: don't bundle data * with ASCONF since it requires AUTH */ no_data_chunks = 1; chk->sent = SCTP_DATAGRAM_SENT; if (chk->whoTo == NULL) { chk->whoTo = net; atomic_add_int(&net->ref_count, 1); } chk->snd_count++; if (mtu == 0) { /* * Ok we are out of room but we can * output without effecting the * flight size since this little guy * is a control only packet. */ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); /* * do NOT clear the asconf flag as * it is used to do appropriate * source address selection. */ if (*now_filled == 0) { (void)SCTP_GETTIME_TIMEVAL(now); *now_filled = 1; } net->last_sent_time = *now; hbflag = 0; if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, (struct sockaddr *)&net->ro._l_addr, outchain, auth_offset, auth, stcb->asoc.authinfo.active_keyid, no_fragmentflg, 0, asconf, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, 0, 0, false, so_locked))) { /* * error, we could not * output */ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (from_where == 0) { SCTP_STAT_INCR(sctps_lowlevelerrusr); } if (error == ENOBUFS) { asoc->ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } /* error, could not output */ if (error == EHOSTUNREACH) { /* * Destination went * unreachable * during this send */ sctp_move_chunks_from_net(stcb, net); } asconf = 0; *reason_code = 7; break; } else { asoc->ifp_had_enobuf = 0; } /* * increase the number we sent, if a * cookie is sent we don't tell them * any was sent out. */ outchain = endoutchain = NULL; auth = NULL; auth_offset = 0; asconf = 0; if (!no_out_cnt) *num_out += ctl_cnt; /* recalc a clean slate and setup */ switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: mtu = net->mtu - SCTP_MIN_OVERHEAD; break; #endif default: /* TSNH */ mtu = net->mtu; break; } to_out = 0; no_fragmentflg = 1; } } } if (error != 0) { /* try next net */ continue; } /************************/ /* Control transmission */ /************************/ /* Now first lets go through the control queue */ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { if ((sack_goes_to) && (chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (chk->whoTo != sack_goes_to)) { /* * if we have a sack in queue, and we are * looking at an ecn echo that is NOT queued * to where the sack is going.. */ if (chk->whoTo == net) { /* * Don't transmit it to where its * going (current net) */ continue; } else if (sack_goes_to == net) { /* * But do transmit it to this * address */ goto skip_net_check; } } if (chk->whoTo == NULL) { if (asoc->alternate == NULL) { if (asoc->primary_destination != net) { continue; } } else { if (asoc->alternate != net) { continue; } } } else { if (chk->whoTo != net) { continue; } } skip_net_check: if (chk->data == NULL) { continue; } if (chk->sent != SCTP_DATAGRAM_UNSENT) { /* * It must be unsent. Cookies and ASCONF's * hang around but there timers will force * when marked for resend. */ continue; } /* * if no AUTH is yet included and this chunk * requires it, make sure to account for it. We * don't apply the size until the AUTH chunk is * actually added below in case there is no room for * this chunk. 
NOTE: we overload the use of "omtu" * here */ if ((auth == NULL) && sctp_auth_is_required_chunk(chk->rec.chunk_id.id, stcb->asoc.peer_auth_chunks)) { omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); } else omtu = 0; /* Here we do NOT factor the r_mtu */ if ((chk->send_size <= (int)(mtu - omtu)) || (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { /* * We probably should glom the mbuf chain * from the chk->data for control but the * problem is it becomes yet one more level * of tracking to do if for some reason * output fails. Then I have got to * reconstruct the merged control chain.. el * yucko.. for now we take the easy way and * do the copy */ /* * Add an AUTH chunk, if chunk requires it * save the offset into the chain for AUTH */ if ((auth == NULL) && (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, stcb->asoc.peer_auth_chunks))) { outchain = sctp_add_auth_chunk(outchain, &endoutchain, &auth, &auth_offset, stcb, chk->rec.chunk_id.id); SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, (int)chk->rec.chunk_id.can_take_data, chk->send_size, chk->copy_by_ref); if (outchain == NULL) { *reason_code = 8; SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); /* update our MTU size */ if (mtu > (chk->send_size + omtu)) mtu -= (chk->send_size + omtu); else mtu = 0; to_out += (chk->send_size + omtu); /* Do clear IP_DF ? */ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { no_fragmentflg = 0; } if (chk->rec.chunk_id.can_take_data) chk->data = NULL; /* Mark things to be removed, if needed */ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || (chk->rec.chunk_id.id == SCTP_ECN_CWR) || (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { hbflag = 1; } /* remove these chunks at the end */ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { /* turn off the timer */ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1); } } ctl_cnt++; } else { /* * Other chunks, since they have * timers running (i.e. COOKIE) we * just "trust" that it gets sent or * retransmitted. */ ctl_cnt++; if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { cookie = 1; no_out_cnt = 1; } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { /* * Increment ecne send count * here this means we may be * over-zealous in our * counting if the send * fails, but its the best * place to do it (we used * to do it in the queue of * the chunk, but that did * not tell how many times * it was sent. */ SCTP_STAT_INCR(sctps_sendecne); } chk->sent = SCTP_DATAGRAM_SENT; if (chk->whoTo == NULL) { chk->whoTo = net; atomic_add_int(&net->ref_count, 1); } chk->snd_count++; } if (mtu == 0) { /* * Ok we are out of room but we can * output without effecting the * flight size since this little guy * is a control only packet. 
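 * (A note on use_zero_crc below: when the association's send EDMID is
 * SCTP_EDMID_LOWER_LAYER_DTLS the CRC32c may be transmitted as zero,
 * presumably because the lower layer already covers the packet;
 * packets that carry an ASCONF or COOKIE fall back to a real
 * checksum.)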
*/ switch (asoc->snd_edmid) { case SCTP_EDMID_LOWER_LAYER_DTLS: use_zero_crc = true; break; default: use_zero_crc = false; break; } if (asconf) { sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); use_zero_crc = false; /* * do NOT clear the asconf * flag as it is used to do * appropriate source * address selection. */ } if (cookie) { sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); use_zero_crc = false; cookie = 0; } /* Only HB or ASCONF advances time */ if (hbflag) { if (*now_filled == 0) { (void)SCTP_GETTIME_TIMEVAL(now); *now_filled = 1; } net->last_sent_time = *now; hbflag = 0; } if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, (struct sockaddr *)&net->ro._l_addr, outchain, auth_offset, auth, stcb->asoc.authinfo.active_keyid, no_fragmentflg, 0, asconf, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, 0, 0, use_zero_crc, so_locked))) { /* * error, we could not * output */ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (from_where == 0) { SCTP_STAT_INCR(sctps_lowlevelerrusr); } if (error == ENOBUFS) { asoc->ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } if (error == EHOSTUNREACH) { /* * Destination went * unreachable * during this send */ sctp_move_chunks_from_net(stcb, net); } asconf = 0; *reason_code = 7; break; } else { asoc->ifp_had_enobuf = 0; } /* * increase the number we sent, if a * cookie is sent we don't tell them * any was sent out. */ outchain = endoutchain = NULL; auth = NULL; auth_offset = 0; asconf = 0; if (!no_out_cnt) *num_out += ctl_cnt; /* recalc a clean slate and setup */ switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: mtu = net->mtu - SCTP_MIN_OVERHEAD; break; #endif default: /* TSNH */ mtu = net->mtu; break; } to_out = 0; no_fragmentflg = 1; } } } if (error != 0) { /* try next net */ continue; } /* JRI: if dest is in PF state, do not send data to it */ if ((asoc->sctp_cmt_on_off > 0) && (net != stcb->asoc.alternate) && (net->dest_state & SCTP_ADDR_PF)) { goto no_data_fill; } if (net->flight_size >= net->cwnd) { goto no_data_fill; } if ((asoc->sctp_cmt_on_off > 0) && (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) && (net->flight_size > max_rwnd_per_dest)) { goto no_data_fill; } /* * We need a specific accounting for the usage of the send * buffer. We also need to check the number of messages per * net. For now, this is better than nothing and it disabled * by default... */ if ((asoc->sctp_cmt_on_off > 0) && (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) && (max_send_per_dest > 0) && (net->flight_size > max_send_per_dest)) { goto no_data_fill; } /*********************/ /* Data transmission */ /*********************/ /* * if AUTH for DATA is required and no AUTH has been added * yet, account for this in the mtu now... if no data can be * bundled, this adjustment won't matter anyways since the * packet will be going out... 
*/ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); if (data_auth_reqd && (auth == NULL)) { mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); } /* now lets add any data within the MTU constraints */ switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { #ifdef INET case AF_INET: if (net->mtu > SCTP_MIN_V4_OVERHEAD) omtu = net->mtu - SCTP_MIN_V4_OVERHEAD; else omtu = 0; break; #endif #ifdef INET6 case AF_INET6: if (net->mtu > SCTP_MIN_OVERHEAD) omtu = net->mtu - SCTP_MIN_OVERHEAD; else omtu = 0; break; #endif default: /* TSNH */ omtu = 0; break; } if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && (skip_data_for_this_net == 0)) || (cookie)) { TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { if (no_data_chunks) { /* let only control go out */ *reason_code = 1; break; } if (net->flight_size >= net->cwnd) { /* skip this net, no room for data */ *reason_code = 2; break; } if ((chk->whoTo != NULL) && (chk->whoTo != net)) { /* Don't send the chunk on this net */ continue; } if (asoc->sctp_cmt_on_off == 0) { if ((asoc->alternate) && (asoc->alternate != net) && (chk->whoTo == NULL)) { continue; } else if ((net != asoc->primary_destination) && (asoc->alternate == NULL) && (chk->whoTo == NULL)) { continue; } } if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) { /*- * strange, we have a chunk that is * to big for its destination and * yet no fragment ok flag. * Something went wrong when the * PMTU changed...we did not mark * this chunk for some reason?? I * will fix it here by letting IP * fragment it for now and printing * a warning. This really should not * happen ... */ SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", chk->send_size, mtu); chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; } if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { struct sctp_data_chunk *dchkh; dchkh = mtod(chk->data, struct sctp_data_chunk *); dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY; } if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { /* ok we will add this one */ /* * Add an AUTH chunk, if chunk * requires it, save the offset into * the chain for AUTH */ if (data_auth_reqd) { if (auth == NULL) { outchain = sctp_add_auth_chunk(outchain, &endoutchain, &auth, &auth_offset, stcb, SCTP_DATA); auth_keyid = chk->auth_keyid; override_ok = 0; SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } else if (override_ok) { /* * use this data's * keyid */ auth_keyid = chk->auth_keyid; override_ok = 0; } else if (auth_keyid != chk->auth_keyid) { /* * different keyid, * so done bundling */ break; } } outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, chk->send_size, chk->copy_by_ref); if (outchain == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); } *reason_code = 3; SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } /* update our MTU size */ /* Do clear IP_DF ? 
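 * (Yes: a CHUNK_FLAGS_FRAGMENT_OK chunk clears no_fragmentflg, as for
 * control chunks above.) From here on, mtu tracks the space remaining
 * in this packet and r_mtu the space remaining in the peer's rwnd;
 * data bundling stops when either reaches zero, except that a single
 * chunk may still go out as a window probe.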
*/ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { no_fragmentflg = 0; } /* unsigned subtraction of mtu */ if (mtu > chk->send_size) mtu -= chk->send_size; else mtu = 0; /* unsigned subtraction of r_mtu */ if (r_mtu > chk->send_size) r_mtu -= chk->send_size; else r_mtu = 0; to_out += chk->send_size; if ((to_out > mx_mtu) && no_fragmentflg) { #ifdef INVARIANTS panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); #else SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", mx_mtu, to_out); #endif } chk->window_probe = 0; data_list[bundle_at++] = chk; if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { break; } if (chk->sent == SCTP_DATAGRAM_UNSENT) { if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); } else { SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); } if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) /* * Count number of * user msg's that * were fragmented * we do this by * counting when we * see a LAST * fragment only. */ SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); } if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { if ((one_chunk) && (stcb->asoc.total_flight == 0)) { data_list[0]->window_probe = 1; net->window_probe = 1; } break; } } else { /* * Must be sent in order of the * TSN's (on a network) */ break; } } /* for (chunk gather loop for this net) */ } /* if asoc.state OPEN */ no_data_fill: /* Is there something to send for this destination? */ if (outchain) { switch (asoc->snd_edmid) { case SCTP_EDMID_LOWER_LAYER_DTLS: use_zero_crc = true; break; default: use_zero_crc = false; break; } /* We may need to start a control timer or two */ if (asconf) { sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); use_zero_crc = false; /* * do NOT clear the asconf flag as it is * used to do appropriate source address * selection. */ } if (cookie) { sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); use_zero_crc = false; cookie = 0; } /* must start a send timer if data is being sent */ if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { /* * no timer running on this destination * restart it. */ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); } if (bundle_at || hbflag) { /* For data/asconf and hb set time */ if (*now_filled == 0) { (void)SCTP_GETTIME_TIMEVAL(now); *now_filled = 1; } net->last_sent_time = *now; } /* Now send it, if there is anything to send :> */ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, (struct sockaddr *)&net->ro._l_addr, outchain, auth_offset, auth, auth_keyid, no_fragmentflg, bundle_at, asconf, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, 0, 0, use_zero_crc, so_locked))) { /* error, we could not output */ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (from_where == 0) { SCTP_STAT_INCR(sctps_lowlevelerrusr); } if (error == ENOBUFS) { asoc->ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } if (error == EHOSTUNREACH) { /* * Destination went unreachable * during this send */ sctp_move_chunks_from_net(stcb, net); } asconf = 0; *reason_code = 6; /*- * I add this line to be paranoid. As far as * I can tell the continue, takes us back to * the top of the for, but just to make sure * I will reset these again here. */ ctl_cnt = 0; continue; /* This takes us back to the * for() for the nets. 
*/ } else { asoc->ifp_had_enobuf = 0; } endoutchain = NULL; auth = NULL; auth_offset = 0; asconf = 0; if (!no_out_cnt) { *num_out += (ctl_cnt + bundle_at); } if (bundle_at) { /* setup for a RTO measurement */ tsns_sent = data_list[0]->rec.data.tsn; /* fill time if not already filled */ if (*now_filled == 0) { (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); *now_filled = 1; *now = asoc->time_last_sent; } else { asoc->time_last_sent = *now; } if (net->rto_needed) { data_list[0]->do_rtt = 1; net->rto_needed = 0; } SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); } if (one_chunk) { break; } } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); } } if (old_start_at == NULL) { old_start_at = start_at; start_at = TAILQ_FIRST(&asoc->nets); if (old_start_at) goto again_one_more_time; } /* * At the end there should be no NON timed chunks hanging on this * queue. */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND); } if ((*num_out == 0) && (*reason_code == 0)) { *reason_code = 4; } else { *reason_code = 5; } sctp_clean_up_ctl(stcb, asoc, so_locked); return (0); } void sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err) { /*- * Prepend a OPERATIONAL_ERROR chunk header and put on the end of * the control chunk queue. */ struct sctp_chunkhdr *hdr; struct sctp_tmit_chunk *chk; struct mbuf *mat, *last_mbuf; uint32_t chunk_length; uint16_t padding_length; SCTP_TCB_LOCK_ASSERT(stcb); SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT); if (op_err == NULL) { return; } last_mbuf = NULL; chunk_length = 0; for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) { chunk_length += SCTP_BUF_LEN(mat); if (SCTP_BUF_NEXT(mat) == NULL) { last_mbuf = mat; } } if (chunk_length > SCTP_MAX_CHUNK_LENGTH) { sctp_m_freem(op_err); return; } padding_length = chunk_length % 4; if (padding_length != 0) { padding_length = 4 - padding_length; } if (padding_length != 0) { if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) { sctp_m_freem(op_err); return; } } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(op_err); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_OPERATION_ERROR; chk->rec.chunk_id.can_take_data = 0; chk->flags = 0; chk->send_size = (uint16_t)chunk_length; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->data = op_err; chk->whoTo = NULL; hdr = mtod(op_err, struct sctp_chunkhdr *); hdr->chunk_type = SCTP_OPERATION_ERROR; hdr->chunk_flags = 0; hdr->chunk_length = htons(chk->send_size); TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; } int sctp_send_cookie_echo(struct mbuf *m, int offset, int limit, struct sctp_tcb *stcb, struct sctp_nets *net) { /*- * pull out the cookie and put it at the front of the control chunk * queue. 
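 * The STATE-COOKIE parameter is copied verbatim out of the received
 * INIT-ACK. A parameter header and a chunk header are both four bytes
 * ending in a 16-bit length, so rewriting just the first two bytes
 * (type and flags) converts the parameter into a COOKIE ECHO chunk in
 * place; the length field is already correct.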
*/ int at; struct mbuf *cookie; struct sctp_paramhdr param, *phdr; struct sctp_chunkhdr *hdr; struct sctp_tmit_chunk *chk; uint16_t ptype, plen; SCTP_TCB_LOCK_ASSERT(stcb); /* First find the cookie in the param area */ cookie = NULL; at = offset + sizeof(struct sctp_init_chunk); for (;;) { phdr = sctp_get_next_param(m, at, ¶m, sizeof(param)); if (phdr == NULL) { return (-3); } ptype = ntohs(phdr->param_type); plen = ntohs(phdr->param_length); if (plen < sizeof(struct sctp_paramhdr)) { return (-6); } if (ptype == SCTP_STATE_COOKIE) { int pad; /* found the cookie */ if (at + plen > limit) { return (-7); } cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT); if (cookie == NULL) { /* No memory */ return (-2); } if ((pad = (plen % 4)) > 0) { pad = 4 - pad; } if (pad > 0) { if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) { return (-8); } } #ifdef SCTP_MBUF_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(cookie, SCTP_MBUF_ICOPY); } #endif break; } at += SCTP_SIZE32(plen); } /* ok, we got the cookie lets change it into a cookie echo chunk */ /* first the change from param to cookie */ hdr = mtod(cookie, struct sctp_chunkhdr *); hdr->chunk_type = SCTP_COOKIE_ECHO; hdr->chunk_flags = 0; /* get the chunk stuff now and place it in the FRONT of the queue */ sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(cookie); return (-5); } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_COOKIE_ECHO; chk->rec.chunk_id.can_take_data = 0; chk->flags = CHUNK_FLAGS_FRAGMENT_OK; chk->send_size = SCTP_SIZE32(plen); chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->data = cookie; chk->whoTo = net; atomic_add_int(&chk->whoTo->ref_count, 1); TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; return (0); } void sctp_send_heartbeat_ack(struct sctp_tcb *stcb, struct mbuf *m, int offset, int chk_length, struct sctp_nets *net) { /* * take a HB request and make it into a HB ack and send it. 
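 * Per RFC 4960 the Heartbeat Information parameter is echoed back
 * unchanged: copy the whole chunk, flip the type to HEARTBEAT ACK,
 * and pad the result to a 4-byte boundary if needed.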
*/ struct mbuf *outchain; struct sctp_chunkhdr *chdr; struct sctp_tmit_chunk *chk; if (net == NULL) /* must have a net pointer */ return; outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT); if (outchain == NULL) { /* gak out of memory */ return; } #ifdef SCTP_MBUF_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(outchain, SCTP_MBUF_ICOPY); } #endif chdr = mtod(outchain, struct sctp_chunkhdr *); chdr->chunk_type = SCTP_HEARTBEAT_ACK; chdr->chunk_flags = 0; if (chk_length % 4 != 0) { sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL); } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(outchain); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; chk->send_size = chk_length; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->data = outchain; chk->whoTo = net; atomic_add_int(&chk->whoTo->ref_count, 1); TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; } void sctp_send_cookie_ack(struct sctp_tcb *stcb) { /* formulate and queue a cookie-ack back to sender */ struct mbuf *cookie_ack; struct sctp_chunkhdr *hdr; struct sctp_tmit_chunk *chk; SCTP_TCB_LOCK_ASSERT(stcb); cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); if (cookie_ack == NULL) { /* no mbuf's */ return; } SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(cookie_ack); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_COOKIE_ACK; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; chk->send_size = sizeof(struct sctp_chunkhdr); chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->data = cookie_ack; if (chk->asoc->last_control_chunk_from != NULL) { chk->whoTo = chk->asoc->last_control_chunk_from; atomic_add_int(&chk->whoTo->ref_count, 1); } else { chk->whoTo = NULL; } hdr = mtod(cookie_ack, struct sctp_chunkhdr *); hdr->chunk_type = SCTP_COOKIE_ACK; hdr->chunk_flags = 0; hdr->chunk_length = htons(chk->send_size); SCTP_BUF_LEN(cookie_ack) = chk->send_size; TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; return; } void sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) { /* formulate and queue a SHUTDOWN-ACK back to the sender */ struct mbuf *m_shutdown_ack; struct sctp_shutdown_ack_chunk *ack_cp; struct sctp_tmit_chunk *chk; m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER); if (m_shutdown_ack == NULL) { /* no mbuf's */ return; } SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(m_shutdown_ack); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; chk->send_size = sizeof(struct sctp_chunkhdr); chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->data = m_shutdown_ack; chk->whoTo = net; if (chk->whoTo) { atomic_add_int(&chk->whoTo->ref_count, 1); } ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *); ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK; ack_cp->ch.chunk_flags = 0; ack_cp->ch.chunk_length = htons(chk->send_size); SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size; TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 
chk->asoc->ctrl_queue_cnt++; return; } void sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net) { /* formulate and queue a SHUTDOWN to the sender */ struct mbuf *m_shutdown; struct sctp_shutdown_chunk *shutdown_cp; struct sctp_tmit_chunk *chk; TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) { /* We already have a SHUTDOWN queued. Reuse it. */ if (chk->whoTo) { sctp_free_remote_addr(chk->whoTo); chk->whoTo = NULL; } break; } } if (chk == NULL) { m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER); if (m_shutdown == NULL) { /* no mbuf's */ return; } SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD); sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(m_shutdown); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_SHUTDOWN; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; chk->send_size = sizeof(struct sctp_shutdown_chunk); chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->data = m_shutdown; chk->whoTo = net; if (chk->whoTo) { atomic_add_int(&chk->whoTo->ref_count, 1); } shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *); shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN; shutdown_cp->ch.chunk_flags = 0; shutdown_cp->ch.chunk_length = htons(chk->send_size); shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); SCTP_BUF_LEN(m_shutdown) = chk->send_size; TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; } else { TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next); chk->whoTo = net; if (chk->whoTo) { atomic_add_int(&chk->whoTo->ref_count, 1); } shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *); shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); } return; } void sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked) { /* * formulate and queue an ASCONF to the peer. ASCONF parameters * should be queued on the assoc queue. */ struct sctp_tmit_chunk *chk; struct mbuf *m_asconf; int len; SCTP_TCB_LOCK_ASSERT(stcb); if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) && (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) { /* can't send a new one if there is one in flight already */ return; } /* compose an ASCONF chunk, maximum length is PMTU */ m_asconf = sctp_compose_asconf(stcb, &len, addr_locked); if (m_asconf == NULL) { return; } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ sctp_m_freem(m_asconf); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_ASCONF; chk->rec.chunk_id.can_take_data = 0; chk->flags = CHUNK_FLAGS_FRAGMENT_OK; chk->data = m_asconf; chk->send_size = len; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; chk->whoTo = net; if (chk->whoTo) { atomic_add_int(&chk->whoTo->ref_count, 1); } TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; return; } void sctp_send_asconf_ack(struct sctp_tcb *stcb) { /* * formulate and queue a asconf-ack back to sender. the asconf-ack * must be stored in the tcb. 
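 * The ACKs cached on asoc->asconf_ack_sent are copied and re-queued, so a
 * retransmitted ASCONF gets the identical ASCONF-ACK back.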
*/ struct sctp_tmit_chunk *chk; struct sctp_asconf_ack *ack, *latest_ack; struct mbuf *m_ack; struct sctp_nets *net = NULL; SCTP_TCB_LOCK_ASSERT(stcb); /* Get the latest ASCONF-ACK */ latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead); if (latest_ack == NULL) { return; } if (latest_ack->last_sent_to != NULL && latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) { /* we're doing a retransmission */ net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); if (net == NULL) { /* no alternate */ if (stcb->asoc.last_control_chunk_from == NULL) { if (stcb->asoc.alternate) { net = stcb->asoc.alternate; } else { net = stcb->asoc.primary_destination; } } else { net = stcb->asoc.last_control_chunk_from; } } } else { /* normal case */ if (stcb->asoc.last_control_chunk_from == NULL) { if (stcb->asoc.alternate) { net = stcb->asoc.alternate; } else { net = stcb->asoc.primary_destination; } } else { net = stcb->asoc.last_control_chunk_from; } } latest_ack->last_sent_to = net; TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) { if (ack->data == NULL) { continue; } /* copy the asconf_ack */ m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT); if (m_ack == NULL) { /* couldn't copy it */ return; } #ifdef SCTP_MBUF_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY); } #endif sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { /* no memory */ if (m_ack) sctp_m_freem(m_ack); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_ASCONF_ACK; chk->rec.chunk_id.can_take_data = 1; chk->flags = CHUNK_FLAGS_FRAGMENT_OK; chk->whoTo = net; if (chk->whoTo) { atomic_add_int(&chk->whoTo->ref_count, 1); } chk->data = m_ack; chk->send_size = ack->len; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->asoc = &stcb->asoc; TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); chk->asoc->ctrl_queue_cnt++; } return; } static int sctp_chunk_retransmission(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc, int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked) { /*- * send out one MTU of retransmission. If fast_retransmit is * happening we ignore the cwnd. Otherwise we obey the cwnd and * rwnd. For a Cookie or Asconf in the control chunk queue we * retransmit them by themselves. * * For data chunks we will pick out the lowest TSN's in the sent_queue * marked for resend and bundle them all together (up to a MTU of * destination). The address to send to should have been * selected/changed where the retransmission was marked (i.e. in FR * or t3-timeout routines). 
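 * The number of chunks actually put on the wire is reported back
 * through *cnt_out.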
*/ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; struct sctp_tmit_chunk *chk, *fwd; struct mbuf *m, *endofchain; struct sctp_nets *net = NULL; uint32_t tsns_sent = 0; int no_fragmentflg, bundle_at; unsigned int mtu; int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; struct sctp_auth_chunk *auth = NULL; uint32_t auth_offset = 0; uint16_t auth_keyid; int override_ok = 1; int data_auth_reqd = 0; uint32_t dmtu = 0; bool use_zero_crc; SCTP_TCB_LOCK_ASSERT(stcb); tmr_started = ctl_cnt = 0; no_fragmentflg = 1; fwd_tsn = 0; *cnt_out = 0; fwd = NULL; endofchain = m = NULL; auth_keyid = stcb->asoc.authinfo.active_keyid; #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xC3, 1); #endif if ((TAILQ_EMPTY(&asoc->sent_queue)) && (TAILQ_EMPTY(&asoc->control_send_queue))) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n", asoc->sent_queue_retran_cnt); asoc->sent_queue_cnt = 0; asoc->sent_queue_cnt_removeable = 0; /* send back 0/0 so we enter normal transmission */ *cnt_out = 0; return (0); } TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || (chk->rec.chunk_id.id == SCTP_STREAM_RESET) || (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { if (chk->sent != SCTP_DATAGRAM_RESEND) { continue; } if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { if (chk != asoc->str_reset) { /* * not eligible for retran if its * not ours */ continue; } } ctl_cnt++; if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { fwd_tsn = 1; } /* * Add an AUTH chunk, if chunk requires it save the * offset into the chain for AUTH */ if ((auth == NULL) && (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, stcb->asoc.peer_auth_chunks))) { m = sctp_add_auth_chunk(m, &endofchain, &auth, &auth_offset, stcb, chk->rec.chunk_id.id); SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); break; } } one_chunk = 0; /* do we have control chunks to retransmit? */ if (m != NULL) { /* Start a timer no matter if we succeed or fail */ switch (asoc->snd_edmid) { case SCTP_EDMID_LOWER_LAYER_DTLS: use_zero_crc = true; break; default: use_zero_crc = false; break; } if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); use_zero_crc = false; } else if (chk->rec.chunk_id.id == SCTP_ASCONF) { /* XXXMT: Can this happen? 
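 * ASCONF chunks are normally placed on the dedicated asconf_send_queue
 * (see sctp_send_asconf()), not on the control queue walked here.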
*/ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); use_zero_crc = false; } chk->snd_count++; /* update our count */ if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset, auth, stcb->asoc.authinfo.active_keyid, no_fragmentflg, 0, 0, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), chk->whoTo->port, NULL, 0, 0, use_zero_crc, so_locked))) { SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (error == ENOBUFS) { asoc->ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } return (error); } else { asoc->ifp_had_enobuf = 0; } endofchain = NULL; auth = NULL; auth_offset = 0; /* * We don't want to mark the net->sent time here since this * we use this for HB and retrans cannot measure RTT */ /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ *cnt_out += 1; chk->sent = SCTP_DATAGRAM_SENT; sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); if (fwd_tsn == 0) { return (0); } else { /* Clean up the fwd-tsn list */ sctp_clean_up_ctl(stcb, asoc, so_locked); return (0); } } /* * Ok, it is just data retransmission we need to do or that and a * fwd-tsn with it all. */ if (TAILQ_EMPTY(&asoc->sent_queue)) { return (SCTP_RETRAN_DONE); } if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) { /* not yet open, resend the cookie and that is it */ return (1); } #ifdef SCTP_AUDITING_ENABLED sctp_auditing(20, inp, stcb, NULL); #endif data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { if (chk->sent != SCTP_DATAGRAM_RESEND) { /* No, not sent to this net or not ready for rtx */ continue; } if (chk->data == NULL) { SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n", chk->rec.data.tsn, chk->snd_count, chk->sent); continue; } if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) && (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) { struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up", chk->rec.data.tsn, chk->snd_count); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); atomic_add_int(&stcb->asoc.refcnt, 1); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, so_locked); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); return (SCTP_RETRAN_EXIT); } /* pick up the net */ net = chk->whoTo; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: mtu = net->mtu - SCTP_MIN_OVERHEAD; break; #endif default: /* TSNH */ mtu = net->mtu; break; } if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { /* No room in peers rwnd */ uint32_t tsn; tsn = asoc->last_acked_seq + 1; if (tsn == chk->rec.data.tsn) { /* * we make a special exception for this * case. The peer has no rwnd but is missing * the lowest chunk.. which is probably what * is holding up the rwnd. */ goto one_chunk_around; } return (1); } one_chunk_around: if (asoc->peers_rwnd < mtu) { one_chunk = 1; if ((asoc->peers_rwnd == 0) && (asoc->total_flight == 0)) { chk->window_probe = 1; chk->whoTo->window_probe = 1; } } #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xC3, 2); #endif bundle_at = 0; m = NULL; net->fast_retran_ip = 0; if (chk->rec.data.doing_fast_retransmit == 0) { /* * if no FR in progress skip destination that have * flight_size > cwnd. 
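 * Fast retransmit deliberately ignores cwnd, as noted in the function
 * header comment.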
*/ if (net->flight_size >= net->cwnd) { continue; } } else { /* * Mark the destination net to have FR recovery * limits put on it. */ *fr_done = 1; net->fast_retran_ip = 1; } /* * if no AUTH is yet included and this chunk requires it, * make sure to account for it. We don't apply the size * until the AUTH chunk is actually added below in case * there is no room for this chunk. */ if (data_auth_reqd && (auth == NULL)) { dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); } else dmtu = 0; if ((chk->send_size <= (mtu - dmtu)) || (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { /* ok we will add this one */ if (data_auth_reqd) { if (auth == NULL) { m = sctp_add_auth_chunk(m, &endofchain, &auth, &auth_offset, stcb, SCTP_DATA); auth_keyid = chk->auth_keyid; override_ok = 0; SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } else if (override_ok) { auth_keyid = chk->auth_keyid; override_ok = 0; } else if (chk->auth_keyid != auth_keyid) { /* different keyid, so done bundling */ break; } } m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); if (m == NULL) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } /* Do clear IP_DF ? */ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { no_fragmentflg = 0; } /* update our MTU size */ if (mtu > (chk->send_size + dmtu)) mtu -= (chk->send_size + dmtu); else mtu = 0; data_list[bundle_at++] = chk; if (one_chunk && (asoc->total_flight <= 0)) { SCTP_STAT_INCR(sctps_windowprobed); } } if (one_chunk == 0) { /* * now are there anymore forward from chk to pick * up? */ for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) { if (fwd->sent != SCTP_DATAGRAM_RESEND) { /* Nope, not for retran */ continue; } if (fwd->whoTo != net) { /* Nope, not the net in question */ continue; } if (data_auth_reqd && (auth == NULL)) { dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); } else dmtu = 0; if (fwd->send_size <= (mtu - dmtu)) { if (data_auth_reqd) { if (auth == NULL) { m = sctp_add_auth_chunk(m, &endofchain, &auth, &auth_offset, stcb, SCTP_DATA); auth_keyid = fwd->auth_keyid; override_ok = 0; SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } else if (override_ok) { auth_keyid = fwd->auth_keyid; override_ok = 0; } else if (fwd->auth_keyid != auth_keyid) { /* * different keyid, * so done bundling */ break; } } m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); if (m == NULL) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } /* Do clear IP_DF ? */ if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { no_fragmentflg = 0; } /* update our MTU size */ if (mtu > (fwd->send_size + dmtu)) mtu -= (fwd->send_size + dmtu); else mtu = 0; data_list[bundle_at++] = fwd; if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { break; } } else { /* can't fit so we are done */ break; } } } /* Is there something to send for this destination? */ if (m) { /* * No matter if we fail/or succeed we should start a * timer. A failure is like a lost IP packet :-) */ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { /* * no timer running on this destination * restart it. 
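 * so a lost retransmission will be recovered by the T3-rtx timeout.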
*/ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); tmr_started = 1; } switch (asoc->snd_edmid) { case SCTP_EDMID_LOWER_LAYER_DTLS: use_zero_crc = true; break; default: use_zero_crc = false; break; } /* Now lets send it, if there is anything to send :> */ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, (struct sockaddr *)&net->ro._l_addr, m, auth_offset, auth, auth_keyid, no_fragmentflg, 0, 0, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, 0, 0, use_zero_crc, so_locked))) { /* error, we could not output */ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (error == ENOBUFS) { asoc->ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } return (error); } else { asoc->ifp_had_enobuf = 0; } endofchain = NULL; auth = NULL; auth_offset = 0; /* For HB's */ /* * We don't want to mark the net->sent time here * since this we use this for HB and retrans cannot * measure RTT */ /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ /* For auto-close */ if (*now_filled == 0) { (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); *now = asoc->time_last_sent; *now_filled = 1; } else { asoc->time_last_sent = *now; } *cnt_out += bundle_at; #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xC4, bundle_at); #endif if (bundle_at) { tsns_sent = data_list[0]->rec.data.tsn; } for (i = 0; i < bundle_at; i++) { SCTP_STAT_INCR(sctps_sendretransdata); data_list[i]->sent = SCTP_DATAGRAM_SENT; /* * When we have a revoked data, and we * retransmit it, then we clear the revoked * flag since this flag dictates if we * subtracted from the fs */ if (data_list[i]->rec.data.chunk_was_revoked) { /* Deflate the cwnd */ data_list[i]->whoTo->cwnd -= data_list[i]->book_size; data_list[i]->rec.data.chunk_was_revoked = 0; } data_list[i]->snd_count++; sctp_ucount_decr(asoc->sent_queue_retran_cnt); /* record the time */ data_list[i]->sent_rcv_time = asoc->time_last_sent; if (data_list[i]->book_size_scale) { /* * need to double the book size on * this one */ data_list[i]->book_size_scale = 0; /* * Since we double the booksize, we * must also double the output queue * size, since this get shrunk when * we free by this amount. */ atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size); data_list[i]->book_size *= 2; } else { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); } asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND, data_list[i]->whoTo->flight_size, data_list[i]->book_size, (uint32_t)(uintptr_t)data_list[i]->whoTo, data_list[i]->rec.data.tsn); } sctp_flight_size_increase(data_list[i]); sctp_total_flight_increase(stcb, data_list[i]); if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { /* SWS sender side engages */ asoc->peers_rwnd = 0; } if ((i == 0) && (data_list[i]->rec.data.doing_fast_retransmit)) { SCTP_STAT_INCR(sctps_sendfastretrans); if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && (tmr_started == 0)) { /*- * ok we just fast-retrans'd * the lowest TSN, i.e the * first on the list. In * this case we want to give * some more time to get a * SACK back without a * t3-expiring. 
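 * Stopping and restarting SCTP_TIMER_TYPE_SEND grants a fresh RTO for
 * this destination before the next expiry.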
*/ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); } } } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); } #ifdef SCTP_AUDITING_ENABLED sctp_auditing(21, inp, stcb, NULL); #endif } else { /* None will fit */ return (1); } if (asoc->sent_queue_retran_cnt <= 0) { /* all done we have no more to retran */ asoc->sent_queue_retran_cnt = 0; break; } if (one_chunk) { /* No more room in rwnd */ return (1); } /* stop the for loop here. we sent out a packet */ break; } return (0); } static void sctp_timer_validation(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc) { struct sctp_nets *net; /* Validate that a timer is running somewhere */ TAILQ_FOREACH(net, &asoc->nets, sctp_next) { if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { /* Here is a timer */ return; } } SCTP_TCB_LOCK_ASSERT(stcb); /* Gak, we did not have a timer somewhere */ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n"); if (asoc->alternate) { sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate); } else { sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); } return; } void sctp_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_where, int so_locked) { /*- * Ok this is the generic chunk service queue. we must do the * following: * - See if there are retransmits pending, if so we must * do these first. * - Service the stream queue that is next, moving any * message (note I must get a complete message i.e. * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning * TSN's * - Check to see if the cwnd/rwnd allows any output, if so we * go ahead and formulate and send the low level chunks. Making sure * to combine any control in the control chunk queue also. */ struct sctp_association *asoc; struct sctp_nets *net; int error = 0, num_out, tot_out = 0, ret = 0, reason_code; unsigned int burst_cnt = 0; struct timeval now; int now_filled = 0; int nagle_on; uint32_t frag_point = sctp_get_frag_point(stcb); int un_sent = 0; int fr_done; unsigned int tot_frs = 0; asoc = &stcb->asoc; do_it_again: /* The Nagle algorithm is only applied when handling a send call. */ if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { nagle_on = 0; } else { nagle_on = 1; } } else { nagle_on = 0; } SCTP_TCB_LOCK_ASSERT(stcb); un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); if ((un_sent <= 0) && (TAILQ_EMPTY(&asoc->control_send_queue)) && (TAILQ_EMPTY(&asoc->asconf_send_queue)) && (asoc->sent_queue_retran_cnt == 0) && (asoc->trigger_reset == 0)) { /* Nothing to do unless there is something to be sent left */ return; } /* * Do we have something to send, data or control AND a sack timer * running, if so piggy-back the sack. */ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { sctp_send_sack(stcb, so_locked); sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); } while (asoc->sent_queue_retran_cnt) { /*- * Ok, it is retransmission time only, we send out only ONE * packet with a single call off to the retran code. */ if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { /*- * Special hook for handling cookies discarded * by peer that carried data. Send cookie-ack only * and then the next call with get the retran's. 
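 * Only control chunks go out on this pass; the data retransmissions
 * are picked up on the next call.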
*/ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, &now, &now_filled, frag_point, so_locked); return; } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { /* if its not from a HB then do it */ fr_done = 0; ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked); if (fr_done) { tot_frs++; } } else { /* * its from any other place, we don't allow retran * output (only control) */ ret = 1; } if (ret > 0) { /* Can't send anymore */ /*- * now lets push out control by calling med-level * output once. this assures that we WILL send HB's * if queued too. */ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, &now, &now_filled, frag_point, so_locked); #ifdef SCTP_AUDITING_ENABLED sctp_auditing(8, inp, stcb, NULL); #endif sctp_timer_validation(inp, stcb, asoc); return; } if (ret < 0) { /*- * The count was off.. retran is not happening so do * the normal retransmission. */ #ifdef SCTP_AUDITING_ENABLED sctp_auditing(9, inp, stcb, NULL); #endif if (ret == SCTP_RETRAN_EXIT) { return; } break; } if (from_where == SCTP_OUTPUT_FROM_T3) { /* Only one transmission allowed out of a timeout */ #ifdef SCTP_AUDITING_ENABLED sctp_auditing(10, inp, stcb, NULL); #endif /* Push out any control */ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, &now, &now_filled, frag_point, so_locked); return; } if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) { /* Hit FR burst limit */ return; } if ((num_out == 0) && (ret == 0)) { /* No more retrans to send */ break; } } #ifdef SCTP_AUDITING_ENABLED sctp_auditing(12, inp, stcb, NULL); #endif /* Check for bad destinations, if they exist move chunks around. */ TAILQ_FOREACH(net, &asoc->nets, sctp_next) { if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { /*- * if possible move things off of this address we * still may send below due to the dormant state but * we try to find an alternate address to send to * and if we have one we move all queued data on the * out wheel to this alternate address. 
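 * (this is only attempted while something still references the net,
 * i.e. ref_count > 1).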
*/ if (net->ref_count > 1) sctp_move_chunks_from_net(stcb, net); } else { /*- * if ((asoc->sat_network) || (net->addr_is_local)) * { burst_limit = asoc->max_burst * * SCTP_SAT_NETWORK_BURST_INCR; } */ if (asoc->max_burst > 0) { if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) { if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) { /* * JRS - Use the congestion * control given in the * congestion control module */ asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED); } SCTP_STAT_INCR(sctps_maxburstqueued); } net->fast_retran_ip = 0; } else { if (net->flight_size == 0) { /* * Should be decaying the * cwnd here */ ; } } } } } burst_cnt = 0; do { error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 0, from_where, &now, &now_filled, frag_point, so_locked); if (error) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); } break; } SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out); tot_out += num_out; burst_cnt++; if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); if (num_out == 0) { sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); } } if (nagle_on) { /* * When the Nagle algorithm is used, look at how * much is unsent, then if its smaller than an MTU * and we have data in flight we stop, except if we * are handling a fragmented user message. */ un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight; if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && (stcb->asoc.total_flight > 0)) { /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/ break; } } if (TAILQ_EMPTY(&asoc->control_send_queue) && TAILQ_EMPTY(&asoc->send_queue) && sctp_is_there_unsent_data(stcb, so_locked) == 0) { /* Nothing left to send */ break; } if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { /* Nothing left to send */ break; } } while (num_out && ((asoc->max_burst == 0) || SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) || (burst_cnt < asoc->max_burst))); if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) { if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) { SCTP_STAT_INCR(sctps_maxburstqueued); asoc->burst_limit_applied = 1; if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); } } else { asoc->burst_limit_applied = 0; } } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); } SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n", tot_out); /*- * Now we need to clean up the control chunk chain if a ECNE is on * it. It must be marked as UNSENT again so next call will continue * to send it until such time that we get a CWR, to remove it. 
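 * That cleanup is handled by sctp_fix_ecn_echo() below.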
*/ if (stcb->asoc.ecn_echo_cnt_onq) sctp_fix_ecn_echo(asoc); if (stcb->asoc.trigger_reset) { if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) { goto do_it_again; } } return; } int sctp_output( struct sctp_inpcb *inp, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p, int flags) { if (inp == NULL) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); return (EINVAL); } if (inp->sctp_socket == NULL) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); return (EINVAL); } return (sctp_sosend(inp->sctp_socket, addr, (struct uio *)NULL, m, control, flags, p )); } void send_forward_tsn(struct sctp_tcb *stcb, struct sctp_association *asoc) { struct sctp_tmit_chunk *chk, *at, *tp1, *last; struct sctp_forward_tsn_chunk *fwdtsn; struct sctp_strseq *strseq; struct sctp_strseq_mid *strseq_m; uint32_t advance_peer_ack_point; unsigned int cnt_of_space, i, ovh; unsigned int space_needed; unsigned int cnt_of_skipped = 0; SCTP_TCB_LOCK_ASSERT(stcb); TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { /* mark it to unsent */ chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; /* Do we correct its output location? */ if (chk->whoTo) { sctp_free_remote_addr(chk->whoTo); chk->whoTo = NULL; } goto sctp_fill_in_rest; } } /* Ok if we reach here we must build one */ sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { return; } asoc->fwd_tsn_cnt++; chk->copy_by_ref = 0; /* * We don't do the old thing here since this is used not for on-wire * but to tell if we are sending a fwd-tsn by the stack during * output. And if its a IFORWARD or a FORWARD it is a fwd-tsn. */ chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; chk->rec.chunk_id.can_take_data = 0; chk->flags = 0; chk->asoc = asoc; chk->whoTo = NULL; chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); return; } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; sctp_fill_in_rest: /*- * Here we go through and fill out the part that deals with * stream/seq of the ones we skip. */ SCTP_BUF_LEN(chk->data) = 0; TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { if ((at->sent != SCTP_FORWARD_TSN_SKIP) && (at->sent != SCTP_DATAGRAM_NR_ACKED)) { /* no more to look at */ break; } if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) { /* We don't report these */ continue; } cnt_of_skipped++; } if (asoc->idata_supported) { space_needed = (sizeof(struct sctp_forward_tsn_chunk) + (cnt_of_skipped * sizeof(struct sctp_strseq_mid))); } else { space_needed = (sizeof(struct sctp_forward_tsn_chunk) + (cnt_of_skipped * sizeof(struct sctp_strseq))); } cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data); if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { ovh = SCTP_MIN_OVERHEAD; } else { ovh = SCTP_MIN_V4_OVERHEAD; } if (cnt_of_space > (asoc->smallest_mtu - ovh)) { /* trim to a mtu size */ cnt_of_space = asoc->smallest_mtu - ovh; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { sctp_misc_ints(SCTP_FWD_TSN_CHECK, 0xff, 0, cnt_of_skipped, asoc->advanced_peer_ack_point); } advance_peer_ack_point = asoc->advanced_peer_ack_point; if (cnt_of_space < space_needed) { /*- * ok we must trim down the chunk by lowering the * advance peer ack point. 
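 * i.e. report only as many skipped stream/sequence entries as fit into
 * the available mbuf space.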
*/ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { sctp_misc_ints(SCTP_FWD_TSN_CHECK, 0xff, 0xff, cnt_of_space, space_needed); } cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk); if (asoc->idata_supported) { cnt_of_skipped /= sizeof(struct sctp_strseq_mid); } else { cnt_of_skipped /= sizeof(struct sctp_strseq); } /*- * Go through and find the TSN that will be the one * we report. */ at = TAILQ_FIRST(&asoc->sent_queue); if (at != NULL) { for (i = 0; i < cnt_of_skipped; i++) { tp1 = TAILQ_NEXT(at, sctp_next); if (tp1 == NULL) { break; } at = tp1; } } if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { sctp_misc_ints(SCTP_FWD_TSN_CHECK, 0xff, cnt_of_skipped, at->rec.data.tsn, asoc->advanced_peer_ack_point); } last = at; /*- * last now points to last one I can report, update * peer ack point */ if (last) { advance_peer_ack_point = last->rec.data.tsn; } if (asoc->idata_supported) { space_needed = sizeof(struct sctp_forward_tsn_chunk) + cnt_of_skipped * sizeof(struct sctp_strseq_mid); } else { space_needed = sizeof(struct sctp_forward_tsn_chunk) + cnt_of_skipped * sizeof(struct sctp_strseq); } } chk->send_size = space_needed; /* Setup the chunk */ fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); fwdtsn->ch.chunk_length = htons(chk->send_size); fwdtsn->ch.chunk_flags = 0; if (asoc->idata_supported) { fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN; } else { fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; } fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point); SCTP_BUF_LEN(chk->data) = chk->send_size; fwdtsn++; /*- * Move pointer to after the fwdtsn and transfer to the * strseq pointer. */ if (asoc->idata_supported) { strseq_m = (struct sctp_strseq_mid *)fwdtsn; strseq = NULL; } else { strseq = (struct sctp_strseq *)fwdtsn; strseq_m = NULL; } /*- * Now populate the strseq list. This is done blindly * without pulling out duplicate stream info. This is * inefficient but won't harm the process since the peer will * look at these in sequence and will thus release anything. * It could mean we exceed the PMTU and chop off some that * we could have included.. but this is unlikely (aka 1432/4 * would mean 300+ stream seq's would have to be reported in * one FWD-TSN. With a bit of work we can later FIX this to * optimize and pull out duplicates.. but it does add more * overhead. So for now... not! */ i = 0; TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { if (i >= cnt_of_skipped) { break; } if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) { /* We don't report these */ continue; } if (at->rec.data.tsn == advance_peer_ack_point) { at->rec.data.fwd_tsn_cnt = 0; } if (asoc->idata_supported) { strseq_m->sid = htons(at->rec.data.sid); if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG); } else { strseq_m->flags = 0; } strseq_m->mid = htonl(at->rec.data.mid); strseq_m++; } else { strseq->sid = htons(at->rec.data.sid); strseq->ssn = htons((uint16_t)at->rec.data.mid); strseq++; } i++; } return; } void sctp_send_sack(struct sctp_tcb *stcb, int so_locked) { /*- * Queue up a SACK or NR-SACK in the control queue. * We must first check to see if a SACK or NR-SACK is * somehow on the control queue. * If so, we will take and and remove the old one. 
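 * Its chunk structure is then reused for the SACK being built now.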
*/ struct sctp_association *asoc; struct sctp_tmit_chunk *chk, *a_chk; struct sctp_sack_chunk *sack; struct sctp_nr_sack_chunk *nr_sack; struct sctp_gap_ack_block *gap_descriptor; const struct sack_track *selector; int mergeable = 0; int offset; caddr_t limit; uint32_t *dup; int limit_reached = 0; unsigned int i, siz, j; unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space; int num_dups = 0; int space_req; uint32_t highest_tsn; uint8_t flags; uint8_t type; uint8_t tsn_map; if (stcb->asoc.nrsack_supported == 1) { type = SCTP_NR_SELECTIVE_ACK; } else { type = SCTP_SELECTIVE_ACK; } a_chk = NULL; asoc = &stcb->asoc; SCTP_TCB_LOCK_ASSERT(stcb); if (asoc->last_data_chunk_from == NULL) { /* Hmm we never received anything */ return; } sctp_slide_mapping_arrays(stcb); sctp_set_rwnd(stcb, asoc); TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if (chk->rec.chunk_id.id == type) { /* Hmm, found a sack already on queue, remove it */ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt--; a_chk = chk; if (a_chk->data) { sctp_m_freem(a_chk->data); a_chk->data = NULL; } if (a_chk->whoTo) { sctp_free_remote_addr(a_chk->whoTo); a_chk->whoTo = NULL; } break; } } if (a_chk == NULL) { sctp_alloc_a_chunk(stcb, a_chk); if (a_chk == NULL) { /* No memory so we drop the idea, and set a timer */ if (stcb->asoc.delayed_ack) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4); sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL); } else { stcb->asoc.send_sack = 1; } return; } a_chk->copy_by_ref = 0; a_chk->rec.chunk_id.id = type; a_chk->rec.chunk_id.can_take_data = 1; } /* Clear our pkt counts */ asoc->data_pkts_seen = 0; a_chk->flags = 0; a_chk->asoc = asoc; a_chk->snd_count = 0; a_chk->send_size = 0; /* fill in later */ a_chk->sent = SCTP_DATAGRAM_UNSENT; a_chk->whoTo = NULL; if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) == 0) { /*- * Ok, the destination for the SACK is unreachable, lets see if * we can select an alternate to asoc->last_data_chunk_from */ a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0); if (a_chk->whoTo == NULL) { /* Nope, no alternate */ a_chk->whoTo = asoc->last_data_chunk_from; } } else { a_chk->whoTo = asoc->last_data_chunk_from; } if (a_chk->whoTo) { atomic_add_int(&a_chk->whoTo->ref_count, 1); } if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) { highest_tsn = asoc->highest_tsn_inside_map; } else { highest_tsn = asoc->highest_tsn_inside_nr_map; } if (highest_tsn == asoc->cumulative_tsn) { /* no gaps */ if (type == SCTP_SELECTIVE_ACK) { space_req = sizeof(struct sctp_sack_chunk); } else { space_req = sizeof(struct sctp_nr_sack_chunk); } } else { /* gaps get a cluster */ space_req = MCLBYTES; } /* Ok now lets formulate a MBUF with our sack */ a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA); if ((a_chk->data == NULL) || (a_chk->whoTo == NULL)) { /* rats, no mbuf memory */ if (a_chk->data) { /* was a problem with the destination */ sctp_m_freem(a_chk->data); a_chk->data = NULL; } sctp_free_a_chunk(stcb, a_chk, so_locked); /* sa_ignore NO_NULL_CHK */ if (stcb->asoc.delayed_ack) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL); } else { stcb->asoc.send_sack = 1; } return; } /* ok, lets go through and fill it in */ SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD); space = 
(unsigned int)M_TRAILINGSPACE(a_chk->data); if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) { space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD); } limit = mtod(a_chk->data, caddr_t); limit += space; flags = 0; if ((asoc->sctp_cmt_on_off > 0) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { /*- * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been * received, then set high bit to 1, else 0. Reset * pkts_rcvd. */ flags |= (asoc->cmt_dac_pkts_rcvd << 6); asoc->cmt_dac_pkts_rcvd = 0; } #ifdef SCTP_ASOCLOG_OF_TSNS stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn; stcb->asoc.cumack_log_atsnt++; if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) { stcb->asoc.cumack_log_atsnt = 0; } #endif /* reset the readers interpretation */ stcb->freed_by_sorcv_sincelast = 0; if (type == SCTP_SELECTIVE_ACK) { sack = mtod(a_chk->data, struct sctp_sack_chunk *); nr_sack = NULL; gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk)); if (highest_tsn > asoc->mapping_array_base_tsn) { siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8; } else { siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8; } } else { sack = NULL; nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *); gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk)); if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) { siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; } else { siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8; } } if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { offset = 1; } else { offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; } if (((type == SCTP_SELECTIVE_ACK) && SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) || ((type == SCTP_NR_SELECTIVE_ACK) && SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) { /* we have a gap .. maybe */ for (i = 0; i < siz; i++) { tsn_map = asoc->mapping_array[i]; if (type == SCTP_SELECTIVE_ACK) { tsn_map |= asoc->nr_mapping_array[i]; } if (i == 0) { /* * Clear all bits corresponding to TSNs * smaller or equal to the cumulative TSN. */ tsn_map &= (~0U << (1 - offset)); } selector = &sack_array[tsn_map]; if (mergeable && selector->right_edge) { /* * Backup, left and right edges were ok to * merge. 
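 * The previous descriptor is backed over and extended instead of a new
 * gap block being emitted.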
*/ num_gap_blocks--; gap_descriptor--; } if (selector->num_entries == 0) mergeable = 0; else { for (j = 0; j < selector->num_entries; j++) { if (mergeable && selector->right_edge) { /* * do a merge by NOT setting * the left side */ mergeable = 0; } else { /* * no merge, set the left * side */ mergeable = 0; gap_descriptor->start = htons((selector->gaps[j].start + offset)); } gap_descriptor->end = htons((selector->gaps[j].end + offset)); num_gap_blocks++; gap_descriptor++; if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { /* no more room */ limit_reached = 1; break; } } if (selector->left_edge) { mergeable = 1; } } if (limit_reached) { /* Reached the limit stop */ break; } offset += 8; } } if ((type == SCTP_NR_SELECTIVE_ACK) && (limit_reached == 0)) { mergeable = 0; if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) { siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; } else { siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8; } if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { offset = 1; } else { offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; } if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) { /* we have a gap .. maybe */ for (i = 0; i < siz; i++) { tsn_map = asoc->nr_mapping_array[i]; if (i == 0) { /* * Clear all bits corresponding to * TSNs smaller or equal to the * cumulative TSN. */ tsn_map &= (~0U << (1 - offset)); } selector = &sack_array[tsn_map]; if (mergeable && selector->right_edge) { /* * Backup, left and right edges were * ok to merge. */ num_nr_gap_blocks--; gap_descriptor--; } if (selector->num_entries == 0) mergeable = 0; else { for (j = 0; j < selector->num_entries; j++) { if (mergeable && selector->right_edge) { /* * do a merge by NOT * setting the left * side */ mergeable = 0; } else { /* * no merge, set the * left side */ mergeable = 0; gap_descriptor->start = htons((selector->gaps[j].start + offset)); } gap_descriptor->end = htons((selector->gaps[j].end + offset)); num_nr_gap_blocks++; gap_descriptor++; if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { /* no more room */ limit_reached = 1; break; } } if (selector->left_edge) { mergeable = 1; } } if (limit_reached) { /* Reached the limit stop */ break; } offset += 8; } } } /* now we must add any dups we are going to report. */ if ((limit_reached == 0) && (asoc->numduptsns)) { dup = (uint32_t *)gap_descriptor; for (i = 0; i < asoc->numduptsns; i++) { *dup = htonl(asoc->dup_tsns[i]); dup++; num_dups++; if (((caddr_t)dup + sizeof(uint32_t)) > limit) { /* no more room */ break; } } asoc->numduptsns = 0; } /* * now that the chunk is prepared queue it to the control chunk * queue. 
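 * The chunk header and length fields are filled in below from the
 * final gap-block and duplicate-TSN counts.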
*/ if (type == SCTP_SELECTIVE_ACK) { a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) + (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) + num_dups * sizeof(int32_t)); SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); sack->sack.a_rwnd = htonl(asoc->my_rwnd); sack->sack.num_gap_ack_blks = htons(num_gap_blocks); sack->sack.num_dup_tsns = htons(num_dups); sack->ch.chunk_type = type; sack->ch.chunk_flags = flags; sack->ch.chunk_length = htons(a_chk->send_size); } else { a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) + (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) + num_dups * sizeof(int32_t)); SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd); nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks); nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks); nr_sack->nr_sack.num_dup_tsns = htons(num_dups); nr_sack->nr_sack.reserved = 0; nr_sack->ch.chunk_type = type; nr_sack->ch.chunk_flags = flags; nr_sack->ch.chunk_length = htons(a_chk->send_size); } TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next); asoc->my_last_reported_rwnd = asoc->my_rwnd; asoc->ctrl_queue_cnt++; asoc->send_sack = 0; SCTP_STAT_INCR(sctps_sendsacks); return; } void sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked) { struct mbuf *m_abort, *m, *m_last; struct mbuf *m_out, *m_end = NULL; struct sctp_abort_chunk *abort; struct sctp_auth_chunk *auth = NULL; struct sctp_nets *net; uint32_t vtag; uint32_t auth_offset = 0; int error; uint16_t cause_len, chunk_len, padding_len; bool use_zero_crc; SCTP_TCB_LOCK_ASSERT(stcb); /*- * Add an AUTH chunk, if chunk requires it and save the offset into * the chain for AUTH */ if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION, stcb->asoc.peer_auth_chunks)) { m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset, stcb, SCTP_ABORT_ASSOCIATION); SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } else { m_out = NULL; } switch (stcb->asoc.snd_edmid) { case SCTP_EDMID_LOWER_LAYER_DTLS: use_zero_crc = true; break; default: use_zero_crc = false; break; } m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER); if (m_abort == NULL) { if (m_out) { sctp_m_freem(m_out); } if (operr) { sctp_m_freem(operr); } return; } /* link in any error */ SCTP_BUF_NEXT(m_abort) = operr; cause_len = 0; m_last = NULL; for (m = operr; m; m = SCTP_BUF_NEXT(m)) { cause_len += (uint16_t)SCTP_BUF_LEN(m); if (SCTP_BUF_NEXT(m) == NULL) { m_last = m; } } SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk); chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len; padding_len = SCTP_SIZE32(chunk_len) - chunk_len; if (m_out == NULL) { /* NO Auth chunk prepended, so reserve space in front */ SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD); m_out = m_abort; } else { /* Put AUTH chunk at the front of the chain */ SCTP_BUF_NEXT(m_end) = m_abort; } if (stcb->asoc.alternate) { net = stcb->asoc.alternate; } else { net = stcb->asoc.primary_destination; } /* Fill in the ABORT chunk header. */ abort = mtod(m_abort, struct sctp_abort_chunk *); abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION; if (stcb->asoc.peer_vtag == 0) { /* This happens iff the assoc is in COOKIE-WAIT state. 
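 * In that case the ABORT carries our own verification tag and has the
 * T bit (SCTP_HAD_NO_TCB) set in its chunk flags.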
*/ vtag = stcb->asoc.my_vtag; abort->ch.chunk_flags = SCTP_HAD_NO_TCB; } else { vtag = stcb->asoc.peer_vtag; abort->ch.chunk_flags = 0; } abort->ch.chunk_length = htons(chunk_len); /* Add padding, if necessary. */ if (padding_len > 0) { if ((m_last == NULL) || (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) { sctp_m_freem(m_out); return; } } if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, (struct sockaddr *)&net->ro._l_addr, m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0, stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag), stcb->asoc.primary_destination->port, NULL, 0, 0, use_zero_crc, so_locked))) { SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (error == ENOBUFS) { stcb->asoc.ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } } else { stcb->asoc.ifp_had_enobuf = 0; } SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } void sctp_send_shutdown_complete(struct sctp_tcb *stcb, struct sctp_nets *net, int reflect_vtag) { /* formulate and SEND a SHUTDOWN-COMPLETE */ struct mbuf *m_shutdown_comp; struct sctp_shutdown_complete_chunk *shutdown_complete; uint32_t vtag; int error; uint8_t flags; bool use_zero_crc; m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); if (m_shutdown_comp == NULL) { /* no mbuf's */ return; } if (reflect_vtag) { flags = SCTP_HAD_NO_TCB; vtag = stcb->asoc.my_vtag; } else { flags = 0; vtag = stcb->asoc.peer_vtag; } switch (stcb->asoc.snd_edmid) { case SCTP_EDMID_LOWER_LAYER_DTLS: use_zero_crc = true; break; default: use_zero_crc = false; break; } shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *); shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; shutdown_complete->ch.chunk_flags = flags; shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk); if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, (struct sockaddr *)&net->ro._l_addr, m_shutdown_comp, 0, NULL, 0, 1, 0, 0, stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag), net->port, NULL, 0, 0, use_zero_crc, SCTP_SO_NOT_LOCKED))) { SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); if (error == ENOBUFS) { stcb->asoc.ifp_had_enobuf = 1; SCTP_STAT_INCR(sctps_lowlevelerr); } } else { stcb->asoc.ifp_had_enobuf = 0; } SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); return; } static void sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, uint8_t type, struct mbuf *cause, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port) { struct mbuf *o_pak; struct mbuf *mout; struct sctphdr *shout; struct sctp_chunkhdr *ch; #if defined(INET) || defined(INET6) struct udphdr *udp; #endif int ret, len, cause_len, padding_len; #ifdef INET struct sockaddr_in *src_sin, *dst_sin; struct ip *ip; #endif #ifdef INET6 struct sockaddr_in6 *src_sin6, *dst_sin6; struct ip6_hdr *ip6; #endif /* Compute the length of the cause and add final padding. 
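 * Causes are 32-bit aligned, so up to three bytes of padding are
 * appended to the last mbuf in the chain.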
*/ cause_len = 0; if (cause != NULL) { struct mbuf *m_at, *m_last = NULL; for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) { if (SCTP_BUF_NEXT(m_at) == NULL) m_last = m_at; cause_len += SCTP_BUF_LEN(m_at); } padding_len = cause_len % 4; if (padding_len != 0) { padding_len = 4 - padding_len; } if (padding_len != 0) { if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) { sctp_m_freem(cause); return; } } } else { padding_len = 0; } /* Get an mbuf for the header. */ len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); switch (dst->sa_family) { #ifdef INET case AF_INET: len += sizeof(struct ip); break; #endif #ifdef INET6 case AF_INET6: len += sizeof(struct ip6_hdr); break; #endif default: break; } #if defined(INET) || defined(INET6) if (port) { len += sizeof(struct udphdr); } #endif mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA); if (mout == NULL) { if (cause) { sctp_m_freem(cause); } return; } SCTP_BUF_RESV_UF(mout, max_linkhdr); SCTP_BUF_LEN(mout) = len; SCTP_BUF_NEXT(mout) = cause; M_SETFIB(mout, fibnum); mout->m_pkthdr.flowid = mflowid; M_HASHTYPE_SET(mout, mflowtype); #ifdef INET ip = NULL; #endif #ifdef INET6 ip6 = NULL; #endif switch (dst->sa_family) { #ifdef INET case AF_INET: src_sin = (struct sockaddr_in *)src; dst_sin = (struct sockaddr_in *)dst; ip = mtod(mout, struct ip *); ip->ip_v = IPVERSION; ip->ip_hl = (sizeof(struct ip) >> 2); ip->ip_tos = 0; ip->ip_off = htons(IP_DF); - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); ip->ip_ttl = MODULE_GLOBAL(ip_defttl); if (port) { ip->ip_p = IPPROTO_UDP; } else { ip->ip_p = IPPROTO_SCTP; } ip->ip_src.s_addr = dst_sin->sin_addr.s_addr; ip->ip_dst.s_addr = src_sin->sin_addr.s_addr; ip->ip_sum = 0; len = sizeof(struct ip); shout = (struct sctphdr *)((caddr_t)ip + len); break; #endif #ifdef INET6 case AF_INET6: src_sin6 = (struct sockaddr_in6 *)src; dst_sin6 = (struct sockaddr_in6 *)dst; ip6 = mtod(mout, struct ip6_hdr *); ip6->ip6_flow = htonl(0x60000000); if (V_ip6_auto_flowlabel) { ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); } ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim); if (port) { ip6->ip6_nxt = IPPROTO_UDP; } else { ip6->ip6_nxt = IPPROTO_SCTP; } ip6->ip6_src = dst_sin6->sin6_addr; ip6->ip6_dst = src_sin6->sin6_addr; len = sizeof(struct ip6_hdr); shout = (struct sctphdr *)((caddr_t)ip6 + len); break; #endif default: len = 0; shout = mtod(mout, struct sctphdr *); break; } #if defined(INET) || defined(INET6) if (port) { if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { sctp_m_freem(mout); return; } udp = (struct udphdr *)shout; udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); udp->uh_dport = port; udp->uh_sum = 0; udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) + cause_len + padding_len)); len += sizeof(struct udphdr); shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr)); } else { udp = NULL; } #endif shout->src_port = sh->dest_port; shout->dest_port = sh->src_port; shout->checksum = 0; if (vtag) { shout->v_tag = htonl(vtag); } else { shout->v_tag = sh->v_tag; } len += sizeof(struct sctphdr); ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr)); ch->chunk_type = type; if (vtag) { ch->chunk_flags = 0; } else { ch->chunk_flags = SCTP_HAD_NO_TCB; } ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len)); len += sizeof(struct sctp_chunkhdr); len += cause_len + padding_len; if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { sctp_m_freem(mout); 
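		/* could not get a header for the outgoing packet, drop it */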
return; } SCTP_ATTACH_CHAIN(o_pak, mout, len); switch (dst->sa_family) { #ifdef INET case AF_INET: if (port) { if (V_udp_cksum) { udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); } else { udp->uh_sum = 0; } } ip->ip_len = htons(len); if (port) { shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); if (V_udp_cksum) { SCTP_ENABLE_UDP_CSUM(o_pak); } } else { mout->m_pkthdr.csum_flags = CSUM_SCTP; mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); } #ifdef SCTP_PACKET_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { sctp_packet_log(o_pak); } #endif SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout); SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id); break; #endif #ifdef INET6 case AF_INET6: ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr))); if (port) { shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) { udp->uh_sum = 0xffff; } } else { mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); } #ifdef SCTP_PACKET_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { sctp_packet_log(o_pak); } #endif SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout); SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id); break; #endif default: SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n", dst->sa_family); sctp_m_freem(mout); SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); return; } SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); if (port) { UDPSTAT_INC(udps_opackets); } SCTP_STAT_INCR(sctps_sendpackets); SCTP_STAT_INCR_COUNTER64(sctps_outpackets); SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); if (ret) { SCTP_STAT_INCR(sctps_senderrors); } return; } void sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port) { sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL, mflowtype, mflowid, fibnum, vrf_id, port); } void sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked) { struct sctp_tmit_chunk *chk; struct sctp_heartbeat_chunk *hb; struct timeval now; SCTP_TCB_LOCK_ASSERT(stcb); if (net == NULL) { return; } (void)SCTP_GETTIME_TIMEVAL(&now); switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: break; #endif #ifdef INET6 case AF_INET6: break; #endif default: return; } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n"); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; chk->asoc = &stcb->asoc; chk->send_size = sizeof(struct sctp_heartbeat_chunk); chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, so_locked); return; } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); SCTP_BUF_LEN(chk->data) = chk->send_size; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->whoTo = net; atomic_add_int(&chk->whoTo->ref_count, 1); /* Now we have a mbuf that we can fill in with the details */ hb 
= mtod(chk->data, struct sctp_heartbeat_chunk *); memset(hb, 0, sizeof(struct sctp_heartbeat_chunk)); /* fill out chunk header */ hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST; hb->ch.chunk_flags = 0; hb->ch.chunk_length = htons(chk->send_size); /* Fill out hb parameter */ hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); hb->heartbeat.hb_info.time_value_1 = now.tv_sec; hb->heartbeat.hb_info.time_value_2 = now.tv_usec; /* Did our user request this one, put it in */ hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family; hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len; if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { /* * we only take from the entropy pool if the address is not * confirmed. */ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); } else { net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0; net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0; } switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: memcpy(hb->heartbeat.hb_info.address, &net->ro._l_addr.sin.sin_addr, sizeof(net->ro._l_addr.sin.sin_addr)); break; #endif #ifdef INET6 case AF_INET6: memcpy(hb->heartbeat.hb_info.address, &net->ro._l_addr.sin6.sin6_addr, sizeof(net->ro._l_addr.sin6.sin6_addr)); break; #endif default: if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_a_chunk(stcb, chk, so_locked); return; break; } net->hb_responded = 0; TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); stcb->asoc.ctrl_queue_cnt++; SCTP_STAT_INCR(sctps_sendheartbeat); return; } void sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn) { struct sctp_association *asoc; struct sctp_ecne_chunk *ecne; struct sctp_tmit_chunk *chk; if (net == NULL) { return; } asoc = &stcb->asoc; SCTP_TCB_LOCK_ASSERT(stcb); TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) { /* found a previous ECN_ECHO update it if needed */ uint32_t cnt, ctsn; ecne = mtod(chk->data, struct sctp_ecne_chunk *); ctsn = ntohl(ecne->tsn); if (SCTP_TSN_GT(high_tsn, ctsn)) { ecne->tsn = htonl(high_tsn); SCTP_STAT_INCR(sctps_queue_upd_ecne); } cnt = ntohl(ecne->num_pkts_since_cwr); cnt++; ecne->num_pkts_since_cwr = htonl(cnt); return; } } /* nope could not find one to update so we must build one */ sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { return; } SCTP_STAT_INCR(sctps_queue_upd_ecne); chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_ECN_ECHO; chk->rec.chunk_id.can_take_data = 0; chk->flags = 0; chk->asoc = &stcb->asoc; chk->send_size = sizeof(struct sctp_ecne_chunk); chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); return; } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); SCTP_BUF_LEN(chk->data) = chk->send_size; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->whoTo = net; atomic_add_int(&chk->whoTo->ref_count, 1); stcb->asoc.ecn_echo_cnt_onq++; ecne = mtod(chk->data, struct sctp_ecne_chunk *); ecne->ch.chunk_type = SCTP_ECN_ECHO; ecne->ch.chunk_flags = 0; ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk)); ecne->tsn = htonl(high_tsn); ecne->num_pkts_since_cwr = 
htonl(1); TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; } void sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, struct mbuf *m, int len, int iphlen, int bad_crc) { struct sctp_association *asoc; struct sctp_pktdrop_chunk *drp; struct sctp_tmit_chunk *chk; uint8_t *datap; int was_trunc = 0; int fullsz = 0; long spc; int offset; struct sctp_chunkhdr *ch, chunk_buf; unsigned int chk_length; if (!stcb) { return; } asoc = &stcb->asoc; SCTP_TCB_LOCK_ASSERT(stcb); if (asoc->pktdrop_supported == 0) { /*- * The peer must declare support before we send one. */ return; } if (stcb->sctp_socket == NULL) { return; } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; len -= iphlen; chk->send_size = len; /* Validate that we do not have an ABORT in here. */ offset = iphlen + sizeof(struct sctphdr); ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), (uint8_t *)&chunk_buf); while (ch != NULL) { chk_length = ntohs(ch->chunk_length); if (chk_length < sizeof(*ch)) { /* break to abort land */ break; } switch (ch->chunk_type) { case SCTP_PACKET_DROPPED: case SCTP_ABORT_ASSOCIATION: case SCTP_INITIATION_ACK: /** * We don't respond with a PKT-DROP to an ABORT * or PKT-DROP. We also do not respond to an * INIT-ACK, because we can't know if the initiation * tag is correct or not. */ sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); return; default: break; } offset += SCTP_SIZE32(chk_length); ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), (uint8_t *)&chunk_buf); } if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) > min(stcb->asoc.smallest_mtu, MCLBYTES)) { /* * only send one MTU's worth; trim off the excess on the end. */ fullsz = len; len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD; was_trunc = 1; } chk->asoc = &stcb->asoc; chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (chk->data == NULL) { jump_out: sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); return; } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); drp = mtod(chk->data, struct sctp_pktdrop_chunk *); if (drp == NULL) { sctp_m_freem(chk->data); chk->data = NULL; goto jump_out; } chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) + sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); chk->book_size_scale = 0; if (was_trunc) { drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; drp->trunc_len = htons(fullsz); /* * Len is already adjusted to size minus overhead above; take * out the pkt_drop chunk itself from it.
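* The original length still goes out in trunc_len, so the peer can
* see how much of the dropped packet was clipped to fit in one MTU.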
*/ chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk)); len = chk->send_size; } else { /* no truncation needed */ drp->ch.chunk_flags = 0; drp->trunc_len = htons(0); } if (bad_crc) { drp->ch.chunk_flags |= SCTP_BADCRC; } chk->send_size += sizeof(struct sctp_pktdrop_chunk); SCTP_BUF_LEN(chk->data) = chk->send_size; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; if (net) { /* we should hit here */ chk->whoTo = net; atomic_add_int(&chk->whoTo->ref_count, 1); } else { chk->whoTo = NULL; } drp->ch.chunk_type = SCTP_PACKET_DROPPED; drp->ch.chunk_length = htons(chk->send_size); spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket); if (spc < 0) { spc = 0; } drp->bottle_bw = htonl(spc); if (asoc->my_rwnd) { drp->current_onq = htonl(asoc->size_on_reasm_queue + asoc->size_on_all_streams + asoc->my_rwnd_control_len + SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv)); } else { /*- * If my rwnd is 0, possibly from mbuf depletion as well as * space used, tell the peer there is NO space aka onq == bw */ drp->current_onq = htonl(spc); } drp->reserved = 0; datap = drp->data; m_copydata(m, iphlen, len, (caddr_t)datap); TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; } void sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override) { struct sctp_association *asoc; struct sctp_cwr_chunk *cwr; struct sctp_tmit_chunk *chk; SCTP_TCB_LOCK_ASSERT(stcb); if (net == NULL) { return; } asoc = &stcb->asoc; TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) { /* * found a previous CWR queued to same destination * update it if needed */ uint32_t ctsn; cwr = mtod(chk->data, struct sctp_cwr_chunk *); ctsn = ntohl(cwr->tsn); if (SCTP_TSN_GT(high_tsn, ctsn)) { cwr->tsn = htonl(high_tsn); } if (override & SCTP_CWR_REDUCE_OVERRIDE) { /* Make sure override is carried */ cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE; } return; } } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_ECN_CWR; chk->rec.chunk_id.can_take_data = 1; chk->flags = 0; chk->asoc = asoc; chk->send_size = sizeof(struct sctp_cwr_chunk); chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); return; } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); SCTP_BUF_LEN(chk->data) = chk->send_size; chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; chk->whoTo = net; atomic_add_int(&chk->whoTo->ref_count, 1); cwr = mtod(chk->data, struct sctp_cwr_chunk *); cwr->ch.chunk_type = SCTP_ECN_CWR; cwr->ch.chunk_flags = override; cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk)); cwr->tsn = htonl(high_tsn); TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; } static int sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk, uint32_t seq, uint32_t resp_seq, uint32_t last_sent) { uint16_t len, old_len, i; struct sctp_stream_reset_out_request *req_out; struct sctp_chunkhdr *ch; int at; int number_entries = 0; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len); /* now how long will this param be? 
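* Only streams that are pending reset and fully drained (no chunks
* left on their queues) get listed; if every stream qualifies, an
* empty list is sent instead, which stands for "reset all".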
*/ for (i = 0; i < stcb->asoc.streamoutcnt; i++) { if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) && (stcb->asoc.strmout[i].chunks_on_queues == 0) && TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { number_entries++; } } if (number_entries == 0) { return (0); } if (number_entries == stcb->asoc.streamoutcnt) { number_entries = 0; } if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) { number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET; } len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries)); req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST); req_out->ph.param_length = htons(len); req_out->request_seq = htonl(seq); req_out->response_seq = htonl(resp_seq); req_out->send_reset_at_tsn = htonl(last_sent); at = 0; if (number_entries) { for (i = 0; i < stcb->asoc.streamoutcnt; i++) { if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) && (stcb->asoc.strmout[i].chunks_on_queues == 0) && TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { req_out->list_of_streams[at] = htons(i); at++; stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT; if (at >= number_entries) { break; } } } } else { for (i = 0; i < stcb->asoc.streamoutcnt; i++) { stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT; } } if (SCTP_SIZE32(len) > len) { /*- * Need to worry about the pad we may end up adding to the * end. This is easy since the struct is either aligned to 4 * bytes or 2 bytes off. */ req_out->list_of_streams[number_entries] = 0; } /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->book_size = len + old_len; chk->book_size_scale = 0; chk->send_size = SCTP_SIZE32(chk->book_size); SCTP_BUF_LEN(chk->data) = chk->send_size; return (1); } static void sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, int number_entries, uint16_t *list, uint32_t seq) { uint16_t len, old_len, i; struct sctp_stream_reset_in_request *req_in; struct sctp_chunkhdr *ch; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len); /* now how long will this param be? */ len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); req_in->ph.param_length = htons(len); req_in->request_seq = htonl(seq); if (number_entries) { for (i = 0; i < number_entries; i++) { req_in->list_of_streams[i] = htons(list[i]); } } if (SCTP_SIZE32(len) > len) { /*- * Need to worry about the pad we may end up adding to the * end. This is easy since the struct is either aligned to 4 * bytes or 2 bytes off. */ req_in->list_of_streams[number_entries] = 0; } /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->book_size = len + old_len; chk->book_size_scale = 0; chk->send_size = SCTP_SIZE32(chk->book_size); SCTP_BUF_LEN(chk->data) = chk->send_size; return; } static void sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, uint32_t seq) { uint16_t len, old_len; struct sctp_stream_reset_tsn_request *req_tsn; struct sctp_chunkhdr *ch; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); /* now how long will this param be? 
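* A TSN reset request is fixed size; it carries only the sequence
* number that identifies this request.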
*/ len = sizeof(struct sctp_stream_reset_tsn_request); req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); req_tsn->ph.param_length = htons(len); req_tsn->request_seq = htonl(seq); /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->send_size = len + old_len; chk->book_size = SCTP_SIZE32(chk->send_size); chk->book_size_scale = 0; SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); return; } void sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, uint32_t resp_seq, uint32_t result) { uint16_t len, old_len; struct sctp_stream_reset_response *resp; struct sctp_chunkhdr *ch; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); /* now how long will this param be? */ len = sizeof(struct sctp_stream_reset_response); resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); resp->ph.param_length = htons(len); resp->response_seq = htonl(resp_seq); resp->result = htonl(result); /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->book_size = len + old_len; chk->book_size_scale = 0; chk->send_size = SCTP_SIZE32(chk->book_size); SCTP_BUF_LEN(chk->data) = chk->send_size; return; } void sctp_send_deferred_reset_response(struct sctp_tcb *stcb, struct sctp_stream_reset_list *ent, int response) { struct sctp_association *asoc; struct sctp_tmit_chunk *chk; struct sctp_chunkhdr *ch; asoc = &stcb->asoc; /* * Reset our last reset action to the new one, i.e., the response * (probably PERFORMED). This assures that if we fail to send, a * retransmission from the peer will get the new response. */ asoc->last_reset_action[0] = response; if (asoc->stream_reset_outstanding) { return; } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return; } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_STREAM_RESET; chk->rec.chunk_id.can_take_data = 0; chk->flags = 0; chk->asoc = &stcb->asoc; chk->book_size = sizeof(struct sctp_chunkhdr); chk->send_size = SCTP_SIZE32(chk->book_size); chk->book_size_scale = 0; chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return; } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); /* setup chunk parameters */ chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; if (stcb->asoc.alternate) { chk->whoTo = stcb->asoc.alternate; } else { chk->whoTo = stcb->asoc.primary_destination; } ch = mtod(chk->data, struct sctp_chunkhdr *); ch->chunk_type = SCTP_STREAM_RESET; ch->chunk_flags = 0; ch->chunk_length = htons(chk->book_size); atomic_add_int(&chk->whoTo->ref_count, 1); SCTP_BUF_LEN(chk->data) = chk->send_size; sctp_add_stream_reset_result(chk, ent->seq, response); /* insert the chunk for sending */ TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; } void sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, uint32_t resp_seq, uint32_t result, uint32_t send_una, uint32_t recv_next) { uint16_t len, old_len; struct sctp_stream_reset_response_tsn *resp; struct sctp_chunkhdr *ch; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); /* now how long will this param be?
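* This response is fixed size as well: the result code plus the
* sender's and receiver's next TSNs after the TSN reset.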
*/ len = sizeof(struct sctp_stream_reset_response_tsn); resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); resp->ph.param_length = htons(len); resp->response_seq = htonl(resp_seq); resp->result = htonl(result); resp->senders_next_tsn = htonl(send_una); resp->receivers_next_tsn = htonl(recv_next); /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->book_size = len + old_len; chk->send_size = SCTP_SIZE32(chk->book_size); chk->book_size_scale = 0; SCTP_BUF_LEN(chk->data) = chk->send_size; return; } static void sctp_add_an_out_stream(struct sctp_tmit_chunk *chk, uint32_t seq, uint16_t adding) { uint16_t len, old_len; struct sctp_chunkhdr *ch; struct sctp_stream_reset_add_strm *addstr; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); /* now how long will this param be? */ len = sizeof(struct sctp_stream_reset_add_strm); /* Fill it out. */ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS); addstr->ph.param_length = htons(len); addstr->request_seq = htonl(seq); addstr->number_of_streams = htons(adding); addstr->reserved = 0; /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->send_size = len + old_len; chk->book_size = SCTP_SIZE32(chk->send_size); chk->book_size_scale = 0; SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); return; } static void sctp_add_an_in_stream(struct sctp_tmit_chunk *chk, uint32_t seq, uint16_t adding) { uint16_t len, old_len; struct sctp_chunkhdr *ch; struct sctp_stream_reset_add_strm *addstr; ch = mtod(chk->data, struct sctp_chunkhdr *); old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); /* get to new offset for the param. */ addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); /* now how long will this param be? */ len = sizeof(struct sctp_stream_reset_add_strm); /* Fill it out. 
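* Same layout as the outgoing variant above, but this parameter
* type asks the peer to raise the number of our incoming streams.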
*/ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS); addstr->ph.param_length = htons(len); addstr->request_seq = htonl(seq); addstr->number_of_streams = htons(adding); addstr->reserved = 0; /* now fix the chunk length */ ch->chunk_length = htons(len + old_len); chk->send_size = len + old_len; chk->book_size = SCTP_SIZE32(chk->send_size); chk->book_size_scale = 0; SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); return; } int sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked) { struct sctp_association *asoc; struct sctp_tmit_chunk *chk; struct sctp_chunkhdr *ch; uint32_t seq; asoc = &stcb->asoc; asoc->trigger_reset = 0; if (asoc->stream_reset_outstanding) { return (EALREADY); } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_STREAM_RESET; chk->rec.chunk_id.can_take_data = 0; chk->flags = 0; chk->asoc = &stcb->asoc; chk->book_size = sizeof(struct sctp_chunkhdr); chk->send_size = SCTP_SIZE32(chk->book_size); chk->book_size_scale = 0; chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, so_locked); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); /* setup chunk parameters */ chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; if (stcb->asoc.alternate) { chk->whoTo = stcb->asoc.alternate; } else { chk->whoTo = stcb->asoc.primary_destination; } ch = mtod(chk->data, struct sctp_chunkhdr *); ch->chunk_type = SCTP_STREAM_RESET; ch->chunk_flags = 0; ch->chunk_length = htons(chk->book_size); atomic_add_int(&chk->whoTo->ref_count, 1); SCTP_BUF_LEN(chk->data) = chk->send_size; seq = stcb->asoc.str_reset_seq_out; if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) { seq++; asoc->stream_reset_outstanding++; } else { m_freem(chk->data); chk->data = NULL; sctp_free_a_chunk(stcb, chk, so_locked); return (ENOENT); } asoc->str_reset = chk; /* insert the chunk for sending */ TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; if (stcb->asoc.send_sack) { sctp_send_sack(stcb, so_locked); } sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); return (0); } int sctp_send_str_reset_req(struct sctp_tcb *stcb, uint16_t number_entries, uint16_t *list, uint8_t send_in_req, uint8_t send_tsn_req, uint8_t add_stream, uint16_t adding_o, uint16_t adding_i, uint8_t peer_asked) { struct sctp_association *asoc; struct sctp_tmit_chunk *chk; struct sctp_chunkhdr *ch; int can_send_out_req = 0; uint32_t seq; SCTP_TCB_LOCK_ASSERT(stcb); asoc = &stcb->asoc; if (asoc->stream_reset_outstanding) { /*- * Already one pending, must get ACK back to clear the flag. 
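* Only one reset request may be outstanding per association, so the
* caller has to wait for the response before issuing another one.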
*/ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY); return (EBUSY); } if ((send_in_req == 0) && (send_tsn_req == 0) && (add_stream == 0)) { /* nothing to do */ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); return (EINVAL); } if (send_tsn_req && send_in_req) { /* error, can't do that */ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); return (EINVAL); } else if (send_in_req) { can_send_out_req = 1; } if (number_entries > (MCLBYTES - SCTP_MIN_OVERHEAD - sizeof(struct sctp_chunkhdr) - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } sctp_alloc_a_chunk(stcb, chk); if (chk == NULL) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } chk->copy_by_ref = 0; chk->rec.chunk_id.id = SCTP_STREAM_RESET; chk->rec.chunk_id.can_take_data = 0; chk->flags = 0; chk->asoc = &stcb->asoc; chk->book_size = sizeof(struct sctp_chunkhdr); chk->send_size = SCTP_SIZE32(chk->book_size); chk->book_size_scale = 0; chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (chk->data == NULL) { sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); /* setup chunk parameters */ chk->sent = SCTP_DATAGRAM_UNSENT; chk->snd_count = 0; if (stcb->asoc.alternate) { chk->whoTo = stcb->asoc.alternate; } else { chk->whoTo = stcb->asoc.primary_destination; } atomic_add_int(&chk->whoTo->ref_count, 1); ch = mtod(chk->data, struct sctp_chunkhdr *); ch->chunk_type = SCTP_STREAM_RESET; ch->chunk_flags = 0; ch->chunk_length = htons(chk->book_size); SCTP_BUF_LEN(chk->data) = chk->send_size; seq = stcb->asoc.str_reset_seq_out; if (can_send_out_req) { int ret; ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1)); if (ret) { seq++; asoc->stream_reset_outstanding++; } } if ((add_stream & 1) && ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) { /* Need to allocate more */ struct sctp_stream_out *oldstream; struct sctp_stream_queue_pending *sp, *nsp; int i; #if defined(SCTP_DETAILED_STR_STATS) int j; #endif oldstream = stcb->asoc.strmout; /* get some more */ SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out), SCTP_M_STRMO); if (stcb->asoc.strmout == NULL) { uint8_t x; stcb->asoc.strmout = oldstream; /* Turn off the bit */ x = add_stream & 0xfe; add_stream = x; goto skip_stuff; } /* * Ok now we proceed with copying the old out stuff and * initializing the new stuff. 
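* The old per-stream state (queues, message ids, counters) is moved
* over one stream at a time before the new slots are zeroed.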
*/ stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, false); for (i = 0; i < stcb->asoc.streamoutcnt; i++) { TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); /* FIX ME FIX ME */ /* * This should be a SS_COPY operation FIX ME STREAM * SCHEDULER EXPERT */ stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]); stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues; #if defined(SCTP_DETAILED_STR_STATS) for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { stcb->asoc.strmout[i].abandoned_sent[j] = oldstream[i].abandoned_sent[j]; stcb->asoc.strmout[i].abandoned_unsent[j] = oldstream[i].abandoned_unsent[j]; } #else stcb->asoc.strmout[i].abandoned_sent[0] = oldstream[i].abandoned_sent[0]; stcb->asoc.strmout[i].abandoned_unsent[0] = oldstream[i].abandoned_unsent[0]; #endif stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered; stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered; stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; stcb->asoc.strmout[i].sid = i; stcb->asoc.strmout[i].state = oldstream[i].state; /* now anything on those queues? */ TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) { TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); } } /* now the new streams */ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc); for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) { TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); stcb->asoc.strmout[i].chunks_on_queues = 0; #if defined(SCTP_DETAILED_STR_STATS) for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { stcb->asoc.strmout[i].abandoned_sent[j] = 0; stcb->asoc.strmout[i].abandoned_unsent[j] = 0; } #else stcb->asoc.strmout[i].abandoned_sent[0] = 0; stcb->asoc.strmout[i].abandoned_unsent[0] = 0; #endif stcb->asoc.strmout[i].next_mid_ordered = 0; stcb->asoc.strmout[i].next_mid_unordered = 0; stcb->asoc.strmout[i].sid = i; stcb->asoc.strmout[i].last_msg_incomplete = 0; stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL); stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED; } stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o; SCTP_FREE(oldstream, SCTP_M_STRMO); } skip_stuff: if ((add_stream & 1) && (adding_o > 0)) { asoc->strm_pending_add_size = adding_o; asoc->peer_req_out = peer_asked; sctp_add_an_out_stream(chk, seq, adding_o); seq++; asoc->stream_reset_outstanding++; } if ((add_stream & 2) && (adding_i > 0)) { sctp_add_an_in_stream(chk, seq, adding_i); seq++; asoc->stream_reset_outstanding++; } if (send_in_req) { sctp_add_stream_reset_in(chk, number_entries, list, seq); seq++; asoc->stream_reset_outstanding++; } if (send_tsn_req) { sctp_add_stream_reset_tsn(chk, seq); asoc->stream_reset_outstanding++; } asoc->str_reset = chk; /* insert the chunk for sending */ TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt++; if (stcb->asoc.send_sack) { sctp_send_sack(stcb, SCTP_SO_LOCKED); } sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); return (0); } void sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port) { /* Don't respond to an ABORT with an ABORT. 
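* Answering an ABORT with another ABORT could ping-pong forever, so
* scan the packet first and bail out if one is already present.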
*/ if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) { if (cause) sctp_m_freem(cause); return; } sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause, mflowtype, mflowid, fibnum, vrf_id, port); return; } void sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port) { sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause, mflowtype, mflowid, fibnum, vrf_id, port); return; } static struct mbuf * sctp_copy_resume(struct uio *uio, int max_send_len, int user_marks_eor, int *error, uint32_t *sndout, struct mbuf **new_tail) { struct mbuf *m; m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, (M_PKTHDR | (user_marks_eor ? M_EOR : 0))); if (m == NULL) { /* The only possible error is EFAULT. */ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); *error = EFAULT; } else { *sndout = m_length(m, NULL); *new_tail = m_last(m); } return (m); } static int sctp_copy_one(struct sctp_stream_queue_pending *sp, struct uio *uio, int resv_upfront) { sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, resv_upfront, 0); if (sp->data == NULL) { /* The only possible error is EFAULT. */ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); return (EFAULT); } sp->tail_mbuf = m_last(sp->data); return (0); } static struct sctp_stream_queue_pending * sctp_copy_it_in(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_nonpad_sndrcvinfo *srcv, struct uio *uio, struct sctp_nets *net, ssize_t max_send_len, int user_marks_eor, int *error) { /*- * This routine must be very careful in its work. Protocol * processing is up and running so care must be taken to spl...() * when you need to do something that may affect the stcb/asoc. The * sb is locked however. When data is copied the protocol processing * should be enabled since this is a slower operation...
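* The caller drops the TCB lock around this copy, so the copy in
* from userland may sleep without stalling protocol processing.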
*/ struct sctp_stream_queue_pending *sp; int resv_in_first; *error = 0; sctp_alloc_a_strmoq(stcb, sp); if (sp == NULL) { SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); *error = ENOMEM; goto out_now; } sp->act_flags = 0; sp->sender_all_done = 0; sp->sinfo_flags = srcv->sinfo_flags; sp->timetolive = srcv->sinfo_timetolive; sp->ppid = srcv->sinfo_ppid; sp->context = srcv->sinfo_context; sp->fsn = 0; (void)SCTP_GETTIME_TIMEVAL(&sp->ts); sp->sid = srcv->sinfo_stream; sp->length = (uint32_t)min(uio->uio_resid, max_send_len); if ((sp->length == (uint32_t)uio->uio_resid) && ((user_marks_eor == 0) || (srcv->sinfo_flags & SCTP_EOF) || (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { sp->msg_is_complete = 1; } else { sp->msg_is_complete = 0; } sp->sender_all_done = 0; sp->some_taken = 0; sp->put_last_out = 0; resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb); sp->data = sp->tail_mbuf = NULL; if (sp->length == 0) { goto skip_copy; } if (srcv->sinfo_keynumber_valid) { sp->auth_keyid = srcv->sinfo_keynumber; } else { sp->auth_keyid = stcb->asoc.authinfo.active_keyid; } if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { sctp_auth_key_acquire(stcb, sp->auth_keyid); sp->holds_key_ref = 1; } *error = sctp_copy_one(sp, uio, resv_in_first); skip_copy: if (*error) { sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); sp = NULL; } else { if (sp->sinfo_flags & SCTP_ADDR_OVER) { sp->net = net; atomic_add_int(&sp->net->ref_count, 1); } else { sp->net = NULL; } sctp_set_prsctp_policy(sp); } out_now: return (sp); } int sctp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *p) { struct sctp_sndrcvinfo sndrcvninfo; #if defined(INET) && defined(INET6) struct sockaddr_in sin; #endif struct sockaddr *addr_to_use; int error; bool use_sndinfo; if (control != NULL) { /* process cmsg snd/rcv info (maybe a assoc-id) */ use_sndinfo = sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control, sizeof(sndrcvninfo)); } else { use_sndinfo = false; } #if defined(INET) && defined(INET6) if ((addr != NULL) && (addr->sa_family == AF_INET6)) { struct sockaddr_in6 *sin6; if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); return (EINVAL); } sin6 = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin, sin6); addr_to_use = (struct sockaddr *)&sin; } else { addr_to_use = addr; } } else { addr_to_use = addr; } #else addr_to_use = addr; #endif error = sctp_lower_sosend(so, addr_to_use, uio, top, control, flags, use_sndinfo ? 
&sndrcvninfo : NULL, p); return (error); } int sctp_lower_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct sctp_sndrcvinfo *srcv, struct thread *p) { struct sctp_nonpad_sndrcvinfo sndrcvninfo_buf; struct epoch_tracker et; struct timeval now; struct sctp_block_entry be; struct sctp_inpcb *inp; struct sctp_tcb *stcb = NULL; struct sctp_nets *net; struct sctp_association *asoc; struct sctp_inpcb *t_inp; struct sctp_nonpad_sndrcvinfo *sndrcvninfo; ssize_t sndlen = 0, max_len, local_add_more; ssize_t local_soresv = 0; sctp_assoc_t sinfo_assoc_id; int user_marks_eor; int nagle_applies = 0; int error; int queue_only = 0, queue_only_for_init = 0; int un_sent; int now_filled = 0; unsigned int inqueue_bytes = 0; uint16_t port; uint16_t sinfo_flags; uint16_t sinfo_stream; bool create_lock_applied = false; bool free_cnt_applied = false; bool some_on_control; bool got_all_of_the_send = false; bool non_blocking = false; error = 0; net = NULL; stcb = NULL; if ((uio == NULL) && (top == NULL)) { error = EINVAL; goto out_unlocked; } if (addr != NULL) { union sctp_sockstore *raddr = (union sctp_sockstore *)addr; switch (raddr->sa.sa_family) { #ifdef INET case AF_INET: if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) { error = EINVAL; goto out_unlocked; } port = raddr->sin.sin_port; break; #endif #ifdef INET6 case AF_INET6: if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) { error = EINVAL; goto out_unlocked; } port = raddr->sin6.sin6_port; break; #endif default: error = EAFNOSUPPORT; goto out_unlocked; } } else { port = 0; } if (uio != NULL) { if (uio->uio_resid < 0) { error = EINVAL; goto out_unlocked; } sndlen = uio->uio_resid; } else { sndlen = SCTP_HEADER_LEN(top); } SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n", (void *)addr, sndlen); t_inp = inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { error = EINVAL; goto out_unlocked; } user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); if ((uio == NULL) && (user_marks_eor != 0)) { /*- * We do not support eeor mode for * sending with mbuf chains (like sendfile). */ error = EINVAL; goto out_unlocked; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && SCTP_IS_LISTENING(inp)) { /* The listener can NOT send. */ error = EINVAL; goto out_unlocked; } atomic_add_int(&inp->total_sends, 1); if (srcv != NULL) { sndrcvninfo = (struct sctp_nonpad_sndrcvinfo *)srcv; sinfo_assoc_id = sndrcvninfo->sinfo_assoc_id; sinfo_flags = sndrcvninfo->sinfo_flags; if (INVALID_SINFO_FLAG(sinfo_flags) || PR_SCTP_INVALID_POLICY(sinfo_flags)) { error = EINVAL; goto out_unlocked; } if (sinfo_flags != 0) { SCTP_STAT_INCR(sctps_sends_with_flags); } } else { sndrcvninfo = NULL; sinfo_flags = inp->def_send.sinfo_flags; sinfo_assoc_id = inp->def_send.sinfo_assoc_id; } if (flags & MSG_EOR) { sinfo_flags |= SCTP_EOR; } if (flags & MSG_EOF) { sinfo_flags |= SCTP_EOF; } if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) { error = EINVAL; goto out_unlocked; } SCTP_INP_RLOCK(inp); if ((sinfo_flags & SCTP_SENDALL) && (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { SCTP_INP_RUNLOCK(inp); error = sctp_sendall(inp, uio, top, sndrcvninfo); top = NULL; goto out_unlocked; } /* Now we must find the association. 
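* Three lookups are tried in order: the single association of a
* one-to-one style socket, a lookup by association id, and finally
* a lookup by peer address (which may create one implicitly below).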
*/ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb != NULL) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else if (sinfo_assoc_id > SCTP_ALL_ASSOC) { stcb = sctp_findasoc_ep_asocid_locked(inp, sinfo_assoc_id, 1); SCTP_INP_RUNLOCK(inp); if (stcb != NULL) { SCTP_TCB_LOCK_ASSERT(stcb); } } else if (addr != NULL) { /*- * Since we did not use findep we must * increment it, and if we don't find a tcb * decrement it. */ SCTP_INP_INCR_REF(inp); SCTP_INP_RUNLOCK(inp); stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } else { SCTP_TCB_LOCK_ASSERT(stcb); } } else { SCTP_INP_RUNLOCK(inp); } #ifdef INVARIANTS if (stcb != NULL) { SCTP_TCB_LOCK_ASSERT(stcb); } #endif if ((stcb == NULL) && (addr != NULL)) { /* Possible implicit send? */ SCTP_ASOC_CREATE_LOCK(inp); create_lock_applied = true; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { error = EINVAL; goto out_unlocked; } if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (addr->sa_family == AF_INET6)) { error = EINVAL; goto out_unlocked; } SCTP_INP_WLOCK(inp); SCTP_INP_INCR_REF(inp); SCTP_INP_WUNLOCK(inp); /* With the lock applied look again */ stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); #if defined(INET) || defined(INET6) if ((stcb == NULL) && (control != NULL) && (port > 0)) { stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error); } #endif if (stcb == NULL) { SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } else { SCTP_TCB_LOCK_ASSERT(stcb); SCTP_ASOC_CREATE_UNLOCK(inp); create_lock_applied = false; } if (error != 0) { goto out_unlocked; } if (t_inp != inp) { error = ENOTCONN; goto out_unlocked; } } if (stcb == NULL) { if (addr == NULL) { error = ENOENT; goto out_unlocked; } else { /* We must go ahead and start the INIT process */ uint32_t vrf_id; if ((sinfo_flags & SCTP_ABORT) || ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { /*- * User asks to abort a non-existent assoc, * or EOF a non-existent assoc with no data */ error = ENOENT; goto out_unlocked; } /* get an asoc/stcb struct */ vrf_id = inp->def_vrf_id; KASSERT(create_lock_applied, ("create_lock_applied is false")); stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, p, SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* error is setup for us in the call. */ KASSERT(error != 0, ("error is 0 although stcb is NULL")); goto out_unlocked; } SCTP_TCB_LOCK_ASSERT(stcb); SCTP_ASOC_CREATE_UNLOCK(inp); create_lock_applied = false; /* * Turn on queue only flag to prevent data from * being sent */ queue_only = 1; SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); if (control != NULL) { if (sctp_process_cmsgs_for_init(stcb, control, &error)) { sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); stcb = NULL; KASSERT(error != 0, ("error is 0 although sctp_process_cmsgs_for_init() indicated an error")); goto out_unlocked; } } /* out with the INIT */ queue_only_for_init = 1; /*- * we may want to dig in after this call and adjust the MTU * value. It defaulted to 1500 (constant) but the ro * structure may now have an update and thus we may need to * change it BEFORE we append the message. 
*/ } } KASSERT(!create_lock_applied, ("create_lock_applied is true")); KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); asoc = &stcb->asoc; if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || (asoc->state & SCTP_STATE_WAS_ABORTED)) { if (asoc->state & SCTP_STATE_WAS_ABORTED) { /* XXX: Could also be ECONNABORTED, not enough info. */ error = ECONNRESET; } else { error = ENOTCONN; } goto out_unlocked; } if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { queue_only = 1; } /* Keep the stcb from being freed under our feet. */ atomic_add_int(&asoc->refcnt, 1); free_cnt_applied = true; if (sndrcvninfo == NULL) { /* Use a local copy to have a consistent view. */ sndrcvninfo_buf = asoc->def_send; sndrcvninfo = &sndrcvninfo_buf; sinfo_flags = sndrcvninfo->sinfo_flags; if (flags & MSG_EOR) { sinfo_flags |= SCTP_EOR; } if (flags & MSG_EOF) { sinfo_flags |= SCTP_EOF; } } /* Are we aborting? */ if (sinfo_flags & SCTP_ABORT) { struct mbuf *mm; struct sctp_paramhdr *ph; ssize_t tot_demand, tot_out = 0, max_out; SCTP_STAT_INCR(sctps_sends_with_abort); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { /* It has to be up before we abort. */ error = EINVAL; goto out_unlocked; } /* How big is the user initiated abort? */ if (top != NULL) { struct mbuf *cntm; if (sndlen != 0) { for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) { tot_out += SCTP_BUF_LEN(cntm); } } mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); } else { /* Must fit in a MTU */ tot_out = sndlen; tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); if (tot_demand > SCTP_DEFAULT_ADD_MORE) { error = EMSGSIZE; goto out_unlocked; } mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_NOWAIT, 1, MT_DATA); } if (mm == NULL) { error = ENOMEM; goto out_unlocked; } max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); max_out -= sizeof(struct sctp_abort_msg); if (tot_out > max_out) { tot_out = max_out; } ph = mtod(mm, struct sctp_paramhdr *); ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out)); ph++; SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr)); if (top == NULL) { SCTP_TCB_UNLOCK(stcb); error = uiomove((caddr_t)ph, (int)tot_out, uio); SCTP_TCB_LOCK(stcb); if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || (asoc->state & SCTP_STATE_WAS_ABORTED)) { sctp_m_freem(mm); if (asoc->state & SCTP_STATE_WAS_ABORTED) { /* * XXX: Could also be ECONNABORTED, * not enough info. 
*/ error = ECONNRESET; } else { error = ENOTCONN; } goto out_unlocked; } if (error != 0) { /*- * Here if we can't get the user's data we * still abort; we just don't get to * send the user's note. */ sctp_m_freem(mm); mm = NULL; error = 0; } } else { if (sndlen != 0) { SCTP_BUF_NEXT(mm) = top; } } atomic_subtract_int(&asoc->refcnt, 1); free_cnt_applied = false; /* release this lock, otherwise we hang on ourselves */ NET_EPOCH_ENTER(et); sctp_abort_an_association(stcb->sctp_ep, stcb, mm, false, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); stcb = NULL; /* * In this case top is already chained to mm; avoid a double * free, since we free it below if top != NULL and the driver * would free it after sending the packet out */ if (sndlen != 0) { top = NULL; } goto out_unlocked; } KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); if (sinfo_flags & SCTP_ADDR_OVER) { if (addr != NULL) { net = sctp_findnet(stcb, addr); } else { net = NULL; } if ((net == NULL) || ((port != 0) && (port != stcb->rport))) { error = EINVAL; goto out_unlocked; } } else { if (asoc->alternate != NULL) { net = asoc->alternate; } else { net = asoc->primary_destination; } } if (sndlen == 0) { if (sinfo_flags & SCTP_EOF) { got_all_of_the_send = true; goto dataless_eof; } else { error = EINVAL; goto out_unlocked; } } if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { if (sndlen > (ssize_t)asoc->smallest_mtu) { error = EMSGSIZE; goto out_unlocked; } } sinfo_stream = sndrcvninfo->sinfo_stream; /* Is the stream no. valid? */ if (sinfo_stream >= asoc->streamoutcnt) { /* Invalid stream number */ error = EINVAL; goto out_unlocked; } if ((asoc->strmout[sinfo_stream].state != SCTP_STREAM_OPEN) && (asoc->strmout[sinfo_stream].state != SCTP_STREAM_OPENING)) { /* * Can't queue any data while stream reset is underway. */ if (asoc->strmout[sinfo_stream].state > SCTP_STREAM_OPEN) { error = EAGAIN; } else { error = EINVAL; } goto out_unlocked; } atomic_add_int(&stcb->total_sends, 1); if (SCTP_SO_IS_NBIO(so) || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) { non_blocking = true; } if (non_blocking) { ssize_t amount; inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); if (user_marks_eor == 0) { amount = sndlen; } else { amount = 1; } if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + asoc->sb_send_resv)) || (asoc->chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { if ((sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so)) && (user_marks_eor == 0)) { error = EMSGSIZE; } else { error = EWOULDBLOCK; } goto out_unlocked; } } atomic_add_int(&asoc->sb_send_resv, (int)sndlen); local_soresv = sndlen; KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); /* Ok, we will attempt a msgsnd :> */ if (p != NULL) { p->td_ru.ru_msgsnd++; } /* Calculate the maximum we can send */ inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; } else { max_len = 0; } /* Unless E_EOR mode is on, we must make a send FIT in one call.
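* With explicit EOR a message can be built up over several send
* calls, so only in that mode may it exceed the send buffer size.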
*/ if ((user_marks_eor == 0) && (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { /* It will NEVER fit. */ error = EMSGSIZE; goto out_unlocked; } if (user_marks_eor != 0) { local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold)); } else { /*- * For non-eeor the whole message must fit in * the socket send buffer. */ local_add_more = sndlen; } if (non_blocking) { goto skip_preblock; } if (((max_len <= local_add_more) && ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) || (max_len == 0) || ((asoc->chunks_on_out_queue + asoc->stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { /* No room right now! */ inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); SOCKBUF_LOCK(&so->so_snd); while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) || ((asoc->stream_queue_cnt + asoc->chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n", (unsigned int)SCTP_SB_LIMIT_SND(so), inqueue_bytes, local_add_more, asoc->stream_queue_cnt, asoc->chunks_on_out_queue, SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen); } be.error = 0; stcb->block_entry = &be; SCTP_TCB_UNLOCK(stcb); error = sbwait(so, SO_SND); if (error == 0) { if (so->so_error != 0) { error = so->so_error; } if (be.error != 0) { error = be.error; } } SOCKBUF_UNLOCK(&so->so_snd); SCTP_TCB_LOCK(stcb); stcb->block_entry = NULL; if (error != 0) { goto out_unlocked; } if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || (asoc->state & SCTP_STATE_WAS_ABORTED)) { if (asoc->state & SCTP_STATE_WAS_ABORTED) { /* * XXX: Could also be ECONNABORTED, * not enough info. */ error = ECONNRESET; } else { error = ENOTCONN; } goto out_unlocked; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, asoc, asoc->total_output_queue_size); } inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); SOCKBUF_LOCK(&so->so_snd); } if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; } else { max_len = 0; } SOCKBUF_UNLOCK(&so->so_snd); } skip_preblock: KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); /* * sndlen covers for mbuf case uio_resid covers for the non-mbuf * case NOTE: uio will be null when top/mbuf is passed */ if (top == NULL) { struct sctp_stream_queue_pending *sp; struct sctp_stream_out *strm; uint32_t sndout; if ((asoc->stream_locked) && (asoc->stream_locked_on != sinfo_stream)) { error = EINVAL; goto out; } strm = &asoc->strmout[sinfo_stream]; if (strm->last_msg_incomplete == 0) { do_a_copy_in: SCTP_TCB_UNLOCK(stcb); sp = sctp_copy_it_in(stcb, asoc, sndrcvninfo, uio, net, max_len, user_marks_eor, &error); SCTP_TCB_LOCK(stcb); if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || (asoc->state & SCTP_STATE_WAS_ABORTED)) { if (asoc->state & SCTP_STATE_WAS_ABORTED) { /* * XXX: Could also be ECONNABORTED, * not enough info. 
*/ error = ECONNRESET; } else { error = ENOTCONN; } goto out; } if (error != 0) { goto out; } /* * Reject the sending of a new user message, if the * association is about to be shut down. */ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { if (sp->data != 0) { sctp_m_freem(sp->data); sp->data = NULL; sp->tail_mbuf = NULL; sp->length = 0; } if (sp->net != NULL) { sctp_free_remote_addr(sp->net); sp->net = NULL; } sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); error = EPIPE; goto out_unlocked; } /* The out streams might be reallocated. */ strm = &asoc->strmout[sinfo_stream]; if (sp->msg_is_complete) { strm->last_msg_incomplete = 0; asoc->stream_locked = 0; } else { /* * Just got locked to this guy in case of an * interrupt. */ strm->last_msg_incomplete = 1; if (asoc->idata_supported == 0) { asoc->stream_locked = 1; asoc->stream_locked_on = sinfo_stream; } sp->sender_all_done = 0; } sctp_snd_sb_alloc(stcb, sp->length); atomic_add_int(&asoc->stream_queue_cnt, 1); if (sinfo_flags & SCTP_UNORDERED) { SCTP_STAT_INCR(sctps_sends_with_unord); } sp->processing = 1; TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); asoc->ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp); } else { sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); if (sp == NULL) { /* ???? Huh ??? last msg is gone */ #ifdef INVARIANTS panic("Warning: Last msg marked incomplete, yet nothing left?"); #else SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); strm->last_msg_incomplete = 0; #endif goto do_a_copy_in; } if (sp->processing != 0) { error = EINVAL; goto out; } else { sp->processing = 1; } } KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); while (uio->uio_resid > 0) { /* How much room do we have? */ struct mbuf *new_tail, *mm; inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; } else { max_len = 0; } if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || ((max_len > 0) && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || (uio->uio_resid <= max_len)) { SCTP_TCB_UNLOCK(stcb); sndout = 0; new_tail = NULL; mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail); SCTP_TCB_LOCK(stcb); if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || (asoc->state & SCTP_STATE_WAS_ABORTED)) { /* * We need to get out. Peer probably * aborted. */ sctp_m_freem(mm); if (asoc->state & SCTP_STATE_WAS_ABORTED) { /* * XXX: Could also be * ECONNABORTED, not enough * info. */ error = ECONNRESET; } else { error = ENOTCONN; } goto out; } if ((mm == NULL) || (error != 0)) { if (mm != NULL) { sctp_m_freem(mm); } if (sp != NULL) { sp->processing = 0; } goto out; } /* Update the mbuf and count */ if (sp->tail_mbuf != NULL) { /* Tack it to the end. */ SCTP_BUF_NEXT(sp->tail_mbuf) = mm; } else { /* A stolen mbuf. */ sp->data = mm; } sp->tail_mbuf = new_tail; sctp_snd_sb_alloc(stcb, sndout); atomic_add_int(&sp->length, sndout); if (sinfo_flags & SCTP_SACK_IMMEDIATELY) { sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY; } /* Did we reach EOR? 
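* The message is complete once all user data has been copied and
* the caller marked end-of-record (or set EOF); otherwise it stays
* open for the next send call on this stream.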
*/ if ((uio->uio_resid == 0) && ((user_marks_eor == 0) || (sinfo_flags & SCTP_EOF) || (user_marks_eor && (sinfo_flags & SCTP_EOR)))) { sp->msg_is_complete = 1; } else { sp->msg_is_complete = 0; } } KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); if (uio->uio_resid == 0) { /* got it all? */ continue; } /* PR-SCTP? */ if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) { /* * This is ugly but we must assure locking * order */ sctp_prune_prsctp(stcb, asoc, sndrcvninfo, (int)sndlen); inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; else max_len = 0; if (max_len > 0) { continue; } } /* wait for space now */ if (non_blocking) { /* Non-blocking io in place out */ if (sp != NULL) { sp->processing = 0; } goto skip_out_eof; } /* What about the INIT, send it maybe */ if (queue_only_for_init) { if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { /* a collision took us forward? */ queue_only = 0; } else { NET_EPOCH_ENTER(et); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); queue_only = 1; } } if ((net->flight_size > net->cwnd) && (asoc->sctp_cmt_on_off == 0)) { SCTP_STAT_INCR(sctps_send_cwnd_avoid); queue_only = 1; } else if (asoc->ifp_had_enobuf) { SCTP_STAT_INCR(sctps_ifnomemqueued); if (net->flight_size > (2 * net->mtu)) { queue_only = 1; } asoc->ifp_had_enobuf = 0; } un_sent = asoc->total_output_queue_size - asoc->total_flight; if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && (asoc->total_flight > 0) && (asoc->stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && (un_sent < (int)(asoc->smallest_mtu - SCTP_MIN_OVERHEAD))) { /*- * Ok, Nagle is set on and we have data outstanding. * Don't send anything and let SACKs drive out the * data unless we have a "full" segment to send. */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); } SCTP_STAT_INCR(sctps_naglequeued); nagle_applies = 1; } else { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); } SCTP_STAT_INCR(sctps_naglesent); nagle_applies = 0; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); sctp_misc_ints(SCTP_CWNDLOG_PRESEND, asoc->total_output_queue_size, asoc->total_flight, asoc->chunks_on_out_queue, asoc->total_flight_count); } if (queue_only_for_init) { queue_only_for_init = 0; } if ((queue_only == 0) && (nagle_applies == 0)) { /*- * need to start chunk output * before blocking.. note that if * a lock is already applied, then * the input via the net is happening * and I don't need to start output :-D */ NET_EPOCH_ENTER(et); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); } /*- * This is a bit strange, but I think it will * work. The total_output_queue_size is locked and * protected by the TCB_LOCK, which we just released. 
* There is a race that can occur between releasing it * above, and me getting the socket lock, where sacks * come in but we have not put the SB_WAIT on the * so_snd buffer to get the wakeup. After the LOCK * is applied the sack_processing will also need to * LOCK the so->so_snd to do the actual sowwakeup(). So * once we have the socket buffer lock if we recheck the * size we KNOW we will get to sleep safely with the * wakeup flag in place. */ inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); SOCKBUF_LOCK(&so->so_snd); if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes + min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, asoc, uio->uio_resid); } be.error = 0; stcb->block_entry = &be; SCTP_TCB_UNLOCK(stcb); error = sbwait(so, SO_SND); if (error == 0) { if (so->so_error != 0) error = so->so_error; if (be.error != 0) { error = be.error; } } SOCKBUF_UNLOCK(&so->so_snd); SCTP_TCB_LOCK(stcb); stcb->block_entry = NULL; if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || (asoc->state & SCTP_STATE_WAS_ABORTED)) { if (asoc->state & SCTP_STATE_WAS_ABORTED) { /* * XXX: Could also be * ECONNABORTED, not enough * info. */ error = ECONNRESET; } else { error = ENOTCONN; } goto out_unlocked; } if (error != 0) { if (sp != NULL) { sp->processing = 0; } goto out_unlocked; } } else { SOCKBUF_UNLOCK(&so->so_snd); } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, asoc, asoc->total_output_queue_size); } } KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); /* The out streams might be reallocated. */ strm = &asoc->strmout[sinfo_stream]; if (sp != NULL) { if (sp->msg_is_complete == 0) { strm->last_msg_incomplete = 1; if (asoc->idata_supported == 0) { asoc->stream_locked = 1; asoc->stream_locked_on = sinfo_stream; } } else { sp->sender_all_done = 1; strm->last_msg_incomplete = 0; asoc->stream_locked = 0; } sp->processing = 0; } else { SCTP_PRINTF("Huh no sp TSNH?\n"); strm->last_msg_incomplete = 0; asoc->stream_locked = 0; } if (uio->uio_resid == 0) { got_all_of_the_send = true; } } else { error = sctp_msg_append(stcb, net, top, sndrcvninfo); top = NULL; if ((sinfo_flags & SCTP_EOF) != 0) { got_all_of_the_send = true; } } if (error != 0) { goto out; } dataless_eof: KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); /* EOF thing ? */ if ((sinfo_flags & SCTP_EOF) && got_all_of_the_send) { SCTP_STAT_INCR(sctps_sends_with_eof); error = 0; if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { goto abort_anyway; } /* there is nothing queued to send, so I'm done... 
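* time to move straight to SHUTDOWN-SENT and put a SHUTDOWN chunk
* on the wire, with the shutdown guard timer running.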
*/ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { struct sctp_nets *netp; /* only send SHUTDOWN the first time through */ if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); if (asoc->alternate != NULL) { netp = asoc->alternate; } else { netp = asoc->primary_destination; } sctp_send_shutdown(stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); } } else { /*- * we still have (or just got) data to send, so set * SHUTDOWN_PENDING */ /*- * XXX sockets draft says that SCTP_EOF should be * sent with no data. Currently, we allow user * data to be sent first and move to * SHUTDOWN-PENDING */ if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; abort_anyway: if (free_cnt_applied) { atomic_subtract_int(&asoc->refcnt, 1); free_cnt_applied = false; } SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); NET_EPOCH_ENTER(et); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); stcb = NULL; error = ECONNABORTED; goto out; } sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); } } } skip_out_eof: KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); some_on_control = !TAILQ_EMPTY(&asoc->control_send_queue); if (queue_only_for_init) { if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { /* a collision took us forward? */ queue_only = 0; } else { NET_EPOCH_ENTER(et); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); NET_EPOCH_EXIT(et); SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); queue_only = 1; } } KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); if ((net->flight_size > net->cwnd) && (asoc->sctp_cmt_on_off == 0)) { SCTP_STAT_INCR(sctps_send_cwnd_avoid); queue_only = 1; } else if (asoc->ifp_had_enobuf) { SCTP_STAT_INCR(sctps_ifnomemqueued); if (net->flight_size > (2 * net->mtu)) { queue_only = 1; } asoc->ifp_had_enobuf = 0; } un_sent = asoc->total_output_queue_size - asoc->total_flight; if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && (asoc->total_flight > 0) && (asoc->stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && (un_sent < (int)(asoc->smallest_mtu - SCTP_MIN_OVERHEAD))) { /*- * Ok, Nagle is set on and we have data outstanding. * Don't send anything and let SACKs drive out the * data unless we have a "full" segment to send.
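* This mirrors TCP's Nagle algorithm: while data is outstanding,
* small writes are held back until a full MTU's worth accumulates
* or the in-flight data is acknowledged.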
*/ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); } SCTP_STAT_INCR(sctps_naglequeued); nagle_applies = 1; } else { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); } SCTP_STAT_INCR(sctps_naglesent); nagle_applies = 0; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); sctp_misc_ints(SCTP_CWNDLOG_PRESEND, asoc->total_output_queue_size, asoc->total_flight, asoc->chunks_on_out_queue, asoc->total_flight_count); } KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); NET_EPOCH_ENTER(et); if ((queue_only == 0) && (nagle_applies == 0) && (asoc->peers_rwnd && un_sent)) { sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); } else if ((queue_only == 0) && (asoc->peers_rwnd == 0) && (asoc->total_flight == 0)) { /* We get to have a probe outstanding */ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); } else if (some_on_control) { int num_out, reason; /* Here we do control only */ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason, 1, 1, &now, &now_filled, sctp_get_frag_point(stcb), SCTP_SO_LOCKED); } NET_EPOCH_EXIT(et); SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", queue_only, asoc->peers_rwnd, un_sent, asoc->total_flight, asoc->chunks_on_out_queue, asoc->total_output_queue_size, error); KASSERT(stcb != NULL, ("stcb is NULL")); SCTP_TCB_LOCK_ASSERT(stcb); KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, ("Association about to be freed")); KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, ("Association was aborted")); out: out_unlocked: if (create_lock_applied) { SCTP_ASOC_CREATE_UNLOCK(inp); } if (stcb != NULL) { if (local_soresv) { atomic_subtract_int(&asoc->sb_send_resv, (int)sndlen); } if (free_cnt_applied) { atomic_subtract_int(&asoc->refcnt, 1); } SCTP_TCB_UNLOCK(stcb); } if (top != NULL) { sctp_m_freem(top); } if (control != NULL) { sctp_m_freem(control); } SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error); return (error); } /* * generate an AUTHentication chunk, if required */ struct mbuf * sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, struct sctp_auth_chunk **auth_ret, uint32_t *offset, struct sctp_tcb *stcb, uint8_t chunk) { struct mbuf *m_auth; struct sctp_auth_chunk *auth; int chunk_len; struct mbuf *cn; if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) || (stcb == NULL)) return (m); if (stcb->asoc.auth_supported == 0) { return (m); } /* does the requested chunk require auth? 
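 *
 * Conceptually this is a set-membership test against the peer's
 * requested-chunks list; a minimal illustration (a bitmap is only one
 * possible representation, the real list structure lives in sctp_auth.c):
 *
 *	static int
 *	chunk_in_set(uint8_t chunk, const uint8_t set[32])
 *	{
 *		return ((set[chunk >> 3] & (1 << (chunk & 7))) != 0);
 *	}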
*/ if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) { return (m); } m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER); if (m_auth == NULL) { /* no mbuf's */ return (m); } /* reserve some space if this will be the first mbuf */ if (m == NULL) SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD); /* fill in the AUTH chunk details */ auth = mtod(m_auth, struct sctp_auth_chunk *); memset(auth, 0, sizeof(*auth)); auth->ch.chunk_type = SCTP_AUTHENTICATION; auth->ch.chunk_flags = 0; chunk_len = sizeof(*auth) + sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id); auth->ch.chunk_length = htons(chunk_len); auth->hmac_id = htons(stcb->asoc.peer_hmac_id); /* key id and hmac digest will be computed and filled in upon send */ /* save the offset where the auth was inserted into the chain */ *offset = 0; for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) { *offset += SCTP_BUF_LEN(cn); } /* update length and return pointer to the auth chunk */ SCTP_BUF_LEN(m_auth) = chunk_len; m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0); if (auth_ret != NULL) *auth_ret = auth; return (m); } #ifdef INET6 int sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) { struct nd_prefix *pfx = NULL; struct nd_pfxrouter *pfxrtr = NULL; struct sockaddr_in6 gw6; if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6) return (0); /* get prefix entry of address */ ND6_RLOCK(); LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) { if (pfx->ndpr_stateflags & NDPRF_DETACHED) continue; if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr, &src6->sin6_addr, &pfx->ndpr_mask)) break; } /* no prefix entry in the prefix list */ if (pfx == NULL) { ND6_RUNLOCK(); SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6); return (0); } SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6); /* search installed gateway from prefix entry */ LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) { memset(&gw6, 0, sizeof(struct sockaddr_in6)); gw6.sin6_family = AF_INET6; gw6.sin6_len = sizeof(struct sockaddr_in6); memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr, sizeof(struct in6_addr)); SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6); SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa); if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) { ND6_RUNLOCK(); SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n"); return (1); } } ND6_RUNLOCK(); SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n"); return (0); } #endif int sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) { #ifdef INET struct sockaddr_in *sin, *mask; struct ifaddr *ifa; struct in_addr srcnetaddr, gwnetaddr; if (ro == NULL || ro->ro_nh == NULL || sifa->address.sa.sa_family != AF_INET) { return (0); } ifa = (struct ifaddr *)sifa->ifa; mask = (struct sockaddr_in *)(ifa->ifa_netmask); sin = &sifa->address.sin; srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); SCTPDBG(SCTP_DEBUG_OUTPUT2, "match_nexthop4: src address is "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); SCTPDBG(SCTP_DEBUG_OUTPUT2, "network address is %x\n", srcnetaddr.s_addr); sin = &ro->ro_nh->gw4_sa; gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); SCTPDBG(SCTP_DEBUG_OUTPUT2, "match_nexthop4: nexthop is "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, 
&ro->ro_nh->gw_sa); SCTPDBG(SCTP_DEBUG_OUTPUT2, "network address is %x\n", gwnetaddr.s_addr); if (srcnetaddr.s_addr == gwnetaddr.s_addr) { return (1); } #endif return (0); } diff --git a/sys/netipsec/ipsec.c b/sys/netipsec/ipsec.c index 8d604a24eeea..6bacc68b7441 100644 --- a/sys/netipsec/ipsec.c +++ b/sys/netipsec/ipsec.c @@ -1,1582 +1,1586 @@ /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPsec controller part. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif #include #ifdef INET6 #include #endif #include #include #ifdef INET6 #include #endif #include #include #include #include /*XXX*/ #include #include #include #include #include #include #include #include /* NB: name changed so netstat doesn't use it. */ VNET_PCPUSTAT_DEFINE(struct ipsecstat, ipsec4stat); VNET_PCPUSTAT_SYSINIT(ipsec4stat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ipsec4stat); #endif /* VIMAGE */ /* DF bit on encap. 
0: clear 1: set 2: copy */ VNET_DEFINE(int, ip4_ipsec_dfbit) = 0; VNET_DEFINE(int, ip4_ipsec_min_pmtu) = 576; VNET_DEFINE(int, ip4_esp_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip4_esp_net_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip4_ah_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip4_ah_net_deflev) = IPSEC_LEVEL_USE; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ VNET_DEFINE(int, ip4_ipsec_ecn) = 0; +VNET_DEFINE(int, ip4_ipsec_random_id) = 0; VNET_DEFINE_STATIC(int, ip4_filtertunnel) = 0; #define V_ip4_filtertunnel VNET(ip4_filtertunnel) VNET_DEFINE_STATIC(int, check_policy_history) = 0; #define V_check_policy_history VNET(check_policy_history) VNET_DEFINE_STATIC(struct secpolicy *, def_policy) = NULL; #define V_def_policy VNET(def_policy) static int sysctl_def_policy(SYSCTL_HANDLER_ARGS) { int error, value; value = V_def_policy->policy; error = sysctl_handle_int(oidp, &value, 0, req); if (error == 0) { if (value != IPSEC_POLICY_DISCARD && value != IPSEC_POLICY_NONE) return (EINVAL); V_def_policy->policy = value; } return (error); } /* * Crypto support requirements: * * 1 require hardware support * -1 require software support * 0 take anything */ VNET_DEFINE(int, crypto_support) = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE; /* * Use asynchronous mode to parallelize crypto jobs: * * 0 - disabled * 1 - enabled */ VNET_DEFINE(int, async_crypto) = 0; /* * TCP/UDP checksum handling policy for transport mode NAT-T (RFC3948) * * 0 - auto: incrementally recompute, when checksum delta is known; * if checksum delta isn't known, reset checksum to zero for UDP, * and mark csum_flags as valid for TCP. * 1 - fully recompute TCP/UDP checksum. */ VNET_DEFINE(int, natt_cksum_policy) = 0; FEATURE(ipsec, "Internet Protocol Security (IPsec)"); FEATURE(ipsec_natt, "UDP Encapsulation of IPsec ESP Packets ('NAT-T')"); /* net.inet.ipsec */ SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_def_policy, "I", "IPsec default policy."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_esp_trans_deflev), 0, "Default ESP transport mode level"); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_esp_net_deflev), 0, "Default ESP tunnel mode level."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ah_trans_deflev), 0, "AH transfer mode default level."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ah_net_deflev), 0, "AH tunnel mode default level."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS, ah_cleartos, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0, "If set, clear type-of-service field when doing AH computation."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT, dfbit, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ipsec_dfbit), 0, "Do not fragment bit on encap."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_MIN_PMTU, min_pmtu, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ipsec_min_pmtu), 0, "Lowest acceptable PMTU value."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN, ecn, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ipsec_ecn), 0, "Explicit Congestion Notification handling."); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_RANDOM_ID, random_id, + CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ipsec_random_id), 0, + "Assign random ip_id values."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, 
crypto_support, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(crypto_support), 0, "Crypto driver selection."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, async_crypto, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(async_crypto), 0, "Use asynchronous mode to parallelize crypto jobs."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, check_policy_history, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(check_policy_history), 0, "Use strict check of inbound packets to security policy compliance."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, natt_cksum_policy, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(natt_cksum_policy), 0, "Method to fix TCP/UDP checksum for transport mode IPsec after NAT."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, filtertunnel, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_filtertunnel), 0, "If set, filter packets from an IPsec tunnel."); SYSCTL_VNET_PCPUSTAT(_net_inet_ipsec, OID_AUTO, ipsecstats, struct ipsecstat, ipsec4stat, "IPsec IPv4 statistics."); #ifdef REGRESSION /* * When set to 1, IPsec will send packets with the same sequence number. * This allows verifying that the other side has proper replay attack detection. */ VNET_DEFINE(int, ipsec_replay) = 0; SYSCTL_INT(_net_inet_ipsec, OID_AUTO, test_replay, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_replay), 0, "Emulate replay attack"); /* * When set to 1, IPsec will send packets with corrupted HMAC. * This allows verifying that the other side properly detects modified packets. */ VNET_DEFINE(int, ipsec_integrity) = 0; SYSCTL_INT(_net_inet_ipsec, OID_AUTO, test_integrity, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_integrity), 0, "Emulate man-in-the-middle attack"); #endif #ifdef INET6 VNET_PCPUSTAT_DEFINE(struct ipsecstat, ipsec6stat); VNET_PCPUSTAT_SYSINIT(ipsec6stat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ipsec6stat); #endif /* VIMAGE */ VNET_DEFINE(int, ip6_esp_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_esp_net_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_ah_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_ah_net_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_ipsec_ecn) = 0; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ VNET_DEFINE_STATIC(int, ip6_filtertunnel) = 0; #define V_ip6_filtertunnel VNET(ip6_filtertunnel) /* net.inet6.ipsec6 */ SYSCTL_PROC(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_def_policy, "I", "IPsec default policy."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_esp_trans_deflev), 0, "Default ESP transport mode level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_esp_net_deflev), 0, "Default ESP tunnel mode level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_ah_trans_deflev), 0, "AH transfer mode default level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_ah_net_deflev), 0, "AH tunnel mode default level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN, ecn, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_ipsec_ecn), 0, "Explicit Congestion Notification handling."); SYSCTL_INT(_net_inet6_ipsec6, OID_AUTO, filtertunnel, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_filtertunnel), 0, "If set, filter packets from an IPsec tunnel."); SYSCTL_VNET_PCPUSTAT(_net_inet6_ipsec6, IPSECCTL_STATS, ipsecstats, struct ipsecstat, ipsec6stat, "IPsec IPv6 statistics."); #endif /* INET6 */ static int
ipsec_in_reject(struct secpolicy *, struct inpcb *, const struct mbuf *); #ifdef INET static void ipsec4_get_ulp(const struct mbuf *, const struct ip *, struct secpolicyindex *, int); static void ipsec4_setspidx_ipaddr(const struct mbuf *, struct ip *, struct secpolicyindex *); #endif #ifdef INET6 static void ipsec6_get_ulp(const struct mbuf *m, struct secpolicyindex *, int); static void ipsec6_setspidx_ipaddr(const struct mbuf *, struct secpolicyindex *); #endif /* * Return a held reference to the default SP. */ static struct secpolicy * key_allocsp_default(void) { key_addref(V_def_policy); return (V_def_policy); } static void ipsec_invalidate_cache(struct inpcb *inp, u_int dir) { struct secpolicy *sp; INP_WLOCK_ASSERT(inp); if (dir == IPSEC_DIR_OUTBOUND) { if (inp->inp_sp->flags & INP_INBOUND_POLICY) return; sp = inp->inp_sp->sp_in; inp->inp_sp->sp_in = NULL; } else { if (inp->inp_sp->flags & INP_OUTBOUND_POLICY) return; sp = inp->inp_sp->sp_out; inp->inp_sp->sp_out = NULL; } if (sp != NULL) key_freesp(&sp); /* release extra reference */ } static void ipsec_cachepolicy(struct inpcb *inp, struct secpolicy *sp, u_int dir) { uint32_t genid; int downgrade; INP_LOCK_ASSERT(inp); if (dir == IPSEC_DIR_OUTBOUND) { /* Do we have configured PCB policy? */ if (inp->inp_sp->flags & INP_OUTBOUND_POLICY) return; /* Another thread has already set cached policy */ if (inp->inp_sp->sp_out != NULL) return; /* * Do not cache OUTBOUND policy if PCB isn't connected, * i.e. foreign address is INADDR_ANY/UNSPECIFIED. */ #ifdef INET if ((inp->inp_vflag & INP_IPV4) != 0 && inp->inp_faddr.s_addr == INADDR_ANY) return; #endif #ifdef INET6 if ((inp->inp_vflag & INP_IPV6) != 0 && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) return; #endif } else { /* Do we have configured PCB policy? */ if (inp->inp_sp->flags & INP_INBOUND_POLICY) return; /* Another thread has already set cached policy */ if (inp->inp_sp->sp_in != NULL) return; /* * Do not cache INBOUND policy for listen socket, * that is bound to INADDR_ANY/UNSPECIFIED address. */ #ifdef INET if ((inp->inp_vflag & INP_IPV4) != 0 && inp->inp_faddr.s_addr == INADDR_ANY) return; #endif #ifdef INET6 if ((inp->inp_vflag & INP_IPV6) != 0 && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) return; #endif } downgrade = 0; if (!INP_WLOCKED(inp)) { if ((downgrade = INP_TRY_UPGRADE(inp)) == 0) return; } if (dir == IPSEC_DIR_OUTBOUND) inp->inp_sp->sp_out = sp; else inp->inp_sp->sp_in = sp; /* * SP is already referenced by the lookup code. * We take extra reference here to avoid race in the * ipsec_getpcbpolicy() function - SP will not be freed in the * time between we take SP pointer from the cache and key_addref() * call. */ key_addref(sp); genid = key_getspgen(); if (genid != inp->inp_sp->genid) { ipsec_invalidate_cache(inp, dir); inp->inp_sp->genid = genid; } KEYDBG(IPSEC_STAMP, printf("%s: PCB(%p): cached %s SP(%p)\n", __func__, inp, dir == IPSEC_DIR_OUTBOUND ? "OUTBOUND": "INBOUND", sp)); if (downgrade != 0) INP_DOWNGRADE(inp); } static struct secpolicy * ipsec_checkpolicy(struct secpolicy *sp, struct inpcb *inp, int *error) { /* Save found OUTBOUND policy into PCB SP cache. */ if (inp != NULL && inp->inp_sp != NULL && inp->inp_sp->sp_out == NULL) ipsec_cachepolicy(inp, sp, IPSEC_DIR_OUTBOUND); switch (sp->policy) { default: printf("%s: invalid policy %u\n", __func__, sp->policy); /* FALLTHROUGH */ case IPSEC_POLICY_DISCARD: *error = -EINVAL; /* Packet is discarded by caller. 
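 *
 * The resulting caller contract (sketch): a NULL return with
 * *error == 0 means BYPASS/NONE (continue without IPsec), a NULL
 * return with *error == -EINVAL means DISCARD (drop the packet),
 * and a non-NULL SP means IPsec processing is required, e.g.:
 *
 *	sp = ipsec4_checkpolicy(m, inp, ip, &error, 0);
 *	if (sp == NULL && error != 0)
 *		goto drop;
 *	if (sp != NULL)
 *		... hand m off to IPsec output processing ...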
*/ /* FALLTHROUGH */ case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: key_freesp(&sp); sp = NULL; /* NB: force NULL result. */ break; case IPSEC_POLICY_IPSEC: /* XXXAE: handle LARVAL SP */ break; } KEYDBG(IPSEC_DUMP, printf("%s: get SP(%p), error %d\n", __func__, sp, *error)); return (sp); } static struct secpolicy * ipsec_getpcbpolicy(struct inpcb *inp, u_int dir) { struct secpolicy *sp; int flags, downgrade; if (inp == NULL || inp->inp_sp == NULL) return (NULL); INP_LOCK_ASSERT(inp); flags = inp->inp_sp->flags; if (dir == IPSEC_DIR_OUTBOUND) { sp = inp->inp_sp->sp_out; flags &= INP_OUTBOUND_POLICY; } else { sp = inp->inp_sp->sp_in; flags &= INP_INBOUND_POLICY; } /* * Check flags. If we have PCB SP, just return it. * Otherwise we need to check that cached SP entry isn't stale. */ if (flags == 0) { if (sp == NULL) return (NULL); if (inp->inp_sp->genid != key_getspgen()) { /* Invalidate the cache. */ downgrade = 0; if (!INP_WLOCKED(inp)) { if ((downgrade = INP_TRY_UPGRADE(inp)) == 0) return (NULL); } ipsec_invalidate_cache(inp, IPSEC_DIR_OUTBOUND); ipsec_invalidate_cache(inp, IPSEC_DIR_INBOUND); if (downgrade != 0) INP_DOWNGRADE(inp); return (NULL); } KEYDBG(IPSEC_STAMP, printf("%s: PCB(%p): cache hit SP(%p)\n", __func__, inp, sp)); /* Return referenced cached policy */ } key_addref(sp); return (sp); } #ifdef INET static void ipsec4_get_ulp(const struct mbuf *m, const struct ip *ip1, struct secpolicyindex *spidx, int needport) { uint8_t nxt; int off; /* Sanity check. */ IPSEC_ASSERT(m->m_pkthdr.len >= sizeof(struct ip), ("packet too short")); if (ip1->ip_off & htons(IP_MF | IP_OFFMASK)) goto done; off = ip1->ip_hl << 2; nxt = ip1->ip_p; while (off < m->m_pkthdr.len) { struct ip6_ext ip6e; struct tcphdr th; struct udphdr uh; switch (nxt) { case IPPROTO_TCP: spidx->ul_proto = nxt; if (!needport) goto done_proto; if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) goto done; m_copydata(m, off, sizeof (th), (caddr_t) &th); spidx->src.sin.sin_port = th.th_sport; spidx->dst.sin.sin_port = th.th_dport; return; case IPPROTO_UDP: spidx->ul_proto = nxt; if (!needport) goto done_proto; if (off + sizeof(struct udphdr) > m->m_pkthdr.len) goto done; m_copydata(m, off, sizeof (uh), (caddr_t) &uh); spidx->src.sin.sin_port = uh.uh_sport; spidx->dst.sin.sin_port = uh.uh_dport; return; case IPPROTO_AH: if (off + sizeof(ip6e) > m->m_pkthdr.len) goto done; /* XXX Sigh, this works but is totally bogus. */ m_copydata(m, off, sizeof(ip6e), (caddr_t) &ip6e); off += (ip6e.ip6e_len + 2) << 2; nxt = ip6e.ip6e_nxt; break; case IPPROTO_ICMP: default: /* XXX Intermediate headers??? */ spidx->ul_proto = nxt; goto done_proto; } } done: spidx->ul_proto = IPSEC_ULPROTO_ANY; done_proto: spidx->src.sin.sin_port = IPSEC_PORT_ANY; spidx->dst.sin.sin_port = IPSEC_PORT_ANY; KEYDBG(IPSEC_DUMP, printf("%s: ", __func__); kdebug_secpolicyindex(spidx, NULL)); } static void ipsec4_setspidx_ipaddr(const struct mbuf *m, struct ip *ip1, struct secpolicyindex *spidx) { ipsec4_setsockaddrs(m, ip1, &spidx->src, &spidx->dst); spidx->prefs = sizeof(struct in_addr) << 3; spidx->prefd = sizeof(struct in_addr) << 3; } static struct secpolicy * ipsec4_getpolicy(const struct mbuf *m, struct inpcb *inp, struct ip *ip1, u_int dir, int needport) { struct secpolicyindex spidx; struct secpolicy *sp; sp = ipsec_getpcbpolicy(inp, dir); if (sp == NULL && key_havesp(dir)) { /* Make an index to look for a policy. 
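 *
 * The full lookup order implemented here is (sketch):
 *
 *	1. ipsec_getpcbpolicy()  - per-socket policy or cached SP;
 *	2. key_allocsp()         - kernel SPD search, keyed by a
 *	   secpolicyindex built from the packet (addresses first,
 *	   then upper-layer protocol and ports);
 *	3. key_allocsp_default() - system default policy.
 *
 * Each step returns a referenced SP, so every caller ends with
 * key_freesp().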
*/ ipsec4_setspidx_ipaddr(m, ip1, &spidx); ipsec4_get_ulp(m, ip1, &spidx, needport); spidx.dir = dir; sp = key_allocsp(&spidx, dir); } if (sp == NULL) /* No SP found, use system default. */ sp = key_allocsp_default(); return (sp); } /* * Check security policy for *OUTBOUND* IPv4 packet. */ struct secpolicy * ipsec4_checkpolicy(const struct mbuf *m, struct inpcb *inp, struct ip *ip1, int *error, int needport) { struct secpolicy *sp; *error = 0; sp = ipsec4_getpolicy(m, inp, ip1, IPSEC_DIR_OUTBOUND, needport); if (sp != NULL) sp = ipsec_checkpolicy(sp, inp, error); if (sp == NULL) { switch (*error) { case 0: /* No IPsec required: BYPASS or NONE */ break; case -EINVAL: IPSECSTAT_INC(ips_out_polvio); break; default: IPSECSTAT_INC(ips_out_inval); } } KEYDBG(IPSEC_STAMP, printf("%s: using SP(%p), error %d\n", __func__, sp, *error)); if (sp != NULL) KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); return (sp); } /* * Check IPv4 packet against *INBOUND* security policy. * This function is called from tcp_input(), udp_input(), * rip_input() and sctp_input(). */ int ipsec4_in_reject1(const struct mbuf *m, struct ip *ip1, struct inpcb *inp) { struct secpolicy *sp; #ifdef IPSEC_OFFLOAD struct ipsec_accel_in_tag *tag; #endif struct ip ip_hdr; int result; #ifdef IPSEC_OFFLOAD tag = ipsec_accel_input_tag_lookup(m); if (tag != NULL) return (0); #endif if (ip1 == NULL) { ip1 = &ip_hdr; m_copydata(m, 0, sizeof(*ip1), (char *)ip1); } sp = ipsec4_getpolicy(m, inp, ip1, IPSEC_DIR_INBOUND, 0); result = ipsec_in_reject(sp, inp, m); key_freesp(&sp); if (result != 0) IPSECSTAT_INC(ips_in_polvio); return (result); } int ipsec4_in_reject(const struct mbuf *m, struct inpcb *inp) { return (ipsec4_in_reject1(m, NULL, inp)); } /* * IPSEC_CAP() method implementation for IPv4. */ int ipsec4_capability(struct mbuf *m, u_int cap) { switch (cap) { case IPSEC_CAP_BYPASS_FILTER: /* * Bypass packet filtering for packets previously handled * by IPsec. */ if (!V_ip4_filtertunnel && m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL) return (1); return (0); case IPSEC_CAP_OPERABLE: /* Do we have active security policies? */ return (key_havesp_any()); }; return (EOPNOTSUPP); } #endif /* INET */ #ifdef INET6 static void ipsec6_get_ulp(const struct mbuf *m, struct secpolicyindex *spidx, int needport) { struct tcphdr th; struct udphdr uh; struct icmp6_hdr ih; int off, nxt; IPSEC_ASSERT(m->m_pkthdr.len >= sizeof(struct ip6_hdr), ("packet too short")); /* Set default. */ spidx->ul_proto = IPSEC_ULPROTO_ANY; spidx->src.sin6.sin6_port = IPSEC_PORT_ANY; spidx->dst.sin6.sin6_port = IPSEC_PORT_ANY; nxt = -1; off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); if (off < 0 || m->m_pkthdr.len < off) return; switch (nxt) { case IPPROTO_TCP: spidx->ul_proto = nxt; if (!needport) break; if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) break; m_copydata(m, off, sizeof(th), (caddr_t)&th); spidx->src.sin6.sin6_port = th.th_sport; spidx->dst.sin6.sin6_port = th.th_dport; break; case IPPROTO_UDP: spidx->ul_proto = nxt; if (!needport) break; if (off + sizeof(struct udphdr) > m->m_pkthdr.len) break; m_copydata(m, off, sizeof(uh), (caddr_t)&uh); spidx->src.sin6.sin6_port = uh.uh_sport; spidx->dst.sin6.sin6_port = uh.uh_dport; break; case IPPROTO_ICMPV6: spidx->ul_proto = nxt; if (off + sizeof(struct icmp6_hdr) > m->m_pkthdr.len) break; m_copydata(m, off, sizeof(ih), (caddr_t)&ih); spidx->src.sin6.sin6_port = htons((uint16_t)ih.icmp6_type); spidx->dst.sin6.sin6_port = htons((uint16_t)ih.icmp6_code); break; default: /* XXX Intermediate headers??? 
*/ spidx->ul_proto = nxt; break; } KEYDBG(IPSEC_DUMP, printf("%s: ", __func__); kdebug_secpolicyindex(spidx, NULL)); } static void ipsec6_setspidx_ipaddr(const struct mbuf *m, struct secpolicyindex *spidx) { ipsec6_setsockaddrs(m, &spidx->src, &spidx->dst); spidx->prefs = sizeof(struct in6_addr) << 3; spidx->prefd = sizeof(struct in6_addr) << 3; } static struct secpolicy * ipsec6_getpolicy(const struct mbuf *m, struct inpcb *inp, u_int dir, int needport) { struct secpolicyindex spidx; struct secpolicy *sp; sp = ipsec_getpcbpolicy(inp, dir); if (sp == NULL && key_havesp(dir)) { /* Make an index to look for a policy. */ ipsec6_setspidx_ipaddr(m, &spidx); ipsec6_get_ulp(m, &spidx, needport); spidx.dir = dir; sp = key_allocsp(&spidx, dir); } if (sp == NULL) /* No SP found, use system default. */ sp = key_allocsp_default(); return (sp); } /* * Check security policy for *OUTBOUND* IPv6 packet. */ struct secpolicy * ipsec6_checkpolicy(const struct mbuf *m, struct inpcb *inp, int *error, int needport) { struct secpolicy *sp; *error = 0; sp = ipsec6_getpolicy(m, inp, IPSEC_DIR_OUTBOUND, needport); if (sp != NULL) sp = ipsec_checkpolicy(sp, inp, error); if (sp == NULL) { switch (*error) { case 0: /* No IPsec required: BYPASS or NONE */ break; case -EINVAL: IPSEC6STAT_INC(ips_out_polvio); break; default: IPSEC6STAT_INC(ips_out_inval); } } KEYDBG(IPSEC_STAMP, printf("%s: using SP(%p), error %d\n", __func__, sp, *error)); if (sp != NULL) KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); return (sp); } /* * Check IPv6 packet against inbound security policy. * This function is called from tcp6_input(), udp6_input(), * rip6_input() and sctp_input(). */ int ipsec6_in_reject(const struct mbuf *m, struct inpcb *inp) { struct secpolicy *sp; #ifdef IPSEC_OFFLOAD struct ipsec_accel_in_tag *tag; #endif int result; #ifdef IPSEC_OFFLOAD tag = ipsec_accel_input_tag_lookup(m); if (tag != NULL) return (0); #endif sp = ipsec6_getpolicy(m, inp, IPSEC_DIR_INBOUND, 0); result = ipsec_in_reject(sp, inp, m); key_freesp(&sp); if (result) IPSEC6STAT_INC(ips_in_polvio); return (result); } /* * IPSEC_CAP() method implementation for IPv6. */ int ipsec6_capability(struct mbuf *m, u_int cap) { switch (cap) { case IPSEC_CAP_BYPASS_FILTER: /* * Bypass packet filtering for packets previously handled * by IPsec. */ if (!V_ip6_filtertunnel && m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL) return (1); return (0); case IPSEC_CAP_OPERABLE: /* Do we have active security policies? */ return (key_havesp_any()); }; return (EOPNOTSUPP); } #endif /* INET6 */ int ipsec_run_hhooks(struct ipsec_ctx_data *ctx, int type) { int idx; switch (ctx->af) { #ifdef INET case AF_INET: idx = HHOOK_IPSEC_INET; break; #endif #ifdef INET6 case AF_INET6: idx = HHOOK_IPSEC_INET6; break; #endif default: return (EPFNOSUPPORT); } if (type == HHOOK_TYPE_IPSEC_IN) HHOOKS_RUN_IF(V_ipsec_hhh_in[idx], ctx, NULL); else HHOOKS_RUN_IF(V_ipsec_hhh_out[idx], ctx, NULL); if (*ctx->mp == NULL) return (EACCES); return (0); } /* * Return current level. * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned. */ u_int ipsec_get_reqlevel(struct secpolicy *sp, u_int idx) { struct ipsecrequest *isr; u_int esp_trans_deflev, esp_net_deflev; u_int ah_trans_deflev, ah_net_deflev; u_int level = 0; IPSEC_ASSERT(idx < sp->tcount, ("Wrong IPsec request index %d", idx)); /* XXX Note that we have ipseclog() expanded here - code sync issue. */ #define IPSEC_CHECK_DEFAULT(lev) \ (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE && \ (lev) != IPSEC_LEVEL_UNIQUE) \ ? 
(V_ipsec_debug ? \ log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\ (lev), IPSEC_LEVEL_REQUIRE) : 0), \ (lev) = IPSEC_LEVEL_REQUIRE, (lev) : (lev)) /* * IPsec VTI uses unique security policy with fake spidx filled * with zeroes. Just return IPSEC_LEVEL_REQUIRE instead of doing * full level lookup for such policies. */ if (sp->state == IPSEC_SPSTATE_IFNET) { IPSEC_ASSERT(sp->req[idx]->level == IPSEC_LEVEL_UNIQUE, ("Wrong IPsec request level %d", sp->req[idx]->level)); return (IPSEC_LEVEL_REQUIRE); } /* Set default level. */ switch (sp->spidx.src.sa.sa_family) { #ifdef INET case AF_INET: esp_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip4_esp_trans_deflev); esp_net_deflev = IPSEC_CHECK_DEFAULT(V_ip4_esp_net_deflev); ah_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip4_ah_trans_deflev); ah_net_deflev = IPSEC_CHECK_DEFAULT(V_ip4_ah_net_deflev); break; #endif #ifdef INET6 case AF_INET6: esp_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip6_esp_trans_deflev); esp_net_deflev = IPSEC_CHECK_DEFAULT(V_ip6_esp_net_deflev); ah_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip6_ah_trans_deflev); ah_net_deflev = IPSEC_CHECK_DEFAULT(V_ip6_ah_net_deflev); break; #endif /* INET6 */ default: panic("%s: unknown af %u", __func__, sp->spidx.src.sa.sa_family); } #undef IPSEC_CHECK_DEFAULT isr = sp->req[idx]; /* Set level. */ switch (isr->level) { case IPSEC_LEVEL_DEFAULT: switch (isr->saidx.proto) { case IPPROTO_ESP: if (isr->saidx.mode == IPSEC_MODE_TUNNEL) level = esp_net_deflev; else level = esp_trans_deflev; break; case IPPROTO_AH: if (isr->saidx.mode == IPSEC_MODE_TUNNEL) level = ah_net_deflev; else level = ah_trans_deflev; break; case IPPROTO_IPCOMP: /* * We don't really care, as IPcomp document says that * we shouldn't compress small packets. */ level = IPSEC_LEVEL_USE; break; default: panic("%s: Illegal protocol defined %u\n", __func__, isr->saidx.proto); } break; case IPSEC_LEVEL_USE: case IPSEC_LEVEL_REQUIRE: level = isr->level; break; case IPSEC_LEVEL_UNIQUE: level = IPSEC_LEVEL_REQUIRE; break; default: panic("%s: Illegal IPsec level %u\n", __func__, isr->level); } return (level); } static int ipsec_check_history(const struct mbuf *m, struct secpolicy *sp, u_int idx) { struct xform_history *xh; struct m_tag *mtag; mtag = NULL; while ((mtag = m_tag_find(__DECONST(struct mbuf *, m), PACKET_TAG_IPSEC_IN_DONE, mtag)) != NULL) { xh = (struct xform_history *)(mtag + 1); KEYDBG(IPSEC_DATA, char buf[IPSEC_ADDRSTRLEN]; printf("%s: mode %s proto %u dst %s\n", __func__, kdebug_secasindex_mode(xh->mode), xh->proto, ipsec_address(&xh->dst, buf, sizeof(buf)))); if (xh->proto != sp->req[idx]->saidx.proto) continue; /* If SA had IPSEC_MODE_ANY, consider this as match. */ if (xh->mode != sp->req[idx]->saidx.mode && xh->mode != IPSEC_MODE_ANY) continue; /* * For transport mode IPsec request doesn't contain * addresses. We need to use address from spidx. */ if (sp->req[idx]->saidx.mode == IPSEC_MODE_TRANSPORT) { if (key_sockaddrcmp_withmask(&xh->dst.sa, &sp->spidx.dst.sa, sp->spidx.prefd) != 0) continue; } else { if (key_sockaddrcmp(&xh->dst.sa, &sp->req[idx]->saidx.dst.sa, 0) != 0) continue; } return (0); /* matched */ } return (1); } /* * Check security policy requirements against the actual * packet contents. Return one if the packet should be * rejected as "invalid"; otherwise return zero to have the * packet treated as "valid". 
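 *
 * A transport protocol's input path consumes this as a simple drop
 * test; sketch of the usual call site (simplified, the real callers go
 * through the IPSEC_ENABLED()/IPSEC_CHECK_POLICY() wrappers declared
 * in ipsec_support.h):
 *
 *	if (IPSEC_ENABLED(ipv4) &&
 *	    IPSEC_CHECK_POLICY(ipv4, m, inp) != 0)
 *		goto drop;	(counted as ips_in_polvio)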
* * OUT: * 0: valid * 1: invalid */ static int ipsec_in_reject(struct secpolicy *sp, struct inpcb *inp, const struct mbuf *m) { int i; KEYDBG(IPSEC_STAMP, printf("%s: PCB(%p): using SP(%p)\n", __func__, inp, sp)); KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); if (inp != NULL && inp->inp_sp != NULL && inp->inp_sp->sp_in == NULL) ipsec_cachepolicy(inp, sp, IPSEC_DIR_INBOUND); /* Check policy. */ switch (sp->policy) { case IPSEC_POLICY_DISCARD: return (1); case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: return (0); } IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC, ("invalid policy %u", sp->policy)); /* * ipsec[46]_common_input_cb after each transform adds * PACKET_TAG_IPSEC_IN_DONE mbuf tag. It contains SPI, proto, mode * and destination address from saidx. We can compare info from * these tags with requirements in SP. */ for (i = 0; i < sp->tcount; i++) { /* * Do not check IPcomp, since IPcomp document * says that we shouldn't compress small packets. * IPComp policy should always be treated as being * in "use" level. */ if (sp->req[i]->saidx.proto == IPPROTO_IPCOMP || ipsec_get_reqlevel(sp, i) != IPSEC_LEVEL_REQUIRE) continue; if (V_check_policy_history != 0 && ipsec_check_history(m, sp, i) != 0) return (1); else switch (sp->req[i]->saidx.proto) { case IPPROTO_ESP: if ((m->m_flags & M_DECRYPTED) == 0) { KEYDBG(IPSEC_DUMP, printf("%s: ESP m_flags:%x\n", __func__, m->m_flags)); return (1); } break; case IPPROTO_AH: if ((m->m_flags & M_AUTHIPHDR) == 0) { KEYDBG(IPSEC_DUMP, printf("%s: AH m_flags:%x\n", __func__, m->m_flags)); return (1); } break; } } return (0); /* Valid. */ } /* * Compute the byte size to be occupied by the IPsec header. * In case it is tunnelled, it includes the size of the outer IP header. */ size_t ipsec_hdrsiz_internal(struct secpolicy *sp) { size_t size; int i; KEYDBG(IPSEC_STAMP, printf("%s: using SP(%p)\n", __func__, sp)); KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); switch (sp->policy) { case IPSEC_POLICY_DISCARD: case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: return (0); } IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC, ("invalid policy %u", sp->policy)); /* * XXX: for each transform we need to look up a suitable SA * and use info from the SA to calculate the header size. * XXX: for NAT-T we need to consider the UDP header size. */ size = 0; for (i = 0; i < sp->tcount; i++) { switch (sp->req[i]->saidx.proto) { case IPPROTO_ESP: size += esp_hdrsiz(NULL); break; case IPPROTO_AH: size += ah_hdrsiz(NULL); break; case IPPROTO_IPCOMP: size += sizeof(struct ipcomp); break; } if (sp->req[i]->saidx.mode == IPSEC_MODE_TUNNEL) { switch (sp->req[i]->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: size += sizeof(struct ip); break; #endif #ifdef INET6 case AF_INET6: size += sizeof(struct ip6_hdr); break; #endif default: ipseclog((LOG_ERR, "%s: unknown AF %d in " "IPsec tunnel SA\n", __func__, sp->req[i]->saidx.dst.sa.sa_family)); break; } } } return (size); } /* * Compute ESP/AH header size for protocols with PCB, including * the outer IP header. Currently only tcp_output() uses it.
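 *
 * Sketch of the intended use - shrinking the TCP MSS by the expected
 * per-packet IPsec overhead (simplified; the real caller goes through
 * the IPSEC_HDRSIZE() support macro):
 *
 *	mtu -= ipsec_hdrsiz_inpcb(inp);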
*/ size_t ipsec_hdrsiz_inpcb(struct inpcb *inp) { struct secpolicyindex spidx; struct secpolicy *sp; size_t sz; sp = ipsec_getpcbpolicy(inp, IPSEC_DIR_OUTBOUND); if (sp == NULL && key_havesp(IPSEC_DIR_OUTBOUND)) { ipsec_setspidx_inpcb(inp, &spidx, IPSEC_DIR_OUTBOUND); sp = key_allocsp(&spidx, IPSEC_DIR_OUTBOUND); } if (sp == NULL) sp = key_allocsp_default(); sz = ipsec_hdrsiz_internal(sp); key_freesp(&sp); return (sz); } #define IPSEC_BITMAP_INDEX_MASK(w) (w - 1) #define IPSEC_REDUNDANT_BIT_SHIFTS 5 #define IPSEC_REDUNDANT_BITS (1 << IPSEC_REDUNDANT_BIT_SHIFTS) #define IPSEC_BITMAP_LOC_MASK (IPSEC_REDUNDANT_BITS - 1) /* * Functions below are responsible for checking and updating bitmap. * These are used to separate ipsec_chkreplay() and ipsec_updatereplay() * from window implementation * * Based on RFC 6479. Blocks are 32 bits unsigned integers */ static inline int check_window(const struct secreplay *replay, uint64_t seq) { int index, bit_location; SECREPLAY_ASSERT(replay); bit_location = seq & IPSEC_BITMAP_LOC_MASK; index = (seq >> IPSEC_REDUNDANT_BIT_SHIFTS) & IPSEC_BITMAP_INDEX_MASK(replay->bitmap_size); /* This packet already seen? */ return ((replay->bitmap)[index] & (1 << bit_location)); } static inline void advance_window(const struct secreplay *replay, uint64_t seq) { int i; uint64_t index, index_cur, diff; SECREPLAY_ASSERT(replay); index_cur = replay->last >> IPSEC_REDUNDANT_BIT_SHIFTS; index = seq >> IPSEC_REDUNDANT_BIT_SHIFTS; diff = index - index_cur; if (diff > replay->bitmap_size) { /* something unusual in this case */ diff = replay->bitmap_size; } for (i = 0; i < diff; i++) { replay->bitmap[(i + index_cur + 1) & IPSEC_BITMAP_INDEX_MASK(replay->bitmap_size)] = 0; } } static inline void set_window(const struct secreplay *replay, uint64_t seq) { int index, bit_location; SECREPLAY_ASSERT(replay); bit_location = seq & IPSEC_BITMAP_LOC_MASK; index = (seq >> IPSEC_REDUNDANT_BIT_SHIFTS) & IPSEC_BITMAP_INDEX_MASK(replay->bitmap_size); replay->bitmap[index] |= (1 << bit_location); } /* * Check the variable replay window. * ipsec_chkreplay() performs replay check before ICV verification. * ipsec_updatereplay() updates replay bitmap. This must be called after * ICV verification (it also performs replay check, which is usually done * beforehand). * 0 (zero) is returned if packet disallowed, 1 if packet permitted. * * Based on RFC 4303 */ int ipsec_chkreplay(uint32_t seq, uint32_t *seqhigh, struct secasvar *sav) { char buf[128]; struct secreplay *replay; uint32_t window; uint32_t tl, th, bl; uint32_t seqh; IPSEC_ASSERT(sav != NULL, ("Null SA")); IPSEC_ASSERT(sav->replay != NULL, ("Null replay state")); replay = sav->replay; /* No need to check replay if disabled. */ if (replay->wsize == 0) { return (1); } SECREPLAY_LOCK(replay); /* Zero sequence number is not allowed. */ if (seq == 0 && replay->last == 0) { SECREPLAY_UNLOCK(replay); return (0); } window = replay->wsize << 3; /* Size of window */ tl = (uint32_t)replay->last; /* Top of window, lower part */ th = (uint32_t)(replay->last >> 32); /* Top of window, high part */ bl = tl - window + 1; /* Bottom of window, lower part */ /* * We keep the high part intact when: * 1) the seq is within [bl, 0xffffffff] and the whole window is * within one subspace; * 2) the seq is within [0, bl) and window spans two subspaces. 
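 *
 * Worked example (sketch): suppose replay->last = 0x1_00000005
 * (th = 1, tl = 5) and window = 128, so bl = 5 - 128 + 1 wraps to
 * 0xffffff86.  An arriving seq = 0xffffffe0 satisfies neither case
 * above, but does satisfy (tl < window - 1 && seq >= bl) further
 * below: it belongs to the previous subspace, so *seqhigh becomes
 * th - 1 = 0 and the 64-bit ESN checked is 0x00000000ffffffe0.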
*/ if ((tl >= window - 1 && seq >= bl) || (tl < window - 1 && seq < bl)) { *seqhigh = th; if (seq <= tl) { /* Sequence number inside window - check against replay */ if (check_window(replay, seq)) { SECREPLAY_UNLOCK(replay); return (0); } } SECREPLAY_UNLOCK(replay); /* Sequence number above top of window or not found in bitmap */ return (1); } /* * If ESN is not enabled and packet with highest sequence number * was received we should report overflow */ if (tl == 0xffffffff && !(sav->flags & SADB_X_SAFLAGS_ESN)) { /* Set overflow flag. */ replay->overflow++; if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { if (sav->sah->saidx.proto == IPPROTO_ESP) ESPSTAT_INC(esps_wrap); else if (sav->sah->saidx.proto == IPPROTO_AH) AHSTAT_INC(ahs_wrap); SECREPLAY_UNLOCK(replay); return (0); } ipseclog((LOG_WARNING, "%s: replay counter made %d cycle. %s\n", __func__, replay->overflow, ipsec_sa2str(sav, buf, sizeof(buf)))); } /* * Seq is within [bl, 0xffffffff] and bl is within * [0xffffffff-window, 0xffffffff]. This means we got a seq * which is within our replay window, but in the previous * subspace. */ if (tl < window - 1 && seq >= bl) { if (th == 0) return (0); *seqhigh = th - 1; seqh = th - 1; if (check_window(replay, seq)) { SECREPLAY_UNLOCK(replay); return (0); } SECREPLAY_UNLOCK(replay); return (1); } /* * Seq is within [0, bl) but the whole window is within one subspace. * This means that seq has wrapped and is in next subspace */ *seqhigh = th + 1; seqh = th + 1; /* Don't let high part wrap. */ if (seqh == 0) { /* Set overflow flag. */ replay->overflow++; if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { if (sav->sah->saidx.proto == IPPROTO_ESP) ESPSTAT_INC(esps_wrap); else if (sav->sah->saidx.proto == IPPROTO_AH) AHSTAT_INC(ahs_wrap); SECREPLAY_UNLOCK(replay); return (0); } ipseclog((LOG_WARNING, "%s: replay counter made %d cycle. %s\n", __func__, replay->overflow, ipsec_sa2str(sav, buf, sizeof(buf)))); } SECREPLAY_UNLOCK(replay); return (1); } /* * Check replay counter whether to update or not. * OUT: 0: OK * 1: NG */ int ipsec_updatereplay(uint32_t seq, struct secasvar *sav) { struct secreplay *replay; uint32_t window; uint32_t tl, th, bl; uint32_t seqh; IPSEC_ASSERT(sav != NULL, ("Null SA")); IPSEC_ASSERT(sav->replay != NULL, ("Null replay state")); replay = sav->replay; /* No need to check replay if disabled. */ if (replay->wsize == 0) return (0); SECREPLAY_LOCK(replay); /* Zero sequence number is not allowed. */ if (seq == 0 && replay->last == 0) { SECREPLAY_UNLOCK(replay); return (1); } window = replay->wsize << 3; /* Size of window */ tl = (uint32_t)replay->last; /* Top of window, lower part */ th = (uint32_t)(replay->last >> 32); /* Top of window, high part */ bl = tl - window + 1; /* Bottom of window, lower part */ /* * We keep the high part intact when: * 1) the seq is within [bl, 0xffffffff] and the whole window is * within one subspace; * 2) the seq is within [0, bl) and window spans two subspaces. 
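 *
 * For the in-window case below, the bitmap bookkeeping reduces to
 * (sketch, constants from the IPSEC_BITMAP_* macros above, with
 * bitmap_size counting 32-bit blocks):
 *
 *	bit   = seq & 31;                        (bit inside a block)
 *	block = (seq >> 5) & (bitmap_size - 1);  (32-bit block index)
 *	seen  = bitmap[block] & (1 << bit);
 *
 * e.g. seq = 139 (0x8b) maps to bit 11 of block (139 >> 5) & mask.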
*/ if ((tl >= window - 1 && seq >= bl) || (tl < window - 1 && seq < bl)) { seqh = th; if (seq <= tl) { /* Sequence number inside window - check against replay */ if (check_window(replay, seq)) { SECREPLAY_UNLOCK(replay); return (1); } set_window(replay, seq); } else { advance_window(replay, ((uint64_t)seqh << 32) | seq); set_window(replay, seq); replay->last = ((uint64_t)seqh << 32) | seq; } /* Sequence number above top of window or not found in bitmap */ replay->count++; SECREPLAY_UNLOCK(replay); return (0); } if (!(sav->flags & SADB_X_SAFLAGS_ESN)) { SECREPLAY_UNLOCK(replay); return (1); } /* * Seq is within [bl, 0xffffffff] and bl is within * [0xffffffff-window, 0xffffffff]. This means we got a seq * which is within our replay window, but in the previous * subspace. */ if (tl < window - 1 && seq >= bl) { if (th == 0) { SECREPLAY_UNLOCK(replay); return (1); } if (check_window(replay, seq)) { SECREPLAY_UNLOCK(replay); return (1); } set_window(replay, seq); replay->count++; SECREPLAY_UNLOCK(replay); return (0); } /* * Seq is within [0, bl) but the whole window is within one subspace. * This means that seq has wrapped and is in next subspace */ seqh = th + 1; /* Don't let high part wrap. */ if (seqh == 0) { SECREPLAY_UNLOCK(replay); return (1); } advance_window(replay, ((uint64_t)seqh << 32) | seq); set_window(replay, seq); replay->last = ((uint64_t)seqh << 32) | seq; replay->count++; SECREPLAY_UNLOCK(replay); return (0); } int ipsec_updateid(struct secasvar *sav, crypto_session_t *new, crypto_session_t *old) { crypto_session_t tmp; /* * tdb_cryptoid is initialized by xform_init(). * Then it can be changed only when some crypto error occurred or * when SA is deleted. We stored used cryptoid in the xform_data * structure. In case when crypto error occurred and crypto * subsystem has reinited the session, it returns new cryptoid * and EAGAIN error code. * * This function will be called when we got EAGAIN from crypto * subsystem. * *new is cryptoid that was returned by crypto subsystem in * the crp_sid. * *old is the original cryptoid that we stored in xform_data. * * For first failed request *old == sav->tdb_cryptoid, then * we update sav->tdb_cryptoid and redo crypto_dispatch(). * For next failed request *old != sav->tdb_cryptoid, then * we store cryptoid from first request into the *new variable * and crp_sid from this second session will be returned via * *old pointer, so caller can release second session. * * XXXAE: check this more carefully. 
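 *
 * Sketch of the caller side in an xform crypto callback (simplified
 * and illustrative only; xd stands for the per-request xform_data):
 *
 *	if (crp->crp_etype == EAGAIN) {
 *		crypto_session_t cryptoid = xd->cryptoid;
 *
 *		if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0)
 *			crypto_freesession(cryptoid);
 *		xd->cryptoid = crp->crp_session;
 *		return (crypto_dispatch(crp));
 *	}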
*/ KEYDBG(IPSEC_STAMP, printf("%s: SA(%p) moves cryptoid %p -> %p\n", __func__, sav, *old, *new)); KEYDBG(IPSEC_DATA, kdebug_secasv(sav)); SECASVAR_WLOCK(sav); if (sav->tdb_cryptoid != *old) { /* cryptoid was already updated */ tmp = *new; *new = sav->tdb_cryptoid; *old = tmp; SECASVAR_WUNLOCK(sav); return (1); } sav->tdb_cryptoid = *new; SECASVAR_WUNLOCK(sav); return (0); } int ipsec_initialized(void) { return (V_def_policy != NULL); } static void def_policy_init(const void *unused __unused) { V_def_policy = key_newsp(); if (V_def_policy != NULL) { V_def_policy->policy = IPSEC_POLICY_NONE; /* Force INPCB SP cache invalidation */ key_bumpspgen(); } else printf("%s: failed to initialize default policy\n", __func__); } static void def_policy_uninit(const void *unused __unused) { if (V_def_policy != NULL) { key_freesp(&V_def_policy); key_bumpspgen(); } } VNET_SYSINIT(def_policy_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, def_policy_init, NULL); VNET_SYSUNINIT(def_policy_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, def_policy_uninit, NULL); diff --git a/sys/netipsec/ipsec.h b/sys/netipsec/ipsec.h index f8c5b10e7bd6..3acb6a4044f1 100644 --- a/sys/netipsec/ipsec.h +++ b/sys/netipsec/ipsec.h @@ -1,384 +1,387 @@ /* $KAME: ipsec.h,v 1.53 2001/11/20 08:32:38 itojun Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPsec controller part. */ #ifndef _NETIPSEC_IPSEC_H_ #define _NETIPSEC_IPSEC_H_ #include #include #ifdef _KERNEL #include #include #include #include #include #define IPSEC_ASSERT(_c,_m) KASSERT(_c, _m) /* * Security Policy Index * Ensure that both address families in the "src" and "dst" are same. * When the value of the ul_proto is ICMPv6, the port field in "src" * specifies ICMPv6 type, and the port field in "dst" specifies ICMPv6 code. 
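 *
 * For example (sketch), a selector matching ICMPv6 echo requests
 * would carry:
 *
 *	spidx.ul_proto           = IPPROTO_ICMPV6;
 *	spidx.src.sin6.sin6_port = htons(ICMP6_ECHO_REQUEST);	(type 128)
 *	spidx.dst.sin6.sin6_port = htons(0);			(code 0)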
*/ struct secpolicyindex { union sockaddr_union src; /* IP src address for SP */ union sockaddr_union dst; /* IP dst address for SP */ uint8_t ul_proto; /* upper layer protocol */ uint8_t dir; /* direction of packet flow */ uint8_t prefs; /* prefix length in bits for src */ uint8_t prefd; /* prefix length in bits for dst */ }; /* Request for IPsec */ struct ipsecrequest { struct secasindex saidx;/* hint for searching for a proper SA */ /* if __ss_len == 0 then no address specified.*/ u_int level; /* IPsec level defined below. */ }; struct ipsec_accel_adddel_sp_tq { struct vnet *adddel_vnet; struct task adddel_task; int adddel_scheduled; }; /* Security Policy Data Base */ struct secpolicy { TAILQ_ENTRY(secpolicy) chain; LIST_ENTRY(secpolicy) idhash; LIST_ENTRY(secpolicy) drainq; struct secpolicyindex spidx; /* selector */ #define IPSEC_MAXREQ 4 struct ipsecrequest *req[IPSEC_MAXREQ]; u_int tcount; /* IPsec transforms count */ volatile u_int refcnt; /* reference count */ u_int policy; /* policy_type per pfkeyv2.h */ u_int state; #define IPSEC_SPSTATE_DEAD 0 #define IPSEC_SPSTATE_LARVAL 1 #define IPSEC_SPSTATE_ALIVE 2 #define IPSEC_SPSTATE_PCB 3 #define IPSEC_SPSTATE_IFNET 4 uint32_t priority; /* priority of this policy */ uint32_t id; /* a number unique on the system */ /* * lifetime handler. * the policy can be used without limitation if both lifetime and * validtime are zero. * "lifetime" is passed by sadb_lifetime.sadb_lifetime_addtime. * "validtime" is passed by sadb_lifetime.sadb_lifetime_usetime. */ time_t created; /* time the policy was created */ time_t lastused; /* updated whenever the kernel sends a packet */ long lifetime; /* duration of the lifetime of this policy */ long validtime; /* duration this policy is valid without use */ CK_LIST_HEAD(, ifp_handle_sp) accel_ifps; struct ipsec_accel_adddel_sp_tq accel_add_tq; struct ipsec_accel_adddel_sp_tq accel_del_tq; struct inpcb *ipsec_accel_add_sp_inp; const char *accel_ifname; }; /* * PCB security policies. * An application can set up private security policies for a socket. * Such policies can have IPSEC, BYPASS and ENTRUST type. * By default, policies are set to NULL, which means they have ENTRUST type. * When an application sets a BYPASS or IPSEC type policy, the flags field * is also updated. When no flag is set, the system may store the * security policy in use into the sp_in/sp_out pointers to speed up further * lookups. */ struct inpcbpolicy { struct secpolicy *sp_in; struct secpolicy *sp_out; uint32_t genid; uint16_t flags; #define INP_INBOUND_POLICY 0x0001 #define INP_OUTBOUND_POLICY 0x0002 uint16_t hdrsz; }; /* SP acquiring list table. */ struct secspacq { LIST_ENTRY(secspacq) chain; struct secpolicyindex spidx; time_t created; /* for lifetime */ int count; /* for lifetime */ /* XXX: here is mbuf place holder to be sent ? */ }; #endif /* _KERNEL */ /* buffer size for formatted output of ipsec address */ #define IPSEC_ADDRSTRLEN (INET6_ADDRSTRLEN + 11) /* according to IANA assignment, port 0x0000 and proto 0xff are reserved. */ #define IPSEC_PORT_ANY 0 #define IPSEC_ULPROTO_ANY 255 #define IPSEC_PROTO_ANY 255 /* mode of security protocol */ /* NOTE: DON'T use IPSEC_MODE_ANY in the SPD. It's only used in the SAD */ #define IPSEC_MODE_ANY 0 /* i.e. wildcard. */ #define IPSEC_MODE_TRANSPORT 1 #define IPSEC_MODE_TUNNEL 2 #define IPSEC_MODE_TCPMD5 3 /* TCP MD5 mode */ /* * Direction of security policy. * NOTE: INVALID is used only as a flag; * the others are also used as loop counters.
*/ #define IPSEC_DIR_ANY 0 #define IPSEC_DIR_INBOUND 1 #define IPSEC_DIR_OUTBOUND 2 #define IPSEC_DIR_MAX 3 #define IPSEC_DIR_INVALID 4 /* Policy level */ /* * IPSEC, ENTRUST and BYPASS are allowed for setsockopt() in PCB, * DISCARD, IPSEC and NONE are allowed for setkey() in SPD. * DISCARD and NONE are allowed for system default. */ #define IPSEC_POLICY_DISCARD 0 /* discarding packet */ #define IPSEC_POLICY_NONE 1 /* through IPsec engine */ #define IPSEC_POLICY_IPSEC 2 /* do IPsec */ #define IPSEC_POLICY_ENTRUST 3 /* consulting SPD if present. */ #define IPSEC_POLICY_BYPASS 4 /* only for privileged socket. */ /* Policy scope */ #define IPSEC_POLICYSCOPE_ANY 0x00 /* unspecified */ #define IPSEC_POLICYSCOPE_GLOBAL 0x01 /* global scope */ #define IPSEC_POLICYSCOPE_IFNET 0x02 /* if_ipsec(4) scope */ #define IPSEC_POLICYSCOPE_PCB 0x04 /* PCB scope */ /* Security protocol level */ #define IPSEC_LEVEL_DEFAULT 0 /* reference to system default */ #define IPSEC_LEVEL_USE 1 /* use SA if present. */ #define IPSEC_LEVEL_REQUIRE 2 /* require SA. */ #define IPSEC_LEVEL_UNIQUE 3 /* unique SA. */ #define IPSEC_MANUAL_REQID_MAX 0x3fff /* * If the security policy level is unique, this id * refers to the particular SA to use; otherwise it is * zero. * 1 - 0x3fff are reserved for manual keying. * 0 is reserved for the reason above; the others are * for kernel use. * Note that this id does not identify an SA * by itself. */ #define IPSEC_REPLAYWSIZE 32 /* statistics for ipsec processing */ struct ipsecstat { uint64_t ips_in_polvio; /* input: sec policy violation */ uint64_t ips_in_nomem; /* input: no memory available */ uint64_t ips_in_inval; /* input: generic error */ uint64_t ips_out_polvio; /* output: sec policy violation */ uint64_t ips_out_nosa; /* output: SA unavailable */ uint64_t ips_out_nomem; /* output: no memory available */ uint64_t ips_out_noroute; /* output: no route available */ uint64_t ips_out_inval; /* output: generic error */ uint64_t ips_out_bundlesa; /* output: bundled SA processed */ uint64_t ips_spdcache_hits; /* SPD cache hits */ uint64_t ips_spdcache_misses; /* SPD cache misses */ uint64_t ips_clcopied; /* clusters copied during clone */ uint64_t ips_mbinserted; /* mbufs inserted during makespace */ /* * Temporary statistics for performance analysis. */ /* See where ESP/AH/IPCOMP headers land in the mbuf on input */ uint64_t ips_input_front; uint64_t ips_input_middle; uint64_t ips_input_end; }; /* * Definitions for IPsec & Key sysctl operations.
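 *
 * Each IPSECCTL_* value below is the fixed OID number used when the
 * matching sysctl is declared in ipsec.c; sketch of the correspondence:
 *
 *	SYSCTL_INT(_net_inet_ipsec, IPSECCTL_MIN_PMTU, min_pmtu, ...)
 *	    -> net.inet.ipsec.min_pmtu
 *	SYSCTL_INT(_net_inet_ipsec, IPSECCTL_RANDOM_ID, random_id, ...)
 *	    -> net.inet.ipsec.random_id
 */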
*/ #define IPSECCTL_STATS 1 /* stats */ #define IPSECCTL_DEF_POLICY 2 #define IPSECCTL_DEF_ESP_TRANSLEV 3 /* int; ESP transport mode */ #define IPSECCTL_DEF_ESP_NETLEV 4 /* int; ESP tunnel mode */ #define IPSECCTL_DEF_AH_TRANSLEV 5 /* int; AH transport mode */ #define IPSECCTL_DEF_AH_NETLEV 6 /* int; AH tunnel mode */ #if 0 /* obsolete, do not reuse */ #define IPSECCTL_INBOUND_CALL_IKE 7 #endif #define IPSECCTL_AH_CLEARTOS 8 #define IPSECCTL_AH_OFFSETMASK 9 #define IPSECCTL_DFBIT 10 #define IPSECCTL_ECN 11 #define IPSECCTL_DEBUG 12 #define IPSECCTL_ESP_RANDPAD 13 #define IPSECCTL_MIN_PMTU 14 +#define IPSECCTL_RANDOM_ID 15 #ifdef _KERNEL #include struct ipsec_ctx_data; #define IPSEC_INIT_CTX(_ctx, _mp, _inp, _sav, _af, _enc) do { \ (_ctx)->mp = (_mp); \ (_ctx)->inp = (_inp); \ (_ctx)->sav = (_sav); \ (_ctx)->af = (_af); \ (_ctx)->enc = (_enc); \ } while(0) int ipsec_run_hhooks(struct ipsec_ctx_data *ctx, int direction); VNET_DECLARE(int, ipsec_debug); #define V_ipsec_debug VNET(ipsec_debug) #ifdef REGRESSION VNET_DECLARE(int, ipsec_replay); VNET_DECLARE(int, ipsec_integrity); #define V_ipsec_replay VNET(ipsec_replay) #define V_ipsec_integrity VNET(ipsec_integrity) #endif VNET_PCPUSTAT_DECLARE(struct ipsecstat, ipsec4stat); VNET_DECLARE(int, ip4_esp_trans_deflev); VNET_DECLARE(int, ip4_esp_net_deflev); VNET_DECLARE(int, ip4_ah_trans_deflev); VNET_DECLARE(int, ip4_ah_net_deflev); VNET_DECLARE(int, ip4_ipsec_dfbit); VNET_DECLARE(int, ip4_ipsec_min_pmtu); VNET_DECLARE(int, ip4_ipsec_ecn); +VNET_DECLARE(int, ip4_ipsec_random_id); VNET_DECLARE(int, crypto_support); VNET_DECLARE(int, async_crypto); VNET_DECLARE(int, natt_cksum_policy); #define IPSECSTAT_INC(name) \ do { \ MIB_SDT_PROBE1(ipsec, count, name, 1); \ VNET_PCPUSTAT_ADD(struct ipsecstat, ipsec4stat, name, 1); \ } while (0) #define V_ip4_esp_trans_deflev VNET(ip4_esp_trans_deflev) #define V_ip4_esp_net_deflev VNET(ip4_esp_net_deflev) #define V_ip4_ah_trans_deflev VNET(ip4_ah_trans_deflev) #define V_ip4_ah_net_deflev VNET(ip4_ah_net_deflev) #define V_ip4_ipsec_dfbit VNET(ip4_ipsec_dfbit) #define V_ip4_ipsec_min_pmtu VNET(ip4_ipsec_min_pmtu) #define V_ip4_ipsec_ecn VNET(ip4_ipsec_ecn) +#define V_ip4_ipsec_random_id VNET(ip4_ipsec_random_id) #define V_crypto_support VNET(crypto_support) #define V_async_crypto VNET(async_crypto) #define V_natt_cksum_policy VNET(natt_cksum_policy) #define ipseclog(x) do { if (V_ipsec_debug) log x; } while (0) /* for openbsd compatibility */ #ifdef IPSEC_DEBUG #define IPSEC_DEBUG_DECLARE(x) x #define DPRINTF(x) do { if (V_ipsec_debug) printf x; } while (0) #else #define IPSEC_DEBUG_DECLARE(x) #define DPRINTF(x) #endif struct inpcb; struct ip; struct m_tag; struct secasvar; struct sockopt; struct tcphdr; union sockaddr_union; int ipsec_if_input(struct mbuf *, struct secasvar *, uint32_t); struct ipsecrequest *ipsec_newisr(void); void ipsec_delisr(struct ipsecrequest *); struct secpolicy *ipsec4_checkpolicy(const struct mbuf *, struct inpcb *, struct ip *, int *, int); u_int ipsec_get_reqlevel(struct secpolicy *, u_int); void udp_ipsec_adjust_cksum(struct mbuf *, struct secasvar *, int, int); int udp_ipsec_output(struct mbuf *, struct secasvar *); int ipsec_chkreplay(uint32_t, uint32_t *, struct secasvar *); int ipsec_updatereplay(uint32_t, struct secasvar *); int ipsec_updateid(struct secasvar *, crypto_session_t *, crypto_session_t *); int ipsec_initialized(void); size_t ipsec_hdrsiz_internal(struct secpolicy *); void ipsec_setspidx_inpcb(struct inpcb *, struct secpolicyindex *, u_int); void 
ipsec4_setsockaddrs(const struct mbuf *, const struct ip *, union sockaddr_union *, union sockaddr_union *); int ipsec4_common_input_cb(struct mbuf *, struct secasvar *, int, int); int ipsec4_check_pmtu(struct ifnet *, struct mbuf *, struct ip *ip1, struct secpolicy *, int); int ipsec4_process_packet(struct ifnet *, struct mbuf *, struct ip *ip1, struct secpolicy *, struct inpcb *, u_long); int ipsec_process_done(struct mbuf *, struct secpolicy *, struct secasvar *, u_int); void m_checkalignment(const char* where, struct mbuf *m0, int off, int len); struct mbuf *m_makespace(struct mbuf *m0, int skip, int hlen, int *off); caddr_t m_pad(struct mbuf *m, int n); int m_striphdr(struct mbuf *m, int skip, int hlen); SYSCTL_DECL(_net_inet_ipsec); SYSCTL_DECL(_net_inet6_ipsec6); #endif /* _KERNEL */ #ifndef _KERNEL caddr_t ipsec_set_policy(const char *, int); int ipsec_get_policylen(c_caddr_t); char *ipsec_dump_policy(c_caddr_t, const char *); const char *ipsec_strerror(void); #endif /* ! KERNEL */ #endif /* _NETIPSEC_IPSEC_H_ */ diff --git a/sys/netipsec/ipsec_output.c b/sys/netipsec/ipsec_output.c index 8d8a304e7af4..b394ff81d9c6 100644 --- a/sys/netipsec/ipsec_output.c +++ b/sys/netipsec/ipsec_output.c @@ -1,1239 +1,1239 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting * Copyright (c) 2016 Andrey V. Elsukov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPsec output processing. 
*/ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif #include #include #include #ifdef INET6 #include #include #endif #include #ifdef INET6 #include #endif #if defined(SCTP) || defined(SCTP_SUPPORT) #include #endif #include #include #include #include #ifdef INET6 #include #endif #include #include #include #include #include #include #include #include #include #include #define IPSEC_OSTAT_INC(proto, name) do { \ if ((proto) == IPPROTO_ESP) \ ESPSTAT_INC(esps_##name); \ else if ((proto) == IPPROTO_AH)\ AHSTAT_INC(ahs_##name); \ else \ IPCOMPSTAT_INC(ipcomps_##name); \ } while (0) static int ipsec_encap(struct mbuf **mp, struct secasindex *saidx); static size_t ipsec_get_pmtu(struct secasvar *sav); #ifdef INET static struct secasvar * ipsec4_allocsa(struct ifnet *ifp, struct mbuf *m, const struct ip *ip, struct secpolicy *sp, u_int *pidx, int *error) { struct secasindex *saidx, tmpsaidx; struct ipsecrequest *isr; struct sockaddr_in *sin; struct secasvar *sav; /* * Check system global policy controls. */ next: isr = sp->req[*pidx]; if ((isr->saidx.proto == IPPROTO_ESP && !V_esp_enable) || (isr->saidx.proto == IPPROTO_AH && !V_ah_enable) || (isr->saidx.proto == IPPROTO_IPCOMP && !V_ipcomp_enable)) { DPRINTF(("%s: IPsec outbound packet dropped due" " to policy (check your sysctls)\n", __func__)); IPSEC_OSTAT_INC(isr->saidx.proto, pdrops); *error = EHOSTUNREACH; return (NULL); } /* * Craft SA index to search for proper SA. Note that * we only initialize unspecified SA peers for transport * mode; for tunnel mode they must already be filled in. */ if (isr->saidx.mode == IPSEC_MODE_TRANSPORT) { saidx = &tmpsaidx; *saidx = isr->saidx; if (saidx->src.sa.sa_len == 0) { sin = &saidx->src.sin; sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; sin->sin_port = IPSEC_PORT_ANY; sin->sin_addr = ip->ip_src; } if (saidx->dst.sa.sa_len == 0) { sin = &saidx->dst.sin; sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; sin->sin_port = IPSEC_PORT_ANY; sin->sin_addr = ip->ip_dst; } } else saidx = &sp->req[*pidx]->saidx; /* * Lookup SA and validate it. */ sav = key_allocsa_policy(sp, saidx, error); if (sav == NULL) { IPSECSTAT_INC(ips_out_nosa); if (*error != 0) return (NULL); if (ipsec_get_reqlevel(sp, *pidx) != IPSEC_LEVEL_REQUIRE) { /* * We have no SA and policy that doesn't require * this IPsec transform, thus we can continue w/o * IPsec processing, i.e. return EJUSTRETURN. * But first check if there is some bundled transform. */ if (sp->tcount > ++(*pidx)) goto next; *error = EJUSTRETURN; } return (NULL); } IPSEC_ASSERT(sav->tdb_xform != NULL, ("SA with NULL tdb_xform")); return (sav); } /* * IPsec output logic for IPv4. */ static int ipsec4_perform_request(struct ifnet *ifp, struct mbuf *m, struct ip *ip1, struct secpolicy *sp, struct inpcb *inp, u_int idx, u_long mtu) { struct ipsec_ctx_data ctx; union sockaddr_union *dst; struct secasvar *sav; struct ip *ip; struct mbuf *m1; int error, hwassist, i, off; bool accel; IPSEC_ASSERT(idx < sp->tcount, ("Wrong IPsec request index %d", idx)); /* * We hold the reference to SP. Content of SP couldn't be changed. * Craft secasindex and do lookup for suitable SA. * Then do encapsulation if needed and call xform's output. * We need to store SP in the xform callback parameters. 
* In xform callback we will extract SP and it can be used to * determine next transform. At the end of transform we can * release reference to SP. */ sav = ipsec4_allocsa(ifp, m, ip1, sp, &idx, &error); if (sav == NULL) { if (error == EJUSTRETURN) { /* No IPsec required */ (void)ipsec_accel_output(ifp, m, inp, sp, NULL, AF_INET, mtu, &hwassist); key_freesp(&sp); return (error); } goto bad; } /* * XXXAE: most likely ip_sum at this point is wrong. */ IPSEC_INIT_CTX(&ctx, &m, inp, sav, AF_INET, IPSEC_ENC_BEFORE); if ((error = ipsec_run_hhooks(&ctx, HHOOK_TYPE_IPSEC_OUT)) != 0) goto bad; /* Re-calculate *ip1 after potential change of m in the hook. */ m_copydata(m, 0, sizeof(*ip1), (char *)ip1); hwassist = 0; accel = ipsec_accel_output(ifp, m, inp, sp, sav, AF_INET, mtu, &hwassist); /* * Do delayed checksums now because we send before * this is done in the normal processing path. */ if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~hwassist) != 0) { in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } #if defined(SCTP) || defined(SCTP_SUPPORT) if ((m->m_pkthdr.csum_flags & CSUM_SCTP & ~hwassist) != 0) { sctp_delayed_cksum(m, (uint32_t)(ip1->ip_hl << 2)); m->m_pkthdr.csum_flags &= ~CSUM_SCTP; } #endif if (accel) return (EJUSTRETURN); error = mb_unmapped_to_ext(m, &m1); if (error != 0) { if (error == EINVAL) { if (bootverbose) if_printf(ifp, "Tx TLS+IPSEC packet\n"); } return (error); } m = m1; ip = mtod(m, struct ip *); dst = &sav->sah->saidx.dst; /* Do the appropriate encapsulation, if necessary */ if (sp->req[idx]->saidx.mode == IPSEC_MODE_TUNNEL || /* Tunnel requ'd */ dst->sa.sa_family != AF_INET || /* PF mismatch */ (dst->sa.sa_family == AF_INET && /* Proxy */ dst->sin.sin_addr.s_addr != INADDR_ANY && dst->sin.sin_addr.s_addr != ip->ip_dst.s_addr)) { /* Fix IPv4 header checksum and length */ ip->ip_len = htons(m->m_pkthdr.len); ip->ip_sum = 0; ip->ip_sum = in_cksum(m, ip->ip_hl << 2); error = ipsec_encap(&m, &sav->sah->saidx); if (error != 0) { DPRINTF(("%s: encapsulation for SPI 0x%08x failed " "with error %d\n", __func__, ntohl(sav->spi), error)); /* XXXAE: IPSEC_OSTAT_INC(tunnel); */ goto bad; } inp = NULL; } IPSEC_INIT_CTX(&ctx, &m, inp, sav, dst->sa.sa_family, IPSEC_ENC_AFTER); if ((error = ipsec_run_hhooks(&ctx, HHOOK_TYPE_IPSEC_OUT)) != 0) goto bad; /* * Dispatch to the appropriate IPsec transform logic. The * packet will be returned for transmission after crypto * processing, etc. are completed. * * NB: m & sav are ``passed to caller'' who's responsible for * reclaiming their resources. 
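 *
 * The switch below only computes the transform's arguments: 'i' is the
 * outer L3 header length ('ip_hl' counts 32-bit words, so a standard
 * 20-byte IPv4 header gives i = 5 << 2 = 20) and 'off' is the offset of
 * the protocol/next-header field, which the transform uses to splice its
 * own header into the protocol chain.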
*/ switch(dst->sa.sa_family) { case AF_INET: ip = mtod(m, struct ip *); i = ip->ip_hl << 2; off = offsetof(struct ip, ip_p); break; #ifdef INET6 case AF_INET6: i = sizeof(struct ip6_hdr); off = offsetof(struct ip6_hdr, ip6_nxt); break; #endif /* INET6 */ default: DPRINTF(("%s: unsupported protocol family %u\n", __func__, dst->sa.sa_family)); error = EPFNOSUPPORT; IPSEC_OSTAT_INC(sav->sah->saidx.proto, nopf); goto bad; } error = (*sav->tdb_xform->xf_output)(m, sp, sav, idx, i, off); return (error); bad: IPSECSTAT_INC(ips_out_inval); if (m != NULL) m_freem(m); if (sav != NULL) key_freesav(&sav); key_freesp(&sp); return (error); } int ipsec4_process_packet(struct ifnet *ifp, struct mbuf *m, struct ip *ip1, struct secpolicy *sp, struct inpcb *inp, u_long mtu) { return (ipsec4_perform_request(ifp, m, ip1, sp, inp, 0, mtu)); } int ipsec4_check_pmtu(struct ifnet *ifp, struct mbuf *m, struct ip *ip1, struct secpolicy *sp, int forwarding) { struct secasvar *sav; size_t hlen, pmtu; uint32_t idx; int error; /* Don't check PMTU if the frame won't have DF bit set. */ if (!V_ip4_ipsec_dfbit) return (0); if (V_ip4_ipsec_dfbit == 1) goto setdf; /* V_ip4_ipsec_dfbit > 1 - we will copy it from inner header. */ if ((ip1->ip_off & htons(IP_DF)) == 0) return (0); setdf: idx = sp->tcount - 1; sav = ipsec4_allocsa(ifp, m, ip1, sp, &idx, &error); if (sav == NULL) { key_freesp(&sp); /* * No matching SA was found and SADB_ACQUIRE message was generated. * Since we have matched a SP to this packet drop it silently. */ if (error == 0) error = EINPROGRESS; if (error != EJUSTRETURN) m_freem(m); return (error); } pmtu = ipsec_get_pmtu(sav); if (pmtu == 0) { key_freesav(&sav); return (0); } hlen = ipsec_hdrsiz_internal(sp); key_freesav(&sav); if (m_length(m, NULL) + hlen > pmtu) { /* * If we're forwarding generate ICMP message here, * so that it contains pmtu subtracted by header size. * Set error to EINPROGRESS, in order for the frame * to be dropped silently. */ if (forwarding) { if (pmtu > hlen) icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, pmtu - hlen); else m_freem(m); key_freesp(&sp); return (EINPROGRESS); /* Pretend that we consumed it. */ } else { m_freem(m); key_freesp(&sp); return (EMSGSIZE); } } return (0); } static int ipsec4_common_output1(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp, struct ip *ip1, int forwarding, u_long mtu) { struct secpolicy *sp; int error; /* Lookup for the corresponding outbound security policy */ sp = ipsec4_checkpolicy(m, inp, ip1, &error, !forwarding); if (sp == NULL) { if (error == -EINVAL) { /* Discarded by policy. */ m_freem(m); return (EACCES); } return (0); /* No IPsec required. */ } /* * Usually we have to have tunnel mode IPsec security policy * when we are forwarding a packet. Otherwise we could not handle * encrypted replies, because they are not destined for us. But * some users are doing source address translation for forwarded * packets, and thus, even if they are forwarded, the replies will * return back to us. */ /* NB: callee frees mbuf and releases reference to SP */ error = ipsec4_check_pmtu(ifp, m, ip1, sp, forwarding); if (error != 0) { if (error == EJUSTRETURN) return (0); return (error); } error = ipsec4_process_packet(ifp, m, ip1, sp, inp, mtu); if (error == EJUSTRETURN) { /* * We had a SP with a level of 'use' and no SA. We * will just continue to process the packet without * IPsec processing and return without error. 
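 *
 * The resulting contract mirrors the IPSEC_OUTPUT() method described
 * below. An illustrative reading of a caller (a sketch, not actual code
 * from this file):
 *
 *	switch (ipsec4_output(ifp, m, inp, mtu)) {
 *	case 0:		... not consumed, continue plain IP output ...
 *	case EINPROGRESS: ... consumed by IPsec ...
 *	default:	... error, mbuf already freed by IPsec ...
 *	}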
*/ return (0); } if (error == 0) return (EINPROGRESS); /* consumed by IPsec */ return (error); } static int ipsec4_common_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp, struct ip *ip1, int forwarding, u_long mtu) { struct ip ip_hdr; struct ip *ip; if (((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.len < sizeof(*ip)) || ((m->m_flags & M_PKTHDR) == 0 && m->m_len < sizeof(*ip))) { m_free(m); return (EACCES); } if (ip1 != NULL) { ip = ip1; } else { ip = &ip_hdr; m_copydata(m, 0, sizeof(*ip), (char *)ip); } return (ipsec4_common_output1(ifp, m, inp, ip, forwarding, mtu)); } /* * IPSEC_OUTPUT() method implementation for IPv4. * 0 - no IPsec handling needed * other values - mbuf consumed by IPsec. */ int ipsec4_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp, u_long mtu) { /* * If the packet is resubmitted to ip_output (e.g. after * AH, ESP, etc. processing), there will be a tag to bypass * the lookup and related policy checking. */ if (m_tag_find(m, PACKET_TAG_IPSEC_OUT_DONE, NULL) != NULL) return (0); return (ipsec4_common_output(ifp, m, inp, NULL, 0, mtu)); } /* * IPSEC_FORWARD() method implementation for IPv4. * 0 - no IPsec handling needed * other values - mbuf consumed by IPsec. */ int ipsec4_forward(struct mbuf *m) { struct ip ip_hdr; m_copydata(m, 0, sizeof(ip_hdr), (char *)&ip_hdr); /* * Check if this packet has an active inbound SP and needs to be * dropped instead of forwarded. */ if (ipsec4_in_reject1(m, &ip_hdr, NULL) != 0) { m_freem(m); return (EACCES); } return (ipsec4_common_output(NULL /* XXXKIB */, m, NULL, &ip_hdr, 1, 0)); } #endif #ifdef INET6 static int in6_sa_equal_addrwithscope(const struct sockaddr_in6 *sa, const struct in6_addr *ia) { struct in6_addr ia2; if (IN6_IS_SCOPE_LINKLOCAL(&sa->sin6_addr)) { memcpy(&ia2, &sa->sin6_addr, sizeof(ia2)); ia2.s6_addr16[1] = htons(sa->sin6_scope_id); return (IN6_ARE_ADDR_EQUAL(ia, &ia2)); } return (IN6_ARE_ADDR_EQUAL(&sa->sin6_addr, ia)); } static struct secasvar * ipsec6_allocsa(struct ifnet *ifp, struct mbuf *m, struct secpolicy *sp, u_int *pidx, int *error) { struct secasindex *saidx, tmpsaidx; struct ipsecrequest *isr; struct sockaddr_in6 *sin6; struct secasvar *sav; struct ip6_hdr *ip6; /* * Check system global policy controls. */ next: isr = sp->req[*pidx]; if ((isr->saidx.proto == IPPROTO_ESP && !V_esp_enable) || (isr->saidx.proto == IPPROTO_AH && !V_ah_enable) || (isr->saidx.proto == IPPROTO_IPCOMP && !V_ipcomp_enable)) { DPRINTF(("%s: IPsec outbound packet dropped due" " to policy (check your sysctls)\n", __func__)); IPSEC_OSTAT_INC(isr->saidx.proto, pdrops); *error = EHOSTUNREACH; return (NULL); } /* * Craft SA index to search for proper SA. Note that * we only fill in unspecified SA peers for transport * mode; for tunnel mode they must already be filled in.
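 *
 * For link-local peers the code below also deals with the KAME-internal
 * address form, in which the zone id is embedded in s6_addr16[1] (so,
 * for example, fe80::1 with scope id 2 is carried internally as
 * fe80:2::1): the embedded half-word is zeroed and the id moved to
 * sin6_scope_id so that SPD comparisons operate on the canonical
 * address (an illustrative reading of the code, not an interface
 * guarantee).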
*/ if (isr->saidx.mode == IPSEC_MODE_TRANSPORT) { saidx = &tmpsaidx; *saidx = isr->saidx; ip6 = mtod(m, struct ip6_hdr *); if (saidx->src.sin6.sin6_len == 0) { sin6 = (struct sockaddr_in6 *)&saidx->src; sin6->sin6_len = sizeof(*sin6); sin6->sin6_family = AF_INET6; sin6->sin6_port = IPSEC_PORT_ANY; sin6->sin6_addr = ip6->ip6_src; if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { /* fix scope id for comparing SPD */ sin6->sin6_addr.s6_addr16[1] = 0; sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]); } } if (saidx->dst.sin6.sin6_len == 0) { sin6 = (struct sockaddr_in6 *)&saidx->dst; sin6->sin6_len = sizeof(*sin6); sin6->sin6_family = AF_INET6; sin6->sin6_port = IPSEC_PORT_ANY; sin6->sin6_addr = ip6->ip6_dst; if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) { /* fix scope id for comparing SPD */ sin6->sin6_addr.s6_addr16[1] = 0; sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]); } } } else saidx = &sp->req[*pidx]->saidx; /* * Lookup SA and validate it. */ sav = key_allocsa_policy(sp, saidx, error); if (sav == NULL) { IPSEC6STAT_INC(ips_out_nosa); if (*error != 0) return (NULL); if (ipsec_get_reqlevel(sp, *pidx) != IPSEC_LEVEL_REQUIRE) { /* * We have no SA and policy that doesn't require * this IPsec transform, thus we can continue w/o * IPsec processing, i.e. return EJUSTRETURN. * But first check if there is some bundled transform. */ if (sp->tcount > ++(*pidx)) goto next; *error = EJUSTRETURN; } return (NULL); } IPSEC_ASSERT(sav->tdb_xform != NULL, ("SA with NULL tdb_xform")); return (sav); } /* * IPsec output logic for IPv6. */ static int ipsec6_perform_request(struct ifnet *ifp, struct mbuf *m, struct secpolicy *sp, struct inpcb *inp, u_int idx, u_long mtu) { struct ipsec_ctx_data ctx; union sockaddr_union *dst; struct secasvar *sav; struct ip6_hdr *ip6; int error, hwassist, i, off; bool accel; IPSEC_ASSERT(idx < sp->tcount, ("Wrong IPsec request index %d", idx)); sav = ipsec6_allocsa(ifp, m, sp, &idx, &error); if (sav == NULL) { if (error == EJUSTRETURN) { /* No IPsec required */ (void)ipsec_accel_output(ifp, m, inp, sp, NULL, AF_INET6, mtu, &hwassist); key_freesp(&sp); return (error); } goto bad; } /* Fix IP length in case if it is not set yet. */ ip6 = mtod(m, struct ip6_hdr *); ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); IPSEC_INIT_CTX(&ctx, &m, inp, sav, AF_INET6, IPSEC_ENC_BEFORE); if ((error = ipsec_run_hhooks(&ctx, HHOOK_TYPE_IPSEC_OUT)) != 0) goto bad; hwassist = 0; accel = ipsec_accel_output(ifp, m, inp, sp, sav, AF_INET6, mtu, &hwassist); /* * Do delayed checksums now because we send before * this is done in the normal processing path. */ if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 & ~hwassist) != 0) { in6_delayed_cksum(m, m->m_pkthdr.len - sizeof(struct ip6_hdr), sizeof(struct ip6_hdr)); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6; } #if defined(SCTP) || defined(SCTP_SUPPORT) if ((m->m_pkthdr.csum_flags & CSUM_SCTP_IPV6 & ~hwassist) != 0) { sctp_delayed_cksum(m, sizeof(struct ip6_hdr)); m->m_pkthdr.csum_flags &= ~CSUM_SCTP_IPV6; } #endif if (accel) return (EJUSTRETURN); ip6 = mtod(m, struct ip6_hdr *); /* pfil can change mbuf */ dst = &sav->sah->saidx.dst; /* Do the appropriate encapsulation, if necessary */ if (sp->req[idx]->saidx.mode == IPSEC_MODE_TUNNEL || /* Tunnel requ'd */ dst->sa.sa_family != AF_INET6 || /* PF mismatch */ ((dst->sa.sa_family == AF_INET6) && (!IN6_IS_ADDR_UNSPECIFIED(&dst->sin6.sin6_addr)) && (!in6_sa_equal_addrwithscope(&dst->sin6, &ip6->ip6_dst)))) { if (m->m_pkthdr.len - sizeof(*ip6) > IPV6_MAXPACKET) { /* No jumbogram support. 
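 * The ip6_plen field is only 16 bits wide, so a payload larger than
 * IPV6_MAXPACKET (65535 bytes) cannot be expressed after encapsulation
 * without the jumbo payload option, which is not supported here.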
*/ error = ENXIO; /*XXX*/ goto bad; } error = ipsec_encap(&m, &sav->sah->saidx); if (error != 0) { DPRINTF(("%s: encapsulation for SPI 0x%08x failed " "with error %d\n", __func__, ntohl(sav->spi), error)); /* XXXAE: IPSEC_OSTAT_INC(tunnel); */ goto bad; } inp = NULL; } IPSEC_INIT_CTX(&ctx, &m, inp, sav, dst->sa.sa_family, IPSEC_ENC_AFTER); if ((error = ipsec_run_hhooks(&ctx, HHOOK_TYPE_IPSEC_OUT)) != 0) goto bad; switch(dst->sa.sa_family) { #ifdef INET case AF_INET: { struct ip *ip; ip = mtod(m, struct ip *); i = ip->ip_hl << 2; off = offsetof(struct ip, ip_p); } break; #endif /* AF_INET */ case AF_INET6: i = sizeof(struct ip6_hdr); off = offsetof(struct ip6_hdr, ip6_nxt); break; default: DPRINTF(("%s: unsupported protocol family %u\n", __func__, dst->sa.sa_family)); error = EPFNOSUPPORT; IPSEC_OSTAT_INC(sav->sah->saidx.proto, nopf); goto bad; } error = (*sav->tdb_xform->xf_output)(m, sp, sav, idx, i, off); return (error); bad: IPSEC6STAT_INC(ips_out_inval); if (m != NULL) m_freem(m); if (sav != NULL) key_freesav(&sav); key_freesp(&sp); return (error); } int ipsec6_process_packet(struct ifnet *ifp, struct mbuf *m, struct secpolicy *sp, struct inpcb *inp, u_long mtu) { return (ipsec6_perform_request(ifp, m, sp, inp, 0, mtu)); } /* * IPv6 implementation is based on IPv4 implementation. */ int ipsec6_check_pmtu(struct ifnet *ifp, struct mbuf *m, struct secpolicy *sp, int forwarding) { struct secasvar *sav; size_t hlen, pmtu; uint32_t idx; int error; /* * According to RFC8200 L3 fragmentation is supposed to be done only on * locally generated packets. During L3 forwarding packets that are too * big are always supposed to be dropped, with an ICMPv6 packet being * sent back. */ if (!forwarding) return (0); idx = sp->tcount - 1; sav = ipsec6_allocsa(ifp, m, sp, &idx, &error); if (sav == NULL) { key_freesp(&sp); /* * No matching SA was found and SADB_ACQUIRE message was generated. * Since we have matched a SP to this packet drop it silently. */ if (error == 0) error = EINPROGRESS; if (error != EJUSTRETURN) m_freem(m); return (error); } pmtu = ipsec_get_pmtu(sav); if (pmtu == 0) { key_freesav(&sav); return (0); } hlen = ipsec_hdrsiz_internal(sp); key_freesav(&sav); if (m_length(m, NULL) + hlen > pmtu) { /* * If we're forwarding generate ICMPv6 message here, * so that it contains pmtu subtracted by header size. * Set error to EINPROGRESS, in order for the frame * to be dropped silently. */ if (forwarding) { if (pmtu > hlen) icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0, pmtu - hlen); else m_freem(m); key_freesp(&sp); return (EINPROGRESS); /* Pretend that we consumed it. */ } } return (0); } static int ipsec6_common_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp, int forwarding, u_long mtu) { struct secpolicy *sp; int error; /* Lookup for the corresponding outbound security policy */ sp = ipsec6_checkpolicy(m, inp, &error, !forwarding); if (sp == NULL) { if (error == -EINVAL) { /* Discarded by policy. */ m_freem(m); return (EACCES); } return (0); /* No IPsec required. */ } error = ipsec6_check_pmtu(ifp, m, sp, forwarding); if (error != 0) { if (error == EJUSTRETURN) return (0); return (error); } /* NB: callee frees mbuf and releases reference to SP */ error = ipsec6_process_packet(ifp, m, sp, inp, mtu); if (error == EJUSTRETURN) { /* * We had a SP with a level of 'use' and no SA. We * will just continue to process the packet without * IPsec processing and return without error. 
*/ return (0); } if (error == 0) return (EINPROGRESS); /* consumed by IPsec */ return (error); } /* * IPSEC_OUTPUT() method implementation for IPv6. * 0 - no IPsec handling needed * other values - mbuf consumed by IPsec. */ int ipsec6_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp, u_long mtu) { /* * If the packet is resubmitted to ip_output (e.g. after * AH, ESP, etc. processing), there will be a tag to bypass * the lookup and related policy checking. */ if (m_tag_find(m, PACKET_TAG_IPSEC_OUT_DONE, NULL) != NULL) return (0); return (ipsec6_common_output(ifp, m, inp, 0, mtu)); } /* * IPSEC_FORWARD() method implementation for IPv6. * 0 - no IPsec handling needed * other values - mbuf consumed by IPsec. */ int ipsec6_forward(struct mbuf *m) { /* * Check if this packet has an active inbound SP and needs to be * dropped instead of forwarded. */ if (ipsec6_in_reject(m, NULL) != 0) { m_freem(m); return (EACCES); } return (ipsec6_common_output(NULL /* XXXKIB */, m, NULL, 1, 0)); } #endif /* INET6 */ int ipsec_process_done(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, u_int idx) { struct epoch_tracker et; struct xform_history *xh; struct secasindex *saidx; struct m_tag *mtag; #ifdef INET struct ip *ip; #endif int error; if (sav->state >= SADB_SASTATE_DEAD) { error = ESRCH; goto bad; } saidx = &sav->sah->saidx; switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: ip = mtod(m, struct ip *); /* Fix the header length, for AH processing. */ ip->ip_len = htons(m->m_pkthdr.len); break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Fix the header length, for AH processing. */ if (m->m_pkthdr.len < sizeof (struct ip6_hdr)) { error = ENXIO; goto bad; } if (m->m_pkthdr.len - sizeof (struct ip6_hdr) > IPV6_MAXPACKET) { /* No jumbogram support. */ error = ENXIO; /*?*/ goto bad; } mtod(m, struct ip6_hdr *)->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); break; #endif /* INET6 */ default: DPRINTF(("%s: unknown protocol family %u\n", __func__, saidx->dst.sa.sa_family)); error = ENXIO; goto bad; } /* * Add a record of what we've done to the packet. */ mtag = m_tag_get(PACKET_TAG_IPSEC_OUT_DONE, sizeof(*xh), M_NOWAIT); if (mtag == NULL) { DPRINTF(("%s: could not get packet tag\n", __func__)); error = ENOMEM; goto bad; } xh = (struct xform_history *)(mtag + 1); xh->dst = saidx->dst; xh->proto = saidx->proto; xh->mode = saidx->mode; xh->spi = sav->spi; m_tag_prepend(m, mtag); key_sa_recordxfer(sav, m); /* record data transfer */ /* * If there's another (bundled) SA to apply, do so. * Note that this puts a burden on the kernel stack size. * If this is a problem we'll need to introduce a queue * to set the packet on so we can unwind the stack before * doing further processing. */ if (++idx < sp->tcount) { switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: key_freesav(&sav); IPSECSTAT_INC(ips_out_bundlesa); return (ipsec4_perform_request(NULL, m, ip, sp, NULL, idx, 0)); /* NOTREACHED */ #endif #ifdef INET6 case AF_INET6: key_freesav(&sav); IPSEC6STAT_INC(ips_out_bundlesa); return (ipsec6_perform_request(NULL, m, sp, NULL, idx, 0)); /* NOTREACHED */ #endif /* INET6 */ default: DPRINTF(("%s: unknown protocol family %u\n", __func__, saidx->dst.sa.sa_family)); error = EPFNOSUPPORT; goto bad; } } key_freesp(&sp), sp = NULL; /* Release reference to SP */ #if defined(INET) || defined(INET6) /* * Do UDP encapsulation if SA requires it. 
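 *
 * For a NAT-T SA this is the RFC 3948 style UDP encapsulation of ESP
 * (conventionally on port 4500), which lets the packets traverse NAT
 * devices. The resulting outer layout is roughly:
 *
 *	[ IP ][ UDP ][ ESP ][ protected payload ]
 *
 * (a simplified sketch; the actual ports come from the SA's NAT-T state).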
*/ if (sav->natt != NULL) { error = udp_ipsec_output(m, sav); if (error != 0) goto bad; } #endif /* INET || INET6 */ /* * We're done with IPsec processing, transmit the packet using the * appropriate network protocol (IP or IPv6). */ NET_EPOCH_ENTER(et); switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: key_freesav(&sav); error = ip_output(m, NULL, NULL, IP_RAWOUTPUT, NULL, NULL); break; #endif /* INET */ #ifdef INET6 case AF_INET6: key_freesav(&sav); error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); break; #endif /* INET6 */ default: panic("ipsec_process_done"); } NET_EPOCH_EXIT(et); return (error); bad: m_freem(m); key_freesav(&sav); if (sp != NULL) key_freesp(&sp); return (error); } /* * ipsec_prepend() is an optimized version of M_PREPEND(). * ipsec_encap() is called by the IPsec output routine for a tunnel mode SA. * It is expected that after IP encapsulation some IPsec transform will * be performed. Each IPsec transform inserts its variable length header * just after the outer IP header using m_makespace(). If the given mbuf * does not have enough free space at the beginning, we allocate a new mbuf * and reserve some space at the beginning and at the end. * This helps avoid allocating a new mbuf and copying data in m_makespace(): * we place the outer header in the middle of the mbuf's data with reserved * leading and trailing space: * [ LEADINGSPACE ][ Outer IP header ][ TRAILINGSPACE ] * LEADINGSPACE will be used to add the ethernet header, TRAILINGSPACE will * be used to inject the AH/ESP/IPCOMP header. */ #define IPSEC_TRAILINGSPACE (sizeof(struct udphdr) +/* NAT-T */ \ max(sizeof(struct newesp) + EALG_MAX_BLOCK_LEN, /* ESP + IV */ \ sizeof(struct newah) + HASH_MAX_LEN /* AH + ICV */)) static struct mbuf * ipsec_prepend(struct mbuf *m, int len, int how) { struct mbuf *n; M_ASSERTPKTHDR(m); IPSEC_ASSERT(len < MHLEN, ("wrong length")); if (M_LEADINGSPACE(m) >= len) { /* No need to allocate new mbuf. */ m->m_data -= len; m->m_len += len; m->m_pkthdr.len += len; return (m); } n = m_gethdr(how, m->m_type); if (n == NULL) { m_freem(m); return (NULL); } m_move_pkthdr(n, m); n->m_next = m; if (len + IPSEC_TRAILINGSPACE < M_SIZE(n)) m_align(n, len + IPSEC_TRAILINGSPACE); n->m_len = len; n->m_pkthdr.len += len; return (n); } static size_t ipsec_get_pmtu(struct secasvar *sav) { union sockaddr_union *dst; struct in_conninfo inc; size_t pmtu; dst = &sav->sah->saidx.dst; memset(&inc, 0, sizeof(inc)); switch (dst->sa.sa_family) { #ifdef INET case AF_INET: inc.inc_faddr = satosin(&dst->sa)->sin_addr; break; #endif #ifdef INET6 case AF_INET6: inc.inc6_faddr = satosin6(&dst->sa)->sin6_addr; inc.inc_flags |= INC_ISIPV6; break; #endif default: return (0); } pmtu = tcp_hc_getmtu(&inc); if (pmtu != 0) return (pmtu); /* No entry in hostcache. Assume that PMTU is equal to link's MTU */ switch (dst->sa.sa_family) { #ifdef INET case AF_INET: pmtu = tcp_maxmtu(&inc, NULL); break; #endif #ifdef INET6 case AF_INET6: pmtu = tcp_maxmtu6(&inc, NULL); break; #endif default: return (0); } if (pmtu == 0) return (0); tcp_hc_updatemtu(&inc, pmtu); return (pmtu); } static int ipsec_encap(struct mbuf **mp, struct secasindex *saidx) { #ifdef INET6 struct ip6_hdr *ip6; #endif struct ip *ip; #ifdef INET int setdf = V_ip4_ipsec_dfbit == 1 ? 1: 0; #endif uint8_t itos, proto; ip = mtod(*mp, struct ip *); switch (ip->ip_v) { #ifdef INET case IPVERSION: proto = IPPROTO_IPIP; /* * Copy IP_DF flag from the inner header if * system-wide control variable is greater than 1.
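 * To summarize the V_ip4_ipsec_dfbit settings as implemented here:
 *
 *	0  - clear DF in the outer header
 *	1  - always set DF in the outer header
 *	>1 - copy DF from the inner header
 *
 * (derived from the 'setdf' initialization above and the test below).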
*/ if (V_ip4_ipsec_dfbit > 1) setdf = (ip->ip_off & htons(IP_DF)) != 0; itos = ip->ip_tos; break; #endif #ifdef INET6 case (IPV6_VERSION >> 4): proto = IPPROTO_IPV6; ip6 = mtod(*mp, struct ip6_hdr *); itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; /* scoped address handling */ in6_clearscope(&ip6->ip6_src); in6_clearscope(&ip6->ip6_dst); break; #endif default: return (EAFNOSUPPORT); } switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: if (saidx->src.sa.sa_family != AF_INET || saidx->src.sin.sin_addr.s_addr == INADDR_ANY || saidx->dst.sin.sin_addr.s_addr == INADDR_ANY) return (EINVAL); *mp = ipsec_prepend(*mp, sizeof(struct ip), M_NOWAIT); if (*mp == NULL) return (ENOBUFS); ip = mtod(*mp, struct ip *); ip->ip_v = IPVERSION; ip->ip_hl = sizeof(struct ip) >> 2; ip->ip_p = proto; ip->ip_len = htons((*mp)->m_pkthdr.len); ip->ip_ttl = V_ip_defttl; ip->ip_sum = 0; ip->ip_off = setdf ? htons(IP_DF): 0; ip->ip_src = saidx->src.sin.sin_addr; ip->ip_dst = saidx->dst.sin.sin_addr; ip_ecn_ingress(V_ip4_ipsec_ecn, &ip->ip_tos, &itos); - ip_fillid(ip); + ip_fillid(ip, V_ip4_ipsec_random_id); break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (saidx->src.sa.sa_family != AF_INET6 || IN6_IS_ADDR_UNSPECIFIED(&saidx->src.sin6.sin6_addr) || IN6_IS_ADDR_UNSPECIFIED(&saidx->dst.sin6.sin6_addr)) return (EINVAL); *mp = ipsec_prepend(*mp, sizeof(struct ip6_hdr), M_NOWAIT); if (*mp == NULL) return (ENOBUFS); ip6 = mtod(*mp, struct ip6_hdr *); ip6->ip6_flow = 0; ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_hlim = V_ip6_defhlim; ip6->ip6_nxt = proto; ip6->ip6_dst = saidx->dst.sin6.sin6_addr; /* For link-local address embed scope zone id */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) ip6->ip6_dst.s6_addr16[1] = htons(saidx->dst.sin6.sin6_scope_id & 0xffff); ip6->ip6_src = saidx->src.sin6.sin6_addr; if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) ip6->ip6_src.s6_addr16[1] = htons(saidx->src.sin6.sin6_scope_id & 0xffff); ip6->ip6_plen = htons((*mp)->m_pkthdr.len - sizeof(*ip6)); ip_ecn_ingress(V_ip6_ipsec_ecn, &proto, &itos); ip6->ip6_flow |= htonl((uint32_t)proto << 20); break; #endif /* INET6 */ default: return (EAFNOSUPPORT); } (*mp)->m_flags &= ~(M_BCAST | M_MCAST); return (0); } diff --git a/sys/netpfil/ipfilter/netinet/fil.c b/sys/netpfil/ipfilter/netinet/fil.c index c1b49196b712..2a75190a3ec7 100644 --- a/sys/netpfil/ipfilter/netinet/fil.c +++ b/sys/netpfil/ipfilter/netinet/fil.c @@ -1,9954 +1,9954 @@ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. * * Copyright 2008 Sun Microsystems. 
* * $Id$ * */ #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #include #include #include #include #if defined(_KERNEL) && defined(__FreeBSD__) # if !defined(IPFILTER_LKM) # include "opt_inet6.h" # endif # include #else # include #endif #if defined(__SVR4) || defined(sun) /* SOLARIS */ # include #endif # include #if defined(_KERNEL) # include # include #else # include # include # include # include # include # define _KERNEL # include # undef _KERNEL #endif #if !defined(__SVR4) # include #else # include # if (SOLARIS2 < 5) && defined(sun) # include # endif #endif # include #include #include #ifdef sun # include #endif #include #include #include #include # include # include #include "netinet/ip_compat.h" #ifdef USE_INET6 # include # if !SOLARIS && defined(_KERNEL) # include # endif #endif #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_frag.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #include "netinet/ip_auth.h" #ifdef IPFILTER_SCAN # include "netinet/ip_scan.h" #endif #include "netinet/ip_sync.h" #include "netinet/ip_lookup.h" #include "netinet/ip_pool.h" #include "netinet/ip_htable.h" #ifdef IPFILTER_COMPILED # include "netinet/ip_rules.h" #endif #if defined(IPFILTER_BPF) && defined(_KERNEL) # include #endif #if defined(__FreeBSD__) # include #endif #include "netinet/ipl.h" #if defined(__NetBSD__) && (__NetBSD_Version__ >= 104230000) # include extern struct callout ipf_slowtimer_ch; #endif /* END OF INCLUDES */ #ifndef _KERNEL # include "ipf.h" # include "ipt.h" extern int opts; extern int blockreason; #endif /* _KERNEL */ #define FASTROUTE_RECURSION #define LBUMP(x) softc->x++ #define LBUMPD(x, y) do { softc->x.y++; DT(y); } while (0) static inline int ipf_check_ipf(fr_info_t *, frentry_t *, int); static u_32_t ipf_checkcipso(fr_info_t *, u_char *, int); static u_32_t ipf_checkripso(u_char *); static u_32_t ipf_decaps(fr_info_t *, u_32_t, int); #ifdef IPFILTER_LOG static frentry_t *ipf_dolog(fr_info_t *, u_32_t *); #endif static int ipf_flushlist(ipf_main_softc_t *, int *, frentry_t **); static int ipf_flush_groups(ipf_main_softc_t *, frgroup_t **, int); static ipfunc_t ipf_findfunc(ipfunc_t); static void *ipf_findlookup(ipf_main_softc_t *, int, frentry_t *, i6addr_t *, i6addr_t *); static frentry_t *ipf_firewall(fr_info_t *, u_32_t *); static int ipf_fr_matcharray(fr_info_t *, int *); static int ipf_frruleiter(ipf_main_softc_t *, void *, int, void *); static void ipf_funcfini(ipf_main_softc_t *, frentry_t *); static int ipf_funcinit(ipf_main_softc_t *, frentry_t *); static int ipf_geniter(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *); static void ipf_getstat(ipf_main_softc_t *, struct friostat *, int); static int ipf_group_flush(ipf_main_softc_t *, frgroup_t *); static void ipf_group_free(frgroup_t *); static int ipf_grpmapfini(struct ipf_main_softc_s *, frentry_t *); static int ipf_grpmapinit(struct ipf_main_softc_s *, frentry_t *); static frentry_t *ipf_nextrule(ipf_main_softc_t *, int, int, frentry_t *, int); static int ipf_portcheck(frpcmp_t *, u_32_t); static inline int ipf_pr_ah(fr_info_t *); static inline void ipf_pr_esp(fr_info_t *); static inline void ipf_pr_gre(fr_info_t *); static inline void ipf_pr_udp(fr_info_t *); static inline void ipf_pr_tcp(fr_info_t *); static inline void ipf_pr_icmp(fr_info_t *); static inline void ipf_pr_ipv4hdr(fr_info_t *); static inline void ipf_pr_short(fr_info_t *, int); static inline int ipf_pr_tcpcommon(fr_info_t *); static 
inline int ipf_pr_udpcommon(fr_info_t *); static void ipf_rule_delete(ipf_main_softc_t *, frentry_t *f, int, int); static void ipf_rule_expire_insert(ipf_main_softc_t *, frentry_t *, int); static int ipf_synclist(ipf_main_softc_t *, frentry_t *, void *); static void ipf_token_flush(ipf_main_softc_t *); static void ipf_token_unlink(ipf_main_softc_t *, ipftoken_t *); static ipftuneable_t *ipf_tune_findbyname(ipftuneable_t *, const char *); static ipftuneable_t *ipf_tune_findbycookie(ipftuneable_t **, void *, void **); static int ipf_updateipid(fr_info_t *); static int ipf_settimeout(struct ipf_main_softc_s *, struct ipftuneable *, ipftuneval_t *); #if !defined(_KERNEL) || SOLARIS static int ppsratecheck(struct timeval *, int *, int); #endif /* * bit values for identifying presence of individual IP options * All of these tables should be ordered by increasing key value on the left * hand side to allow for binary searching of the array and include a trailer * with a 0 for the bitmask for linear searches to easily find the end with. */ static const struct optlist ipopts[] = { { IPOPT_NOP, 0x000001 }, { IPOPT_RR, 0x000002 }, { IPOPT_ZSU, 0x000004 }, { IPOPT_MTUP, 0x000008 }, { IPOPT_MTUR, 0x000010 }, { IPOPT_ENCODE, 0x000020 }, { IPOPT_TS, 0x000040 }, { IPOPT_TR, 0x000080 }, { IPOPT_SECURITY, 0x000100 }, { IPOPT_LSRR, 0x000200 }, { IPOPT_E_SEC, 0x000400 }, { IPOPT_CIPSO, 0x000800 }, { IPOPT_SATID, 0x001000 }, { IPOPT_SSRR, 0x002000 }, { IPOPT_ADDEXT, 0x004000 }, { IPOPT_VISA, 0x008000 }, { IPOPT_IMITD, 0x010000 }, { IPOPT_EIP, 0x020000 }, { IPOPT_FINN, 0x040000 }, { 0, 0x000000 } }; #ifdef USE_INET6 static const struct optlist ip6exthdr[] = { { IPPROTO_HOPOPTS, 0x000001 }, { IPPROTO_IPV6, 0x000002 }, { IPPROTO_ROUTING, 0x000004 }, { IPPROTO_FRAGMENT, 0x000008 }, { IPPROTO_ESP, 0x000010 }, { IPPROTO_AH, 0x000020 }, { IPPROTO_NONE, 0x000040 }, { IPPROTO_DSTOPTS, 0x000080 }, { IPPROTO_MOBILITY, 0x000100 }, { 0, 0 } }; #endif /* * bit values for identifying presence of individual IP security options */ static const struct optlist secopt[] = { { IPSO_CLASS_RES4, 0x01 }, { IPSO_CLASS_TOPS, 0x02 }, { IPSO_CLASS_SECR, 0x04 }, { IPSO_CLASS_RES3, 0x08 }, { IPSO_CLASS_CONF, 0x10 }, { IPSO_CLASS_UNCL, 0x20 }, { IPSO_CLASS_RES2, 0x40 }, { IPSO_CLASS_RES1, 0x80 } }; char ipfilter_version[] = IPL_VERSION; int ipf_features = 0 #ifdef IPFILTER_LKM | IPF_FEAT_LKM #endif #ifdef IPFILTER_LOG | IPF_FEAT_LOG #endif | IPF_FEAT_LOOKUP #ifdef IPFILTER_BPF | IPF_FEAT_BPF #endif #ifdef IPFILTER_COMPILED | IPF_FEAT_COMPILED #endif #ifdef IPFILTER_CKSUM | IPF_FEAT_CKSUM #endif | IPF_FEAT_SYNC #ifdef IPFILTER_SCAN | IPF_FEAT_SCAN #endif #ifdef USE_INET6 | IPF_FEAT_IPV6 #endif ; /* * Table of functions available for use with call rules. 
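 *
 * Like the option tables earlier in this file, the table below ends in a
 * sentinel entry so that it can be scanned linearly. An illustrative scan
 * of one of the 0-terminated option tables (assuming the struct optlist
 * field names used elsewhere in this file):
 *
 *	for (i = 0; ipopts[i].ol_bit != 0; i++)
 *		if (ipopts[i].ol_val == opt) {
 *			optmsk |= ipopts[i].ol_bit;
 *			break;
 *		}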
*/ static ipfunc_resolve_t ipf_availfuncs[] = { { "srcgrpmap", ipf_srcgrpmap, ipf_grpmapinit, ipf_grpmapfini }, { "dstgrpmap", ipf_dstgrpmap, ipf_grpmapinit, ipf_grpmapfini }, { "", NULL, NULL, NULL } }; static ipftuneable_t ipf_main_tuneables[] = { { { (void *)offsetof(struct ipf_main_softc_s, ipf_flags) }, "ipf_flags", 0, 0xffffffff, stsizeof(ipf_main_softc_t, ipf_flags), 0, NULL, NULL }, { { (void *)offsetof(struct ipf_main_softc_s, ipf_active) }, "active", 0, 0, stsizeof(ipf_main_softc_t, ipf_active), IPFT_RDONLY, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_control_forwarding) }, "control_forwarding", 0, 1, stsizeof(ipf_main_softc_t, ipf_control_forwarding), 0, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_update_ipid) }, "update_ipid", 0, 1, stsizeof(ipf_main_softc_t, ipf_update_ipid), 0, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_chksrc) }, "chksrc", 0, 1, stsizeof(ipf_main_softc_t, ipf_chksrc), 0, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_minttl) }, "min_ttl", 0, 1, stsizeof(ipf_main_softc_t, ipf_minttl), 0, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_icmpminfragmtu) }, "icmp_minfragmtu", 0, 1, stsizeof(ipf_main_softc_t, ipf_icmpminfragmtu), 0, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_pass) }, "default_pass", 0, 0xffffffff, stsizeof(ipf_main_softc_t, ipf_pass), 0, NULL, NULL }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcpidletimeout) }, "tcp_idle_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcpidletimeout), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcpclosewait) }, "tcp_close_wait", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcpclosewait), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcplastack) }, "tcp_last_ack", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcplastack), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcptimeout) }, "tcp_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcptimeout), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcpsynsent) }, "tcp_syn_sent", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcpsynsent), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcpsynrecv) }, "tcp_syn_received", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcpsynrecv), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcpclosed) }, "tcp_closed", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcpclosed), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcphalfclosed) }, "tcp_half_closed", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcphalfclosed), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_tcptimewait) }, "tcp_time_wait", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_tcptimewait), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_udptimeout) }, "udp_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_udptimeout), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_udpacktimeout) }, "udp_ack_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_udpacktimeout), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_icmptimeout) }, "icmp_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_icmptimeout), 0, NULL, ipf_settimeout }, { { (void *)offsetof(ipf_main_softc_t, ipf_icmpacktimeout) }, "icmp_ack_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_icmpacktimeout), 0, NULL, ipf_settimeout }, { { (void 
*)offsetof(ipf_main_softc_t, ipf_iptimeout) }, "ip_timeout", 1, 0x7fffffff, stsizeof(ipf_main_softc_t, ipf_iptimeout), 0, NULL, ipf_settimeout }, #if defined(INSTANCES) && defined(_KERNEL) { { (void *)offsetof(ipf_main_softc_t, ipf_get_loopback) }, "intercept_loopback", 0, 1, stsizeof(ipf_main_softc_t, ipf_get_loopback), 0, NULL, ipf_set_loopback }, #endif { { 0 }, NULL, 0, 0, 0, 0, NULL, NULL } }; /* * The next section of code is a collection of small routines that set * fields in the fr_info_t structure passed based on properties of the * current packet. There are different routines for the same protocol * for each of IPv4 and IPv6. Adding a new protocol, for which there * will be "special" inspection for setup, is now more easily done by adding * a new routine and expanding the ipf_pr_ipinit*() function rather than by * adding more code to a growing switch statement. */ #ifdef USE_INET6 static inline int ipf_pr_ah6(fr_info_t *); static inline void ipf_pr_esp6(fr_info_t *); static inline void ipf_pr_gre6(fr_info_t *); static inline void ipf_pr_udp6(fr_info_t *); static inline void ipf_pr_tcp6(fr_info_t *); static inline void ipf_pr_icmp6(fr_info_t *); static inline void ipf_pr_ipv6hdr(fr_info_t *); static inline void ipf_pr_short6(fr_info_t *, int); static inline int ipf_pr_hopopts6(fr_info_t *); static inline int ipf_pr_mobility6(fr_info_t *); static inline int ipf_pr_routing6(fr_info_t *); static inline int ipf_pr_dstopts6(fr_info_t *); static inline int ipf_pr_fragment6(fr_info_t *); static inline struct ip6_ext *ipf_pr_ipv6exthdr(fr_info_t *, int, int); /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_short6 */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* xmin(I) - minimum header size */ /* */ /* IPv6 Only */ /* This function enforces the 'is a packet too short to be legit' rule */ /* for IPv6 and marks the packet with FI_SHORT if so. See function comment */ /* for ipf_pr_short() for more details. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_short6(fr_info_t *fin, int xmin) { if (fin->fin_dlen < xmin) fin->fin_flx |= FI_SHORT; } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_ipv6hdr */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* Copy values from the IPv6 header into the fr_info_t struct and call the */ /* per-protocol analyzer if it exists. In validating the packet, a protocol*/ /* analyzer may pullup or free the packet itself so we need to be vigilant */ /* of that possibility arising.
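   The body below is essentially a next-header walk: starting from ip6_nxt,
   each extension header's checker returns the protocol number of the
   following header, and the loop stops once a transport header (or
   IPPROTO_NONE) is reached.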
*/ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_ipv6hdr(fr_info_t *fin) { ip6_t *ip6 = (ip6_t *)fin->fin_ip; int p, go = 1, i, hdrcount; fr_ip_t *fi = &fin->fin_fi; fin->fin_off = 0; fi->fi_tos = 0; fi->fi_optmsk = 0; fi->fi_secmsk = 0; fi->fi_auth = 0; p = ip6->ip6_nxt; fin->fin_crc = p; fi->fi_ttl = ip6->ip6_hlim; fi->fi_src.in6 = ip6->ip6_src; fin->fin_crc += fi->fi_src.i6[0]; fin->fin_crc += fi->fi_src.i6[1]; fin->fin_crc += fi->fi_src.i6[2]; fin->fin_crc += fi->fi_src.i6[3]; fi->fi_dst.in6 = ip6->ip6_dst; fin->fin_crc += fi->fi_dst.i6[0]; fin->fin_crc += fi->fi_dst.i6[1]; fin->fin_crc += fi->fi_dst.i6[2]; fin->fin_crc += fi->fi_dst.i6[3]; fin->fin_id = 0; if (IN6_IS_ADDR_MULTICAST(&fi->fi_dst.in6)) fin->fin_flx |= FI_MULTICAST|FI_MBCAST; hdrcount = 0; while (go && !(fin->fin_flx & FI_SHORT)) { switch (p) { case IPPROTO_UDP : ipf_pr_udp6(fin); go = 0; break; case IPPROTO_TCP : ipf_pr_tcp6(fin); go = 0; break; case IPPROTO_ICMPV6 : ipf_pr_icmp6(fin); go = 0; break; case IPPROTO_GRE : ipf_pr_gre6(fin); go = 0; break; case IPPROTO_HOPOPTS : p = ipf_pr_hopopts6(fin); break; case IPPROTO_MOBILITY : p = ipf_pr_mobility6(fin); break; case IPPROTO_DSTOPTS : p = ipf_pr_dstopts6(fin); break; case IPPROTO_ROUTING : p = ipf_pr_routing6(fin); break; case IPPROTO_AH : p = ipf_pr_ah6(fin); break; case IPPROTO_ESP : ipf_pr_esp6(fin); go = 0; break; case IPPROTO_IPV6 : for (i = 0; ip6exthdr[i].ol_bit != 0; i++) if (ip6exthdr[i].ol_val == p) { fin->fin_flx |= ip6exthdr[i].ol_bit; break; } go = 0; break; case IPPROTO_NONE : go = 0; break; case IPPROTO_FRAGMENT : p = ipf_pr_fragment6(fin); /* * Given that the only fragments we want to let through * (where fin_off != 0) are those where the non-first * fragments only have data, we can safely stop looking * at headers if this is a non-leading fragment. */ if (fin->fin_off != 0) go = 0; break; default : go = 0; break; } hdrcount++; /* * It is important to note that, for the extension * headers (go != 0), the entire header may not have * been pulled up when the code gets to this point. This is * only done for "go != 0" because the other header handlers * will all pullup their complete header. The other indicator * of an incomplete packet is that this was just an extension * header. */ if ((go != 0) && (p != IPPROTO_NONE) && (ipf_pr_pullup(fin, 0) == -1)) { p = IPPROTO_NONE; break; } } /* * Some of the above functions, like ipf_pr_esp6(), can call ipf_pullup * and destroy whatever packet was here. The caller of this function * expects us to return if there is a problem with ipf_pullup. */ if (fin->fin_m == NULL) { ipf_main_softc_t *softc = fin->fin_main_soft; LBUMPD(ipf_stats[fin->fin_out], fr_v6_bad); return; } fi->fi_p = p; /* * IPv6 fragment case 1 - see comment for ipf_pr_fragment6(). * "go != 0" implies the above loop hasn't arrived at a layer 4 header. */ if ((go != 0) && (fin->fin_flx & FI_FRAG) && (fin->fin_off == 0)) { ipf_main_softc_t *softc = fin->fin_main_soft; fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_ipv6_frag_1, fr_info_t *, fin, int, go); LBUMPD(ipf_stats[fin->fin_out], fr_v6_badfrag); LBUMP(ipf_stats[fin->fin_out].fr_v6_bad); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_ipv6exthdr */ /* Returns: struct ip6_ext * - pointer to the start of the next header */ /* or NULL if there is a problem.
*/ /* Parameters: fin(I) - pointer to packet information */ /* multiple(I) - flag indicating yes/no if multiple occurrences */ /* of this extension header are allowed. */ /* proto(I) - protocol number for this extension header */ /* */ /* IPv6 Only */ /* This function embodies a number of common checks that all IPv6 extension */ /* headers must be subjected to. For example, making sure the packet is */ /* big enough to contain it, checking if it is repeated and setting a */ /* flag to indicate its presence. */ /* ------------------------------------------------------------------------ */ static inline struct ip6_ext * ipf_pr_ipv6exthdr(fr_info_t *fin, int multiple, int proto) { ipf_main_softc_t *softc = fin->fin_main_soft; struct ip6_ext *hdr; u_short shift; int i; fin->fin_flx |= FI_V6EXTHDR; /* 8 is default length of extension hdr */ if ((fin->fin_dlen - 8) < 0) { fin->fin_flx |= FI_SHORT; LBUMPD(ipf_stats[fin->fin_out], fr_v6_ext_short); return (NULL); } if (ipf_pr_pullup(fin, 8) == -1) { LBUMPD(ipf_stats[fin->fin_out], fr_v6_ext_pullup); return (NULL); } hdr = fin->fin_dp; switch (proto) { case IPPROTO_FRAGMENT : shift = 8; break; default : shift = 8 + (hdr->ip6e_len << 3); break; } if (shift > fin->fin_dlen) { /* Nasty extension header length? */ fin->fin_flx |= FI_BAD; DT3(ipf_fi_bad_pr_ipv6exthdr_len, fr_info_t *, fin, u_short, shift, u_short, fin->fin_dlen); LBUMPD(ipf_stats[fin->fin_out], fr_v6_ext_hlen); return (NULL); } fin->fin_dp = (char *)fin->fin_dp + shift; fin->fin_dlen -= shift; /* * If we have seen a fragment header, do not set any flags to indicate * the presence of this extension header as it has no impact on the * end result until after it has been defragmented. */ if (fin->fin_flx & FI_FRAG) return (hdr); for (i = 0; ip6exthdr[i].ol_bit != 0; i++) if (ip6exthdr[i].ol_val == proto) { /* * Most IPv6 extension headers are only allowed once.
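 *
 * (For reference, the 'shift' computation above follows RFC 8200: a
 * generic extension header occupies 8 + ip6e_len * 8 bytes, since
 * ip6e_len counts 8-octet units beyond the first 8 octets, so e.g.
 * ip6e_len == 2 means a 24-byte header, while the fragment header is
 * always exactly 8 bytes.)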
*/ if ((multiple == 0) && ((fin->fin_optmsk & ip6exthdr[i].ol_bit) != 0)) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_ipv6exthdr_once, fr_info_t *, fin, u_int, (fin->fin_optmsk & ip6exthdr[i].ol_bit)); } else fin->fin_optmsk |= ip6exthdr[i].ol_bit; break; } return (hdr); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_hopopts6 */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* This function checks the pending hop-by-hop options extension header */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_hopopts6(fr_info_t *fin) { struct ip6_ext *hdr; hdr = ipf_pr_ipv6exthdr(fin, 0, IPPROTO_HOPOPTS); if (hdr == NULL) return (IPPROTO_NONE); return (hdr->ip6e_nxt); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_mobility6 */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* This function checks the IPv6 mobility extension header */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_mobility6(fr_info_t *fin) { struct ip6_ext *hdr; hdr = ipf_pr_ipv6exthdr(fin, 0, IPPROTO_MOBILITY); if (hdr == NULL) return (IPPROTO_NONE); return (hdr->ip6e_nxt); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_routing6 */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* This function checks the pending routing extension header */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_routing6(fr_info_t *fin) { struct ip6_routing *hdr; hdr = (struct ip6_routing *)ipf_pr_ipv6exthdr(fin, 0, IPPROTO_ROUTING); if (hdr == NULL) return (IPPROTO_NONE); switch (hdr->ip6r_type) { case 0 : /* * Nasty extension header length? */ if (((hdr->ip6r_len >> 1) < hdr->ip6r_segleft) || (hdr->ip6r_segleft && (hdr->ip6r_len & 1))) { ipf_main_softc_t *softc = fin->fin_main_soft; fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_routing6, fr_info_t *, fin); LBUMPD(ipf_stats[fin->fin_out], fr_v6_rh_bad); return (IPPROTO_NONE); } break; default : break; } return (hdr->ip6r_nxt); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_fragment6 */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* Examine the IPv6 fragment header and extract fragment offset information.*/ /* */ /* Fragments in IPv6 are extraordinarily difficult to deal with - much more */ /* so than in IPv4. There are 5 cases of fragments with IPv6 that all */ /* packets with a fragment header can fit into. They are as follows: */ /* */ /* 1. [IPv6][0-n EH][FH][0-n EH] (no L4HDR present) */ /* 2. [IPV6][0-n EH][FH][0-n EH][L4HDR part] (short) */ /* 3. [IPV6][0-n EH][FH][L4HDR part][0-n data] (short) */ /* 4. [IPV6][0-n EH][FH][0-n EH][L4HDR][0-n data] */ /* 5.
[IPV6][0-n EH][FH][data] */ /* */ /* IPV6 = IPv6 header, FH = Fragment Header, */ /* 0-n EH = 0 or more extension headers, 0-n data = 0 or more bytes of data */ /* */ /* Packets that match 1, 2, 3 will be dropped as the only reasonable */ /* scenario in which they happen is in extreme circumstances that are most */ /* likely to be an indication of an attack rather than normal traffic. */ /* A type 3 packet may be sent by an attacker after a type 4 packet. There */ /* are two rules that can be used to guard against type 3 packets: L4 */ /* headers must always be in a packet that has the offset field set to 0 */ /* and no packet is allowed to overlay that where offset = 0. */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_fragment6(fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; struct ip6_frag *frag; fin->fin_flx |= FI_FRAG; frag = (struct ip6_frag *)ipf_pr_ipv6exthdr(fin, 0, IPPROTO_FRAGMENT); if (frag == NULL) { LBUMPD(ipf_stats[fin->fin_out], fr_v6_frag_bad); return (IPPROTO_NONE); } if ((frag->ip6f_offlg & IP6F_MORE_FRAG) != 0) { /* * Any fragment that isn't the last fragment must have its * length as a multiple of 8. */ if ((fin->fin_plen & 7) != 0) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_frag_not_8, fr_info_t *, fin, u_int, (fin->fin_plen & 7)); } } fin->fin_fraghdr = frag; fin->fin_id = frag->ip6f_ident; fin->fin_off = ntohs(frag->ip6f_offlg & IP6F_OFF_MASK); if (fin->fin_off != 0) fin->fin_flx |= FI_FRAGBODY; /* * Jumbograms aren't handled, so the max. length is 64k */ if ((fin->fin_off << 3) + fin->fin_dlen > 65535) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_jumbogram, fr_info_t *, fin, u_int, ((fin->fin_off << 3) + fin->fin_dlen)); } /* * We don't know where the transport layer header (or whatever comes * next) is, as it could be behind destination options (amongst others), * so return the fragment header as the type of packet this is. Note that * this effectively disables the fragment cache for > 1 protocol at a * time. */ return (frag->ip6f_nxt); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_dstopts6 */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* This function checks the pending destination options extension header */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_dstopts6(fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; struct ip6_ext *hdr; hdr = ipf_pr_ipv6exthdr(fin, 0, IPPROTO_DSTOPTS); if (hdr == NULL) { LBUMPD(ipf_stats[fin->fin_out], fr_v6_dst_bad); return (IPPROTO_NONE); } return (hdr->ip6e_nxt); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_icmp6 */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* This routine is mainly concerned with determining the minimum valid size */ /* for an ICMPv6 packet.
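   The base icmp6_hdr is 8 bytes (type, code, checksum and 4 bytes of
   type-specific data); for the error types handled below the minimum is
   raised so that the embedded header of the offending packet must be
   present as well.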
*/ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_icmp6(fr_info_t *fin) { int minicmpsz = sizeof(struct icmp6_hdr); struct icmp6_hdr *icmp6; if (ipf_pr_pullup(fin, ICMP6ERR_MINPKTLEN - sizeof(ip6_t)) == -1) { ipf_main_softc_t *softc = fin->fin_main_soft; LBUMPD(ipf_stats[fin->fin_out], fr_v6_icmp6_pullup); return; } if (fin->fin_dlen > 1) { ip6_t *ip6; icmp6 = fin->fin_dp; fin->fin_data[0] = *(u_short *)icmp6; if ((icmp6->icmp6_type & ICMP6_INFOMSG_MASK) != 0) fin->fin_flx |= FI_ICMPQUERY; switch (icmp6->icmp6_type) { case ICMP6_ECHO_REPLY : case ICMP6_ECHO_REQUEST : if (fin->fin_dlen >= 6) fin->fin_data[1] = icmp6->icmp6_id; minicmpsz = ICMP6ERR_MINPKTLEN - sizeof(ip6_t); break; case ICMP6_DST_UNREACH : case ICMP6_PACKET_TOO_BIG : case ICMP6_TIME_EXCEEDED : case ICMP6_PARAM_PROB : fin->fin_flx |= FI_ICMPERR; minicmpsz = ICMP6ERR_IPICMPHLEN - sizeof(ip6_t); if (fin->fin_plen < ICMP6ERR_IPICMPHLEN) break; if (M_LEN(fin->fin_m) < fin->fin_plen) { if (ipf_coalesce(fin) != 1) return; } if (ipf_pr_pullup(fin, ICMP6ERR_MINPKTLEN) == -1) return; /* * If the destination of this packet doesn't match the * source of the original packet then this packet is * not correct. */ icmp6 = fin->fin_dp; ip6 = (ip6_t *)((char *)icmp6 + ICMPERR_ICMPHLEN); if (IP6_NEQ(&fin->fin_fi.fi_dst, (i6addr_t *)&ip6->ip6_src)) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_icmp6, fr_info_t *, fin); } break; default : break; } } ipf_pr_short6(fin, minicmpsz); if ((fin->fin_flx & (FI_SHORT|FI_BAD)) == 0) { u_char p = fin->fin_p; fin->fin_p = IPPROTO_ICMPV6; ipf_checkv6sum(fin); fin->fin_p = p; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_udp6 */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* Analyse the packet for IPv6/UDP properties. */ /* Is not expected to be called for fragmented packets. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_udp6(fr_info_t *fin) { if (ipf_pr_udpcommon(fin) == 0) { u_char p = fin->fin_p; fin->fin_p = IPPROTO_UDP; ipf_checkv6sum(fin); fin->fin_p = p; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_tcp6 */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* Analyse the packet for IPv6/TCP properties. */ /* Is not expected to be called for fragmented packets. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_tcp6(fr_info_t *fin) { if (ipf_pr_tcpcommon(fin) == 0) { u_char p = fin->fin_p; fin->fin_p = IPPROTO_TCP; ipf_checkv6sum(fin); fin->fin_p = p; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_esp6 */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* Analyse the packet for ESP properties. */ /* The minimum length is taken to be the SPI (32bits) plus a tail (32bits) */ /* even though the newer ESP packets must also have a sequence number that */ /* is 32bits as well, it is not possible(?) to determine the version from a */ /* simple packet header. 
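   In byte terms the pullup below therefore asks for 8 bytes: the 32-bit
   SPI plus one more 32-bit word, which is all that can be inspected of an
   ESP payload without the SA's keys.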
*/ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_esp6(fr_info_t *fin) { if ((fin->fin_off == 0) && (ipf_pr_pullup(fin, 8) == -1)) { ipf_main_softc_t *softc = fin->fin_main_soft; LBUMPD(ipf_stats[fin->fin_out], fr_v6_esp_pullup); return; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_ah6 */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv6 Only */ /* Analyse the packet for AH properties. */ /* The minimum length is taken to be the combination of all fields in the */ /* header being present and no authentication data (null algorithm used.) */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_ah6(fr_info_t *fin) { authhdr_t *ah; fin->fin_flx |= FI_AH; ah = (authhdr_t *)ipf_pr_ipv6exthdr(fin, 0, IPPROTO_HOPOPTS); if (ah == NULL) { ipf_main_softc_t *softc = fin->fin_main_soft; LBUMPD(ipf_stats[fin->fin_out], fr_v6_ah_bad); return (IPPROTO_NONE); } ipf_pr_short6(fin, sizeof(*ah)); /* * No need for another pullup, ipf_pr_ipv6exthdr() will pullup * enough data to satisfy ah_next (the very first one.) */ return (ah->ah_next); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_gre6 */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Analyse the packet for GRE properties. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_gre6(fr_info_t *fin) { grehdr_t *gre; if (ipf_pr_pullup(fin, sizeof(grehdr_t)) == -1) { ipf_main_softc_t *softc = fin->fin_main_soft; LBUMPD(ipf_stats[fin->fin_out], fr_v6_gre_pullup); return; } gre = fin->fin_dp; if (GRE_REV(gre->gr_flags) == 1) fin->fin_data[0] = gre->gr_call; } #endif /* USE_INET6 */ /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_pullup */ /* Returns: int - 0 == pullup succeeded, -1 == failure */ /* Parameters: fin(I) - pointer to packet information */ /* plen(I) - length (excluding L3 header) to pullup */ /* */ /* Short inline function to cut down on code duplication to perform a call */ /* to ipf_pullup to ensure there is the required amount of data, */ /* consecutively in the packet buffer. */ /* */ /* This function pulls up 'extra' data at the location of fin_dp. fin_dp */ /* points to the first byte after the complete layer 3 header, which will */ /* include all of the known extension headers for IPv6 or options for IPv4. */ /* */ /* Since fr_pullup() expects the total length of bytes to be pulled up, it */ /* is necessary to add those we can already assume to be pulled up (fin_dp */ /* - fin_ip) to what is passed through. 
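*/ /* Worked example with hypothetical numbers: for an IPv4 packet with a 20 */ /* byte header, fin_dp pointing just past it and a request for plen = 8, */ /* the code below asks for 0 + 8 + 20 = 28 contiguous bytes from the start */ /* of the IP header, plus fin_ipoff when the header does not begin at the */ /* start of the buffer. */ /*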
*/ /* ------------------------------------------------------------------------ */ int ipf_pr_pullup(fr_info_t *fin, int plen) { ipf_main_softc_t *softc = fin->fin_main_soft; if (fin->fin_m != NULL) { if (fin->fin_dp != NULL) plen += (char *)fin->fin_dp - ((char *)fin->fin_ip + fin->fin_hlen); plen += fin->fin_hlen; if (M_LEN(fin->fin_m) < plen + fin->fin_ipoff) { #if defined(_KERNEL) if (ipf_pullup(fin->fin_m, fin, plen) == NULL) { DT1(ipf_pullup_fail, fr_info_t *, fin); LBUMP(ipf_stats[fin->fin_out].fr_pull[1]); fin->fin_reason = FRB_PULLUP; fin->fin_flx |= FI_BAD; return (-1); } LBUMP(ipf_stats[fin->fin_out].fr_pull[0]); #else LBUMP(ipf_stats[fin->fin_out].fr_pull[1]); /* * Fake ipf_pullup failing */ fin->fin_reason = FRB_PULLUP; *fin->fin_mp = NULL; fin->fin_m = NULL; fin->fin_ip = NULL; fin->fin_flx |= FI_BAD; return (-1); #endif } } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_short */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* xmin(I) - minimum header size */ /* */ /* Check if a packet is "short" as defined by xmin. The rule we are */ /* applying here is that the packet must not be fragmented within the layer */ /* 4 header. That is, it must not be a fragment that has its offset set to */ /* start within the layer 4 header (hdrmin) or if it is at offset 0, the */ /* entire layer 4 header must be present (min). */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_short(fr_info_t *fin, int xmin) { if (fin->fin_off == 0) { if (fin->fin_dlen < xmin) fin->fin_flx |= FI_SHORT; } else if (fin->fin_off < xmin) { fin->fin_flx |= FI_SHORT; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_icmp */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv4 Only */ /* Do a sanity check on the packet for ICMP (v4). In nearly all cases, */ /* except extremely bad packets, both type and code will be present. */ /* The expected minimum size of an ICMP packet is very much dependent on */ /* the type of it. */ /* */ /* XXX - other ICMP sanity checks?
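*/ /* As a worked example of the sizes computed below: a timestamp message is */ /* type(1) + code(1) + cksum(2) + id(2) + seq(2) + 3 timestamps of 4 bytes */ /* each, i.e. 8 + 12 = 20 bytes, which is the minicmpsz used for */ /* ICMP_TSTAMP and ICMP_TSTAMPREPLY. */ /*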
*/ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_icmp(fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; int minicmpsz = sizeof(struct icmp); icmphdr_t *icmp; ip_t *oip; ipf_pr_short(fin, ICMPERR_ICMPHLEN); if (fin->fin_off != 0) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_icmp_frag); return; } if (ipf_pr_pullup(fin, ICMPERR_ICMPHLEN) == -1) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_icmp_pullup); return; } icmp = fin->fin_dp; fin->fin_data[0] = *(u_short *)icmp; fin->fin_data[1] = icmp->icmp_id; switch (icmp->icmp_type) { case ICMP_ECHOREPLY : case ICMP_ECHO : /* Router discovery messages - RFC 1256 */ case ICMP_ROUTERADVERT : case ICMP_ROUTERSOLICIT : fin->fin_flx |= FI_ICMPQUERY; minicmpsz = ICMP_MINLEN; break; /* * type(1) + code(1) + cksum(2) + id(2) seq(2) + * 3 * timestamp(3 * 4) */ case ICMP_TSTAMP : case ICMP_TSTAMPREPLY : fin->fin_flx |= FI_ICMPQUERY; minicmpsz = 20; break; /* * type(1) + code(1) + cksum(2) + id(2) seq(2) + * mask(4) */ case ICMP_IREQ : case ICMP_IREQREPLY : case ICMP_MASKREQ : case ICMP_MASKREPLY : fin->fin_flx |= FI_ICMPQUERY; minicmpsz = 12; break; /* * type(1) + code(1) + cksum(2) + id(2) seq(2) + ip(20+) */ case ICMP_UNREACH : #ifdef icmp_nextmtu if (icmp->icmp_code == ICMP_UNREACH_NEEDFRAG) { if (icmp->icmp_nextmtu < softc->ipf_icmpminfragmtu) { fin->fin_flx |= FI_BAD; DT3(ipf_fi_bad_icmp_nextmtu, fr_info_t *, fin, u_int, icmp->icmp_nextmtu, u_int, softc->ipf_icmpminfragmtu); } } #endif /* FALLTHROUGH */ case ICMP_SOURCEQUENCH : case ICMP_REDIRECT : case ICMP_TIMXCEED : case ICMP_PARAMPROB : fin->fin_flx |= FI_ICMPERR; if (ipf_coalesce(fin) != 1) { LBUMPD(ipf_stats[fin->fin_out], fr_icmp_coalesce); return; } /* * ICMP error packets should not be generated for IP * packets that are a fragment that isn't the first * fragment. */ oip = (ip_t *)((char *)fin->fin_dp + ICMPERR_ICMPHLEN); if ((ntohs(oip->ip_off) & IP_OFFMASK) != 0) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_icmp_err, fr_info_t, fin, u_int, (ntohs(oip->ip_off) & IP_OFFMASK)); } /* * If the destination of this packet doesn't match the * source of the original packet then this packet is * not correct. */ if (oip->ip_src.s_addr != fin->fin_daddr) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_src_ne_dst, fr_info_t *, fin); } break; default : break; } ipf_pr_short(fin, minicmpsz); ipf_checkv4sum(fin); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_tcpcommon */ /* Returns: int - 0 = header ok, 1 = bad packet, -1 = buffer error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* TCP header sanity checking. Look for bad combinations of TCP flags, */ /* and check how they interact with other fields. */ /* If compiled with IPFILTER_CKSUM, check to see if the TCP checksum is */ /* valid and mark the packet as bad if not.
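*/ /* For example, a segment carrying TH_URG with a zero urgent pointer, or */ /* TH_SYN|TH_RST without TH_ACK, is tagged FI_BAD below; neither */ /* combination occurs in normal TCP traffic. */ /*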
*/ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_tcpcommon(fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; int flags, tlen; tcphdr_t *tcp; fin->fin_flx |= FI_TCPUDP; if (fin->fin_off != 0) { LBUMPD(ipf_stats[fin->fin_out], fr_tcp_frag); return (0); } if (ipf_pr_pullup(fin, sizeof(*tcp)) == -1) { LBUMPD(ipf_stats[fin->fin_out], fr_tcp_pullup); return (-1); } tcp = fin->fin_dp; if (fin->fin_dlen > 3) { fin->fin_sport = ntohs(tcp->th_sport); fin->fin_dport = ntohs(tcp->th_dport); } if ((fin->fin_flx & FI_SHORT) != 0) { LBUMPD(ipf_stats[fin->fin_out], fr_tcp_short); return (1); } /* * Use of the TCP data offset *must* result in a value that is at * least the same size as the TCP header. */ tlen = TCP_OFF(tcp) << 2; if (tlen < sizeof(tcphdr_t)) { LBUMPD(ipf_stats[fin->fin_out], fr_tcp_small); fin->fin_flx |= FI_BAD; DT3(ipf_fi_bad_tlen, fr_info_t, fin, u_int, tlen, u_int, sizeof(tcphdr_t)); return (1); } flags = tcp_get_flags(tcp); fin->fin_tcpf = tcp_get_flags(tcp); /* * If the urgent flag is set, then the urgent pointer must * also be set and vice versa. Good TCP packets do not have * just one of these set. */ if ((flags & TH_URG) != 0 && (tcp->th_urp == 0)) { fin->fin_flx |= FI_BAD; DT3(ipf_fi_bad_th_urg, fr_info_t*, fin, u_int, (flags & TH_URG), u_int, tcp->th_urp); #if 0 } else if ((flags & TH_URG) == 0 && (tcp->th_urp != 0)) { /* * Ignore this case (#if 0) as it shows up in "real" * traffic with bogus values in the urgent pointer field. */ fin->fin_flx |= FI_BAD; DT3(ipf_fi_bad_th_urg0, fr_info_t *, fin, u_int, (flags & TH_URG), u_int, tcp->th_urp); #endif } else if (((flags & (TH_SYN|TH_FIN)) != 0) && ((flags & (TH_RST|TH_ACK)) == TH_RST)) { /* TH_FIN|TH_RST|TH_ACK seems to appear "naturally" */ fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_th_fin_rst_ack, fr_info_t, fin); #if 1 } else if (((flags & TH_SYN) != 0) && ((flags & (TH_URG|TH_PUSH)) != 0)) { /* * SYN with URG and PUSH set is not for normal TCP but it is * possible(?) with T/TCP...but who uses T/TCP? */ fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_th_syn_urg_psh, fr_info_t *, fin); #endif } else if (!(flags & TH_ACK)) { /* * If the ack bit isn't set, then either the SYN or * RST bit must be set. If the SYN bit is set, then * we expect the ACK field to be 0. If the ACK is * not set and if URG, PSH or FIN are set, consider * that to indicate a bad TCP packet. */ if ((flags == TH_SYN) && (tcp->th_ack != 0)) { /* * Cisco PIX sets the ACK field to a random value. * In light of this, do not set FI_BAD until a patch * is available from Cisco to ensure that * interoperability between existing systems is * achieved. */ /*fin->fin_flx |= FI_BAD*/; /*DT1(ipf_fi_bad_th_syn_ack, fr_info_t *, fin);*/ } else if (!(flags & (TH_RST|TH_SYN))) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_th_rst_syn, fr_info_t *, fin); } else if ((flags & (TH_URG|TH_PUSH|TH_FIN)) != 0) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_th_urg_push_fin, fr_info_t *, fin); } } if (fin->fin_flx & FI_BAD) { LBUMPD(ipf_stats[fin->fin_out], fr_tcp_bad_flags); return (1); } /* * At this point, it's not exactly clear what is to be gained by * marking up which TCP options are and are not present. The one we * are most interested in is the TCP window scale. This is only in * a SYN packet [RFC1323] so we don't need this here...? * Now if we were to analyse the header for passive fingerprinting, * then that might add some weight to adding this...
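*/ /* E.g. a SYN advertising window scaling [RFC1323] has TCP_OFF(tcp) > 5; */ /* the pullup below only guarantees such an option block is contiguous, */ /* nothing here parses it yet. */ /*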
*/ if (tlen == sizeof(tcphdr_t)) { return (0); } if (ipf_pr_pullup(fin, tlen) == -1) { LBUMPD(ipf_stats[fin->fin_out], fr_tcp_pullup); return (-1); } #if 0 tcp = fin->fin_dp; ip = fin->fin_ip; s = (u_char *)(tcp + 1); off = IP_HL(ip) << 2; # ifdef _KERNEL if (fin->fin_mp != NULL) { mb_t *m = *fin->fin_mp; if (off + tlen > M_LEN(m)) return; } # endif for (tlen -= (int)sizeof(*tcp); tlen > 0; ) { opt = *s; if (opt == '\0') break; else if (opt == TCPOPT_NOP) ol = 1; else { if (tlen < 2) break; ol = (int)*(s + 1); if (ol < 2 || ol > tlen) break; } for (i = 9, mv = 4; mv >= 0; ) { op = ipopts + i; if (opt == (u_char)op->ol_val) { optmsk |= op->ol_bit; break; } } tlen -= ol; s += ol; } #endif /* 0 */ return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_udpcommon */ /* Returns: int - 0 = header ok, 1 = bad packet */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Extract the UDP source and destination ports, if present. If compiled */ /* with IPFILTER_CKSUM, check to see if the UDP checksum is valid. */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_udpcommon(fr_info_t *fin) { udphdr_t *udp; fin->fin_flx |= FI_TCPUDP; if (!fin->fin_off && (fin->fin_dlen > 3)) { if (ipf_pr_pullup(fin, sizeof(*udp)) == -1) { ipf_main_softc_t *softc = fin->fin_main_soft; fin->fin_flx |= FI_SHORT; LBUMPD(ipf_stats[fin->fin_out], fr_udp_pullup); return (1); } udp = fin->fin_dp; fin->fin_sport = ntohs(udp->uh_sport); fin->fin_dport = ntohs(udp->uh_dport); } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_tcp */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv4 Only */ /* Analyse the packet for IPv4/TCP properties. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_tcp(fr_info_t *fin) { ipf_pr_short(fin, sizeof(tcphdr_t)); if (ipf_pr_tcpcommon(fin) == 0) ipf_checkv4sum(fin); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_udp */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv4 Only */ /* Analyse the packet for IPv4/UDP properties. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_udp(fr_info_t *fin) { ipf_pr_short(fin, sizeof(udphdr_t)); if (ipf_pr_udpcommon(fin) == 0) ipf_checkv4sum(fin); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_esp */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Analyse the packet for ESP properties. */ /* The minimum length is taken to be the SPI (32bits) plus a tail (32bits) */ /* even though the newer ESP packets must also have a sequence number that */ /* is 32bits as well, it is not possible(?) to determine the version from a */ /* simple packet header. 
*/ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_esp(fr_info_t *fin) { if (fin->fin_off == 0) { ipf_pr_short(fin, 8); if (ipf_pr_pullup(fin, 8) == -1) { ipf_main_softc_t *softc = fin->fin_main_soft; LBUMPD(ipf_stats[fin->fin_out], fr_v4_esp_pullup); } } } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_ah */ /* Returns: int - value of the next header or IPPROTO_NONE if error */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Analyse the packet for AH properties. */ /* The minimum length is taken to be the combination of all fields in the */ /* header being present and no authentication data (null algorithm used.) */ /* ------------------------------------------------------------------------ */ static inline int ipf_pr_ah(fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; authhdr_t *ah; int len; fin->fin_flx |= FI_AH; ipf_pr_short(fin, sizeof(*ah)); if (((fin->fin_flx & FI_SHORT) != 0) || (fin->fin_off != 0)) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_ah_bad); return (IPPROTO_NONE); } if (ipf_pr_pullup(fin, sizeof(*ah)) == -1) { DT(fr_v4_ah_pullup_1); LBUMP(ipf_stats[fin->fin_out].fr_v4_ah_pullup); return (IPPROTO_NONE); } ah = (authhdr_t *)fin->fin_dp; len = (ah->ah_plen + 2) << 2; ipf_pr_short(fin, len); if (ipf_pr_pullup(fin, len) == -1) { DT(fr_v4_ah_pullup_2); LBUMP(ipf_stats[fin->fin_out].fr_v4_ah_pullup); return (IPPROTO_NONE); } /* * Adjust fin_dp and fin_dlen for skipping over the authentication * header. */ fin->fin_dp = (char *)fin->fin_dp + len; fin->fin_dlen -= len; return (ah->ah_next); } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_gre */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Analyse the packet for GRE properties. */ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_gre(fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; grehdr_t *gre; ipf_pr_short(fin, sizeof(grehdr_t)); if (fin->fin_off != 0) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_gre_frag); return; } if (ipf_pr_pullup(fin, sizeof(grehdr_t)) == -1) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_gre_pullup); return; } gre = fin->fin_dp; if (GRE_REV(gre->gr_flags) == 1) fin->fin_data[0] = gre->gr_call; } /* ------------------------------------------------------------------------ */ /* Function: ipf_pr_ipv4hdr */ /* Returns: void */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* IPv4 Only */ /* Analyze the IPv4 header and set fields in the fr_info_t structure. */ /* Check all options present and flag their presence if any exist. 
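*/ /* Worked example with hypothetical values: an ip_off field of 0x00b9 */ /* yields a fragment offset of 185 units of 8 bytes, i.e. byte offset */ /* 1480, so FI_FRAG and FI_FRAGBODY are set below. */ /*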
*/ /* ------------------------------------------------------------------------ */ static inline void ipf_pr_ipv4hdr(fr_info_t *fin) { u_short optmsk = 0, secmsk = 0, auth = 0; int hlen, ol, mv, p, i; const struct optlist *op; u_char *s, opt; u_short off; fr_ip_t *fi; ip_t *ip; fi = &fin->fin_fi; hlen = fin->fin_hlen; ip = fin->fin_ip; p = ip->ip_p; fi->fi_p = p; fin->fin_crc = p; fi->fi_tos = ip->ip_tos; fin->fin_id = ntohs(ip->ip_id); off = ntohs(ip->ip_off); /* Get both TTL and protocol */ fi->fi_p = ip->ip_p; fi->fi_ttl = ip->ip_ttl; /* Zero out bits not used in IPv6 address */ fi->fi_src.i6[1] = 0; fi->fi_src.i6[2] = 0; fi->fi_src.i6[3] = 0; fi->fi_dst.i6[1] = 0; fi->fi_dst.i6[2] = 0; fi->fi_dst.i6[3] = 0; fi->fi_saddr = ip->ip_src.s_addr; fin->fin_crc += fi->fi_saddr; fi->fi_daddr = ip->ip_dst.s_addr; fin->fin_crc += fi->fi_daddr; if (IN_MULTICAST(ntohl(fi->fi_daddr))) fin->fin_flx |= FI_MULTICAST|FI_MBCAST; /* * set packet attribute flags based on the offset and * calculate the byte offset that it represents. */ off &= IP_MF|IP_OFFMASK; if (off != 0) { int morefrag = off & IP_MF; fi->fi_flx |= FI_FRAG; off &= IP_OFFMASK; if (off == 1 && p == IPPROTO_TCP) { fin->fin_flx |= FI_SHORT; /* RFC 3128 */ DT1(ipf_fi_tcp_frag_off_1, fr_info_t *, fin); } if (off != 0) { fin->fin_flx |= FI_FRAGBODY; off <<= 3; if ((off + fin->fin_dlen > 65535) || (fin->fin_dlen == 0) || ((morefrag != 0) && ((fin->fin_dlen & 7) != 0))) { /* * The length of the packet, starting at its * offset cannot exceed 65535 (0xffff) as the * length of an IP packet is only 16 bits. * * Any fragment that isn't the last fragment * must have a length greater than 0 and it * must be an even multiple of 8. */ fi->fi_flx |= FI_BAD; DT1(ipf_fi_bad_fragbody_gt_65535, fr_info_t *, fin); } } } fin->fin_off = off; /* * Call per-protocol setup and checking */ if (p == IPPROTO_AH) { /* * Treat AH differently because we expect there to be another * layer 4 header after it. */ p = ipf_pr_ah(fin); } switch (p) { case IPPROTO_UDP : ipf_pr_udp(fin); break; case IPPROTO_TCP : ipf_pr_tcp(fin); break; case IPPROTO_ICMP : ipf_pr_icmp(fin); break; case IPPROTO_ESP : ipf_pr_esp(fin); break; case IPPROTO_GRE : ipf_pr_gre(fin); break; } ip = fin->fin_ip; if (ip == NULL) return; /* * If it is a standard IP header (no options), set the flag fields * which relate to options to 0. */ if (hlen == sizeof(*ip)) { fi->fi_optmsk = 0; fi->fi_secmsk = 0; fi->fi_auth = 0; return; } /* * So the IP header has some IP options attached. Walk the entire * list of options present with this packet and set flags to indicate * which ones are here and which ones are not. For the somewhat out * of date and obscure security classification options, set a flag to * represent which classification is present. 
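*/ /* For instance, a packet carrying two IPOPT_SECURITY (or two IPOPT_CIPSO) */ /* options is tagged FI_BAD in the loop below, since the option mask */ /* already records the first occurrence and a duplicate is treated as */ /* hostile rather than merely redundant. */ /*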
*/ fi->fi_flx |= FI_OPTIONS; for (s = (u_char *)(ip + 1), hlen -= (int)sizeof(*ip); hlen > 0; ) { opt = *s; if (opt == '\0') break; else if (opt == IPOPT_NOP) ol = 1; else { if (hlen < 2) break; ol = (int)*(s + 1); if (ol < 2 || ol > hlen) break; } for (i = 9, mv = 4; mv >= 0; ) { op = ipopts + i; if ((opt == (u_char)op->ol_val) && (ol > 4)) { u_32_t doi; switch (opt) { case IPOPT_SECURITY : if (optmsk & op->ol_bit) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_ipopt_security, fr_info_t *, fin, u_short, (optmsk & op->ol_bit)); } else { doi = ipf_checkripso(s); secmsk = doi >> 16; auth = doi & 0xffff; } break; case IPOPT_CIPSO : if (optmsk & op->ol_bit) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_ipopt_cipso, fr_info_t *, fin, u_short, (optmsk & op->ol_bit)); } else { doi = ipf_checkcipso(fin, s, ol); secmsk = doi >> 16; auth = doi & 0xffff; } break; } optmsk |= op->ol_bit; } if (opt < op->ol_val) i -= mv; else i += mv; mv--; } hlen -= ol; s += ol; } /* * */ if (auth && !(auth & 0x0100)) auth &= 0xff00; fi->fi_optmsk = optmsk; fi->fi_secmsk = secmsk; fi->fi_auth = auth; } /* ------------------------------------------------------------------------ */ /* Function: ipf_checkripso */ /* Returns: u_32_t - RIPSO classification mask and authority flags */ /* Parameters: s(I) - pointer to start of RIPSO option */ /* */ /* ------------------------------------------------------------------------ */ static u_32_t ipf_checkripso(u_char *s) { const struct optlist *sp; u_short secmsk = 0, auth = 0; u_char sec; int j, m; sec = *(s + 2); /* classification */ for (j = 3, m = 2; m >= 0; ) { sp = secopt + j; if (sec == sp->ol_val) { secmsk |= sp->ol_bit; auth = *(s + 3); auth *= 256; auth += *(s + 4); break; } if (sec < sp->ol_val) j -= m; else j += m; m--; } return (secmsk << 16) | auth; } /* ------------------------------------------------------------------------ */ /* Function: ipf_checkcipso */ /* Returns: u_32_t - 0 = failure, else the doi from the header */ /* Parameters: fin(IO) - pointer to packet information */ /* s(I) - pointer to start of CIPSO option */ /* ol(I) - length of CIPSO option field */ /* */ /* This function extracts the Domain of Interpretation (DOI) field from the */ /* CIPSO header and returns it, whilst also storing the highest sensitivity */ /* value found in the fr_info_t structure. */ /* */ /* No attempt is made to extract the category bitmaps as these are defined */ /* by the user (rather than the protocol) and can be rather numerous on the */ /* end nodes. */ /* ------------------------------------------------------------------------ */ static u_32_t ipf_checkcipso(fr_info_t *fin, u_char *s, int ol) { ipf_main_softc_t *softc = fin->fin_main_soft; fr_ip_t *fi; u_32_t doi; u_char *t, tag, tlen, sensitivity; int len; if (ol < 6 || ol > 40) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_cipso_bad); fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_ol, fr_info_t *, fin, u_int, ol); return (0); } fi = &fin->fin_fi; fi->fi_sensitivity = 0; /* * The DOI field MUST be there. */ bcopy(s + 2, &doi, sizeof(doi)); t = (u_char *)s + 6; for (len = ol - 6; len >= 2; len -= tlen, t += tlen) { tag = *t; tlen = *(t + 1); if (tlen > len || tlen < 4 || tlen > 34) { LBUMPD(ipf_stats[fin->fin_out], fr_v4_cipso_tlen); fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_tlen, fr_info_t *, fin, u_int, tlen); return (0); } sensitivity = 0; /* * Tag numbers 0, 1, 2, 5 are laid out in the CIPSO Internet * draft (16 July 1992) that has expired.
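*/ /* E.g. a tag type 1 (bit-mapped categories) must have octet 2 equal to 0 */ /* and carries its sensitivity level in octet 3; any other value in octet */ /* 2 marks the packet FI_BAD below. */ /*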
*/ if (tag == 0) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_tag, fr_info_t *, fin, u_int, tag); continue; } else if (tag == 1) { if (*(t + 2) != 0) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_tag1_t2, fr_info_t *, fin, u_int, *(t + 2)); continue; } sensitivity = *(t + 3); /* Category bitmap for categories 0-239 */ } else if (tag == 4) { if (*(t + 2) != 0) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_tag4_t2, fr_info_t *, fin, u_int, *(t + 2)); continue; } sensitivity = *(t + 3); /* Enumerated categories, 16bits each, up to 15 */ } else if (tag == 5) { if (*(t + 2) != 0) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_tag5_t2, fr_info_t *, fin, u_int, *(t + 2)); continue; } sensitivity = *(t + 3); /* Range of categories (2*16bits), up to 7 pairs */ } else if (tag > 127) { /* Custom defined DOI */ ; } else { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkcipso_tag127, fr_info_t *, fin, u_int, tag); continue; } if (sensitivity > fi->fi_sensitivity) fi->fi_sensitivity = sensitivity; } return (doi); } /* ------------------------------------------------------------------------ */ /* Function: ipf_makefrip */ /* Returns: int - 0 == packet ok, -1 == packet freed */ /* Parameters: hlen(I) - length of IP packet header */ /* ip(I) - pointer to the IP header */ /* fin(IO) - pointer to packet information */ /* */ /* Compact the IP header into a structure which contains just the info */ /* which is useful for comparing IP headers, and store this information */ /* in the fr_info_t structure pointed to by fin. At present, it is assumed */ /* this function will be called with either an IPv4 or IPv6 packet. */ /* ------------------------------------------------------------------------ */ int ipf_makefrip(int hlen, ip_t *ip, fr_info_t *fin) { ipf_main_softc_t *softc = fin->fin_main_soft; int v; fin->fin_depth = 0; fin->fin_hlen = (u_short)hlen; fin->fin_ip = ip; fin->fin_rule = 0xffffffff; fin->fin_group[0] = -1; fin->fin_group[1] = '\0'; fin->fin_dp = (char *)ip + hlen; v = fin->fin_v; if (v == 4) { fin->fin_plen = ntohs(ip->ip_len); fin->fin_dlen = fin->fin_plen - hlen; ipf_pr_ipv4hdr(fin); #ifdef USE_INET6 } else if (v == 6) { fin->fin_plen = ntohs(((ip6_t *)ip)->ip6_plen); fin->fin_dlen = fin->fin_plen; fin->fin_plen += hlen; ipf_pr_ipv6hdr(fin); #endif } if (fin->fin_ip == NULL) { LBUMP(ipf_stats[fin->fin_out].fr_ip_freed); return (-1); } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_portcheck */ /* Returns: int - 1 == port matched, 0 == port match failed */ /* Parameters: frp(I) - pointer to port check `expression' */ /* pop(I) - port number to evaluate */ /* */ /* Perform a comparison of a port number against some other(s), using a */ /* structure with compare information stored in it. */ /* ------------------------------------------------------------------------ */ static inline int ipf_portcheck(frpcmp_t *frp, u_32_t pop) { int err = 1; u_32_t po; po = frp->frp_port; /* * Do the opposite test to that required and continue if that succeeds.
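*/ /* For example (hypothetical rule values): with frp_cmp == FR_INRANGE, */ /* frp_port == 1024 and frp_top == 2048, a port of 80 satisfies the */ /* opposite test "pop <= po" so 0 is returned, while a port of 1500 fails */ /* every opposite test and 1 is returned. */ /*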
*/ switch (frp->frp_cmp) { case FR_EQUAL : if (pop != po) /* EQUAL */ err = 0; break; case FR_NEQUAL : if (pop == po) /* NOTEQUAL */ err = 0; break; case FR_LESST : if (pop >= po) /* LESSTHAN */ err = 0; break; case FR_GREATERT : if (pop <= po) /* GREATERTHAN */ err = 0; break; case FR_LESSTE : if (pop > po) /* LT or EQ */ err = 0; break; case FR_GREATERTE : if (pop < po) /* GT or EQ */ err = 0; break; case FR_OUTRANGE : if (pop >= po && pop <= frp->frp_top) /* Out of range */ err = 0; break; case FR_INRANGE : if (pop <= po || pop >= frp->frp_top) /* In range */ err = 0; break; case FR_INCRANGE : if (pop < po || pop > frp->frp_top) /* Inclusive range */ err = 0; break; default : break; } return (err); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tcpudpchk */ /* Returns: int - 1 == protocol matched, 0 == check failed */ /* Parameters: fi(I) - pointer to packet information */ /* ft(I) - pointer to structure with comparison data */ /* */ /* Compares the current packet (assuming it is TCP/UDP) information with a */ /* structure containing information that we want to match against. */ /* ------------------------------------------------------------------------ */ int ipf_tcpudpchk(fr_ip_t *fi, frtuc_t *ft) { int err = 1; /* * Both ports should *always* be in the first fragment. * So far, I cannot find any cases where they cannot be. * * compare destination ports */ if (ft->ftu_dcmp) err = ipf_portcheck(&ft->ftu_dst, fi->fi_ports[1]); /* * compare source ports */ if (err && ft->ftu_scmp) err = ipf_portcheck(&ft->ftu_src, fi->fi_ports[0]); /* * If we don't have all the TCP/UDP header, then how can we * expect to do any sort of match on it? If we were looking for * TCP flags, then NO match. If not, then match (which should * satisfy the "short" class too). */ if (err && (fi->fi_p == IPPROTO_TCP)) { if (fi->fi_flx & FI_SHORT) return (!(ft->ftu_tcpf | ft->ftu_tcpfm)); /* * Match the flags? If not, abort this match. */ if (ft->ftu_tcpfm && ft->ftu_tcpf != (fi->fi_tcpf & ft->ftu_tcpfm)) { FR_DEBUG(("f. %#x & %#x != %#x\n", fi->fi_tcpf, ft->ftu_tcpfm, ft->ftu_tcpf)); err = 0; } } return (err); } /* ------------------------------------------------------------------------ */ /* Function: ipf_check_ipf */ /* Returns: int - 0 == match, else no match */ /* Parameters: fin(I) - pointer to packet information */ /* fr(I) - pointer to filter rule */ /* portcmp(I) - flag indicating whether to attempt matching on */ /* TCP/UDP port data. */ /* */ /* Check to see if a packet matches an IPFilter rule. Checks of addresses, */ /* port numbers, etc, for "standard" IPFilter rules are all orchestrated in */ /* this function. */ /* ------------------------------------------------------------------------ */ static inline int ipf_check_ipf(fr_info_t *fin, frentry_t *fr, int portcmp) { u_32_t *ld, *lm, *lip; fripf_t *fri; fr_ip_t *fi; int i; fi = &fin->fin_fi; fri = fr->fr_ipf; lip = (u_32_t *)fi; lm = (u_32_t *)&fri->fri_mip; ld = (u_32_t *)&fri->fri_ip; /* * first 32 bits to check cover: * IP version, TOS, TTL, protocol */ i = ((*lip & *lm) != *ld); FR_DEBUG(("0. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); if (i) return (1); /* * Next 32 bits is a constructed bitmask indicating which IP options * are present (if any) in this packet. */ lip++, lm++, ld++; i = ((*lip & *lm) != *ld); FR_DEBUG(("1.
%#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); if (i != 0) return (1); lip++, lm++, ld++; /* * Unrolled loops (4 each, of 32 bits) for address checks. */ /* * Check the source address. */ if (fr->fr_satype == FRI_LOOKUP) { i = (*fr->fr_srcfunc)(fin->fin_main_soft, fr->fr_srcptr, fi->fi_v, lip, fin->fin_plen); if (i == -1) return (1); lip += 3; lm += 3; ld += 3; } else { i = ((*lip & *lm) != *ld); FR_DEBUG(("2a. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); if (fi->fi_v == 6) { lip++, lm++, ld++; i |= ((*lip & *lm) != *ld); FR_DEBUG(("2b. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); lip++, lm++, ld++; i |= ((*lip & *lm) != *ld); FR_DEBUG(("2c. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); lip++, lm++, ld++; i |= ((*lip & *lm) != *ld); FR_DEBUG(("2d. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); } else { lip += 3; lm += 3; ld += 3; } } i ^= (fr->fr_flags & FR_NOTSRCIP) >> 6; if (i != 0) return (1); /* * Check the destination address. */ lip++, lm++, ld++; if (fr->fr_datype == FRI_LOOKUP) { i = (*fr->fr_dstfunc)(fin->fin_main_soft, fr->fr_dstptr, fi->fi_v, lip, fin->fin_plen); if (i == -1) return (1); lip += 3; lm += 3; ld += 3; } else { i = ((*lip & *lm) != *ld); FR_DEBUG(("3a. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); if (fi->fi_v == 6) { lip++, lm++, ld++; i |= ((*lip & *lm) != *ld); FR_DEBUG(("3b. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); lip++, lm++, ld++; i |= ((*lip & *lm) != *ld); FR_DEBUG(("3c. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); lip++, lm++, ld++; i |= ((*lip & *lm) != *ld); FR_DEBUG(("3d. %#08x & %#08x != %#08x\n", ntohl(*lip), ntohl(*lm), ntohl(*ld))); } else { lip += 3; lm += 3; ld += 3; } } i ^= (fr->fr_flags & FR_NOTDSTIP) >> 7; if (i != 0) return (1); /* * IP addresses matched. The next 32 bits contain the * mask of old IP header security & authentication bits. */ lip++, lm++, ld++; i = (*ld - (*lip & *lm)); FR_DEBUG(("4. %#08x & %#08x != %#08x\n", *lip, *lm, *ld)); /* * Next we have 32 bits of packet flags. */ lip++, lm++, ld++; i |= (*ld - (*lip & *lm)); FR_DEBUG(("5. %#08x & %#08x != %#08x\n", *lip, *lm, *ld)); if (i == 0) { /* * If a fragment, then only the first has what we're * looking for here... */ if (portcmp) { if (!ipf_tcpudpchk(&fin->fin_fi, &fr->fr_tuc)) i = 1; } else { if (fr->fr_dcmp || fr->fr_scmp || fr->fr_tcpf || fr->fr_tcpfm) i = 1; if (fr->fr_icmpm || fr->fr_icmp) { if (((fi->fi_p != IPPROTO_ICMP) && (fi->fi_p != IPPROTO_ICMPV6)) || fin->fin_off || (fin->fin_dlen < 2)) i = 1; else if ((fin->fin_data[0] & fr->fr_icmpm) != fr->fr_icmp) { FR_DEBUG(("i. %#x & %#x != %#x\n", fin->fin_data[0], fr->fr_icmpm, fr->fr_icmp)); i = 1; } } } } return (i); } /* ------------------------------------------------------------------------ */ /* Function: ipf_scanlist */ /* Returns: int - result flags of scanning filter list */ /* Parameters: fin(I) - pointer to packet information */ /* pass(I) - default result to return for filtering */ /* */ /* Check the input/output list of rules for a match to the current packet. */ /* If a match is found, the value of fr_flags from the rule becomes the */ /* return value and fin->fin_fr points to the matched rule. */ /* */ /* This function may be called recursively up to 16 times (built-in limit.) */ /* When unwinding, it should finish up with fin_depth as 0.
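*/ /* For example, when a matching rule names a group (fr_grphead != NULL), */ /* the group's own list is scanned by calling ipf_scanlist() again with */ /* fin_fr pointing at the group's first rule; the fin_depth counter stops */ /* such nesting at 16 levels. */ /*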
*/ /* */ /* Could be per interface, but this gets real nasty when you don't have, */ /* or can't easily change, the kernel source code. */ /* ------------------------------------------------------------------------ */ int ipf_scanlist(fr_info_t *fin, u_32_t pass) { ipf_main_softc_t *softc = fin->fin_main_soft; int rulen, portcmp, off, skip; struct frentry *fr, *fnext; u_32_t passt, passo; /* * Do not allow nesting deeper than 16 levels. */ if (fin->fin_depth >= 16) return (pass); fr = fin->fin_fr; /* * If there are no rules in this list, return now. */ if (fr == NULL) return (pass); skip = 0; portcmp = 0; fin->fin_depth++; fin->fin_fr = NULL; off = fin->fin_off; if ((fin->fin_flx & FI_TCPUDP) && (fin->fin_dlen > 3) && !off) portcmp = 1; for (rulen = 0; fr; fr = fnext, rulen++) { fnext = fr->fr_next; if (skip != 0) { FR_VERBOSE(("SKIP %d (%#x)\n", skip, fr->fr_flags)); skip--; continue; } /* * In all checks below, a null (zero) value in the * filter structure is taken to mean a wildcard. * * check that we are working for the right interface */ #ifdef _KERNEL if (fr->fr_ifa && fr->fr_ifa != fin->fin_ifp) continue; #else if (opts & (OPT_VERBOSE|OPT_DEBUG)) printf("\n"); FR_VERBOSE(("%c", FR_ISSKIP(pass) ? 's' : FR_ISPASS(pass) ? 'p' : FR_ISACCOUNT(pass) ? 'A' : FR_ISAUTH(pass) ? 'a' : (pass & FR_NOMATCH) ? 'n' : 'b')); if (fr->fr_ifa && fr->fr_ifa != fin->fin_ifp) continue; FR_VERBOSE((":i")); #endif switch (fr->fr_type) { case FR_T_IPF : case FR_T_IPF_BUILTIN : if (ipf_check_ipf(fin, fr, portcmp)) continue; break; #if defined(IPFILTER_BPF) case FR_T_BPFOPC : case FR_T_BPFOPC_BUILTIN : { u_char *mc; int wlen; if (*fin->fin_mp == NULL) continue; if (fin->fin_family != fr->fr_family) continue; mc = (u_char *)fin->fin_m; wlen = fin->fin_dlen + fin->fin_hlen; if (!bpf_filter(fr->fr_data, mc, wlen, 0)) continue; break; } #endif case FR_T_CALLFUNC_BUILTIN : { frentry_t *f; f = (*fr->fr_func)(fin, &pass); if (f != NULL) fr = f; else continue; break; } case FR_T_IPFEXPR : case FR_T_IPFEXPR_BUILTIN : if (fin->fin_family != fr->fr_family) continue; if (ipf_fr_matcharray(fin, fr->fr_data) == 0) continue; break; default : break; } if ((fin->fin_out == 0) && (fr->fr_nattag.ipt_num[0] != 0)) { if (fin->fin_nattag == NULL) continue; if (ipf_matchtag(&fr->fr_nattag, fin->fin_nattag) == 0) continue; } FR_VERBOSE(("=%d/%d.%d *", fr->fr_grhead, fr->fr_group, rulen)); passt = fr->fr_flags; /* * If the rule is a "call now" rule, then call the function * in the rule, if it exists, and use the results from that. * If the function pointer is bad, just make like we ignore * it, except for increasing the hit counter. */ if ((passt & FR_CALLNOW) != 0) { frentry_t *frs; ATOMIC_INC64(fr->fr_hits); if ((fr->fr_func == NULL) || (fr->fr_func == (ipfunc_t)-1)) continue; frs = fin->fin_fr; fin->fin_fr = fr; fr = (*fr->fr_func)(fin, &passt); if (fr == NULL) { fin->fin_fr = frs; continue; } passt = fr->fr_flags; } fin->fin_fr = fr; #ifdef IPFILTER_LOG /* * Just log this packet...
*/ if ((passt & FR_LOGMASK) == FR_LOG) { if (ipf_log_pkt(fin, passt) == -1) { if (passt & FR_LOGORBLOCK) { DT(frb_logfail); passt &= ~FR_CMDMASK; passt |= FR_BLOCK|FR_QUICK; fin->fin_reason = FRB_LOGFAIL; } } } #endif /* IPFILTER_LOG */ MUTEX_ENTER(&fr->fr_lock); fr->fr_bytes += (U_QUAD_T)fin->fin_plen; fr->fr_hits++; MUTEX_EXIT(&fr->fr_lock); fin->fin_rule = rulen; passo = pass; if (FR_ISSKIP(passt)) { skip = fr->fr_arg; continue; } else if (((passt & FR_LOGMASK) != FR_LOG) && ((passt & FR_LOGMASK) != FR_DECAPSULATE)) { pass = passt; } if (passt & (FR_RETICMP|FR_FAKEICMP)) fin->fin_icode = fr->fr_icode; if (fr->fr_group != -1) { (void) strncpy(fin->fin_group, FR_NAME(fr, fr_group), strlen(FR_NAME(fr, fr_group))); } else { fin->fin_group[0] = '\0'; } FR_DEBUG(("pass %#x/%#x/%x\n", passo, pass, passt)); if (fr->fr_grphead != NULL) { fin->fin_fr = fr->fr_grphead->fg_start; FR_VERBOSE(("group %s\n", FR_NAME(fr, fr_grhead))); if (FR_ISDECAPS(passt)) passt = ipf_decaps(fin, pass, fr->fr_icode); else passt = ipf_scanlist(fin, pass); if (fin->fin_fr == NULL) { fin->fin_rule = rulen; if (fr->fr_group != -1) (void) strncpy(fin->fin_group, fr->fr_names + fr->fr_group, strlen(fr->fr_names + fr->fr_group)); fin->fin_fr = fr; passt = pass; } pass = passt; } if (pass & FR_QUICK) { /* * Finally, if we've asked to track state for this * packet, set it up. Add state for "quick" rules * here so that if the action fails we can consider * the rule to "not match" and keep on processing * filter rules. */ if ((pass & FR_KEEPSTATE) && !FR_ISAUTH(pass) && !(fin->fin_flx & FI_STATE)) { int out = fin->fin_out; fin->fin_fr = fr; if (ipf_state_add(softc, fin, NULL, 0) == 0) { LBUMPD(ipf_stats[out], fr_ads); } else { LBUMPD(ipf_stats[out], fr_bads); pass = passo; continue; } } break; } } fin->fin_depth--; return (pass); } /* ------------------------------------------------------------------------ */ /* Function: ipf_acctpkt */ /* Returns: frentry_t* - always returns NULL */ /* Parameters: fin(I) - pointer to packet information */ /* passp(IO) - pointer to current/new filter decision (unused) */ /* */ /* Checks a packet against accounting rules, if there are any for the given */ /* IP protocol version. */ /* */ /* N.B.: this function returns NULL to match the prototype used by other */ /* functions called from the IPFilter "mainline" in ipf_check(). */ /* ------------------------------------------------------------------------ */ frentry_t * ipf_acctpkt(fr_info_t *fin, u_32_t *passp) { ipf_main_softc_t *softc = fin->fin_main_soft; char group[FR_GROUPLEN]; frentry_t *fr, *frsave; u_32_t pass, rulen; passp = passp; fr = softc->ipf_acct[fin->fin_out][softc->ipf_active]; if (fr != NULL) { frsave = fin->fin_fr; bcopy(fin->fin_group, group, FR_GROUPLEN); rulen = fin->fin_rule; fin->fin_fr = fr; pass = ipf_scanlist(fin, FR_NOMATCH); if (FR_ISACCOUNT(pass)) { LBUMPD(ipf_stats[0], fr_acct); } fin->fin_fr = frsave; bcopy(group, fin->fin_group, FR_GROUPLEN); fin->fin_rule = rulen; } return (NULL); } /* ------------------------------------------------------------------------ */ /* Function: ipf_firewall */ /* Returns: frentry_t* - returns pointer to matched rule, if no matches */ /* were found, returns NULL. */ /* Parameters: fin(I) - pointer to packet information */ /* passp(IO) - pointer to current/new filter decision (unused) */ /* */ /* Applies an appropriate set of firewall rules to the packet, to see if */ /* there are any matches. The first check is to see if a match can be seen */ /* in the cache. 
If not, then search an appropriate list of rules. Once a */ /* matching rule is found, take any appropriate actions as defined by the */ /* rule - except logging. */ /* ------------------------------------------------------------------------ */ static frentry_t * ipf_firewall(fr_info_t *fin, u_32_t *passp) { ipf_main_softc_t *softc = fin->fin_main_soft; frentry_t *fr; u_32_t pass; int out; out = fin->fin_out; pass = *passp; /* * This rule cache will only affect packets that are not being * statefully filtered. */ fin->fin_fr = softc->ipf_rules[out][softc->ipf_active]; if (fin->fin_fr != NULL) pass = ipf_scanlist(fin, softc->ipf_pass); if ((pass & FR_NOMATCH)) { LBUMPD(ipf_stats[out], fr_nom); } fr = fin->fin_fr; /* * Apply packets per second rate-limiting to a rule as required. */ if ((fr != NULL) && (fr->fr_pps != 0) && !ppsratecheck(&fr->fr_lastpkt, &fr->fr_curpps, fr->fr_pps)) { DT2(frb_ppsrate, fr_info_t *, fin, frentry_t *, fr); pass &= ~(FR_CMDMASK|FR_RETICMP|FR_RETRST); pass |= FR_BLOCK; LBUMPD(ipf_stats[out], fr_ppshit); fin->fin_reason = FRB_PPSRATE; } /* * If we fail to add a packet to the authorization queue, then we * drop the packet later. However, if it was added then pretend * we've dropped it already. */ if (FR_ISAUTH(pass)) { if (ipf_auth_new(fin->fin_m, fin) != 0) { DT1(frb_authnew, fr_info_t *, fin); fin->fin_m = *fin->fin_mp = NULL; fin->fin_reason = FRB_AUTHNEW; fin->fin_error = 0; } else { IPFERROR(1); fin->fin_error = ENOSPC; } } if ((fr != NULL) && (fr->fr_func != NULL) && (fr->fr_func != (ipfunc_t)-1) && !(pass & FR_CALLNOW)) (void) (*fr->fr_func)(fin, &pass); /* * If a rule is a pre-auth rule, check again in the list of rules * loaded for authenticated use. It does not particularly matter * if this search fails because a "preauth" result, from a rule, * is treated as "not a pass", hence the packet is blocked. */ if (FR_ISPREAUTH(pass)) { pass = ipf_auth_pre_scanlist(softc, fin, pass); } /* * If the rule has "keep frag" and the packet is actually a fragment, * then create a fragment state entry. */ if (pass & FR_KEEPFRAG) { if (fin->fin_flx & FI_FRAG) { if (ipf_frag_new(softc, fin, pass) == -1) { LBUMP(ipf_stats[out].fr_bnfr); } else { LBUMP(ipf_stats[out].fr_nfr); } } else { LBUMP(ipf_stats[out].fr_cfr); } } fr = fin->fin_fr; *passp = pass; return (fr); } /* ------------------------------------------------------------------------ */ /* Function: ipf_check */ /* Returns: int - 0 == packet allowed through, */ /* User space: */ /* -1 == packet blocked */ /* 1 == packet not matched */ /* -2 == requires authentication */ /* Kernel: */ /* > 0 == filter error # for packet */ /* Parameters: ctx(I) - pointer to the instance context */ /* ip(I) - pointer to start of IPv4/6 packet */ /* hlen(I) - length of header */ /* ifp(I) - pointer to interface this packet is on */ /* out(I) - 0 == packet going in, 1 == packet going out */ /* mp(IO) - pointer to caller's buffer pointer that holds this */ /* IP packet. */ /* Solaris: */ /* qpi(I) - pointer to STREAMS queue information for this */ /* interface & direction. */ /* */ /* ipf_check() is the master function for all IPFilter packet processing. */ /* It orchestrates: Network Address Translation (NAT), checking for packet */ /* authorisation (or pre-authorisation), presence of related state info., */ /* generating log entries, IP packet accounting, routing of packets as */ /* directed by firewall rules and of course whether or not to allow the */ /* packet to be further processed by the kernel.
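*/ /* */ /* As a rough sketch of the order below: sanity checks and pullups, */ /* inbound NAT, the auth table, fragment and state table lookups, the rule */ /* scan itself, state creation, accounting and outbound NAT, logging, any */ /* return-ICMP/return-RST generation, then dup-to/fastroute handling and */ /* the final verdict. */ /*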
*/ /* */ /* For packets blocked, the contents of "mp" will be NULL'd and the buffer */ /* freed. Packets passed may be returned with the pointer pointed to */ /* by "mp" changed to a new buffer. */ /* ------------------------------------------------------------------------ */ int ipf_check(void *ctx, ip_t *ip, int hlen, struct ifnet *ifp, int out #if defined(_KERNEL) && SOLARIS , void* qif, mb_t **mp) #else , mb_t **mp) #endif { /* * The above really sucks, but short of writing a diff */ ipf_main_softc_t *softc = ctx; fr_info_t frinfo; fr_info_t *fin = &frinfo; u_32_t pass = softc->ipf_pass; frentry_t *fr = NULL; int v = IP_V(ip); mb_t *mc = NULL; mb_t *m; /* * The first part of ipf_check() deals with making sure that what goes * into the filtering engine makes some sense. Information about the * packet is distilled, collected into a fr_info_t structure, and then * an attempt is made to ensure the buffer the packet is in is big * enough to hold all the required packet headers. */ #ifdef _KERNEL # if SOLARIS qpktinfo_t *qpi = qif; # ifdef __sparc if ((u_int)ip & 0x3) return (2); # endif # else SPL_INT(s); # endif if (softc->ipf_running <= 0) { return (0); } bzero((char *)fin, sizeof(*fin)); # if SOLARIS if (qpi->qpi_flags & QF_BROADCAST) fin->fin_flx |= FI_MBCAST|FI_BROADCAST; if (qpi->qpi_flags & QF_MULTICAST) fin->fin_flx |= FI_MBCAST|FI_MULTICAST; m = qpi->qpi_m; fin->fin_qfm = m; fin->fin_qpi = qpi; # else /* SOLARIS */ m = *mp; # if defined(M_MCAST) if ((m->m_flags & M_MCAST) != 0) fin->fin_flx |= FI_MBCAST|FI_MULTICAST; # endif # if defined(M_MLOOP) if ((m->m_flags & M_MLOOP) != 0) fin->fin_flx |= FI_MBCAST|FI_MULTICAST; # endif # if defined(M_BCAST) if ((m->m_flags & M_BCAST) != 0) fin->fin_flx |= FI_MBCAST|FI_BROADCAST; # endif # ifdef M_CANFASTFWD /* * XXX For now, IP Filter and fast-forwarding of cached flows * XXX are mutually exclusive. Eventually, IP Filter should * XXX get a "can-fast-forward" filter rule. */ m->m_flags &= ~M_CANFASTFWD; # endif /* M_CANFASTFWD */ # if defined(CSUM_DELAY_DATA) && !defined(__FreeBSD__) /* * disable delayed checksums. */ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } # endif /* CSUM_DELAY_DATA */ # endif /* SOLARIS */ #else bzero((char *)fin, sizeof(*fin)); m = *mp; # if defined(M_MCAST) if ((m->m_flags & M_MCAST) != 0) fin->fin_flx |= FI_MBCAST|FI_MULTICAST; # endif # if defined(M_MLOOP) if ((m->m_flags & M_MLOOP) != 0) fin->fin_flx |= FI_MBCAST|FI_MULTICAST; # endif # if defined(M_BCAST) if ((m->m_flags & M_BCAST) != 0) fin->fin_flx |= FI_MBCAST|FI_BROADCAST; # endif #endif /* _KERNEL */ fin->fin_v = v; fin->fin_m = m; fin->fin_ip = ip; fin->fin_mp = mp; fin->fin_out = out; fin->fin_ifp = ifp; fin->fin_error = ENETUNREACH; fin->fin_hlen = (u_short)hlen; fin->fin_dp = (char *)ip + hlen; fin->fin_main_soft = softc; fin->fin_ipoff = (char *)ip - MTOD(m, char *); SPL_NET(s); #ifdef USE_INET6 if (v == 6) { LBUMP(ipf_stats[out].fr_ipv6); /* * Jumbo grams are quite likely too big for internal buffer * structures to handle comfortably, for now, so just drop * them.
*/ if (((ip6_t *)ip)->ip6_plen == 0) { DT1(frb_jumbo, ip6_t *, (ip6_t *)ip); pass = FR_BLOCK|FR_NOMATCH; fin->fin_reason = FRB_JUMBO; goto finished; } fin->fin_family = AF_INET6; } else #endif { fin->fin_family = AF_INET; } if (ipf_makefrip(hlen, ip, fin) == -1) { DT1(frb_makefrip, fr_info_t *, fin); pass = FR_BLOCK|FR_NOMATCH; fin->fin_reason = FRB_MAKEFRIP; goto finished; } /* * For at least IPv6 packets, if a m_pullup() fails then this pointer * becomes NULL and so we have no packet to free. */ if (*fin->fin_mp == NULL) goto finished; if (!out) { if (v == 4) { if (softc->ipf_chksrc && !ipf_verifysrc(fin)) { LBUMPD(ipf_stats[0], fr_v4_badsrc); fin->fin_flx |= FI_BADSRC; } if (fin->fin_ip->ip_ttl < softc->ipf_minttl) { LBUMPD(ipf_stats[0], fr_v4_badttl); fin->fin_flx |= FI_LOWTTL; } } #ifdef USE_INET6 else if (v == 6) { if (((ip6_t *)ip)->ip6_hlim < softc->ipf_minttl) { LBUMPD(ipf_stats[0], fr_v6_badttl); fin->fin_flx |= FI_LOWTTL; } } #endif } if (fin->fin_flx & FI_SHORT) { LBUMPD(ipf_stats[out], fr_short); } READ_ENTER(&softc->ipf_mutex); if (!out) { switch (fin->fin_v) { case 4 : if (ipf_nat_checkin(fin, &pass) == -1) { goto filterdone; } break; #ifdef USE_INET6 case 6 : if (ipf_nat6_checkin(fin, &pass) == -1) { goto filterdone; } break; #endif default : break; } } /* * Check auth now. * If a packet is found in the auth table, then skip checking * the access lists for permission but we do need to consider * the result as if it were from the ACL's. In addition, being * found in the auth table means it has been seen before, so do * not pass it through accounting (again), lest it be counted twice. */ fr = ipf_auth_check(fin, &pass); if (!out && (fr == NULL)) (void) ipf_acctpkt(fin, NULL); if (fr == NULL) { if ((fin->fin_flx & FI_FRAG) != 0) fr = ipf_frag_known(fin, &pass); if (fr == NULL) fr = ipf_state_check(fin, &pass); } if ((pass & FR_NOMATCH) || (fr == NULL)) fr = ipf_firewall(fin, &pass); /* * If we've asked to track state for this packet, set it up. * Here rather than ipf_firewall because ipf_checkauth may decide * to return a packet for "keep state" */ if ((pass & FR_KEEPSTATE) && (fin->fin_m != NULL) && !(fin->fin_flx & FI_STATE)) { if (ipf_state_add(softc, fin, NULL, 0) == 0) { LBUMP(ipf_stats[out].fr_ads); } else { LBUMP(ipf_stats[out].fr_bads); if (FR_ISPASS(pass)) { DT(frb_stateadd); pass &= ~FR_CMDMASK; pass |= FR_BLOCK; fin->fin_reason = FRB_STATEADD; } } } fin->fin_fr = fr; if ((fr != NULL) && !(fin->fin_flx & FI_STATE)) { fin->fin_dif = &fr->fr_dif; fin->fin_tif = &fr->fr_tifs[fin->fin_rev]; } /* * Only count/translate packets which will be passed on, out the * interface. */ if (out && FR_ISPASS(pass)) { (void) ipf_acctpkt(fin, NULL); switch (fin->fin_v) { case 4 : if (ipf_nat_checkout(fin, &pass) == -1) { ; } else if ((softc->ipf_update_ipid != 0) && (v == 4)) { if (ipf_updateipid(fin) == -1) { DT(frb_updateipid); LBUMP(ipf_stats[1].fr_ipud); pass &= ~FR_CMDMASK; pass |= FR_BLOCK; fin->fin_reason = FRB_UPDATEIPID; } else { LBUMP(ipf_stats[0].fr_ipud); } } break; #ifdef USE_INET6 case 6 : (void) ipf_nat6_checkout(fin, &pass); break; #endif default : break; } } filterdone: #ifdef IPFILTER_LOG if ((softc->ipf_flags & FF_LOGGING) || (pass & FR_LOGMASK)) { (void) ipf_dolog(fin, &pass); } #endif /* * The FI_STATE flag is cleared here so that calling ipf_state_check * will work when called from inside of fr_fastroute. Although * there is a similar flag, FI_NATED, for NAT, it does not have the * same impact on code execution.
*/ fin->fin_flx &= ~FI_STATE; #if defined(FASTROUTE_RECURSION) /* * Up the reference on fr_lock and exit ipf_mutex. The generation of * a packet below can sometimes cause a recursive call into IPFilter. * On those platforms where that does happen, we need to hang onto * the filter rule just in case someone decides to remove or flush it * in the meantime. */ if (fr != NULL) { MUTEX_ENTER(&fr->fr_lock); fr->fr_ref++; MUTEX_EXIT(&fr->fr_lock); } RWLOCK_EXIT(&softc->ipf_mutex); #endif if ((pass & FR_RETMASK) != 0) { /* * Should we return an ICMP packet to indicate error * status passing through the packet filter? * WARNING: ICMP error packets AND TCP RST packets should * ONLY be sent in response to incoming packets. Sending * them in response to outbound packets can result in a * panic on some operating systems. */ if (!out) { if (pass & FR_RETICMP) { int dst; if ((pass & FR_RETMASK) == FR_FAKEICMP) dst = 1; else dst = 0; (void) ipf_send_icmp_err(ICMP_UNREACH, fin, dst); LBUMP(ipf_stats[0].fr_ret); } else if (((pass & FR_RETMASK) == FR_RETRST) && !(fin->fin_flx & FI_SHORT)) { if (((fin->fin_flx & FI_OOW) != 0) || (ipf_send_reset(fin) == 0)) { LBUMP(ipf_stats[1].fr_ret); } } /* * When using return-* with auth rules, the auth code * takes over disposing of this packet. */ if (FR_ISAUTH(pass) && (fin->fin_m != NULL)) { DT1(frb_authcapture, fr_info_t *, fin); fin->fin_m = *fin->fin_mp = NULL; fin->fin_reason = FRB_AUTHCAPTURE; m = NULL; } } else { if (pass & FR_RETRST) { fin->fin_error = ECONNRESET; } } } /* * After the above so that ICMP unreachables and TCP RSTs get * created properly. */ if (FR_ISBLOCK(pass) && (fin->fin_flx & FI_NEWNAT)) ipf_nat_uncreate(fin); /* * If we didn't drop off the bottom of the list of rules (and thus * the 'current' rule fr is not NULL), then we may have some extra * instructions about what to do with a packet. * Once we're finished return to our caller, freeing the packet if * we are dropping it. */ if (fr != NULL) { frdest_t *fdp; /* * Generate a duplicated packet first because ipf_fastroute * can lead to fin_m being free'd... not good.
*/ fdp = fin->fin_dif; if ((fdp != NULL) && (fdp->fd_ptr != NULL) && (fdp->fd_ptr != (void *)-1)) { mc = M_COPY(fin->fin_m); if (mc != NULL) ipf_fastroute(mc, &mc, fin, fdp); } fdp = fin->fin_tif; if (!out && (pass & FR_FASTROUTE)) { /* * For fastroute rule, no destination interface defined * so pass NULL as the frdest_t parameter */ (void) ipf_fastroute(fin->fin_m, mp, fin, NULL); m = *mp = NULL; } else if ((fdp != NULL) && (fdp->fd_ptr != NULL) && (fdp->fd_ptr != (struct ifnet *)-1)) { /* this is for to rules: */ ipf_fastroute(fin->fin_m, mp, fin, fdp); m = *mp = NULL; } #if defined(FASTROUTE_RECURSION) (void) ipf_derefrule(softc, &fr); #endif } #if !defined(FASTROUTE_RECURSION) RWLOCK_EXIT(&softc->ipf_mutex); #endif finished: if (!FR_ISPASS(pass)) { LBUMP(ipf_stats[out].fr_block); if (*mp != NULL) { #ifdef _KERNEL FREE_MB_T(*mp); #endif m = *mp = NULL; } } else { LBUMP(ipf_stats[out].fr_pass); } SPL_X(s); if (fin->fin_m == NULL && fin->fin_flx & FI_BAD && fin->fin_reason == FRB_PULLUP) { /* m_pullup() has freed the mbuf */ LBUMP(ipf_stats[out].fr_blocked[fin->fin_reason]); return (-1); } #ifdef _KERNEL if (FR_ISPASS(pass)) return (0); LBUMP(ipf_stats[out].fr_blocked[fin->fin_reason]); return (fin->fin_error); #else /* _KERNEL */ if (*mp != NULL) (*mp)->mb_ifp = fin->fin_ifp; blockreason = fin->fin_reason; FR_VERBOSE(("fin_flx %#x pass %#x ", fin->fin_flx, pass)); /*if ((pass & FR_CMDMASK) == (softc->ipf_pass & FR_CMDMASK))*/ if ((pass & FR_NOMATCH) != 0) return (1); if ((pass & FR_RETMASK) != 0) switch (pass & FR_RETMASK) { case FR_RETRST : return (3); case FR_RETICMP : return (4); case FR_FAKEICMP : return (5); } switch (pass & FR_CMDMASK) { case FR_PASS : return (0); case FR_BLOCK : return (-1); case FR_AUTH : return (-2); case FR_ACCOUNT : return (-3); case FR_PREAUTH : return (-4); } return (2); #endif /* _KERNEL */ } #ifdef IPFILTER_LOG /* ------------------------------------------------------------------------ */ /* Function: ipf_dolog */ /* Returns: frentry_t* - returns contents of fin_fr (no change made) */ /* Parameters: fin(I) - pointer to packet information */ /* passp(IO) - pointer to current/new filter decision (unused) */ /* */ /* Checks flags set to see how a packet should be logged, if it is to be */ /* logged. Adjust statistics based on its success or not. */ /* ------------------------------------------------------------------------ */ frentry_t * ipf_dolog(fr_info_t *fin, u_32_t *passp) { ipf_main_softc_t *softc = fin->fin_main_soft; u_32_t pass; int out; out = fin->fin_out; pass = *passp; if ((softc->ipf_flags & FF_LOGNOMATCH) && (pass & FR_NOMATCH)) { pass |= FF_LOGNOMATCH; LBUMPD(ipf_stats[out], fr_npkl); goto logit; } else if (((pass & FR_LOGMASK) == FR_LOGP) || (FR_ISPASS(pass) && (softc->ipf_flags & FF_LOGPASS))) { if ((pass & FR_LOGMASK) != FR_LOGP) pass |= FF_LOGPASS; LBUMPD(ipf_stats[out], fr_ppkl); goto logit; } else if (((pass & FR_LOGMASK) == FR_LOGB) || (FR_ISBLOCK(pass) && (softc->ipf_flags & FF_LOGBLOCK))) { if ((pass & FR_LOGMASK) != FR_LOGB) pass |= FF_LOGBLOCK; LBUMPD(ipf_stats[out], fr_bpkl); logit: if (ipf_log_pkt(fin, pass) == -1) { /* * If the "or-block" option has been used then * block the packet if we failed to log it. 
*/ if ((pass & FR_LOGORBLOCK) && FR_ISPASS(pass)) { DT1(frb_logfail2, u_int, pass); pass &= ~FR_CMDMASK; pass |= FR_BLOCK; fin->fin_reason = FRB_LOGFAIL2; } } *passp = pass; } return (fin->fin_fr); } #endif /* IPFILTER_LOG */ /* ------------------------------------------------------------------------ */ /* Function: ipf_cksum */ /* Returns: u_short - IP header checksum */ /* Parameters: addr(I) - pointer to start of buffer to checksum */ /* len(I) - length of buffer in bytes */ /* */ /* Calculate the one's complement 16 bit checksum of the buffer passed. */ /* */ /* N.B.: addr should be 16bit aligned. */ /* ------------------------------------------------------------------------ */ u_short ipf_cksum(u_short *addr, int len) { u_32_t sum = 0; for (sum = 0; len > 1; len -= 2) sum += *addr++; /* mop up an odd byte, if necessary */ if (len == 1) sum += *(u_char *)addr; /* * add back carry outs from top 16 bits to low 16 bits */ sum = (sum >> 16) + (sum & 0xffff); /* add hi 16 to low 16 */ sum += (sum >> 16); /* add carry */ return (u_short)(~sum); } /* ------------------------------------------------------------------------ */ /* Function: fr_cksum */ /* Returns: u_short - layer 4 checksum */ /* Parameters: fin(I) - pointer to packet information */ /* ip(I) - pointer to IP header */ /* l4proto(I) - protocol to calculate checksum for */ /* l4hdr(I) - pointer to layer 4 header */ /* */ /* Calculates the layer 4 checksum for the packet held in "m", using the */ /* data in the IP header "ip" to seed it. */ /* */ /* NB: This function assumes we've pullup'd enough for all of the IP header */ /* and the TCP header. We also assume that data blocks aren't allocated in */ /* odd sizes. */ /* */ /* Expects ip_len and ip_off to be in network byte order when called. */ /* ------------------------------------------------------------------------ */ u_short fr_cksum(fr_info_t *fin, ip_t *ip, int l4proto, void *l4hdr) { u_short *sp, slen, sumsave, *csump; u_int sum, sum2; int hlen; int off; #ifdef USE_INET6 ip6_t *ip6; #endif csump = NULL; sumsave = 0; sp = NULL; slen = 0; hlen = 0; sum = 0; sum = htons((u_short)l4proto); /* * Add up IP Header portion */ #ifdef USE_INET6 if (IP_V(ip) == 4) { #endif hlen = IP_HL(ip) << 2; off = hlen; sp = (u_short *)&ip->ip_src; sum += *sp++; /* ip_src */ sum += *sp++; sum += *sp++; /* ip_dst */ sum += *sp++; slen = fin->fin_plen - off; sum += htons(slen); #ifdef USE_INET6 } else if (IP_V(ip) == 6) { mb_t *m; m = fin->fin_m; ip6 = (ip6_t *)ip; off = ((caddr_t)ip6 - m->m_data) + sizeof(struct ip6_hdr); int len = ntohs(ip6->ip6_plen) - (off - sizeof(*ip6)); return (ipf_pcksum6(m, ip6, off, len)); } else { return (0xffff); } #endif switch (l4proto) { case IPPROTO_UDP : csump = &((udphdr_t *)l4hdr)->uh_sum; break; case IPPROTO_TCP : csump = &((tcphdr_t *)l4hdr)->th_sum; break; case IPPROTO_ICMP : csump = &((icmphdr_t *)l4hdr)->icmp_cksum; sum = 0; /* Pseudo-checksum is not included */ break; #ifdef USE_INET6 case IPPROTO_ICMPV6 : csump = &((struct icmp6_hdr *)l4hdr)->icmp6_cksum; break; #endif default : break; } if (csump != NULL) { sumsave = *csump; *csump = 0; } sum2 = ipf_pcksum(fin, off, sum); if (csump != NULL) *csump = sumsave; return (sum2); } /* ------------------------------------------------------------------------ */ /* Function: ipf_findgroup */ /* Returns: frgroup_t * - NULL = group not found, else pointer to group */ /* Parameters: softc(I) - pointer to soft context main structure */ /* group(I) - group name to search for */ /* unit(I) - device to which this group belongs
/* ------------------------------------------------------------------------ */
/* Function:    ipf_findgroup                                               */
/* Returns:     frgroup_t * - NULL = group not found, else pointer to group */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              group(I) - group name to search for                         */
/*              unit(I)  - device to which this group belongs               */
/*              set(I)   - which set of rules (active/inactive) this is     */
/*              fgpp(O)  - pointer to place to store pointer to the pointer */
/*                         to where to add the next (last) group or where   */
/*                         to delete group from.                            */
/*                                                                          */
/* Search amongst the defined groups for a particular group name.           */
/* ------------------------------------------------------------------------ */
frgroup_t *
ipf_findgroup(ipf_main_softc_t *softc, char *group, minor_t unit, int set,
	frgroup_t ***fgpp)
{
	frgroup_t *fg, **fgp;

	/*
	 * Which list of groups to search in is dependent on which list of
	 * rules are being operated on.
	 */
	fgp = &softc->ipf_groups[unit][set];

	while ((fg = *fgp) != NULL) {
		if (strncmp(group, fg->fg_name, FR_GROUPLEN) == 0)
			break;
		else
			fgp = &fg->fg_next;
	}
	if (fgpp != NULL)
		*fgpp = fgp;
	return (fg);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_group_add                                               */
/* Returns:     frgroup_t * - NULL == did not create group,                 */
/*                            != NULL == pointer to the group               */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              group(I) - group name to add                                */
/*              head(I)  - rule pointer that is using this as the head      */
/*              flags(I) - rule flags which describe the type of rule it is */
/*              unit(I)  - device to which this group will belong           */
/*              set(I)   - which set of rules (active/inactive) this is     */
/* Write Locks: ipf_mutex                                                   */
/*                                                                          */
/* Add a new group head, or if it already exists, increase the reference    */
/* count to it.                                                             */
/* ------------------------------------------------------------------------ */
frgroup_t *
ipf_group_add(ipf_main_softc_t *softc, char *group, void *head, u_32_t flags,
	minor_t unit, int set)
{
	frgroup_t *fg, **fgp;
	u_32_t gflags;

	if (group == NULL)
		return (NULL);

	if (unit == IPL_LOGIPF && *group == '\0')
		return (NULL);

	fgp = NULL;
	gflags = flags & FR_INOUT;

	fg = ipf_findgroup(softc, group, unit, set, &fgp);
	if (fg != NULL) {
		if (fg->fg_head == NULL && head != NULL)
			fg->fg_head = head;
		if (fg->fg_flags == 0)
			fg->fg_flags = gflags;
		else if (gflags != fg->fg_flags)
			return (NULL);
		fg->fg_ref++;
		return (fg);
	}
	KMALLOC(fg, frgroup_t *);
	if (fg != NULL) {
		fg->fg_head = head;
		fg->fg_start = NULL;
		fg->fg_next = *fgp;
		bcopy(group, fg->fg_name, strlen(group) + 1);
		fg->fg_flags = gflags;
		fg->fg_ref = 1;
		fg->fg_set = &softc->ipf_groups[unit][set];
		*fgp = fg;
	}
	return (fg);
}
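/*
 * Illustrative sketch (not compiled): how a group comes into being.
 * Loading a "head" rule calls ipf_group_add() and later lookups reach
 * the same structure through ipf_findgroup().  example_group_create()
 * and the group name "100" are hypothetical; the functions and flags
 * are the ones defined above.
 */
#if 0
static frgroup_t *
example_group_create(ipf_main_softc_t *softc, frentry_t *fp)
{
	/* Create group "100" for this head rule, or take a reference. */
	return (ipf_group_add(softc, "100", fp, fp->fr_flags & FR_INOUT,
	    IPL_LOGIPF, softc->ipf_active));
}
#endif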
/* ------------------------------------------------------------------------ */
/* Function:    ipf_group_del                                               */
/* Returns:     Nil                                                         */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              group(I) - group to delete                                  */
/*              fr(I)    - filter rule from which group is referenced       */
/* Write Locks: ipf_mutex                                                   */
/*                                                                          */
/* This function is called whenever a reference to a group is to be dropped */
/* and thus its reference count needs to be lowered and the group free'd if */
/* the reference count reaches zero. Passing in fr is really for the sole   */
/* purpose of knowing when the head rule is being deleted.                  */
/* ------------------------------------------------------------------------ */
void
ipf_group_del(ipf_main_softc_t *softc, frgroup_t *group, frentry_t *fr)
{

	if (group->fg_head == fr)
		group->fg_head = NULL;

	group->fg_ref--;
	if ((group->fg_ref == 0) &&
	    (group->fg_start == NULL))
		ipf_group_free(group);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_group_free                                              */
/* Returns:     Nil                                                         */
/* Parameters:  group(I) - pointer to filter rule group                     */
/*                                                                          */
/* Remove the group from the list of groups and free it.                    */
/* ------------------------------------------------------------------------ */
static void
ipf_group_free(frgroup_t *group)
{
	frgroup_t **gp;

	for (gp = group->fg_set; *gp != NULL; gp = &(*gp)->fg_next) {
		if (*gp == group) {
			*gp = group->fg_next;
			break;
		}
	}
	KFREE(group);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_group_flush                                             */
/* Returns:     int - number of rules flushed from the group                */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              group(I) - pointer to filter rule group                     */
/*                                                                          */
/* Remove all of the rules that currently are listed under the given group. */
/* ------------------------------------------------------------------------ */
static int
ipf_group_flush(ipf_main_softc_t *softc, frgroup_t *group)
{
	int gone = 0;

	(void) ipf_flushlist(softc, &gone, &group->fg_start);

	return (gone);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_getrulen                                                */
/* Returns:     frentry_t * - NULL == not found, else pointer to rule n     */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              unit(I)  - device for which to count the rule's number      */
/*              flags(I) - which set of rules to find the rule in           */
/*              group(I) - group name                                       */
/*              n(I)     - rule number to find                              */
/*                                                                          */
/* Find rule # n in group # g and return a pointer to it.  Return NULL if   */
/* group # g doesn't exist or there are fewer than n rules in the group.    */
/* ------------------------------------------------------------------------ */
frentry_t *
ipf_getrulen(ipf_main_softc_t *softc, int unit, char *group, u_32_t n)
{
	frentry_t *fr;
	frgroup_t *fg;

	fg = ipf_findgroup(softc, group, unit, softc->ipf_active, NULL);
	if (fg == NULL)
		return (NULL);
	for (fr = fg->fg_start; fr && n; fr = fr->fr_next, n--)
		;
	if (n != 0)
		return (NULL);
	return (fr);
}
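/*
 * Illustrative sketch (not compiled): fetching a rule by position with
 * ipf_getrulen().  Note that, as the loop above is written, n counts
 * from zero: n == 0 returns the first rule of the group.
 * example_first_rule() and group "100" are hypothetical.
 */
#if 0
static frentry_t *
example_first_rule(ipf_main_softc_t *softc)
{
	/* First rule of group "100" in the active ipf rule set. */
	return (ipf_getrulen(softc, IPL_LOGIPF, "100", 0));
}
#endif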
/* ------------------------------------------------------------------------ */
/* Function:    ipf_flushlist                                               */
/* Returns:     int - >= 0 - number of flushed rules                        */
/* Parameters:  softc(I)   - pointer to soft context main structure         */
/*              nfreedp(O) - pointer to int where flush count is stored     */
/*              listp(I)   - pointer to list to flush pointer               */
/* Write Locks: ipf_mutex                                                   */
/*                                                                          */
/* Recursively flush rules from the list, descending groups as they are     */
/* encountered.  If a rule is the head of a group and it has lost all its   */
/* group members, then also delete the group reference.  nfreedp is needed  */
/* to store the accumulating count of rules removed, whereas the returned   */
/* value is just the number removed from the current list.  The latter is   */
/* needed to correctly adjust reference counts on rules that define groups. */
/*                                                                          */
/* NOTE: Rules not loaded from user space cannot be flushed.                */
/* ------------------------------------------------------------------------ */
static int
ipf_flushlist(ipf_main_softc_t *softc, int *nfreedp, frentry_t **listp)
{
	int freed = 0;
	frentry_t *fp;

	while ((fp = *listp) != NULL) {
		if ((fp->fr_type & FR_T_BUILTIN) ||
		    !(fp->fr_flags & FR_COPIED)) {
			listp = &fp->fr_next;
			continue;
		}
		*listp = fp->fr_next;
		if (fp->fr_next != NULL)
			fp->fr_next->fr_pnext = fp->fr_pnext;
		fp->fr_pnext = NULL;

		if (fp->fr_grphead != NULL) {
			freed += ipf_group_flush(softc, fp->fr_grphead);
			fp->fr_names[fp->fr_grhead] = '\0';
		}

		if (fp->fr_icmpgrp != NULL) {
			freed += ipf_group_flush(softc, fp->fr_icmpgrp);
			fp->fr_names[fp->fr_icmphead] = '\0';
		}

		if (fp->fr_srctrack.ht_max_nodes)
			ipf_rb_ht_flush(&fp->fr_srctrack);

		fp->fr_next = NULL;

		ASSERT(fp->fr_ref > 0);
		if (ipf_derefrule(softc, &fp) == 0)
			freed++;
	}
	*nfreedp += freed;
	return (freed);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_flush                                                   */
/* Returns:     int - >= 0 - number of flushed rules                        */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              unit(I)  - device for which to flush rules                  */
/*              flags(I) - which set of rules to flush                      */
/*                                                                          */
/* Calls flushlist() for all filter rules (accounting, firewall - both IPv4 */
/* and IPv6) as defined by the value of flags.                              */
/* ------------------------------------------------------------------------ */
int
ipf_flush(ipf_main_softc_t *softc, minor_t unit, int flags)
{
	int flushed = 0, set;

	WRITE_ENTER(&softc->ipf_mutex);

	set = softc->ipf_active;
	if ((flags & FR_INACTIVE) == FR_INACTIVE)
		set = 1 - set;

	if (flags & FR_OUTQUE) {
		ipf_flushlist(softc, &flushed, &softc->ipf_rules[1][set]);
		ipf_flushlist(softc, &flushed, &softc->ipf_acct[1][set]);
	}
	if (flags & FR_INQUE) {
		ipf_flushlist(softc, &flushed, &softc->ipf_rules[0][set]);
		ipf_flushlist(softc, &flushed, &softc->ipf_acct[0][set]);
	}

	flushed += ipf_flush_groups(softc, &softc->ipf_groups[unit][set],
	    flags & (FR_INQUE|FR_OUTQUE));

	RWLOCK_EXIT(&softc->ipf_mutex);

	if (unit == IPL_LOGIPF) {
		int tmp;

		tmp = ipf_flush(softc, IPL_LOGCOUNT, flags);
		if (tmp >= 0)
			flushed += tmp;
	}
	return (flushed);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_flush_groups                                            */
/* Returns:     int - >= 0 - number of flushed rules                        */
/* Parameters:  softc(I)  - soft context pointer to work with               */
/*              grhead(I) - pointer to the start of the group list to flush */
/*              flags(I)  - which set of rules to flush                     */
/*                                                                          */
/* Walk through all of the groups under the given group head and remove all */
/* of those that match the flags passed in. The for loop here is a bit more */
/* complicated than usual because the removal of a rule with ipf_derefrule  */
/* may end up removing not only the structure pointed to by "fg" but also   */
/* what is fg_next and fg_next after that. So if a filter rule is actually  */
/* removed from the group then it is necessary to start again.
*/ /* ------------------------------------------------------------------------ */ static int ipf_flush_groups(ipf_main_softc_t *softc, frgroup_t **grhead, int flags) { frentry_t *fr, **frp; frgroup_t *fg, **fgp; int flushed = 0; int removed = 0; for (fgp = grhead; (fg = *fgp) != NULL; ) { while ((fg != NULL) && ((fg->fg_flags & flags) == 0)) fg = fg->fg_next; if (fg == NULL) break; removed = 0; frp = &fg->fg_start; while ((removed == 0) && ((fr = *frp) != NULL)) { if ((fr->fr_flags & flags) == 0) { frp = &fr->fr_next; } else { if (fr->fr_next != NULL) fr->fr_next->fr_pnext = fr->fr_pnext; *frp = fr->fr_next; fr->fr_pnext = NULL; fr->fr_next = NULL; (void) ipf_derefrule(softc, &fr); flushed++; removed++; } } if (removed == 0) fgp = &fg->fg_next; } return (flushed); } /* ------------------------------------------------------------------------ */ /* Function: memstr */ /* Returns: char * - NULL if failed, != NULL pointer to matching bytes */ /* Parameters: src(I) - pointer to byte sequence to match */ /* dst(I) - pointer to byte sequence to search */ /* slen(I) - match length */ /* dlen(I) - length available to search in */ /* */ /* Search dst for a sequence of bytes matching those at src and extend for */ /* slen bytes. */ /* ------------------------------------------------------------------------ */ char * memstr(const char *src, char *dst, size_t slen, size_t dlen) { char *s = NULL; while (dlen >= slen) { if (bcmp(src, dst, slen) == 0) { s = dst; break; } dst++; dlen--; } return (s); } /* ------------------------------------------------------------------------ */ /* Function: ipf_fixskip */ /* Returns: Nil */ /* Parameters: listp(IO) - pointer to start of list with skip rule */ /* rp(I) - rule added/removed with skip in it. */ /* addremove(I) - adjustment (-1/+1) to make to skip count, */ /* depending on whether a rule was just added */ /* or removed. */ /* */ /* Adjust all the rules in a list which would have skip'd past the position */ /* where we are inserting to skip to the right place given the change. */ /* ------------------------------------------------------------------------ */ void ipf_fixskip(frentry_t **listp, frentry_t *rp, int addremove) { int rules, rn; frentry_t *fp; rules = 0; for (fp = *listp; (fp != NULL) && (fp != rp); fp = fp->fr_next) rules++; if (fp == NULL) return; for (rn = 0, fp = *listp; fp && (fp != rp); fp = fp->fr_next, rn++) if (FR_ISSKIP(fp->fr_flags) && (rn + fp->fr_arg >= rules)) fp->fr_arg += addremove; } #ifdef _KERNEL /* ------------------------------------------------------------------------ */ /* Function: count4bits */ /* Returns: int - >= 0 - number of consecutive bits in input */ /* Parameters: ip(I) - 32bit IP address */ /* */ /* IPv4 ONLY */ /* count consecutive 1's in bit mask. If the mask generated by counting */ /* consecutive 1's is different to that passed, return -1, else return # */ /* of bits. */ /* ------------------------------------------------------------------------ */ int count4bits(u_32_t ip) { u_32_t ipn; int cnt = 0, i, j; ip = ipn = ntohl(ip); for (i = 32; i; i--, ipn *= 2) if (ipn & 0x80000000) cnt++; else break; ipn = 0; for (i = 32, j = cnt; i; i--, j--) { ipn *= 2; if (j > 0) ipn++; } if (ipn == ip) return (cnt); return (-1); } /* ------------------------------------------------------------------------ */ /* Function: count6bits */ /* Returns: int - >= 0 - number of consecutive bits in input */ /* Parameters: msk(I) - pointer to start of IPv6 bitmask */ /* */ /* IPv6 ONLY */ /* count consecutive 1's in bit mask. 
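*/

/*
 * Illustrative sketch (not compiled): count4bits() above doubles as a
 * netmask validator, returning the CIDR prefix length for a contiguous
 * mask and -1 otherwise.  example_prefixlen() is a hypothetical helper.
 */
#if 0
static int
example_prefixlen(struct in_addr mask)
{
	/* 255.255.255.0 yields 24; a non-contiguous mask yields -1. */
	return (count4bits(mask.s_addr));
}
#endif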
/* ------------------------------------------------------------------------ */
# ifdef USE_INET6
int
count6bits(u_32_t *msk)
{
	int i = 0, k;
	u_32_t j;

	for (k = 3; k >= 0; k--)
		if (msk[k] == 0xffffffff)
			i += 32;
		else {
			for (j = msk[k]; j; j <<= 1)
				if (j & 0x80000000)
					i++;
		}
	return (i);
}
# endif
#endif /* _KERNEL */


/* ------------------------------------------------------------------------ */
/* Function:    ipf_synclist                                                */
/* Returns:     int - 0 = no failures, else indication of first failure     */
/* Parameters:  fr(I)  - start of filter list to sync interface names for   */
/*              ifp(I) - interface pointer for limiting sync lookups        */
/* Write Locks: ipf_mutex                                                   */
/*                                                                          */
/* Walk through a list of filter rules and resolve any interface names into */
/* pointers.  Where dynamic addresses are used, also update the IP address  */
/* used in the rule.  The interface pointer is used to limit the lookups to */
/* a specific set of matching names if it is non-NULL.                      */
/* Errors can occur when resolving the destination name of to/dup-to fields */
/* when the name points to a pool and that pool does not exist. If this     */
/* does happen then it is necessary to check if there are any lookup refs   */
/* that need to be dropped before returning with an error.                  */
/* ------------------------------------------------------------------------ */
static int
ipf_synclist(ipf_main_softc_t *softc, frentry_t *fr, void *ifp)
{
	frentry_t *frt, *start = fr;
	frdest_t *fdp;
	char *name;
	int error;
	void *ifa;
	int v, i;

	error = 0;

	for (; fr; fr = fr->fr_next) {
		if (fr->fr_family == AF_INET)
			v = 4;
		else if (fr->fr_family == AF_INET6)
			v = 6;
		else
			v = 0;

		/*
		 * Lookup all the interface names that are part of the rule.
		 */
		for (i = 0; i < FR_NUM(fr->fr_ifas); i++) {
			if ((ifp != NULL) && (fr->fr_ifas[i] != ifp))
				continue;
			if (fr->fr_ifnames[i] == -1)
				continue;
			name = FR_NAME(fr, fr_ifnames[i]);
			fr->fr_ifas[i] = ipf_resolvenic(softc, name, v);
		}

		if ((fr->fr_type & ~FR_T_BUILTIN) == FR_T_IPF) {
			if (fr->fr_satype != FRI_NORMAL &&
			    fr->fr_satype != FRI_LOOKUP) {
				ifa = ipf_resolvenic(softc, fr->fr_names +
				    fr->fr_sifpidx, v);
				ipf_ifpaddr(softc, v, fr->fr_satype, ifa,
				    &fr->fr_src6, &fr->fr_smsk6);
			}
			if (fr->fr_datype != FRI_NORMAL &&
			    fr->fr_datype != FRI_LOOKUP) {
				ifa = ipf_resolvenic(softc, fr->fr_names +
				    fr->fr_sifpidx, v);
				ipf_ifpaddr(softc, v, fr->fr_datype, ifa,
				    &fr->fr_dst6, &fr->fr_dmsk6);
			}
		}

		fdp = &fr->fr_tifs[0];
		if ((ifp == NULL) || (fdp->fd_ptr == ifp)) {
			error = ipf_resolvedest(softc, fr->fr_names, fdp, v);
			if (error != 0)
				goto unwind;
		}

		fdp = &fr->fr_tifs[1];
		if ((ifp == NULL) || (fdp->fd_ptr == ifp)) {
			error = ipf_resolvedest(softc, fr->fr_names, fdp, v);
			if (error != 0)
				goto unwind;
		}

		fdp = &fr->fr_dif;
		if ((ifp == NULL) || (fdp->fd_ptr == ifp)) {
			error = ipf_resolvedest(softc, fr->fr_names, fdp, v);
			if (error != 0)
				goto unwind;
		}

		if (((fr->fr_type & ~FR_T_BUILTIN) == FR_T_IPF) &&
		    (fr->fr_satype == FRI_LOOKUP) && (fr->fr_srcptr == NULL)) {
			fr->fr_srcptr = ipf_lookup_res_num(softc,
			    fr->fr_srctype, IPL_LOGIPF, fr->fr_srcnum,
			    &fr->fr_srcfunc);
		}
		if (((fr->fr_type & ~FR_T_BUILTIN) == FR_T_IPF) &&
		    (fr->fr_datype == FRI_LOOKUP) && (fr->fr_dstptr == NULL)) {
			fr->fr_dstptr = ipf_lookup_res_num(softc,
			    fr->fr_dsttype, IPL_LOGIPF, fr->fr_dstnum,
			    &fr->fr_dstfunc);
		}
	}
	return (0);

unwind:
	for (frt = start; frt != fr; frt = frt->fr_next) {
		if (((frt->fr_type & ~FR_T_BUILTIN) == FR_T_IPF) &&
		    (frt->fr_satype == FRI_LOOKUP) &&
		    (frt->fr_srcptr != NULL))
			ipf_lookup_deref(softc, frt->fr_srctype,
			    frt->fr_srcptr);
		if (((frt->fr_type & ~FR_T_BUILTIN) == FR_T_IPF) &&
		    (frt->fr_datype == FRI_LOOKUP) &&
		    (frt->fr_dstptr != NULL))
			ipf_lookup_deref(softc, frt->fr_dsttype,
			    frt->fr_dstptr);
	}
	return (error);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync                                                    */
/* Returns:     int - 0 == success                                          */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              ifp(I)   - interface pointer for limiting sync lookups      */
/*                                                                          */
/* ipf_sync() is called when we suspect that the interface list or          */
/* information about interfaces (like IP#) has changed.  Go through all     */
/* filter rules, NAT entries and the state table and check if anything      */
/* needs to be changed/updated.                                             */
/* ------------------------------------------------------------------------ */
int
ipf_sync(ipf_main_softc_t *softc, void *ifp)
{
	int i;

#if !SOLARIS
	ipf_nat_sync(softc, ifp);
	ipf_state_sync(softc, ifp);
	ipf_lookup_sync(softc, ifp);
#endif

	WRITE_ENTER(&softc->ipf_mutex);
	(void) ipf_synclist(softc, softc->ipf_acct[0][softc->ipf_active], ifp);
	(void) ipf_synclist(softc, softc->ipf_acct[1][softc->ipf_active], ifp);
	(void) ipf_synclist(softc, softc->ipf_rules[0][softc->ipf_active], ifp);
	(void) ipf_synclist(softc, softc->ipf_rules[1][softc->ipf_active], ifp);

	for (i = 0; i < IPL_LOGSIZE; i++) {
		frgroup_t *g;

		for (g = softc->ipf_groups[i][0]; g != NULL; g = g->fg_next)
			(void) ipf_synclist(softc, g->fg_start, ifp);
		for (g = softc->ipf_groups[i][1]; g != NULL; g = g->fg_next)
			(void) ipf_synclist(softc, g->fg_start, ifp);
	}
	RWLOCK_EXIT(&softc->ipf_mutex);

	return (0);
}


/*
 * In the functions below, bcopy() is called because the pointer being
 * copied _from_ in this instance is a pointer to a char buf (which could
 * end up being unaligned) and on the kernel's local stack.
 */

/* ------------------------------------------------------------------------ */
/* Function:    copyinptr                                                   */
/* Returns:     int - 0 = success, else failure                             */
/* Parameters:  src(I)  - pointer to the source address                     */
/*              dst(I)  - destination address                               */
/*              size(I) - number of bytes to copy                           */
/*                                                                          */
/* Copy a block of data in from user space, given a pointer to the pointer  */
/* to start copying from (src) and a pointer to where to store it (dst).    */
/* NB: src - pointer to user space pointer, dst - kernel space pointer      */
/* ------------------------------------------------------------------------ */
int
copyinptr(ipf_main_softc_t *softc, void *src, void *dst, size_t size)
{
	caddr_t ca;
	int error;

#if SOLARIS
	error = COPYIN(src, &ca, sizeof(ca));
	if (error != 0)
		return (error);
#else
	bcopy(src, (caddr_t)&ca, sizeof(ca));
#endif
	error = COPYIN(ca, dst, size);
	if (error != 0) {
		IPFERROR(3);
		error = EFAULT;
	}
	return (error);
}


/* ------------------------------------------------------------------------ */
/* Function:    copyoutptr                                                  */
/* Returns:     int - 0 = success, else failure                             */
/* Parameters:  src(I)  - pointer to the source address                     */
/*              dst(I)  - destination address                               */
/*              size(I) - number of bytes to copy                           */
/*                                                                          */
/* Copy a block of data out to user space, given a pointer to the pointer   */
/* to start copying from (src) and a pointer to where to store it (dst).    */
/* NB: src - kernel space pointer, dst - pointer to user space pointer.
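*/

/*
 * Illustrative sketch (not compiled): copyinptr() above dereferences a
 * user-space pointer-to-pointer and then copies the object in, the
 * typical case being a structure whose address arrives inside ioctl
 * data.  example_fetch_rule() is a hypothetical caller.
 */
#if 0
static int
example_fetch_rule(ipf_main_softc_t *softc, void *data, frentry_t *fr)
{
	/* "data" holds a user pointer to a user-space frentry_t. */
	return (copyinptr(softc, data, fr, sizeof(*fr)));
}
#endif

/*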
*/ /* ------------------------------------------------------------------------ */ int copyoutptr(ipf_main_softc_t *softc, void *src, void *dst, size_t size) { caddr_t ca; int error; bcopy(dst, (caddr_t)&ca, sizeof(ca)); error = COPYOUT(src, ca, size); if (error != 0) { IPFERROR(4); error = EFAULT; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_lock */ /* Returns: int - 0 = success, else error */ /* Parameters: data(I) - pointer to lock value to set */ /* lockp(O) - pointer to location to store old lock value */ /* */ /* Get the new value for the lock integer, set it and return the old value */ /* in *lockp. */ /* ------------------------------------------------------------------------ */ int ipf_lock(caddr_t data, int *lockp) { int arg, err; err = BCOPYIN(data, &arg, sizeof(arg)); if (err != 0) return (EFAULT); err = BCOPYOUT(lockp, data, sizeof(*lockp)); if (err != 0) return (EFAULT); *lockp = arg; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_getstat */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* fiop(I) - pointer to ipfilter stats structure */ /* rev(I) - version claim by program doing ioctl */ /* */ /* Stores a copy of current pointers, counters, etc, in the friostat */ /* structure. */ /* If IPFILTER_COMPAT is compiled, we pretend to be whatever version the */ /* program is looking for. This ensure that validation of the version it */ /* expects will always succeed. Thus kernels with IPFILTER_COMPAT will */ /* allow older binaries to work but kernels without it will not. */ /* ------------------------------------------------------------------------ */ /*ARGSUSED*/ static void ipf_getstat(ipf_main_softc_t *softc, friostat_t *fiop, int rev) { int i; bcopy((char *)softc->ipf_stats, (char *)fiop->f_st, sizeof(ipf_statistics_t) * 2); fiop->f_locks[IPL_LOGSTATE] = -1; fiop->f_locks[IPL_LOGNAT] = -1; fiop->f_locks[IPL_LOGIPF] = -1; fiop->f_locks[IPL_LOGAUTH] = -1; fiop->f_ipf[0][0] = softc->ipf_rules[0][0]; fiop->f_acct[0][0] = softc->ipf_acct[0][0]; fiop->f_ipf[0][1] = softc->ipf_rules[0][1]; fiop->f_acct[0][1] = softc->ipf_acct[0][1]; fiop->f_ipf[1][0] = softc->ipf_rules[1][0]; fiop->f_acct[1][0] = softc->ipf_acct[1][0]; fiop->f_ipf[1][1] = softc->ipf_rules[1][1]; fiop->f_acct[1][1] = softc->ipf_acct[1][1]; fiop->f_ticks = softc->ipf_ticks; fiop->f_active = softc->ipf_active; fiop->f_froute[0] = softc->ipf_frouteok[0]; fiop->f_froute[1] = softc->ipf_frouteok[1]; fiop->f_rb_no_mem = softc->ipf_rb_no_mem; fiop->f_rb_node_max = softc->ipf_rb_node_max; fiop->f_running = softc->ipf_running; for (i = 0; i < IPL_LOGSIZE; i++) { fiop->f_groups[i][0] = softc->ipf_groups[i][0]; fiop->f_groups[i][1] = softc->ipf_groups[i][1]; } #ifdef IPFILTER_LOG fiop->f_log_ok = ipf_log_logok(softc, IPL_LOGIPF); fiop->f_log_fail = ipf_log_failures(softc, IPL_LOGIPF); fiop->f_logging = 1; #else fiop->f_log_ok = 0; fiop->f_log_fail = 0; fiop->f_logging = 0; #endif fiop->f_defpass = softc->ipf_pass; fiop->f_features = ipf_features; #ifdef IPFILTER_COMPAT snprintf(fiop->f_version, sizeof(friostat.f_version), "IP Filter: v%d.%d.%d", (rev / 1000000) % 100, (rev / 10000) % 100, (rev / 100) % 100); #else rev = rev; (void) strncpy(fiop->f_version, ipfilter_version, sizeof(fiop->f_version)); #endif } #ifdef USE_INET6 int icmptoicmp6types[ICMP_MAXTYPE+1] = { ICMP6_ECHO_REPLY, /* 0: ICMP_ECHOREPLY */ -1, /* 1: UNUSED */ -1, /* 2: UNUSED */ 
ICMP6_DST_UNREACH, /* 3: ICMP_UNREACH */ -1, /* 4: ICMP_SOURCEQUENCH */ ND_REDIRECT, /* 5: ICMP_REDIRECT */ -1, /* 6: UNUSED */ -1, /* 7: UNUSED */ ICMP6_ECHO_REQUEST, /* 8: ICMP_ECHO */ -1, /* 9: UNUSED */ -1, /* 10: UNUSED */ ICMP6_TIME_EXCEEDED, /* 11: ICMP_TIMXCEED */ ICMP6_PARAM_PROB, /* 12: ICMP_PARAMPROB */ -1, /* 13: ICMP_TSTAMP */ -1, /* 14: ICMP_TSTAMPREPLY */ -1, /* 15: ICMP_IREQ */ -1, /* 16: ICMP_IREQREPLY */ -1, /* 17: ICMP_MASKREQ */ -1, /* 18: ICMP_MASKREPLY */ }; int icmptoicmp6unreach[ICMP_MAX_UNREACH] = { ICMP6_DST_UNREACH_ADDR, /* 0: ICMP_UNREACH_NET */ ICMP6_DST_UNREACH_ADDR, /* 1: ICMP_UNREACH_HOST */ -1, /* 2: ICMP_UNREACH_PROTOCOL */ ICMP6_DST_UNREACH_NOPORT, /* 3: ICMP_UNREACH_PORT */ -1, /* 4: ICMP_UNREACH_NEEDFRAG */ ICMP6_DST_UNREACH_NOTNEIGHBOR, /* 5: ICMP_UNREACH_SRCFAIL */ ICMP6_DST_UNREACH_ADDR, /* 6: ICMP_UNREACH_NET_UNKNOWN */ ICMP6_DST_UNREACH_ADDR, /* 7: ICMP_UNREACH_HOST_UNKNOWN */ -1, /* 8: ICMP_UNREACH_ISOLATED */ ICMP6_DST_UNREACH_ADMIN, /* 9: ICMP_UNREACH_NET_PROHIB */ ICMP6_DST_UNREACH_ADMIN, /* 10: ICMP_UNREACH_HOST_PROHIB */ -1, /* 11: ICMP_UNREACH_TOSNET */ -1, /* 12: ICMP_UNREACH_TOSHOST */ ICMP6_DST_UNREACH_ADMIN, /* 13: ICMP_UNREACH_ADMIN_PROHIBIT */ }; int icmpreplytype6[ICMP6_MAXTYPE + 1]; #endif int icmpreplytype4[ICMP_MAXTYPE + 1]; /* ------------------------------------------------------------------------ */ /* Function: ipf_matchicmpqueryreply */ /* Returns: int - 1 if "icmp" is a valid reply to "ic" else 0. */ /* Parameters: v(I) - IP protocol version (4 or 6) */ /* ic(I) - ICMP information */ /* icmp(I) - ICMP packet header */ /* rev(I) - direction (0 = forward/1 = reverse) of packet */ /* */ /* Check if the ICMP packet defined by the header pointed to by icmp is a */ /* reply to one as described by what's in ic. If it is a match, return 1, */ /* else return 0 for no match. */ /* ------------------------------------------------------------------------ */ int ipf_matchicmpqueryreply(int v, icmpinfo_t *ic, icmphdr_t *icmp, int rev) { int ictype; ictype = ic->ici_type; if (v == 4) { /* * If we matched its type on the way in, then when going out * it will still be the same type. */ if ((!rev && (icmp->icmp_type == ictype)) || (rev && (icmpreplytype4[ictype] == icmp->icmp_type))) { if (icmp->icmp_type != ICMP_ECHOREPLY) return (1); if (icmp->icmp_id == ic->ici_id) return (1); } } #ifdef USE_INET6 else if (v == 6) { if ((!rev && (icmp->icmp_type == ictype)) || (rev && (icmpreplytype6[ictype] == icmp->icmp_type))) { if (icmp->icmp_type != ICMP6_ECHO_REPLY) return (1); if (icmp->icmp_id == ic->ici_id) return (1); } } #endif return (0); } /* * IFNAMES are located in the variable length field starting at * frentry.fr_names. As pointers within the struct cannot be passed * to the kernel from ipf(8), an offset is used. An offset of -1 means it * is unused (invalid). If it is used (valid) it is an offset to the * character string of an interface name or a comment. The following * macros will assist those who follow to understand the code. 
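*/

/*
 * Illustrative sketch (not compiled): reading one of the embedded
 * names.  example_ifname() is a hypothetical helper; fr_ifnames[] and
 * FR_NAME() are the real mechanism described above.
 */
#if 0
static char *
example_ifname(frentry_t *fr, int i)
{
	/* fr_ifnames[i] is an offset into fr_names, or -1 if unset. */
	if (fr->fr_ifnames[i] == -1)
		return (NULL);
	return (FR_NAME(fr, fr_ifnames[i]));
}
#endif

/*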
*/ #define IPF_IFNAME_VALID(_a) (_a != -1) #define IPF_IFNAME_INVALID(_a) (_a == -1) #define IPF_IFNAMES_DIFFERENT(_a) \ !((IPF_IFNAME_INVALID(fr1->_a) && \ IPF_IFNAME_INVALID(fr2->_a)) || \ (IPF_IFNAME_VALID(fr1->_a) && \ IPF_IFNAME_VALID(fr2->_a) && \ !strcmp(FR_NAME(fr1, _a), FR_NAME(fr2, _a)))) #define IPF_FRDEST_DIFFERENT(_a) \ (memcmp(&fr1->_a.fd_addr, &fr2->_a.fd_addr, \ offsetof(frdest_t, fd_name) - offsetof(frdest_t, fd_addr)) || \ IPF_IFNAMES_DIFFERENT(_a.fd_name)) /* ------------------------------------------------------------------------ */ /* Function: ipf_rule_compare */ /* Parameters: fr1(I) - first rule structure to compare */ /* fr2(I) - second rule structure to compare */ /* Returns: int - 0 == rules are the same, else mismatch */ /* */ /* Compare two rules and return 0 if they match or a number indicating */ /* which of the individual checks failed. */ /* ------------------------------------------------------------------------ */ static int ipf_rule_compare(frentry_t *fr1, frentry_t *fr2) { int i; if (fr1->fr_cksum != fr2->fr_cksum) return (1); if (fr1->fr_size != fr2->fr_size) return (2); if (fr1->fr_dsize != fr2->fr_dsize) return (3); if (bcmp((char *)&fr1->fr_func, (char *)&fr2->fr_func, FR_CMPSIZ) != 0) return (4); /* * XXX: There is still a bug here as different rules with the * the same interfaces but in a different order will compare * differently. But since multiple interfaces in a rule doesn't * work anyway a simple straightforward compare is performed * here. Ultimately frentry_t creation will need to be * revisited in ipf_y.y. While the other issue, recognition * of only the first interface in a list of interfaces will * need to be separately addressed along with why only four. */ for (i = 0; i < FR_NUM(fr1->fr_ifnames); i++) { /* * XXX: It's either the same index or uninitialized. * We assume this because multiple interfaces * referenced by the same rule doesn't work anyway. */ if (IPF_IFNAMES_DIFFERENT(fr_ifnames[i])) return (5); } if (IPF_FRDEST_DIFFERENT(fr_tif)) return (6); if (IPF_FRDEST_DIFFERENT(fr_rif)) return (7); if (IPF_FRDEST_DIFFERENT(fr_dif)) return (8); if (!fr1->fr_data && !fr2->fr_data) return (0); /* move along, nothing to see here */ if (fr1->fr_data && fr2->fr_data) { if (bcmp(fr1->fr_caddr, fr2->fr_caddr, fr1->fr_dsize) == 0) return (0); /* same */ } return (9); } /* ------------------------------------------------------------------------ */ /* Function: frrequest */ /* Returns: int - 0 == success, > 0 == errno value */ /* Parameters: unit(I) - device for which this is for */ /* req(I) - ioctl command (SIOC*) */ /* data(I) - pointr to ioctl data */ /* set(I) - 1 or 0 (filter set) */ /* makecopy(I) - flag indicating whether data points to a rule */ /* in kernel space & hence doesn't need copying. */ /* */ /* This function handles all the requests which operate on the list of */ /* filter rules. This includes adding, deleting, insertion. It is also */ /* responsible for creating groups when a "head" rule is loaded. Interface */ /* names are resolved here and other sanity checks are made on the content */ /* of the rule structure being loaded. If a rule has user defined timeouts */ /* then make sure they are created and initialised before exiting. 
*/ /* ------------------------------------------------------------------------ */ int frrequest(ipf_main_softc_t *softc, int unit, ioctlcmd_t req, caddr_t data, int set, int makecopy) { int error = 0, in, family, need_free = 0; enum { OP_ADD, /* add rule */ OP_REM, /* remove rule */ OP_ZERO /* zero statistics and counters */ } addrem = OP_ADD; frentry_t frd, *fp, *f, **fprev, **ftail; void *ptr, *uptr, *cptr; u_int *p, *pp; frgroup_t *fg; char *group; ptr = NULL; cptr = NULL; fg = NULL; fp = &frd; if (makecopy != 0) { bzero(fp, sizeof(frd)); error = ipf_inobj(softc, data, NULL, fp, IPFOBJ_FRENTRY); if (error) { return (error); } if ((fp->fr_type & FR_T_BUILTIN) != 0) { IPFERROR(6); return (EINVAL); } KMALLOCS(f, frentry_t *, fp->fr_size); if (f == NULL) { IPFERROR(131); return (ENOMEM); } bzero(f, fp->fr_size); error = ipf_inobjsz(softc, data, f, IPFOBJ_FRENTRY, fp->fr_size); if (error) { KFREES(f, fp->fr_size); return (error); } fp = f; f = NULL; fp->fr_next = NULL; fp->fr_dnext = NULL; fp->fr_pnext = NULL; fp->fr_pdnext = NULL; fp->fr_grp = NULL; fp->fr_grphead = NULL; fp->fr_icmpgrp = NULL; fp->fr_isc = (void *)-1; fp->fr_ptr = NULL; fp->fr_ref = 0; fp->fr_flags |= FR_COPIED; } else { fp = (frentry_t *)data; if ((fp->fr_type & FR_T_BUILTIN) == 0) { IPFERROR(7); return (EINVAL); } fp->fr_flags &= ~FR_COPIED; } if (((fp->fr_dsize == 0) && (fp->fr_data != NULL)) || ((fp->fr_dsize != 0) && (fp->fr_data == NULL))) { IPFERROR(8); error = EINVAL; goto donenolock; } family = fp->fr_family; uptr = fp->fr_data; if (req == (ioctlcmd_t)SIOCINAFR || req == (ioctlcmd_t)SIOCINIFR || req == (ioctlcmd_t)SIOCADAFR || req == (ioctlcmd_t)SIOCADIFR) addrem = OP_ADD; /* Add rule */ else if (req == (ioctlcmd_t)SIOCRMAFR || req == (ioctlcmd_t)SIOCRMIFR) addrem = OP_REM; /* Remove rule */ else if (req == (ioctlcmd_t)SIOCZRLST) addrem = OP_ZERO; /* Zero statistics and counters */ else { IPFERROR(9); error = EINVAL; goto donenolock; } /* * Only filter rules for IPv4 or IPv6 are accepted. */ if (family == AF_INET) { /*EMPTY*/; #ifdef USE_INET6 } else if (family == AF_INET6) { /*EMPTY*/; #endif } else if (family != 0) { IPFERROR(10); error = EINVAL; goto donenolock; } /* * If the rule is being loaded from user space, i.e. we had to copy it * into kernel space, then do not trust the function pointer in the * rule. */ if ((makecopy == 1) && (fp->fr_func != NULL)) { if (ipf_findfunc(fp->fr_func) == NULL) { IPFERROR(11); error = ESRCH; goto donenolock; } if (addrem == OP_ADD) { error = ipf_funcinit(softc, fp); if (error != 0) goto donenolock; } } if ((fp->fr_flags & FR_CALLNOW) && ((fp->fr_func == NULL) || (fp->fr_func == (ipfunc_t)-1))) { IPFERROR(142); error = ESRCH; goto donenolock; } if (((fp->fr_flags & FR_CMDMASK) == FR_CALL) && ((fp->fr_func == NULL) || (fp->fr_func == (ipfunc_t)-1))) { IPFERROR(143); error = ESRCH; goto donenolock; } ptr = NULL; cptr = NULL; if (FR_ISACCOUNT(fp->fr_flags)) unit = IPL_LOGCOUNT; /* * Check that each group name in the rule has a start index that * is valid. 
*/ if (fp->fr_icmphead != -1) { if ((fp->fr_icmphead < 0) || (fp->fr_icmphead >= fp->fr_namelen)) { IPFERROR(136); error = EINVAL; goto donenolock; } if (!strcmp(FR_NAME(fp, fr_icmphead), "0")) fp->fr_names[fp->fr_icmphead] = '\0'; } if (fp->fr_grhead != -1) { if ((fp->fr_grhead < 0) || (fp->fr_grhead >= fp->fr_namelen)) { IPFERROR(137); error = EINVAL; goto donenolock; } if (!strcmp(FR_NAME(fp, fr_grhead), "0")) fp->fr_names[fp->fr_grhead] = '\0'; } if (fp->fr_group != -1) { if ((fp->fr_group < 0) || (fp->fr_group >= fp->fr_namelen)) { IPFERROR(138); error = EINVAL; goto donenolock; } if ((req != (int)SIOCZRLST) && (fp->fr_group != -1)) { /* * Allow loading rules that are in groups to cause * them to be created if they don't already exit. */ group = FR_NAME(fp, fr_group); if (addrem == OP_ADD) { fg = ipf_group_add(softc, group, NULL, fp->fr_flags, unit, set); fp->fr_grp = fg; } else { fg = ipf_findgroup(softc, group, unit, set, NULL); if (fg == NULL) { IPFERROR(12); error = ESRCH; goto donenolock; } } if (fg->fg_flags == 0) { fg->fg_flags = fp->fr_flags & FR_INOUT; } else if (fg->fg_flags != (fp->fr_flags & FR_INOUT)) { IPFERROR(13); error = ESRCH; goto donenolock; } } } else { /* * If a rule is going to be part of a group then it does * not matter whether it is an in or out rule, but if it * isn't in a group, then it does... */ if ((fp->fr_flags & (FR_INQUE|FR_OUTQUE)) == 0) { IPFERROR(14); error = EINVAL; goto donenolock; } } in = (fp->fr_flags & FR_INQUE) ? 0 : 1; /* * Work out which rule list this change is being applied to. */ ftail = NULL; fprev = NULL; if (unit == IPL_LOGAUTH) { if ((fp->fr_tifs[0].fd_ptr != NULL) || (fp->fr_tifs[1].fd_ptr != NULL) || (fp->fr_dif.fd_ptr != NULL) || (fp->fr_flags & FR_FASTROUTE)) { softc->ipf_interror = 145; error = EINVAL; goto donenolock; } fprev = ipf_auth_rulehead(softc); } else { if (FR_ISACCOUNT(fp->fr_flags)) fprev = &softc->ipf_acct[in][set]; else if ((fp->fr_flags & (FR_OUTQUE|FR_INQUE)) != 0) fprev = &softc->ipf_rules[in][set]; } if (fprev == NULL) { IPFERROR(15); error = ESRCH; goto donenolock; } if (fg != NULL) fprev = &fg->fg_start; /* * Copy in extra data for the rule. */ if (fp->fr_dsize != 0) { if (makecopy != 0) { KMALLOCS(ptr, void *, fp->fr_dsize); if (ptr == NULL) { IPFERROR(16); error = ENOMEM; goto donenolock; } /* * The bcopy case is for when the data is appended * to the rule by ipf_in_compat(). */ if (uptr >= (void *)fp && uptr < (void *)((char *)fp + fp->fr_size)) { bcopy(uptr, ptr, fp->fr_dsize); error = 0; } else { error = COPYIN(uptr, ptr, fp->fr_dsize); if (error != 0) { IPFERROR(17); error = EFAULT; goto donenolock; } } } else { ptr = uptr; } fp->fr_data = ptr; } else { fp->fr_data = NULL; } /* * Perform per-rule type sanity checks of their members. * All code after this needs to be aware that allocated memory * may need to be free'd before exiting. */ switch (fp->fr_type & ~FR_T_BUILTIN) { #if defined(IPFILTER_BPF) case FR_T_BPFOPC : if (fp->fr_dsize == 0) { IPFERROR(19); error = EINVAL; break; } if (!bpf_validate(ptr, fp->fr_dsize/sizeof(struct bpf_insn))) { IPFERROR(20); error = EINVAL; break; } break; #endif case FR_T_IPF : /* * Preparation for error case at the bottom of this function. 
*/ if (fp->fr_datype == FRI_LOOKUP) fp->fr_dstptr = NULL; if (fp->fr_satype == FRI_LOOKUP) fp->fr_srcptr = NULL; if (fp->fr_dsize != sizeof(fripf_t)) { IPFERROR(21); error = EINVAL; break; } /* * Allowing a rule with both "keep state" and "with oow" is * pointless because adding a state entry to the table will * fail with the out of window (oow) flag set. */ if ((fp->fr_flags & FR_KEEPSTATE) && (fp->fr_flx & FI_OOW)) { IPFERROR(22); error = EINVAL; break; } switch (fp->fr_satype) { case FRI_BROADCAST : case FRI_DYNAMIC : case FRI_NETWORK : case FRI_NETMASKED : case FRI_PEERADDR : if (fp->fr_sifpidx < 0) { IPFERROR(23); error = EINVAL; } break; case FRI_LOOKUP : fp->fr_srcptr = ipf_findlookup(softc, unit, fp, &fp->fr_src6, &fp->fr_smsk6); if (fp->fr_srcfunc == NULL) { IPFERROR(132); error = ESRCH; break; } break; case FRI_NORMAL : break; default : IPFERROR(133); error = EINVAL; break; } if (error != 0) break; switch (fp->fr_datype) { case FRI_BROADCAST : case FRI_DYNAMIC : case FRI_NETWORK : case FRI_NETMASKED : case FRI_PEERADDR : if (fp->fr_difpidx < 0) { IPFERROR(24); error = EINVAL; } break; case FRI_LOOKUP : fp->fr_dstptr = ipf_findlookup(softc, unit, fp, &fp->fr_dst6, &fp->fr_dmsk6); if (fp->fr_dstfunc == NULL) { IPFERROR(134); error = ESRCH; } break; case FRI_NORMAL : break; default : IPFERROR(135); error = EINVAL; } break; case FR_T_NONE : case FR_T_CALLFUNC : case FR_T_COMPIPF : break; case FR_T_IPFEXPR : if (ipf_matcharray_verify(fp->fr_data, fp->fr_dsize) == -1) { IPFERROR(25); error = EINVAL; } break; default : IPFERROR(26); error = EINVAL; break; } if (error != 0) goto donenolock; if (fp->fr_tif.fd_name != -1) { if ((fp->fr_tif.fd_name < 0) || (fp->fr_tif.fd_name >= fp->fr_namelen)) { IPFERROR(139); error = EINVAL; goto donenolock; } } if (fp->fr_dif.fd_name != -1) { if ((fp->fr_dif.fd_name < 0) || (fp->fr_dif.fd_name >= fp->fr_namelen)) { IPFERROR(140); error = EINVAL; goto donenolock; } } if (fp->fr_rif.fd_name != -1) { if ((fp->fr_rif.fd_name < 0) || (fp->fr_rif.fd_name >= fp->fr_namelen)) { IPFERROR(141); error = EINVAL; goto donenolock; } } /* * Lookup all the interface names that are part of the rule. */ error = ipf_synclist(softc, fp, NULL); if (error != 0) goto donenolock; fp->fr_statecnt = 0; if (fp->fr_srctrack.ht_max_nodes != 0) ipf_rb_ht_init(&fp->fr_srctrack); /* * Look for an existing matching filter rule, but don't include the * next or interface pointer in the comparison (fr_next, fr_ifa). * This elminates rules which are indentical being loaded. Checksum * the constant part of the filter rule to make comparisons quicker * (this meaning no pointers are included). */ pp = (u_int *)(fp->fr_caddr + fp->fr_dsize); for (fp->fr_cksum = 0, p = (u_int *)fp->fr_data; p < pp; p++) fp->fr_cksum += *p; WRITE_ENTER(&softc->ipf_mutex); /* * Now that the filter rule lists are locked, we can walk the * chain of them without fear. */ ftail = fprev; for (f = *ftail; (f = *ftail) != NULL; ftail = &f->fr_next) { if (fp->fr_collect <= f->fr_collect) { ftail = fprev; f = NULL; break; } fprev = ftail; } for (; (f = *ftail) != NULL; ftail = &f->fr_next) { if (ipf_rule_compare(fp, f) == 0) break; } /* * If zero'ing statistics, copy current to caller and zero. */ if (addrem == OP_ZERO) { if (f == NULL) { IPFERROR(27); error = ESRCH; } else { /* * Copy and reduce lock because of impending copyout. * Well we should, but if we do then the atomicity of * this call and the correctness of fr_hits and * fr_bytes cannot be guaranteed. 
As it is, this code * only resets them to 0 if they are successfully * copied out into user space. */ bcopy((char *)f, (char *)fp, f->fr_size); /* MUTEX_DOWNGRADE(&softc->ipf_mutex); */ /* * When we copy this rule back out, set the data * pointer to be what it was in user space. */ fp->fr_data = uptr; error = ipf_outobj(softc, data, fp, IPFOBJ_FRENTRY); if (error == 0) { if ((f->fr_dsize != 0) && (uptr != NULL)) { error = COPYOUT(f->fr_data, uptr, f->fr_dsize); if (error == 0) { f->fr_hits = 0; f->fr_bytes = 0; } else { IPFERROR(28); error = EFAULT; } } } } if (makecopy != 0) { if (ptr != NULL) { KFREES(ptr, fp->fr_dsize); } KFREES(fp, fp->fr_size); } RWLOCK_EXIT(&softc->ipf_mutex); return (error); } if (f == NULL) { /* * At the end of this, ftail must point to the place where the * new rule is to be saved/inserted/added. * For SIOCAD*FR, this should be the last rule in the group of * rules that have equal fr_collect fields. * For SIOCIN*FR, ... */ if (req == (ioctlcmd_t)SIOCADAFR || req == (ioctlcmd_t)SIOCADIFR) { for (ftail = fprev; (f = *ftail) != NULL; ) { if (f->fr_collect > fp->fr_collect) break; ftail = &f->fr_next; fprev = ftail; } ftail = fprev; f = NULL; ptr = NULL; } else if (req == (ioctlcmd_t)SIOCINAFR || req == (ioctlcmd_t)SIOCINIFR) { while ((f = *fprev) != NULL) { if (f->fr_collect >= fp->fr_collect) break; fprev = &f->fr_next; } ftail = fprev; if (fp->fr_hits != 0) { while (fp->fr_hits && (f = *ftail)) { if (f->fr_collect != fp->fr_collect) break; fprev = ftail; ftail = &f->fr_next; fp->fr_hits--; } } f = NULL; ptr = NULL; } } /* * Request to remove a rule. */ if (addrem == OP_REM) { if (f == NULL) { IPFERROR(29); error = ESRCH; } else { /* * Do not allow activity from user space to interfere * with rules not loaded that way. */ if ((makecopy == 1) && !(f->fr_flags & FR_COPIED)) { IPFERROR(30); error = EPERM; goto done; } /* * Return EBUSY if the rule is being reference by * something else (eg state information.) */ if (f->fr_ref > 1) { IPFERROR(31); error = EBUSY; goto done; } #ifdef IPFILTER_SCAN if (f->fr_isctag != -1 && (f->fr_isc != (struct ipscan *)-1)) ipf_scan_detachfr(f); #endif if (unit == IPL_LOGAUTH) { error = ipf_auth_precmd(softc, req, f, ftail); goto done; } ipf_rule_delete(softc, f, unit, set); need_free = makecopy; } } else { /* * Not removing, so we must be adding/inserting a rule. 
*/ if (f != NULL) { IPFERROR(32); error = EEXIST; goto done; } if (unit == IPL_LOGAUTH) { error = ipf_auth_precmd(softc, req, fp, ftail); goto done; } MUTEX_NUKE(&fp->fr_lock); MUTEX_INIT(&fp->fr_lock, "filter rule lock"); if (fp->fr_die != 0) ipf_rule_expire_insert(softc, fp, set); fp->fr_hits = 0; if (makecopy != 0) fp->fr_ref = 1; fp->fr_pnext = ftail; fp->fr_next = *ftail; if (fp->fr_next != NULL) fp->fr_next->fr_pnext = &fp->fr_next; *ftail = fp; ipf_fixskip(ftail, fp, 1); fp->fr_icmpgrp = NULL; if (fp->fr_icmphead != -1) { group = FR_NAME(fp, fr_icmphead); fg = ipf_group_add(softc, group, fp, 0, unit, set); fp->fr_icmpgrp = fg; } fp->fr_grphead = NULL; if (fp->fr_grhead != -1) { group = FR_NAME(fp, fr_grhead); fg = ipf_group_add(softc, group, fp, fp->fr_flags, unit, set); fp->fr_grphead = fg; } } done: RWLOCK_EXIT(&softc->ipf_mutex); donenolock: if (need_free || (error != 0)) { if ((fp->fr_type & ~FR_T_BUILTIN) == FR_T_IPF) { if ((fp->fr_satype == FRI_LOOKUP) && (fp->fr_srcptr != NULL)) ipf_lookup_deref(softc, fp->fr_srctype, fp->fr_srcptr); if ((fp->fr_datype == FRI_LOOKUP) && (fp->fr_dstptr != NULL)) ipf_lookup_deref(softc, fp->fr_dsttype, fp->fr_dstptr); } if (fp->fr_grp != NULL) { WRITE_ENTER(&softc->ipf_mutex); ipf_group_del(softc, fp->fr_grp, fp); RWLOCK_EXIT(&softc->ipf_mutex); } if ((ptr != NULL) && (makecopy != 0)) { KFREES(ptr, fp->fr_dsize); } KFREES(fp, fp->fr_size); } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_rule_delete */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* f(I) - pointer to the rule being deleted */ /* ftail(I) - pointer to the pointer to f */ /* unit(I) - device for which this is for */ /* set(I) - 1 or 0 (filter set) */ /* */ /* This function attempts to do what it can to delete a filter rule: remove */ /* it from any linked lists and remove any groups it is responsible for. */ /* But in the end, removing a rule can only drop the reference count - we */ /* must use that as the guide for whether or not it can be freed. */ /* ------------------------------------------------------------------------ */ static void ipf_rule_delete(ipf_main_softc_t *softc, frentry_t *f, int unit, int set) { /* * If fr_pdnext is set, then the rule is on the expire list, so * remove it from there. */ if (f->fr_pdnext != NULL) { *f->fr_pdnext = f->fr_dnext; if (f->fr_dnext != NULL) f->fr_dnext->fr_pdnext = f->fr_pdnext; f->fr_pdnext = NULL; f->fr_dnext = NULL; } ipf_fixskip(f->fr_pnext, f, -1); if (f->fr_pnext != NULL) *f->fr_pnext = f->fr_next; if (f->fr_next != NULL) f->fr_next->fr_pnext = f->fr_pnext; f->fr_pnext = NULL; f->fr_next = NULL; (void) ipf_derefrule(softc, &f); } /* ------------------------------------------------------------------------ */ /* Function: ipf_rule_expire_insert */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* f(I) - pointer to rule to be added to expire list */ /* set(I) - 1 or 0 (filter set) */ /* */ /* If the new rule has a given expiration time, insert it into the list of */ /* expiring rules with the ones to be removed first added to the front of */ /* the list. The insertion is O(n) but it is kept sorted for quick scans at */ /* expiration interval checks. 
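*/

/*
 * Illustrative sketch (not compiled): how frrequest() above arms an
 * expiring rule.  On entry fr_die holds a relative lifetime which
 * ipf_rule_expire_insert() converts to an absolute tick count via
 * IPF_TTLVAL().  example_arm_expire() is hypothetical, and the unit
 * of the initial fr_die value is assumed to be seconds.
 */
#if 0
static void
example_arm_expire(ipf_main_softc_t *softc, frentry_t *f, int set)
{
	f->fr_die = 30;		/* assumed: roughly 30 seconds from now */
	ipf_rule_expire_insert(softc, f, set);
}
#endif

/*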
*/ /* ------------------------------------------------------------------------ */ static void ipf_rule_expire_insert(ipf_main_softc_t *softc, frentry_t *f, int set) { frentry_t *fr; /* */ f->fr_die = softc->ipf_ticks + IPF_TTLVAL(f->fr_die); for (fr = softc->ipf_rule_explist[set]; fr != NULL; fr = fr->fr_dnext) { if (f->fr_die < fr->fr_die) break; if (fr->fr_dnext == NULL) { /* * We've got to the last rule and everything * wanted to be expired before this new node, * so we have to tack it on the end... */ fr->fr_dnext = f; f->fr_pdnext = &fr->fr_dnext; fr = NULL; break; } } if (softc->ipf_rule_explist[set] == NULL) { softc->ipf_rule_explist[set] = f; f->fr_pdnext = &softc->ipf_rule_explist[set]; } else if (fr != NULL) { f->fr_dnext = fr; f->fr_pdnext = fr->fr_pdnext; fr->fr_pdnext = &f->fr_dnext; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_findlookup */ /* Returns: NULL = failure, else success */ /* Parameters: softc(I) - pointer to soft context main structure */ /* unit(I) - ipf device we want to find match for */ /* fp(I) - rule for which lookup is for */ /* addrp(I) - pointer to lookup information in address struct */ /* maskp(O) - pointer to lookup information for storage */ /* */ /* When using pools and hash tables to store addresses for matching in */ /* rules, it is necessary to resolve both the object referred to by the */ /* name or address (and return that pointer) and also provide the means by */ /* which to determine if an address belongs to that object to make the */ /* packet matching quicker. */ /* ------------------------------------------------------------------------ */ static void * ipf_findlookup(ipf_main_softc_t *softc, int unit, frentry_t *fr, i6addr_t *addrp, i6addr_t *maskp) { void *ptr = NULL; switch (addrp->iplookupsubtype) { case 0 : ptr = ipf_lookup_res_num(softc, unit, addrp->iplookuptype, addrp->iplookupnum, &maskp->iplookupfunc); break; case 1 : if (addrp->iplookupname < 0) break; if (addrp->iplookupname >= fr->fr_namelen) break; ptr = ipf_lookup_res_name(softc, unit, addrp->iplookuptype, fr->fr_names + addrp->iplookupname, &maskp->iplookupfunc); break; default : break; } return (ptr); } /* ------------------------------------------------------------------------ */ /* Function: ipf_funcinit */ /* Returns: int - 0 == success, else ESRCH: cannot resolve rule details */ /* Parameters: softc(I) - pointer to soft context main structure */ /* fr(I) - pointer to filter rule */ /* */ /* If a rule is a call rule, then check if the function it points to needs */ /* an init function to be called now the rule has been loaded. */ /* ------------------------------------------------------------------------ */ static int ipf_funcinit(ipf_main_softc_t *softc, frentry_t *fr) { ipfunc_resolve_t *ft; int err; IPFERROR(34); err = ESRCH; for (ft = ipf_availfuncs; ft->ipfu_addr != NULL; ft++) if (ft->ipfu_addr == fr->fr_func) { err = 0; if (ft->ipfu_init != NULL) err = (*ft->ipfu_init)(softc, fr); break; } return (err); } /* ------------------------------------------------------------------------ */ /* Function: ipf_funcfini */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* fr(I) - pointer to filter rule */ /* */ /* For a given filter rule, call the matching "fini" function if the rule */ /* is using a known function that would have resulted in the "init" being */ /* called for ealier. 
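*/

/*
 * Illustrative sketch (not compiled): the validation frrequest() above
 * applies to user-supplied call rules, using ipf_findfunc() (defined
 * below) to refuse a fr_func address that is not in ipf_availfuncs.
 * example_call_ok() is a hypothetical wrapper.
 */
#if 0
static int
example_call_ok(frentry_t *fr)
{
	/* Non-NULL only for functions registered in ipf_availfuncs. */
	return (ipf_findfunc(fr->fr_func) != NULL);
}
#endif

/*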
*/ /* ------------------------------------------------------------------------ */ static void ipf_funcfini(ipf_main_softc_t *softc, frentry_t *fr) { ipfunc_resolve_t *ft; for (ft = ipf_availfuncs; ft->ipfu_addr != NULL; ft++) if (ft->ipfu_addr == fr->fr_func) { if (ft->ipfu_fini != NULL) (void) (*ft->ipfu_fini)(softc, fr); break; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_findfunc */ /* Returns: ipfunc_t - pointer to function if found, else NULL */ /* Parameters: funcptr(I) - function pointer to lookup */ /* */ /* Look for a function in the table of known functions. */ /* ------------------------------------------------------------------------ */ static ipfunc_t ipf_findfunc(ipfunc_t funcptr) { ipfunc_resolve_t *ft; for (ft = ipf_availfuncs; ft->ipfu_addr != NULL; ft++) if (ft->ipfu_addr == funcptr) return (funcptr); return (NULL); } /* ------------------------------------------------------------------------ */ /* Function: ipf_resolvefunc */ /* Returns: int - 0 == success, else error */ /* Parameters: data(IO) - ioctl data pointer to ipfunc_resolve_t struct */ /* */ /* Copy in a ipfunc_resolve_t structure and then fill in the missing field. */ /* This will either be the function name (if the pointer is set) or the */ /* function pointer if the name is set. When found, fill in the other one */ /* so that the entire, complete, structure can be copied back to user space.*/ /* ------------------------------------------------------------------------ */ int ipf_resolvefunc(ipf_main_softc_t *softc, void *data) { ipfunc_resolve_t res, *ft; int error; error = BCOPYIN(data, &res, sizeof(res)); if (error != 0) { IPFERROR(123); return (EFAULT); } if (res.ipfu_addr == NULL && res.ipfu_name[0] != '\0') { for (ft = ipf_availfuncs; ft->ipfu_addr != NULL; ft++) if (strncmp(res.ipfu_name, ft->ipfu_name, sizeof(res.ipfu_name)) == 0) { res.ipfu_addr = ft->ipfu_addr; res.ipfu_init = ft->ipfu_init; if (COPYOUT(&res, data, sizeof(res)) != 0) { IPFERROR(35); return (EFAULT); } return (0); } } if (res.ipfu_addr != NULL && res.ipfu_name[0] == '\0') { for (ft = ipf_availfuncs; ft->ipfu_addr != NULL; ft++) if (ft->ipfu_addr == res.ipfu_addr) { (void) strncpy(res.ipfu_name, ft->ipfu_name, sizeof(res.ipfu_name)); res.ipfu_init = ft->ipfu_init; if (COPYOUT(&res, data, sizeof(res)) != 0) { IPFERROR(36); return (EFAULT); } return (0); } } IPFERROR(37); return (ESRCH); } #if !defined(_KERNEL) || SOLARIS /* * From: NetBSD * ppsratecheck(): packets (or events) per second limitation. */ int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) /* maxpps: maximum pps allowed */ { struct timeval tv, delta; int rv; GETKTIME(&tv); delta.tv_sec = tv.tv_sec - lasttime->tv_sec; delta.tv_usec = tv.tv_usec - lasttime->tv_usec; if (delta.tv_usec < 0) { delta.tv_sec--; delta.tv_usec += 1000000; } /* * check for 0,0 is so that the message will be seen at least once. * if more than one second have passed since the last update of * lasttime, reset the counter. * * we do increment *curpps even in *curpps < maxpps case, as some may * try to use *curpps for stat purposes as well. 
*/ if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) || delta.tv_sec >= 1) { *lasttime = tv; *curpps = 0; rv = 1; } else if (maxpps < 0) rv = 1; else if (*curpps < maxpps) rv = 1; else rv = 0; *curpps = *curpps + 1; return (rv); } #endif /* ------------------------------------------------------------------------ */ /* Function: ipf_derefrule */ /* Returns: int - 0 == rule freed up, else rule not freed */ /* Parameters: fr(I) - pointer to filter rule */ /* */ /* Decrement the reference counter to a rule by one. If it reaches zero, */ /* free it and any associated storage space being used by it. */ /* ------------------------------------------------------------------------ */ int ipf_derefrule(ipf_main_softc_t *softc, frentry_t **frp) { frentry_t *fr; frdest_t *fdp; fr = *frp; *frp = NULL; MUTEX_ENTER(&fr->fr_lock); fr->fr_ref--; if (fr->fr_ref == 0) { MUTEX_EXIT(&fr->fr_lock); MUTEX_DESTROY(&fr->fr_lock); ipf_funcfini(softc, fr); fdp = &fr->fr_tif; if (fdp->fd_type == FRD_DSTLIST) ipf_lookup_deref(softc, IPLT_DSTLIST, fdp->fd_ptr); fdp = &fr->fr_rif; if (fdp->fd_type == FRD_DSTLIST) ipf_lookup_deref(softc, IPLT_DSTLIST, fdp->fd_ptr); fdp = &fr->fr_dif; if (fdp->fd_type == FRD_DSTLIST) ipf_lookup_deref(softc, IPLT_DSTLIST, fdp->fd_ptr); if ((fr->fr_type & ~FR_T_BUILTIN) == FR_T_IPF && fr->fr_satype == FRI_LOOKUP) ipf_lookup_deref(softc, fr->fr_srctype, fr->fr_srcptr); if ((fr->fr_type & ~FR_T_BUILTIN) == FR_T_IPF && fr->fr_datype == FRI_LOOKUP) ipf_lookup_deref(softc, fr->fr_dsttype, fr->fr_dstptr); if (fr->fr_grp != NULL) ipf_group_del(softc, fr->fr_grp, fr); if (fr->fr_grphead != NULL) ipf_group_del(softc, fr->fr_grphead, fr); if (fr->fr_icmpgrp != NULL) ipf_group_del(softc, fr->fr_icmpgrp, fr); if ((fr->fr_flags & FR_COPIED) != 0) { if (fr->fr_dsize) { KFREES(fr->fr_data, fr->fr_dsize); } KFREES(fr, fr->fr_size); return (0); } return (1); } else { MUTEX_EXIT(&fr->fr_lock); } return (-1); } /* ------------------------------------------------------------------------ */ /* Function: ipf_grpmapinit */ /* Returns: int - 0 == success, else ESRCH because table entry not found*/ /* Parameters: fr(I) - pointer to rule to find hash table for */ /* */ /* Looks for group hash table fr_arg and stores a pointer to it in fr_ptr. */ /* fr_ptr is later used by ipf_srcgrpmap and ipf_dstgrpmap. */ /* ------------------------------------------------------------------------ */ static int ipf_grpmapinit(ipf_main_softc_t *softc, frentry_t *fr) { char name[FR_GROUPLEN]; iphtable_t *iph; (void) snprintf(name, sizeof(name), "%d", fr->fr_arg); iph = ipf_lookup_find_htable(softc, IPL_LOGIPF, name); if (iph == NULL) { IPFERROR(38); return (ESRCH); } if ((iph->iph_flags & FR_INOUT) != (fr->fr_flags & FR_INOUT)) { IPFERROR(39); return (ESRCH); } iph->iph_ref++; fr->fr_ptr = iph; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_grpmapfini */ /* Returns: int - 0 == success, else ESRCH because table entry not found*/ /* Parameters: softc(I) - pointer to soft context main structure */ /* fr(I) - pointer to rule to release hash table for */ /* */ /* For rules that have had ipf_grpmapinit called, ipf_lookup_deref needs to */ /* be called to undo what ipf_grpmapinit caused to be done. 
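*/

/*
 * Illustrative sketch (not compiled): dropping a rule reference with
 * ipf_derefrule() above.  The function clears the caller's pointer so
 * a freed rule cannot be used again by accident.  example_drop_ref()
 * is a hypothetical caller.
 */
#if 0
static void
example_drop_ref(ipf_main_softc_t *softc, frentry_t *fr)
{
	(void) ipf_derefrule(softc, &fr);
	/* fr is NULL here; a return of 0 would have meant "freed". */
}
#endif

/*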
*/ /* ------------------------------------------------------------------------ */ static int ipf_grpmapfini(ipf_main_softc_t *softc, frentry_t *fr) { iphtable_t *iph; iph = fr->fr_ptr; if (iph != NULL) ipf_lookup_deref(softc, IPLT_HASH, iph); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_srcgrpmap */ /* Returns: frentry_t * - pointer to "new last matching" rule or NULL */ /* Parameters: fin(I) - pointer to packet information */ /* passp(IO) - pointer to current/new filter decision (unused) */ /* */ /* Look for a rule group head in a hash table, using the source address as */ /* the key, and descend into that group and continue matching rules against */ /* the packet. */ /* ------------------------------------------------------------------------ */ frentry_t * ipf_srcgrpmap(fr_info_t *fin, u_32_t *passp) { frgroup_t *fg; void *rval; rval = ipf_iphmfindgroup(fin->fin_main_soft, fin->fin_fr->fr_ptr, &fin->fin_src); if (rval == NULL) return (NULL); fg = rval; fin->fin_fr = fg->fg_start; (void) ipf_scanlist(fin, *passp); return (fin->fin_fr); } /* ------------------------------------------------------------------------ */ /* Function: ipf_dstgrpmap */ /* Returns: frentry_t * - pointer to "new last matching" rule or NULL */ /* Parameters: fin(I) - pointer to packet information */ /* passp(IO) - pointer to current/new filter decision (unused) */ /* */ /* Look for a rule group head in a hash table, using the destination */ /* address as the key, and descend into that group and continue matching */ /* rules against the packet. */ /* ------------------------------------------------------------------------ */ frentry_t * ipf_dstgrpmap(fr_info_t *fin, u_32_t *passp) { frgroup_t *fg; void *rval; rval = ipf_iphmfindgroup(fin->fin_main_soft, fin->fin_fr->fr_ptr, &fin->fin_dst); if (rval == NULL) return (NULL); fg = rval; fin->fin_fr = fg->fg_start; (void) ipf_scanlist(fin, *passp); return (fin->fin_fr); } /* * Queue functions * =============== * These functions manage objects on queues for efficient timeouts. There * are a number of system defined queues as well as user defined timeouts. * It is expected that a lock is held in the domain in which the queue * belongs (i.e. either state or NAT) when calling any of these functions * that prevents ipf_freetimeoutqueue() from being called at the same time * as any other. */ /* ------------------------------------------------------------------------ */ /* Function: ipf_addtimeoutqueue */ /* Returns: struct ifqtq * - NULL if malloc fails, else pointer to */ /* timeout queue with given interval. */ /* Parameters: parent(I) - pointer to pointer to parent node of this list */ /* of interface queues. */ /* seconds(I) - timeout value in seconds for this queue. */ /* */ /* This routine first looks for a timeout queue that matches the interval */ /* being requested. If it finds one, increments the reference counter and */ /* returns a pointer to it. If none are found, it allocates a new one and */ /* inserts it at the top of the list. */ /* */ /* Locking. */ /* It is assumed that the caller of this function has an appropriate lock */ /* held (exclusively) in the domain that encompases 'parent'. 
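*/

/*
 * Illustrative sketch (not compiled): obtaining a user-defined timeout
 * queue.  ipf_addtimeoutqueue() below either finds an existing queue
 * with the same ttl and bumps its reference count, or allocates a new
 * one.  example_user_queue() and the 120 second value are hypothetical.
 */
#if 0
static ipftq_t *
example_user_queue(ipf_main_softc_t *softc, ipftq_t **parent)
{
	/* Shared with any other caller that asked for 120 seconds. */
	return (ipf_addtimeoutqueue(softc, parent, 120));
}
#endif

/*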
*/ /* ------------------------------------------------------------------------ */ ipftq_t * ipf_addtimeoutqueue(ipf_main_softc_t *softc, ipftq_t **parent, u_int seconds) { ipftq_t *ifq; u_int period; period = seconds * IPF_HZ_DIVIDE; MUTEX_ENTER(&softc->ipf_timeoutlock); for (ifq = *parent; ifq != NULL; ifq = ifq->ifq_next) { if (ifq->ifq_ttl == period) { /* * Reset the delete flag, if set, so the structure * gets reused rather than freed and reallocated. */ MUTEX_ENTER(&ifq->ifq_lock); ifq->ifq_flags &= ~IFQF_DELETE; ifq->ifq_ref++; MUTEX_EXIT(&ifq->ifq_lock); MUTEX_EXIT(&softc->ipf_timeoutlock); return (ifq); } } KMALLOC(ifq, ipftq_t *); if (ifq != NULL) { MUTEX_NUKE(&ifq->ifq_lock); IPFTQ_INIT(ifq, period, "ipftq mutex"); ifq->ifq_next = *parent; ifq->ifq_pnext = parent; ifq->ifq_flags = IFQF_USER; ifq->ifq_ref++; *parent = ifq; softc->ipf_userifqs++; } MUTEX_EXIT(&softc->ipf_timeoutlock); return (ifq); } /* ------------------------------------------------------------------------ */ /* Function: ipf_deletetimeoutqueue */ /* Returns: int - new reference count value of the timeout queue */ /* Parameters: ifq(I) - timeout queue which is losing a reference. */ /* Locks: ifq->ifq_lock */ /* */ /* This routine must be called when we're discarding a pointer to a timeout */ /* queue object, taking care of the reference counter. */ /* */ /* Now that this just sets a DELETE flag, it requires the expire code to */ /* check the list of user defined timeout queues and call the free function */ /* below (currently commented out) to stop memory leaking. It is done this */ /* way because the locking may not be sufficient to safely do a free when */ /* this function is called. */ /* ------------------------------------------------------------------------ */ int ipf_deletetimeoutqueue(ipftq_t *ifq) { ifq->ifq_ref--; if ((ifq->ifq_ref == 0) && ((ifq->ifq_flags & IFQF_USER) != 0)) { ifq->ifq_flags |= IFQF_DELETE; } return (ifq->ifq_ref); } /* ------------------------------------------------------------------------ */ /* Function: ipf_freetimeoutqueue */ /* Parameters: ifq(I) - timeout queue which is losing a reference. */ /* Returns: Nil */ /* */ /* Locking: */ /* It is assumed that the caller of this function has an appropriate lock */ /* held (exclusively) in the domain that encompases the callers "domain". */ /* The ifq_lock for this structure should not be held. */ /* */ /* Remove a user defined timeout queue from the list of queues it is in and */ /* tidy up after this is done. */ /* ------------------------------------------------------------------------ */ void ipf_freetimeoutqueue(ipf_main_softc_t *softc, ipftq_t *ifq) { if (((ifq->ifq_flags & IFQF_DELETE) == 0) || (ifq->ifq_ref != 0) || ((ifq->ifq_flags & IFQF_USER) == 0)) { printf("ipf_freetimeoutqueue(%lx) flags 0x%x ttl %d ref %d\n", (u_long)ifq, ifq->ifq_flags, ifq->ifq_ttl, ifq->ifq_ref); return; } /* * Remove from its position in the list. */ *ifq->ifq_pnext = ifq->ifq_next; if (ifq->ifq_next != NULL) ifq->ifq_next->ifq_pnext = ifq->ifq_pnext; ifq->ifq_next = NULL; ifq->ifq_pnext = NULL; MUTEX_DESTROY(&ifq->ifq_lock); ATOMIC_DEC(softc->ipf_userifqs); KFREE(ifq); } /* ------------------------------------------------------------------------ */ /* Function: ipf_deletequeueentry */ /* Returns: Nil */ /* Parameters: tqe(I) - timeout queue entry to delete */ /* */ /* Remove a tail queue entry from its queue and make it an orphan. */ /* ipf_deletetimeoutqueue is called to make sure the reference count on the */ /* queue is correct. 
We can't, however, call ipf_freetimeoutqueue because */ /* the correct lock(s) may not be held that would make it safe to do so. */ /* ------------------------------------------------------------------------ */ void ipf_deletequeueentry(ipftqent_t *tqe) { ipftq_t *ifq; ifq = tqe->tqe_ifq; MUTEX_ENTER(&ifq->ifq_lock); if (tqe->tqe_pnext != NULL) { *tqe->tqe_pnext = tqe->tqe_next; if (tqe->tqe_next != NULL) tqe->tqe_next->tqe_pnext = tqe->tqe_pnext; else /* we must be the tail anyway */ ifq->ifq_tail = tqe->tqe_pnext; tqe->tqe_pnext = NULL; tqe->tqe_ifq = NULL; } (void) ipf_deletetimeoutqueue(ifq); ASSERT(ifq->ifq_ref > 0); MUTEX_EXIT(&ifq->ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_queuefront */ /* Returns: Nil */ /* Parameters: tqe(I) - pointer to timeout queue entry */ /* */ /* Move a queue entry to the front of the queue, if it isn't already there. */ /* ------------------------------------------------------------------------ */ void ipf_queuefront(ipftqent_t *tqe) { ipftq_t *ifq; ifq = tqe->tqe_ifq; if (ifq == NULL) return; MUTEX_ENTER(&ifq->ifq_lock); if (ifq->ifq_head != tqe) { *tqe->tqe_pnext = tqe->tqe_next; if (tqe->tqe_next) tqe->tqe_next->tqe_pnext = tqe->tqe_pnext; else ifq->ifq_tail = tqe->tqe_pnext; tqe->tqe_next = ifq->ifq_head; ifq->ifq_head->tqe_pnext = &tqe->tqe_next; ifq->ifq_head = tqe; tqe->tqe_pnext = &ifq->ifq_head; } MUTEX_EXIT(&ifq->ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_queueback */ /* Returns: Nil */ /* Parameters: ticks(I) - ipf tick time to use with this call */ /* tqe(I) - pointer to timeout queue entry */ /* */ /* Move a queue entry to the back of the queue, if it isn't already there. */ /* We use ticks to calculate the expiration and mark for when we last */ /* touched the structure. */ /* ------------------------------------------------------------------------ */ void ipf_queueback(u_long ticks, ipftqent_t *tqe) { ipftq_t *ifq; ifq = tqe->tqe_ifq; if (ifq == NULL) return; tqe->tqe_die = ticks + ifq->ifq_ttl; tqe->tqe_touched = ticks; MUTEX_ENTER(&ifq->ifq_lock); if (tqe->tqe_next != NULL) { /* at the end already ? */ /* * Remove from list */ *tqe->tqe_pnext = tqe->tqe_next; tqe->tqe_next->tqe_pnext = tqe->tqe_pnext; /* * Make it the last entry. */ tqe->tqe_next = NULL; tqe->tqe_pnext = ifq->ifq_tail; *ifq->ifq_tail = tqe; ifq->ifq_tail = &tqe->tqe_next; } MUTEX_EXIT(&ifq->ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_queueappend */ /* Returns: Nil */ /* Parameters: ticks(I) - ipf tick time to use with this call */ /* tqe(I) - pointer to timeout queue entry */ /* ifq(I) - pointer to timeout queue */ /* parent(I) - owning object pointer */ /* */ /* Add a new item to this queue and put it on the very end. */ /* We use ticks to calculate the expiration and mark for when we last */ /* touched the structure.
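*/ /* The append below relies on the pointer-to-pointer tail idiom used */ /* throughout these queues: ifq_tail always holds the address of the last */ /* tqe_next slot (or of ifq_head when the queue is empty), so insertion */ /* is the same stores in every case. A minimal sketch of the idiom, */ /* outside of ipf's types: */ /* */ /* *tail = item; (store into whatever slot tail points at) */ /* tail = &item->next; (the new last slot is this item's next) */ /* item->next = NULL; */ /*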
*/ /* ------------------------------------------------------------------------ */ void ipf_queueappend(u_long ticks, ipftqent_t *tqe, ipftq_t *ifq, void *parent) { MUTEX_ENTER(&ifq->ifq_lock); tqe->tqe_parent = parent; tqe->tqe_pnext = ifq->ifq_tail; *ifq->ifq_tail = tqe; ifq->ifq_tail = &tqe->tqe_next; tqe->tqe_next = NULL; tqe->tqe_ifq = ifq; tqe->tqe_die = ticks + ifq->ifq_ttl; tqe->tqe_touched = ticks; ifq->ifq_ref++; MUTEX_EXIT(&ifq->ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_movequeue */ /* Returns: Nil */ /* Parameters: ticks(I) - ipf tick time to use with this call */ /* tqe(I) - pointer to timeout queue entry to move */ /* oifq(I) - old timeout queue the entry is on */ /* nifq(I) - new timeout queue to put the entry on */ /* */ /* Move a queue entry from one timeout queue to another timeout queue. */ /* If it notices that the current entry is already last and does not need */ /* to change queue, it simply returns. */ /* ------------------------------------------------------------------------ */ void ipf_movequeue(u_long ticks, ipftqent_t *tqe, ipftq_t *oifq, ipftq_t *nifq) { /* * If the queue hasn't changed and we last touched this entry at the * same ipf time, then we're not going to achieve anything by either * changing the ttl or moving it on the queue. */ if (oifq == nifq && tqe->tqe_touched == ticks) return; /* * For any of this to be outside the lock, there is a risk that two * packets entering simultaneously, with one changing to a different * queue and one not, could end up with things in a bizarre state. */ MUTEX_ENTER(&oifq->ifq_lock); tqe->tqe_touched = ticks; tqe->tqe_die = ticks + nifq->ifq_ttl; /* * Is the operation here going to be a no-op ? */ if (oifq == nifq) { if ((tqe->tqe_next == NULL) || (tqe->tqe_next->tqe_die == tqe->tqe_die)) { MUTEX_EXIT(&oifq->ifq_lock); return; } } /* * Remove from the old queue */ *tqe->tqe_pnext = tqe->tqe_next; if (tqe->tqe_next) tqe->tqe_next->tqe_pnext = tqe->tqe_pnext; else oifq->ifq_tail = tqe->tqe_pnext; tqe->tqe_next = NULL; /* * If we're moving from one queue to another, release the * lock on the old queue and get a lock on the new queue. * For user defined queues, if we're moving off it, call * delete in case it can now be freed. */ if (oifq != nifq) { tqe->tqe_ifq = NULL; (void) ipf_deletetimeoutqueue(oifq); MUTEX_EXIT(&oifq->ifq_lock); MUTEX_ENTER(&nifq->ifq_lock); tqe->tqe_ifq = nifq; nifq->ifq_ref++; } /* * Add to the bottom of the new queue */ tqe->tqe_pnext = nifq->ifq_tail; *nifq->ifq_tail = tqe; nifq->ifq_tail = &tqe->tqe_next; MUTEX_EXIT(&nifq->ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_updateipid */ /* Returns: int - 0 == success, -1 == error (packet should be dropped) */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* When we are doing NAT, change the IP ID of every packet to represent a */ /* single sequence of packets coming from the host, hiding any host */ /* specific sequencing that might otherwise be revealed. If the packet is */ /* a fragment, then store the 'new' IPid in the fragment cache and look up */ /* the fragment cache for non-leading fragments. If a non-leading fragment */ /* has no match in the cache, return an error.
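*/ /* The checksum fixup below is the standard incremental update of */ /* RFC 1624: when a 16 bit field changes from m to m', the ones */ /* complement checksum HC can be patched without a full recompute as */ /* */ /* HC' = ~(~HC + ~m + m') (ones complement arithmetic) */ /* */ /* which is what the CALC_SUMD() delta plus the two fold-and-carry lines */ /* compute for the ip_id change. */ /*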
*/ /* ------------------------------------------------------------------------ */ static int ipf_updateipid(fr_info_t *fin) { u_short id, ido, sums; u_32_t sumd, sum; ip_t *ip; ip = fin->fin_ip; ido = ntohs(ip->ip_id); if (fin->fin_off != 0) { sum = ipf_frag_ipidknown(fin); if (sum == 0xffffffff) return (-1); sum &= 0xffff; id = (u_short)sum; ip->ip_id = htons(id); } else { - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); id = ntohs(ip->ip_id); if ((fin->fin_flx & FI_FRAG) != 0) (void) ipf_frag_ipidnew(fin, (u_32_t)id); } if (id == ido) return (0); CALC_SUMD(ido, id, sumd); /* DESTRUCTIVE MACRO! id,ido change */ sum = (~ntohs(ip->ip_sum)) & 0xffff; sum += sumd; sum = (sum >> 16) + (sum & 0xffff); sum = (sum >> 16) + (sum & 0xffff); sums = ~(u_short)sum; ip->ip_sum = htons(sums); return (0); } #ifdef NEED_FRGETIFNAME /* ------------------------------------------------------------------------ */ /* Function: ipf_getifname */ /* Returns: char * - pointer to interface name */ /* Parameters: ifp(I) - pointer to network interface */ /* buffer(O) - pointer to where to store interface name */ /* */ /* Constructs an interface name in the buffer passed. The buffer passed is */ /* expected to be at least LIFNAMSIZ bytes big. If buffer is passed in */ /* as a NULL pointer then return a pointer to a static array. */ /* ------------------------------------------------------------------------ */ char * ipf_getifname(struct ifnet *ifp, char *buffer) { static char namebuf[LIFNAMSIZ]; # if SOLARIS || defined(__FreeBSD__) int unit, space; char temp[20]; char *s; # endif if (buffer == NULL) buffer = namebuf; (void) strncpy(buffer, ifp->if_name, LIFNAMSIZ); buffer[LIFNAMSIZ - 1] = '\0'; # if SOLARIS || defined(__FreeBSD__) for (s = buffer; *s; s++) ; unit = ifp->if_unit; space = LIFNAMSIZ - (s - buffer); if ((space > 0) && (unit >= 0)) { (void) snprintf(temp, sizeof(temp), "%d", unit); (void) strncpy(s, temp, space); } # endif return (buffer); } #endif /* ------------------------------------------------------------------------ */ /* Function: ipf_ioctlswitch */ /* Returns: int - -1 continue processing, else ioctl return value */ /* Parameters: unit(I) - device unit opened */ /* data(I) - pointer to ioctl data */ /* cmd(I) - ioctl command */ /* mode(I) - mode value */ /* uid(I) - uid making the ioctl call */ /* ctx(I) - pointer to context data */ /* */ /* Based on the value of unit, call the appropriate ioctl handler or return */ /* EIO if ipfilter is not running. Also checks if write perms are req'd */ /* for the device in order to execute the ioctl. A special case is made for */ /* SIOCIPFINTERROR so that the same code isn't required in every handler. */ /* The context data pointer is passed through as this is used as the key */ /* for locating a matching token for continued access for walking lists, */ /* etc.
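*/ /* A hedged sketch of the userland side of SIOCIPFINTERROR (assumed, */ /* not taken from this file): after any failing ioctl a tool can ask */ /* which internal IPFERROR() number was recorded, for example */ /* */ /* if (ioctl(fd, SIOCADAFR, &obj) == -1) { */ /* int interror; */ /* (void) ioctl(fd, SIOCIPFINTERROR, &interror); */ /* fprintf(stderr, "ipf error %d\n", interror); */ /* } */ /*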
*/ /* ------------------------------------------------------------------------ */ int ipf_ioctlswitch(ipf_main_softc_t *softc, int unit, void *data, ioctlcmd_t cmd, int mode, int uid, void *ctx) { int error = 0; switch (cmd) { case SIOCIPFINTERROR : error = BCOPYOUT(&softc->ipf_interror, data, sizeof(softc->ipf_interror)); if (error != 0) { IPFERROR(40); error = EFAULT; } return (error); default : break; } switch (unit) { case IPL_LOGIPF : error = ipf_ipf_ioctl(softc, data, cmd, mode, uid, ctx); break; case IPL_LOGNAT : if (softc->ipf_running > 0) { error = ipf_nat_ioctl(softc, data, cmd, mode, uid, ctx); } else { IPFERROR(42); error = EIO; } break; case IPL_LOGSTATE : if (softc->ipf_running > 0) { error = ipf_state_ioctl(softc, data, cmd, mode, uid, ctx); } else { IPFERROR(43); error = EIO; } break; case IPL_LOGAUTH : if (softc->ipf_running > 0) { error = ipf_auth_ioctl(softc, data, cmd, mode, uid, ctx); } else { IPFERROR(44); error = EIO; } break; case IPL_LOGSYNC : if (softc->ipf_running > 0) { error = ipf_sync_ioctl(softc, data, cmd, mode, uid, ctx); } else { error = EIO; IPFERROR(45); } break; case IPL_LOGSCAN : #ifdef IPFILTER_SCAN if (softc->ipf_running > 0) error = ipf_scan_ioctl(softc, data, cmd, mode, uid, ctx); else #endif { error = EIO; IPFERROR(46); } break; case IPL_LOGLOOKUP : if (softc->ipf_running > 0) { error = ipf_lookup_ioctl(softc, data, cmd, mode, uid, ctx); } else { error = EIO; IPFERROR(47); } break; default : IPFERROR(48); error = EIO; break; } return (error); } /* * This array defines the expected size of objects coming into the kernel * for the various recognised object types. The first column is flags (see * below), 2nd column is current size, 3rd column is the version number of * when the current size became current. * Flags: * 1 = minimum size, not absolute size */ static const int ipf_objbytes[IPFOBJ_COUNT][3] = { { 1, sizeof(struct frentry), 5010000 }, /* 0 */ { 1, sizeof(struct friostat), 5010000 }, { 0, sizeof(struct fr_info), 5010000 }, { 0, sizeof(struct ipf_authstat), 4010100 }, { 0, sizeof(struct ipfrstat), 5010000 }, { 1, sizeof(struct ipnat), 5010000 }, /* 5 */ { 0, sizeof(struct natstat), 5010000 }, { 0, sizeof(struct ipstate_save), 5010000 }, { 1, sizeof(struct nat_save), 5010000 }, { 0, sizeof(struct natlookup), 5010000 }, { 1, sizeof(struct ipstate), 5010000 }, /* 10 */ { 0, sizeof(struct ips_stat), 5010000 }, { 0, sizeof(struct frauth), 5010000 }, { 0, sizeof(struct ipftune), 4010100 }, { 0, sizeof(struct nat), 5010000 }, { 0, sizeof(struct ipfruleiter), 4011400 }, /* 15 */ { 0, sizeof(struct ipfgeniter), 4011400 }, { 0, sizeof(struct ipftable), 4011400 }, { 0, sizeof(struct ipflookupiter), 4011400 }, { 0, sizeof(struct ipftq) * IPF_TCP_NSTATES }, { 1, 0, 0 }, /* IPFEXPR */ { 0, 0, 0 }, /* PROXYCTL */ { 0, sizeof (struct fripf), 5010000 } }; /* ------------------------------------------------------------------------ */ /* Function: ipf_inobj */ /* Returns: int - 0 = success, else failure */ /* Parameters: softc(I) - soft context pointerto work with */ /* data(I) - pointer to ioctl data */ /* objp(O) - where to store ipfobj structure */ /* ptr(I) - pointer to data to copy out */ /* type(I) - type of structure being moved */ /* */ /* Copy in the contents of what the ipfobj_t points to. In future, we */ /* add things to check for version numbers, sizes, etc, to make it backward */ /* compatible at the ABI for user land. */ /* If objp is not NULL then we assume that the caller wants to see what is */ /* in the ipfobj_t structure being copied in. 
As an example, this can tell */ /* the caller what version of ipfilter the ioctl program was written to. */ /* ------------------------------------------------------------------------ */ int ipf_inobj(ipf_main_softc_t *softc, void *data, ipfobj_t *objp, void *ptr, int type) { ipfobj_t obj; int error; int size; if ((type < 0) || (type >= IPFOBJ_COUNT)) { IPFERROR(49); return (EINVAL); } if (objp == NULL) objp = &obj; error = BCOPYIN(data, objp, sizeof(*objp)); if (error != 0) { IPFERROR(124); return (EFAULT); } if (objp->ipfo_type != type) { IPFERROR(50); return (EINVAL); } if (objp->ipfo_rev >= ipf_objbytes[type][2]) { if ((ipf_objbytes[type][0] & 1) != 0) { if (objp->ipfo_size < ipf_objbytes[type][1]) { IPFERROR(51); return (EINVAL); } size = ipf_objbytes[type][1]; } else if (objp->ipfo_size == ipf_objbytes[type][1]) { size = objp->ipfo_size; } else { IPFERROR(52); return (EINVAL); } error = COPYIN(objp->ipfo_ptr, ptr, size); if (error != 0) { IPFERROR(55); error = EFAULT; } } else { #ifdef IPFILTER_COMPAT error = ipf_in_compat(softc, objp, ptr, 0); #else IPFERROR(54); error = EINVAL; #endif } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_inobjsz */ /* Returns: int - 0 = success, else failure */ /* Parameters: softc(I) - soft context pointer to work with */ /* data(I) - pointer to ioctl data */ /* ptr(I) - pointer to store real data in */ /* type(I) - type of structure being moved */ /* sz(I) - size of data to copy */ /* */ /* As per ipf_inobj, except the size of the object to copy in is passed in */ /* but it must not be smaller than the size defined for the type and the */ /* type must allow for varied sized objects. The extra requirement here is */ /* that sz must match the size of the object being passed in - this is */ /* not possible nor required in ipf_inobj(). */ /* ------------------------------------------------------------------------ */ int ipf_inobjsz(ipf_main_softc_t *softc, void *data, void *ptr, int type, int sz) { ipfobj_t obj; int error; if ((type < 0) || (type >= IPFOBJ_COUNT)) { IPFERROR(56); return (EINVAL); } error = BCOPYIN(data, &obj, sizeof(obj)); if (error != 0) { IPFERROR(125); return (EFAULT); } if (obj.ipfo_type != type) { IPFERROR(58); return (EINVAL); } if (obj.ipfo_rev >= ipf_objbytes[type][2]) { if (((ipf_objbytes[type][0] & 1) == 0) || (sz < ipf_objbytes[type][1])) { IPFERROR(57); return (EINVAL); } error = COPYIN(obj.ipfo_ptr, ptr, sz); if (error != 0) { IPFERROR(61); error = EFAULT; } } else { #ifdef IPFILTER_COMPAT error = ipf_in_compat(softc, &obj, ptr, sz); #else IPFERROR(60); error = EINVAL; #endif } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_outobjsz */ /* Returns: int - 0 = success, else failure */ /* Parameters: data(I) - pointer to ioctl data */ /* ptr(I) - pointer to store real data in */ /* type(I) - type of structure being moved */ /* sz(I) - size of data to copy */ /* */ /* As per ipf_outobj, except the size of the object to copy out is passed in*/ /* but it must not be smaller than the size defined for the type and the */ /* type must allow for varied sized objects. The extra requirement here is */ /* that sz must match the size of the object being passed in - this is */ /* not possible nor required in ipf_outobj().
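*/ /* For orientation, a hedged sketch of how userland wraps a request in */ /* an ipfobj_t for these copy routines (the ioctl chosen here is just an */ /* example): */ /* */ /* ipfobj_t obj; */ /* friostat_t fio; */ /* bzero(&obj, sizeof(obj)); */ /* obj.ipfo_rev = IPFILTER_VERSION; */ /* obj.ipfo_type = IPFOBJ_IPFSTAT; */ /* obj.ipfo_size = sizeof(fio); */ /* obj.ipfo_ptr = &fio; */ /* if (ioctl(fd, SIOCGETFS, &obj) == -1) */ /* err(1, "SIOCGETFS"); */ /*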
*/ /* ------------------------------------------------------------------------ */ int ipf_outobjsz(ipf_main_softc_t *softc, void *data, void *ptr, int type, int sz) { ipfobj_t obj; int error; if ((type < 0) || (type >= IPFOBJ_COUNT)) { IPFERROR(62); return (EINVAL); } error = BCOPYIN(data, &obj, sizeof(obj)); if (error != 0) { IPFERROR(127); return (EFAULT); } if (obj.ipfo_type != type) { IPFERROR(63); return (EINVAL); } if (obj.ipfo_rev >= ipf_objbytes[type][2]) { if (((ipf_objbytes[type][0] & 1) == 0) || (sz < ipf_objbytes[type][1])) { IPFERROR(146); return (EINVAL); } error = COPYOUT(ptr, obj.ipfo_ptr, sz); if (error != 0) { IPFERROR(66); error = EFAULT; } } else { #ifdef IPFILTER_COMPAT error = ipf_out_compat(softc, &obj, ptr); #else IPFERROR(65); error = EINVAL; #endif } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_outobj */ /* Returns: int - 0 = success, else failure */ /* Parameters: data(I) - pointer to ioctl data */ /* ptr(I) - pointer to store real data in */ /* type(I) - type of structure being moved */ /* */ /* Copy out the contents of what ptr is to where ipfobj points to. In */ /* future, we add things to check for version numbers, sizes, etc, to make */ /* it backward compatible at the ABI for user land. */ /* ------------------------------------------------------------------------ */ int ipf_outobj(ipf_main_softc_t *softc, void *data, void *ptr, int type) { ipfobj_t obj; int error; if ((type < 0) || (type >= IPFOBJ_COUNT)) { IPFERROR(67); return (EINVAL); } error = BCOPYIN(data, &obj, sizeof(obj)); if (error != 0) { IPFERROR(126); return (EFAULT); } if (obj.ipfo_type != type) { IPFERROR(68); return (EINVAL); } if (obj.ipfo_rev >= ipf_objbytes[type][2]) { if ((ipf_objbytes[type][0] & 1) != 0) { if (obj.ipfo_size < ipf_objbytes[type][1]) { IPFERROR(69); return (EINVAL); } } else if (obj.ipfo_size != ipf_objbytes[type][1]) { IPFERROR(70); return (EINVAL); } error = COPYOUT(ptr, obj.ipfo_ptr, obj.ipfo_size); if (error != 0) { IPFERROR(73); error = EFAULT; } } else { #ifdef IPFILTER_COMPAT error = ipf_out_compat(softc, &obj, ptr); #else IPFERROR(72); error = EINVAL; #endif } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_outobjk */ /* Returns: int - 0 = success, else failure */ /* Parameters: obj(I) - pointer to data description structure */ /* ptr(I) - pointer to kernel data to copy out */ /* */ /* In the above functions, the ipfobj_t structure is copied into the kernel,*/ /* telling ipfilter how to copy out data. In this instance, the ipfobj_t is */ /* already populated with information and now we just need to use it. */ /* There is no need for this function to have a "type" parameter as there */ /* is no point in validating information that comes from the kernel with */ /* itself. 
*/ /* ------------------------------------------------------------------------ */ int ipf_outobjk(ipf_main_softc_t *softc, ipfobj_t *obj, void *ptr) { int type = obj->ipfo_type; int error; if ((type < 0) || (type >= IPFOBJ_COUNT)) { IPFERROR(147); return (EINVAL); } if (obj->ipfo_rev >= ipf_objbytes[type][2]) { if ((ipf_objbytes[type][0] & 1) != 0) { if (obj->ipfo_size < ipf_objbytes[type][1]) { IPFERROR(148); return (EINVAL); } } else if (obj->ipfo_size != ipf_objbytes[type][1]) { IPFERROR(149); return (EINVAL); } error = COPYOUT(ptr, obj->ipfo_ptr, obj->ipfo_size); if (error != 0) { IPFERROR(150); error = EFAULT; } } else { #ifdef IPFILTER_COMPAT error = ipf_out_compat(softc, obj, ptr); #else IPFERROR(151); error = EINVAL; #endif } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_checkl4sum */ /* Returns: int - 0 = good, -1 = bad, 1 = cannot check */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* If possible, calculate the layer 4 checksum for the packet. If this is */ /* not possible, return without indicating a failure or success but in a */ /* way that is distinguishable. This function should only be called by */ /* ipf_checkv6sum() for each platform. */ /* ------------------------------------------------------------------------ */ inline int ipf_checkl4sum(fr_info_t *fin) { u_short sum, hdrsum, *csump; udphdr_t *udp; int dosum; /* * If the packet isn't a fragment, isn't too short and otherwise * isn't already considered "bad", then validate the checksum. If * this check fails then consider the packet to be "bad". */ if ((fin->fin_flx & (FI_FRAG|FI_SHORT|FI_BAD)) != 0) return (1); DT2(l4sumo, int, fin->fin_out, int, (int)fin->fin_p); if (fin->fin_out == 1) { fin->fin_cksum = FI_CK_SUMOK; return (0); } csump = NULL; hdrsum = 0; dosum = 0; sum = 0; switch (fin->fin_p) { case IPPROTO_TCP : csump = &((tcphdr_t *)fin->fin_dp)->th_sum; dosum = 1; break; case IPPROTO_UDP : udp = fin->fin_dp; if (udp->uh_sum != 0) { csump = &udp->uh_sum; dosum = 1; } break; #ifdef USE_INET6 case IPPROTO_ICMPV6 : csump = &((struct icmp6_hdr *)fin->fin_dp)->icmp6_cksum; dosum = 1; break; #endif case IPPROTO_ICMP : csump = &((struct icmp *)fin->fin_dp)->icmp_cksum; dosum = 1; break; default : return (1); /*NOTREACHED*/ } if (csump != NULL) { hdrsum = *csump; if (fin->fin_p == IPPROTO_UDP && hdrsum == 0xffff) hdrsum = 0x0000; } if (dosum) { sum = fr_cksum(fin, fin->fin_ip, fin->fin_p, fin->fin_dp); } #if !defined(_KERNEL) if (sum == hdrsum) { FR_DEBUG(("checkl4sum: %hx == %hx\n", sum, hdrsum)); } else { FR_DEBUG(("checkl4sum: %hx != %hx\n", sum, hdrsum)); } #endif DT3(l4sums, u_short, hdrsum, u_short, sum, fr_info_t *, fin); #ifdef USE_INET6 if (hdrsum == sum || (sum == 0 && IP_V(fin->fin_ip) == 6)) { #else if (hdrsum == sum) { #endif fin->fin_cksum = FI_CK_SUMOK; return (0); } fin->fin_cksum = FI_CK_BAD; return (-1); } /* ------------------------------------------------------------------------ */ /* Function: ipf_ifpfillv4addr */ /* Returns: int - 0 = address update, -1 = address not updated */ /* Parameters: atype(I) - type of network address update to perform */ /* sin(I) - pointer to source of address information */ /* mask(I) - pointer to source of netmask information */ /* inp(I) - pointer to destination address store */ /* inpmask(I) - pointer to destination netmask store */ /* */ /* Given a type of network address update (atype) to perform, copy */ /* information from sin/mask into inp/inpmask.
If ipnmask is NULL then no */ /* netmask update is performed unless FRI_NETMASKED is passed as atype, in */ /* which case the operation fails. For all values of atype other than */ /* FRI_NETMASKED, if inpmask is non-NULL then the mask is set to an all 1s */ /* value. */ /* ------------------------------------------------------------------------ */ int ipf_ifpfillv4addr(int atype, struct sockaddr_in *sin, struct sockaddr_in *mask, struct in_addr *inp, struct in_addr *inpmask) { if (inpmask != NULL && atype != FRI_NETMASKED) inpmask->s_addr = 0xffffffff; if (atype == FRI_NETWORK || atype == FRI_NETMASKED) { if (atype == FRI_NETMASKED) { if (inpmask == NULL) return (-1); inpmask->s_addr = mask->sin_addr.s_addr; } inp->s_addr = sin->sin_addr.s_addr & mask->sin_addr.s_addr; } else { inp->s_addr = sin->sin_addr.s_addr; } return (0); } #ifdef USE_INET6 /* ------------------------------------------------------------------------ */ /* Function: ipf_ifpfillv6addr */ /* Returns: int - 0 = address update, -1 = address not updated */ /* Parameters: atype(I) - type of network address update to perform */ /* sin(I) - pointer to source of address information */ /* mask(I) - pointer to source of netmask information */ /* inp(I) - pointer to destination address store */ /* inpmask(I) - pointer to destination netmask store */ /* */ /* Given a type of network address update (atype) to perform, copy */ /* information from sin/mask into inp/inpmask. If ipnmask is NULL then no */ /* netmask update is performed unless FRI_NETMASKED is passed as atype, in */ /* which case the operation fails. For all values of atype other than */ /* FRI_NETMASKED, if inpmask is non-NULL then the mask is set to an all 1s */ /* value. */ /* ------------------------------------------------------------------------ */ int ipf_ifpfillv6addr(int atype, struct sockaddr_in6 *sin, struct sockaddr_in6 *mask, i6addr_t *inp, i6addr_t *inpmask) { i6addr_t *src, *and; src = (i6addr_t *)&sin->sin6_addr; and = (i6addr_t *)&mask->sin6_addr; if (inpmask != NULL && atype != FRI_NETMASKED) { inpmask->i6[0] = 0xffffffff; inpmask->i6[1] = 0xffffffff; inpmask->i6[2] = 0xffffffff; inpmask->i6[3] = 0xffffffff; } if (atype == FRI_NETWORK || atype == FRI_NETMASKED) { if (atype == FRI_NETMASKED) { if (inpmask == NULL) return (-1); inpmask->i6[0] = and->i6[0]; inpmask->i6[1] = and->i6[1]; inpmask->i6[2] = and->i6[2]; inpmask->i6[3] = and->i6[3]; } inp->i6[0] = src->i6[0] & and->i6[0]; inp->i6[1] = src->i6[1] & and->i6[1]; inp->i6[2] = src->i6[2] & and->i6[2]; inp->i6[3] = src->i6[3] & and->i6[3]; } else { inp->i6[0] = src->i6[0]; inp->i6[1] = src->i6[1]; inp->i6[2] = src->i6[2]; inp->i6[3] = src->i6[3]; } return (0); } #endif /* ------------------------------------------------------------------------ */ /* Function: ipf_matchtag */ /* Returns: 0 == mismatch, 1 == match. */ /* Parameters: tag1(I) - pointer to first tag to compare */ /* tag2(I) - pointer to second tag to compare */ /* */ /* Returns true (non-zero) or false(0) if the two tag structures can be */ /* considered to be a match or not match, respectively. The tag is 16 */ /* bytes long (16 characters) but that is overlayed with 4 32bit ints so */ /* compare the ints instead, for speed. tag1 is the master of the */ /* comparison. This function should only be called with both tag1 and tag2 */ /* as non-NULL pointers. 
*/ /* ------------------------------------------------------------------------ */ int ipf_matchtag(ipftag_t *tag1, ipftag_t *tag2) { if (tag1 == tag2) return (1); if ((tag1->ipt_num[0] == 0) && (tag2->ipt_num[0] == 0)) return (1); if ((tag1->ipt_num[0] == tag2->ipt_num[0]) && (tag1->ipt_num[1] == tag2->ipt_num[1]) && (tag1->ipt_num[2] == tag2->ipt_num[2]) && (tag1->ipt_num[3] == tag2->ipt_num[3])) return (1); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_coalesce */ /* Returns: 1 == success, -1 == failure, 0 == no change */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* Attempt to get all of the packet data into a single, contiguous buffer. */ /* If this call returns a failure then the buffers have also been freed. */ /* ------------------------------------------------------------------------ */ int ipf_coalesce(fr_info_t *fin) { if ((fin->fin_flx & FI_COALESCE) != 0) return (1); /* * If the mbuf pointers indicate that there is no mbuf to work with, * return but do not indicate success or failure. */ if (fin->fin_m == NULL || fin->fin_mp == NULL) return (0); #if defined(_KERNEL) if (ipf_pullup(fin->fin_m, fin, fin->fin_plen) == NULL) { ipf_main_softc_t *softc = fin->fin_main_soft; DT1(frb_coalesce, fr_info_t *, fin); LBUMP(ipf_stats[fin->fin_out].fr_badcoalesces); # if SOLARIS FREE_MB_T(*fin->fin_mp); # endif fin->fin_reason = FRB_COALESCE; *fin->fin_mp = NULL; fin->fin_m = NULL; return (-1); } #else fin = fin; /* LINT */ #endif return (1); } /* * The following table lists all of the tunable variables that can be * accessed via SIOCIPFGET/SIOCIPFSET/SIOCIPFGETNEXt. The format of each row * in the table below is as follows: * * pointer to value, name of value, minimum, maximum, size of the value's * container, value attribute flags * * For convienience, IPFT_RDONLY means the value is read-only, IPFT_WRDISABLED * means the value can only be written to when IPFilter is loaded but disabled. * The obvious implication is if neither of these are set then the value can be * changed at any time without harm. */ /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_findbycookie */ /* Returns: NULL = search failed, else pointer to tune struct */ /* Parameters: cookie(I) - cookie value to search for amongst tuneables */ /* next(O) - pointer to place to store the cookie for the */ /* "next" tuneable, if it is desired. */ /* */ /* This function is used to walk through all of the existing tunables with */ /* successive calls. It searches the known tunables for the one which has */ /* a matching value for "cookie" - ie its address. When returning a match, */ /* the next one to be found may be returned inside next. */ /* ------------------------------------------------------------------------ */ static ipftuneable_t * ipf_tune_findbycookie(ipftuneable_t **ptop, void *cookie, void **next) { ipftuneable_t *ta, **tap; for (ta = *ptop; ta->ipft_name != NULL; ta++) if (ta == cookie) { if (next != NULL) { /* * If the next entry in the array has a name * present, then return a pointer to it for * where to go next, else return a pointer to * the dynaminc list as a key to search there * next. This facilitates a weak linking of * the two "lists" together. 
*/ if ((ta + 1)->ipft_name != NULL) *next = ta + 1; else *next = ptop; } return (ta); } for (tap = ptop; (ta = *tap) != NULL; tap = &ta->ipft_next) if (tap == cookie) { if (next != NULL) *next = &ta->ipft_next; return (ta); } if (next != NULL) *next = NULL; return (NULL); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_findbyname */ /* Returns: NULL = search failed, else pointer to tune struct */ /* Parameters: name(I) - name of the tuneable entry to find. */ /* */ /* Search the static array of tuneables and the list of dynamic tuneables */ /* for an entry with a matching name. If we can find one, return a pointer */ /* to the matching structure. */ /* ------------------------------------------------------------------------ */ static ipftuneable_t * ipf_tune_findbyname(ipftuneable_t *top, const char *name) { ipftuneable_t *ta; for (ta = top; ta != NULL; ta = ta->ipft_next) if (!strcmp(ta->ipft_name, name)) { return (ta); } return (NULL); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_add_array */ /* Returns: int - 0 == success, else failure */ /* Parameters: newtune - pointer to new tune array to add to tuneables */ /* */ /* Appends tune structures from the array passed in (newtune) to the end of */ /* the current list of "dynamic" tuneable parameters. */ /* If any entry to be added is already present (by name) then the operation */ /* is aborted - entries that have been added are removed before returning. */ /* An entry with no name (NULL) is used as the indication that the end of */ /* the array has been reached. */ /* ------------------------------------------------------------------------ */ int ipf_tune_add_array(ipf_main_softc_t *softc, ipftuneable_t *newtune) { ipftuneable_t *nt, *dt; int error = 0; for (nt = newtune; nt->ipft_name != NULL; nt++) { error = ipf_tune_add(softc, nt); if (error != 0) { for (dt = newtune; dt != nt; dt++) { (void) ipf_tune_del(softc, dt); } break; } } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_array_link */ /* Returns: 0 == success, -1 == failure */ /* Parameters: softc(I) - soft context pointer to work with */ /* array(I) - pointer to an array of tuneables */ /* */ /* Given an array of tunables (array), append them to the current list of */ /* tuneables for this context (softc->ipf_tuners). To properly prepare the */ /* array for being appended to the list, initialise all of the next */ /* pointers so we don't need to walk parts of it with ++ and others with */ /* next. The array is expected to have an entry with a NULL name as the */ /* terminator. Trying to add an array with no non-NULL names will return as */ /* a failure. */ /* ------------------------------------------------------------------------ */ int ipf_tune_array_link(ipf_main_softc_t *softc, ipftuneable_t *array) { ipftuneable_t *t, **p; t = array; if (t->ipft_name == NULL) return (-1); for (; t[1].ipft_name != NULL; t++) t[0].ipft_next = &t[1]; t->ipft_next = NULL; /* * Since a pointer to the last entry isn't kept, we need to find it * each time we want to add new variables to the list.
*/ for (p = &softc->ipf_tuners; (t = *p) != NULL; p = &t->ipft_next) if (t->ipft_name == NULL) break; *p = array; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_array_unlink */ /* Returns: 0 == success, -1 == failure */ /* Parameters: softc(I) - soft context pointerto work with */ /* array(I) - pointer to an array of tuneables */ /* */ /* ------------------------------------------------------------------------ */ int ipf_tune_array_unlink(ipf_main_softc_t *softc, ipftuneable_t *array) { ipftuneable_t *t, **p; for (p = &softc->ipf_tuners; (t = *p) != NULL; p = &t->ipft_next) if (t == array) break; if (t == NULL) return (-1); for (; t[1].ipft_name != NULL; t++) ; *p = t->ipft_next; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_array_copy */ /* Returns: NULL = failure, else pointer to new array */ /* Parameters: base(I) - pointer to structure base */ /* size(I) - size of the array at template */ /* template(I) - original array to copy */ /* */ /* Allocate memory for a new set of tuneable values and copy everything */ /* from template into the new region of memory. The new region is full of */ /* uninitialised pointers (ipft_next) so set them up. Now, ipftp_offset... */ /* */ /* NOTE: the following assumes that sizeof(long) == sizeof(void *) */ /* In the array template, ipftp_offset is the offset (in bytes) of the */ /* location of the tuneable value inside the structure pointed to by base. */ /* As ipftp_offset is a union over the pointers to the tuneable values, if */ /* we add base to the copy's ipftp_offset, copy ends up with a pointer in */ /* ipftp_void that points to the stored value. */ /* ------------------------------------------------------------------------ */ ipftuneable_t * ipf_tune_array_copy(void *base, size_t size, ipftuneable_t *template) { ipftuneable_t *copy; int i; KMALLOCS(copy, ipftuneable_t *, size); if (copy == NULL) { return (NULL); } bcopy(template, copy, size); for (i = 0; copy[i].ipft_name; i++) { copy[i].ipft_una.ipftp_offset += (u_long)base; copy[i].ipft_next = copy + i + 1; } return (copy); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_add */ /* Returns: int - 0 == success, else failure */ /* Parameters: newtune - pointer to new tune entry to add to tuneables */ /* */ /* Appends tune structures from the array passed in (newtune) to the end of */ /* the current list of "dynamic" tuneable parameters. Once added, the */ /* owner of the object is not expected to ever change "ipft_next". */ /* ------------------------------------------------------------------------ */ int ipf_tune_add(ipf_main_softc_t *softc, ipftuneable_t *newtune) { ipftuneable_t *ta, **tap; ta = ipf_tune_findbyname(softc->ipf_tuners, newtune->ipft_name); if (ta != NULL) { IPFERROR(74); return (EEXIST); } for (tap = &softc->ipf_tuners; *tap != NULL; tap = &(*tap)->ipft_next) ; newtune->ipft_next = NULL; *tap = newtune; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_del */ /* Returns: int - 0 == success, else failure */ /* Parameters: oldtune - pointer to tune entry to remove from the list of */ /* current dynamic tuneables */ /* */ /* Search for the tune structure, by pointer, in the list of those that are */ /* dynamically added at run time. If found, adjust the list so that this */ /* structure is no longer part of it. 
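*/ /* A hedged sketch of how a subsystem typically uses the array calls */ /* above (entry layout abbreviated, not taken from this file): an entry */ /* with a NULL name terminates the array, and link/unlink pair around */ /* the subsystem's lifetime: */ /* */ /* static ipftuneable_t my_tuneables[] = { */ /* { ... "my_timeout" entry ... }, */ /* { .ipft_name = NULL } (terminator) */ /* }; */ /* */ /* (void) ipf_tune_array_link(softc, my_tuneables); (at attach) */ /* (void) ipf_tune_array_unlink(softc, my_tuneables); (at detach) */ /*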
*/ /* ------------------------------------------------------------------------ */ int ipf_tune_del(ipf_main_softc_t *softc, ipftuneable_t *oldtune) { ipftuneable_t *ta, **tap; int error = 0; for (tap = &softc->ipf_tuners; (ta = *tap) != NULL; tap = &ta->ipft_next) { if (ta == oldtune) { *tap = oldtune->ipft_next; oldtune->ipft_next = NULL; break; } } if (ta == NULL) { error = ESRCH; IPFERROR(75); } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tune_del_array */ /* Returns: int - 0 == success, else failure */ /* Parameters: oldtune - pointer to tuneables array */ /* */ /* Remove each tuneable entry in the array from the list of "dynamic" */ /* tunables. If one entry should fail to be found, an error will be */ /* returned and no further ones removed. */ /* An entry with a NULL name is used as the indicator of the last entry in */ /* the array. */ /* ------------------------------------------------------------------------ */ int ipf_tune_del_array(ipf_main_softc_t *softc, ipftuneable_t *oldtune) { ipftuneable_t *ot; int error = 0; for (ot = oldtune; ot->ipft_name != NULL; ot++) { error = ipf_tune_del(softc, ot); if (error != 0) break; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_ipftune */ /* Returns: int - 0 == success, else failure */ /* Parameters: cmd(I) - ioctl command number */ /* data(I) - pointer to ioctl data structure */ /* */ /* Implement handling of SIOCIPFGETNEXT, SIOCIPFGET and SIOCIPFSET. These */ /* three ioctls provide the means to access and control global variables */ /* within IPFilter, allowing (for example) timeouts and table sizes to be */ /* changed without rebooting, reloading or recompiling. The initialisation */ /* and 'destruction' routines of the various components of ipfilter are */ /* each responsible for handling their own values being too big. */ /* ------------------------------------------------------------------------ */ int ipf_ipftune(ipf_main_softc_t *softc, ioctlcmd_t cmd, void *data) { ipftuneable_t *ta; ipftune_t tu; void *cookie; int error; error = ipf_inobj(softc, data, NULL, &tu, IPFOBJ_TUNEABLE); if (error != 0) return (error); tu.ipft_name[sizeof(tu.ipft_name) - 1] = '\0'; cookie = tu.ipft_cookie; ta = NULL; switch (cmd) { case SIOCIPFGETNEXT : /* * If cookie is non-NULL, assume it to be a pointer to the last * entry we looked at, so find it (if possible) and return a * pointer to the next one after it. The last entry in the * table is a NULL entry, so when we get to it, set cookie * to NULL and return that, indicating end of list, whereas * if we come in with cookie set to NULL, we are starting anew * at the front of the list. */ if (cookie != NULL) { ta = ipf_tune_findbycookie(&softc->ipf_tuners, cookie, &tu.ipft_cookie); } else { ta = softc->ipf_tuners; tu.ipft_cookie = ta + 1; } if (ta != NULL) { /* * Entry found, but does the data pointed to by that * row fit in what we can return?
*/ if (ta->ipft_sz > sizeof(tu.ipft_un)) { IPFERROR(76); return (EINVAL); } tu.ipft_vlong = 0; if (ta->ipft_sz == sizeof(u_long)) tu.ipft_vlong = *ta->ipft_plong; else if (ta->ipft_sz == sizeof(u_int)) tu.ipft_vint = *ta->ipft_pint; else if (ta->ipft_sz == sizeof(u_short)) tu.ipft_vshort = *ta->ipft_pshort; else if (ta->ipft_sz == sizeof(u_char)) tu.ipft_vchar = *ta->ipft_pchar; tu.ipft_sz = ta->ipft_sz; tu.ipft_min = ta->ipft_min; tu.ipft_max = ta->ipft_max; tu.ipft_flags = ta->ipft_flags; bcopy(ta->ipft_name, tu.ipft_name, MIN(sizeof(tu.ipft_name), strlen(ta->ipft_name) + 1)); } error = ipf_outobj(softc, data, &tu, IPFOBJ_TUNEABLE); break; case SIOCIPFGET : case SIOCIPFSET : /* * Search by name or by cookie value for a particular entry * in the tuning parameter table. */ IPFERROR(77); error = ESRCH; if (cookie != NULL) { ta = ipf_tune_findbycookie(&softc->ipf_tuners, cookie, NULL); if (ta != NULL) error = 0; } else if (tu.ipft_name[0] != '\0') { ta = ipf_tune_findbyname(softc->ipf_tuners, tu.ipft_name); if (ta != NULL) error = 0; } if (error != 0) break; if (cmd == (ioctlcmd_t)SIOCIPFGET) { /* * Fetch the tuning parameters for a particular value */ tu.ipft_vlong = 0; if (ta->ipft_sz == sizeof(u_long)) tu.ipft_vlong = *ta->ipft_plong; else if (ta->ipft_sz == sizeof(u_int)) tu.ipft_vint = *ta->ipft_pint; else if (ta->ipft_sz == sizeof(u_short)) tu.ipft_vshort = *ta->ipft_pshort; else if (ta->ipft_sz == sizeof(u_char)) tu.ipft_vchar = *ta->ipft_pchar; tu.ipft_cookie = ta; tu.ipft_sz = ta->ipft_sz; tu.ipft_min = ta->ipft_min; tu.ipft_max = ta->ipft_max; tu.ipft_flags = ta->ipft_flags; error = ipf_outobj(softc, data, &tu, IPFOBJ_TUNEABLE); } else if (cmd == (ioctlcmd_t)SIOCIPFSET) { /* * Set an internal parameter. The hard part here is * getting the new value safely and correctly out of * the kernel (given we only know its size, not type.) */ u_long in; if (((ta->ipft_flags & IPFT_WRDISABLED) != 0) && (softc->ipf_running > 0)) { IPFERROR(78); error = EBUSY; break; } in = tu.ipft_vlong; if (in < ta->ipft_min || in > ta->ipft_max) { IPFERROR(79); error = EINVAL; break; } if (ta->ipft_func != NULL) { SPL_INT(s); SPL_NET(s); error = (*ta->ipft_func)(softc, ta, &tu.ipft_un); SPL_X(s); } else if (ta->ipft_sz == sizeof(u_long)) { tu.ipft_vlong = *ta->ipft_plong; *ta->ipft_plong = in; } else if (ta->ipft_sz == sizeof(u_int)) { tu.ipft_vint = *ta->ipft_pint; *ta->ipft_pint = (u_int)(in & 0xffffffff); } else if (ta->ipft_sz == sizeof(u_short)) { tu.ipft_vshort = *ta->ipft_pshort; *ta->ipft_pshort = (u_short)(in & 0xffff); } else if (ta->ipft_sz == sizeof(u_char)) { tu.ipft_vchar = *ta->ipft_pchar; *ta->ipft_pchar = (u_char)(in & 0xff); } error = ipf_outobj(softc, data, &tu, IPFOBJ_TUNEABLE); } break; default : IPFERROR(80); error = EINVAL; break; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_zerostats */ /* Returns: int - 0 = success, else failure */ /* Parameters: data(O) - pointer to pointer for copying data back to */ /* */ /* Copies the current statistics out to userspace and then zero's the */ /* current ones in the kernel. The lock is only held across the bzero() as */ /* the copyout may result in paging (ie network activity.) 
*/ /* ------------------------------------------------------------------------ */ int ipf_zerostats(ipf_main_softc_t *softc, caddr_t data) { friostat_t fio; ipfobj_t obj; int error; error = ipf_inobj(softc, data, &obj, &fio, IPFOBJ_IPFSTAT); if (error != 0) return (error); ipf_getstat(softc, &fio, obj.ipfo_rev); error = ipf_outobj(softc, data, &fio, IPFOBJ_IPFSTAT); if (error != 0) return (error); WRITE_ENTER(&softc->ipf_mutex); bzero(&softc->ipf_stats, sizeof(softc->ipf_stats)); RWLOCK_EXIT(&softc->ipf_mutex); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_resolvedest */ /* Returns: int - 0 == success, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* base(I) - where strings are stored */ /* fdp(IO) - pointer to destination information to resolve */ /* v(I) - IP protocol version to match */ /* */ /* Looks up an interface name in the frdest structure pointed to by fdp and */ /* if a matching name can be found for the particular IP protocol version */ /* then store the interface pointer in the frdest struct. If no match is */ /* found, then set the interface pointer to be -1 as NULL is considered to */ /* indicate there is no information at all in the structure. */ /* ------------------------------------------------------------------------ */ int ipf_resolvedest(ipf_main_softc_t *softc, char *base, frdest_t *fdp, int v) { int errval = 0; void *ifp; ifp = NULL; if (fdp->fd_name != -1) { if (fdp->fd_type == FRD_DSTLIST) { ifp = ipf_lookup_res_name(softc, IPL_LOGIPF, IPLT_DSTLIST, base + fdp->fd_name, NULL); if (ifp == NULL) { IPFERROR(144); errval = ESRCH; } } else { ifp = GETIFP(base + fdp->fd_name, v); if (ifp == NULL) ifp = (void *)-1; } } fdp->fd_ptr = ifp; return (errval); } /* ------------------------------------------------------------------------ */ /* Function: ipf_resolvenic */ /* Returns: void* - NULL = wildcard name, -1 = failed to find NIC, else */ /* pointer to interface structure for NIC */ /* Parameters: softc(I)- pointer to soft context main structure */ /* name(I) - complete interface name */ /* v(I) - IP protocol version */ /* */ /* Look for a network interface structure that firstly has a matching name */ /* to that passed in and that is also being used for that IP protocol */ /* version (necessary on some platforms where there are separate listings */ /* for both IPv4 and IPv6 on the same physical NIC). */ /* ------------------------------------------------------------------------ */ void * ipf_resolvenic(ipf_main_softc_t *softc, char *name, int v) { void *nic; softc = softc; /* gcc -Wextra */ if (name[0] == '\0') return (NULL); if ((name[1] == '\0') && ((name[0] == '-') || (name[0] == '*'))) { return (NULL); } nic = GETIFP(name, v); if (nic == NULL) nic = (void *)-1; return (nic); } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_expire */ /* Returns: None. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* This function is run every ipf tick to see if there are any tokens that */ /* have been held for too long and need to be freed up.
*/ /* ------------------------------------------------------------------------ */ void ipf_token_expire(ipf_main_softc_t *softc) { ipftoken_t *it; WRITE_ENTER(&softc->ipf_tokens); while ((it = softc->ipf_token_head) != NULL) { if (it->ipt_die > softc->ipf_ticks) break; ipf_token_deref(softc, it); } RWLOCK_EXIT(&softc->ipf_tokens); } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_flush */ /* Returns: None. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Loop through all of the existing tokens and call deref to see if they */ /* can be freed. Normally a function like this might just loop on */ /* ipf_token_head but there is a chance that a token might have a ref count */ /* of greater than one and in that case the reference would drop twice */ /* by code that is only entitled to drop it once. */ /* ------------------------------------------------------------------------ */ static void ipf_token_flush(ipf_main_softc_t *softc) { ipftoken_t *it, *next; WRITE_ENTER(&softc->ipf_tokens); for (it = softc->ipf_token_head; it != NULL; it = next) { next = it->ipt_next; (void) ipf_token_deref(softc, it); } RWLOCK_EXIT(&softc->ipf_tokens); } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_del */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I)- pointer to soft context main structure */ /* type(I) - the token type to match */ /* uid(I) - uid owning the token */ /* ptr(I) - context pointer for the token */ /* */ /* This function looks for a token in the current list that matches up */ /* the fields (type, uid, ptr). If none is found, ESRCH is returned, else */ /* call ipf_token_deref() to remove it from the list. In the event that */ /* the token has a reference held elsewhere, setting ipt_complete to 2 */ /* enables debugging to distinguish between the two paths that ultimately */ /* lead to a token being deleted. */ /* ------------------------------------------------------------------------ */ int ipf_token_del(ipf_main_softc_t *softc, int type, int uid, void *ptr) { ipftoken_t *it; int error; IPFERROR(82); error = ESRCH; WRITE_ENTER(&softc->ipf_tokens); for (it = softc->ipf_token_head; it != NULL; it = it->ipt_next) { if (ptr == it->ipt_ctx && type == it->ipt_type && uid == it->ipt_uid) { it->ipt_complete = 2; ipf_token_deref(softc, it); error = 0; break; } } RWLOCK_EXIT(&softc->ipf_tokens); return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_mark_complete */ /* Returns: None. */ /* Parameters: token(I) - pointer to token structure */ /* */ /* Mark a token as being ineligible for being found with ipf_token_find. */ /* ------------------------------------------------------------------------ */ void ipf_token_mark_complete(ipftoken_t *token) { if (token->ipt_complete == 0) token->ipt_complete = 1; } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_find */ /* Returns: ipftoken_t * - NULL if no memory, else pointer to token */ /* Parameters: softc(I)- pointer to soft context main structure */ /* type(I) - the token type to match */ /* uid(I) - uid owning the token */ /* ptr(I) - context pointer for the token */ /* */ /* This function looks for a live token in the list of current tokens that */ /* matches the tuple (type, uid, ptr). If one cannot be found then one is */ /* allocated.
If one is found then it is moved to the top of the list of */ /* currently active tokens. */ /* ------------------------------------------------------------------------ */ ipftoken_t * ipf_token_find(ipf_main_softc_t *softc, int type, int uid, void *ptr) { ipftoken_t *it, *new; WRITE_ENTER(&softc->ipf_tokens); for (it = softc->ipf_token_head; it != NULL; it = it->ipt_next) { if ((ptr == it->ipt_ctx) && (type == it->ipt_type) && (uid == it->ipt_uid) && (it->ipt_complete < 2)) break; } if (it == NULL) { KMALLOC(new, ipftoken_t *); if (new != NULL) bzero((char *)new, sizeof(*new)); it = new; new = NULL; if (it == NULL) { RWLOCK_EXIT(&softc->ipf_tokens); return (NULL); } it->ipt_ctx = ptr; it->ipt_uid = uid; it->ipt_type = type; it->ipt_ref = 1; } else { if (it->ipt_complete > 0) it = NULL; else ipf_token_unlink(softc, it); } if (it != NULL) { it->ipt_pnext = softc->ipf_token_tail; *softc->ipf_token_tail = it; softc->ipf_token_tail = &it->ipt_next; it->ipt_next = NULL; it->ipt_ref++; it->ipt_die = softc->ipf_ticks + 20; } RWLOCK_EXIT(&softc->ipf_tokens); return (it); } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_unlink */ /* Returns: None. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* token(I) - pointer to token structure */ /* Write Locks: ipf_tokens */ /* */ /* This function unlinks a token structure from the linked list of tokens */ /* that "own" it. The head pointer never needs to be explicitly adjusted */ /* but the tail does due to the linked list implementation. */ /* ------------------------------------------------------------------------ */ static void ipf_token_unlink(ipf_main_softc_t *softc, ipftoken_t *token) { if (softc->ipf_token_tail == &token->ipt_next) softc->ipf_token_tail = token->ipt_pnext; *token->ipt_pnext = token->ipt_next; if (token->ipt_next != NULL) token->ipt_next->ipt_pnext = token->ipt_pnext; token->ipt_next = NULL; token->ipt_pnext = NULL; } /* ------------------------------------------------------------------------ */ /* Function: ipf_token_deref */ /* Returns: int - 0 == token freed, else reference count */ /* Parameters: softc(I) - pointer to soft context main structure */ /* token(I) - pointer to token structure */ /* Write Locks: ipf_tokens */ /* */ /* Drop the reference count on the token structure and if it drops to zero, */ /* call the dereference function for the token type because it is then */ /* possible to free the token data structure. 
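*/ /* For orientation (a summary of this file's own usage, not new API): */ /* ipf_token_find() returns the token holding an extra reference for the */ /* caller, so every lookup is balanced with a deref under the write lock, */ /* as ipf_frruleiter() below does: */ /* */ /* token = ipf_token_find(softc, type, uid, ctx); */ /* if (token != NULL) { */ /* ... use token->ipt_data ... */ /* WRITE_ENTER(&softc->ipf_tokens); */ /* (void) ipf_token_deref(softc, token); */ /* RWLOCK_EXIT(&softc->ipf_tokens); */ /* } */ /*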
*/ /* ------------------------------------------------------------------------ */ int ipf_token_deref(ipf_main_softc_t *softc, ipftoken_t *token) { void *data, **datap; ASSERT(token->ipt_ref > 0); token->ipt_ref--; if (token->ipt_ref > 0) return (token->ipt_ref); data = token->ipt_data; datap = &data; if ((data != NULL) && (data != (void *)-1)) { switch (token->ipt_type) { case IPFGENITER_IPF : (void) ipf_derefrule(softc, (frentry_t **)datap); break; case IPFGENITER_IPNAT : WRITE_ENTER(&softc->ipf_nat); ipf_nat_rule_deref(softc, (ipnat_t **)datap); RWLOCK_EXIT(&softc->ipf_nat); break; case IPFGENITER_NAT : ipf_nat_deref(softc, (nat_t **)datap); break; case IPFGENITER_STATE : ipf_state_deref(softc, (ipstate_t **)datap); break; case IPFGENITER_FRAG : ipf_frag_pkt_deref(softc, (ipfr_t **)datap); break; case IPFGENITER_NATFRAG : ipf_frag_nat_deref(softc, (ipfr_t **)datap); break; case IPFGENITER_HOSTMAP : WRITE_ENTER(&softc->ipf_nat); ipf_nat_hostmapdel(softc, (hostmap_t **)datap); RWLOCK_EXIT(&softc->ipf_nat); break; default : ipf_lookup_iterderef(softc, token->ipt_type, data); break; } } ipf_token_unlink(softc, token); KFREE(token); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nextrule */ /* Returns: frentry_t * - NULL == no more rules, else pointer to next */ /* Parameters: softc(I) - pointer to soft context main structure */ /* fr(I) - pointer to filter rule */ /* out(I) - 1 == out rules, 0 == input rules */ /* */ /* Starting with "fr", find the next rule to visit. This includes visiting */ /* the list of rule groups if either fr is NULL (empty list) or it is the */ /* last rule in the list. When walking rule lists, it is either input or */ /* output rules that are returned, never both. */ /* ------------------------------------------------------------------------ */ static frentry_t * ipf_nextrule(ipf_main_softc_t *softc, int active, int unit, frentry_t *fr, int out) { frentry_t *next; frgroup_t *fg; if (fr != NULL && fr->fr_group != -1) { fg = ipf_findgroup(softc, fr->fr_names + fr->fr_group, unit, active, NULL); if (fg != NULL) fg = fg->fg_next; } else { fg = softc->ipf_groups[unit][active]; } while (fg != NULL) { next = fg->fg_start; while (next != NULL) { if (out) { if (next->fr_flags & FR_OUTQUE) return (next); } else if (next->fr_flags & FR_INQUE) { return (next); } next = next->fr_next; } if (next == NULL) fg = fg->fg_next; } return (NULL); } /* ------------------------------------------------------------------------ */ /* Function: ipf_getnextrule */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I)- pointer to soft context main structure */ /* t(I) - pointer to destination information to resolve */ /* ptr(I) - pointer to ipfobj_t to copyin from user space */ /* */ /* This function's first job is to bring in the ipfruleiter_t structure via */ /* the ipfobj_t structure to determine what should be the next rule to */ /* return. Once the ipfruleiter_t has been brought in, it then tries to */ /* find the 'next rule'. This may include searching rule group lists or */ /* just be as simple as looking at the 'next' field in the rule structure. */ /* When we have found the rule to return, increase its reference count and */ /* if we used an existing rule to get here, decrease its reference count. 
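*/ /* A hedged sketch of the matching userland loop (the ioctl name is an */ /* assumption): the caller keeps reissuing the iterator request until */ /* the kernel hands back a NULL iri_rule, at which point the token is */ /* marked complete: */ /* */ /* ipfruleiter_t it; (wrapped in an ipfobj_t "obj") */ /* ... set it.iri_inout, it.iri_active, it.iri_group, */ /* it.iri_nrules = 1, point it.iri_rule at a buffer ... */ /* do { */ /* if (ioctl(fd, SIOCIPFITER, &obj) == -1) */ /* break; */ /* ... one rule (plus fr_data) is now in the buffer ... */ /* } while (it.iri_rule != NULL); */ /*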
*/ /* ------------------------------------------------------------------------ */ int ipf_getnextrule(ipf_main_softc_t *softc, ipftoken_t *t, void *ptr) { frentry_t *fr, *next, zero; ipfruleiter_t it; int error, out; frgroup_t *fg; ipfobj_t obj; int predict; char *dst; int unit; if (t == NULL || ptr == NULL) { IPFERROR(84); return (EFAULT); } error = ipf_inobj(softc, ptr, &obj, &it, IPFOBJ_IPFITER); if (error != 0) return (error); if ((it.iri_inout < 0) || (it.iri_inout > 3)) { IPFERROR(85); return (EINVAL); } if ((it.iri_active != 0) && (it.iri_active != 1)) { IPFERROR(86); return (EINVAL); } if (it.iri_nrules == 0) { IPFERROR(87); return (ENOSPC); } if (it.iri_rule == NULL) { IPFERROR(88); return (EFAULT); } fg = NULL; fr = t->ipt_data; if ((it.iri_inout & F_OUT) != 0) out = 1; else out = 0; if ((it.iri_inout & F_ACIN) != 0) unit = IPL_LOGCOUNT; else unit = IPL_LOGIPF; READ_ENTER(&softc->ipf_mutex); if (fr == NULL) { if (*it.iri_group == '\0') { if (unit == IPL_LOGCOUNT) { next = softc->ipf_acct[out][it.iri_active]; } else { next = softc->ipf_rules[out][it.iri_active]; } if (next == NULL) next = ipf_nextrule(softc, it.iri_active, unit, NULL, out); } else { fg = ipf_findgroup(softc, it.iri_group, unit, it.iri_active, NULL); if (fg != NULL) next = fg->fg_start; else next = NULL; } } else { next = fr->fr_next; if (next == NULL) next = ipf_nextrule(softc, it.iri_active, unit, fr, out); } if (next != NULL && next->fr_next != NULL) predict = 1; else if (ipf_nextrule(softc, it.iri_active, unit, next, out) != NULL) predict = 1; else predict = 0; if (fr != NULL) (void) ipf_derefrule(softc, &fr); obj.ipfo_type = IPFOBJ_FRENTRY; dst = (char *)it.iri_rule; if (next != NULL) { obj.ipfo_size = next->fr_size; MUTEX_ENTER(&next->fr_lock); next->fr_ref++; MUTEX_EXIT(&next->fr_lock); t->ipt_data = next; } else { obj.ipfo_size = sizeof(frentry_t); bzero(&zero, sizeof(zero)); next = &zero; t->ipt_data = NULL; } it.iri_rule = predict ? next : NULL; if (predict == 0) ipf_token_mark_complete(t); RWLOCK_EXIT(&softc->ipf_mutex); obj.ipfo_ptr = dst; error = ipf_outobjk(softc, &obj, next); if (error == 0 && t->ipt_data != NULL) { dst += obj.ipfo_size; if (next->fr_data != NULL) { ipfobj_t dobj; if (next->fr_type == FR_T_IPFEXPR) dobj.ipfo_type = IPFOBJ_IPFEXPR; else dobj.ipfo_type = IPFOBJ_FRIPF; dobj.ipfo_size = next->fr_dsize; dobj.ipfo_rev = obj.ipfo_rev; dobj.ipfo_ptr = dst; error = ipf_outobjk(softc, &dobj, next->fr_data); } } if ((fr != NULL) && (next == &zero)) (void) ipf_derefrule(softc, &fr); return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_frruleiter */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I)- pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* uid(I) - uid owning the token */ /* ctx(I) - context pointer for the token */ /* */ /* This function serves as a stepping stone between ipf_ipf_ioctl and */ /* ipf_getnextrule. Its role is to find the right token in the kernel for */ /* the process doing the ioctl and use that to ask for the next rule.
*/ /* ------------------------------------------------------------------------ */ static int ipf_frruleiter(ipf_main_softc_t *softc, void *data, int uid, void *ctx) { ipftoken_t *token; ipfruleiter_t it; ipfobj_t obj; int error; token = ipf_token_find(softc, IPFGENITER_IPF, uid, ctx); if (token != NULL) { error = ipf_getnextrule(softc, token, data); WRITE_ENTER(&softc->ipf_tokens); ipf_token_deref(softc, token); RWLOCK_EXIT(&softc->ipf_tokens); } else { error = ipf_inobj(softc, data, &obj, &it, IPFOBJ_IPFITER); if (error != 0) return (error); it.iri_rule = NULL; error = ipf_outobj(softc, data, &it, IPFOBJ_IPFITER); } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_geniter */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* token(I) - pointer to ipftoken_t structure */ /* itp(I) - pointer to iterator data */ /* */ /* Decide which iterator function to call using information passed through */ /* the ipfgeniter_t structure at itp. */ /* ------------------------------------------------------------------------ */ static int ipf_geniter(ipf_main_softc_t *softc, ipftoken_t *token, ipfgeniter_t *itp) { int error; switch (itp->igi_type) { case IPFGENITER_FRAG : error = ipf_frag_pkt_next(softc, token, itp); break; default : IPFERROR(92); error = EINVAL; break; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_genericiter */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I)- pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* uid(I) - uid owning the token */ /* ctx(I) - context pointer for the token */ /* */ /* Handle the SIOCGENITER ioctl for the ipfilter device. The primary role */ /* of this function is to find the right token for the request and to use */ /* it to fetch the next entry of the iterator type passed in. */ /* ------------------------------------------------------------------------ */ int ipf_genericiter(ipf_main_softc_t *softc, void *data, int uid, void *ctx) { ipftoken_t *token; ipfgeniter_t iter; int error; error = ipf_inobj(softc, data, NULL, &iter, IPFOBJ_GENITER); if (error != 0) return (error); token = ipf_token_find(softc, iter.igi_type, uid, ctx); if (token != NULL) { token->ipt_subtype = iter.igi_type; error = ipf_geniter(softc, token, &iter); WRITE_ENTER(&softc->ipf_tokens); ipf_token_deref(softc, token); RWLOCK_EXIT(&softc->ipf_tokens); } else { IPFERROR(93); error = ESRCH; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_ipf_ioctl */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I)- pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* cmd(I) - the ioctl command number */ /* mode(I) - mode flags for the ioctl */ /* uid(I) - uid owning the token */ /* ctx(I) - context pointer for the token */ /* */ /* This function handles all of the ioctl commands that are actually issued */ /* to the /dev/ipl device.
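As a small usage illustration (not part of the source): flushing all input and output rules passes the flush flags in through an int and receives the count of rules removed back in the same int, matching the SIOCIPFFL handling below; fd is assumed to be an open descriptor for /dev/ipl:

	int n = FR_INQUE | FR_OUTQUE;

	if (ioctl(fd, SIOCIPFFL, &n) == 0)
		printf("%d rules flushed\n", n);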
*/ /* ------------------------------------------------------------------------ */ int ipf_ipf_ioctl(ipf_main_softc_t *softc, caddr_t data, ioctlcmd_t cmd, int mode, int uid, void *ctx) { friostat_t fio; int error, tmp; ipfobj_t obj; SPL_INT(s); switch (cmd) { case SIOCFRENB : if (!(mode & FWRITE)) { IPFERROR(94); error = EPERM; } else { error = BCOPYIN(data, &tmp, sizeof(tmp)); if (error != 0) { IPFERROR(95); error = EFAULT; break; } WRITE_ENTER(&softc->ipf_global); if (tmp) { if (softc->ipf_running > 0) error = 0; else error = ipfattach(softc); if (error == 0) softc->ipf_running = 1; else (void) ipfdetach(softc); } else { if (softc->ipf_running == 1) error = ipfdetach(softc); else error = 0; if (error == 0) softc->ipf_running = -1; } RWLOCK_EXIT(&softc->ipf_global); } break; case SIOCIPFSET : if (!(mode & FWRITE)) { IPFERROR(96); error = EPERM; break; } /* FALLTHRU */ case SIOCIPFGETNEXT : case SIOCIPFGET : error = ipf_ipftune(softc, cmd, (void *)data); break; case SIOCSETFF : if (!(mode & FWRITE)) { IPFERROR(97); error = EPERM; } else { error = BCOPYIN(data, &softc->ipf_flags, sizeof(softc->ipf_flags)); if (error != 0) { IPFERROR(98); error = EFAULT; } } break; case SIOCGETFF : error = BCOPYOUT(&softc->ipf_flags, data, sizeof(softc->ipf_flags)); if (error != 0) { IPFERROR(99); error = EFAULT; } break; case SIOCFUNCL : error = ipf_resolvefunc(softc, (void *)data); break; case SIOCINAFR : case SIOCRMAFR : case SIOCADAFR : case SIOCZRLST : if (!(mode & FWRITE)) { IPFERROR(100); error = EPERM; } else { error = frrequest(softc, IPL_LOGIPF, cmd, (caddr_t)data, softc->ipf_active, 1); } break; case SIOCINIFR : case SIOCRMIFR : case SIOCADIFR : if (!(mode & FWRITE)) { IPFERROR(101); error = EPERM; } else { error = frrequest(softc, IPL_LOGIPF, cmd, (caddr_t)data, 1 - softc->ipf_active, 1); } break; case SIOCSWAPA : if (!(mode & FWRITE)) { IPFERROR(102); error = EPERM; } else { WRITE_ENTER(&softc->ipf_mutex); error = BCOPYOUT(&softc->ipf_active, data, sizeof(softc->ipf_active)); if (error != 0) { IPFERROR(103); error = EFAULT; } else { softc->ipf_active = 1 - softc->ipf_active; } RWLOCK_EXIT(&softc->ipf_mutex); } break; case SIOCGETFS : error = ipf_inobj(softc, (void *)data, &obj, &fio, IPFOBJ_IPFSTAT); if (error != 0) break; ipf_getstat(softc, &fio, obj.ipfo_rev); error = ipf_outobj(softc, (void *)data, &fio, IPFOBJ_IPFSTAT); break; case SIOCFRZST : if (!(mode & FWRITE)) { IPFERROR(104); error = EPERM; } else error = ipf_zerostats(softc, (caddr_t)data); break; case SIOCIPFFL : if (!(mode & FWRITE)) { IPFERROR(105); error = EPERM; } else { error = BCOPYIN(data, &tmp, sizeof(tmp)); if (!error) { tmp = ipf_flush(softc, IPL_LOGIPF, tmp); error = BCOPYOUT(&tmp, data, sizeof(tmp)); if (error != 0) { IPFERROR(106); error = EFAULT; } } else { IPFERROR(107); error = EFAULT; } } break; #ifdef USE_INET6 case SIOCIPFL6 : if (!(mode & FWRITE)) { IPFERROR(108); error = EPERM; } else { error = BCOPYIN(data, &tmp, sizeof(tmp)); if (!error) { tmp = ipf_flush(softc, IPL_LOGIPF, tmp); error = BCOPYOUT(&tmp, data, sizeof(tmp)); if (error != 0) { IPFERROR(109); error = EFAULT; } } else { IPFERROR(110); error = EFAULT; } } break; #endif case SIOCSTLCK : if (!(mode & FWRITE)) { IPFERROR(122); error = EPERM; } else { error = BCOPYIN(data, &tmp, sizeof(tmp)); if (error == 0) { ipf_state_setlock(softc->ipf_state_soft, tmp); ipf_nat_setlock(softc->ipf_nat_soft, tmp); ipf_frag_setlock(softc->ipf_frag_soft, tmp); ipf_auth_setlock(softc->ipf_auth_soft, tmp); } else { IPFERROR(111); error = EFAULT; } } break; #ifdef 
IPFILTER_LOG case SIOCIPFFB : if (!(mode & FWRITE)) { IPFERROR(112); error = EPERM; } else { tmp = ipf_log_clear(softc, IPL_LOGIPF); error = BCOPYOUT(&tmp, data, sizeof(tmp)); if (error) { IPFERROR(113); error = EFAULT; } } break; #endif /* IPFILTER_LOG */ case SIOCFRSYN : if (!(mode & FWRITE)) { IPFERROR(114); error = EPERM; } else { WRITE_ENTER(&softc->ipf_global); #if (SOLARIS && defined(_KERNEL)) && !defined(INSTANCES) error = ipfsync(); #else ipf_sync(softc, NULL); error = 0; #endif RWLOCK_EXIT(&softc->ipf_global); } break; case SIOCGFRST : error = ipf_outobj(softc, (void *)data, ipf_frag_stats(softc->ipf_frag_soft), IPFOBJ_FRAGSTAT); break; #ifdef IPFILTER_LOG case FIONREAD : tmp = ipf_log_bytesused(softc, IPL_LOGIPF); error = BCOPYOUT(&tmp, data, sizeof(tmp)); break; #endif case SIOCIPFITER : SPL_SCHED(s); error = ipf_frruleiter(softc, data, uid, ctx); SPL_X(s); break; case SIOCGENITER : SPL_SCHED(s); error = ipf_genericiter(softc, data, uid, ctx); SPL_X(s); break; case SIOCIPFDELTOK : error = BCOPYIN(data, &tmp, sizeof(tmp)); if (error == 0) { SPL_SCHED(s); error = ipf_token_del(softc, tmp, uid, ctx); SPL_X(s); } break; default : IPFERROR(115); error = EINVAL; break; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_decaps */ /* Returns: int - -1 == decapsulation failed, else bit mask of */ /* flags indicating packet filtering decision. */ /* Parameters: fin(I) - pointer to packet information */ /* pass(I) - current set of filtering result flags */ /* l5proto(I) - layer 5 protocol to decode UDP data as. */ /* */ /* This function is called for packets that are wrapped up in other */ /* packets, for example, an IP packet that is the entire data segment for */ /* another IP packet. If the basic constraints for this are satisfied, */ /* change the buffer to point to the start of the inner packet and start */ /* processing rules belonging to the head group this rule specifies. */ /* ------------------------------------------------------------------------ */ u_32_t ipf_decaps(fr_info_t *fin, u_32_t pass, int l5proto) { fr_info_t fin2, *fino = NULL; int elen, hlen, nh; grehdr_t gre; ip_t *ip; mb_t *m; if ((fin->fin_flx & FI_COALESCE) == 0) if (ipf_coalesce(fin) == -1) goto cantdecaps; m = fin->fin_m; hlen = fin->fin_hlen; switch (fin->fin_p) { case IPPROTO_UDP : /* * In this case, the specific protocol being decapsulated * inside UDP frames comes from the rule. */ nh = fin->fin_fr->fr_icode; break; case IPPROTO_GRE : /* 47 */ bcopy(fin->fin_dp, (char *)&gre, sizeof(gre)); hlen += sizeof(grehdr_t); if (gre.gr_R|gre.gr_s) goto cantdecaps; if (gre.gr_C) hlen += 4; if (gre.gr_K) hlen += 4; if (gre.gr_S) hlen += 4; nh = IPPROTO_IP; /* * If the routing options flag is set, validate that it is * there and bounce over it. */ #if 0 /* This is really heavy weight and lots of room for error, */ /* so for now, put it off and get the simple stuff right.
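(For reference, assuming RFC 1701 framing: each GRE source route entry begins with a 2 byte address family, a 1 byte offset and a 1 byte length, and the list is terminated by an entry whose family and length are both zero - exactly the walk the disabled block below performs.)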
*/ if (gre.gr_R) { u_char off, len, *s; u_short af; int aplen, end; end = 0; s = fin->fin_dp; s += hlen; aplen = fin->fin_plen - hlen; while (aplen > 3) { af = (s[0] << 8) | s[1]; off = s[2]; len = s[3]; aplen -= 4; s += 4; if (af == 0 && len == 0) { end = 1; break; } if (aplen < len) break; s += len; aplen -= len; } if (end != 1) goto cantdecaps; hlen = s - (u_char *)fin->fin_dp; } #endif break; #ifdef IPPROTO_IPIP case IPPROTO_IPIP : /* 4 */ #endif nh = IPPROTO_IP; break; default : /* Includes ESP, AH is special for IPv4 */ goto cantdecaps; } switch (nh) { case IPPROTO_IP : case IPPROTO_IPV6 : break; default : goto cantdecaps; } bcopy((char *)fin, (char *)&fin2, sizeof(fin2)); fino = fin; fin = &fin2; elen = hlen; #if SOLARIS && defined(_KERNEL) m->b_rptr += elen; #else m->m_data += elen; m->m_len -= elen; #endif fin->fin_plen -= elen; ip = (ip_t *)((char *)fin->fin_ip + elen); /* * Make sure we have at least enough data for the network layer * header. */ if (IP_V(ip) == 4) hlen = IP_HL(ip) << 2; #ifdef USE_INET6 else if (IP_V(ip) == 6) hlen = sizeof(ip6_t); #endif else goto cantdecaps2; if (fin->fin_plen < hlen) goto cantdecaps2; fin->fin_dp = (char *)ip + hlen; if (IP_V(ip) == 4) { /* * Perform IPv4 header checksum validation. */ if (ipf_cksum((u_short *)ip, hlen)) goto cantdecaps2; } if (ipf_makefrip(hlen, ip, fin) == -1) { cantdecaps2: if (m != NULL) { #if SOLARIS && defined(_KERNEL) m->b_rptr -= elen; #else m->m_data -= elen; m->m_len += elen; #endif } cantdecaps: DT1(frb_decapfrip, fr_info_t *, fin); pass &= ~FR_CMDMASK; pass |= FR_BLOCK|FR_QUICK; fin->fin_reason = FRB_DECAPFRIP; return (-1); } pass = ipf_scanlist(fin, pass); /* * Copy the packet filter "result" fields out of the fr_info_t struct * that is local to the decapsulation processing and back into the * one we were called with. */ fino->fin_flx = fin->fin_flx; fino->fin_rev = fin->fin_rev; fino->fin_icode = fin->fin_icode; fino->fin_rule = fin->fin_rule; (void) strncpy(fino->fin_group, fin->fin_group, FR_GROUPLEN); fino->fin_fr = fin->fin_fr; fino->fin_error = fin->fin_error; fino->fin_mp = fin->fin_mp; fino->fin_m = fin->fin_m; m = fin->fin_m; if (m != NULL) { #if SOLARIS && defined(_KERNEL) m->b_rptr -= elen; #else m->m_data -= elen; m->m_len += elen; #endif } return (pass); } /* ------------------------------------------------------------------------ */ /* Function: ipf_matcharray_load */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* objp(I) - ipfobj_t structure to load data into */ /* arrayptr(I) - pointer to location to store array pointer */ /* */ /* This function loads in a matching array through the ipfobj_t struct that */ /* describes it. Sanity checking and array size limitations are enforced */ /* in this function to prevent userspace from trying to load in something */ /* that is insanely big. Once the size of the array is known, the memory */ /* required is malloc'd and returned through changing *arrayptr. The */ /* contents of the array are verified before returning. Only in the event */ /* of a successful call is the caller required to free up the malloc area.
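A sketch of the expected calling pattern (illustrative only): the caller owns the array only when 0 is returned, and must release it with the size derived from the returned ipfobj_t; use_array_somehow() is a placeholder for whatever consumes the expression:

	int *array;
	ipfobj_t obj;

	if (ipf_matcharray_load(softc, data, &obj, &array) == 0) {
		error = use_array_somehow(softc, array);
		KFREES(array, obj.ipfo_size * sizeof(*array));
	}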
*/ /* ------------------------------------------------------------------------ */ int ipf_matcharray_load(ipf_main_softc_t *softc, caddr_t data, ipfobj_t *objp, int **arrayptr) { int arraysize, *array, error; *arrayptr = NULL; error = BCOPYIN(data, objp, sizeof(*objp)); if (error != 0) { IPFERROR(116); return (EFAULT); } if (objp->ipfo_type != IPFOBJ_IPFEXPR) { IPFERROR(117); return (EINVAL); } if (((objp->ipfo_size & 3) != 0) || (objp->ipfo_size == 0) || (objp->ipfo_size > 1024)) { IPFERROR(118); return (EINVAL); } arraysize = objp->ipfo_size * sizeof(*array); KMALLOCS(array, int *, arraysize); if (array == NULL) { IPFERROR(119); return (ENOMEM); } error = COPYIN(objp->ipfo_ptr, array, arraysize); if (error != 0) { KFREES(array, arraysize); IPFERROR(120); return (EFAULT); } if (ipf_matcharray_verify(array, arraysize) != 0) { KFREES(array, arraysize); IPFERROR(121); return (EINVAL); } *arrayptr = array; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_matcharray_verify */ /* Returns: int - 0 == array is valid, -1 == array is bad */ /* Parameters: array(I) - pointer to matching array */ /* arraysize(I) - number of elements in the array */ /* */ /* Verify the contents of a matching array by stepping through each element */ /* in it. The actual commands in the array are not verified for */ /* correctness, only that all of the sizes are correctly within limits. */ /* ------------------------------------------------------------------------ */ int ipf_matcharray_verify(int *array, int arraysize) { int i, nelem, maxidx; ipfexp_t *e; nelem = arraysize / sizeof(*array); /* * Currently, it makes no sense to have an array less than 6 * elements long - the initial size at the front, a single operation * (minimum 4 in length) and a trailer, for a total of 6. */ if ((array[0] < 6) || (arraysize < 24) || (arraysize > 4096)) { return (-1); } /* * Verify the size of data pointed to by array with how long * the array claims to be itself. */ if (array[0] * sizeof(*array) != arraysize) { return (-1); } maxidx = nelem - 1; /* * The last opcode in this array should be an IPF_EXP_END. */ if (array[maxidx] != IPF_EXP_END) { return (-1); } for (i = 1; i < maxidx; ) { e = (ipfexp_t *)(array + i); /* * The length of the bits to check must be at least 1 * (or else there is nothing to compare with!) and it * cannot exceed the length of the data present. */ if ((e->ipfe_size < 1 ) || (e->ipfe_size + i > maxidx)) { return (-1); } i += e->ipfe_size; } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_fr_matcharray */ /* Returns: int - 0 = match failed, else positive match */ /* Parameters: fin(I) - pointer to packet information */ /* array(I) - pointer to matching array */ /* */ /* This function is used to apply a matching array against a packet and */ /* return an indication of whether or not the packet successfully matches */ /* all of the commands in it. */ /* ------------------------------------------------------------------------ */ static int ipf_fr_matcharray(fr_info_t *fin, int *array) { int i, n, *x, rv, p; ipfexp_t *e; rv = 0; n = array[0]; x = array + 1; for (; n > 0; x += 3 + x[3], rv = 0) { e = (ipfexp_t *)x; if (e->ipfe_cmd == IPF_EXP_END) break; n -= e->ipfe_size; /* * The upper 16 bits currently store the protocol value. * This is currently used with TCP and UDP port compares and * allows "tcp.port = 80" without requiring an explicit * "ip.pr = tcp" first.
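For example, if the headers encode IPF_EXP_TCP_PORT as ((IPPROTO_TCP << 16) | opcode) - the exact constant values live in the headers - then for a UDP packet the extraction below recovers p == IPPROTO_TCP, the p != fin_p test fails the match immediately, and the port words are never examined.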
*/ p = e->ipfe_cmd >> 16; if ((p != 0) && (p != fin->fin_p)) break; switch (e->ipfe_cmd) { case IPF_EXP_IP_PR : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (fin->fin_p == e->ipfe_arg0[i]); } break; case IPF_EXP_IP_SRCADDR : if (fin->fin_v != 4) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((fin->fin_saddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; case IPF_EXP_IP_DSTADDR : if (fin->fin_v != 4) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((fin->fin_daddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; case IPF_EXP_IP_ADDR : if (fin->fin_v != 4) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((fin->fin_saddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]) || ((fin->fin_daddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; #ifdef USE_INET6 case IPF_EXP_IP6_SRCADDR : if (fin->fin_v != 6) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= IP6_MASKEQ(&fin->fin_src6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]); } break; case IPF_EXP_IP6_DSTADDR : if (fin->fin_v != 6) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= IP6_MASKEQ(&fin->fin_dst6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]); } break; case IPF_EXP_IP6_ADDR : if (fin->fin_v != 6) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= IP6_MASKEQ(&fin->fin_src6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]) || IP6_MASKEQ(&fin->fin_dst6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]); } break; #endif case IPF_EXP_UDP_PORT : case IPF_EXP_TCP_PORT : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (fin->fin_sport == e->ipfe_arg0[i]) || (fin->fin_dport == e->ipfe_arg0[i]); } break; case IPF_EXP_UDP_SPORT : case IPF_EXP_TCP_SPORT : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (fin->fin_sport == e->ipfe_arg0[i]); } break; case IPF_EXP_UDP_DPORT : case IPF_EXP_TCP_DPORT : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (fin->fin_dport == e->ipfe_arg0[i]); } break; case IPF_EXP_TCP_FLAGS : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((fin->fin_tcpf & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; } rv ^= e->ipfe_not; if (rv == 0) break; } return (rv); } /* ------------------------------------------------------------------------ */ /* Function: ipf_queueflush */ /* Returns: int - number of entries flushed (0 = none) */ /* Parameters: softc(I) - pointer to soft context main structure */ /* deletefn(I) - function to call to delete entry */ /* ipfqs(I) - top of the list of ipf internal queues */ /* userqs(I) - top of the list of user defined timeouts */ /* */ /* This function gets called when the state/NAT hash tables fill up and we */ /* need to try a bit harder to free up some space. The algorithm used here */ /* is split into two parts but both halves have the same goal: to reduce */ /* the number of connections considered to be "active" to the low */ /* watermark. There are two steps in doing this: */ /* 1) Remove any TCP connections that are already considered to be "closed" */ /* but have not yet been removed from the state table. The two states */ /* TCPS_TIME_WAIT and TCPS_CLOSED are considered to be the perfect */ /* candidates for this style of removal. If freeing up entries in */ /* CLOSED or both CLOSED and TIME_WAIT brings us to the low watermark, */ /* we do not go on to step 2. */ /* */ /* 2) Look for the oldest entries on each timeout queue and free them if */ /* they are within the given window we are considering.
Where the */ /* window starts and the steps taken to increase its size depend upon */ /* how long ipf has been running (ipf_ticks.) Anything modified in the */ /* last 30 seconds is not touched. */
/*                                                   touched               */
/*          die     ipf_ticks  30*1.5    1800*1.5      |   43200*1.5       */
/*           |          |        |           |         |        |          */
/* future <--+----------+--------+-----------+-----+-----+-----------> past*/
/*                     now      \_int=30s_/   \_int=1hr_/  \_int=12hr      */
/* */ /* Points to note: */ /* - tqe_die is the time, in the future, when entries die. */ /* - tqe_die - ipf_ticks is how long left the connection has to live in ipf */ /* ticks. */ /* - tqe_touched is when the entry was last used by NAT/state */ /* - the closer tqe_touched is to ipf_ticks, the further tqe_die will be */ /* from ipf_ticks for any given timeout queue, and vice versa. */ /* - both tqe_die and tqe_touched increase over time */ /* - timeout queues are sorted with the highest value of tqe_die at the */ /* bottom and therefore the smallest values of each are at the top */ /* - the pointer passed in as ipfqs should point to an array of timeout */ /* queues representing each of the TCP states */ /* */ /* We start by setting up a maximum range to scan for things to remove, */ /* from iend (newest) to istart (oldest), in chunks of "interval". If */ /* nothing is */ /* found in that range, "interval" is adjusted (so long as it isn't 30) and */ /* we start again with a new value for "iend" and "istart". This is */ /* continued until we either finish the scan of 30 second intervals or the */ /* low water mark is reached. */ /* ------------------------------------------------------------------------ */ int ipf_queueflush(ipf_main_softc_t *softc, ipftq_delete_fn_t deletefn, ipftq_t *ipfqs, ipftq_t *userqs, u_int *activep, int size, int low) { u_long interval, istart, iend; ipftq_t *ifq, *ifqnext; ipftqent_t *tqe, *tqn; int removed = 0; for (tqn = ipfqs[IPF_TCPS_CLOSED].ifq_head; ((tqe = tqn) != NULL); ) { tqn = tqe->tqe_next; if ((*deletefn)(softc, tqe->tqe_parent) == 0) removed++; } if ((*activep * 100 / size) > low) { for (tqn = ipfqs[IPF_TCPS_TIME_WAIT].ifq_head; ((tqe = tqn) != NULL); ) { tqn = tqe->tqe_next; if ((*deletefn)(softc, tqe->tqe_parent) == 0) removed++; } } if ((*activep * 100 / size) <= low) { return (removed); } /* * NOTE: Use of "* 15 / 10" is required here because if "* 1.5" is * used then the operations are upgraded to floating point * and kernels don't like floating point...
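For example, IPF_TTLVAL(1800 * 15 / 10) evaluates as IPF_TTLVAL(2700) entirely in integer arithmetic, whereas writing 1800 * 1.5 would promote the expression to double.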
*/ if (softc->ipf_ticks > IPF_TTLVAL(43200 * 15 / 10)) { istart = IPF_TTLVAL(86400 * 4); interval = IPF_TTLVAL(43200); } else if (softc->ipf_ticks > IPF_TTLVAL(1800 * 15 / 10)) { istart = IPF_TTLVAL(43200); interval = IPF_TTLVAL(1800); } else if (softc->ipf_ticks > IPF_TTLVAL(30 * 15 / 10)) { istart = IPF_TTLVAL(1800); interval = IPF_TTLVAL(30); } else { return (0); } if (istart > softc->ipf_ticks) { if (softc->ipf_ticks - interval < interval) istart = interval; else istart = (softc->ipf_ticks / interval) * interval; } iend = softc->ipf_ticks - interval; while ((*activep * 100 / size) > low) { u_long try; try = softc->ipf_ticks - istart; for (ifq = ipfqs; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); ) { if (try < tqe->tqe_touched) break; tqn = tqe->tqe_next; if ((*deletefn)(softc, tqe->tqe_parent) == 0) removed++; } } for (ifq = userqs; ifq != NULL; ifq = ifqnext) { ifqnext = ifq->ifq_next; for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); ) { if (try < tqe->tqe_touched) break; tqn = tqe->tqe_next; if ((*deletefn)(softc, tqe->tqe_parent) == 0) removed++; } } if (try >= iend) { if (interval == IPF_TTLVAL(43200)) { interval = IPF_TTLVAL(1800); } else if (interval == IPF_TTLVAL(1800)) { interval = IPF_TTLVAL(30); } else { break; } if (interval >= softc->ipf_ticks) break; iend = softc->ipf_ticks - interval; } istart -= interval; } return (removed); } /* ------------------------------------------------------------------------ */ /* Function: ipf_deliverlocal */ /* Returns: int - 1 = local address, 0 = non-local address */ /* Parameters: softc(I) - pointer to soft context main structure */ /* ipversion(I) - IP protocol version (4 or 6) */ /* ifp(I) - network interface pointer */ /* ipaddr(I) - IPv4/6 destination address */ /* */ /* This function is used to determine if the address "ipaddr" belongs to */ /* the network interface represented by ifp. */ /* ------------------------------------------------------------------------ */ int ipf_deliverlocal(ipf_main_softc_t *softc, int ipversion, void *ifp, i6addr_t *ipaddr) { i6addr_t addr; int islocal = 0; if (ipversion == 4) { if (ipf_ifpaddr(softc, 4, FRI_NORMAL, ifp, &addr, NULL) == 0) { if (addr.in4.s_addr == ipaddr->in4.s_addr) islocal = 1; } #ifdef USE_INET6 } else if (ipversion == 6) { if (ipf_ifpaddr(softc, 6, FRI_NORMAL, ifp, &addr, NULL) == 0) { if (IP6_EQ(&addr, ipaddr)) islocal = 1; } #endif } return (islocal); } /* ------------------------------------------------------------------------ */ /* Function: ipf_settimeout */ /* Returns: int - 0 = success, -1 = failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* t(I) - pointer to tuneable array entry */ /* p(I) - pointer to values passed in to apply */ /* */ /* This function is called to set the timeout values for each distinct */ /* queue timeout that is available. When called, it calls into both the */ /* state and NAT code, telling them to update their timeout queues. */ /* ------------------------------------------------------------------------ */ static int ipf_settimeout(struct ipf_main_softc_s *softc, ipftuneable_t *t, ipftuneval_t *p) { /* * ipf_interror should be set by the functions called here, not * by this function - it's just a middle man.
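For example, a change made through the tuneable interface (e.g. "ipf -T tcp_idle_timeout=864000", assuming the stock ipf(8) front end) arrives here via the ipf_ipftune() path and is fanned out to both the state and NAT timeout queues.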
*/ if (ipf_state_settimeout(softc, t, p) == -1) return (-1); if (ipf_nat_settimeout(softc, t, p) == -1) return (-1); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_apply_timeout */ /* Returns: int - 0 = success, -1 = failure */ /* Parameters: head(I) - pointer to tuneable array entry */ /* seconds(I) - pointer to values passed in to apply */ /* */ /* This function applies a timeout of "seconds" to the timeout queue that */ /* is pointed to by "head". All entries on this list have an expiration */ /* set to be the current tick value of ipf plus the ttl. Given that this */ /* function should only be called when the delta is non-zero, the task is */ /* to walk the entire list and apply the change. The sort order will not */ /* change. The only catch is that this is O(n) across the list, so if the */ /* queue has lots of entries (10s of thousands or 100s of thousands), it */ /* could take a relatively long time to work through them all. */ /* ------------------------------------------------------------------------ */ void ipf_apply_timeout(ipftq_t *head, u_int seconds) { u_int oldtimeout, newtimeout; ipftqent_t *tqe; int delta; MUTEX_ENTER(&head->ifq_lock); oldtimeout = head->ifq_ttl; newtimeout = IPF_TTLVAL(seconds); delta = oldtimeout - newtimeout; head->ifq_ttl = newtimeout; for (tqe = head->ifq_head; tqe != NULL; tqe = tqe->tqe_next) { tqe->tqe_die -= delta; } MUTEX_EXIT(&head->ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_settimeout_tcp */ /* Returns: int - 0 = successfully applied, -1 = failed */ /* Parameters: t(I) - pointer to tuneable to change */ /* p(I) - pointer to new timeout information */ /* tab(I) - pointer to table of TCP queues */ /* */ /* This function applies the new timeout (p) to the TCP tunable (t) and */ /* updates all of the entries on the relevant timeout queue by calling */ /* ipf_apply_timeout().
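As a worked example of the update in ipf_apply_timeout() above: shrinking a queue's ttl from 600 to 300 seconds gives delta = IPF_TTLVAL(600) - IPF_TTLVAL(300) > 0, so every entry's tqe_die is pulled back by that same amount (tqe_die -= delta); because the shift is uniform, the relative order of entries on the queue is unchanged.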
*/ /* ------------------------------------------------------------------------ */ int ipf_settimeout_tcp(ipftuneable_t *t, ipftuneval_t *p, ipftq_t *tab) { if (!strcmp(t->ipft_name, "tcp_idle_timeout") || !strcmp(t->ipft_name, "tcp_established")) { ipf_apply_timeout(&tab[IPF_TCPS_ESTABLISHED], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_close_wait")) { ipf_apply_timeout(&tab[IPF_TCPS_CLOSE_WAIT], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_last_ack")) { ipf_apply_timeout(&tab[IPF_TCPS_LAST_ACK], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_timeout")) { ipf_apply_timeout(&tab[IPF_TCPS_LISTEN], p->ipftu_int); ipf_apply_timeout(&tab[IPF_TCPS_HALF_ESTAB], p->ipftu_int); ipf_apply_timeout(&tab[IPF_TCPS_CLOSING], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_listen")) { ipf_apply_timeout(&tab[IPF_TCPS_LISTEN], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_half_established")) { ipf_apply_timeout(&tab[IPF_TCPS_HALF_ESTAB], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_closing")) { ipf_apply_timeout(&tab[IPF_TCPS_CLOSING], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_syn_received")) { ipf_apply_timeout(&tab[IPF_TCPS_SYN_RECEIVED], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_syn_sent")) { ipf_apply_timeout(&tab[IPF_TCPS_SYN_SENT], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_closed")) { ipf_apply_timeout(&tab[IPF_TCPS_CLOSED], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_half_closed")) { ipf_apply_timeout(&tab[IPF_TCPS_CLOSED], p->ipftu_int); } else if (!strcmp(t->ipft_name, "tcp_time_wait")) { ipf_apply_timeout(&tab[IPF_TCPS_TIME_WAIT], p->ipftu_int); } else { /* * ipf_interror isn't set here because it should be set * by whatever called this function. */ return (-1); } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_main_soft_create */ /* Returns: NULL = failure, else success */ /* Parameters: arg(I) - pointer to soft context structure if already allocd */ /* */ /* Create the foundation soft context structure. In circumstances where it */ /* is not required to dynamically allocate the context, a pointer can be */ /* passed in (rather than NULL) to a structure to be initialised. */ /* The main thing of interest is that a number of locks are initialised */ /* here instead of where they might be expected - in the relevant create */ /* function elsewhere. This is done because the current locking design has */ /* some areas where these locks are used outside of their module. */ /* Possibly the most important exercise that is done here is the setting of */ /* all the timeout values, allowing them to be changed before init(). */ /* ------------------------------------------------------------------------ */ void * ipf_main_soft_create(void *arg) { ipf_main_softc_t *softc; if (arg == NULL) { KMALLOC(softc, ipf_main_softc_t *); if (softc == NULL) return (NULL); } else { softc = arg; } bzero((char *)softc, sizeof(*softc)); /* * This serves as a flag as to whether or not the softc should be * free'd when _destroy is called. */ softc->ipf_dynamic_softc = (arg == NULL) ?
1 : 0; softc->ipf_tuners = ipf_tune_array_copy(softc, sizeof(ipf_main_tuneables), ipf_main_tuneables); if (softc->ipf_tuners == NULL) { ipf_main_soft_destroy(softc); return (NULL); } MUTEX_INIT(&softc->ipf_rw, "ipf rw mutex"); MUTEX_INIT(&softc->ipf_timeoutlock, "ipf timeout lock"); RWLOCK_INIT(&softc->ipf_global, "ipf filter load/unload mutex"); RWLOCK_INIT(&softc->ipf_mutex, "ipf filter rwlock"); RWLOCK_INIT(&softc->ipf_tokens, "ipf token rwlock"); RWLOCK_INIT(&softc->ipf_state, "ipf state rwlock"); RWLOCK_INIT(&softc->ipf_nat, "ipf IP NAT rwlock"); RWLOCK_INIT(&softc->ipf_poolrw, "ipf pool rwlock"); RWLOCK_INIT(&softc->ipf_frag, "ipf frag rwlock"); softc->ipf_token_head = NULL; softc->ipf_token_tail = &softc->ipf_token_head; softc->ipf_tcpidletimeout = FIVE_DAYS; softc->ipf_tcpclosewait = IPF_TTLVAL(2 * TCP_MSL); softc->ipf_tcplastack = IPF_TTLVAL(30); softc->ipf_tcptimewait = IPF_TTLVAL(2 * TCP_MSL); softc->ipf_tcptimeout = IPF_TTLVAL(2 * TCP_MSL); softc->ipf_tcpsynsent = IPF_TTLVAL(2 * TCP_MSL); softc->ipf_tcpsynrecv = IPF_TTLVAL(2 * TCP_MSL); softc->ipf_tcpclosed = IPF_TTLVAL(30); softc->ipf_tcphalfclosed = IPF_TTLVAL(2 * 3600); softc->ipf_udptimeout = IPF_TTLVAL(120); softc->ipf_udpacktimeout = IPF_TTLVAL(12); softc->ipf_icmptimeout = IPF_TTLVAL(60); softc->ipf_icmpacktimeout = IPF_TTLVAL(6); softc->ipf_iptimeout = IPF_TTLVAL(60); #if defined(IPFILTER_DEFAULT_BLOCK) softc->ipf_pass = FR_BLOCK|FR_NOMATCH; #else softc->ipf_pass = (IPF_DEFAULT_PASS)|FR_NOMATCH; #endif softc->ipf_minttl = 4; softc->ipf_icmpminfragmtu = 68; softc->ipf_flags = IPF_LOGGING; #ifdef LARGE_NAT softc->ipf_large_nat = 1; #endif ipf_fbsd_kenv_get(softc); return (softc); } /* ------------------------------------------------------------------------ */ /* Function: ipf_main_soft_init */ /* Returns: 0 = success, -1 = failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* A null-op function that exists as a placeholder so that the flow in */ /* other functions is obvious. */ /* ------------------------------------------------------------------------ */ /*ARGSUSED*/ int ipf_main_soft_init(ipf_main_softc_t *softc) { return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_main_soft_destroy */ /* Returns: void */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Undo everything that we did in ipf_main_soft_create. */ /* */ /* The most important check that needs to be made here is whether or not */ /* the structure was allocated by ipf_main_soft_create() by checking what */ /* value is stored in ipf_dynamic_softc.
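For instance (an illustrative sketch, not a prescribed usage), a caller that embeds the context in its own storage passes that storage in and remains responsible for it; ipf_dynamic_softc is then 0 and the KFREE below is skipped. my_softc is a hypothetical static instance used only for illustration:

	static ipf_main_softc_t my_softc;
	ipf_main_softc_t *softc;

	softc = ipf_main_soft_create(&my_softc);
	if (softc != NULL)
		ipf_main_soft_destroy(softc);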
*/ /* ------------------------------------------------------------------------ */ /*ARGSUSED*/ void ipf_main_soft_destroy(ipf_main_softc_t *softc) { RW_DESTROY(&softc->ipf_frag); RW_DESTROY(&softc->ipf_poolrw); RW_DESTROY(&softc->ipf_nat); RW_DESTROY(&softc->ipf_state); RW_DESTROY(&softc->ipf_tokens); RW_DESTROY(&softc->ipf_mutex); RW_DESTROY(&softc->ipf_global); MUTEX_DESTROY(&softc->ipf_timeoutlock); MUTEX_DESTROY(&softc->ipf_rw); if (softc->ipf_tuners != NULL) { KFREES(softc->ipf_tuners, sizeof(ipf_main_tuneables)); } if (softc->ipf_dynamic_softc == 1) { KFREE(softc); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_main_soft_fini */ /* Returns: 0 = success, -1 = failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Clean out the rules which have been added since _init was last called, */ /* the only dynamic part of the mainline. */ /* ------------------------------------------------------------------------ */ int ipf_main_soft_fini(ipf_main_softc_t *softc) { (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE); (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE); (void) ipf_flush(softc, IPL_LOGCOUNT, FR_INQUE|FR_OUTQUE|FR_INACTIVE); (void) ipf_flush(softc, IPL_LOGCOUNT, FR_INQUE|FR_OUTQUE); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_main_load */ /* Returns: 0 = success, -1 = failure */ /* Parameters: none */ /* */ /* Handle global initialisation that needs to be done for the base part of */ /* IPFilter. At present this just amounts to initialising some ICMP lookup */ /* arrays that get used by the state/NAT code. */ /* ------------------------------------------------------------------------ */ int ipf_main_load(void) { int i; /* fill icmp reply type table */ for (i = 0; i <= ICMP_MAXTYPE; i++) icmpreplytype4[i] = -1; icmpreplytype4[ICMP_ECHO] = ICMP_ECHOREPLY; icmpreplytype4[ICMP_TSTAMP] = ICMP_TSTAMPREPLY; icmpreplytype4[ICMP_IREQ] = ICMP_IREQREPLY; icmpreplytype4[ICMP_MASKREQ] = ICMP_MASKREPLY; #ifdef USE_INET6 /* fill icmp reply type table */ for (i = 0; i <= ICMP6_MAXTYPE; i++) icmpreplytype6[i] = -1; icmpreplytype6[ICMP6_ECHO_REQUEST] = ICMP6_ECHO_REPLY; icmpreplytype6[ICMP6_MEMBERSHIP_QUERY] = ICMP6_MEMBERSHIP_REPORT; icmpreplytype6[ICMP6_NI_QUERY] = ICMP6_NI_REPLY; icmpreplytype6[ND_ROUTER_SOLICIT] = ND_ROUTER_ADVERT; icmpreplytype6[ND_NEIGHBOR_SOLICIT] = ND_NEIGHBOR_ADVERT; #endif return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_main_unload */ /* Returns: 0 = success, -1 = failure */ /* Parameters: none */ /* */ /* A null-op function that exists as a placeholder so that the flow in */ /* other functions is obvious. 
*/ /* ------------------------------------------------------------------------ */ int ipf_main_unload(void) { return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_load_all */ /* Returns: 0 = success, -1 = failure */ /* Parameters: none */ /* */ /* Work through all of the subsystems inside IPFilter and call the load */ /* function for each in an order that won't lead to a crash :) */ /* ------------------------------------------------------------------------ */ int ipf_load_all(void) { if (ipf_main_load() == -1) return (-1); if (ipf_state_main_load() == -1) return (-1); if (ipf_nat_main_load() == -1) return (-1); if (ipf_frag_main_load() == -1) return (-1); if (ipf_auth_main_load() == -1) return (-1); if (ipf_proxy_main_load() == -1) return (-1); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_unload_all */ /* Returns: 0 = success, -1 = failure */ /* Parameters: none */ /* */ /* Work through all of the subsystems inside IPFilter and call the unload */ /* function for each in an order that won't lead to a crash :) */ /* ------------------------------------------------------------------------ */ int ipf_unload_all(void) { if (ipf_proxy_main_unload() == -1) return (-1); if (ipf_auth_main_unload() == -1) return (-1); if (ipf_frag_main_unload() == -1) return (-1); if (ipf_nat_main_unload() == -1) return (-1); if (ipf_state_main_unload() == -1) return (-1); if (ipf_main_unload() == -1) return (-1); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_create_all */ /* Returns: NULL = failure, else success */ /* Parameters: arg(I) - pointer to soft context main structure */ /* */ /* Work through all of the subsystems inside IPFilter and call the create */ /* function for each in an order that won't lead to a crash :) */ /* ------------------------------------------------------------------------ */ ipf_main_softc_t * ipf_create_all(void *arg) { ipf_main_softc_t *softc; softc = ipf_main_soft_create(arg); if (softc == NULL) return (NULL); #ifdef IPFILTER_LOG softc->ipf_log_soft = ipf_log_soft_create(softc); if (softc->ipf_log_soft == NULL) { ipf_destroy_all(softc); return (NULL); } #endif softc->ipf_lookup_soft = ipf_lookup_soft_create(softc); if (softc->ipf_lookup_soft == NULL) { ipf_destroy_all(softc); return (NULL); } softc->ipf_sync_soft = ipf_sync_soft_create(softc); if (softc->ipf_sync_soft == NULL) { ipf_destroy_all(softc); return (NULL); } softc->ipf_state_soft = ipf_state_soft_create(softc); if (softc->ipf_state_soft == NULL) { ipf_destroy_all(softc); return (NULL); } softc->ipf_nat_soft = ipf_nat_soft_create(softc); if (softc->ipf_nat_soft == NULL) { ipf_destroy_all(softc); return (NULL); } softc->ipf_frag_soft = ipf_frag_soft_create(softc); if (softc->ipf_frag_soft == NULL) { ipf_destroy_all(softc); return (NULL); } softc->ipf_auth_soft = ipf_auth_soft_create(softc); if (softc->ipf_auth_soft == NULL) { ipf_destroy_all(softc); return (NULL); } softc->ipf_proxy_soft = ipf_proxy_soft_create(softc); if (softc->ipf_proxy_soft == NULL) { ipf_destroy_all(softc); return (NULL); } return (softc); } /* ------------------------------------------------------------------------ */ /* Function: ipf_destroy_all */ /* Returns: void */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Work through all of the subsystems inside IPFilter and call the destroy */ /* function for each in an order that won't lead to 
a crash :) */ /* */ /* Every one of these functions is expected to succeed, so there is no */ /* checking of return values. */ /* ------------------------------------------------------------------------ */ void ipf_destroy_all(ipf_main_softc_t *softc) { if (softc->ipf_state_soft != NULL) { ipf_state_soft_destroy(softc, softc->ipf_state_soft); softc->ipf_state_soft = NULL; } if (softc->ipf_nat_soft != NULL) { ipf_nat_soft_destroy(softc, softc->ipf_nat_soft); softc->ipf_nat_soft = NULL; } if (softc->ipf_frag_soft != NULL) { ipf_frag_soft_destroy(softc, softc->ipf_frag_soft); softc->ipf_frag_soft = NULL; } if (softc->ipf_auth_soft != NULL) { ipf_auth_soft_destroy(softc, softc->ipf_auth_soft); softc->ipf_auth_soft = NULL; } if (softc->ipf_proxy_soft != NULL) { ipf_proxy_soft_destroy(softc, softc->ipf_proxy_soft); softc->ipf_proxy_soft = NULL; } if (softc->ipf_sync_soft != NULL) { ipf_sync_soft_destroy(softc, softc->ipf_sync_soft); softc->ipf_sync_soft = NULL; } if (softc->ipf_lookup_soft != NULL) { ipf_lookup_soft_destroy(softc, softc->ipf_lookup_soft); softc->ipf_lookup_soft = NULL; } #ifdef IPFILTER_LOG if (softc->ipf_log_soft != NULL) { ipf_log_soft_destroy(softc, softc->ipf_log_soft); softc->ipf_log_soft = NULL; } #endif ipf_main_soft_destroy(softc); } /* ------------------------------------------------------------------------ */ /* Function: ipf_init_all */ /* Returns: 0 = success, -1 = failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Work through all of the subsystems inside IPFilter and call the init */ /* function for each in an order that won't lead to a crash :) */ /* ------------------------------------------------------------------------ */ int ipf_init_all(ipf_main_softc_t *softc) { if (ipf_main_soft_init(softc) == -1) return (-1); #ifdef IPFILTER_LOG if (ipf_log_soft_init(softc, softc->ipf_log_soft) == -1) return (-1); #endif if (ipf_lookup_soft_init(softc, softc->ipf_lookup_soft) == -1) return (-1); if (ipf_sync_soft_init(softc, softc->ipf_sync_soft) == -1) return (-1); if (ipf_state_soft_init(softc, softc->ipf_state_soft) == -1) return (-1); if (ipf_nat_soft_init(softc, softc->ipf_nat_soft) == -1) return (-1); if (ipf_frag_soft_init(softc, softc->ipf_frag_soft) == -1) return (-1); if (ipf_auth_soft_init(softc, softc->ipf_auth_soft) == -1) return (-1); if (ipf_proxy_soft_init(softc, softc->ipf_proxy_soft) == -1) return (-1); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_fini_all */ /* Returns: 0 = success, -1 = failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Work through all of the subsystems inside IPFilter and call the fini */ /* function for each in an order that won't lead to a crash :) */ /* ------------------------------------------------------------------------ */ int ipf_fini_all(ipf_main_softc_t *softc) { ipf_token_flush(softc); if (ipf_proxy_soft_fini(softc, softc->ipf_proxy_soft) == -1) return (-1); if (ipf_auth_soft_fini(softc, softc->ipf_auth_soft) == -1) return (-1); if (ipf_frag_soft_fini(softc, softc->ipf_frag_soft) == -1) return (-1); if (ipf_nat_soft_fini(softc, softc->ipf_nat_soft) == -1) return (-1); if (ipf_state_soft_fini(softc, softc->ipf_state_soft) == -1) return (-1); if (ipf_sync_soft_fini(softc, softc->ipf_sync_soft) == -1) return (-1); if (ipf_lookup_soft_fini(softc, softc->ipf_lookup_soft) == -1) return (-1); #ifdef IPFILTER_LOG if (ipf_log_soft_fini(softc, softc->ipf_log_soft) == -1) return (-1); #endif if 
(ipf_main_soft_fini(softc) == -1) return (-1); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_rule_expire */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* At present this function exists just to support temporary addition of */ /* firewall rules. Both inactive and active lists are scanned for items to */ /* purge, as by rights, the expiration is computed as soon as the rule is */ /* loaded in. */ /* ------------------------------------------------------------------------ */ void ipf_rule_expire(ipf_main_softc_t *softc) { frentry_t *fr; if ((softc->ipf_rule_explist[0] == NULL) && (softc->ipf_rule_explist[1] == NULL)) return; WRITE_ENTER(&softc->ipf_mutex); while ((fr = softc->ipf_rule_explist[0]) != NULL) { /* * Because the list is kept sorted on insertion, the first * one that dies in the future means no more work to do. */ if (fr->fr_die > softc->ipf_ticks) break; ipf_rule_delete(softc, fr, IPL_LOGIPF, 0); } while ((fr = softc->ipf_rule_explist[1]) != NULL) { /* * Because the list is kept sorted on insertion, the first * one that dies in the future means no more work to do. */ if (fr->fr_die > softc->ipf_ticks) break; ipf_rule_delete(softc, fr, IPL_LOGIPF, 1); } RWLOCK_EXIT(&softc->ipf_mutex); } static int ipf_ht_node_cmp(struct host_node_s *, struct host_node_s *); static void ipf_ht_node_make_key(host_track_t *, host_node_t *, int, i6addr_t *); host_node_t RBI_ZERO(ipf_rb); RBI_CODE(ipf_rb, host_node_t, hn_entry, ipf_ht_node_cmp) /* ------------------------------------------------------------------------ */ /* Function: ipf_ht_node_cmp */ /* Returns: int - 0 == nodes are the same, .. */ /* Parameters: k1(I) - pointer to first key to compare */ /* k2(I) - pointer to second key to compare */ /* */ /* The "key" for the node is a combination of two fields: the address */ /* family and the address itself. */ /* */ /* Because we're not actually interpreting the address data, it isn't */ /* necessary to convert them to/from network/host byte order. The mask is */ /* just used to remove bits that aren't significant - it doesn't matter */ /* where they are, as long as they're always in the same place. */ /* */ /* As with IP6_EQ, comparing IPv6 addresses starts at the bottom because */ /* this is where individual ones will differ the most - but that is not */ /* true for /48's, etc.
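As a worked example: two addresses that differ only in their /48 site prefix agree on i6[3], i6[2] and (usually) i6[1], so the comparison below falls all the way through to the final i6[0] test before resolving; two hosts inside the same /64, by contrast, normally differ in i6[3] and return on the first test.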
*/ /* ------------------------------------------------------------------------ */ static int ipf_ht_node_cmp(struct host_node_s *k1, struct host_node_s *k2) { int i; i = (k2->hn_addr.adf_family - k1->hn_addr.adf_family); if (i != 0) return (i); if (k1->hn_addr.adf_family == AF_INET) return (k2->hn_addr.adf_addr.in4.s_addr - k1->hn_addr.adf_addr.in4.s_addr); i = k2->hn_addr.adf_addr.i6[3] - k1->hn_addr.adf_addr.i6[3]; if (i != 0) return (i); i = k2->hn_addr.adf_addr.i6[2] - k1->hn_addr.adf_addr.i6[2]; if (i != 0) return (i); i = k2->hn_addr.adf_addr.i6[1] - k1->hn_addr.adf_addr.i6[1]; if (i != 0) return (i); i = k2->hn_addr.adf_addr.i6[0] - k1->hn_addr.adf_addr.i6[0]; return (i); } /* ------------------------------------------------------------------------ */ /* Function: ipf_ht_node_make_key */ /* Returns: Nil */ /* parameters: htp(I) - pointer to address tracking structure */ /* key(I) - where to store masked address for lookup */ /* family(I) - protocol family of address */ /* addr(I) - pointer to network address */ /* */ /* Using the "netmask" (number of bits) stored in the parent host tracking */ /* struct, copy the address passed in into the key structure whilst masking */ /* out the bits that we don't want. */ /* */ /* Because the parser will set ht_netmask to 128 if there is no protocol */ /* specified (the parser doesn't know if it should be a v4 or v6 rule), we */ /* have to be wary of that and not allow 32-128 to happen. */ /* ------------------------------------------------------------------------ */ static void ipf_ht_node_make_key(host_track_t *htp, host_node_t *key, int family, i6addr_t *addr) { key->hn_addr.adf_family = family; if (family == AF_INET) { u_32_t mask; int bits; key->hn_addr.adf_len = sizeof(key->hn_addr.adf_addr.in4); bits = htp->ht_netmask; if (bits >= 32) { mask = 0xffffffff; } else { mask = htonl(0xffffffff << (32 - bits)); } key->hn_addr.adf_addr.in4.s_addr = addr->in4.s_addr & mask; #ifdef USE_INET6 } else { int bits = htp->ht_netmask; key->hn_addr.adf_len = sizeof(key->hn_addr.adf_addr.in6); if (bits > 96) { key->hn_addr.adf_addr.i6[3] = addr->i6[3] & htonl(0xffffffff << (128 - bits)); key->hn_addr.adf_addr.i6[2] = addr->i6[2]; key->hn_addr.adf_addr.i6[1] = addr->i6[1]; key->hn_addr.adf_addr.i6[0] = addr->i6[0]; } else if (bits > 64) { key->hn_addr.adf_addr.i6[3] = 0; key->hn_addr.adf_addr.i6[2] = addr->i6[2] & htonl(0xffffffff << (96 - bits)); key->hn_addr.adf_addr.i6[1] = addr->i6[1]; key->hn_addr.adf_addr.i6[0] = addr->i6[0]; } else if (bits > 32) { key->hn_addr.adf_addr.i6[3] = 0; key->hn_addr.adf_addr.i6[2] = 0; key->hn_addr.adf_addr.i6[1] = addr->i6[1] & htonl(0xffffffff << (64 - bits)); key->hn_addr.adf_addr.i6[0] = addr->i6[0]; } else { key->hn_addr.adf_addr.i6[3] = 0; key->hn_addr.adf_addr.i6[2] = 0; key->hn_addr.adf_addr.i6[1] = 0; key->hn_addr.adf_addr.i6[0] = addr->i6[0] & htonl(0xffffffff << (32 - bits)); } #endif } } /* ------------------------------------------------------------------------ */ /* Function: ipf_ht_node_add */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* htp(I) - pointer to address tracking structure */ /* family(I) - protocol family of address */ /* addr(I) - pointer to network address */ /* */ /* NOTE: THIS FUNCTION MUST BE CALLED WITH AN EXCLUSIVE LOCK THAT PREVENTS */ /* ipf_ht_node_del FROM RUNNING CONCURRENTLY ON THE SAME htp.
*/ /* */ /* After preparing the key with the address information to find, look in */ /* the red-black tree to see if the address is known. A successful call to */ /* this function can mean one of two things: a new node was added to the */ /* tree or a matching node exists and we're able to bump up its activity. */ /* ------------------------------------------------------------------------ */ int ipf_ht_node_add(ipf_main_softc_t *softc, host_track_t *htp, int family, i6addr_t *addr) { host_node_t *h; host_node_t k; ipf_ht_node_make_key(htp, &k, family, addr); h = RBI_SEARCH(ipf_rb, &htp->ht_root, &k); if (h == NULL) { if (htp->ht_cur_nodes >= htp->ht_max_nodes) return (-1); KMALLOC(h, host_node_t *); if (h == NULL) { DT(ipf_rb_no_mem); LBUMP(ipf_rb_no_mem); return (-1); } /* * If there was a macro to initialise the RB node then that * would get used here, but there isn't... */ bzero((char *)h, sizeof(*h)); h->hn_addr = k.hn_addr; h->hn_addr.adf_family = k.hn_addr.adf_family; RBI_INSERT(ipf_rb, &htp->ht_root, h); htp->ht_cur_nodes++; } else { if ((htp->ht_max_per_node != 0) && (h->hn_active >= htp->ht_max_per_node)) { DT(ipf_rb_node_max); LBUMP(ipf_rb_node_max); return (-1); } } h->hn_active++; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_ht_node_del */ /* Returns: int - 0 == success, -1 == failure */ /* parameters: htp(I) - pointer to address tracking structure */ /* family(I) - protocol family of address */ /* addr(I) - pointer to network address */ /* */ /* NOTE: THIS FUNCTION MUST BE CALLED WITH AN EXCLUSIVE LOCK THAT PREVENTS */ /* ipf_ht_node_add FROM RUNNING CONCURRENTLY ON THE SAME htp. */ /* */ /* Try and find the address passed in amongst the leaves of this tree. If */ /* it is found, the active count for that node is dropped by one. If that */ /* count reaches 0, it is time to free the node up. */ /* ------------------------------------------------------------------------ */ int ipf_ht_node_del(host_track_t *htp, int family, i6addr_t *addr) { host_node_t *h; host_node_t k; ipf_ht_node_make_key(htp, &k, family, addr); h = RBI_SEARCH(ipf_rb, &htp->ht_root, &k); if (h == NULL) { return (-1); } else { h->hn_active--; if (h->hn_active == 0) { (void) RBI_DELETE(ipf_rb, &htp->ht_root, h); htp->ht_cur_nodes--; KFREE(h); } } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_rb_ht_init */ /* Returns: Nil */ /* Parameters: head(I) - pointer to host tracking structure */ /* */ /* Initialise the host tracking structure to be ready for use above. */ /* ------------------------------------------------------------------------ */ void ipf_rb_ht_init(host_track_t *head) { RBI_INIT(ipf_rb, &head->ht_root); } /* ------------------------------------------------------------------------ */ /* Function: ipf_rb_ht_freenode */ /* Returns: Nil */ /* Parameters: node(I) - pointer to host node structure to free */ /* arg(I) - additional argument from walk caller */ /* */ /* Free an actual host_node_t structure. */ /* ------------------------------------------------------------------------ */ void ipf_rb_ht_freenode(host_node_t *node, void *arg) { KFREE(node); } /* ------------------------------------------------------------------------ */ /* Function: ipf_rb_ht_flush */ /* Returns: Nil */ /* Parameters: head(I) - pointer to host tracking structure */ /* */ /* Remove all of the nodes in the tree tracking hosts by calling a walker */ /* and free'ing each one.
*/ /* ------------------------------------------------------------------------ */ void ipf_rb_ht_flush(host_track_t *head) { RBI_WALK(ipf_rb, &head->ht_root, ipf_rb_ht_freenode, NULL); } /* ------------------------------------------------------------------------ */ /* Function: ipf_slowtimer */ /* Returns: Nil */ /* Parameters: ptr(I) - pointer to main ipf soft context structure */ /* */ /* Slowly expire held state for fragments. Timeouts are set in */ /* expectation of this being called twice per second. */ /* ------------------------------------------------------------------------ */ void ipf_slowtimer(ipf_main_softc_t *softc) { ipf_token_expire(softc); ipf_frag_expire(softc); ipf_state_expire(softc); ipf_nat_expire(softc); ipf_auth_expire(softc); ipf_lookup_expire(softc); ipf_rule_expire(softc); ipf_sync_expire(softc); softc->ipf_ticks++; } /* ------------------------------------------------------------------------ */ /* Function: ipf_inet_mask_add */ /* Returns: Nil */ /* Parameters: bits(I) - number of bits set in the netmask */ /* mtab(I) - pointer to mask hash table structure */ /* */ /* When called, bits represents the mask of a new NAT rule that has just */ /* been added. This function inserts a bitmask into the array of masks to */ /* search when searching for a matching NAT rule for a packet. */ /* Prevention of duplicate masks is achieved by checking the use count for */ /* a given netmask. */ /* ------------------------------------------------------------------------ */ void ipf_inet_mask_add(int bits, ipf_v4_masktab_t *mtab) { u_32_t mask; int i, j; mtab->imt4_masks[bits]++; if (mtab->imt4_masks[bits] > 1) return; if (bits == 0) mask = 0; else mask = 0xffffffff << (32 - bits); for (i = 0; i < 33; i++) { if (ntohl(mtab->imt4_active[i]) < mask) { for (j = 32; j > i; j--) mtab->imt4_active[j] = mtab->imt4_active[j - 1]; mtab->imt4_active[i] = htonl(mask); break; } } mtab->imt4_max++; } /* ------------------------------------------------------------------------ */ /* Function: ipf_inet_mask_del */ /* Returns: Nil */ /* Parameters: bits(I) - number of bits set in the netmask */ /* mtab(I) - pointer to mask hash table structure */ /* */ /* Remove the 32bit bitmask represented by "bits" from the collection of */ /* netmasks stored inside of mtab. */ /* ------------------------------------------------------------------------ */ void ipf_inet_mask_del(int bits, ipf_v4_masktab_t *mtab) { u_32_t mask; int i, j; mtab->imt4_masks[bits]--; if (mtab->imt4_masks[bits] > 0) return; if (bits == 0) mask = 0; else mask = htonl(0xffffffff << (32 - bits)); for (i = 0; i < 33; i++) { if (mtab->imt4_active[i] == mask) { for (j = i + 1; j < 33; j++) mtab->imt4_active[j - 1] = mtab->imt4_active[j]; break; } } mtab->imt4_max--; ASSERT(mtab->imt4_max >= 0); } #ifdef USE_INET6 /* ------------------------------------------------------------------------ */ /* Function: ipf_inet6_mask_add */ /* Returns: Nil */ /* Parameters: bits(I) - number of bits set in mask */ /* mask(I) - pointer to mask to add */ /* mtab(I) - pointer to mask hash table structure */ /* */ /* When called, bits represents the mask of an IPv6 NAT map rule that */ /* has just been added. This function inserts a bitmask into the array of */ /* masks to search when searching for a matching NAT rule for a packet. */ /* Prevention of duplicate masks is achieved by checking the use count for */ /* a given netmask.
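For example (mask values shown in host byte order), with a /16 rule already loaded, adding a /24 inserts 0xffffff00 ahead of 0xffff0000 in imt4_active[], keeping the array sorted from most to least specific so that a search tries longer prefixes first.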
*/ /* ------------------------------------------------------------------------ */ void ipf_inet6_mask_add(int bits, i6addr_t *mask, ipf_v6_masktab_t *mtab) { i6addr_t zero; int i, j; mtab->imt6_masks[bits]++; if (mtab->imt6_masks[bits] > 1) return; if (bits == 0) { mask = &zero; zero.i6[0] = 0; zero.i6[1] = 0; zero.i6[2] = 0; zero.i6[3] = 0; } for (i = 0; i < 129; i++) { if (IP6_LT(&mtab->imt6_active[i], mask)) { for (j = 128; j > i; j--) mtab->imt6_active[j] = mtab->imt6_active[j - 1]; mtab->imt6_active[i] = *mask; break; } } mtab->imt6_max++; } /* ------------------------------------------------------------------------ */ /* Function: ipf_inet6_mask_del */ /* Returns: Nil */ /* Parameters: bits(I) - number of bits set in mask */ /* mask(I) - pointer to mask to remove */ /* mtab(I) - pointer to mask hash table structure */ /* */ /* Remove the 128bit bitmask represented by "bits" from the collection of */ /* netmasks stored inside of mtab. */ /* ------------------------------------------------------------------------ */ void ipf_inet6_mask_del(int bits, i6addr_t *mask, ipf_v6_masktab_t *mtab) { i6addr_t zero; int i, j; mtab->imt6_masks[bits]--; if (mtab->imt6_masks[bits] > 0) return; if (bits == 0) mask = &zero; zero.i6[0] = 0; zero.i6[1] = 0; zero.i6[2] = 0; zero.i6[3] = 0; for (i = 0; i < 129; i++) { if (IP6_EQ(&mtab->imt6_active[i], mask)) { for (j = i + 1; j < 129; j++) { mtab->imt6_active[j - 1] = mtab->imt6_active[j]; if (IP6_EQ(&mtab->imt6_active[j - 1], &zero)) break; } break; } } mtab->imt6_max--; ASSERT(mtab->imt6_max >= 0); } #endif diff --git a/sys/netpfil/ipfilter/netinet/ip_nat.c b/sys/netpfil/ipfilter/netinet/ip_nat.c index b8a0e7d2075b..a13c6129a287 100644 --- a/sys/netpfil/ipfilter/netinet/ip_nat.c +++ b/sys/netpfil/ipfilter/netinet/ip_nat.c @@ -1,8392 +1,8392 @@ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. */ #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #include <sys/errno.h> #include <sys/types.h> #include <sys/param.h> #include <sys/time.h> #include <sys/file.h> #if defined(_KERNEL) && \ (defined(__NetBSD_Version) && (__NetBSD_Version >= 399002000)) # include <sys/kauth.h> #endif #if !defined(_KERNEL) # include <stdio.h> # include <string.h> # include <stdlib.h> # define KERNEL # ifdef _OpenBSD__ struct file; # endif # include <sys/uio.h> # undef KERNEL #endif #if defined(_KERNEL) && defined(__FreeBSD__) # include <sys/filio.h> # include <sys/fcntl.h> #else # include <sys/ioctl.h> #endif # include <sys/fcntl.h> # include <sys/protosw.h> #include <sys/socket.h> #if defined(_KERNEL) # include <sys/systm.h> # if defined(__FreeBSD__) # include <sys/jail.h> # endif # if !defined(__SVR4) # include <sys/mbuf.h> # endif #endif #if defined(__SVR4) # include <sys/filio.h> # include <sys/byteorder.h> # ifdef KERNEL # include <sys/dditypes.h> # endif # include <sys/stream.h> # include <sys/kmem.h> #endif #if defined(__FreeBSD__) # include <sys/queue.h> #endif #include <net/if.h> #if defined(__FreeBSD__) # include <net/if_var.h> #endif #ifdef sun # include <net/af.h> #endif #include <netinet/in.h> #include <netinet/in_systm.h> #include <netinet/ip.h> #ifdef RFC1825 # include <vpn/md5.h> # include <vpn/ipsec.h> extern struct ifnet vpnif; #endif # include <netinet/ip_var.h> #include <netinet/tcp.h> #include <netinet/udp.h> #include <netinet/ip_icmp.h> #include "netinet/ip_compat.h" #include <netinet/tcpip.h> #include "netinet/ipl.h" #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_frag.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #include "netinet/ip_lookup.h" #include "netinet/ip_dstlist.h" #include "netinet/ip_sync.h" #if defined(__FreeBSD__) # include <machine/in_cksum.h> #endif #ifdef HAS_SYS_MD5_H # include <sys/md5.h> #else # include "md5.h" #endif /* END OF INCLUDES */ #undef SOCKADDR_IN #define SOCKADDR_IN struct sockaddr_in #define NATFSUM(n,v,f) ((v) == 4 ?
(n)->f.in4.s_addr : (n)->f.i6[0] + \ (n)->f.i6[1] + (n)->f.i6[2] + (n)->f.i6[3]) #define NBUMP(x) softn->(x)++ #define NBUMPD(x, y) do { \ softn->x.y++; \ DT(y); \ } while (0) #define NBUMPSIDE(y,x) softn->ipf_nat_stats.ns_side[y].x++ #define NBUMPSIDED(y,x) do { softn->ipf_nat_stats.ns_side[y].x++; \ DT(x); } while (0) #define NBUMPSIDEX(y,x,z) \ do { softn->ipf_nat_stats.ns_side[y].x++; \ DT(z); } while (0) #define NBUMPSIDEDF(y,x)do { softn->ipf_nat_stats.ns_side[y].x++; \ DT1(x, fr_info_t *, fin); } while (0) static ipftuneable_t ipf_nat_tuneables[] = { /* nat */ { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_lock) }, "nat_lock", 0, 1, stsizeof(ipf_nat_softc_t, ipf_nat_lock), IPFT_RDONLY, NULL, NULL }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_table_sz) }, "nat_table_size", 1, 0x7fffffff, stsizeof(ipf_nat_softc_t, ipf_nat_table_sz), 0, NULL, ipf_nat_rehash }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_table_max) }, "nat_table_max", 1, 0x7fffffff, stsizeof(ipf_nat_softc_t, ipf_nat_table_max), 0, NULL, NULL }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_maprules_sz) }, "nat_rules_size", 1, 0x7fffffff, stsizeof(ipf_nat_softc_t, ipf_nat_maprules_sz), 0, NULL, ipf_nat_rehash_rules }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_rdrrules_sz) }, "rdr_rules_size", 1, 0x7fffffff, stsizeof(ipf_nat_softc_t, ipf_nat_rdrrules_sz), 0, NULL, ipf_nat_rehash_rules }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_hostmap_sz) }, "hostmap_size", 1, 0x7fffffff, stsizeof(ipf_nat_softc_t, ipf_nat_hostmap_sz), 0, NULL, ipf_nat_hostmap_rehash }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_maxbucket) }, "nat_maxbucket",1, 0x7fffffff, stsizeof(ipf_nat_softc_t, ipf_nat_maxbucket), 0, NULL, NULL }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_logging) }, "nat_logging", 0, 1, stsizeof(ipf_nat_softc_t, ipf_nat_logging), 0, NULL, NULL }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_doflush) }, "nat_doflush", 0, 1, stsizeof(ipf_nat_softc_t, ipf_nat_doflush), 0, NULL, NULL }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_table_wm_low) }, "nat_table_wm_low", 1, 99, stsizeof(ipf_nat_softc_t, ipf_nat_table_wm_low), 0, NULL, NULL }, { { (void *)offsetof(ipf_nat_softc_t, ipf_nat_table_wm_high) }, "nat_table_wm_high", 2, 100, stsizeof(ipf_nat_softc_t, ipf_nat_table_wm_high), 0, NULL, NULL }, { { 0 }, NULL, 0, 0, 0, 0, NULL, NULL } }; /* ======================================================================== */ /* How the NAT is organised and works. */ /* */ /* Inside (interface y) NAT Outside (interface x) */ /* -------------------- -+- ------------------------------------- */ /* Packet going | out, processed by ipf_nat_checkout() for x */ /* ------------> | ------------> */ /* src=10.1.1.1 | src=192.1.1.1 */ /* | */ /* | in, processed by ipf_nat_checkin() for x */ /* <------------ | <------------ */ /* dst=10.1.1.1 | dst=192.1.1.1 */ /* -------------------- -+- ------------------------------------- */ /* ipf_nat_checkout() - changes ip_src and if required, sport */ /* - creates a new mapping, if required. */ /* ipf_nat_checkin() - changes ip_dst and if required, dport */ /* */ /* In the NAT table, internal source is recorded as "in" and externally */ /* seen as "out". 
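*/
/*
 * Editor's note (added for illustration; not from the original source):
 * a map rule of the shape below, in ipnat(8) syntax, would produce the
 * 10.1.1.1 <-> 192.1.1.1 rewriting pictured above, with "x0" as the
 * outside interface:
 *
 *	map x0 10.1.1.0/24 -> 192.1.1.1/32 portmap tcp/udp 40000:60000
 *
 * ipf_nat_checkout() picks the new source address and port on the way
 * out; ipf_nat_checkin() reverses the rewrite for replies coming in.
 */
/*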
*/ /* ======================================================================== */ #if SOLARIS && !defined(INSTANCES) extern int pfil_delayed_copy; #endif static int ipf_nat_flush_entry(ipf_main_softc_t *, void *); static int ipf_nat_getent(ipf_main_softc_t *, caddr_t, int); static int ipf_nat_getsz(ipf_main_softc_t *, caddr_t, int); static int ipf_nat_putent(ipf_main_softc_t *, caddr_t, int); static void ipf_nat_addmap(ipf_nat_softc_t *, ipnat_t *); static void ipf_nat_addrdr(ipf_nat_softc_t *, ipnat_t *); static int ipf_nat_builddivertmp(ipf_nat_softc_t *, ipnat_t *); static int ipf_nat_clearlist(ipf_main_softc_t *, ipf_nat_softc_t *); static int ipf_nat_cmp_rules(ipnat_t *, ipnat_t *); static int ipf_nat_decap(fr_info_t *, nat_t *); static void ipf_nat_delrule(ipf_main_softc_t *, ipf_nat_softc_t *, ipnat_t *, int); static int ipf_nat_extraflush(ipf_main_softc_t *, ipf_nat_softc_t *, int); static int ipf_nat_finalise(fr_info_t *, nat_t *); static int ipf_nat_flushtable(ipf_main_softc_t *, ipf_nat_softc_t *); static int ipf_nat_getnext(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *, ipfobj_t *); static int ipf_nat_gettable(ipf_main_softc_t *, ipf_nat_softc_t *, char *); static hostmap_t *ipf_nat_hostmap(ipf_nat_softc_t *, ipnat_t *, struct in_addr, struct in_addr, struct in_addr, u_32_t); static int ipf_nat_icmpquerytype(int); static int ipf_nat_iterator(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *, ipfobj_t *); static int ipf_nat_match(fr_info_t *, ipnat_t *); static int ipf_nat_matcharray(nat_t *, int *, u_long); static int ipf_nat_matchflush(ipf_main_softc_t *, ipf_nat_softc_t *, caddr_t); static void ipf_nat_mssclamp(tcphdr_t *, u_32_t, fr_info_t *, u_short *); static int ipf_nat_newmap(fr_info_t *, nat_t *, natinfo_t *); static int ipf_nat_newdivert(fr_info_t *, nat_t *, natinfo_t *); static int ipf_nat_newrdr(fr_info_t *, nat_t *, natinfo_t *); static int ipf_nat_newrewrite(fr_info_t *, nat_t *, natinfo_t *); static int ipf_nat_nextaddr(fr_info_t *, nat_addr_t *, u_32_t *, u_32_t *); static int ipf_nat_nextaddrinit(ipf_main_softc_t *, char *, nat_addr_t *, int, void *); static int ipf_nat_resolverule(ipf_main_softc_t *, ipnat_t *); static int ipf_nat_ruleaddrinit(ipf_main_softc_t *, ipf_nat_softc_t *, ipnat_t *); static void ipf_nat_rule_fini(ipf_main_softc_t *, ipnat_t *); static int ipf_nat_rule_init(ipf_main_softc_t *, ipf_nat_softc_t *, ipnat_t *); static int ipf_nat_siocaddnat(ipf_main_softc_t *, ipf_nat_softc_t *, ipnat_t *, int); static void ipf_nat_siocdelnat(ipf_main_softc_t *, ipf_nat_softc_t *, ipnat_t *, int); static void ipf_nat_tabmove(ipf_nat_softc_t *, nat_t *); /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_main_load */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: Nil */ /* */ /* The only global NAT structure that needs to be initialised is the filter */ /* rule that is used with blocking packets. */ /* ------------------------------------------------------------------------ */ int ipf_nat_main_load(void) { return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_main_unload */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: Nil */ /* */ /* A null-op function that exists as a placeholder so that the flow in */ /* other functions is obvious. 
*/ /* ------------------------------------------------------------------------ */ int ipf_nat_main_unload(void) { return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_soft_create */ /* Returns: void * - NULL = failure, else pointer to NAT context */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Allocate the initial soft context structure for NAT and populate it with */ /* some default values. Creating the tables is left until we call _init so */ /* that sizes can be changed before we get under way. */ /* ------------------------------------------------------------------------ */ void * ipf_nat_soft_create(ipf_main_softc_t *softc) { ipf_nat_softc_t *softn; KMALLOC(softn, ipf_nat_softc_t *); if (softn == NULL) return (NULL); bzero((char *)softn, sizeof(*softn)); softn->ipf_nat_tune = ipf_tune_array_copy(softn, sizeof(ipf_nat_tuneables), ipf_nat_tuneables); if (softn->ipf_nat_tune == NULL) { ipf_nat_soft_destroy(softc, softn); return (NULL); } if (ipf_tune_array_link(softc, softn->ipf_nat_tune) == -1) { ipf_nat_soft_destroy(softc, softn); return (NULL); } softn->ipf_nat_list_tail = &softn->ipf_nat_list; if (softc->ipf_large_nat) { softn->ipf_nat_table_max = NAT_TABLE_MAX_LARGE; softn->ipf_nat_table_sz = NAT_TABLE_SZ_LARGE; softn->ipf_nat_maprules_sz = NAT_SIZE_LARGE; softn->ipf_nat_rdrrules_sz = RDR_SIZE_LARGE; softn->ipf_nat_hostmap_sz = HOSTMAP_SIZE_LARGE; } else { softn->ipf_nat_table_max = NAT_TABLE_MAX_NORMAL; softn->ipf_nat_table_sz = NAT_TABLE_SZ_NORMAL; softn->ipf_nat_maprules_sz = NAT_SIZE_NORMAL; softn->ipf_nat_rdrrules_sz = RDR_SIZE_NORMAL; softn->ipf_nat_hostmap_sz = HOSTMAP_SIZE_NORMAL; } softn->ipf_nat_doflush = 0; #ifdef IPFILTER_LOG softn->ipf_nat_logging = 1; #else softn->ipf_nat_logging = 0; #endif softn->ipf_nat_defage = DEF_NAT_AGE; softn->ipf_nat_defipage = IPF_TTLVAL(60); softn->ipf_nat_deficmpage = IPF_TTLVAL(3); softn->ipf_nat_table_wm_high = 99; softn->ipf_nat_table_wm_low = 90; return (softn); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_soft_destroy */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to NAT context structure */ /* */ /* Undo what ipf_nat_soft_create() did: unlink and free the tuneables and */ /* then free the NAT context structure itself. */ /* ------------------------------------------------------------------------ */ void ipf_nat_soft_destroy(ipf_main_softc_t *softc, void *arg) { ipf_nat_softc_t *softn = arg; if (softn->ipf_nat_tune != NULL) { ipf_tune_array_unlink(softc, softn->ipf_nat_tune); KFREES(softn->ipf_nat_tune, sizeof(ipf_nat_tuneables)); softn->ipf_nat_tune = NULL; } KFREE(softn); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_soft_init */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to NAT context structure */ /* */ /* Initialise all of the NAT locks, tables and other structures.
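*/
/*
 * Editor's note (added for illustration): the intended lifecycle of the
 * NAT soft context is ipf_nat_soft_create() (allocate the context and
 * set defaults), optionally adjust the table size tuneables, then
 * ipf_nat_soft_init() below (allocate the tables), and at shutdown
 * ipf_nat_soft_fini() followed by ipf_nat_soft_destroy().
 */
/*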
*/ /* ------------------------------------------------------------------------ */ int ipf_nat_soft_init(ipf_main_softc_t *softc, void *arg) { ipf_nat_softc_t *softn = arg; ipftq_t *tq; int i; KMALLOCS(softn->ipf_nat_table[0], nat_t **, \ sizeof(nat_t *) * softn->ipf_nat_table_sz); if (softn->ipf_nat_table[0] != NULL) { bzero((char *)softn->ipf_nat_table[0], softn->ipf_nat_table_sz * sizeof(nat_t *)); } else { return (-1); } KMALLOCS(softn->ipf_nat_table[1], nat_t **, \ sizeof(nat_t *) * softn->ipf_nat_table_sz); if (softn->ipf_nat_table[1] != NULL) { bzero((char *)softn->ipf_nat_table[1], softn->ipf_nat_table_sz * sizeof(nat_t *)); } else { return (-2); } KMALLOCS(softn->ipf_nat_map_rules, ipnat_t **, \ sizeof(ipnat_t *) * softn->ipf_nat_maprules_sz); if (softn->ipf_nat_map_rules != NULL) { bzero((char *)softn->ipf_nat_map_rules, softn->ipf_nat_maprules_sz * sizeof(ipnat_t *)); } else { return (-3); } KMALLOCS(softn->ipf_nat_rdr_rules, ipnat_t **, \ sizeof(ipnat_t *) * softn->ipf_nat_rdrrules_sz); if (softn->ipf_nat_rdr_rules != NULL) { bzero((char *)softn->ipf_nat_rdr_rules, softn->ipf_nat_rdrrules_sz * sizeof(ipnat_t *)); } else { return (-4); } KMALLOCS(softn->ipf_hm_maptable, hostmap_t **, \ sizeof(hostmap_t *) * softn->ipf_nat_hostmap_sz); if (softn->ipf_hm_maptable != NULL) { bzero((char *)softn->ipf_hm_maptable, sizeof(hostmap_t *) * softn->ipf_nat_hostmap_sz); } else { return (-5); } softn->ipf_hm_maplist = NULL; KMALLOCS(softn->ipf_nat_stats.ns_side[0].ns_bucketlen, u_int *, softn->ipf_nat_table_sz * sizeof(u_int)); if (softn->ipf_nat_stats.ns_side[0].ns_bucketlen == NULL) { return (-6); } bzero((char *)softn->ipf_nat_stats.ns_side[0].ns_bucketlen, softn->ipf_nat_table_sz * sizeof(u_int)); KMALLOCS(softn->ipf_nat_stats.ns_side[1].ns_bucketlen, u_int *, softn->ipf_nat_table_sz * sizeof(u_int)); if (softn->ipf_nat_stats.ns_side[1].ns_bucketlen == NULL) { return (-7); } bzero((char *)softn->ipf_nat_stats.ns_side[1].ns_bucketlen, softn->ipf_nat_table_sz * sizeof(u_int)); if (softn->ipf_nat_maxbucket == 0) { for (i = softn->ipf_nat_table_sz; i > 0; i >>= 1) softn->ipf_nat_maxbucket++; softn->ipf_nat_maxbucket *= 2; } ipf_sttab_init(softc, softn->ipf_nat_tcptq); /* * Increase this because we may have "keep state" following this too * and packet storms can occur if this is removed too quickly. 
*/ softn->ipf_nat_tcptq[IPF_TCPS_CLOSED].ifq_ttl = softc->ipf_tcplastack; softn->ipf_nat_tcptq[IPF_TCP_NSTATES - 1].ifq_next = &softn->ipf_nat_udptq; IPFTQ_INIT(&softn->ipf_nat_udptq, softn->ipf_nat_defage, "nat ipftq udp tab"); softn->ipf_nat_udptq.ifq_next = &softn->ipf_nat_udpacktq; IPFTQ_INIT(&softn->ipf_nat_udpacktq, softn->ipf_nat_defage, "nat ipftq udpack tab"); softn->ipf_nat_udpacktq.ifq_next = &softn->ipf_nat_icmptq; IPFTQ_INIT(&softn->ipf_nat_icmptq, softn->ipf_nat_deficmpage, "nat icmp ipftq tab"); softn->ipf_nat_icmptq.ifq_next = &softn->ipf_nat_icmpacktq; IPFTQ_INIT(&softn->ipf_nat_icmpacktq, softn->ipf_nat_defage, "nat icmpack ipftq tab"); softn->ipf_nat_icmpacktq.ifq_next = &softn->ipf_nat_iptq; IPFTQ_INIT(&softn->ipf_nat_iptq, softn->ipf_nat_defipage, "nat ip ipftq tab"); softn->ipf_nat_iptq.ifq_next = &softn->ipf_nat_pending; IPFTQ_INIT(&softn->ipf_nat_pending, 1, "nat pending ipftq tab"); softn->ipf_nat_pending.ifq_next = NULL; for (i = 0, tq = softn->ipf_nat_tcptq; i < IPF_TCP_NSTATES; i++, tq++) { if (tq->ifq_ttl < softn->ipf_nat_deficmpage) tq->ifq_ttl = softn->ipf_nat_deficmpage; else if (tq->ifq_ttl > softn->ipf_nat_defage && softc->ipf_large_nat) tq->ifq_ttl = softn->ipf_nat_defage; } /* * Increase this because we may have "keep state" following * this too and packet storms can occur if this is removed * too quickly. */ softn->ipf_nat_tcptq[IPF_TCPS_CLOSED].ifq_ttl = softc->ipf_tcplastack; MUTEX_INIT(&softn->ipf_nat_new, "ipf nat new mutex"); MUTEX_INIT(&softn->ipf_nat_io, "ipf nat io mutex"); softn->ipf_nat_inited = 1; return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_soft_fini */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Free all memory used by NAT structures allocated at runtime. */ /* ------------------------------------------------------------------------ */ int ipf_nat_soft_fini(ipf_main_softc_t *softc, void *arg) { ipf_nat_softc_t *softn = arg; ipftq_t *ifq, *ifqnext; (void) ipf_nat_clearlist(softc, softn); (void) ipf_nat_flushtable(softc, softn); /* * Proxy timeout queues are not cleaned here because although they * exist on the NAT list, ipf_proxy_unload is called after unload * and the proxies actually are responsible for them being created. * Should the proxy timeouts have their own list? There's no real * justification as this is the only complication. 
*/ for (ifq = softn->ipf_nat_utqe; ifq != NULL; ifq = ifqnext) { ifqnext = ifq->ifq_next; if (ipf_deletetimeoutqueue(ifq) == 0) ipf_freetimeoutqueue(softc, ifq); } if (softn->ipf_nat_table[0] != NULL) { KFREES(softn->ipf_nat_table[0], sizeof(nat_t *) * softn->ipf_nat_table_sz); softn->ipf_nat_table[0] = NULL; } if (softn->ipf_nat_table[1] != NULL) { KFREES(softn->ipf_nat_table[1], sizeof(nat_t *) * softn->ipf_nat_table_sz); softn->ipf_nat_table[1] = NULL; } if (softn->ipf_nat_map_rules != NULL) { KFREES(softn->ipf_nat_map_rules, sizeof(ipnat_t *) * softn->ipf_nat_maprules_sz); softn->ipf_nat_map_rules = NULL; } if (softn->ipf_nat_rdr_rules != NULL) { KFREES(softn->ipf_nat_rdr_rules, sizeof(ipnat_t *) * softn->ipf_nat_rdrrules_sz); softn->ipf_nat_rdr_rules = NULL; } if (softn->ipf_hm_maptable != NULL) { KFREES(softn->ipf_hm_maptable, sizeof(hostmap_t *) * softn->ipf_nat_hostmap_sz); softn->ipf_hm_maptable = NULL; } if (softn->ipf_nat_stats.ns_side[0].ns_bucketlen != NULL) { KFREES(softn->ipf_nat_stats.ns_side[0].ns_bucketlen, sizeof(u_int) * softn->ipf_nat_table_sz); softn->ipf_nat_stats.ns_side[0].ns_bucketlen = NULL; } if (softn->ipf_nat_stats.ns_side[1].ns_bucketlen != NULL) { KFREES(softn->ipf_nat_stats.ns_side[1].ns_bucketlen, sizeof(u_int) * softn->ipf_nat_table_sz); softn->ipf_nat_stats.ns_side[1].ns_bucketlen = NULL; } if (softn->ipf_nat_inited == 1) { softn->ipf_nat_inited = 0; ipf_sttab_destroy(softn->ipf_nat_tcptq); MUTEX_DESTROY(&softn->ipf_nat_new); MUTEX_DESTROY(&softn->ipf_nat_io); MUTEX_DESTROY(&softn->ipf_nat_udptq.ifq_lock); MUTEX_DESTROY(&softn->ipf_nat_udpacktq.ifq_lock); MUTEX_DESTROY(&softn->ipf_nat_icmptq.ifq_lock); MUTEX_DESTROY(&softn->ipf_nat_icmpacktq.ifq_lock); MUTEX_DESTROY(&softn->ipf_nat_iptq.ifq_lock); MUTEX_DESTROY(&softn->ipf_nat_pending.ifq_lock); } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_setlock */ /* Returns: Nil */ /* Parameters: arg(I) - pointer to soft state information */ /* tmp(I) - new lock value */ /* */ /* Set the "lock status" of NAT to the value in tmp. */ /* ------------------------------------------------------------------------ */ void ipf_nat_setlock(void *arg, int tmp) { ipf_nat_softc_t *softn = arg; softn->ipf_nat_lock = tmp; } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_addrdr */ /* Returns: Nil */ /* Parameters: n(I) - pointer to NAT rule to add */ /* */ /* Adds a redirect rule to the hash table of redirect rules and the list of */ /* loaded NAT rules. Updates the bitmask indicating which netmasks are in */ /* use by redirect rules. 
*/ /* ------------------------------------------------------------------------ */ static void ipf_nat_addrdr(ipf_nat_softc_t *softn, ipnat_t *n) { ipnat_t **np; u_32_t j; u_int hv; u_int rhv; int k; if (n->in_odstatype == FRI_NORMAL) { k = count4bits(n->in_odstmsk); ipf_inet_mask_add(k, &softn->ipf_nat_rdr_mask); j = (n->in_odstaddr & n->in_odstmsk); rhv = NAT_HASH_FN(j, 0, 0xffffffff); } else { ipf_inet_mask_add(0, &softn->ipf_nat_rdr_mask); j = 0; rhv = 0; } hv = rhv % softn->ipf_nat_rdrrules_sz; np = softn->ipf_nat_rdr_rules + hv; while (*np != NULL) np = &(*np)->in_rnext; n->in_rnext = NULL; n->in_prnext = np; n->in_hv[0] = hv; n->in_use++; *np = n; } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_addmap */ /* Returns: Nil */ /* Parameters: n(I) - pointer to NAT rule to add */ /* */ /* Adds a NAT map rule to the hash table of rules and the list of loaded */ /* NAT rules. Updates the bitmask indicating which netmasks are in use by */ /* map rules. */ /* ------------------------------------------------------------------------ */ static void ipf_nat_addmap(ipf_nat_softc_t *softn, ipnat_t *n) { ipnat_t **np; u_32_t j; u_int hv; u_int rhv; int k; if (n->in_osrcatype == FRI_NORMAL) { k = count4bits(n->in_osrcmsk); ipf_inet_mask_add(k, &softn->ipf_nat_map_mask); j = (n->in_osrcaddr & n->in_osrcmsk); rhv = NAT_HASH_FN(j, 0, 0xffffffff); } else { ipf_inet_mask_add(0, &softn->ipf_nat_map_mask); j = 0; rhv = 0; } hv = rhv % softn->ipf_nat_maprules_sz; np = softn->ipf_nat_map_rules + hv; while (*np != NULL) np = &(*np)->in_mnext; n->in_mnext = NULL; n->in_pmnext = np; n->in_hv[1] = rhv; n->in_use++; *np = n; } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_delrdr */ /* Returns: Nil */ /* Parameters: n(I) - pointer to NAT rule to delete */ /* */ /* Removes a redirect rule from the hash table of redirect rules. */ /* ------------------------------------------------------------------------ */ void ipf_nat_delrdr(ipf_nat_softc_t *softn, ipnat_t *n) { if (n->in_odstatype == FRI_NORMAL) { int k = count4bits(n->in_odstmsk); ipf_inet_mask_del(k, &softn->ipf_nat_rdr_mask); } else { ipf_inet_mask_del(0, &softn->ipf_nat_rdr_mask); } if (n->in_rnext) n->in_rnext->in_prnext = n->in_prnext; *n->in_prnext = n->in_rnext; n->in_use--; } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_delmap */ /* Returns: Nil */ /* Parameters: n(I) - pointer to NAT rule to delete */ /* */ /* Removes a NAT map rule from the hash table of NAT map rules.
*/ /* ------------------------------------------------------------------------ */ void ipf_nat_delmap(ipf_nat_softc_t *softn, ipnat_t *n) { if (n->in_osrcatype == FRI_NORMAL) { int k = count4bits(n->in_osrcmsk); ipf_inet_mask_del(k, &softn->ipf_nat_map_mask); } else { ipf_inet_mask_del(0, &softn->ipf_nat_map_mask); } if (n->in_mnext != NULL) n->in_mnext->in_pmnext = n->in_pmnext; *n->in_pmnext = n->in_mnext; n->in_use--; } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_hostmap */ /* Returns: struct hostmap* - NULL if no hostmap could be created, */ /* else a pointer to the hostmapping to use */ /* Parameters: np(I) - pointer to NAT rule */ /* src(I) - original (real) source IP address */ /* dst(I) - original destination IP address */ /* map(I) - mapped IP address */ /* port(I) - destination port number */ /* Write Locks: ipf_nat */ /* */ /* Check if an ip address has already been allocated for a given mapping */ /* that is not doing port based translation. If it is not yet allocated, */ /* then create a new entry if a non-NULL NAT rule pointer has been supplied. */ /* ------------------------------------------------------------------------ */ static struct hostmap * ipf_nat_hostmap(ipf_nat_softc_t *softn, ipnat_t *np, struct in_addr src, struct in_addr dst, struct in_addr map, u_32_t port) { hostmap_t *hm; u_int hv, rhv; hv = (src.s_addr ^ dst.s_addr); hv += src.s_addr; hv += dst.s_addr; rhv = hv; hv %= softn->ipf_nat_hostmap_sz; for (hm = softn->ipf_hm_maptable[hv]; hm; hm = hm->hm_hnext) if ((hm->hm_osrcip.s_addr == src.s_addr) && (hm->hm_odstip.s_addr == dst.s_addr) && ((np == NULL) || (np == hm->hm_ipnat)) && ((port == 0) || (port == hm->hm_port))) { softn->ipf_nat_stats.ns_hm_addref++; hm->hm_ref++; return (hm); } if (np == NULL) { softn->ipf_nat_stats.ns_hm_nullnp++; return (NULL); } KMALLOC(hm, hostmap_t *); if (hm) { hm->hm_next = softn->ipf_hm_maplist; hm->hm_pnext = &softn->ipf_hm_maplist; if (softn->ipf_hm_maplist != NULL) softn->ipf_hm_maplist->hm_pnext = &hm->hm_next; softn->ipf_hm_maplist = hm; hm->hm_hnext = softn->ipf_hm_maptable[hv]; hm->hm_phnext = softn->ipf_hm_maptable + hv; if (softn->ipf_hm_maptable[hv] != NULL) softn->ipf_hm_maptable[hv]->hm_phnext = &hm->hm_hnext; softn->ipf_hm_maptable[hv] = hm; hm->hm_ipnat = np; np->in_use++; hm->hm_osrcip = src; hm->hm_odstip = dst; hm->hm_nsrcip = map; hm->hm_ndstip.s_addr = 0; hm->hm_ref = 1; hm->hm_port = port; hm->hm_hv = rhv; hm->hm_v = 4; softn->ipf_nat_stats.ns_hm_new++; } else { softn->ipf_nat_stats.ns_hm_newfail++; } return (hm); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_hostmapdel */ /* Returns: Nil */ /* Parameters: hmp(I) - pointer to hostmap structure pointer */ /* Write Locks: ipf_nat */ /* */ /* Decrement the references to this hostmap structure by one. If this */ /* reaches zero then remove it and free it.
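*/
/*
 * Editor's sketch (added for illustration; not part of the original
 * source): every reference returned by ipf_nat_hostmap() is expected to
 * be released through ipf_nat_hostmapdel(), which also clears the
 * caller's pointer.  Per the banners, the ipf_nat write lock is held.
 */
#ifdef IPF_EXAMPLE_SKETCH
static void
ipf_example_hostmap(ipf_main_softc_t *softc, ipf_nat_softc_t *softn,
	ipnat_t *np, struct in_addr src, struct in_addr dst,
	struct in_addr map)
{
	hostmap_t *hm;

	hm = ipf_nat_hostmap(softn, np, src, dst, map, 0);	/* ref taken */
	if (hm != NULL)
		ipf_nat_hostmapdel(softc, &hm);	/* ref dropped, hm now NULL */
}
#endif
/*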
*/ /* ------------------------------------------------------------------------ */ void ipf_nat_hostmapdel(ipf_main_softc_t *softc, struct hostmap **hmp) { struct hostmap *hm; hm = *hmp; *hmp = NULL; hm->hm_ref--; if (hm->hm_ref == 0) { ipf_nat_rule_deref(softc, &hm->hm_ipnat); if (hm->hm_hnext) hm->hm_hnext->hm_phnext = hm->hm_phnext; *hm->hm_phnext = hm->hm_hnext; if (hm->hm_next) hm->hm_next->hm_pnext = hm->hm_pnext; *hm->hm_pnext = hm->hm_next; KFREE(hm); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_fix_outcksum */ /* Returns: Nil */ /* Parameters: cksum(I) - ipf_cksum_t, value of fin_cksum */ /* sp(I) - location of 16bit checksum to update */ /* n(I) - amount to adjust checksum by */ /* partial(I) - partial checksum */ /* */ /* Adjusts the 16bit checksum by "n" for packets going out. */ /* ------------------------------------------------------------------------ */ void ipf_fix_outcksum(int cksum, u_short *sp, u_32_t n, u_32_t partial) { u_short sumshort; u_32_t sum1; if (n == 0) return; if (cksum == 4) { *sp = 0; return; } if (cksum == 2) { sum1 = partial; sum1 = (sum1 & 0xffff) + (sum1 >> 16); *sp = htons(sum1); return; } sum1 = (~ntohs(*sp)) & 0xffff; sum1 += (n); sum1 = (sum1 >> 16) + (sum1 & 0xffff); /* Again */ sum1 = (sum1 >> 16) + (sum1 & 0xffff); sumshort = ~(u_short)sum1; *(sp) = htons(sumshort); } /* ------------------------------------------------------------------------ */ /* Function: ipf_fix_incksum */ /* Returns: Nil */ /* Parameters: cksum(I) - ipf_cksum_t, value of fin_cksum */ /* sp(I) - location of 16bit checksum to update */ /* n(I) - amount to adjust checksum by */ /* partial(I) - partial checksum */ /* */ /* Adjusts the 16bit checksum by "n" for packets going in. */ /* ------------------------------------------------------------------------ */ void ipf_fix_incksum(int cksum, u_short *sp, u_32_t n, u_32_t partial) { u_short sumshort; u_32_t sum1; if (n == 0) return; if (cksum == 4) { *sp = 0; return; } if (cksum == 2) { sum1 = partial; sum1 = (sum1 & 0xffff) + (sum1 >> 16); *sp = htons(sum1); return; } sum1 = (~ntohs(*sp)) & 0xffff; sum1 += ~(n) & 0xffff; sum1 = (sum1 >> 16) + (sum1 & 0xffff); /* Again */ sum1 = (sum1 >> 16) + (sum1 & 0xffff); sumshort = ~(u_short)sum1; *(sp) = htons(sumshort); } /* ------------------------------------------------------------------------ */ /* Function: ipf_fix_datacksum */ /* Returns: Nil */ /* Parameters: sp(I) - location of 16bit checksum to update */ /* n(I) - amount to adjust checksum by */ /* */ /* Fix_datacksum is used *only* for the adjustments of checksums in the */ /* data section of an IP packet. */ /* */ /* The only situation in which you need to do this is when NAT'ing an */ /* ICMP error message. Such a message contains in its body the IP header */ /* of the original IP packet that caused the error. */ /* */ /* You can't use fix_incksum or fix_outcksum in that case, because for the */ /* kernel the data section of the ICMP error is just data, and no special */ /* processing like hardware cksum or ntohs processing has been done by the */ /* kernel on the data section.
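*/
/*
 * Editor's worked example (added for illustration): the adjustment in
 * all three fix functions is the one's complement update of RFC 1624,
 * HC' = ~(~HC + ~m + m').  If a 16bit word in the checksummed data
 * changes from m = 0x1234 to m' = 0xabcd, the caller precomputes
 * n = (~0x1234 & 0xffff) + 0xabcd = 0xedcb + 0xabcd = 0x19998.  For a
 * stored checksum of 0xbeef: ~0xbeef = 0x4110, 0x4110 + 0x19998 =
 * 0x1daa8, folding the carry twice gives 0xdaa9, and its complement
 * 0x2556 is written back.  (ipf_fix_incksum adds ~n instead, undoing
 * the same delta for traffic in the other direction.)
 */
/*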
*/ /* ------------------------------------------------------------------------ */ void ipf_fix_datacksum(u_short *sp, u_32_t n) { u_short sumshort; u_32_t sum1; if (n == 0) return; sum1 = (~ntohs(*sp)) & 0xffff; sum1 += (n); sum1 = (sum1 >> 16) + (sum1 & 0xffff); /* Again */ sum1 = (sum1 >> 16) + (sum1 & 0xffff); sumshort = ~(u_short)sum1; *(sp) = htons(sumshort); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_ioctl */ /* Returns: int - 0 == success, != 0 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* cmd(I) - ioctl command integer */ /* mode(I) - file mode bits used with open */ /* uid(I) - uid of calling process */ /* ctx(I) - pointer used as key for finding context */ /* */ /* Processes an ioctl call made to operate on the IP Filter NAT device. */ /* ------------------------------------------------------------------------ */ int ipf_nat_ioctl(ipf_main_softc_t *softc, caddr_t data, ioctlcmd_t cmd, int mode, int uid, void *ctx) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; int error = 0, ret, arg, getlock; ipnat_t *nat, *nt, *n; ipnat_t natd; SPL_INT(s); #if !SOLARIS && defined(_KERNEL) # if NETBSD_GE_REV(399002000) if ((mode & FWRITE) && kauth_authorize_network(curlwp->l_cred, KAUTH_NETWORK_FIREWALL, KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) # else # if defined(__FreeBSD__) if (securelevel_ge(curthread->td_ucred, 3) && (mode & FWRITE)) # else if ((securelevel >= 3) && (mode & FWRITE)) # endif # endif { IPFERROR(60001); return (EPERM); } # if defined(__FreeBSD__) if (jailed_without_vnet(curthread->td_ucred)) { IPFERROR(60076); return (EOPNOTSUPP); } # endif #endif getlock = (mode & NAT_LOCKHELD) ? 0 : 1; n = NULL; nt = NULL; nat = NULL; if ((cmd == (ioctlcmd_t)SIOCADNAT) || (cmd == (ioctlcmd_t)SIOCRMNAT) || (cmd == (ioctlcmd_t)SIOCPURGENAT)) { if (mode & NAT_SYSSPACE) { bcopy(data, (char *)&natd, sizeof(natd)); nat = &natd; error = 0; } else { bzero(&natd, sizeof(natd)); error = ipf_inobj(softc, data, NULL, &natd, IPFOBJ_IPNAT); if (error != 0) goto done; if (natd.in_size < sizeof(ipnat_t)) { error = EINVAL; goto done; } KMALLOCS(nt, ipnat_t *, natd.in_size); if (nt == NULL) { IPFERROR(60070); error = ENOMEM; goto done; } bzero(nt, natd.in_size); error = ipf_inobjsz(softc, data, nt, IPFOBJ_IPNAT, natd.in_size); if (error) goto done; nat = nt; } /* * For add/delete, look to see if the NAT entry is * already present */ nat->in_flags &= IPN_USERFLAGS; if ((nat->in_redir & NAT_MAPBLK) == 0) { if (nat->in_osrcatype == FRI_NORMAL || nat->in_osrcatype == FRI_NONE) nat->in_osrcaddr &= nat->in_osrcmsk; if (nat->in_odstatype == FRI_NORMAL || nat->in_odstatype == FRI_NONE) nat->in_odstaddr &= nat->in_odstmsk; if ((nat->in_flags & (IPN_SPLIT|IPN_SIPRANGE)) == 0) { if (nat->in_nsrcatype == FRI_NORMAL) nat->in_nsrcaddr &= nat->in_nsrcmsk; if (nat->in_ndstatype == FRI_NORMAL) nat->in_ndstaddr &= nat->in_ndstmsk; } } error = ipf_nat_rule_init(softc, softn, nat); if (error != 0) goto done; MUTEX_ENTER(&softn->ipf_nat_io); for (n = softn->ipf_nat_list; n != NULL; n = n->in_next) if (ipf_nat_cmp_rules(nat, n) == 0) break; } switch (cmd) { #ifdef IPFILTER_LOG case SIOCIPFFB : { int tmp; if (!(mode & FWRITE)) { IPFERROR(60002); error = EPERM; } else { tmp = ipf_log_clear(softc, IPL_LOGNAT); error = BCOPYOUT(&tmp, data, sizeof(tmp)); if (error != 0) { IPFERROR(60057); error = EFAULT; } } break; } case SIOCSETLG : if (!(mode & FWRITE)) { IPFERROR(60003); error = EPERM; } 
else { error = BCOPYIN(data, &softn->ipf_nat_logging, sizeof(softn->ipf_nat_logging)); if (error != 0) error = EFAULT; } break; case SIOCGETLG : error = BCOPYOUT(&softn->ipf_nat_logging, data, sizeof(softn->ipf_nat_logging)); if (error != 0) { IPFERROR(60004); error = EFAULT; } break; case FIONREAD : arg = ipf_log_bytesused(softc, IPL_LOGNAT); error = BCOPYOUT(&arg, data, sizeof(arg)); if (error != 0) { IPFERROR(60005); error = EFAULT; } break; #endif case SIOCADNAT : if (!(mode & FWRITE)) { IPFERROR(60006); error = EPERM; } else if (n != NULL) { natd.in_flineno = n->in_flineno; (void) ipf_outobj(softc, data, &natd, IPFOBJ_IPNAT); IPFERROR(60007); error = EEXIST; } else if (nt == NULL) { IPFERROR(60008); error = ENOMEM; } if (error != 0) { MUTEX_EXIT(&softn->ipf_nat_io); break; } if (nat != nt) bcopy((char *)nat, (char *)nt, sizeof(*n)); error = ipf_nat_siocaddnat(softc, softn, nt, getlock); MUTEX_EXIT(&softn->ipf_nat_io); if (error == 0) { nat = NULL; nt = NULL; } break; case SIOCRMNAT : case SIOCPURGENAT : if (!(mode & FWRITE)) { IPFERROR(60009); error = EPERM; n = NULL; } else if (n == NULL) { IPFERROR(60010); error = ESRCH; } if (error != 0) { MUTEX_EXIT(&softn->ipf_nat_io); break; } if (cmd == (ioctlcmd_t)SIOCPURGENAT) { error = ipf_outobjsz(softc, data, n, IPFOBJ_IPNAT, n->in_size); if (error) { MUTEX_EXIT(&softn->ipf_nat_io); goto done; } n->in_flags |= IPN_PURGE; } ipf_nat_siocdelnat(softc, softn, n, getlock); MUTEX_EXIT(&softn->ipf_nat_io); n = NULL; break; case SIOCGNATS : { natstat_t *nsp = &softn->ipf_nat_stats; nsp->ns_side[0].ns_table = softn->ipf_nat_table[0]; nsp->ns_side[1].ns_table = softn->ipf_nat_table[1]; nsp->ns_list = softn->ipf_nat_list; nsp->ns_maptable = softn->ipf_hm_maptable; nsp->ns_maplist = softn->ipf_hm_maplist; nsp->ns_nattab_sz = softn->ipf_nat_table_sz; nsp->ns_nattab_max = softn->ipf_nat_table_max; nsp->ns_rultab_sz = softn->ipf_nat_maprules_sz; nsp->ns_rdrtab_sz = softn->ipf_nat_rdrrules_sz; nsp->ns_hostmap_sz = softn->ipf_nat_hostmap_sz; nsp->ns_instances = softn->ipf_nat_instances; nsp->ns_ticks = softc->ipf_ticks; #ifdef IPFILTER_LOGGING nsp->ns_log_ok = ipf_log_logok(softc, IPF_LOGNAT); nsp->ns_log_fail = ipf_log_failures(softc, IPF_LOGNAT); #else nsp->ns_log_ok = 0; nsp->ns_log_fail = 0; #endif error = ipf_outobj(softc, data, nsp, IPFOBJ_NATSTAT); break; } case SIOCGNATL : { natlookup_t nl; error = ipf_inobj(softc, data, NULL, &nl, IPFOBJ_NATLOOKUP); if (error == 0) { void *ptr; if (getlock) { READ_ENTER(&softc->ipf_nat); } switch (nl.nl_v) { case 4 : ptr = ipf_nat_lookupredir(&nl); break; #ifdef USE_INET6 case 6 : ptr = ipf_nat6_lookupredir(&nl); break; #endif default: ptr = NULL; break; } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } if (ptr != NULL) { error = ipf_outobj(softc, data, &nl, IPFOBJ_NATLOOKUP); } else { IPFERROR(60011); error = ESRCH; } } break; } case SIOCIPFFL : /* old SIOCFLNAT & SIOCCNATL */ if (!(mode & FWRITE)) { IPFERROR(60012); error = EPERM; break; } if (getlock) { WRITE_ENTER(&softc->ipf_nat); } error = BCOPYIN(data, &arg, sizeof(arg)); if (error != 0) { IPFERROR(60013); error = EFAULT; } else { if (arg == 0) ret = ipf_nat_flushtable(softc, softn); else if (arg == 1) ret = ipf_nat_clearlist(softc, softn); else ret = ipf_nat_extraflush(softc, softn, arg); ipf_proxy_flush(softc->ipf_proxy_soft, arg); } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } if (error == 0) { error = BCOPYOUT(&ret, data, sizeof(ret)); } break; case SIOCMATCHFLUSH : if (!(mode & FWRITE)) { IPFERROR(60014); error = EPERM; break; } if (getlock) { 
WRITE_ENTER(&softc->ipf_nat); } error = ipf_nat_matchflush(softc, softn, data); if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } break; case SIOCPROXY : error = ipf_proxy_ioctl(softc, data, cmd, mode, ctx); break; case SIOCSTLCK : if (!(mode & FWRITE)) { IPFERROR(60015); error = EPERM; } else { error = ipf_lock(data, &softn->ipf_nat_lock); } break; case SIOCSTPUT : if ((mode & FWRITE) != 0) { error = ipf_nat_putent(softc, data, getlock); } else { IPFERROR(60016); error = EACCES; } break; case SIOCSTGSZ : if (softn->ipf_nat_lock) { error = ipf_nat_getsz(softc, data, getlock); } else { IPFERROR(60017); error = EACCES; } break; case SIOCSTGET : if (softn->ipf_nat_lock) { error = ipf_nat_getent(softc, data, getlock); } else { IPFERROR(60018); error = EACCES; } break; case SIOCGENITER : { ipfgeniter_t iter; ipftoken_t *token; ipfobj_t obj; error = ipf_inobj(softc, data, &obj, &iter, IPFOBJ_GENITER); if (error != 0) break; SPL_SCHED(s); token = ipf_token_find(softc, iter.igi_type, uid, ctx); if (token != NULL) { error = ipf_nat_iterator(softc, token, &iter, &obj); WRITE_ENTER(&softc->ipf_tokens); ipf_token_deref(softc, token); RWLOCK_EXIT(&softc->ipf_tokens); } SPL_X(s); break; } case SIOCIPFDELTOK : error = BCOPYIN(data, &arg, sizeof(arg)); if (error == 0) { SPL_SCHED(s); error = ipf_token_del(softc, arg, uid, ctx); SPL_X(s); } else { IPFERROR(60019); error = EFAULT; } break; case SIOCGTQTAB : error = ipf_outobj(softc, data, softn->ipf_nat_tcptq, IPFOBJ_STATETQTAB); break; case SIOCGTABL : error = ipf_nat_gettable(softc, softn, data); break; default : IPFERROR(60020); error = EINVAL; break; } done: if (nat != NULL) ipf_nat_rule_fini(softc, nat); if (nt != NULL) KFREES(nt, nt->in_size); return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_siocaddnat */ /* Returns: int - 0 == success, != 0 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* n(I) - pointer to new NAT rule */ /* getlock(I) - flag indicating if the lock on ipf_nat is held */ /* Mutex Locks: ipf_nat_io */ /* */ /* Handle SIOCADNAT. Resolve and calculate details inside the NAT rule */ /* from information passed to the kernel, then add it to the appropriate */ /* NAT rule table(s). */ /* ------------------------------------------------------------------------ */ static int ipf_nat_siocaddnat(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, ipnat_t *n, int getlock) { int error = 0; if (ipf_nat_resolverule(softc, n) != 0) { IPFERROR(60022); return (ENOENT); } if ((n->in_age[0] == 0) && (n->in_age[1] != 0)) { IPFERROR(60023); return (EINVAL); } if (n->in_redir == (NAT_DIVERTUDP|NAT_MAP)) { /* * Prerecord whether or not the destination of the divert * is local to the interface the packet is going to be * sent out.
*/ n->in_dlocal = ipf_deliverlocal(softc, n->in_v[1], n->in_ifps[1], &n->in_ndstip6); } if (getlock) { WRITE_ENTER(&softc->ipf_nat); } n->in_next = NULL; n->in_pnext = softn->ipf_nat_list_tail; *n->in_pnext = n; softn->ipf_nat_list_tail = &n->in_next; n->in_use++; if (n->in_redir & NAT_REDIRECT) { n->in_flags &= ~IPN_NOTDST; switch (n->in_v[0]) { case 4 : ipf_nat_addrdr(softn, n); break; #ifdef USE_INET6 case 6 : ipf_nat6_addrdr(softn, n); break; #endif default : break; } ATOMIC_INC32(softn->ipf_nat_stats.ns_rules_rdr); } if (n->in_redir & (NAT_MAP|NAT_MAPBLK)) { n->in_flags &= ~IPN_NOTSRC; switch (n->in_v[0]) { case 4 : ipf_nat_addmap(softn, n); break; #ifdef USE_INET6 case 6 : ipf_nat6_addmap(softn, n); break; #endif default : break; } ATOMIC_INC32(softn->ipf_nat_stats.ns_rules_map); } if (n->in_age[0] != 0) n->in_tqehead[0] = ipf_addtimeoutqueue(softc, &softn->ipf_nat_utqe, n->in_age[0]); if (n->in_age[1] != 0) n->in_tqehead[1] = ipf_addtimeoutqueue(softc, &softn->ipf_nat_utqe, n->in_age[1]); MUTEX_INIT(&n->in_lock, "ipnat rule lock"); n = NULL; ATOMIC_INC32(softn->ipf_nat_stats.ns_rules); #if SOLARIS && !defined(INSTANCES) pfil_delayed_copy = 0; #endif if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); /* WRITE */ } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_ruleaddrinit */ /* Returns: int - 0 == success, else error code */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* n(I) - pointer to NAT rule */ /* */ /* Initialise all of the NAT address structures in a NAT rule. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_ruleaddrinit(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, ipnat_t *n) { int idx, error; if ((n->in_ndst.na_atype == FRI_LOOKUP) && (n->in_ndst.na_type != IPLT_DSTLIST)) { IPFERROR(60071); return (EINVAL); } if ((n->in_nsrc.na_atype == FRI_LOOKUP) && (n->in_nsrc.na_type != IPLT_DSTLIST)) { IPFERROR(60069); return (EINVAL); } if (n->in_redir == NAT_BIMAP) { n->in_ndstaddr = n->in_osrcaddr; n->in_ndstmsk = n->in_osrcmsk; n->in_odstaddr = n->in_nsrcaddr; n->in_odstmsk = n->in_nsrcmsk; } if (n->in_redir & NAT_REDIRECT) idx = 1; else idx = 0; /* * Initialise all of the address fields. */ error = ipf_nat_nextaddrinit(softc, n->in_names, &n->in_osrc, 1, n->in_ifps[idx]); if (error != 0) return (error); error = ipf_nat_nextaddrinit(softc, n->in_names, &n->in_odst, 1, n->in_ifps[idx]); if (error != 0) return (error); error = ipf_nat_nextaddrinit(softc, n->in_names, &n->in_nsrc, 1, n->in_ifps[idx]); if (error != 0) return (error); error = ipf_nat_nextaddrinit(softc, n->in_names, &n->in_ndst, 1, n->in_ifps[idx]); if (error != 0) return (error); if (n->in_redir & NAT_DIVERTUDP) ipf_nat_builddivertmp(softn, n); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_resolverule */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* n(I) - pointer to NAT rule */ /* */ /* Resolve the interface names in the NAT rule into interface pointers and, */ /* if the rule names a proxy label, look the proxy up. Fails if the named */ /* proxy cannot be found.
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_resolverule(ipf_main_softc_t *softc, ipnat_t *n) { char *base; base = n->in_names; n->in_ifps[0] = ipf_resolvenic(softc, base + n->in_ifnames[0], n->in_v[0]); if (n->in_ifnames[1] == -1) { n->in_ifnames[1] = n->in_ifnames[0]; n->in_ifps[1] = n->in_ifps[0]; } else { n->in_ifps[1] = ipf_resolvenic(softc, base + n->in_ifnames[1], n->in_v[1]); } if (n->in_plabel != -1) { if (n->in_redir & NAT_REDIRECT) n->in_apr = ipf_proxy_lookup(softc->ipf_proxy_soft, n->in_pr[0], base + n->in_plabel); else n->in_apr = ipf_proxy_lookup(softc->ipf_proxy_soft, n->in_pr[1], base + n->in_plabel); if (n->in_apr == NULL) return (-1); } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_siocdelnat */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* n(I) - pointer to NAT rule to delete */ /* getlock(I) - flag indicating if the lock on ipf_nat is held */ /* Mutex Locks: ipf_nat_io */ /* */ /* Handle SIOCRMNAT and SIOCPURGENAT: remove the NAT rule found by the */ /* caller from the lists and tables of NAT rules. */ /* ------------------------------------------------------------------------ */ static void ipf_nat_siocdelnat(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, ipnat_t *n, int getlock) { if (getlock) { WRITE_ENTER(&softc->ipf_nat); } ipf_nat_delrule(softc, softn, n, 1); if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); /* READ/WRITE */ } } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_getsz */ /* Returns: int - 0 == success, != 0 is the error value. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to natget structure with the kernel */ /* pointer to get the size of. */ /* getlock(I) - flag indicating whether or not the caller */ /* holds a lock on ipf_nat */ /* */ /* Handle SIOCSTGSZ. */ /* Return the size of the nat list entry to be copied back to user space. */ /* The size of the entry is stored in the ng_sz field and the entire natget */ /* structure is copied back to the user. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_getsz(ipf_main_softc_t *softc, caddr_t data, int getlock) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; ap_session_t *aps; nat_t *nat, *n; natget_t ng; int error; error = BCOPYIN(data, &ng, sizeof(ng)); if (error != 0) { IPFERROR(60024); return (EFAULT); } if (getlock) { READ_ENTER(&softc->ipf_nat); } nat = ng.ng_ptr; if (!nat) { nat = softn->ipf_nat_instances; ng.ng_sz = 0; /* * Empty list so the size returned is 0. Simple. */ if (nat == NULL) { if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } error = BCOPYOUT(&ng, data, sizeof(ng)); if (error != 0) { IPFERROR(60025); return (EFAULT); } return (0); } } else { /* * Make sure the pointer we're copying from exists in the * current list of entries. Security precaution to prevent * copying of random kernel data. */ for (n = softn->ipf_nat_instances; n; n = n->nat_next) if (n == nat) break; if (n == NULL) { if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } IPFERROR(60026); return (ESRCH); } } /* * Include any space required for proxy data structures.
*/ ng.ng_sz = sizeof(nat_save_t); aps = nat->nat_aps; if (aps != NULL) { ng.ng_sz += sizeof(ap_session_t) - 4; if (aps->aps_data != 0) ng.ng_sz += aps->aps_psiz; } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } error = BCOPYOUT(&ng, data, sizeof(ng)); if (error != 0) { IPFERROR(60027); return (EFAULT); } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_getent */ /* Returns: int - 0 == success, != 0 is the error value. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to natget structure with kernel pointer*/ /* to NAT structure to copy out. */ /* getlock(I) - flag indicating whether or not the caller */ /* holds a lock on ipf_nat */ /* */ /* Handle SIOCSTGET. */ /* Copies out a NAT entry to user space. Any additional data held for a */ /* proxy is also copied, as too is the NAT rule which was responsible for it */ /* ------------------------------------------------------------------------ */ static int ipf_nat_getent(ipf_main_softc_t *softc, caddr_t data, int getlock) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; int error, outsize; ap_session_t *aps; nat_save_t *ipn, ipns; nat_t *n, *nat; error = ipf_inobj(softc, data, NULL, &ipns, IPFOBJ_NATSAVE); if (error != 0) return (error); if ((ipns.ipn_dsize < sizeof(ipns)) || (ipns.ipn_dsize > 81920)) { IPFERROR(60028); return (EINVAL); } KMALLOCS(ipn, nat_save_t *, ipns.ipn_dsize); if (ipn == NULL) { IPFERROR(60029); return (ENOMEM); } if (getlock) { READ_ENTER(&softc->ipf_nat); } ipn->ipn_dsize = ipns.ipn_dsize; nat = ipns.ipn_next; if (nat == NULL) { nat = softn->ipf_nat_instances; if (nat == NULL) { if (softn->ipf_nat_instances == NULL) { IPFERROR(60030); error = ENOENT; } goto finished; } } else { /* * Make sure the pointer we're copying from exists in the * current list of entries. Security precaution to prevent * copying of random kernel data. */ for (n = softn->ipf_nat_instances; n; n = n->nat_next) if (n == nat) break; if (n == NULL) { IPFERROR(60031); error = ESRCH; goto finished; } } ipn->ipn_next = nat->nat_next; /* * Copy the NAT structure. */ bcopy((char *)nat, &ipn->ipn_nat, sizeof(*nat)); /* * If we have a pointer to the NAT rule it belongs to, save that too. */ if (nat->nat_ptr != NULL) bcopy((char *)nat->nat_ptr, (char *)&ipn->ipn_ipnat, sizeof(ipn->ipn_ipnat)); /* * If we also know the NAT entry has an associated filter rule, * save that too. */ if (nat->nat_fr != NULL) bcopy((char *)nat->nat_fr, (char *)&ipn->ipn_fr, sizeof(ipn->ipn_fr)); /* * Last but not least, if there is an application proxy session set * up for this NAT entry, then copy that out too, including any * private data saved alongside it by the proxy.
*/ aps = nat->nat_aps; outsize = ipn->ipn_dsize - sizeof(*ipn) + sizeof(ipn->ipn_data); if (aps != NULL) { char *s; if (outsize < sizeof(*aps)) { IPFERROR(60032); error = ENOBUFS; goto finished; } s = ipn->ipn_data; bcopy((char *)aps, s, sizeof(*aps)); s += sizeof(*aps); outsize -= sizeof(*aps); if ((aps->aps_data != NULL) && (outsize >= aps->aps_psiz)) bcopy(aps->aps_data, s, aps->aps_psiz); else { IPFERROR(60033); error = ENOBUFS; } } if (error == 0) { error = ipf_outobjsz(softc, data, ipn, IPFOBJ_NATSAVE, ipns.ipn_dsize); } finished: if (ipn != NULL) { KFREES(ipn, ipns.ipn_dsize); } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_putent */ /* Returns: int - 0 == success, != 0 is the error value. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to natget structure with NAT */ /* structure information to load into the kernel */ /* getlock(I) - flag indicating whether or not a write lock */ /* on ipf_nat is already held. */ /* */ /* Handle SIOCSTPUT. */ /* Loads a NAT table entry from user space, including a NAT rule, proxy and */ /* firewall rule data structures, if pointers to them indicate so. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_putent(ipf_main_softc_t *softc, caddr_t data, int getlock) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; nat_save_t ipn, *ipnn; ap_session_t *aps; nat_t *n, *nat; frentry_t *fr; fr_info_t fin; ipnat_t *in; int error; error = ipf_inobj(softc, data, NULL, &ipn, IPFOBJ_NATSAVE); if (error != 0) return (error); /* * Initialise early because of code at junkput label. */ n = NULL; in = NULL; aps = NULL; nat = NULL; ipnn = NULL; fr = NULL; /* * New entry, copy in the rest of the NAT entry if its size is more * than just the nat_t structure. */ if (ipn.ipn_dsize > sizeof(ipn)) { if (ipn.ipn_dsize > 81920) { IPFERROR(60034); error = ENOMEM; goto junkput; } KMALLOCS(ipnn, nat_save_t *, ipn.ipn_dsize); if (ipnn == NULL) { IPFERROR(60035); return (ENOMEM); } bzero(ipnn, ipn.ipn_dsize); error = ipf_inobjsz(softc, data, ipnn, IPFOBJ_NATSAVE, ipn.ipn_dsize); if (error != 0) { goto junkput; } } else ipnn = &ipn; KMALLOC(nat, nat_t *); if (nat == NULL) { IPFERROR(60037); error = ENOMEM; goto junkput; } bcopy((char *)&ipnn->ipn_nat, (char *)nat, sizeof(*nat)); switch (nat->nat_v[0]) { case 4: #ifdef USE_INET6 case 6 : #endif break; default : IPFERROR(60061); error = EPROTONOSUPPORT; goto junkput; /*NOTREACHED*/ } /* * Initialize all these so that ipf_nat_delete() doesn't cause a crash. */ bzero((char *)nat, offsetof(struct nat, nat_tqe)); nat->nat_tqe.tqe_pnext = NULL; nat->nat_tqe.tqe_next = NULL; nat->nat_tqe.tqe_ifq = NULL; nat->nat_tqe.tqe_parent = nat; /* * Restore the rule associated with this nat session */ in = ipnn->ipn_nat.nat_ptr; if (in != NULL) { KMALLOCS(in, ipnat_t *, ipnn->ipn_ipnat.in_size); nat->nat_ptr = in; if (in == NULL) { IPFERROR(60038); error = ENOMEM; goto junkput; } bcopy((char *)&ipnn->ipn_ipnat, (char *)in, ipnn->ipn_ipnat.in_size); in->in_use = 1; in->in_flags |= IPN_DELETE; ATOMIC_INC32(softn->ipf_nat_stats.ns_rules); if (ipf_nat_resolverule(softc, in) != 0) { IPFERROR(60039); error = ESRCH; goto junkput; } } /* * Check that the NAT entry doesn't already exist in the kernel. * * For NAT_OUTBOUND, we look for a duplicate MAP entry. To do * this, we check to see if the inbound combination of addresses and * ports is already known.
Similar logic is applied for NAT_INBOUND. * */ bzero((char *)&fin, sizeof(fin)); fin.fin_v = nat->nat_v[0]; fin.fin_p = nat->nat_pr[0]; fin.fin_rev = nat->nat_rev; fin.fin_ifp = nat->nat_ifps[0]; fin.fin_data[0] = ntohs(nat->nat_ndport); fin.fin_data[1] = ntohs(nat->nat_nsport); switch (nat->nat_dir) { case NAT_OUTBOUND : case NAT_DIVERTOUT : if (getlock) { READ_ENTER(&softc->ipf_nat); } fin.fin_v = nat->nat_v[1]; if (nat->nat_v[1] == 4) { n = ipf_nat_inlookup(&fin, nat->nat_flags, fin.fin_p, nat->nat_ndstip, nat->nat_nsrcip); #ifdef USE_INET6 } else if (nat->nat_v[1] == 6) { n = ipf_nat6_inlookup(&fin, nat->nat_flags, fin.fin_p, &nat->nat_ndst6.in6, &nat->nat_nsrc6.in6); #endif } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } if (n != NULL) { IPFERROR(60040); error = EEXIST; goto junkput; } break; case NAT_INBOUND : case NAT_DIVERTIN : if (getlock) { READ_ENTER(&softc->ipf_nat); } if (fin.fin_v == 4) { n = ipf_nat_outlookup(&fin, nat->nat_flags, fin.fin_p, nat->nat_ndstip, nat->nat_nsrcip); #ifdef USE_INET6 } else if (fin.fin_v == 6) { n = ipf_nat6_outlookup(&fin, nat->nat_flags, fin.fin_p, &nat->nat_ndst6.in6, &nat->nat_nsrc6.in6); #endif } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } if (n != NULL) { IPFERROR(60041); error = EEXIST; goto junkput; } break; default : IPFERROR(60042); error = EINVAL; goto junkput; } /* * Restore ap_session_t structure. Include the private data allocated * if it was there. */ aps = nat->nat_aps; if (aps != NULL) { KMALLOC(aps, ap_session_t *); nat->nat_aps = aps; if (aps == NULL) { IPFERROR(60043); error = ENOMEM; goto junkput; } bcopy(ipnn->ipn_data, (char *)aps, sizeof(*aps)); if (in != NULL) aps->aps_apr = in->in_apr; else aps->aps_apr = NULL; if (aps->aps_psiz != 0) { if (aps->aps_psiz > 81920) { IPFERROR(60044); error = ENOMEM; goto junkput; } KMALLOCS(aps->aps_data, void *, aps->aps_psiz); if (aps->aps_data == NULL) { IPFERROR(60045); error = ENOMEM; goto junkput; } bcopy(ipnn->ipn_data + sizeof(*aps), aps->aps_data, aps->aps_psiz); } else { aps->aps_psiz = 0; aps->aps_data = NULL; } } /* * If there was a filtering rule associated with this entry then * build up a new one. 
*/ fr = nat->nat_fr; if (fr != NULL) { if ((nat->nat_flags & SI_NEWFR) != 0) { KMALLOC(fr, frentry_t *); nat->nat_fr = fr; if (fr == NULL) { IPFERROR(60046); error = ENOMEM; goto junkput; } ipnn->ipn_nat.nat_fr = fr; fr->fr_ref = 1; (void) ipf_outobj(softc, data, ipnn, IPFOBJ_NATSAVE); bcopy((char *)&ipnn->ipn_fr, (char *)fr, sizeof(*fr)); fr->fr_ref = 1; fr->fr_dsize = 0; fr->fr_data = NULL; fr->fr_type = FR_T_NONE; MUTEX_NUKE(&fr->fr_lock); MUTEX_INIT(&fr->fr_lock, "nat-filter rule lock"); } else { if (getlock) { READ_ENTER(&softc->ipf_nat); } for (n = softn->ipf_nat_instances; n; n = n->nat_next) if (n->nat_fr == fr) break; if (n != NULL) { MUTEX_ENTER(&fr->fr_lock); fr->fr_ref++; MUTEX_EXIT(&fr->fr_lock); } if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } if (n == NULL) { IPFERROR(60047); error = ESRCH; goto junkput; } } } if (ipnn != &ipn) { KFREES(ipnn, ipn.ipn_dsize); ipnn = NULL; } if (getlock) { WRITE_ENTER(&softc->ipf_nat); } if (fin.fin_v == 4) error = ipf_nat_finalise(&fin, nat); #ifdef USE_INET6 else error = ipf_nat6_finalise(&fin, nat); #endif if (getlock) { RWLOCK_EXIT(&softc->ipf_nat); } if (error == 0) return (0); IPFERROR(60048); error = ENOMEM; junkput: if (fr != NULL) { (void) ipf_derefrule(softc, &fr); } if ((ipnn != NULL) && (ipnn != &ipn)) { KFREES(ipnn, ipn.ipn_dsize); } if (nat != NULL) { if (aps != NULL) { if (aps->aps_data != NULL) { KFREES(aps->aps_data, aps->aps_psiz); } KFREE(aps); } if (in != NULL) { if (in->in_apr) ipf_proxy_deref(in->in_apr); KFREES(in, in->in_size); } KFREE(nat); } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_delete */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* nat(I) - pointer to NAT structure to delete */ /* logtype(I) - type of LOG record to create before deleting */ /* Write Lock: ipf_nat */ /* */ /* Delete a nat entry from the various lists and table. If NAT logging is */ /* enabled then generate a NAT log record for this event. */ /* ------------------------------------------------------------------------ */ void ipf_nat_delete(ipf_main_softc_t *softc, struct nat *nat, int logtype) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; int madeorphan = 0, bkt, removed = 0; nat_stat_side_t *nss; struct ipnat *ipn; if (logtype != 0 && softn->ipf_nat_logging != 0) ipf_nat_log(softc, softn, nat, logtype); /* * Take it as a general indication that all the pointers are set if * nat_pnext is set. 
*/ if (nat->nat_pnext != NULL) { removed = 1; bkt = nat->nat_hv[0] % softn->ipf_nat_table_sz; nss = &softn->ipf_nat_stats.ns_side[0]; if (nss->ns_bucketlen[bkt] > 0) nss->ns_bucketlen[bkt]--; if (nss->ns_bucketlen[bkt] == 0) { nss->ns_inuse--; } bkt = nat->nat_hv[1] % softn->ipf_nat_table_sz; nss = &softn->ipf_nat_stats.ns_side[1]; if (nss->ns_bucketlen[bkt] > 0) nss->ns_bucketlen[bkt]--; if (nss->ns_bucketlen[bkt] == 0) { nss->ns_inuse--; } *nat->nat_pnext = nat->nat_next; if (nat->nat_next != NULL) { nat->nat_next->nat_pnext = nat->nat_pnext; nat->nat_next = NULL; } nat->nat_pnext = NULL; *nat->nat_phnext[0] = nat->nat_hnext[0]; if (nat->nat_hnext[0] != NULL) { nat->nat_hnext[0]->nat_phnext[0] = nat->nat_phnext[0]; nat->nat_hnext[0] = NULL; } nat->nat_phnext[0] = NULL; *nat->nat_phnext[1] = nat->nat_hnext[1]; if (nat->nat_hnext[1] != NULL) { nat->nat_hnext[1]->nat_phnext[1] = nat->nat_phnext[1]; nat->nat_hnext[1] = NULL; } nat->nat_phnext[1] = NULL; if ((nat->nat_flags & SI_WILDP) != 0) { ATOMIC_DEC32(softn->ipf_nat_stats.ns_wilds); } madeorphan = 1; } if (nat->nat_me != NULL) { *nat->nat_me = NULL; nat->nat_me = NULL; nat->nat_ref--; ASSERT(nat->nat_ref >= 0); } if (nat->nat_tqe.tqe_ifq != NULL) { /* * No call to ipf_freetimeoutqueue() is made here, they are * garbage collected in ipf_nat_expire(). */ (void) ipf_deletequeueentry(&nat->nat_tqe); } if (nat->nat_sync) { ipf_sync_del_nat(softc->ipf_sync_soft, nat->nat_sync); nat->nat_sync = NULL; } if (logtype == NL_EXPIRE) softn->ipf_nat_stats.ns_expire++; MUTEX_ENTER(&nat->nat_lock); /* * NL_DESTROY should only be passed in when we've got nat_ref >= 2. * This happens when a nat'd packet is blocked and we want to throw * away the NAT session. */ if (logtype == NL_DESTROY) { if (nat->nat_ref > 2) { nat->nat_ref -= 2; MUTEX_EXIT(&nat->nat_lock); if (removed) softn->ipf_nat_stats.ns_orphans++; return; } } else if (nat->nat_ref > 1) { nat->nat_ref--; MUTEX_EXIT(&nat->nat_lock); if (madeorphan == 1) softn->ipf_nat_stats.ns_orphans++; return; } ASSERT(nat->nat_ref >= 0); MUTEX_EXIT(&nat->nat_lock); nat->nat_ref = 0; if (madeorphan == 0) softn->ipf_nat_stats.ns_orphans--; /* * At this point, nat_ref can be either 0 or -1 */ softn->ipf_nat_stats.ns_proto[nat->nat_pr[0]]--; if (nat->nat_fr != NULL) { (void) ipf_derefrule(softc, &nat->nat_fr); } if (nat->nat_hm != NULL) { ipf_nat_hostmapdel(softc, &nat->nat_hm); } /* * If there is an active reference from the nat entry to its parent * rule, decrement the rule's reference count and free it too if no * longer being used. */ ipn = nat->nat_ptr; nat->nat_ptr = NULL; if (ipn != NULL) { ipn->in_space++; ipf_nat_rule_deref(softc, &ipn); } if (nat->nat_aps != NULL) { ipf_proxy_free(softc, nat->nat_aps); nat->nat_aps = NULL; } MUTEX_DESTROY(&nat->nat_lock); softn->ipf_nat_stats.ns_active--; /* * If there's a fragment table entry too for this nat entry, then * dereference that as well. This is after nat_lock is released * because of Tru64. */ ipf_frag_natforget(softc, (void *)nat); KFREE(nat); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_flushtable */ /* Returns: int - number of NAT rules deleted */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* Write Lock: ipf_nat */ /* */ /* Deletes all currently active NAT sessions. In deleting each NAT entry a */ /* log record should be emitted in ipf_nat_delete() if NAT logging is */ /* enabled. 
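*/
/*
 * Editor's sketch (added for illustration; not part of the original
 * source): the flush paths below are reached from userland through the
 * SIOCIPFFL case in ipf_nat_ioctl() above, where an argument of 0
 * selects ipf_nat_flushtable(), 1 selects ipf_nat_clearlist() and any
 * other value goes to ipf_nat_extraflush().  The device path is an
 * assumption.
 */
#ifdef IPF_EXAMPLE_SKETCH
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int
ipf_example_flush_nat(void)
{
	int fd, arg = 0;		/* 0 == flush active NAT sessions */

	fd = open("/dev/ipnat", O_RDWR);	/* assumed device node */
	if (fd == -1)
		return (-1);
	if (ioctl(fd, SIOCIPFFL, &arg) == -1)
		arg = -1;
	/* on success the kernel copies the number flushed back into arg */
	close(fd);
	return (arg);
}
#endif
/*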
 */
/* ------------------------------------------------------------------------ */
/*
 * nat_flushtable - clear the NAT table of all mapping entries.
 */
static int
ipf_nat_flushtable(ipf_main_softc_t *softc, ipf_nat_softc_t *softn)
{
	nat_t *nat;
	int j = 0;

	/*
	 * ALL NAT mappings are deleted, so let's just make the deletions
	 * quicker.
	 */
	if (softn->ipf_nat_table[0] != NULL)
		bzero((char *)softn->ipf_nat_table[0],
		    sizeof(softn->ipf_nat_table[0]) *
		    softn->ipf_nat_table_sz);
	if (softn->ipf_nat_table[1] != NULL)
		bzero((char *)softn->ipf_nat_table[1],
		    sizeof(softn->ipf_nat_table[1]) *
		    softn->ipf_nat_table_sz);

	while ((nat = softn->ipf_nat_instances) != NULL) {
		ipf_nat_delete(softc, nat, NL_FLUSH);
		j++;
	}

	return (j);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_clearlist                                           */
/* Returns:     int - number of NAT/RDR rules deleted                       */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              softn(I) - pointer to NAT context structure                 */
/*                                                                          */
/* Delete all rules in the current list of rules.  There is nothing elegant */
/* about this cleanup: simply free all entries on the list of rules and     */
/* clear out the tables used for hashed NAT rule lookups.                   */
/* ------------------------------------------------------------------------ */
static int
ipf_nat_clearlist(ipf_main_softc_t *softc, ipf_nat_softc_t *softn)
{
	ipnat_t *n;
	int i = 0;

	if (softn->ipf_nat_map_rules != NULL) {
		bzero((char *)softn->ipf_nat_map_rules,
		    sizeof(*softn->ipf_nat_map_rules) *
		    softn->ipf_nat_maprules_sz);
	}
	if (softn->ipf_nat_rdr_rules != NULL) {
		bzero((char *)softn->ipf_nat_rdr_rules,
		    sizeof(*softn->ipf_nat_rdr_rules) *
		    softn->ipf_nat_rdrrules_sz);
	}

	while ((n = softn->ipf_nat_list) != NULL) {
		ipf_nat_delrule(softc, softn, n, 0);
		i++;
	}
#if SOLARIS && !defined(INSTANCES)
	pfil_delayed_copy = 1;
#endif
	return (i);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_delrule                                             */
/* Returns:     Nil                                                         */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              softn(I) - pointer to NAT context structure                 */
/*              np(I)    - pointer to NAT rule to delete                    */
/*              purge(I) - 1 == allow purge, 0 == prevent purge             */
/* Locks:       WRITE(ipf_nat)                                              */
/*                                                                          */
/* Preventing "purge" from occurring is allowed because when all of the NAT */
/* rules are being removed, allowing the "purge" to walk through the list   */
/* of NAT sessions, possibly multiple times, would be a large performance   */
/* hit, on the order of O(N^2).
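 */
#if 0
/*
 * Editor's sketch, excluded from the build: why "purge" is suppressed
 * when everything is being torn down.  Purging one rule walks every
 * active session, so purging while clearing R rules over S sessions
 * would cost O(R * S); a full teardown instead drops the sessions in
 * one pass and then clears the rules with purge == 0 each time.
 * example_flush_all() is a hypothetical name.
 */
static int
example_flush_all(ipf_main_softc_t *softc, ipf_nat_softc_t *softn)
{
	int n;

	n = ipf_nat_flushtable(softc, softn);	/* one O(S) pass */
	n += ipf_nat_clearlist(softc, softn);	/* purge == 0 per rule */
	return (n);
}
#endif
/* (end of header note)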
*/ /* ------------------------------------------------------------------------ */ static void ipf_nat_delrule(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, ipnat_t *np, int purge) { if (np->in_pnext != NULL) { *np->in_pnext = np->in_next; if (np->in_next != NULL) np->in_next->in_pnext = np->in_pnext; if (softn->ipf_nat_list_tail == &np->in_next) softn->ipf_nat_list_tail = np->in_pnext; } if ((purge == 1) && ((np->in_flags & IPN_PURGE) != 0)) { nat_t *next; nat_t *nat; for (next = softn->ipf_nat_instances; (nat = next) != NULL;) { next = nat->nat_next; if (nat->nat_ptr == np) ipf_nat_delete(softc, nat, NL_PURGE); } } if ((np->in_flags & IPN_DELETE) == 0) { if (np->in_redir & NAT_REDIRECT) { switch (np->in_v[0]) { case 4 : ipf_nat_delrdr(softn, np); break; #ifdef USE_INET6 case 6 : ipf_nat6_delrdr(softn, np); break; #endif } } if (np->in_redir & (NAT_MAPBLK|NAT_MAP)) { switch (np->in_v[0]) { case 4 : ipf_nat_delmap(softn, np); break; #ifdef USE_INET6 case 6 : ipf_nat6_delmap(softn, np); break; #endif } } } np->in_flags |= IPN_DELETE; ipf_nat_rule_deref(softc, &np); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_newmap */ /* Returns: int - -1 == error, 0 == success */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT entry */ /* ni(I) - pointer to structure with misc. information needed */ /* to create new NAT entry. */ /* */ /* Given an empty NAT structure, populate it with new information about a */ /* new NAT session, as defined by the matching NAT rule. */ /* ni.nai_ip is passed in uninitialised and must be set, in host byte order,*/ /* to the new IP address for the translation. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_newmap(fr_info_t *fin, nat_t *nat, natinfo_t *ni) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; u_short st_port, dport, sport, port, sp, dp; struct in_addr in, inb; hostmap_t *hm; u_32_t flags; u_32_t st_ip; ipnat_t *np; nat_t *natl; int l; /* * If it's an outbound packet which doesn't match any existing * record, then create a new port */ l = 0; hm = NULL; np = ni->nai_np; st_ip = np->in_snip; st_port = np->in_spnext; flags = nat->nat_flags; if (flags & IPN_ICMPQUERY) { sport = fin->fin_data[1]; dport = 0; } else { sport = htons(fin->fin_data[0]); dport = htons(fin->fin_data[1]); } /* * Do a loop until we either run out of entries to try or we find * a NAT mapping that isn't currently being used. This is done * because the change to the source is not (usually) being fixed. */ do { port = 0; in.s_addr = htonl(np->in_snip); if (l == 0) { /* * Check to see if there is an existing NAT * setup for this IP address pair. 
*/ hm = ipf_nat_hostmap(softn, np, fin->fin_src, fin->fin_dst, in, 0); if (hm != NULL) in.s_addr = hm->hm_nsrcip.s_addr; } else if ((l == 1) && (hm != NULL)) { ipf_nat_hostmapdel(softc, &hm); } in.s_addr = ntohl(in.s_addr); nat->nat_hm = hm; if ((np->in_nsrcmsk == 0xffffffff) && (np->in_spnext == 0)) { if (l > 0) { NBUMPSIDEX(1, ns_exhausted, ns_exhausted_1); DT4(ns_exhausted_1, fr_info_t *, fin, nat_t *, nat, natinfo_t *, ni, ipnat_t *, np); return (-1); } } if (np->in_redir == NAT_BIMAP && np->in_osrcmsk == np->in_nsrcmsk) { /* * map the address block in a 1:1 fashion */ in.s_addr = np->in_nsrcaddr; in.s_addr |= fin->fin_saddr & ~np->in_osrcmsk; in.s_addr = ntohl(in.s_addr); } else if (np->in_redir & NAT_MAPBLK) { if ((l >= np->in_ppip) || ((l > 0) && !(flags & IPN_TCPUDP))) { NBUMPSIDEX(1, ns_exhausted, ns_exhausted_2); DT4(ns_exhausted_2, fr_info_t *, fin, nat_t *, nat, natinfo_t *, ni, ipnat_t *, np); return (-1); } /* * map-block - Calculate destination address. */ in.s_addr = ntohl(fin->fin_saddr); in.s_addr &= ntohl(~np->in_osrcmsk); inb.s_addr = in.s_addr; in.s_addr /= np->in_ippip; in.s_addr &= ntohl(~np->in_nsrcmsk); in.s_addr += ntohl(np->in_nsrcaddr); /* * Calculate destination port. */ if ((flags & IPN_TCPUDP) && (np->in_ppip != 0)) { port = ntohs(sport) + l; port %= np->in_ppip; port += np->in_ppip * (inb.s_addr % np->in_ippip); port += MAPBLK_MINPORT; port = htons(port); } } else if ((np->in_nsrcaddr == 0) && (np->in_nsrcmsk == 0xffffffff)) { i6addr_t in6; /* * 0/32 - use the interface's IP address. */ if ((l > 0) || ipf_ifpaddr(softc, 4, FRI_NORMAL, fin->fin_ifp, &in6, NULL) == -1) { NBUMPSIDEX(1, ns_new_ifpaddr, ns_new_ifpaddr_1); DT4(ns_new_ifpaddr_1, fr_info_t *, fin, nat_t *, nat, natinfo_t *, ni, ipnat_t *, np); return (-1); } in.s_addr = ntohl(in6.in4.s_addr); } else if ((np->in_nsrcaddr == 0) && (np->in_nsrcmsk == 0)) { /* * 0/0 - use the original source address/port. */ if (l > 0) { NBUMPSIDEX(1, ns_exhausted, ns_exhausted_3); DT4(ns_exhausted_3, fr_info_t *, fin, nat_t *, nat, natinfo_t *, ni, ipnat_t *, np); return (-1); } in.s_addr = ntohl(fin->fin_saddr); } else if ((np->in_nsrcmsk != 0xffffffff) && (np->in_spnext == 0) && ((l > 0) || (hm == NULL))) np->in_snip++; natl = NULL; if ((flags & IPN_TCPUDP) && ((np->in_redir & NAT_MAPBLK) == 0) && (np->in_flags & IPN_AUTOPORTMAP)) { /* * "ports auto" (without map-block) */ if ((l > 0) && (l % np->in_ppip == 0)) { if ((l > np->in_ppip) && np->in_nsrcmsk != 0xffffffff) np->in_snip++; } if (np->in_ppip != 0) { port = ntohs(sport); port += (l % np->in_ppip); port %= np->in_ppip; port += np->in_ppip * (ntohl(fin->fin_saddr) % np->in_ippip); port += MAPBLK_MINPORT; port = htons(port); } } else if (((np->in_redir & NAT_MAPBLK) == 0) && (flags & IPN_TCPUDPICMP) && (np->in_spnext != 0)) { /* * Standard port translation. Select next port. 
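 */
#if 0
		/*
		 * Editor's sketch, excluded from the build: the selection
		 * below in miniature.  "ports sequential" hands out
		 * in_spnext; otherwise a port is drawn at random from the
		 * range.  Either way the cursor always advances and wraps
		 * from in_spmax back to in_spmin.  example_pick_port() is
		 * a hypothetical name; ipf_random() is the real helper.
		 */
		static u_short
		example_pick_port(u_short spmin, u_short spmax,
		    u_short *spnext, int sequential)
		{
			u_short port;

			if (sequential)
				port = *spnext;
			else
				port = spmin +
				    ipf_random() % (spmax - spmin + 1);
			(*spnext)++;			/* always advance */
			if (*spnext > spmax)
				*spnext = spmin;	/* wrap */
			return (htons(port));
		}
#endif
		/*
		 * (live version, which also steps the source address on wrap)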
*/ if (np->in_flags & IPN_SEQUENTIAL) { port = np->in_spnext; } else { port = ipf_random() % (np->in_spmax - np->in_spmin + 1); port += np->in_spmin; } port = htons(port); np->in_spnext++; if (np->in_spnext > np->in_spmax) { np->in_spnext = np->in_spmin; if (np->in_nsrcmsk != 0xffffffff) np->in_snip++; } } if (np->in_flags & IPN_SIPRANGE) { if (np->in_snip > ntohl(np->in_nsrcmsk)) np->in_snip = ntohl(np->in_nsrcaddr); } else { if ((np->in_nsrcmsk != 0xffffffff) && ((np->in_snip + 1) & ntohl(np->in_nsrcmsk)) > ntohl(np->in_nsrcaddr)) np->in_snip = ntohl(np->in_nsrcaddr) + 1; } if ((port == 0) && (flags & (IPN_TCPUDPICMP|IPN_ICMPQUERY))) port = sport; /* * Here we do a lookup of the connection as seen from * the outside. If an IP# pair already exists, try * again. So if you have A->B becomes C->B, you can * also have D->E become C->E but not D->B causing * another C->B. Also take protocol and ports into * account when determining whether a pre-existing * NAT setup will cause an external conflict where * this is appropriate. */ inb.s_addr = htonl(in.s_addr); sp = fin->fin_data[0]; dp = fin->fin_data[1]; fin->fin_data[0] = fin->fin_data[1]; fin->fin_data[1] = ntohs(port); natl = ipf_nat_inlookup(fin, flags & ~(SI_WILDP|NAT_SEARCH), (u_int)fin->fin_p, fin->fin_dst, inb); fin->fin_data[0] = sp; fin->fin_data[1] = dp; /* * Has the search wrapped around and come back to the * start ? */ if ((natl != NULL) && (np->in_spnext != 0) && (st_port == np->in_spnext) && (np->in_snip != 0) && (st_ip == np->in_snip)) { NBUMPSIDED(1, ns_wrap); DT4(ns_wrap, fr_info_t *, fin, nat_t *, nat, natinfo_t *, ni, ipnat_t *, np); return (-1); } l++; } while (natl != NULL); /* Setup the NAT table */ nat->nat_osrcip = fin->fin_src; nat->nat_nsrcaddr = htonl(in.s_addr); nat->nat_odstip = fin->fin_dst; nat->nat_ndstip = fin->fin_dst; if (nat->nat_hm == NULL) nat->nat_hm = ipf_nat_hostmap(softn, np, fin->fin_src, fin->fin_dst, nat->nat_nsrcip, 0); if (flags & IPN_TCPUDP) { nat->nat_osport = sport; nat->nat_nsport = port; /* sport */ nat->nat_odport = dport; nat->nat_ndport = dport; ((tcphdr_t *)fin->fin_dp)->th_sport = port; } else if (flags & IPN_ICMPQUERY) { nat->nat_oicmpid = fin->fin_data[1]; ((icmphdr_t *)fin->fin_dp)->icmp_id = port; nat->nat_nicmpid = port; } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_newrdr */ /* Returns: int - -1 == error, 0 == success (no move), 1 == success and */ /* allow rule to be moved if IPN_ROUNDR is set. */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT entry */ /* ni(I) - pointer to structure with misc. information needed */ /* to create new NAT entry. */ /* */ /* ni.nai_ip is passed in uninitialised and must be set, in host byte order,*/ /* to the new IP address for the translation. 
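 */
#if 0
/*
 * Editor's sketch, excluded from the build: "round-robin sticky" in
 * miniature.  Round-robin rotates between candidate destinations for
 * each new session, while IPN_STICKY pins a given src/dst pair to the
 * destination remembered in its hostmap.  example_pick_rdr() is a
 * hypothetical name.
 */
static struct in_addr
example_pick_rdr(hostmap_t *hm, struct in_addr rotated)
{
	/* an existing hostmap wins; otherwise take the rotated choice */
	return ((hm != NULL) ? hm->hm_ndstip : rotated);
}
#endif
/* (redirect counterpart of ipf_nat_newmap)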
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_newrdr(fr_info_t *fin, nat_t *nat, natinfo_t *ni) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; u_short nport, dport, sport; struct in_addr in, inb; u_short sp, dp; hostmap_t *hm; u_32_t flags; ipnat_t *np; nat_t *natl; int move; move = 1; hm = NULL; in.s_addr = 0; np = ni->nai_np; flags = nat->nat_flags; if (flags & IPN_ICMPQUERY) { dport = fin->fin_data[1]; sport = 0; } else { sport = htons(fin->fin_data[0]); dport = htons(fin->fin_data[1]); } /* TRACE sport, dport */ /* * If the matching rule has IPN_STICKY set, then we want to have the * same rule kick in as before. Why would this happen? If you have * a collection of rdr rules with "round-robin sticky", the current * packet might match a different one to the previous connection but * we want the same destination to be used. */ if (((np->in_flags & (IPN_ROUNDR|IPN_SPLIT)) != 0) && ((np->in_flags & IPN_STICKY) != 0)) { hm = ipf_nat_hostmap(softn, NULL, fin->fin_src, fin->fin_dst, in, (u_32_t)dport); if (hm != NULL) { in.s_addr = ntohl(hm->hm_ndstip.s_addr); np = hm->hm_ipnat; ni->nai_np = np; move = 0; ipf_nat_hostmapdel(softc, &hm); } } /* * Otherwise, it's an inbound packet. Most likely, we don't * want to rewrite source ports and source addresses. Instead, * we want to rewrite to a fixed internal address and fixed * internal port. */ if (np->in_flags & IPN_SPLIT) { in.s_addr = np->in_dnip; inb.s_addr = htonl(in.s_addr); if ((np->in_flags & (IPN_ROUNDR|IPN_STICKY)) == IPN_STICKY) { hm = ipf_nat_hostmap(softn, NULL, fin->fin_src, fin->fin_dst, inb, (u_32_t)dport); if (hm != NULL) { in.s_addr = hm->hm_ndstip.s_addr; move = 0; } } if (hm == NULL || hm->hm_ref == 1) { if (np->in_ndstaddr == htonl(in.s_addr)) { np->in_dnip = ntohl(np->in_ndstmsk); move = 0; } else { np->in_dnip = ntohl(np->in_ndstaddr); } } if (hm != NULL) ipf_nat_hostmapdel(softc, &hm); } else if ((np->in_ndstaddr == 0) && (np->in_ndstmsk == 0xffffffff)) { i6addr_t in6; /* * 0/32 - use the interface's IP address. */ if (ipf_ifpaddr(softc, 4, FRI_NORMAL, fin->fin_ifp, &in6, NULL) == -1) { NBUMPSIDEX(0, ns_new_ifpaddr, ns_new_ifpaddr_2); DT3(ns_new_ifpaddr_2, fr_info_t *, fin, nat_t *, nat, natinfo_t, ni); return (-1); } in.s_addr = ntohl(in6.in4.s_addr); } else if ((np->in_ndstaddr == 0) && (np->in_ndstmsk== 0)) { /* * 0/0 - use the original destination address/port. */ in.s_addr = ntohl(fin->fin_daddr); } else if (np->in_redir == NAT_BIMAP && np->in_ndstmsk == np->in_odstmsk) { /* * map the address block in a 1:1 fashion */ in.s_addr = np->in_ndstaddr; in.s_addr |= fin->fin_daddr & ~np->in_ndstmsk; in.s_addr = ntohl(in.s_addr); } else { in.s_addr = ntohl(np->in_ndstaddr); } if ((np->in_dpnext == 0) || ((flags & NAT_NOTRULEPORT) != 0)) nport = dport; else { /* * Whilst not optimized for the case where * pmin == pmax, the gain is not significant. */ if (((np->in_flags & IPN_FIXEDDPORT) == 0) && (np->in_odport != np->in_dtop)) { nport = ntohs(dport) - np->in_odport + np->in_dpmax; nport = htons(nport); } else { nport = htons(np->in_dpnext); np->in_dpnext++; if (np->in_dpnext > np->in_dpmax) np->in_dpnext = np->in_dpmin; } } /* * When the redirect-to address is set to 0.0.0.0, just * assume a blank `forwarding' of the packet. We don't * setup any translation for this either. 
*/ if (in.s_addr == 0) { if (nport == dport) { NBUMPSIDED(0, ns_xlate_null); return (-1); } in.s_addr = ntohl(fin->fin_daddr); } /* * Check to see if this redirect mapping already exists and if * it does, return "failure" (allowing it to be created will just * cause one or both of these "connections" to stop working.) */ inb.s_addr = htonl(in.s_addr); sp = fin->fin_data[0]; dp = fin->fin_data[1]; fin->fin_data[1] = fin->fin_data[0]; fin->fin_data[0] = ntohs(nport); natl = ipf_nat_outlookup(fin, flags & ~(SI_WILDP|NAT_SEARCH), (u_int)fin->fin_p, inb, fin->fin_src); fin->fin_data[0] = sp; fin->fin_data[1] = dp; if (natl != NULL) { DT2(ns_new_xlate_exists, fr_info_t *, fin, nat_t *, natl); NBUMPSIDE(0, ns_xlate_exists); return (-1); } inb.s_addr = htonl(in.s_addr); nat->nat_ndstaddr = htonl(in.s_addr); nat->nat_odstip = fin->fin_dst; nat->nat_nsrcip = fin->fin_src; nat->nat_osrcip = fin->fin_src; if ((nat->nat_hm == NULL) && ((np->in_flags & IPN_STICKY) != 0)) nat->nat_hm = ipf_nat_hostmap(softn, np, fin->fin_src, fin->fin_dst, inb, (u_32_t)dport); if (flags & IPN_TCPUDP) { nat->nat_odport = dport; nat->nat_ndport = nport; nat->nat_osport = sport; nat->nat_nsport = sport; ((tcphdr_t *)fin->fin_dp)->th_dport = nport; } else if (flags & IPN_ICMPQUERY) { nat->nat_oicmpid = fin->fin_data[1]; ((icmphdr_t *)fin->fin_dp)->icmp_id = nport; nat->nat_nicmpid = nport; } return (move); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_add */ /* Returns: nat_t* - NULL == failure to create new NAT structure, */ /* else pointer to new NAT structure */ /* Parameters: fin(I) - pointer to packet information */ /* np(I) - pointer to NAT rule */ /* natsave(I) - pointer to where to store NAT struct pointer */ /* flags(I) - flags describing the current packet */ /* direction(I) - direction of packet (in/out) */ /* Write Lock: ipf_nat */ /* */ /* Attempts to create a new NAT entry. Does not actually change the packet */ /* in any way. */ /* */ /* This function is in three main parts: (1) deal with creating a new NAT */ /* structure for a "MAP" rule (outgoing NAT translation); (2) deal with */ /* creating a new NAT structure for a "RDR" rule (incoming NAT translation) */ /* and (3) building that structure and putting it into the NAT table(s). */ /* */ /* NOTE: natsave should NOT be used to point back to an ipstate_t struct */ /* as it can result in memory being corrupted. 
 */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_nat_add(fr_info_t *fin, ipnat_t *np, nat_t **natsave, u_int flags,
	int direction)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	hostmap_t *hm = NULL;
	nat_t *nat, *natl;
	natstat_t *nsp;
	u_int nflags;
	natinfo_t ni;
	int move;

	nsp = &softn->ipf_nat_stats;

	if ((nsp->ns_active * 100 / softn->ipf_nat_table_max) >
	    softn->ipf_nat_table_wm_high) {
		softn->ipf_nat_doflush = 1;
	}

	if (nsp->ns_active >= softn->ipf_nat_table_max) {
		NBUMPSIDED(fin->fin_out, ns_table_max);
		DT2(ns_table_max, nat_stat_t *, nsp, ipf_nat_softc_t *, softn);
		return (NULL);
	}

	move = 1;
	nflags = np->in_flags & flags;
	nflags &= NAT_FROMRULE;

	ni.nai_np = np;
	ni.nai_dport = 0;
	ni.nai_sport = 0;

	/* Give me a new nat */
	KMALLOC(nat, nat_t *);
	if (nat == NULL) {
		DT(ns_memfail);
		NBUMPSIDED(fin->fin_out, ns_memfail);
		/*
		 * Try to automatically tune the max # of entries in the
		 * table allowed to be less than what will cause kmem_alloc()
		 * to fail and try to eliminate panics due to out of memory
		 * conditions arising.
		 */
		if ((softn->ipf_nat_table_max > softn->ipf_nat_table_sz) &&
		    (nsp->ns_active > 100)) {
			softn->ipf_nat_table_max = nsp->ns_active - 100;
			printf("table_max reduced to %d\n",
			    softn->ipf_nat_table_max);
		}
		return (NULL);
	}

	if (flags & IPN_ICMPQUERY) {
		/*
		 * In the ICMP query NAT code, we translate the ICMP id fields
		 * to make them unique.  This is independent of the ICMP type
		 * (e.g. in the unlikely event that a host sends an echo and
		 * a tstamp request with the same id, both packets will have
		 * their ip address/id field changed in the same way).
		 */
		/*
		 * The icmp_id field is used by the sender to identify the
		 * process making the icmp request.  (The receiver just
		 * copies it back in its response.)  So it closely matches
		 * the concept of source port.  We overlay sport, so we can
		 * maximally reuse the existing code.
		 */
		ni.nai_sport = fin->fin_data[1];
		ni.nai_dport = 0;
	}

	bzero((char *)nat, sizeof(*nat));
	nat->nat_flags = flags;
	nat->nat_redir = np->in_redir;
	nat->nat_dir = direction;
	nat->nat_pr[0] = fin->fin_p;
	nat->nat_pr[1] = fin->fin_p;

	/*
	 * Search the current table for a match and create a new mapping
	 * if there is none found.
	 */
	if (np->in_redir & NAT_DIVERTUDP) {
		move = ipf_nat_newdivert(fin, nat, &ni);
	} else if (np->in_redir & NAT_REWRITE) {
		move = ipf_nat_newrewrite(fin, nat, &ni);
	} else if (direction == NAT_OUTBOUND) {
		/*
		 * We can now arrange to call this for the same connection
		 * because ipf_nat_new doesn't protect the code path into
		 * this function.
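 */
#if 0
		/*
		 * Editor's note, illustrative and not compiled: the create
		 * path is lookup-then-insert, so two early packets of one
		 * connection can both reach this point.  An entry found now
		 * is adopted and the freshly allocated one is discarded:
		 */
		natl = ipf_nat_outlookup(fin, nflags, (u_int)fin->fin_p,
		    fin->fin_src, fin->fin_dst);
		if (natl != NULL) {
			KFREE(nat);		/* lose the race gracefully */
			nat = natl;
		}
#endif
		/*
		 * (the live lookup follows; a miss builds a new mapping)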
 */
		natl = ipf_nat_outlookup(fin, nflags, (u_int)fin->fin_p,
		    fin->fin_src, fin->fin_dst);
		if (natl != NULL) {
			KFREE(nat);
			nat = natl;
			goto done;
		}
		move = ipf_nat_newmap(fin, nat, &ni);
	} else {
		/*
		 * NAT_INBOUND is used for redirect rules
		 */
		natl = ipf_nat_inlookup(fin, nflags, (u_int)fin->fin_p,
		    fin->fin_src, fin->fin_dst);
		if (natl != NULL) {
			KFREE(nat);
			nat = natl;
			goto done;
		}
		move = ipf_nat_newrdr(fin, nat, &ni);
	}
	if (move == -1)
		goto badnat;

	np = ni.nai_np;

	nat->nat_mssclamp = np->in_mssclamp;
	nat->nat_me = natsave;
	nat->nat_fr = fin->fin_fr;
	nat->nat_rev = fin->fin_rev;
	nat->nat_ptr = np;
	nat->nat_dlocal = np->in_dlocal;

	if ((np->in_apr != NULL) && ((nat->nat_flags & NAT_SLAVE) == 0)) {
		if (ipf_proxy_new(fin, nat) == -1) {
			NBUMPSIDED(fin->fin_out, ns_appr_fail);
			DT3(ns_appr_fail, fr_info_t *, fin, nat_t *, nat,
			    ipnat_t *, np);
			goto badnat;
		}
	}

	nat->nat_ifps[0] = np->in_ifps[0];
	if (np->in_ifps[0] != NULL) {
		COPYIFNAME(np->in_v[0], np->in_ifps[0], nat->nat_ifnames[0]);
	}

	nat->nat_ifps[1] = np->in_ifps[1];
	if (np->in_ifps[1] != NULL) {
		COPYIFNAME(np->in_v[1], np->in_ifps[1], nat->nat_ifnames[1]);
	}

	if (ipf_nat_finalise(fin, nat) == -1) {
		goto badnat;
	}

	np->in_use++;

	if ((move == 1) && (np->in_flags & IPN_ROUNDR)) {
		if ((np->in_redir & (NAT_REDIRECT|NAT_MAP)) == NAT_REDIRECT) {
			ipf_nat_delrdr(softn, np);
			ipf_nat_addrdr(softn, np);
		} else if ((np->in_redir & (NAT_REDIRECT|NAT_MAP)) ==
		    NAT_MAP) {
			ipf_nat_delmap(softn, np);
			ipf_nat_addmap(softn, np);
		}
	}

	if (flags & SI_WILDP)
		nsp->ns_wilds++;
	nsp->ns_proto[nat->nat_pr[0]]++;

	goto done;
badnat:
	DT3(ns_badnatnew, fr_info_t *, fin, nat_t *, nat, ipnat_t *, np);
	NBUMPSIDE(fin->fin_out, ns_badnatnew);
	if ((hm = nat->nat_hm) != NULL)
		ipf_nat_hostmapdel(softc, &hm);
	KFREE(nat);
	nat = NULL;
done:
	if (nat != NULL && np != NULL)
		np->in_hits++;
	if (natsave != NULL)
		*natsave = nat;
	return (nat);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_finalise                                            */
/* Returns:     int - 0 == success, -1 == failure                           */
/* Parameters:  fin(I) - pointer to packet information                      */
/*              nat(I) - pointer to NAT entry                               */
/* Write Lock:  ipf_nat                                                     */
/*                                                                          */
/* This is the tail end of constructing a new NAT entry and is the same     */
/* for both IPv4 and IPv6.                                                  */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
static int
ipf_nat_finalise(fr_info_t *fin, nat_t *nat)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	u_32_t sum1, sum2, sumd;
	frentry_t *fr;
	u_32_t flags;
#if SOLARIS && defined(_KERNEL) && defined(ICK_M_CTL_MAGIC)
	qpktinfo_t *qpi = fin->fin_qpi;
#endif

	flags = nat->nat_flags;

	switch (nat->nat_pr[0])
	{
	case IPPROTO_ICMP :
		sum1 = LONG_SUM(ntohs(nat->nat_oicmpid));
		sum2 = LONG_SUM(ntohs(nat->nat_nicmpid));
		CALC_SUMD(sum1, sum2, sumd);
		nat->nat_sumd[0] = (sumd & 0xffff) + (sumd >> 16);
		break;

	default :
		sum1 = LONG_SUM(ntohl(nat->nat_osrcaddr) + \
		    ntohs(nat->nat_osport));
		sum2 = LONG_SUM(ntohl(nat->nat_nsrcaddr) + \
		    ntohs(nat->nat_nsport));
		CALC_SUMD(sum1, sum2, sumd);
		nat->nat_sumd[0] = (sumd & 0xffff) + (sumd >> 16);

		sum1 = LONG_SUM(ntohl(nat->nat_odstaddr) + \
		    ntohs(nat->nat_odport));
		sum2 = LONG_SUM(ntohl(nat->nat_ndstaddr) + \
		    ntohs(nat->nat_ndport));
		CALC_SUMD(sum1, sum2, sumd);
		nat->nat_sumd[0] += (sumd & 0xffff) + (sumd >> 16);
		break;
	}

	/*
	 * Compute the partial checksum, just in case.
	 * This is only ever placed into outbound packets so care needs
	 * to be taken over which pair of addresses are used.
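 */
#if 0
	/*
	 * Editor's sketch, excluded from the build: what the
	 * CALC_SUMD()/folding idiom above computes.  For an incremental
	 * checksum update (in the style of RFC 1624), the 32-bit delta
	 * between the one's-complement sums over the old and new values
	 * is folded back into 16 bits, keeping the carries, before it is
	 * applied to a checksum field.  example_fold() is a hypothetical
	 * name.
	 */
	static u_32_t
	example_fold(u_32_t sumd)
	{
		sumd = (sumd & 0xffff) + (sumd >> 16);
		sumd = (sumd & 0xffff) + (sumd >> 16);
		return (sumd);
	}
#endif
	/*
	 * (outbound entries store the new pair, inbound the original one)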
 */
	if (nat->nat_dir == NAT_OUTBOUND) {
		sum1 = LONG_SUM(ntohl(nat->nat_nsrcaddr));
		sum1 += LONG_SUM(ntohl(nat->nat_ndstaddr));
	} else {
		sum1 = LONG_SUM(ntohl(nat->nat_osrcaddr));
		sum1 += LONG_SUM(ntohl(nat->nat_odstaddr));
	}
	sum1 += nat->nat_pr[1];
	nat->nat_sumd[1] = (sum1 & 0xffff) + (sum1 >> 16);

	sum1 = LONG_SUM(ntohl(nat->nat_osrcaddr));
	sum2 = LONG_SUM(ntohl(nat->nat_nsrcaddr));
	CALC_SUMD(sum1, sum2, sumd);
	nat->nat_ipsumd = (sumd & 0xffff) + (sumd >> 16);

	sum1 = LONG_SUM(ntohl(nat->nat_odstaddr));
	sum2 = LONG_SUM(ntohl(nat->nat_ndstaddr));
	CALC_SUMD(sum1, sum2, sumd);
	nat->nat_ipsumd += (sumd & 0xffff) + (sumd >> 16);

	nat->nat_v[0] = 4;
	nat->nat_v[1] = 4;

	if ((nat->nat_ifps[0] != NULL) && (nat->nat_ifps[0] != (void *)-1)) {
		nat->nat_mtu[0] = GETIFMTU_4(nat->nat_ifps[0]);
	}

	if ((nat->nat_ifps[1] != NULL) && (nat->nat_ifps[1] != (void *)-1)) {
		nat->nat_mtu[1] = GETIFMTU_4(nat->nat_ifps[1]);
	}

	if ((nat->nat_flags & SI_CLONE) == 0)
		nat->nat_sync = ipf_sync_new(softc, SMC_NAT, fin, nat);

	if (ipf_nat_insert(softc, softn, nat) == 0) {
		if (softn->ipf_nat_logging)
			ipf_nat_log(softc, softn, nat, NL_NEW);

		fr = nat->nat_fr;
		if (fr != NULL) {
			MUTEX_ENTER(&fr->fr_lock);
			fr->fr_ref++;
			MUTEX_EXIT(&fr->fr_lock);
		}
		return (0);
	}

	NBUMPSIDED(fin->fin_out, ns_unfinalised);
	DT2(ns_unfinalised, fr_info_t *, fin, nat_t *, nat);
	/*
	 * nat_insert failed, so cleanup time...
	 */
	if (nat->nat_sync != NULL)
		ipf_sync_del_nat(softc->ipf_sync_soft, nat->nat_sync);
	return (-1);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_insert                                              */
/* Returns:     int - 0 == success, -1 == failure                           */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              softn(I) - pointer to NAT context structure                 */
/*              nat(I)   - pointer to NAT structure                         */
/* Write Lock:  ipf_nat                                                     */
/*                                                                          */
/* Insert a NAT entry into the hash tables for searching and add it to the  */
/* list of active NAT entries.  Adjust global counters when complete.       */
/* ------------------------------------------------------------------------ */
int
ipf_nat_insert(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, nat_t *nat)
{
	u_int hv0, hv1;
	u_int sp, dp;
	ipnat_t *in;
	int ret;

	/*
	 * Try to return an error as early as possible, so calculate the hash
	 * entry numbers first and then proceed.
 */
	if ((nat->nat_flags & (SI_W_SPORT|SI_W_DPORT)) == 0) {
		if ((nat->nat_flags & IPN_TCPUDP) != 0) {
			sp = nat->nat_osport;
			dp = nat->nat_odport;
		} else if ((nat->nat_flags & IPN_ICMPQUERY) != 0) {
			sp = 0;
			dp = nat->nat_oicmpid;
		} else {
			sp = 0;
			dp = 0;
		}
		hv0 = NAT_HASH_FN(nat->nat_osrcaddr, sp, 0xffffffff);
		hv0 = NAT_HASH_FN(nat->nat_odstaddr, hv0 + dp, 0xffffffff);
		/*
		 * TRACE nat_osrcaddr, nat_osport, nat_odstaddr,
		 * nat_odport, hv0
		 */

		if ((nat->nat_flags & IPN_TCPUDP) != 0) {
			sp = nat->nat_nsport;
			dp = nat->nat_ndport;
		} else if ((nat->nat_flags & IPN_ICMPQUERY) != 0) {
			sp = 0;
			dp = nat->nat_nicmpid;
		} else {
			sp = 0;
			dp = 0;
		}
		hv1 = NAT_HASH_FN(nat->nat_nsrcaddr, sp, 0xffffffff);
		hv1 = NAT_HASH_FN(nat->nat_ndstaddr, hv1 + dp, 0xffffffff);
		/*
		 * TRACE nat_nsrcaddr, nat_nsport, nat_ndstaddr,
		 * nat_ndport, hv1
		 */
	} else {
		hv0 = NAT_HASH_FN(nat->nat_osrcaddr, 0, 0xffffffff);
		hv0 = NAT_HASH_FN(nat->nat_odstaddr, hv0, 0xffffffff);
		/* TRACE nat_osrcaddr, nat_odstaddr, hv0 */

		hv1 = NAT_HASH_FN(nat->nat_nsrcaddr, 0, 0xffffffff);
		hv1 = NAT_HASH_FN(nat->nat_ndstaddr, hv1, 0xffffffff);
		/* TRACE nat_nsrcaddr, nat_ndstaddr, hv1 */
	}

	nat->nat_hv[0] = hv0;
	nat->nat_hv[1] = hv1;

	MUTEX_INIT(&nat->nat_lock, "nat entry lock");

	in = nat->nat_ptr;
	nat->nat_ref = nat->nat_me ? 2 : 1;

	nat->nat_ifnames[0][LIFNAMSIZ - 1] = '\0';
	nat->nat_ifps[0] = ipf_resolvenic(softc, nat->nat_ifnames[0], 4);

	if (nat->nat_ifnames[1][0] != '\0') {
		nat->nat_ifnames[1][LIFNAMSIZ - 1] = '\0';
		nat->nat_ifps[1] = ipf_resolvenic(softc,
		    nat->nat_ifnames[1], 4);
	} else if (in->in_ifnames[1] != -1) {
		char *name;

		name = in->in_names + in->in_ifnames[1];
		if (name[1] != '\0' && name[0] != '-' && name[0] != '*') {
			(void) strncpy(nat->nat_ifnames[1],
			    nat->nat_ifnames[0], LIFNAMSIZ);
			nat->nat_ifnames[1][LIFNAMSIZ - 1] = '\0';
			nat->nat_ifps[1] = nat->nat_ifps[0];
		}
	}
	if ((nat->nat_ifps[0] != NULL) && (nat->nat_ifps[0] != (void *)-1)) {
		nat->nat_mtu[0] = GETIFMTU_4(nat->nat_ifps[0]);
	}
	if ((nat->nat_ifps[1] != NULL) && (nat->nat_ifps[1] != (void *)-1)) {
		nat->nat_mtu[1] = GETIFMTU_4(nat->nat_ifps[1]);
	}

	ret = ipf_nat_hashtab_add(softc, softn, nat);
	if (ret == -1)
		MUTEX_DESTROY(&nat->nat_lock);
	return (ret);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_hashtab_add                                         */
/* Returns:     int - 0 == success, -1 == failure                           */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              softn(I) - pointer to NAT context structure                 */
/*              nat(I)   - pointer to NAT structure                         */
/*                                                                          */
/* Handle the insertion of a NAT entry into the table/list.                 */
/* ------------------------------------------------------------------------ */
int
ipf_nat_hashtab_add(ipf_main_softc_t *softc, ipf_nat_softc_t *softn,
	nat_t *nat)
{
	nat_t **natp;
	u_int hv0;
	u_int hv1;

	if (nat->nat_dir == NAT_INBOUND || nat->nat_dir == NAT_DIVERTIN) {
		hv1 = nat->nat_hv[0] % softn->ipf_nat_table_sz;
		hv0 = nat->nat_hv[1] % softn->ipf_nat_table_sz;
	} else {
		hv0 = nat->nat_hv[0] % softn->ipf_nat_table_sz;
		hv1 = nat->nat_hv[1] % softn->ipf_nat_table_sz;
	}

	if (softn->ipf_nat_stats.ns_side[0].ns_bucketlen[hv0] >=
	    softn->ipf_nat_maxbucket) {
		DT1(ns_bucket_max_0, int,
		    softn->ipf_nat_stats.ns_side[0].ns_bucketlen[hv0]);
		NBUMPSIDE(0, ns_bucket_max);
		return (-1);
	}

	if (softn->ipf_nat_stats.ns_side[1].ns_bucketlen[hv1] >=
	    softn->ipf_nat_maxbucket) {
		DT1(ns_bucket_max_1, int,
		    softn->ipf_nat_stats.ns_side[1].ns_bucketlen[hv1]);
		NBUMPSIDE(1, ns_bucket_max);
		return (-1);
	}

	/*
	 * The ordering of operations in the list and hash table insertion
	 * is very important.
	 * The last operation for each task should be to update the top of
	 * the list, after all the "nexts" have been done so that walking
	 * the list while it is being done does not find strange pointers.
	 *
	 * Global list of NAT instances
	 */
	nat->nat_next = softn->ipf_nat_instances;
	nat->nat_pnext = &softn->ipf_nat_instances;
	if (softn->ipf_nat_instances)
		softn->ipf_nat_instances->nat_pnext = &nat->nat_next;
	softn->ipf_nat_instances = nat;

	/*
	 * Inbound hash table.
	 */
	natp = &softn->ipf_nat_table[0][hv0];
	nat->nat_phnext[0] = natp;
	nat->nat_hnext[0] = *natp;
	if (*natp) {
		(*natp)->nat_phnext[0] = &nat->nat_hnext[0];
	} else {
		NBUMPSIDE(0, ns_inuse);
	}
	*natp = nat;
	NBUMPSIDE(0, ns_bucketlen[hv0]);

	/*
	 * Outbound hash table.
	 */
	natp = &softn->ipf_nat_table[1][hv1];
	nat->nat_phnext[1] = natp;
	nat->nat_hnext[1] = *natp;
	if (*natp)
		(*natp)->nat_phnext[1] = &nat->nat_hnext[1];
	else {
		NBUMPSIDE(1, ns_inuse);
	}
	*natp = nat;
	NBUMPSIDE(1, ns_bucketlen[hv1]);

	ipf_nat_setqueue(softc, softn, nat);

	if (nat->nat_dir & NAT_OUTBOUND) {
		NBUMPSIDE(1, ns_added);
	} else {
		NBUMPSIDE(0, ns_added);
	}
	softn->ipf_nat_stats.ns_active++;
	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_icmperrorlookup                                     */
/* Returns:     nat_t* - pointer to matching NAT structure                  */
/* Parameters:  fin(I) - pointer to packet information                      */
/*              dir(I) - direction of packet (in/out)                       */
/*                                                                          */
/* Check if the ICMP error message is related to an existing TCP, UDP or    */
/* ICMP query nat entry.  It is assumed that the packet is already of the   */
/* required length.                                                         */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_nat_icmperrorlookup(fr_info_t *fin, int dir)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	int flags = 0, type, minlen;
	icmphdr_t *icmp, *orgicmp;
	nat_stat_side_t *nside;
	tcphdr_t *tcp = NULL;
	u_short data[2];
	nat_t *nat;
	ip_t *oip;
	u_int p;

	icmp = fin->fin_dp;
	type = icmp->icmp_type;
	nside = &softn->ipf_nat_stats.ns_side[fin->fin_out];
	/*
	 * Does it at least have the return (basic) IP header ?
	 * Only a basic IP header (no options) should be with an ICMP error
	 * header.  Also, if it's not an error type, then return.
	 */
	if ((fin->fin_hlen != sizeof(ip_t)) || !(fin->fin_flx & FI_ICMPERR)) {
		ATOMIC_INCL(nside->ns_icmp_basic);
		return (NULL);
	}

	/*
	 * Check packet size
	 */
	oip = (ip_t *)((char *)fin->fin_dp + 8);
	minlen = IP_HL(oip) << 2;
	if ((minlen < sizeof(ip_t)) ||
	    (fin->fin_plen < ICMPERR_IPICMPHLEN + minlen)) {
		ATOMIC_INCL(nside->ns_icmp_size);
		return (NULL);
	}

	/*
	 * Is the buffer big enough for all of it ?  It's the size of the IP
	 * header claimed in the encapsulated part which is of concern.  It
	 * may be too big to be in this buffer but not so big that it's
	 * outside the ICMP packet, leading to TCP deref's causing problems.
	 * This is possible because we don't know how big oip_hl is when we
	 * do the pullup early in ipf_check() and thus can't guarantee it is
	 * all here now.
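 */
#if 0
	/*
	 * Editor's aside, excluded from the build: the layout being
	 * parsed here.  An ICMP error carries the offending datagram's
	 * IP header and at least the first 8 bytes of its payload:
	 *
	 *   [outer IP][ICMP (8 bytes)][inner IP hdr = oip][ports/icmp id]
	 *
	 * oip sits 8 bytes past the ICMP header, and minlen must cover
	 * the inner header before the inner ports can be read:
	 */
	oip = (ip_t *)((char *)fin->fin_dp + 8);
	minlen = IP_HL(oip) << 2;	/* inner header length in bytes */
#endif
	/*
	 * (bounds check against what is really in the buffer)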
 */
#ifdef _KERNEL
	{
	mb_t *m;

	m = fin->fin_m;
# if SOLARIS
	if ((char *)oip + fin->fin_dlen - ICMPERR_ICMPHLEN >
	    (char *)m->b_wptr) {
		ATOMIC_INCL(nside->ns_icmp_mbuf);
		return (NULL);
	}
# else
	if ((char *)oip + fin->fin_dlen - ICMPERR_ICMPHLEN >
	    (char *)fin->fin_ip + M_LEN(m)) {
		ATOMIC_INCL(nside->ns_icmp_mbuf);
		return (NULL);
	}
# endif
	}
#endif

	if (fin->fin_daddr != oip->ip_src.s_addr) {
		ATOMIC_INCL(nside->ns_icmp_address);
		return (NULL);
	}

	p = oip->ip_p;
	if (p == IPPROTO_TCP)
		flags = IPN_TCP;
	else if (p == IPPROTO_UDP)
		flags = IPN_UDP;
	else if (p == IPPROTO_ICMP) {
		orgicmp = (icmphdr_t *)((char *)oip + (IP_HL(oip) << 2));

		/* see if this is related to an ICMP query */
		if (ipf_nat_icmpquerytype(orgicmp->icmp_type)) {
			data[0] = fin->fin_data[0];
			data[1] = fin->fin_data[1];
			fin->fin_data[0] = 0;
			fin->fin_data[1] = orgicmp->icmp_id;

			flags = IPN_ICMPERR|IPN_ICMPQUERY;
			/*
			 * NOTE : dir refers to the direction of the original
			 *        ip packet.  By definition the icmp error
			 *        message flows in the opposite direction.
			 */
			if (dir == NAT_INBOUND)
				nat = ipf_nat_inlookup(fin, flags, p,
				    oip->ip_dst, oip->ip_src);
			else
				nat = ipf_nat_outlookup(fin, flags, p,
				    oip->ip_dst, oip->ip_src);
			fin->fin_data[0] = data[0];
			fin->fin_data[1] = data[1];
			return (nat);
		}
	}

	if (flags & IPN_TCPUDP) {
		minlen += 8;		/* + 64 bits of data to get ports */
		/* TRACE (fin,minlen) */
		if (fin->fin_plen < ICMPERR_IPICMPHLEN + minlen) {
			ATOMIC_INCL(nside->ns_icmp_short);
			return (NULL);
		}

		data[0] = fin->fin_data[0];
		data[1] = fin->fin_data[1];
		tcp = (tcphdr_t *)((char *)oip + (IP_HL(oip) << 2));
		fin->fin_data[0] = ntohs(tcp->th_dport);
		fin->fin_data[1] = ntohs(tcp->th_sport);

		if (dir == NAT_INBOUND) {
			nat = ipf_nat_inlookup(fin, flags, p, oip->ip_dst,
			    oip->ip_src);
		} else {
			nat = ipf_nat_outlookup(fin, flags, p, oip->ip_dst,
			    oip->ip_src);
		}
		fin->fin_data[0] = data[0];
		fin->fin_data[1] = data[1];
		return (nat);
	}
	if (dir == NAT_INBOUND)
		nat = ipf_nat_inlookup(fin, 0, p, oip->ip_dst, oip->ip_src);
	else
		nat = ipf_nat_outlookup(fin, 0, p, oip->ip_dst, oip->ip_src);

	return (nat);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_icmperror                                           */
/* Returns:     nat_t* - pointer to matching NAT structure                  */
/* Parameters:  fin(I)    - pointer to packet information                   */
/*              nflags(I) - NAT flags for this packet                       */
/*              dir(I)    - direction of packet (in/out)                    */
/*                                                                          */
/* Fix up an ICMP packet which is an error message for an existing NAT      */
/* session.  This will correct both packet header data and checksums.       */
/*                                                                          */
/* This should *ONLY* be used for incoming ICMP error packets to make sure  */
/* a NAT'd ICMP packet gets correctly recognised.                           */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_nat_icmperror(fr_info_t *fin, u_int *nflags, int dir)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	u_32_t sum1, sum2, sumd, sumd2;
	struct in_addr a1, a2, a3, a4;
	int flags, dlen, odst;
	icmphdr_t *icmp;
	u_short *csump;
	tcphdr_t *tcp;
	nat_t *nat;
	ip_t *oip;
	void *dp;

	if ((fin->fin_flx & (FI_SHORT|FI_FRAGBODY))) {
		NBUMPSIDED(fin->fin_out, ns_icmp_short);
		return (NULL);
	}

	/*
	 * ipf_nat_icmperrorlookup() will return NULL for `defective'
	 * packets.
*/ if ((fin->fin_v != 4) || !(nat = ipf_nat_icmperrorlookup(fin, dir))) { NBUMPSIDED(fin->fin_out, ns_icmp_notfound); return (NULL); } tcp = NULL; csump = NULL; flags = 0; sumd2 = 0; *nflags = IPN_ICMPERR; icmp = fin->fin_dp; oip = (ip_t *)&icmp->icmp_ip; dp = (((char *)oip) + (IP_HL(oip) << 2)); if (oip->ip_p == IPPROTO_TCP) { tcp = (tcphdr_t *)dp; csump = (u_short *)&tcp->th_sum; flags = IPN_TCP; } else if (oip->ip_p == IPPROTO_UDP) { udphdr_t *udp; udp = (udphdr_t *)dp; tcp = (tcphdr_t *)dp; csump = (u_short *)&udp->uh_sum; flags = IPN_UDP; } else if (oip->ip_p == IPPROTO_ICMP) flags = IPN_ICMPQUERY; dlen = fin->fin_plen - ((char *)dp - (char *)fin->fin_ip); /* * Need to adjust ICMP header to include the real IP#'s and * port #'s. Only apply a checksum change relative to the * IP address change as it will be modified again in ipf_nat_checkout * for both address and port. Two checksum changes are * necessary for the two header address changes. Be careful * to only modify the checksum once for the port # and twice * for the IP#. */ /* * Step 1 * Fix the IP addresses in the offending IP packet. You also need * to adjust the IP header checksum of that offending IP packet. * * Normally, you would expect that the ICMP checksum of the * ICMP error message needs to be adjusted as well for the * IP address change in oip. * However, this is a NOP, because the ICMP checksum is * calculated over the complete ICMP packet, which includes the * changed oip IP addresses and oip->ip_sum. However, these * two changes cancel each other out (if the delta for * the IP address is x, then the delta for ip_sum is minus x), * so no change in the icmp_cksum is necessary. * * Inbound ICMP * ------------ * MAP rule, SRC=a,DST=b -> SRC=c,DST=b * - response to outgoing packet (a,b)=>(c,b) (OIP_SRC=c,OIP_DST=b) * - OIP_SRC(c)=nat_newsrcip, OIP_DST(b)=nat_newdstip *=> OIP_SRC(c)=nat_oldsrcip, OIP_DST(b)=nat_olddstip * * RDR rule, SRC=a,DST=b -> SRC=a,DST=c * - response to outgoing packet (c,a)=>(b,a) (OIP_SRC=b,OIP_DST=a) * - OIP_SRC(b)=nat_olddstip, OIP_DST(a)=nat_oldsrcip *=> OIP_SRC(b)=nat_newdstip, OIP_DST(a)=nat_newsrcip * * REWRITE out rule, SRC=a,DST=b -> SRC=c,DST=d * - response to outgoing packet (a,b)=>(c,d) (OIP_SRC=c,OIP_DST=d) * - OIP_SRC(c)=nat_newsrcip, OIP_DST(d)=nat_newdstip *=> OIP_SRC(c)=nat_oldsrcip, OIP_DST(d)=nat_olddstip * * REWRITE in rule, SRC=a,DST=b -> SRC=c,DST=d * - response to outgoing packet (d,c)=>(b,a) (OIP_SRC=b,OIP_DST=a) * - OIP_SRC(b)=nat_olddstip, OIP_DST(a)=nat_oldsrcip *=> OIP_SRC(b)=nat_newdstip, OIP_DST(a)=nat_newsrcip * * Outbound ICMP * ------------- * MAP rule, SRC=a,DST=b -> SRC=c,DST=b * - response to incoming packet (b,c)=>(b,a) (OIP_SRC=b,OIP_DST=a) * - OIP_SRC(b)=nat_olddstip, OIP_DST(a)=nat_oldsrcip *=> OIP_SRC(b)=nat_newdstip, OIP_DST(a)=nat_newsrcip * * RDR rule, SRC=a,DST=b -> SRC=a,DST=c * - response to incoming packet (a,b)=>(a,c) (OIP_SRC=a,OIP_DST=c) * - OIP_SRC(a)=nat_newsrcip, OIP_DST(c)=nat_newdstip *=> OIP_SRC(a)=nat_oldsrcip, OIP_DST(c)=nat_olddstip * * REWRITE out rule, SRC=a,DST=b -> SRC=c,DST=d * - response to incoming packet (d,c)=>(b,a) (OIP_SRC=c,OIP_DST=d) * - OIP_SRC(c)=nat_olddstip, OIP_DST(d)=nat_oldsrcip *=> OIP_SRC(b)=nat_newdstip, OIP_DST(a)=nat_newsrcip * * REWRITE in rule, SRC=a,DST=b -> SRC=c,DST=d * - response to incoming packet (a,b)=>(c,d) (OIP_SRC=b,OIP_DST=a) * - OIP_SRC(b)=nat_newsrcip, OIP_DST(a)=nat_newdstip *=> OIP_SRC(a)=nat_oldsrcip, OIP_DST(c)=nat_olddstip */ if (((fin->fin_out == 0) && ((nat->nat_redir & NAT_MAP) != 0)) || 
	    ((fin->fin_out == 1) && ((nat->nat_redir & NAT_REDIRECT) != 0))) {
		a1.s_addr = ntohl(nat->nat_osrcaddr);
		a4.s_addr = ntohl(oip->ip_src.s_addr);
		a3.s_addr = ntohl(nat->nat_odstaddr);
		a2.s_addr = ntohl(oip->ip_dst.s_addr);
		oip->ip_src.s_addr = htonl(a1.s_addr);
		oip->ip_dst.s_addr = htonl(a3.s_addr);
		odst = 1;
	} else {
		a1.s_addr = ntohl(nat->nat_ndstaddr);
		a2.s_addr = ntohl(oip->ip_dst.s_addr);
		a3.s_addr = ntohl(nat->nat_nsrcaddr);
		a4.s_addr = ntohl(oip->ip_src.s_addr);
		oip->ip_dst.s_addr = htonl(a3.s_addr);
		oip->ip_src.s_addr = htonl(a1.s_addr);
		odst = 0;
	}
	sum1 = 0;
	sum2 = 0;
	sumd = 0;
	CALC_SUMD(a2.s_addr, a3.s_addr, sum1);
	CALC_SUMD(a4.s_addr, a1.s_addr, sum2);
	sumd = sum2 + sum1;
	if (sumd != 0)
		ipf_fix_datacksum(&oip->ip_sum, sumd);

	sumd2 = sumd;
	sum1 = 0;
	sum2 = 0;

	/*
	 * Fix UDP pseudo header checksum to compensate for the
	 * IP address change.
	 */
	if (((flags & IPN_TCPUDP) != 0) && (dlen >= 4)) {
		u_32_t sum3, sum4, sumt;

		/*
		 * Step 2 :
		 * For offending TCP/UDP IP packets, translate the ports as
		 * well, based on the NAT specification.  Of course such
		 * a change may be reflected in the ICMP checksum as well.
		 *
		 * Since the port fields are part of the TCP/UDP checksum
		 * of the offending IP packet, you need to adjust that
		 * checksum as well... except that the change in the port
		 * numbers should be offset by the checksum change.  However,
		 * the TCP/UDP checksum will also need to change if there
		 * has been an IP address change.
		 */
		if (odst == 1) {
			sum1 = ntohs(nat->nat_osport);
			sum4 = ntohs(tcp->th_sport);
			sum3 = ntohs(nat->nat_odport);
			sum2 = ntohs(tcp->th_dport);

			tcp->th_sport = htons(sum1);
			tcp->th_dport = htons(sum3);
		} else {
			sum1 = ntohs(nat->nat_ndport);
			sum2 = ntohs(tcp->th_dport);
			sum3 = ntohs(nat->nat_nsport);
			sum4 = ntohs(tcp->th_sport);

			tcp->th_dport = htons(sum3);
			tcp->th_sport = htons(sum1);
		}
		CALC_SUMD(sum4, sum1, sumt);
		sumd += sumt;
		CALC_SUMD(sum2, sum3, sumt);
		sumd += sumt;

		if (sumd != 0 || sumd2 != 0) {
			/*
			 * At this point, sumd is the delta to apply to the
			 * TCP/UDP header, given the changes in both the IP
			 * address and the ports, and sumd2 is the delta to
			 * apply to the ICMP header, given the IP address
			 * change delta that may need to be applied to the
			 * TCP/UDP checksum instead.
			 *
			 * If we fix both the IP and TCP/UDP checksums
			 * then the ICMP checksum changes by the address
			 * delta applied to the TCP/UDP checksum.  If we
			 * do not change the TCP/UDP checksum then we
			 * apply the delta in ports to the ICMP checksum.
			 */
			if (oip->ip_p == IPPROTO_UDP) {
				if ((dlen >= 8) && (*csump != 0)) {
					ipf_fix_datacksum(csump, sumd);
				} else {
					CALC_SUMD(sum1, sum4, sumd2);
					CALC_SUMD(sum3, sum2, sumt);
					sumd2 += sumt;
				}
			} else if (oip->ip_p == IPPROTO_TCP) {
				if (dlen >= 18) {
					ipf_fix_datacksum(csump, sumd);
				} else {
					CALC_SUMD(sum1, sum4, sumd2);
					CALC_SUMD(sum3, sum2, sumt);
					sumd2 += sumt;
				}
			}
			if (sumd2 != 0) {
				sumd2 = (sumd2 & 0xffff) + (sumd2 >> 16);
				sumd2 = (sumd2 & 0xffff) + (sumd2 >> 16);
				sumd2 = (sumd2 & 0xffff) + (sumd2 >> 16);
				ipf_fix_incksum(0, &icmp->icmp_cksum,
				    sumd2, 0);
			}
		}
	} else if (((flags & IPN_ICMPQUERY) != 0) && (dlen >= 8)) {
		icmphdr_t *orgicmp;

		/*
		 * XXX - what if this is bogus hl and we go off the end ?
		 * In this case, ipf_nat_icmperrorlookup() will have
		 * returned NULL.
		 */
		orgicmp = (icmphdr_t *)dp;

		if (odst == 1) {
			if (orgicmp->icmp_id != nat->nat_osport) {

				/*
				 * Fix ICMP checksum (of the offending ICMP
				 * query packet) to compensate the change
				 * in the ICMP id of the offending ICMP
				 * packet.
				 *
				 * Since you modify orgicmp->icmp_id with
				 * a delta (say x) and you compensate that
				 * in orgicmp->icmp_cksum with a delta
				 * minus x, you don't have to adjust the
				 * overall icmp->icmp_cksum
				 */
				sum1 = ntohs(orgicmp->icmp_id);
				sum2 = ntohs(nat->nat_oicmpid);
				CALC_SUMD(sum1, sum2, sumd);
				orgicmp->icmp_id = nat->nat_oicmpid;
				ipf_fix_datacksum(&orgicmp->icmp_cksum, sumd);
			}
		}
		/* nat_dir == NAT_INBOUND is impossible for icmp queries */
	}
	return (nat);
}


/*
 *	 MAP-IN    MAP-OUT   RDR-IN    RDR-OUT
 * osrc    X       == src    == src      X
 * odst    X       == dst    == dst      X
 * nsrc  == dst      X         X      == dst
 * ndst  == src      X         X      == src
 * MAP = NAT_OUTBOUND, RDR = NAT_INBOUND
 */

/*
 * NB: these lookups don't lock access to the list, it is assumed that it
 * has already been done!
 */

/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_inlookup                                            */
/* Returns:     nat_t* - NULL == no match,                                  */
/*                       else pointer to matching NAT entry                 */
/* Parameters:  fin(I)    - pointer to packet information                   */
/*              flags(I)  - NAT flags for this packet                       */
/*              p(I)      - protocol for this packet                        */
/*              src(I)    - source IP address                               */
/*              mapdst(I) - destination IP address                          */
/*                                                                          */
/* Lookup a nat entry based on the mapped destination ip address/port and   */
/* real source address/port.  We use this lookup when receiving a packet,   */
/* we're looking for a table entry, based on the destination address.       */
/*                                                                          */
/* NOTE: THE PACKET BEING CHECKED (IF FOUND) HAS A MAPPING ALREADY.         */
/*                                                                          */
/* NOTE: IT IS ASSUMED THAT ipf_nat IS ONLY HELD WITH A READ LOCK WHEN      */
/*       THIS FUNCTION IS CALLED WITH NAT_SEARCH SET IN nflags.             */
/*                                                                          */
/* flags -> relevant are IPN_UDP/IPN_TCP/IPN_ICMPQUERY that indicate if     */
/*          the packet is of said protocol                                  */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_nat_inlookup(fr_info_t *fin, u_int flags, u_int p,
    struct in_addr src, struct in_addr mapdst)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	u_short sport, dport;
	grehdr_t *gre;
	ipnat_t *ipn;
	u_int sflags;
	nat_t *nat;
	int nflags;
	u_32_t dst;
	void *ifp;
	u_int hv, rhv;

	ifp = fin->fin_ifp;
	gre = NULL;
	dst = mapdst.s_addr;
	sflags = flags & NAT_TCPUDPICMP;

	switch (p)
	{
	case IPPROTO_TCP :
	case IPPROTO_UDP :
		sport = htons(fin->fin_data[0]);
		dport = htons(fin->fin_data[1]);
		break;
	case IPPROTO_ICMP :
		sport = 0;
		dport = fin->fin_data[1];
		break;
	default :
		sport = 0;
		dport = 0;
		break;
	}

	if ((flags & SI_WILDP) != 0)
		goto find_in_wild_ports;

	rhv = NAT_HASH_FN(dst, dport, 0xffffffff);
	rhv = NAT_HASH_FN(src.s_addr, rhv + sport, 0xffffffff);
	hv = rhv % softn->ipf_nat_table_sz;
	nat = softn->ipf_nat_table[1][hv];
	/* TRACE dst, dport, src, sport, hv, nat */

	for (; nat; nat = nat->nat_hnext[1]) {
		if (nat->nat_ifps[0] != NULL) {
			if ((ifp != NULL) && (ifp != nat->nat_ifps[0]))
				continue;
		}

		if (nat->nat_pr[0] != p)
			continue;

		switch (nat->nat_dir)
		{
		case NAT_INBOUND :
		case NAT_DIVERTIN :
			if (nat->nat_v[0] != 4)
				continue;
			if (nat->nat_osrcaddr != src.s_addr ||
			    nat->nat_odstaddr != dst)
				continue;
			if ((nat->nat_flags & IPN_TCPUDP) != 0) {
				if (nat->nat_osport != sport)
					continue;
				if (nat->nat_odport != dport)
					continue;
			} else if (p == IPPROTO_ICMP) {
				if (nat->nat_osport != dport) {
					continue;
				}
			}
			break;
		case NAT_DIVERTOUT :
			if (nat->nat_dlocal)
				continue;
			/* FALLTHROUGH */
		case NAT_OUTBOUND :
			if (nat->nat_v[1] != 4)
				continue;
			if (nat->nat_dlocal)
				continue;
			if (nat->nat_ndstaddr != src.s_addr ||
			    nat->nat_nsrcaddr != dst)
				continue;
			if ((nat->nat_flags & IPN_TCPUDP) != 0) {
				if (nat->nat_ndport != sport)
continue; if (nat->nat_nsport != dport) continue; } else if (p == IPPROTO_ICMP) { if (nat->nat_osport != dport) { continue; } } break; } if ((nat->nat_flags & IPN_TCPUDP) != 0) { ipn = nat->nat_ptr; if ((ipn != NULL) && (nat->nat_aps != NULL)) if (ipf_proxy_match(fin, nat) != 0) continue; } if ((nat->nat_ifps[0] == NULL) && (ifp != NULL)) { nat->nat_ifps[0] = ifp; nat->nat_mtu[0] = GETIFMTU_4(ifp); } return (nat); } /* * So if we didn't find it but there are wildcard members in the hash * table, go back and look for them. We do this search and update here * because it is modifying the NAT table and we want to do this only * for the first packet that matches. The exception, of course, is * for "dummy" (FI_IGNORE) lookups. */ find_in_wild_ports: if (!(flags & NAT_TCPUDP) || !(flags & NAT_SEARCH)) { NBUMPSIDEX(0, ns_lookup_miss, ns_lookup_miss_0); return (NULL); } if (softn->ipf_nat_stats.ns_wilds == 0 || (fin->fin_flx & FI_NOWILD)) { NBUMPSIDEX(0, ns_lookup_nowild, ns_lookup_nowild_0); return (NULL); } RWLOCK_EXIT(&softc->ipf_nat); hv = NAT_HASH_FN(dst, 0, 0xffffffff); hv = NAT_HASH_FN(src.s_addr, hv, softn->ipf_nat_table_sz); WRITE_ENTER(&softc->ipf_nat); nat = softn->ipf_nat_table[1][hv]; /* TRACE dst, src, hv, nat */ for (; nat; nat = nat->nat_hnext[1]) { if (nat->nat_ifps[0] != NULL) { if ((ifp != NULL) && (ifp != nat->nat_ifps[0])) continue; } if (nat->nat_pr[0] != fin->fin_p) continue; switch (nat->nat_dir & (NAT_INBOUND|NAT_OUTBOUND)) { case NAT_INBOUND : if (nat->nat_v[0] != 4) continue; if (nat->nat_osrcaddr != src.s_addr || nat->nat_odstaddr != dst) continue; break; case NAT_OUTBOUND : if (nat->nat_v[1] != 4) continue; if (nat->nat_ndstaddr != src.s_addr || nat->nat_nsrcaddr != dst) continue; break; } nflags = nat->nat_flags; if (!(nflags & (NAT_TCPUDP|SI_WILDP))) continue; if (ipf_nat_wildok(nat, (int)sport, (int)dport, nflags, NAT_INBOUND) == 1) { if ((fin->fin_flx & FI_IGNORE) != 0) break; if ((nflags & SI_CLONE) != 0) { nat = ipf_nat_clone(fin, nat); if (nat == NULL) break; } else { MUTEX_ENTER(&softn->ipf_nat_new); softn->ipf_nat_stats.ns_wilds--; MUTEX_EXIT(&softn->ipf_nat_new); } if (nat->nat_dir == NAT_INBOUND) { if (nat->nat_osport == 0) { nat->nat_osport = sport; nat->nat_nsport = sport; } if (nat->nat_odport == 0) { nat->nat_odport = dport; nat->nat_ndport = dport; } } else if (nat->nat_dir == NAT_OUTBOUND) { if (nat->nat_osport == 0) { nat->nat_osport = dport; nat->nat_nsport = dport; } if (nat->nat_odport == 0) { nat->nat_odport = sport; nat->nat_ndport = sport; } } if ((nat->nat_ifps[0] == NULL) && (ifp != NULL)) { nat->nat_ifps[0] = ifp; nat->nat_mtu[0] = GETIFMTU_4(ifp); } nat->nat_flags &= ~(SI_W_DPORT|SI_W_SPORT); ipf_nat_tabmove(softn, nat); break; } } MUTEX_DOWNGRADE(&softc->ipf_nat); if (nat == NULL) { NBUMPSIDE(0, ns_lookup_miss); } return (nat); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_tabmove */ /* Returns: Nil */ /* Parameters: softn(I) - pointer to NAT context structure */ /* nat(I) - pointer to NAT structure */ /* Write Lock: ipf_nat */ /* */ /* This function is only called for TCP/UDP NAT table entries where the */ /* original was placed in the table without hashing on the ports and we now */ /* want to include hashing on port numbers. 
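 */
#if 0
/*
 * Editor's sketch, excluded from the build: the unlink idiom used
 * below.  Each entry keeps a back-pointer (phnext) to whatever pointer
 * points at it, so removal needs no bucket scan.  example_unlink() is
 * a hypothetical name.
 */
static void
example_unlink(nat_t *nat, int side)
{
	if (nat->nat_hnext[side] != NULL)
		nat->nat_hnext[side]->nat_phnext[side] =
		    nat->nat_phnext[side];
	*nat->nat_phnext[side] = nat->nat_hnext[side];
}
#endif
/* (tabmove: remove under the old keys, reinsert under the new ones)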
 */
/* ------------------------------------------------------------------------ */
static void
ipf_nat_tabmove(ipf_nat_softc_t *softn, nat_t *nat)
{
	u_int hv0, hv1, rhv0, rhv1;
	natstat_t *nsp;
	nat_t **natp;

	if (nat->nat_flags & SI_CLONE)
		return;

	nsp = &softn->ipf_nat_stats;

	/*
	 * Remove the NAT entry from the old location
	 */
	if (nat->nat_hnext[0])
		nat->nat_hnext[0]->nat_phnext[0] = nat->nat_phnext[0];
	*nat->nat_phnext[0] = nat->nat_hnext[0];
	nsp->ns_side[0].ns_bucketlen[nat->nat_hv[0] %
	    softn->ipf_nat_table_sz]--;

	if (nat->nat_hnext[1])
		nat->nat_hnext[1]->nat_phnext[1] = nat->nat_phnext[1];
	*nat->nat_phnext[1] = nat->nat_hnext[1];
	nsp->ns_side[1].ns_bucketlen[nat->nat_hv[1] %
	    softn->ipf_nat_table_sz]--;

	/*
	 * Add into the NAT table in the new position
	 */
	rhv0 = NAT_HASH_FN(nat->nat_osrcaddr, nat->nat_osport, 0xffffffff);
	rhv0 = NAT_HASH_FN(nat->nat_odstaddr, rhv0 + nat->nat_odport,
	    0xffffffff);
	rhv1 = NAT_HASH_FN(nat->nat_nsrcaddr, nat->nat_nsport, 0xffffffff);
	rhv1 = NAT_HASH_FN(nat->nat_ndstaddr, rhv1 + nat->nat_ndport,
	    0xffffffff);

	hv0 = rhv0 % softn->ipf_nat_table_sz;
	hv1 = rhv1 % softn->ipf_nat_table_sz;

	if (nat->nat_dir == NAT_INBOUND || nat->nat_dir == NAT_DIVERTIN) {
		u_int swap;

		swap = hv0;
		hv0 = hv1;
		hv1 = swap;
	}

	/* TRACE nat_osrcaddr, nat_osport, nat_odstaddr, nat_odport, hv0 */
	/* TRACE nat_nsrcaddr, nat_nsport, nat_ndstaddr, nat_ndport, hv1 */

	nat->nat_hv[0] = rhv0;
	natp = &softn->ipf_nat_table[0][hv0];
	if (*natp)
		(*natp)->nat_phnext[0] = &nat->nat_hnext[0];
	nat->nat_phnext[0] = natp;
	nat->nat_hnext[0] = *natp;
	*natp = nat;
	nsp->ns_side[0].ns_bucketlen[hv0]++;

	nat->nat_hv[1] = rhv1;
	natp = &softn->ipf_nat_table[1][hv1];
	if (*natp)
		(*natp)->nat_phnext[1] = &nat->nat_hnext[1];
	nat->nat_phnext[1] = natp;
	nat->nat_hnext[1] = *natp;
	*natp = nat;
	nsp->ns_side[1].ns_bucketlen[hv1]++;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_outlookup                                           */
/* Returns:     nat_t* - NULL == no match,                                  */
/*                       else pointer to matching NAT entry                 */
/* Parameters:  fin(I)   - pointer to packet information                    */
/*              flags(I) - NAT flags for this packet                        */
/*              p(I)     - protocol for this packet                         */
/*              src(I)   - source IP address                                */
/*              dst(I)   - destination IP address                           */
/*              rw(I)    - 1 == write lock held, 0 == read lock.            */
/*                                                                          */
/* Lookup a nat entry based on the source 'real' ip address/port and        */
/* destination address/port.  We use this lookup when sending a packet out, */
/* we're looking for a table entry, based on the source address.            */
/*                                                                          */
/* NOTE: THE PACKET BEING CHECKED (IF FOUND) HAS A MAPPING ALREADY.         */
/*                                                                          */
/* NOTE: IT IS ASSUMED THAT ipf_nat IS ONLY HELD WITH A READ LOCK WHEN      */
/*       THIS FUNCTION IS CALLED WITH NAT_SEARCH SET IN nflags.
*/ /* */ /* flags -> relevant are IPN_UDP/IPN_TCP/IPN_ICMPQUERY that indicate if */ /* the packet is of said protocol */ /* ------------------------------------------------------------------------ */ nat_t * ipf_nat_outlookup(fr_info_t *fin, u_int flags, u_int p, struct in_addr src , struct in_addr dst) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; u_short sport, dport; u_int sflags; ipnat_t *ipn; nat_t *nat; void *ifp; u_int hv; ifp = fin->fin_ifp; sflags = flags & IPN_TCPUDPICMP; switch (p) { case IPPROTO_TCP : case IPPROTO_UDP : sport = htons(fin->fin_data[0]); dport = htons(fin->fin_data[1]); break; case IPPROTO_ICMP : sport = 0; dport = fin->fin_data[1]; break; default : sport = 0; dport = 0; break; } if ((flags & SI_WILDP) != 0) goto find_out_wild_ports; hv = NAT_HASH_FN(src.s_addr, sport, 0xffffffff); hv = NAT_HASH_FN(dst.s_addr, hv + dport, softn->ipf_nat_table_sz); nat = softn->ipf_nat_table[0][hv]; /* TRACE src, sport, dst, dport, hv, nat */ for (; nat; nat = nat->nat_hnext[0]) { if (nat->nat_ifps[1] != NULL) { if ((ifp != NULL) && (ifp != nat->nat_ifps[1])) continue; } if (nat->nat_pr[1] != p) continue; switch (nat->nat_dir) { case NAT_INBOUND : case NAT_DIVERTIN : if (nat->nat_v[1] != 4) continue; if (nat->nat_ndstaddr != src.s_addr || nat->nat_nsrcaddr != dst.s_addr) continue; if ((nat->nat_flags & IPN_TCPUDP) != 0) { if (nat->nat_ndport != sport) continue; if (nat->nat_nsport != dport) continue; } else if (p == IPPROTO_ICMP) { if (nat->nat_osport != dport) { continue; } } break; case NAT_OUTBOUND : case NAT_DIVERTOUT : if (nat->nat_v[0] != 4) continue; if (nat->nat_osrcaddr != src.s_addr || nat->nat_odstaddr != dst.s_addr) continue; if ((nat->nat_flags & IPN_TCPUDP) != 0) { if (nat->nat_odport != dport) continue; if (nat->nat_osport != sport) continue; } else if (p == IPPROTO_ICMP) { if (nat->nat_osport != dport) { continue; } } break; } ipn = nat->nat_ptr; if ((ipn != NULL) && (nat->nat_aps != NULL)) if (ipf_proxy_match(fin, nat) != 0) continue; if ((nat->nat_ifps[1] == NULL) && (ifp != NULL)) { nat->nat_ifps[1] = ifp; nat->nat_mtu[1] = GETIFMTU_4(ifp); } return (nat); } /* * So if we didn't find it but there are wildcard members in the hash * table, go back and look for them. We do this search and update here * because it is modifying the NAT table and we want to do this only * for the first packet that matches. The exception, of course, is * for "dummy" (FI_IGNORE) lookups. 
*/ find_out_wild_ports: if (!(flags & NAT_TCPUDP) || !(flags & NAT_SEARCH)) { NBUMPSIDEX(1, ns_lookup_miss, ns_lookup_miss_1); return (NULL); } if (softn->ipf_nat_stats.ns_wilds == 0 || (fin->fin_flx & FI_NOWILD)) { NBUMPSIDEX(1, ns_lookup_nowild, ns_lookup_nowild_1); return (NULL); } RWLOCK_EXIT(&softc->ipf_nat); hv = NAT_HASH_FN(src.s_addr, 0, 0xffffffff); hv = NAT_HASH_FN(dst.s_addr, hv, softn->ipf_nat_table_sz); WRITE_ENTER(&softc->ipf_nat); nat = softn->ipf_nat_table[0][hv]; for (; nat; nat = nat->nat_hnext[0]) { if (nat->nat_ifps[1] != NULL) { if ((ifp != NULL) && (ifp != nat->nat_ifps[1])) continue; } if (nat->nat_pr[1] != fin->fin_p) continue; switch (nat->nat_dir & (NAT_INBOUND|NAT_OUTBOUND)) { case NAT_INBOUND : if (nat->nat_v[1] != 4) continue; if (nat->nat_ndstaddr != src.s_addr || nat->nat_nsrcaddr != dst.s_addr) continue; break; case NAT_OUTBOUND : if (nat->nat_v[0] != 4) continue; if (nat->nat_osrcaddr != src.s_addr || nat->nat_odstaddr != dst.s_addr) continue; break; } if (!(nat->nat_flags & (NAT_TCPUDP|SI_WILDP))) continue; if (ipf_nat_wildok(nat, (int)sport, (int)dport, nat->nat_flags, NAT_OUTBOUND) == 1) { if ((fin->fin_flx & FI_IGNORE) != 0) break; if ((nat->nat_flags & SI_CLONE) != 0) { nat = ipf_nat_clone(fin, nat); if (nat == NULL) break; } else { MUTEX_ENTER(&softn->ipf_nat_new); softn->ipf_nat_stats.ns_wilds--; MUTEX_EXIT(&softn->ipf_nat_new); } if (nat->nat_dir == NAT_OUTBOUND) { if (nat->nat_osport == 0) { nat->nat_osport = sport; nat->nat_nsport = sport; } if (nat->nat_odport == 0) { nat->nat_odport = dport; nat->nat_ndport = dport; } } else if (nat->nat_dir == NAT_INBOUND) { if (nat->nat_osport == 0) { nat->nat_osport = dport; nat->nat_nsport = dport; } if (nat->nat_odport == 0) { nat->nat_odport = sport; nat->nat_ndport = sport; } } if ((nat->nat_ifps[1] == NULL) && (ifp != NULL)) { nat->nat_ifps[1] = ifp; nat->nat_mtu[1] = GETIFMTU_4(ifp); } nat->nat_flags &= ~(SI_W_DPORT|SI_W_SPORT); ipf_nat_tabmove(softn, nat); break; } } MUTEX_DOWNGRADE(&softc->ipf_nat); if (nat == NULL) { NBUMPSIDE(1, ns_lookup_miss); } return (nat); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_lookupredir */ /* Returns: nat_t* - NULL == no match, */ /* else pointer to matching NAT entry */ /* Parameters: np(I) - pointer to description of packet to find NAT table */ /* entry for. */ /* */ /* Lookup the NAT tables to search for a matching redirect */ /* The contents of natlookup_t should imitate those found in a packet that */ /* would be translated - ie a packet coming in for RDR or going out for MAP.*/ /* We can do the lookup in one of two ways, imitating an inbound or */ /* outbound packet. By default we assume outbound, unless IPN_IN is set. 
*/ /* For IN, the fields are set as follows: */ /* nl_real* = source information */ /* nl_out* = destination information (translated) */ /* For an out packet, the fields are set like this: */ /* nl_in* = source information (untranslated) */ /* nl_out* = destination information (translated) */ /* ------------------------------------------------------------------------ */ nat_t * ipf_nat_lookupredir(natlookup_t *np) { fr_info_t fi; nat_t *nat; bzero((char *)&fi, sizeof(fi)); if (np->nl_flags & IPN_IN) { fi.fin_data[0] = ntohs(np->nl_realport); fi.fin_data[1] = ntohs(np->nl_outport); } else { fi.fin_data[0] = ntohs(np->nl_inport); fi.fin_data[1] = ntohs(np->nl_outport); } if (np->nl_flags & IPN_TCP) fi.fin_p = IPPROTO_TCP; else if (np->nl_flags & IPN_UDP) fi.fin_p = IPPROTO_UDP; else if (np->nl_flags & (IPN_ICMPERR|IPN_ICMPQUERY)) fi.fin_p = IPPROTO_ICMP; /* * We can do two sorts of lookups: * - IPN_IN: we have the `real' and `out' address, look for `in'. * - default: we have the `in' and `out' address, look for `real'. */ if (np->nl_flags & IPN_IN) { if ((nat = ipf_nat_inlookup(&fi, np->nl_flags, fi.fin_p, np->nl_realip, np->nl_outip))) { np->nl_inip = nat->nat_odstip; np->nl_inport = nat->nat_odport; } } else { /* * If nl_inip is non null, this is a lookup based on the real * ip address. Else, we use the fake. */ if ((nat = ipf_nat_outlookup(&fi, np->nl_flags, fi.fin_p, np->nl_inip, np->nl_outip))) { if ((np->nl_flags & IPN_FINDFORWARD) != 0) { fr_info_t fin; bzero((char *)&fin, sizeof(fin)); fin.fin_p = nat->nat_pr[0]; fin.fin_data[0] = ntohs(nat->nat_ndport); fin.fin_data[1] = ntohs(nat->nat_nsport); if (ipf_nat_inlookup(&fin, np->nl_flags, fin.fin_p, nat->nat_ndstip, nat->nat_nsrcip) != NULL) { np->nl_flags &= ~IPN_FINDFORWARD; } } np->nl_realip = nat->nat_odstip; np->nl_realport = nat->nat_odport; } } return (nat); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_match */ /* Returns: int - 0 == no match, 1 == match */ /* Parameters: fin(I) - pointer to packet information */ /* np(I) - pointer to NAT rule */ /* */ /* Pull the matching of a packet against a NAT rule out of that complex */ /* loop inside ipf_nat_checkin() and lay it out properly in its own function. 
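 */
#if 0
/*
 * Editor's sketch, excluded from the build: how a caller such as the
 * SIOCGNATL ioctl path might drive ipf_nat_lookupredir().  The
 * natlookup_t carries a packet-like tuple; on a default (outbound
 * style) lookup the nl_real* fields are filled in on success.
 * example_lookupredir() is a hypothetical name.
 */
static int
example_lookupredir(struct in_addr in_ip, u_short in_port,
    struct in_addr out_ip, u_short out_port)
{
	natlookup_t nl;
	nat_t *nat;

	bzero((char *)&nl, sizeof(nl));
	nl.nl_flags = IPN_TCP;
	nl.nl_inip = in_ip;		/* untranslated source */
	nl.nl_inport = htons(in_port);
	nl.nl_outip = out_ip;		/* translated destination */
	nl.nl_outport = htons(out_port);
	nat = ipf_nat_lookupredir(&nl);
	return (nat != NULL);		/* nl_real* now holds the peer */
}
#endif
/* (rule matching, split out of the checkin loop)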
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_match(fr_info_t *fin, ipnat_t *np) { ipf_main_softc_t *softc = fin->fin_main_soft; frtuc_t *ft; int match; match = 0; switch (np->in_osrcatype) { case FRI_NORMAL : match = ((fin->fin_saddr & np->in_osrcmsk) != np->in_osrcaddr); break; case FRI_LOOKUP : match = (*np->in_osrcfunc)(softc, np->in_osrcptr, 4, &fin->fin_saddr, fin->fin_plen); break; } match ^= ((np->in_flags & IPN_NOTSRC) != 0); if (match) return (0); match = 0; switch (np->in_odstatype) { case FRI_NORMAL : match = ((fin->fin_daddr & np->in_odstmsk) != np->in_odstaddr); break; case FRI_LOOKUP : match = (*np->in_odstfunc)(softc, np->in_odstptr, 4, &fin->fin_daddr, fin->fin_plen); break; } match ^= ((np->in_flags & IPN_NOTDST) != 0); if (match) return (0); ft = &np->in_tuc; if (!(fin->fin_flx & FI_TCPUDP) || (fin->fin_flx & (FI_SHORT|FI_FRAGBODY))) { if (ft->ftu_scmp || ft->ftu_dcmp) return (0); return (1); } return (ipf_tcpudpchk(&fin->fin_fi, ft)); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_update */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT structure */ /* */ /* Updates the lifetime of a NAT table entry for non-TCP packets. This */ /* *MUST* be called after ipf_nat_proto() as it expects fin_rev to */ /* already be set. */ /* ------------------------------------------------------------------------ */ void ipf_nat_update(fr_info_t *fin, nat_t *nat) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; ipftq_t *ifq, *ifq2; ipftqent_t *tqe; ipnat_t *np = nat->nat_ptr; tqe = &nat->nat_tqe; ifq = tqe->tqe_ifq; /* * We allow overriding of NAT timeouts from NAT rules, even for * TCP, however, if it is TCP and there is no rule timeout set, * then do not update the timeout here. */ if (np != NULL) { np->in_bytes[fin->fin_rev] += fin->fin_plen; ifq2 = np->in_tqehead[fin->fin_rev]; } else { ifq2 = NULL; } if (nat->nat_pr[0] == IPPROTO_TCP && ifq2 == NULL) { (void) ipf_tcp_age(&nat->nat_tqe, fin, softn->ipf_nat_tcptq, 0, 2); } else { if (ifq2 == NULL) { if (nat->nat_pr[0] == IPPROTO_UDP) ifq2 = fin->fin_rev ? &softn->ipf_nat_udpacktq : &softn->ipf_nat_udptq; else if (nat->nat_pr[0] == IPPROTO_ICMP || nat->nat_pr[0] == IPPROTO_ICMPV6) ifq2 = fin->fin_rev ? &softn->ipf_nat_icmpacktq: &softn->ipf_nat_icmptq; else ifq2 = &softn->ipf_nat_iptq; } ipf_movequeue(softc->ipf_ticks, tqe, ifq, ifq2); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_checkout */ /* Returns: int - -1 == packet failed NAT checks so block it, */ /* 0 == no packet translation occurred, */ /* 1 == packet was successfully translated. */ /* Parameters: fin(I) - pointer to packet information */ /* passp(I) - pointer to filtering result flags */ /* */ /* Check to see if an outgoing packet should be changed. ICMP packets are */ /* first checked to see if they match an existing entry (if an error), */ /* otherwise a search of the current NAT table is made. If neither results */ /* in a match then a search for a matching NAT rule is made. Create a new */ /* NAT entry if we matched a NAT rule. Lastly, actually change the */ /* packet header(s) as required.
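Internally, an rval of -3 marks an ipf_nat_match() failure, -2 a failure to create an entry in the rule-matching loop and -1 a translation or proxy failure; all three are collapsed to -1 (block) before this function returns.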
*/ /* ------------------------------------------------------------------------ */ int ipf_nat_checkout(fr_info_t *fin, u_32_t *passp) { ipnat_t *np = NULL, *npnext; struct ifnet *ifp, *sifp; ipf_main_softc_t *softc; ipf_nat_softc_t *softn; icmphdr_t *icmp = NULL; tcphdr_t *tcp = NULL; int rval, natfailed; u_int nflags = 0; u_32_t ipa, iph; int natadd = 1; frentry_t *fr; nat_t *nat; if (fin->fin_v == 6) { #ifdef USE_INET6 return (ipf_nat6_checkout(fin, passp)); #else return (0); #endif } softc = fin->fin_main_soft; softn = softc->ipf_nat_soft; if (softn->ipf_nat_lock != 0) return (0); if (softn->ipf_nat_stats.ns_rules == 0 && softn->ipf_nat_instances == NULL) return (0); natfailed = 0; fr = fin->fin_fr; sifp = fin->fin_ifp; if (fr != NULL) { ifp = fr->fr_tifs[fin->fin_rev].fd_ptr; if ((ifp != NULL) && (ifp != (void *)-1)) fin->fin_ifp = ifp; } ifp = fin->fin_ifp; if (!(fin->fin_flx & FI_SHORT) && (fin->fin_off == 0)) { switch (fin->fin_p) { case IPPROTO_TCP : nflags = IPN_TCP; break; case IPPROTO_UDP : nflags = IPN_UDP; break; case IPPROTO_ICMP : icmp = fin->fin_dp; /* * For ICMP queries the icmp_id takes the place of * the port numbers (cf. ipf_nat_checkin()). */ if ((fin->fin_flx & FI_ICMPQUERY) != 0) nflags = IPN_ICMPQUERY; break; default : break; } if ((nflags & IPN_TCPUDP)) tcp = fin->fin_dp; } ipa = fin->fin_saddr; READ_ENTER(&softc->ipf_nat); if ((fin->fin_p == IPPROTO_ICMP) && !(nflags & IPN_ICMPQUERY) && (nat = ipf_nat_icmperror(fin, &nflags, NAT_OUTBOUND))) /*EMPTY*/; else if ((fin->fin_flx & FI_FRAG) && (nat = ipf_frag_natknown(fin))) natadd = 0; else if ((nat = ipf_nat_outlookup(fin, nflags|NAT_SEARCH, (u_int)fin->fin_p, fin->fin_src, fin->fin_dst))) { nflags = nat->nat_flags; } else if (fin->fin_off == 0) { u_32_t hv, msk, nmsk = 0; /* * If there is no current entry in the nat table for this IP#, * create one for it (if there is a matching rule). */ maskloop: msk = softn->ipf_nat_map_active_masks[nmsk]; iph = ipa & msk; hv = NAT_HASH_FN(iph, 0, softn->ipf_nat_maprules_sz); retry_roundrobin: for (np = softn->ipf_nat_map_rules[hv]; np; np = npnext) { npnext = np->in_mnext; if ((np->in_ifps[1] && (np->in_ifps[1] != ifp))) continue; if (np->in_v[0] != 4) continue; if (np->in_pr[1] && (np->in_pr[1] != fin->fin_p)) continue; if ((np->in_flags & IPN_RF) && !(np->in_flags & nflags)) continue; if (np->in_flags & IPN_FILTER) { switch (ipf_nat_match(fin, np)) { case 0 : continue; case -1 : rval = -3; goto outmatchfail; case 1 : default : break; } } else if ((ipa & np->in_osrcmsk) != np->in_osrcaddr) continue; if ((fr != NULL) && !ipf_matchtag(&np->in_tag, &fr->fr_nattag)) continue; if (np->in_plabel != -1) { if (((np->in_flags & IPN_FILTER) == 0) && (np->in_odport != fin->fin_data[1])) continue; if (ipf_proxy_ok(fin, tcp, np) == 0) continue; } if (np->in_flags & IPN_NO) { np->in_hits++; break; } MUTEX_ENTER(&softn->ipf_nat_new); /* * If we've matched a round-robin rule but it has * moved in the list since we got it, start over as * this is now no longer correct.
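The list can change because ipf_nat_new is only acquired after the candidate rule was selected under the read lock, so another thread may have rotated a round-robin rule in the meantime; comparing npnext with in_mnext detects that movement.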
*/ if (npnext != np->in_mnext) { if ((np->in_flags & IPN_ROUNDR) != 0) { MUTEX_EXIT(&softn->ipf_nat_new); goto retry_roundrobin; } npnext = np->in_mnext; } nat = ipf_nat_add(fin, np, NULL, nflags, NAT_OUTBOUND); MUTEX_EXIT(&softn->ipf_nat_new); if (nat != NULL) { natfailed = 0; break; } natfailed = -2; } if ((np == NULL) && (nmsk < softn->ipf_nat_map_max)) { nmsk++; goto maskloop; } } if (nat != NULL) { rval = ipf_nat_out(fin, nat, natadd, nflags); if (rval == 1) { MUTEX_ENTER(&nat->nat_lock); ipf_nat_update(fin, nat); nat->nat_bytes[1] += fin->fin_plen; nat->nat_pkts[1]++; fin->fin_pktnum = nat->nat_pkts[1]; MUTEX_EXIT(&nat->nat_lock); } } else rval = natfailed; outmatchfail: RWLOCK_EXIT(&softc->ipf_nat); switch (rval) { case -3 : /* ipf_nat_match() failure */ /* FALLTHROUGH */ case -2 : /* retry_roundrobin loop failure */ /* FALLTHROUGH */ case -1 : /* proxy failure detected by ipf_nat_out() */ if (passp != NULL) { DT2(frb_natv4out, fr_info_t *, fin, int, rval); NBUMPSIDED(1, ns_drop); *passp = FR_BLOCK; fin->fin_reason = FRB_NATV4; } fin->fin_flx |= FI_BADNAT; NBUMPSIDED(1, ns_badnat); rval = -1; /* We only return -1 on error. */ break; case 0 : NBUMPSIDE(1, ns_ignored); break; case 1 : NBUMPSIDE(1, ns_translated); break; } fin->fin_ifp = sifp; return (rval); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_out */ /* Returns: int - -1 == packet failed NAT checks so block it, */ /* 1 == packet was successfully translated. */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT structure */ /* natadd(I) - flag indicating if it is safe to add frag cache */ /* nflags(I) - NAT flags set for this packet */ /* */ /* Translate a packet coming "out" on an interface. */ /* ------------------------------------------------------------------------ */ int ipf_nat_out(fr_info_t *fin, nat_t *nat, int natadd, u_32_t nflags) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; icmphdr_t *icmp; tcphdr_t *tcp; ipnat_t *np; int skip; int i; tcp = NULL; icmp = NULL; np = nat->nat_ptr; if ((natadd != 0) && (fin->fin_flx & FI_FRAG) && (np != NULL)) (void) ipf_frag_natnew(softc, fin, 0, nat); /* * Fix up checksums, not by recalculating them, but * simply computing adjustments. * This is only done for STREAMS based IP implementations where the * checksum has already been calculated by IP. In all other cases, * IPFilter is called before the checksum needs calculating so there * is no call to modify whatever is in the header now. */ if (nflags == IPN_ICMPERR) { u_32_t s1, s2, sumd, msumd; s1 = LONG_SUM(ntohl(fin->fin_saddr)); if (nat->nat_dir == NAT_OUTBOUND) { s2 = LONG_SUM(ntohl(nat->nat_nsrcaddr)); } else { s2 = LONG_SUM(ntohl(nat->nat_odstaddr)); } CALC_SUMD(s1, s2, sumd); msumd = sumd; s1 = LONG_SUM(ntohl(fin->fin_daddr)); if (nat->nat_dir == NAT_OUTBOUND) { s2 = LONG_SUM(ntohl(nat->nat_ndstaddr)); } else { s2 = LONG_SUM(ntohl(nat->nat_osrcaddr)); } CALC_SUMD(s1, s2, sumd); msumd += sumd; ipf_fix_outcksum(0, &fin->fin_ip->ip_sum, msumd, 0); } #if !defined(_KERNEL) || SOLARIS || \ defined(BRIDGE_IPF) || defined(__FreeBSD__) else { /* * We always do this on FreeBSD because this code doesn't * exist in fastforward. 
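The adjustment is standard incremental checksum arithmetic: CALC_SUMD() yields the one's complement difference between the old and new address words, and ipf_fix_outcksum()/ipf_fix_incksum() fold that delta into the existing checksum instead of recomputing it from scratch.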
*/ switch (nat->nat_dir) { case NAT_OUTBOUND : ipf_fix_outcksum(fin->fin_cksum & FI_CK_L4PART, &fin->fin_ip->ip_sum, nat->nat_ipsumd, 0); break; case NAT_INBOUND : ipf_fix_incksum(fin->fin_cksum & FI_CK_L4PART, &fin->fin_ip->ip_sum, nat->nat_ipsumd, 0); break; default : break; } } #endif /* * Address assignment is after the checksum modification because * we are using the address in the packet for determining the * correct checksum offset (the ICMP error could be coming from * anyone...) */ switch (nat->nat_dir) { case NAT_OUTBOUND : fin->fin_ip->ip_src = nat->nat_nsrcip; fin->fin_saddr = nat->nat_nsrcaddr; fin->fin_ip->ip_dst = nat->nat_ndstip; fin->fin_daddr = nat->nat_ndstaddr; break; case NAT_INBOUND : fin->fin_ip->ip_src = nat->nat_odstip; fin->fin_saddr = nat->nat_ndstaddr; fin->fin_ip->ip_dst = nat->nat_osrcip; fin->fin_daddr = nat->nat_nsrcaddr; break; case NAT_DIVERTIN : { mb_t *m; skip = ipf_nat_decap(fin, nat); if (skip <= 0) { NBUMPSIDED(1, ns_decap_fail); return (-1); } m = fin->fin_m; #if SOLARIS && defined(_KERNEL) m->b_rptr += skip; #else m->m_data += skip; m->m_len -= skip; # ifdef M_PKTHDR if (m->m_flags & M_PKTHDR) m->m_pkthdr.len -= skip; # endif #endif MUTEX_ENTER(&nat->nat_lock); ipf_nat_update(fin, nat); MUTEX_EXIT(&nat->nat_lock); fin->fin_flx |= FI_NATED; if (np != NULL && np->in_tag.ipt_num[0] != 0) fin->fin_nattag = &np->in_tag; return (1); /* NOTREACHED */ } case NAT_DIVERTOUT : { u_32_t s1, s2, sumd; udphdr_t *uh; ip_t *ip; mb_t *m; m = M_DUP(np->in_divmp); if (m == NULL) { NBUMPSIDED(1, ns_divert_dup); return (-1); } ip = MTOD(m, ip_t *); - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); s2 = ntohs(ip->ip_id); s1 = ip->ip_len; ip->ip_len = ntohs(ip->ip_len); ip->ip_len += fin->fin_plen; ip->ip_len = htons(ip->ip_len); s2 += ntohs(ip->ip_len); CALC_SUMD(s1, s2, sumd); uh = (udphdr_t *)(ip + 1); uh->uh_ulen += fin->fin_plen; uh->uh_ulen = htons(uh->uh_ulen); #if !defined(_KERNEL) || SOLARIS || \ defined(BRIDGE_IPF) || defined(__FreeBSD__) ipf_fix_outcksum(0, &ip->ip_sum, sumd, 0); #endif PREP_MB_T(fin, m); fin->fin_src = ip->ip_src; fin->fin_dst = ip->ip_dst; fin->fin_ip = ip; fin->fin_plen += sizeof(ip_t) + 8; /* UDP + IPv4 hdr */ fin->fin_dlen += sizeof(ip_t) + 8; /* UDP + IPv4 hdr */ nflags &= ~IPN_TCPUDPICMP; break; } default : break; } if (!(fin->fin_flx & FI_SHORT) && (fin->fin_off == 0)) { u_short *csump; if ((nat->nat_nsport != 0) && (nflags & IPN_TCPUDP)) { tcp = fin->fin_dp; switch (nat->nat_dir) { case NAT_OUTBOUND : tcp->th_sport = nat->nat_nsport; fin->fin_data[0] = ntohs(nat->nat_nsport); tcp->th_dport = nat->nat_ndport; fin->fin_data[1] = ntohs(nat->nat_ndport); break; case NAT_INBOUND : tcp->th_sport = nat->nat_odport; fin->fin_data[0] = ntohs(nat->nat_odport); tcp->th_dport = nat->nat_osport; fin->fin_data[1] = ntohs(nat->nat_osport); break; } } if ((nat->nat_nsport != 0) && (nflags & IPN_ICMPQUERY)) { icmp = fin->fin_dp; icmp->icmp_id = nat->nat_nicmpid; } csump = ipf_nat_proto(fin, nat, nflags); /* * The above comments do not hold for layer 4 (or higher) * checksums... */ if (csump != NULL) { if (nat->nat_dir == NAT_OUTBOUND) ipf_fix_outcksum(fin->fin_cksum, csump, nat->nat_sumd[0], nat->nat_sumd[1] + fin->fin_dlen); else ipf_fix_incksum(fin->fin_cksum, csump, nat->nat_sumd[0], nat->nat_sumd[1] + fin->fin_dlen); } } ipf_sync_update(softc, SMC_NAT, fin, nat->nat_sync); /* ------------------------------------------------------------- */ /* A few quick notes: */ /* Following are test conditions prior to calling the */ /* ipf_proxy_check routine. 
*/ /* */ /* A NULL tcp indicates a non TCP/UDP packet. When dealing */ /* with a redirect rule, we attempt to match the packet's */ /* source port against in_dport, otherwise we'd compare the */ /* packet's destination. */ /* ------------------------------------------------------------- */ if ((np != NULL) && (np->in_apr != NULL)) { i = ipf_proxy_check(fin, nat); if (i == -1) { NBUMPSIDED(1, ns_ipf_proxy_fail); } } else { i = 1; } fin->fin_flx |= FI_NATED; return (i); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_checkin */ /* Returns: int - -1 == packet failed NAT checks so block it, */ /* 0 == no packet translation occurred, */ /* 1 == packet was successfully translated. */ /* Parameters: fin(I) - pointer to packet information */ /* passp(I) - pointer to filtering result flags */ /* */ /* Check to see if an incoming packet should be changed. ICMP packets are */ /* first checked to see if they match an existing entry (if an error), */ /* otherwise a search of the current NAT table is made. If neither results */ /* in a match then a search for a matching NAT rule is made. Create a new */ /* NAT entry if we matched a NAT rule. Lastly, actually change the */ /* packet header(s) as required. */ /* ------------------------------------------------------------------------ */ int ipf_nat_checkin(fr_info_t *fin, u_32_t *passp) { ipf_main_softc_t *softc; ipf_nat_softc_t *softn; u_int nflags, natadd; ipnat_t *np, *npnext; int rval, natfailed; struct ifnet *ifp; struct in_addr in; icmphdr_t *icmp; tcphdr_t *tcp; u_short dport; nat_t *nat; u_32_t iph; softc = fin->fin_main_soft; softn = softc->ipf_nat_soft; if (softn->ipf_nat_lock != 0) return (0); if (softn->ipf_nat_stats.ns_rules == 0 && softn->ipf_nat_instances == NULL) return (0); tcp = NULL; icmp = NULL; dport = 0; natadd = 1; nflags = 0; natfailed = 0; ifp = fin->fin_ifp; if (!(fin->fin_flx & FI_SHORT) && (fin->fin_off == 0)) { switch (fin->fin_p) { case IPPROTO_TCP : nflags = IPN_TCP; break; case IPPROTO_UDP : nflags = IPN_UDP; break; case IPPROTO_ICMP : icmp = fin->fin_dp; /* * This is an incoming packet, so the destination is * the icmp_id and the source port equals 0 */ if ((fin->fin_flx & FI_ICMPQUERY) != 0) { nflags = IPN_ICMPQUERY; dport = icmp->icmp_id; } break; default : break; } if ((nflags & IPN_TCPUDP)) { tcp = fin->fin_dp; dport = fin->fin_data[1]; } } in = fin->fin_dst; READ_ENTER(&softc->ipf_nat); if ((fin->fin_p == IPPROTO_ICMP) && !(nflags & IPN_ICMPQUERY) && (nat = ipf_nat_icmperror(fin, &nflags, NAT_INBOUND))) /*EMPTY*/; else if ((fin->fin_flx & FI_FRAG) && (nat = ipf_frag_natknown(fin))) natadd = 0; else if ((nat = ipf_nat_inlookup(fin, nflags|NAT_SEARCH, (u_int)fin->fin_p, fin->fin_src, in))) { nflags = nat->nat_flags; } else if (fin->fin_off == 0) { u_32_t hv, msk, rmsk = 0; /* * If there is no current entry in the nat table for this IP#, * create one for it (if there is a matching rule).
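The rule search below iterates over each active redirect netmask in turn (the maskloop label): the destination address is masked, hashed with NAT_HASH_FN() and the corresponding chain of rdr rules walked, until either a rule yields a new entry or the masks are exhausted.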
*/ maskloop: msk = softn->ipf_nat_rdr_active_masks[rmsk]; iph = in.s_addr & msk; hv = NAT_HASH_FN(iph, 0, softn->ipf_nat_rdrrules_sz); retry_roundrobin: /* TRACE (iph,msk,rmsk,hv,softn->ipf_nat_rdrrules_sz) */ for (np = softn->ipf_nat_rdr_rules[hv]; np; np = npnext) { npnext = np->in_rnext; if (np->in_ifps[0] && (np->in_ifps[0] != ifp)) continue; if (np->in_v[0] != 4) continue; if (np->in_pr[0] && (np->in_pr[0] != fin->fin_p)) continue; if ((np->in_flags & IPN_RF) && !(np->in_flags & nflags)) continue; if (np->in_flags & IPN_FILTER) { switch (ipf_nat_match(fin, np)) { case 0 : continue; case -1 : rval = -3; goto inmatchfail; case 1 : default : break; } } else { if ((in.s_addr & np->in_odstmsk) != np->in_odstaddr) continue; if (np->in_odport && ((np->in_dtop < dport) || (dport < np->in_odport))) continue; } if (np->in_plabel != -1) { if (!ipf_proxy_ok(fin, tcp, np)) { continue; } } if (np->in_flags & IPN_NO) { np->in_hits++; break; } MUTEX_ENTER(&softn->ipf_nat_new); /* * If we've matched a round-robin rule but it has * moved in the list since we got it, start over as * this is now no longer correct. */ if (npnext != np->in_rnext) { if ((np->in_flags & IPN_ROUNDR) != 0) { MUTEX_EXIT(&softn->ipf_nat_new); goto retry_roundrobin; } npnext = np->in_rnext; } nat = ipf_nat_add(fin, np, NULL, nflags, NAT_INBOUND); MUTEX_EXIT(&softn->ipf_nat_new); if (nat != NULL) { natfailed = 0; break; } natfailed = -2; } if ((np == NULL) && (rmsk < softn->ipf_nat_rdr_max)) { rmsk++; goto maskloop; } } if (nat != NULL) { rval = ipf_nat_in(fin, nat, natadd, nflags); if (rval == 1) { MUTEX_ENTER(&nat->nat_lock); ipf_nat_update(fin, nat); nat->nat_bytes[0] += fin->fin_plen; nat->nat_pkts[0]++; fin->fin_pktnum = nat->nat_pkts[0]; MUTEX_EXIT(&nat->nat_lock); } } else rval = natfailed; inmatchfail: RWLOCK_EXIT(&softc->ipf_nat); DT2(frb_natv4in, fr_info_t *, fin, int, rval); switch (rval) { case -3 : /* ipf_nat_match() failure */ /* FALLTHROUGH */ case -2 : /* retry_roundrobin loop failure */ /* FALLTHROUGH */ case -1 : /* proxy failure detected by ipf_nat_in() */ if (passp != NULL) { NBUMPSIDED(0, ns_drop); *passp = FR_BLOCK; fin->fin_reason = FRB_NATV4; } fin->fin_flx |= FI_BADNAT; NBUMPSIDED(0, ns_badnat); rval = -1; /* We only return -1 on error. */ break; case 0 : NBUMPSIDE(0, ns_ignored); break; case 1 : NBUMPSIDE(0, ns_translated); break; } return (rval); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_in */ /* Returns: int - -1 == packet failed NAT checks so block it, */ /* 1 == packet was successfully translated. */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT structure */ /* natadd(I) - flag indicating if it is safe to add frag cache */ /* nflags(I) - NAT flags set for this packet */ /* Locks Held: ipf_nat(READ) */ /* */ /* Translate a packet coming "in" on an interface. 
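Note the ordering difference from ipf_nat_out(): here any application proxy check runs before the address and checksum rewriting rather than after it.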
*/ /* ------------------------------------------------------------------------ */ int ipf_nat_in(fr_info_t *fin, nat_t *nat, int natadd, u_32_t nflags) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; u_32_t sumd, ipsumd, sum1, sum2; icmphdr_t *icmp; tcphdr_t *tcp; ipnat_t *np; int skip; int i; tcp = NULL; np = nat->nat_ptr; fin->fin_fr = nat->nat_fr; if (np != NULL) { if ((natadd != 0) && (fin->fin_flx & FI_FRAG)) (void) ipf_frag_natnew(softc, fin, 0, nat); /* ------------------------------------------------------------- */ /* A few quick notes: */ /* Following are test conditions prior to calling the */ /* ipf_proxy_check routine. */ /* */ /* A NULL tcp indicates a non TCP/UDP packet. When dealing */ /* with a map rule, we attempt to match the packet's */ /* source port against in_dport, otherwise we'd compare the */ /* packet's destination. */ /* ------------------------------------------------------------- */ if (np->in_apr != NULL) { i = ipf_proxy_check(fin, nat); if (i == -1) { NBUMPSIDED(0, ns_ipf_proxy_fail); return (-1); } } } ipf_sync_update(softc, SMC_NAT, fin, nat->nat_sync); ipsumd = nat->nat_ipsumd; /* * Fix up checksums, not by recalculating them, but * simply computing adjustments. * Why only do this for some platforms on inbound packets ? * Because for those that it is done, IP processing is yet to happen * and so the IPv4 header checksum has not yet been evaluated. * Perhaps it should always be done for the benefit of things like * fast forwarding (so that it doesn't need to be recomputed) but with * header checksum offloading, perhaps it is a moot point. */ switch (nat->nat_dir) { case NAT_INBOUND : if ((fin->fin_flx & FI_ICMPERR) == 0) { fin->fin_ip->ip_src = nat->nat_nsrcip; fin->fin_saddr = nat->nat_nsrcaddr; } else { sum1 = nat->nat_osrcaddr; sum2 = nat->nat_nsrcaddr; CALC_SUMD(sum1, sum2, sumd); ipsumd -= sumd; } fin->fin_ip->ip_dst = nat->nat_ndstip; fin->fin_daddr = nat->nat_ndstaddr; #if !defined(_KERNEL) || SOLARIS ipf_fix_outcksum(0, &fin->fin_ip->ip_sum, ipsumd, 0); #endif break; case NAT_OUTBOUND : if ((fin->fin_flx & FI_ICMPERR) == 0) { fin->fin_ip->ip_src = nat->nat_odstip; fin->fin_saddr = nat->nat_odstaddr; } else { sum1 = nat->nat_odstaddr; sum2 = nat->nat_ndstaddr; CALC_SUMD(sum1, sum2, sumd); ipsumd -= sumd; } fin->fin_ip->ip_dst = nat->nat_osrcip; fin->fin_daddr = nat->nat_osrcaddr; #if !defined(_KERNEL) || SOLARIS ipf_fix_incksum(0, &fin->fin_ip->ip_sum, ipsumd, 0); #endif break; case NAT_DIVERTIN : { udphdr_t *uh; ip_t *ip; mb_t *m; m = M_DUP(np->in_divmp); if (m == NULL) { NBUMPSIDED(0, ns_divert_dup); return (-1); } ip = MTOD(m, ip_t *); - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); sum1 = ntohs(ip->ip_len); ip->ip_len = ntohs(ip->ip_len); ip->ip_len += fin->fin_plen; ip->ip_len = htons(ip->ip_len); uh = (udphdr_t *)(ip + 1); uh->uh_ulen += fin->fin_plen; uh->uh_ulen = htons(uh->uh_ulen); sum2 = ntohs(ip->ip_id) + ntohs(ip->ip_len); sum2 += ntohs(ip->ip_off) & IP_DF; CALC_SUMD(sum1, sum2, sumd); #if !defined(_KERNEL) || SOLARIS ipf_fix_outcksum(0, &ip->ip_sum, sumd, 0); #endif PREP_MB_T(fin, m); fin->fin_ip = ip; fin->fin_plen += sizeof(ip_t) + 8; /* UDP + new IPv4 hdr */ fin->fin_dlen += sizeof(ip_t) + 8; /* UDP + old IPv4 hdr */ nflags &= ~IPN_TCPUDPICMP; break; } case NAT_DIVERTOUT : { mb_t *m; skip = ipf_nat_decap(fin, nat); if (skip <= 0) { NBUMPSIDED(0, ns_decap_fail); return (-1); } m = fin->fin_m; #if SOLARIS && defined(_KERNEL) m->b_rptr += skip; #else m->m_data += skip; m->m_len -= skip; # 
ifdef M_PKTHDR if (m->m_flags & M_PKTHDR) m->m_pkthdr.len -= skip; # endif #endif ipf_nat_update(fin, nat); nflags &= ~IPN_TCPUDPICMP; fin->fin_flx |= FI_NATED; if (np != NULL && np->in_tag.ipt_num[0] != 0) fin->fin_nattag = &np->in_tag; return (1); /* NOTREACHED */ } } if (nflags & IPN_TCPUDP) tcp = fin->fin_dp; if (!(fin->fin_flx & FI_SHORT) && (fin->fin_off == 0)) { u_short *csump; if ((nat->nat_odport != 0) && (nflags & IPN_TCPUDP)) { switch (nat->nat_dir) { case NAT_INBOUND : tcp->th_sport = nat->nat_nsport; fin->fin_data[0] = ntohs(nat->nat_nsport); tcp->th_dport = nat->nat_ndport; fin->fin_data[1] = ntohs(nat->nat_ndport); break; case NAT_OUTBOUND : tcp->th_sport = nat->nat_odport; fin->fin_data[0] = ntohs(nat->nat_odport); tcp->th_dport = nat->nat_osport; fin->fin_data[1] = ntohs(nat->nat_osport); break; } } if ((nat->nat_odport != 0) && (nflags & IPN_ICMPQUERY)) { icmp = fin->fin_dp; icmp->icmp_id = nat->nat_nicmpid; } csump = ipf_nat_proto(fin, nat, nflags); /* * The above comments do not hold for layer 4 (or higher) * checksums... */ if (csump != NULL) { if (nat->nat_dir == NAT_OUTBOUND) ipf_fix_incksum(0, csump, nat->nat_sumd[0], 0); else ipf_fix_outcksum(0, csump, nat->nat_sumd[0], 0); } } fin->fin_flx |= FI_NATED; if (np != NULL && np->in_tag.ipt_num[0] != 0) fin->fin_nattag = &np->in_tag; return (1); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_proto */ /* Returns: u_short* - pointer to transport header checksum to update, */ /* NULL if the transport protocol is not recognised */ /* as needing a checksum update. */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT structure */ /* nflags(I) - NAT flags set for this packet */ /* */ /* Return the pointer to the checksum field for each protocol so understood.*/ /* If support for making other changes to a protocol header is required, */ /* that is not strictly 'address' translation, such as clamping the MSS in */ /* TCP down to a specific value, then do it from here. */ /* ------------------------------------------------------------------------ */ u_short * ipf_nat_proto(fr_info_t *fin, nat_t *nat, u_int nflags) { icmphdr_t *icmp; u_short *csump; tcphdr_t *tcp; udphdr_t *udp; csump = NULL; if (fin->fin_out == 0) { fin->fin_rev = (nat->nat_dir & NAT_OUTBOUND); } else { fin->fin_rev = ((nat->nat_dir & NAT_OUTBOUND) == 0); } switch (fin->fin_p) { case IPPROTO_TCP : tcp = fin->fin_dp; if ((nflags & IPN_TCP) != 0) csump = &tcp->th_sum; /* * Do a MSS CLAMPING on a SYN packet, * only deal with IPv4 for now. */ if ((nat->nat_mssclamp != 0) && (tcp_get_flags(tcp) & TH_SYN) != 0) ipf_nat_mssclamp(tcp, nat->nat_mssclamp, fin, csump); break; case IPPROTO_UDP : udp = fin->fin_dp; if ((nflags & IPN_UDP) != 0) { if (udp->uh_sum != 0) csump = &udp->uh_sum; } break; case IPPROTO_ICMP : icmp = fin->fin_dp; if ((nflags & IPN_ICMPQUERY) != 0) { if (icmp->icmp_cksum != 0) csump = &icmp->icmp_cksum; } break; #ifdef USE_INET6 case IPPROTO_ICMPV6 : { struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)fin->fin_dp; if ((nflags & IPN_ICMPQUERY) != 0) { if (icmp6->icmp6_cksum != 0) csump = &icmp6->icmp6_cksum; } break; } #endif } return (csump); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_expire */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Check all of the timeout queues for entries at the top which need to be */ /* expired.
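The timeout queues are kept ordered by expiry, so each scan below can stop at the first entry whose tqe_die still lies in the future.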
*/ /* ------------------------------------------------------------------------ */ void ipf_nat_expire(ipf_main_softc_t *softc) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; ipftq_t *ifq, *ifqnext; ipftqent_t *tqe, *tqn; int i; SPL_INT(s); SPL_NET(s); WRITE_ENTER(&softc->ipf_nat); for (ifq = softn->ipf_nat_tcptq, i = 0; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); i++) { if (tqe->tqe_die > softc->ipf_ticks) break; tqn = tqe->tqe_next; ipf_nat_delete(softc, tqe->tqe_parent, NL_EXPIRE); } } for (ifq = softn->ipf_nat_utqe; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); i++) { if (tqe->tqe_die > softc->ipf_ticks) break; tqn = tqe->tqe_next; ipf_nat_delete(softc, tqe->tqe_parent, NL_EXPIRE); } } for (ifq = softn->ipf_nat_utqe; ifq != NULL; ifq = ifqnext) { ifqnext = ifq->ifq_next; if (((ifq->ifq_flags & IFQF_DELETE) != 0) && (ifq->ifq_ref == 0)) { ipf_freetimeoutqueue(softc, ifq); } } if (softn->ipf_nat_doflush != 0) { ipf_nat_extraflush(softc, softn, 2); softn->ipf_nat_doflush = 0; } RWLOCK_EXIT(&softc->ipf_nat); SPL_X(s); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_sync */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* ifp(I) - pointer to network interface */ /* */ /* Walk through all of the currently active NAT sessions, looking for those */ /* which need to have their translated address updated. */ /* ------------------------------------------------------------------------ */ void ipf_nat_sync(ipf_main_softc_t *softc, void *ifp) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; u_32_t sum1, sum2, sumd; i6addr_t in; ipnat_t *n; nat_t *nat; void *ifp2; int idx; SPL_INT(s); if (softc->ipf_running <= 0) return; /* * Change IP addresses for NAT sessions for any protocol except TCP * since it will break the TCP connection anyway. The only rules * which will get changed are those which are "map ... -> 0/32", * where the rule specifies the address is taken from the interface. */ SPL_NET(s); WRITE_ENTER(&softc->ipf_nat); if (softc->ipf_running <= 0) { RWLOCK_EXIT(&softc->ipf_nat); return; } for (nat = softn->ipf_nat_instances; nat; nat = nat->nat_next) { if ((nat->nat_flags & IPN_TCP) != 0) continue; n = nat->nat_ptr; if (n != NULL) { if (n->in_v[1] == 4) { if (n->in_redir & NAT_MAP) { if ((n->in_nsrcaddr != 0) || (n->in_nsrcmsk != 0xffffffff)) continue; } else if (n->in_redir & NAT_REDIRECT) { if ((n->in_ndstaddr != 0) || (n->in_ndstmsk != 0xffffffff)) continue; } } #ifdef USE_INET6 if (n->in_v[1] == 6) { if (n->in_redir & NAT_MAP) { if (!IP6_ISZERO(&n->in_nsrcaddr) || !IP6_ISONES(&n->in_nsrcmsk)) continue; } else if (n->in_redir & NAT_REDIRECT) { if (!IP6_ISZERO(&n->in_ndstaddr) || !IP6_ISONES(&n->in_ndstmsk)) continue; } } #endif } if (((ifp == NULL) || (ifp == nat->nat_ifps[0]) || (ifp == nat->nat_ifps[1]))) { nat->nat_ifps[0] = GETIFP(nat->nat_ifnames[0], nat->nat_v[0]); if ((nat->nat_ifps[0] != NULL) && (nat->nat_ifps[0] != (void *)-1)) { nat->nat_mtu[0] = GETIFMTU_4(nat->nat_ifps[0]); } if (nat->nat_ifnames[1][0] != '\0') { nat->nat_ifps[1] = GETIFP(nat->nat_ifnames[1], nat->nat_v[1]); } else { nat->nat_ifps[1] = nat->nat_ifps[0]; } if ((nat->nat_ifps[1] != NULL) && (nat->nat_ifps[1] != (void *)-1)) { nat->nat_mtu[1] = GETIFMTU_4(nat->nat_ifps[1]); } ifp2 = nat->nat_ifps[0]; if (ifp2 == NULL) continue; /* * Change the map-to address to be the same as the * new one.
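The checksum delta held in nat_sumd[] is then re-derived from the before and after source address sums, so established sessions keep producing valid checksums with the new address.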
*/ sum1 = NATFSUM(nat, nat->nat_v[1], nat_nsrc6); if (ipf_ifpaddr(softc, nat->nat_v[0], FRI_NORMAL, ifp2, &in, NULL) != -1) { if (nat->nat_v[0] == 4) nat->nat_nsrcip = in.in4; } sum2 = NATFSUM(nat, nat->nat_v[1], nat_nsrc6); if (sum1 == sum2) continue; /* * Readjust the checksum adjustment to take into * account the new IP#. */ CALC_SUMD(sum1, sum2, sumd); /* XXX - don't change for TCP when solaris does * hardware checksumming. */ sumd += nat->nat_sumd[0]; nat->nat_sumd[0] = (sumd & 0xffff) + (sumd >> 16); nat->nat_sumd[1] = nat->nat_sumd[0]; } } for (n = softn->ipf_nat_list; (n != NULL); n = n->in_next) { char *base = n->in_names; if ((ifp == NULL) || (n->in_ifps[0] == ifp)) n->in_ifps[0] = ipf_resolvenic(softc, base + n->in_ifnames[0], n->in_v[0]); if ((ifp == NULL) || (n->in_ifps[1] == ifp)) n->in_ifps[1] = ipf_resolvenic(softc, base + n->in_ifnames[1], n->in_v[1]); if (n->in_redir & NAT_REDIRECT) idx = 1; else idx = 0; if (((ifp == NULL) || (n->in_ifps[idx] == ifp)) && (n->in_ifps[idx] != NULL && n->in_ifps[idx] != (void *)-1)) { ipf_nat_nextaddrinit(softc, n->in_names, &n->in_osrc, 0, n->in_ifps[idx]); ipf_nat_nextaddrinit(softc, n->in_names, &n->in_odst, 0, n->in_ifps[idx]); ipf_nat_nextaddrinit(softc, n->in_names, &n->in_nsrc, 0, n->in_ifps[idx]); ipf_nat_nextaddrinit(softc, n->in_names, &n->in_ndst, 0, n->in_ifps[idx]); } } RWLOCK_EXIT(&softc->ipf_nat); SPL_X(s); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_icmpquerytype */ /* Returns: int - 1 == success, 0 == failure */ /* Parameters: icmptype(I) - ICMP type number */ /* */ /* Tests to see if the ICMP type number passed is a query/response type or */ /* not. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_icmpquerytype(int icmptype) { /* * For the ICMP query NAT code, it is essential that both the query * and the reply match on the NAT rule. Because the NAT structure * does not keep track of the icmptype, and a single NAT structure * is used for all icmp types with the same src, dest and id, we * simply define the replies as queries as well. The funny thing is, * although it seems silly to call a reply a query, this is exactly * as it is defined in the IPv4 specification. */ switch (icmptype) { case ICMP_ECHOREPLY: case ICMP_ECHO: /* route advertisement/solicitation is currently unsupported: */ /* it would require rewriting the ICMP data section */ case ICMP_TSTAMP: case ICMP_TSTAMPREPLY: case ICMP_IREQ: case ICMP_IREQREPLY: case ICMP_MASKREQ: case ICMP_MASKREPLY: return (1); default: return (0); } } /* ------------------------------------------------------------------------ */ /* Function: nat_log */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* nat(I) - pointer to NAT structure */ /* action(I) - action related to NAT structure being performed */ /* */ /* Creates a NAT log entry.
*/ /* ------------------------------------------------------------------------ */ void ipf_nat_log(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, struct nat *nat, u_int action) { #ifdef IPFILTER_LOG struct ipnat *np; int rulen; struct natlog natl; void *items[1]; size_t sizes[1]; int types[1]; bcopy((char *)&nat->nat_osrc6, (char *)&natl.nl_osrcip, sizeof(natl.nl_osrcip)); bcopy((char *)&nat->nat_nsrc6, (char *)&natl.nl_nsrcip, sizeof(natl.nl_nsrcip)); bcopy((char *)&nat->nat_odst6, (char *)&natl.nl_odstip, sizeof(natl.nl_odstip)); bcopy((char *)&nat->nat_ndst6, (char *)&natl.nl_ndstip, sizeof(natl.nl_ndstip)); natl.nl_bytes[0] = nat->nat_bytes[0]; natl.nl_bytes[1] = nat->nat_bytes[1]; natl.nl_pkts[0] = nat->nat_pkts[0]; natl.nl_pkts[1] = nat->nat_pkts[1]; natl.nl_odstport = nat->nat_odport; natl.nl_osrcport = nat->nat_osport; natl.nl_nsrcport = nat->nat_nsport; natl.nl_ndstport = nat->nat_ndport; natl.nl_p[0] = nat->nat_pr[0]; natl.nl_p[1] = nat->nat_pr[1]; natl.nl_v[0] = nat->nat_v[0]; natl.nl_v[1] = nat->nat_v[1]; natl.nl_type = nat->nat_redir; natl.nl_action = action; natl.nl_rule = -1; bcopy(nat->nat_ifnames[0], natl.nl_ifnames[0], sizeof(nat->nat_ifnames[0])); bcopy(nat->nat_ifnames[1], natl.nl_ifnames[1], sizeof(nat->nat_ifnames[1])); if (softc->ipf_large_nat && nat->nat_ptr != NULL) { for (rulen = 0, np = softn->ipf_nat_list; np != NULL; np = np->in_next, rulen++) if (np == nat->nat_ptr) { natl.nl_rule = rulen; break; } } items[0] = &natl; sizes[0] = sizeof(natl); types[0] = 0; (void) ipf_log_items(softc, IPL_LOGNAT, NULL, items, sizes, types, 1); #endif } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_rule_deref */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* inp(I) - pointer to pointer to NAT rule */ /* Write Locks: ipf_nat */ /* */ /* Dropping the reference count for a rule means that whatever held the */ /* pointer to this rule (*inp) is no longer interested in it and when the */ /* reference count drops to zero, any resources allocated for the rule can */ /* be released and the rule itself free'd.
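As well as freeing the rule itself, the map/rdr/total rule statistics are decremented and any rule-private timeout queues are released once their own reference counts reach zero.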
*/ /* ------------------------------------------------------------------------ */ void ipf_nat_rule_deref(ipf_main_softc_t *softc, ipnat_t **inp) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; ipnat_t *n; n = *inp; *inp = NULL; n->in_use--; if (n->in_use > 0) return; if (n->in_apr != NULL) ipf_proxy_deref(n->in_apr); ipf_nat_rule_fini(softc, n); if (n->in_redir & NAT_REDIRECT) { if ((n->in_flags & IPN_PROXYRULE) == 0) { ATOMIC_DEC32(softn->ipf_nat_stats.ns_rules_rdr); } } if (n->in_redir & (NAT_MAP|NAT_MAPBLK)) { if ((n->in_flags & IPN_PROXYRULE) == 0) { ATOMIC_DEC32(softn->ipf_nat_stats.ns_rules_map); } } if (n->in_tqehead[0] != NULL) { if (ipf_deletetimeoutqueue(n->in_tqehead[0]) == 0) { ipf_freetimeoutqueue(softc, n->in_tqehead[0]); } } if (n->in_tqehead[1] != NULL) { if (ipf_deletetimeoutqueue(n->in_tqehead[1]) == 0) { ipf_freetimeoutqueue(softc, n->in_tqehead[1]); } } if ((n->in_flags & IPN_PROXYRULE) == 0) { ATOMIC_DEC32(softn->ipf_nat_stats.ns_rules); } MUTEX_DESTROY(&n->in_lock); KFREES(n, n->in_size); #if SOLARIS && !defined(INSTANCES) if (softn->ipf_nat_stats.ns_rules == 0) pfil_delayed_copy = 1; #endif } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_deref */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* natp(I) - pointer to pointer to NAT table entry */ /* */ /* Decrement the reference counter for this NAT table entry and free it if */ /* there are no more things using it. */ /* */ /* IF nat_ref == 1 when this function is called, then we have an orphan nat */ /* structure *because* it only gets called on paths _after_ nat_ref has been*/ /* incremented. If nat_ref == 1 then we shouldn't decrement it here */ /* because nat_delete() will do that and send nat_ref to -1. */ /* */ /* Holding the lock on nat_lock is required to serialise nat_delete() being */ /* called from a NAT flush ioctl with a deref happening because of a packet.*/ /* ------------------------------------------------------------------------ */ void ipf_nat_deref(ipf_main_softc_t *softc, nat_t **natp) { nat_t *nat; nat = *natp; *natp = NULL; MUTEX_ENTER(&nat->nat_lock); if (nat->nat_ref > 1) { nat->nat_ref--; ASSERT(nat->nat_ref >= 0); MUTEX_EXIT(&nat->nat_lock); return; } MUTEX_EXIT(&nat->nat_lock); WRITE_ENTER(&softc->ipf_nat); ipf_nat_delete(softc, nat, NL_EXPIRE); RWLOCK_EXIT(&softc->ipf_nat); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_clone */ /* Returns: nat_t* - NULL == cloning failed, */ /* else pointer to new NAT structure */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to master NAT structure */ /* Write Lock: ipf_nat */ /* */ /* Create a "duplicate" NAT table entry from the master. */ /* ------------------------------------------------------------------------ */ nat_t * ipf_nat_clone(fr_info_t *fin, nat_t *nat) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; frentry_t *fr; nat_t *clone; ipnat_t *np; KMALLOC(clone, nat_t *); if (clone == NULL) { NBUMPSIDED(fin->fin_out, ns_clone_nomem); return (NULL); } bcopy((char *)nat, (char *)clone, sizeof(*clone)); MUTEX_NUKE(&clone->nat_lock); clone->nat_rev = fin->fin_rev; clone->nat_aps = NULL; /* * Initialize all these so that ipf_nat_delete() doesn't cause a crash.
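A clone is a full copy of the master inserted into the tables as an independent session: it is marked SI_CLONED rather than SI_CLONE and takes its own references on the hostmap, NAT rule and filter rule, plus its own timeout and sync state.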
*/ clone->nat_tqe.tqe_pnext = NULL; clone->nat_tqe.tqe_next = NULL; clone->nat_tqe.tqe_ifq = NULL; clone->nat_tqe.tqe_parent = clone; clone->nat_flags &= ~SI_CLONE; clone->nat_flags |= SI_CLONED; if (clone->nat_hm) clone->nat_hm->hm_ref++; if (ipf_nat_insert(softc, softn, clone) == -1) { KFREE(clone); NBUMPSIDED(fin->fin_out, ns_insert_fail); return (NULL); } np = clone->nat_ptr; if (np != NULL) { if (softn->ipf_nat_logging) ipf_nat_log(softc, softn, clone, NL_CLONE); np->in_use++; } fr = clone->nat_fr; if (fr != NULL) { MUTEX_ENTER(&fr->fr_lock); fr->fr_ref++; MUTEX_EXIT(&fr->fr_lock); } /* * Because the clone is created outside the normal loop of things and * TCP has special needs in terms of state, initialise the timeout * state of the new NAT from here. */ if (clone->nat_pr[0] == IPPROTO_TCP) { (void) ipf_tcp_age(&clone->nat_tqe, fin, softn->ipf_nat_tcptq, clone->nat_flags, 2); } clone->nat_sync = ipf_sync_new(softc, SMC_NAT, fin, clone); if (softn->ipf_nat_logging) ipf_nat_log(softc, softn, clone, NL_CLONE); return (clone); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_wildok */ /* Returns: int - 1 == packet's ports match wildcards */ /* 0 == packet's ports don't match wildcards */ /* Parameters: nat(I) - NAT entry */ /* sport(I) - source port */ /* dport(I) - destination port */ /* flags(I) - wildcard flags */ /* dir(I) - packet direction */ /* */ /* Use NAT entry and packet direction to determine which combination of */ /* wildcard flags should be used. */ /* ------------------------------------------------------------------------ */ int ipf_nat_wildok(nat_t *nat, int sport, int dport, int flags, int dir) { /* * When called by dir is set to * nat_inlookup NAT_INBOUND (0) * nat_outlookup NAT_OUTBOUND (1) * * We simply combine the packet's direction in dir with the original * "intended" direction of that NAT entry in nat->nat_dir to decide * which combination of wildcard flags to allow. */ switch ((dir << 1) | (nat->nat_dir & (NAT_INBOUND|NAT_OUTBOUND))) { case 3: /* outbound packet / outbound entry */ if (((nat->nat_osport == sport) || (flags & SI_W_SPORT)) && ((nat->nat_odport == dport) || (flags & SI_W_DPORT))) return (1); break; case 2: /* outbound packet / inbound entry */ if (((nat->nat_osport == dport) || (flags & SI_W_SPORT)) && ((nat->nat_odport == sport) || (flags & SI_W_DPORT))) return (1); break; case 1: /* inbound packet / outbound entry */ if (((nat->nat_osport == dport) || (flags & SI_W_SPORT)) && ((nat->nat_odport == sport) || (flags & SI_W_DPORT))) return (1); break; case 0: /* inbound packet / inbound entry */ if (((nat->nat_osport == sport) || (flags & SI_W_SPORT)) && ((nat->nat_odport == dport) || (flags & SI_W_DPORT))) return (1); break; default: break; } return (0); } /* ------------------------------------------------------------------------ */ /* Function: nat_mssclamp */ /* Returns: Nil */ /* Parameters: tcp(I) - pointer to TCP header */ /* maxmss(I) - value to clamp the TCP MSS to */ /* fin(I) - pointer to packet information */ /* csump(I) - pointer to TCP checksum */ /* */ /* Check for MSS option and clamp it if necessary. If found and changed, */ /* then the TCP header checksum will be updated to reflect the change in */ /* the MSS. 
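For example (illustrative values only), clamping an advertised MSS of 1460 down to 1400 rewrites the two option bytes in place and folds the 16-bit difference into the TCP checksum via CALC_SUMD()/ipf_fix_outcksum(), just as for an address rewrite.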
*/ /* ------------------------------------------------------------------------ */ static void ipf_nat_mssclamp(tcphdr_t *tcp, u_32_t maxmss, fr_info_t *fin, u_short *csump) { u_char *cp, *ep, opt; int hlen, advance; u_32_t mss, sumd; hlen = TCP_OFF(tcp) << 2; if (hlen > sizeof(*tcp)) { cp = (u_char *)tcp + sizeof(*tcp); ep = (u_char *)tcp + hlen; while (cp < ep) { opt = cp[0]; if (opt == TCPOPT_EOL) break; else if (opt == TCPOPT_NOP) { cp++; continue; } if (cp + 1 >= ep) break; advance = cp[1]; if ((cp + advance > ep) || (advance <= 0)) break; switch (opt) { case TCPOPT_MAXSEG: if (advance != 4) break; mss = cp[2] * 256 + cp[3]; if (mss > maxmss) { cp[2] = maxmss / 256; cp[3] = maxmss & 0xff; CALC_SUMD(mss, maxmss, sumd); ipf_fix_outcksum(0, csump, sumd, 0); } break; default: /* ignore unknown options */ break; } cp += advance; } } } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_setqueue */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* nat(I) - pointer to NAT structure */ /* Locks: ipf_nat (read or write) */ /* */ /* Put the NAT entry on its default queue entry, using rev as a helper in */ /* determining which queue it should be placed on. */ /* ------------------------------------------------------------------------ */ void ipf_nat_setqueue(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, nat_t *nat) { ipftq_t *oifq, *nifq; int rev = nat->nat_rev; if (nat->nat_ptr != NULL) nifq = nat->nat_ptr->in_tqehead[rev]; else nifq = NULL; if (nifq == NULL) { switch (nat->nat_pr[0]) { case IPPROTO_UDP : nifq = &softn->ipf_nat_udptq; break; case IPPROTO_ICMP : nifq = &softn->ipf_nat_icmptq; break; case IPPROTO_TCP : nifq = softn->ipf_nat_tcptq + nat->nat_tqe.tqe_state[rev]; break; default : nifq = &softn->ipf_nat_iptq; break; } } oifq = nat->nat_tqe.tqe_ifq; /* * If it's currently on a timeout queue, move it from one queue to * another, else put it on the end of the newly determined queue. */ if (oifq != NULL) ipf_movequeue(softc->ipf_ticks, &nat->nat_tqe, oifq, nifq); else ipf_queueappend(softc->ipf_ticks, &nat->nat_tqe, nifq, nat); return; } /* ------------------------------------------------------------------------ */ /* Function: nat_getnext */ /* Returns: int - 0 == ok, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* t(I) - pointer to ipftoken structure */ /* itp(I) - pointer to ipfgeniter_t structure */ /* */ /* Fetch the next nat/ipnat structure pointer from the linked list and */ /* copy it out to the storage space pointed to by itp_data. The next item */ /* in the list to look at is put back in the ipftoken structure.
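A reference is taken on the next item while the NAT lock is held and the reference from the previous call is only dropped after the copyout, keeping the iteration safe against concurrent deletion.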
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_getnext(ipf_main_softc_t *softc, ipftoken_t *t, ipfgeniter_t *itp, ipfobj_t *objp) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; hostmap_t *hm, *nexthm = NULL, zerohm; ipnat_t *ipn, *nextipnat = NULL, zeroipn; nat_t *nat, *nextnat = NULL, zeronat; int error = 0; void *nnext; if (itp->igi_nitems != 1) { IPFERROR(60075); return (ENOSPC); } READ_ENTER(&softc->ipf_nat); switch (itp->igi_type) { case IPFGENITER_HOSTMAP : hm = t->ipt_data; if (hm == NULL) { nexthm = softn->ipf_hm_maplist; } else { nexthm = hm->hm_next; } if (nexthm != NULL) { ATOMIC_INC32(nexthm->hm_ref); t->ipt_data = nexthm; } else { bzero(&zerohm, sizeof(zerohm)); nexthm = &zerohm; t->ipt_data = NULL; } nnext = nexthm->hm_next; break; case IPFGENITER_IPNAT : ipn = t->ipt_data; if (ipn == NULL) { nextipnat = softn->ipf_nat_list; } else { nextipnat = ipn->in_next; } if (nextipnat != NULL) { ATOMIC_INC32(nextipnat->in_use); t->ipt_data = nextipnat; } else { bzero(&zeroipn, sizeof(zeroipn)); nextipnat = &zeroipn; t->ipt_data = NULL; } nnext = nextipnat->in_next; break; case IPFGENITER_NAT : nat = t->ipt_data; if (nat == NULL) { nextnat = softn->ipf_nat_instances; } else { nextnat = nat->nat_next; } if (nextnat != NULL) { MUTEX_ENTER(&nextnat->nat_lock); nextnat->nat_ref++; MUTEX_EXIT(&nextnat->nat_lock); t->ipt_data = nextnat; } else { bzero(&zeronat, sizeof(zeronat)); nextnat = &zeronat; t->ipt_data = NULL; } nnext = nextnat->nat_next; break; default : RWLOCK_EXIT(&softc->ipf_nat); IPFERROR(60055); return (EINVAL); } RWLOCK_EXIT(&softc->ipf_nat); objp->ipfo_ptr = itp->igi_data; switch (itp->igi_type) { case IPFGENITER_HOSTMAP : error = COPYOUT(nexthm, objp->ipfo_ptr, sizeof(*nexthm)); if (error != 0) { IPFERROR(60049); error = EFAULT; } if (hm != NULL) { WRITE_ENTER(&softc->ipf_nat); ipf_nat_hostmapdel(softc, &hm); RWLOCK_EXIT(&softc->ipf_nat); } break; case IPFGENITER_IPNAT : objp->ipfo_size = nextipnat->in_size; objp->ipfo_type = IPFOBJ_IPNAT; error = ipf_outobjk(softc, objp, nextipnat); if (ipn != NULL) { WRITE_ENTER(&softc->ipf_nat); ipf_nat_rule_deref(softc, &ipn); RWLOCK_EXIT(&softc->ipf_nat); } break; case IPFGENITER_NAT : objp->ipfo_size = sizeof(nat_t); objp->ipfo_type = IPFOBJ_NAT; error = ipf_outobjk(softc, objp, nextnat); if (nat != NULL) ipf_nat_deref(softc, &nat); break; } if (nnext == NULL) ipf_token_mark_complete(t); return (error); } /* ------------------------------------------------------------------------ */ /* Function: nat_extraflush */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* which(I) - how to flush the active NAT table */ /* Write Locks: ipf_nat */ /* */ /* Flush nat tables. Three actions currently defined: */ /* which == 0 : flush all nat table entries */ /* which == 1 : flush TCP connections which have started to close but are */ /* stuck for some reason. */ /* which == 2 : flush TCP connections which have been idle for a long time, */ /* starting at > 4 days idle and working back in successive half-*/ /* days to at most 12 hours old. If this fails to free enough */ /* slots then work backwards in half hour slots to 30 minutes. */ /* If that too fails, then work backwards in 30 second intervals */ /* for the last 30 minutes to at worst 30 seconds idle. 
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_extraflush(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, int which) { nat_t *nat, **natp; ipftqent_t *tqn; ipftq_t *ifq; int removed; SPL_INT(s); removed = 0; SPL_NET(s); switch (which) { case 0 : softn->ipf_nat_stats.ns_flush_all++; /* * Style 0 flush removes everything... */ for (natp = &softn->ipf_nat_instances; ((nat = *natp) != NULL); ) { ipf_nat_delete(softc, nat, NL_FLUSH); removed++; } break; case 1 : softn->ipf_nat_stats.ns_flush_closing++; /* * Since we're only interested in things that are closing, * we can start with the appropriate timeout queue. */ for (ifq = softn->ipf_nat_tcptq + IPF_TCPS_CLOSE_WAIT; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; tqn != NULL; ) { nat = tqn->tqe_parent; tqn = tqn->tqe_next; if (nat->nat_pr[0] != IPPROTO_TCP || nat->nat_pr[1] != IPPROTO_TCP) break; ipf_nat_delete(softc, nat, NL_EXPIRE); removed++; } } /* * Also need to look through the user defined queues. */ for (ifq = softn->ipf_nat_utqe; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; tqn != NULL; ) { nat = tqn->tqe_parent; tqn = tqn->tqe_next; if (nat->nat_pr[0] != IPPROTO_TCP || nat->nat_pr[1] != IPPROTO_TCP) continue; if ((nat->nat_tcpstate[0] > IPF_TCPS_ESTABLISHED) && (nat->nat_tcpstate[1] > IPF_TCPS_ESTABLISHED)) { ipf_nat_delete(softc, nat, NL_EXPIRE); removed++; } } } break; /* * Args 5-11 correspond to flushing those particular states * for TCP connections. */ case IPF_TCPS_CLOSE_WAIT : case IPF_TCPS_FIN_WAIT_1 : case IPF_TCPS_CLOSING : case IPF_TCPS_LAST_ACK : case IPF_TCPS_FIN_WAIT_2 : case IPF_TCPS_TIME_WAIT : case IPF_TCPS_CLOSED : softn->ipf_nat_stats.ns_flush_state++; tqn = softn->ipf_nat_tcptq[which].ifq_head; while (tqn != NULL) { nat = tqn->tqe_parent; tqn = tqn->tqe_next; ipf_nat_delete(softc, nat, NL_FLUSH); removed++; } break; default : if (which < 30) break; softn->ipf_nat_stats.ns_flush_timeout++; /* * Treat the value as a number of seconds and use it as the * maximum idle time we'll allow before an entry is flushed. */ which = IPF_TTLVAL(which); for (natp = &softn->ipf_nat_instances; ((nat = *natp) != NULL); ) { if (softc->ipf_ticks - nat->nat_touched > which) { ipf_nat_delete(softc, nat, NL_FLUSH); removed++; } else natp = &nat->nat_next; } break; } if (which != 2) { SPL_X(s); return (removed); } softn->ipf_nat_stats.ns_flush_queue++; /* * Asked to remove inactive entries because the table is full, try * again, up to 3 times if the first attempt fails, with different * criteria each time. The order tried must be in decreasing age. * Another alternative is to implement random drop and drop N entries * at random until N have been freed up. */ if (softc->ipf_ticks - softn->ipf_nat_last_force_flush > IPF_TTLVAL(5)) { softn->ipf_nat_last_force_flush = softc->ipf_ticks; removed = ipf_queueflush(softc, ipf_nat_flush_entry, softn->ipf_nat_tcptq, softn->ipf_nat_utqe, &softn->ipf_nat_stats.ns_active, softn->ipf_nat_table_sz, softn->ipf_nat_table_wm_low); } SPL_X(s); return (removed); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_flush_entry */ /* Returns: 0 - always succeeds */ /* Parameters: softc(I) - pointer to soft context main structure */ /* entry(I) - pointer to NAT entry */ /* Write Locks: ipf_nat */ /* */ /* This function is a stepping stone between ipf_queueflush() and */ /* nat_delete().
It is used so we can provide a uniform interface via the */ /* ipf_queueflush() function. Since the nat_delete() function returns void */ /* we translate that to mean it always succeeds in deleting something. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_flush_entry(ipf_main_softc_t *softc, void *entry) { ipf_nat_delete(softc, entry, NL_FLUSH); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_iterator */ /* Returns: int - 0 == ok, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* token(I) - pointer to ipftoken structure */ /* itp(I) - pointer to ipfgeniter_t structure */ /* obj(I) - pointer to data description structure */ /* */ /* This function acts as a handler for the SIOCGENITER ioctls that use a */ /* generic structure to iterate through a list. There are three different */ /* linked lists of NAT related information to go through: NAT rules, active */ /* NAT mappings and the NAT fragment cache. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_iterator(ipf_main_softc_t *softc, ipftoken_t *token, ipfgeniter_t *itp, ipfobj_t *obj) { int error; if (itp->igi_data == NULL) { IPFERROR(60052); return (EFAULT); } switch (itp->igi_type) { case IPFGENITER_HOSTMAP : case IPFGENITER_IPNAT : case IPFGENITER_NAT : error = ipf_nat_getnext(softc, token, itp, obj); break; case IPFGENITER_NATFRAG : error = ipf_frag_nat_next(softc, token, itp); break; default : IPFERROR(60053); error = EINVAL; break; } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_setpending */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* nat(I) - pointer to NAT structure */ /* Locks: ipf_nat (read or write) */ /* */ /* Put the NAT entry on to the pending queue - this queue has a very short */ /* lifetime where items are put that can't be deleted straight away because */ /* of locking issues but we want to delete them ASAP, anyway. In calling */ /* this function, it is assumed that the owner (if there is one, as shown */ /* by nat_me) is no longer interested in it. */ /* ------------------------------------------------------------------------ */ void ipf_nat_setpending(ipf_main_softc_t *softc, nat_t *nat) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; ipftq_t *oifq; oifq = nat->nat_tqe.tqe_ifq; if (oifq != NULL) ipf_movequeue(softc->ipf_ticks, &nat->nat_tqe, oifq, &softn->ipf_nat_pending); else ipf_queueappend(softc->ipf_ticks, &nat->nat_tqe, &softn->ipf_nat_pending, nat); if (nat->nat_me != NULL) { *nat->nat_me = NULL; nat->nat_me = NULL; nat->nat_ref--; ASSERT(nat->nat_ref >= 0); } } /* ------------------------------------------------------------------------ */ /* Function: nat_newrewrite */ /* Returns: int - -1 == error, 0 == success (no move), 1 == success and */ /* allow rule to be moved if IPN_ROUNDR is set. */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to NAT entry */ /* ni(I) - pointer to structure with misc. information needed */ /* to create new NAT entry. */ /* Write Lock: ipf_nat */ /* */ /* This function is responsible for setting up an active NAT session where */ /* we are changing both the source and destination parameters at the same */ /* time. 
The loop in here works differently to elsewhere - each iteration */ /* is responsible for changing a single parameter that can be incremented. */ /* So one pass may increase the source IP#, next source port, next dest. IP#*/ /* and the last destination port for a total of 4 iterations to try each. */ /* This is done to try and exhaustively use the translation space available.*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_newrewrite(fr_info_t *fin, nat_t *nat, natinfo_t *nai) { int src_search = 1; int dst_search = 1; fr_info_t frnat; u_32_t flags; u_short swap; ipnat_t *np; nat_t *natl; int l = 0; int changed; natl = NULL; changed = -1; np = nai->nai_np; flags = nat->nat_flags; bcopy((char *)fin, (char *)&frnat, sizeof(*fin)); nat->nat_hm = NULL; do { changed = -1; /* TRACE (l, src_search, dst_search, np) */ DT4(ipf_nat_rewrite_1, int, l, int, src_search, int, dst_search, ipnat_t *, np); if ((src_search == 0) && (np->in_spnext == 0) && (dst_search == 0) && (np->in_dpnext == 0)) { if (l > 0) return (-1); } /* * Find a new source address */ if (ipf_nat_nextaddr(fin, &np->in_nsrc, &frnat.fin_saddr, &frnat.fin_saddr) == -1) { return (-1); } if ((np->in_nsrcaddr == 0) && (np->in_nsrcmsk == 0xffffffff)) { src_search = 0; if (np->in_stepnext == 0) np->in_stepnext = 1; } else if ((np->in_nsrcaddr == 0) && (np->in_nsrcmsk == 0)) { src_search = 0; if (np->in_stepnext == 0) np->in_stepnext = 1; } else if (np->in_nsrcmsk == 0xffffffff) { src_search = 0; if (np->in_stepnext == 0) np->in_stepnext = 1; } else if (np->in_nsrcmsk != 0xffffffff) { if (np->in_stepnext == 0 && changed == -1) { np->in_snip++; np->in_stepnext++; changed = 0; } } if ((flags & IPN_TCPUDPICMP) != 0) { if (np->in_spnext != 0) frnat.fin_data[0] = np->in_spnext; /* * Standard port translation. Select next port. */ if ((flags & IPN_FIXEDSPORT) != 0) { np->in_stepnext = 2; } else if ((np->in_stepnext == 1) && (changed == -1) && (natl != NULL)) { np->in_spnext++; np->in_stepnext++; changed = 1; if (np->in_spnext > np->in_spmax) np->in_spnext = np->in_spmin; } } else { np->in_stepnext = 2; } np->in_stepnext &= 0x3; /* * Find a new destination address */ /* TRACE (fin, np, l, frnat) */ DT4(ipf_nat_rewrite_2, frinfo_t *, fin, ipnat_t *, np, int, l, frinfo_t *, &frnat); if (ipf_nat_nextaddr(fin, &np->in_ndst, &frnat.fin_daddr, &frnat.fin_daddr) == -1) return (-1); if ((np->in_ndstaddr == 0) && (np->in_ndstmsk == 0xffffffff)) { dst_search = 0; if (np->in_stepnext == 2) np->in_stepnext = 3; } else if ((np->in_ndstaddr == 0) && (np->in_ndstmsk == 0)) { dst_search = 0; if (np->in_stepnext == 2) np->in_stepnext = 3; } else if (np->in_ndstmsk == 0xffffffff) { dst_search = 0; if (np->in_stepnext == 2) np->in_stepnext = 3; } else if (np->in_ndstmsk != 0xffffffff) { if ((np->in_stepnext == 2) && (changed == -1) && (natl != NULL)) { changed = 2; np->in_stepnext++; np->in_dnip++; } } if ((flags & IPN_TCPUDPICMP) != 0) { if (np->in_dpnext != 0) frnat.fin_data[1] = np->in_dpnext; /* * Standard port translation. Select next port. */ if ((flags & IPN_FIXEDDPORT) != 0) { np->in_stepnext = 0; } else if (np->in_stepnext == 3 && changed == -1) { np->in_dpnext++; np->in_stepnext++; changed = 3; if (np->in_dpnext > np->in_dpmax) np->in_dpnext = np->in_dpmin; } } else { if (np->in_stepnext == 3) np->in_stepnext = 0; } /* TRACE (frnat) */ DT1(ipf_nat_rewrite_3, frinfo_t *, &frnat); /* * Here we do a lookup of the connection as seen from * the outside. If an IP# pair already exists, try * again. 
So if you have A->B becomes C->B, you can
		 * also have D->E become C->E but not D->B causing
		 * another C->B.  Also take protocol and ports into
		 * account when determining whether a pre-existing
		 * NAT setup will cause an external conflict where
		 * this is appropriate.
		 *
		 * fin_data[] is swapped around because we are doing a
		 * lookup of the packet as if it were moving in the
		 * opposite direction of the one we are working with now.
		 */
		if (flags & IPN_TCPUDP) {
			swap = frnat.fin_data[0];
			frnat.fin_data[0] = frnat.fin_data[1];
			frnat.fin_data[1] = swap;
		}

		if (fin->fin_out == 1) {
			natl = ipf_nat_inlookup(&frnat,
					flags & ~(SI_WILDP|NAT_SEARCH),
					(u_int)frnat.fin_p,
					frnat.fin_dst, frnat.fin_src);
		} else {
			natl = ipf_nat_outlookup(&frnat,
					flags & ~(SI_WILDP|NAT_SEARCH),
					(u_int)frnat.fin_p,
					frnat.fin_dst, frnat.fin_src);
		}

		if (flags & IPN_TCPUDP) {
			swap = frnat.fin_data[0];
			frnat.fin_data[0] = frnat.fin_data[1];
			frnat.fin_data[1] = swap;
		}

		/* TRACE natl, in_stepnext, l */
		DT3(ipf_nat_rewrite_2, nat_t *, natl, ipnat_t *, np, int, l);

		if ((natl != NULL) && (l > 8))	/* XXX 8 is arbitrary */
			return (-1);

		np->in_stepnext &= 0x3;

		l++;
		changed = -1;
	} while (natl != NULL);

	nat->nat_osrcip = fin->fin_src;
	nat->nat_odstip = fin->fin_dst;
	nat->nat_nsrcip = frnat.fin_src;
	nat->nat_ndstip = frnat.fin_dst;

	if ((flags & IPN_TCPUDP) != 0) {
		nat->nat_osport = htons(fin->fin_data[0]);
		nat->nat_odport = htons(fin->fin_data[1]);
		nat->nat_nsport = htons(frnat.fin_data[0]);
		nat->nat_ndport = htons(frnat.fin_data[1]);
	} else if ((flags & IPN_ICMPQUERY) != 0) {
		nat->nat_oicmpid = fin->fin_data[1];
		nat->nat_nicmpid = frnat.fin_data[1];
	}

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function:    nat_newdivert                                               */
/* Returns:     int - -1 == error, 0 == success                             */
/* Parameters:  fin(I) - pointer to packet information                      */
/*              nat(I) - pointer to NAT entry                               */
/*              ni(I)  - pointer to structure with misc. information needed */
/*                       to create new NAT entry.                           */
/* Write Lock:  ipf_nat                                                     */
/*                                                                          */
/* Create a new NAT divert session as defined by the NAT rule.  This is     */
/* somewhat different to other NAT session creation routines because we     */
/* do not iterate through either port numbers or IP addresses, searching    */
/* for a unique mapping; however, a complementary duplicate check is made.
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_newdivert(fr_info_t *fin, nat_t *nat, natinfo_t *nai) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; fr_info_t frnat; ipnat_t *np; nat_t *natl; int p; np = nai->nai_np; bcopy((char *)fin, (char *)&frnat, sizeof(*fin)); nat->nat_pr[0] = 0; nat->nat_osrcaddr = fin->fin_saddr; nat->nat_odstaddr = fin->fin_daddr; frnat.fin_saddr = htonl(np->in_snip); frnat.fin_daddr = htonl(np->in_dnip); if ((nat->nat_flags & IPN_TCPUDP) != 0) { nat->nat_osport = htons(fin->fin_data[0]); nat->nat_odport = htons(fin->fin_data[1]); } else if ((nat->nat_flags & IPN_ICMPQUERY) != 0) { nat->nat_oicmpid = fin->fin_data[1]; } if (np->in_redir & NAT_DIVERTUDP) { frnat.fin_data[0] = np->in_spnext; frnat.fin_data[1] = np->in_dpnext; frnat.fin_flx |= FI_TCPUDP; p = IPPROTO_UDP; } else { frnat.fin_flx &= ~FI_TCPUDP; p = IPPROTO_IPIP; } if (fin->fin_out == 1) { natl = ipf_nat_inlookup(&frnat, 0, p, frnat.fin_dst, frnat.fin_src); } else { natl = ipf_nat_outlookup(&frnat, 0, p, frnat.fin_dst, frnat.fin_src); } if (natl != NULL) { NBUMPSIDED(fin->fin_out, ns_divert_exist); DT3(ns_divert_exist, fr_info_t *, fin, nat_t *, nat, natinfo_t, nai); return (-1); } nat->nat_nsrcaddr = frnat.fin_saddr; nat->nat_ndstaddr = frnat.fin_daddr; if ((nat->nat_flags & IPN_TCPUDP) != 0) { nat->nat_nsport = htons(frnat.fin_data[0]); nat->nat_ndport = htons(frnat.fin_data[1]); } else if ((nat->nat_flags & IPN_ICMPQUERY) != 0) { nat->nat_nicmpid = frnat.fin_data[1]; } nat->nat_pr[fin->fin_out] = fin->fin_p; nat->nat_pr[1 - fin->fin_out] = p; if (np->in_redir & NAT_REDIRECT) nat->nat_dir = NAT_DIVERTIN; else nat->nat_dir = NAT_DIVERTOUT; return (0); } /* ------------------------------------------------------------------------ */ /* Function: nat_builddivertmp */ /* Returns: int - -1 == error, 0 == success */ /* Parameters: softn(I) - pointer to NAT context structure */ /* np(I) - pointer to a NAT rule */ /* */ /* For divert rules, a skeleton packet representing what will be prepended */ /* to the real packet is created. Even though we don't have the full */ /* packet here, a checksum is calculated that we update later when we */ /* fill in the final details. At present a 0 checksum for UDP is being set */ /* here because it is expected that divert will be used for localhost. 
*/
/* ------------------------------------------------------------------------ */
static int
ipf_nat_builddivertmp(ipf_nat_softc_t *softn, ipnat_t *np)
{
	udphdr_t *uh;
	size_t len;
	ip_t *ip;

	if ((np->in_redir & NAT_DIVERTUDP) != 0)
		len = sizeof(ip_t) + sizeof(udphdr_t);
	else
		len = sizeof(ip_t);

	ALLOC_MB_T(np->in_divmp, len);
	if (np->in_divmp == NULL) {
		NBUMPD(ipf_nat_stats, ns_divert_build);
		return (-1);
	}

	/*
	 * First, the header to get the packet diverted to the new destination
	 */
	ip = MTOD(np->in_divmp, ip_t *);
	IP_V_A(ip, 4);
	IP_HL_A(ip, 5);
	ip->ip_tos = 0;
	if ((np->in_redir & NAT_DIVERTUDP) != 0)
		ip->ip_p = IPPROTO_UDP;
	else
		ip->ip_p = IPPROTO_IPIP;
	ip->ip_ttl = 255;
	ip->ip_off = 0;
	ip->ip_sum = 0;
	ip->ip_len = htons(len);
	ip->ip_id = 0;
	ip->ip_src.s_addr = htonl(np->in_snip);
	ip->ip_dst.s_addr = htonl(np->in_dnip);
	ip->ip_sum = ipf_cksum((u_short *)ip, sizeof(*ip));

	if (np->in_redir & NAT_DIVERTUDP) {
		uh = (udphdr_t *)(ip + 1);
		uh->uh_sum = 0;
		uh->uh_ulen = 8;
		uh->uh_sport = htons(np->in_spnext);
		uh->uh_dport = htons(np->in_dpnext);
	}

	return (0);
}

#define	MINDECAP	(sizeof(ip_t) + sizeof(udphdr_t) + sizeof(ip_t))

/* ------------------------------------------------------------------------ */
/* Function:    nat_decap                                                   */
/* Returns:     int - -1 == error, 0 == success                             */
/* Parameters:  fin(I) - pointer to packet information                      */
/*              nat(I) - pointer to current NAT session                     */
/*                                                                          */
/* This function is responsible for undoing a packet's encapsulation in the */
/* reverse of an encap/divert rule.  After removing the outer encapsulation */
/* it is necessary to call ipf_makefrip() again so that the contents of     */
/* 'fin' match the "new" packet as it may still be used by IPFilter         */
/* elsewhere.  We use "dir" here as the basis for some of the expectations  */
/* about the outer header.  If we return an error, the goal is to leave the */
/* original packet information undisturbed - this falls short at the end,   */
/* where we would need to keep a backup copy of "fin", which is expensive.  */
/* ------------------------------------------------------------------------ */
static int
ipf_nat_decap(fr_info_t *fin, nat_t *nat)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	char *hdr;
	int hlen;
	int skip;
	mb_t *m;

	if ((fin->fin_flx & FI_ICMPERR) != 0) {
		/*
		 * ICMP packets don't get decapsulated, instead what we need
		 * to do is change the ICMP reply from including (in the data
		 * portion for errors) the encapsulated packet that we sent
		 * out to something that resembles the original packet prior
		 * to encapsulation.  This isn't done here - all we're doing
		 * here is changing the outer address to ensure that it gets
		 * targeted back to the correct system.
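		 * A worked illustration (editorial note, not upstream text):
		 * if the outer destination 203.0.113.5 is rewritten back to
		 * a saved nat_osrcaddr of 192.0.2.1, CALC_SUMD computes the
		 * folded 16-bit difference between the two addresses, which
		 * is then applied to ip_sum so the header checksum is fixed
		 * up incrementally rather than recomputed from scratch.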
*/ if (nat->nat_dir & NAT_OUTBOUND) { u_32_t sum1, sum2, sumd; sum1 = ntohl(fin->fin_daddr); sum2 = ntohl(nat->nat_osrcaddr); CALC_SUMD(sum1, sum2, sumd); fin->fin_ip->ip_dst = nat->nat_osrcip; fin->fin_daddr = nat->nat_osrcaddr; #if !defined(_KERNEL) || SOLARIS ipf_fix_outcksum(0, &fin->fin_ip->ip_sum, sumd, 0); #endif } return (0); } m = fin->fin_m; skip = fin->fin_hlen; switch (nat->nat_dir) { case NAT_DIVERTIN : case NAT_DIVERTOUT : if (fin->fin_plen < MINDECAP) return (-1); skip += sizeof(udphdr_t); break; case NAT_ENCAPIN : case NAT_ENCAPOUT : if (fin->fin_plen < (skip + sizeof(ip_t))) return (-1); break; default : return (-1); /* NOTREACHED */ } /* * The aim here is to keep the original packet details in "fin" for * as long as possible so that returning with an error is for the * original packet and there is little undoing work to do. */ if (M_LEN(m) < skip + sizeof(ip_t)) { if (ipf_pr_pullup(fin, skip + sizeof(ip_t)) == -1) return (-1); } hdr = MTOD(fin->fin_m, char *); fin->fin_ip = (ip_t *)(hdr + skip); hlen = IP_HL(fin->fin_ip) << 2; if (ipf_pr_pullup(fin, skip + hlen) == -1) { NBUMPSIDED(fin->fin_out, ns_decap_pullup); return (-1); } fin->fin_hlen = hlen; fin->fin_dlen -= skip; fin->fin_plen -= skip; fin->fin_ipoff += skip; if (ipf_makefrip(hlen, (ip_t *)hdr, fin) == -1) { NBUMPSIDED(fin->fin_out, ns_decap_bad); return (-1); } return (skip); } /* ------------------------------------------------------------------------ */ /* Function: nat_nextaddr */ /* Returns: int - -1 == bad input (no new address), */ /* 0 == success and dst has new address */ /* Parameters: fin(I) - pointer to packet information */ /* na(I) - how to generate new address */ /* old(I) - original address being replaced */ /* dst(O) - where to put the new address */ /* Write Lock: ipf_nat */ /* */ /* This function uses the contents of the "na" structure, in combination */ /* with "old" to produce a new address to store in "dst". Not all of the */ /* possible uses of "na" will result in a new address. */ /* ------------------------------------------------------------------------ */ static int ipf_nat_nextaddr(fr_info_t *fin, nat_addr_t *na, u_32_t *old, u_32_t *dst) { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_nat_softc_t *softn = softc->ipf_nat_soft; u_32_t amin, amax, new; i6addr_t newip; int error; new = 0; amin = na->na_addr[0].in4.s_addr; switch (na->na_atype) { case FRI_RANGE : amax = na->na_addr[1].in4.s_addr; break; case FRI_NETMASKED : case FRI_DYNAMIC : case FRI_NORMAL : /* * Compute the maximum address by adding the inverse of the * netmask to the minimum address. */ amax = ~na->na_addr[1].in4.s_addr; amax |= amin; break; case FRI_LOOKUP : break; case FRI_BROADCAST : case FRI_PEERADDR : case FRI_NETWORK : default : DT4(ns_na_atype, fr_info_t *, fin, nat_addr_t *, na, u_32_t *, old, u_32_t *, new); return (-1); } error = -1; if (na->na_atype == FRI_LOOKUP) { if (na->na_type == IPLT_DSTLIST) { error = ipf_dstlist_select_node(fin, na->na_ptr, dst, NULL); } else { NBUMPSIDE(fin->fin_out, ns_badnextaddr); DT4(ns_badnextaddr_1, fr_info_t *, fin, nat_addr_t *, na, u_32_t *, old, u_32_t *, new); } } else if (na->na_atype == IPLT_NONE) { /* * 0/0 as the new address means leave it alone. 
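		 * (Editorial note, not upstream text: of the three cases
		 * handled below, 0/0 keeps the old address, 0/32 substitutes
		 * the interface's own address via ipf_ifpaddr(), and any
		 * other value hands out htonl(na_nextip), the round-robin
		 * counter primed by ipf_nat_nextaddrinit().)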
*/
		if (na->na_addr[0].in4.s_addr == 0 &&
		    na->na_addr[1].in4.s_addr == 0) {
			new = *old;

		/*
		 * 0/32 means get the interface's address
		 */
		} else if (na->na_addr[0].in4.s_addr == 0 &&
			   na->na_addr[1].in4.s_addr == 0xffffffff) {
			if (ipf_ifpaddr(softc, 4, na->na_atype,
					fin->fin_ifp, &newip, NULL) == -1) {
				NBUMPSIDED(fin->fin_out, ns_ifpaddrfail);
				DT4(ns_ifpaddrfail, fr_info_t *, fin,
				    nat_addr_t *, na, u_32_t *, old,
				    u_32_t *, new);
				return (-1);
			}
			new = newip.in4.s_addr;
		} else {
			new = htonl(na->na_nextip);
		}
		*dst = new;
		error = 0;
	} else {
		NBUMPSIDE(fin->fin_out, ns_badnextaddr);
		DT4(ns_badnextaddr_2, fr_info_t *, fin, nat_addr_t *, na,
		    u_32_t *, old, u_32_t *, new);
	}

	return (error);
}


/* ------------------------------------------------------------------------ */
/* Function:    nat_nextaddrinit                                            */
/* Returns:     int - 0 == success, else error number                       */
/* Parameters:  softc(I)   - pointer to soft context main structure         */
/*              na(I)      - NAT address information for generating new addr*/
/*              initial(I) - flag indicating if it is the first call for    */
/*                           this "na" structure.                           */
/*              ifp(I)     - network interface to derive address            */
/*                           information from.                              */
/*                                                                          */
/* This function is expected to be called in two scenarios: when a new NAT  */
/* rule is loaded into the kernel and when the list of NAT rules is sync'd  */
/* up with the valid network interfaces (possibly due to them changing).    */
/* To distinguish between these, the "initial" parameter is used: 1 means   */
/* the rule has just been loaded, 0 means we are updating information.      */
/* This difference is important because in instances where we are not       */
/* updating address information associated with a network interface, we     */
/* don't want to disturb what the "next" address to come out of             */
/* ipf_nat_nextaddr() will be.
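(Illustrative example, an editorial note rather than upstream text: for a
freshly loaded FRI_NORMAL rule covering 10.1.1.0/255.255.255.0, the
initial call primes na_nextip with ntohl(10.1.1.0) + 1, the first host
in the range, while a later non-initial sync call leaves na_nextip
untouched so the round-robin cursor is not reset.)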
*/ /* ------------------------------------------------------------------------ */ static int ipf_nat_nextaddrinit(ipf_main_softc_t *softc, char *base, nat_addr_t *na, int initial, void *ifp) { switch (na->na_atype) { case FRI_LOOKUP : if (na->na_subtype == 0) { na->na_ptr = ipf_lookup_res_num(softc, IPL_LOGNAT, na->na_type, na->na_num, &na->na_func); } else if (na->na_subtype == 1) { na->na_ptr = ipf_lookup_res_name(softc, IPL_LOGNAT, na->na_type, base + na->na_num, &na->na_func); } if (na->na_func == NULL) { IPFERROR(60060); return (ESRCH); } if (na->na_ptr == NULL) { IPFERROR(60056); return (ESRCH); } break; case FRI_DYNAMIC : case FRI_BROADCAST : case FRI_NETWORK : case FRI_NETMASKED : case FRI_PEERADDR : if (ifp != NULL) (void )ipf_ifpaddr(softc, 4, na->na_atype, ifp, &na->na_addr[0], &na->na_addr[1]); break; case FRI_SPLIT : case FRI_RANGE : if (initial) na->na_nextip = ntohl(na->na_addr[0].in4.s_addr); break; case FRI_NONE : na->na_addr[0].in4.s_addr &= na->na_addr[1].in4.s_addr; return (0); case FRI_NORMAL : na->na_addr[0].in4.s_addr &= na->na_addr[1].in4.s_addr; break; default : IPFERROR(60054); return (EINVAL); } if (initial && (na->na_atype == FRI_NORMAL)) { if (na->na_addr[0].in4.s_addr == 0) { if ((na->na_addr[1].in4.s_addr == 0xffffffff) || (na->na_addr[1].in4.s_addr == 0)) { return (0); } } if (na->na_addr[1].in4.s_addr == 0xffffffff) { na->na_nextip = ntohl(na->na_addr[0].in4.s_addr); } else { na->na_nextip = ntohl(na->na_addr[0].in4.s_addr) + 1; } } return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_matchflush */ /* Returns: int - -1 == error, 0 == success */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* nat(I) - pointer to current NAT session */ /* */ /* ------------------------------------------------------------------------ */ static int ipf_nat_matchflush(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, caddr_t data) { int *array, flushed, error; nat_t *nat, *natnext; ipfobj_t obj; error = ipf_matcharray_load(softc, data, &obj, &array); if (error != 0) return (error); flushed = 0; for (nat = softn->ipf_nat_instances; nat != NULL; nat = natnext) { natnext = nat->nat_next; if (ipf_nat_matcharray(nat, array, softc->ipf_ticks) == 0) { ipf_nat_delete(softc, nat, NL_FLUSH); flushed++; } } obj.ipfo_retval = flushed; error = BCOPYOUT(&obj, data, sizeof(obj)); KFREES(array, array[0] * sizeof(*array)); return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_matcharray */ /* Returns: int - -1 == error, 0 == success */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to current NAT session */ /* */ /* ------------------------------------------------------------------------ */ static int ipf_nat_matcharray(nat_t *nat, int *array, u_long ticks) { int i, n, *x, e, p; e = 0; n = array[0]; x = array + 1; for (; n > 0; x += 3 + x[2]) { if (x[0] == IPF_EXP_END) break; e = 0; n -= x[2] + 3; if (n < 0) break; p = x[0] >> 16; if (p != 0 && p != nat->nat_pr[1]) break; switch (x[0]) { case IPF_EXP_IP_PR : for (i = 0; !e && i < x[2]; i++) { e |= (nat->nat_pr[1] == x[i + 3]); } break; case IPF_EXP_IP_SRCADDR : if (nat->nat_v[0] == 4) { for (i = 0; !e && i < x[2]; i++) { e |= ((nat->nat_osrcaddr & x[i + 4]) == x[i + 3]); } } if (nat->nat_v[1] == 4) { for (i = 0; !e && i < x[2]; i++) { e |= ((nat->nat_nsrcaddr & x[i + 4]) == x[i + 3]); } } break; case IPF_EXP_IP_DSTADDR : 
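		/*
		 * Editorial note, not upstream text: each expression in the
		 * match array is laid out as { opcode, negate, count,
		 * operands... } and the walk advances by 3 + count words,
		 * so an IPv4 address test carries address/mask pairs
		 * beginning at x[3]; the result of every test is XORed
		 * with the negate word via e ^= x[1].
		 */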
if (nat->nat_v[0] == 4) { for (i = 0; !e && i < x[2]; i++) { e |= ((nat->nat_odstaddr & x[i + 4]) == x[i + 3]); } } if (nat->nat_v[1] == 4) { for (i = 0; !e && i < x[2]; i++) { e |= ((nat->nat_ndstaddr & x[i + 4]) == x[i + 3]); } } break; case IPF_EXP_IP_ADDR : for (i = 0; !e && i < x[2]; i++) { if (nat->nat_v[0] == 4) { e |= ((nat->nat_osrcaddr & x[i + 4]) == x[i + 3]); } if (nat->nat_v[1] == 4) { e |= ((nat->nat_nsrcaddr & x[i + 4]) == x[i + 3]); } if (nat->nat_v[0] == 4) { e |= ((nat->nat_odstaddr & x[i + 4]) == x[i + 3]); } if (nat->nat_v[1] == 4) { e |= ((nat->nat_ndstaddr & x[i + 4]) == x[i + 3]); } } break; #ifdef USE_INET6 case IPF_EXP_IP6_SRCADDR : if (nat->nat_v[0] == 6) { for (i = 0; !e && i < x[3]; i++) { e |= IP6_MASKEQ(&nat->nat_osrc6, x + i + 7, x + i + 3); } } if (nat->nat_v[1] == 6) { for (i = 0; !e && i < x[3]; i++) { e |= IP6_MASKEQ(&nat->nat_nsrc6, x + i + 7, x + i + 3); } } break; case IPF_EXP_IP6_DSTADDR : if (nat->nat_v[0] == 6) { for (i = 0; !e && i < x[3]; i++) { e |= IP6_MASKEQ(&nat->nat_odst6, x + i + 7, x + i + 3); } } if (nat->nat_v[1] == 6) { for (i = 0; !e && i < x[3]; i++) { e |= IP6_MASKEQ(&nat->nat_ndst6, x + i + 7, x + i + 3); } } break; case IPF_EXP_IP6_ADDR : for (i = 0; !e && i < x[3]; i++) { if (nat->nat_v[0] == 6) { e |= IP6_MASKEQ(&nat->nat_osrc6, x + i + 7, x + i + 3); } if (nat->nat_v[0] == 6) { e |= IP6_MASKEQ(&nat->nat_odst6, x + i + 7, x + i + 3); } if (nat->nat_v[1] == 6) { e |= IP6_MASKEQ(&nat->nat_nsrc6, x + i + 7, x + i + 3); } if (nat->nat_v[1] == 6) { e |= IP6_MASKEQ(&nat->nat_ndst6, x + i + 7, x + i + 3); } } break; #endif case IPF_EXP_UDP_PORT : case IPF_EXP_TCP_PORT : for (i = 0; !e && i < x[2]; i++) { e |= (nat->nat_nsport == x[i + 3]) || (nat->nat_ndport == x[i + 3]); } break; case IPF_EXP_UDP_SPORT : case IPF_EXP_TCP_SPORT : for (i = 0; !e && i < x[2]; i++) { e |= (nat->nat_nsport == x[i + 3]); } break; case IPF_EXP_UDP_DPORT : case IPF_EXP_TCP_DPORT : for (i = 0; !e && i < x[2]; i++) { e |= (nat->nat_ndport == x[i + 3]); } break; case IPF_EXP_TCP_STATE : for (i = 0; !e && i < x[2]; i++) { e |= (nat->nat_tcpstate[0] == x[i + 3]) || (nat->nat_tcpstate[1] == x[i + 3]); } break; case IPF_EXP_IDLE_GT : e |= (ticks - nat->nat_touched > x[3]); break; } e ^= x[1]; if (!e) break; } return (e); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_gettable */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* data(I) - pointer to ioctl data */ /* */ /* This function handles ioctl requests for tables of nat information. */ /* At present the only table it deals with is the hash bucket statistics. 
*/
static int
ipf_nat_gettable(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, char *data)
{
	ipftable_t table;
	int error;

	error = ipf_inobj(softc, data, NULL, &table, IPFOBJ_GTABLE);
	if (error != 0)
		return (error);

	switch (table.ita_type)
	{
	case IPFTABLE_BUCKETS_NATIN :
		error = COPYOUT(softn->ipf_nat_stats.ns_side[0].ns_bucketlen,
				table.ita_table,
				softn->ipf_nat_table_sz * sizeof(u_int));
		break;

	case IPFTABLE_BUCKETS_NATOUT :
		error = COPYOUT(softn->ipf_nat_stats.ns_side[1].ns_bucketlen,
				table.ita_table,
				softn->ipf_nat_table_sz * sizeof(u_int));
		break;

	default :
		IPFERROR(60058);
		return (EINVAL);
	}

	if (error != 0) {
		IPFERROR(60059);
		error = EFAULT;
	}
	return (error);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_settimeout                                          */
/* Returns:     int - 0 = success, else failure                             */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              t(I)     - pointer to tunable                               */
/*              p(I)     - pointer to new tuning data                       */
/*                                                                          */
/* Apply the timeout change to the NAT timeout queues.                      */
/* ------------------------------------------------------------------------ */
int
ipf_nat_settimeout(struct ipf_main_softc_s *softc, ipftuneable_t *t,
	ipftuneval_t *p)
{
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;

	if (!strncmp(t->ipft_name, "tcp_", 4))
		return (ipf_settimeout_tcp(t, p, softn->ipf_nat_tcptq));

	if (!strcmp(t->ipft_name, "udp_timeout")) {
		ipf_apply_timeout(&softn->ipf_nat_udptq, p->ipftu_int);
	} else if (!strcmp(t->ipft_name, "udp_ack_timeout")) {
		ipf_apply_timeout(&softn->ipf_nat_udpacktq, p->ipftu_int);
	} else if (!strcmp(t->ipft_name, "icmp_timeout")) {
		ipf_apply_timeout(&softn->ipf_nat_icmptq, p->ipftu_int);
	} else if (!strcmp(t->ipft_name, "icmp_ack_timeout")) {
		ipf_apply_timeout(&softn->ipf_nat_icmpacktq, p->ipftu_int);
	} else if (!strcmp(t->ipft_name, "ip_timeout")) {
		ipf_apply_timeout(&softn->ipf_nat_iptq, p->ipftu_int);
	} else {
		IPFERROR(60062);
		return (ESRCH);
	}
	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_rehash                                              */
/* Returns:     int - 0 = success, else failure                             */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              t(I)     - pointer to tunable                               */
/*              p(I)     - pointer to new tuning data                       */
/*                                                                          */
/* To change the size of the basic NAT table, we need to first allocate the */
/* new tables (lest the allocation fail and we have nowhere to store all of */
/* the NAT sessions currently active) and then walk through the entire list */
/* and insert them into the table.  There are two tables here: an inbound   */
/* one and an outbound one.  Each NAT entry goes into each table once.      */
/* ------------------------------------------------------------------------ */
int
ipf_nat_rehash(ipf_main_softc_t *softc, ipftuneable_t *t, ipftuneval_t *p)
{
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	nat_t **newtab[2], *nat, **natp;
	u_int *bucketlens[2];
	u_int maxbucket;
	u_int newsize;
	int error;
	u_int hv;
	int i;

	newsize = p->ipftu_int;
	/*
	 * In case there is nothing to do...
	 */
	if (newsize == softn->ipf_nat_table_sz)
		return (0);

	newtab[0] = NULL;
	newtab[1] = NULL;
	bucketlens[0] = NULL;
	bucketlens[1] = NULL;
	/*
	 * Four tables depend on the NAT table size: the inbound lookup table,
	 * the outbound lookup table, and the hash chain lengths for each.
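	 * Worked example (editorial note, not upstream text): for a newsize
	 * of 1024 the maxbucket loop below iterates 11 times (1024 halved
	 * down to 1), i.e. log2(1024) + 1, and the result is doubled to 22
	 * as the permitted hash chain length.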
*/ KMALLOCS(newtab[0], nat_t **, newsize * sizeof(nat_t *)); if (newtab[0] == NULL) { error = 60063; goto badrehash; } KMALLOCS(newtab[1], nat_t **, newsize * sizeof(nat_t *)); if (newtab[1] == NULL) { error = 60064; goto badrehash; } KMALLOCS(bucketlens[0], u_int *, newsize * sizeof(u_int)); if (bucketlens[0] == NULL) { error = 60065; goto badrehash; } KMALLOCS(bucketlens[1], u_int *, newsize * sizeof(u_int)); if (bucketlens[1] == NULL) { error = 60066; goto badrehash; } /* * Recalculate the maximum length based on the new size. */ for (maxbucket = 0, i = newsize; i > 0; i >>= 1) maxbucket++; maxbucket *= 2; bzero((char *)newtab[0], newsize * sizeof(nat_t *)); bzero((char *)newtab[1], newsize * sizeof(nat_t *)); bzero((char *)bucketlens[0], newsize * sizeof(u_int)); bzero((char *)bucketlens[1], newsize * sizeof(u_int)); WRITE_ENTER(&softc->ipf_nat); if (softn->ipf_nat_table[0] != NULL) { KFREES(softn->ipf_nat_table[0], softn->ipf_nat_table_sz * sizeof(*softn->ipf_nat_table[0])); } softn->ipf_nat_table[0] = newtab[0]; if (softn->ipf_nat_table[1] != NULL) { KFREES(softn->ipf_nat_table[1], softn->ipf_nat_table_sz * sizeof(*softn->ipf_nat_table[1])); } softn->ipf_nat_table[1] = newtab[1]; if (softn->ipf_nat_stats.ns_side[0].ns_bucketlen != NULL) { KFREES(softn->ipf_nat_stats.ns_side[0].ns_bucketlen, softn->ipf_nat_table_sz * sizeof(u_int)); } softn->ipf_nat_stats.ns_side[0].ns_bucketlen = bucketlens[0]; if (softn->ipf_nat_stats.ns_side[1].ns_bucketlen != NULL) { KFREES(softn->ipf_nat_stats.ns_side[1].ns_bucketlen, softn->ipf_nat_table_sz * sizeof(u_int)); } softn->ipf_nat_stats.ns_side[1].ns_bucketlen = bucketlens[1]; #ifdef USE_INET6 if (softn->ipf_nat_stats.ns_side6[0].ns_bucketlen != NULL) { KFREES(softn->ipf_nat_stats.ns_side6[0].ns_bucketlen, softn->ipf_nat_table_sz * sizeof(u_int)); } softn->ipf_nat_stats.ns_side6[0].ns_bucketlen = bucketlens[0]; if (softn->ipf_nat_stats.ns_side6[1].ns_bucketlen != NULL) { KFREES(softn->ipf_nat_stats.ns_side6[1].ns_bucketlen, softn->ipf_nat_table_sz * sizeof(u_int)); } softn->ipf_nat_stats.ns_side6[1].ns_bucketlen = bucketlens[1]; #endif softn->ipf_nat_maxbucket = maxbucket; softn->ipf_nat_table_sz = newsize; /* * Walk through the entire list of NAT table entries and put them * in the new NAT table, somewhere. Because we have a new table, * we need to restart the counter of how many chains are in use. 
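	 * Illustrative example (editorial note, not upstream text): an entry
	 * whose saved nat_hv[0] is 39999 moves from bucket 39999 % 511 = 141
	 * in an old 511-slot table to bucket 39999 % 1024 = 63 in a new
	 * 1024-slot one, which is why every entry has to be relinked rather
	 * than copied across wholesale.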
	 */
	softn->ipf_nat_stats.ns_side[0].ns_inuse = 0;
	softn->ipf_nat_stats.ns_side[1].ns_inuse = 0;
#ifdef USE_INET6
	softn->ipf_nat_stats.ns_side6[0].ns_inuse = 0;
	softn->ipf_nat_stats.ns_side6[1].ns_inuse = 0;
#endif

	for (nat = softn->ipf_nat_instances; nat != NULL; nat = nat->nat_next) {
		nat->nat_hnext[0] = NULL;
		nat->nat_phnext[0] = NULL;
		hv = nat->nat_hv[0] % softn->ipf_nat_table_sz;

		natp = &softn->ipf_nat_table[0][hv];
		if (*natp) {
			(*natp)->nat_phnext[0] = &nat->nat_hnext[0];
		} else {
			NBUMPSIDE(0, ns_inuse);
		}
		nat->nat_phnext[0] = natp;
		nat->nat_hnext[0] = *natp;
		*natp = nat;
		NBUMPSIDE(0, ns_bucketlen[hv]);

		nat->nat_hnext[1] = NULL;
		nat->nat_phnext[1] = NULL;
		hv = nat->nat_hv[1] % softn->ipf_nat_table_sz;

		natp = &softn->ipf_nat_table[1][hv];
		if (*natp) {
			(*natp)->nat_phnext[1] = &nat->nat_hnext[1];
		} else {
			NBUMPSIDE(1, ns_inuse);
		}
		nat->nat_phnext[1] = natp;
		nat->nat_hnext[1] = *natp;
		*natp = nat;
		NBUMPSIDE(1, ns_bucketlen[hv]);
	}
	RWLOCK_EXIT(&softc->ipf_nat);

	return (0);

badrehash:
	if (bucketlens[1] != NULL) {
		KFREES(bucketlens[1], newsize * sizeof(u_int));
	}
	if (bucketlens[0] != NULL) {
		KFREES(bucketlens[0], newsize * sizeof(u_int));
	}
	if (newtab[0] != NULL) {
		KFREES(newtab[0], newsize * sizeof(nat_t *));
	}
	if (newtab[1] != NULL) {
		KFREES(newtab[1], newsize * sizeof(nat_t *));
	}
	IPFERROR(error);
	return (ENOMEM);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_rehash_rules                                        */
/* Returns:     int - 0 = success, else failure                             */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              t(I)     - pointer to tunable                               */
/*              p(I)     - pointer to new tuning data                       */
/*                                                                          */
/* All of the NAT rules hang off of a hash table that is searched with a    */
/* hash on address after the netmask is applied.  There are separate tables */
/* for inbound rules (rdr) and outbound rules (map).  The resizing will     */
/* only affect one of these two tables.                                     */
/* ------------------------------------------------------------------------ */
int
ipf_nat_rehash_rules(ipf_main_softc_t *softc, ipftuneable_t *t,
	ipftuneval_t *p)
{
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	ipnat_t **newtab, *np, ***old, **npp;
	u_int newsize;
	u_int mask;
	u_int hv;

	newsize = p->ipftu_int;
	/*
	 * In case there is nothing to do...
	 */
	if (newsize == *t->ipft_pint)
		return (0);

	/*
	 * All inbound rules have the NAT_REDIRECT bit set in in_redir and
	 * all outbound rules have either NAT_MAP or NAT_MAPBLK set.
	 * This if statement allows for some more generic code to be below,
	 * rather than two huge gobs of code that almost do the same thing.
*/ if (t->ipft_pint == &softn->ipf_nat_rdrrules_sz) { old = &softn->ipf_nat_rdr_rules; mask = NAT_REDIRECT; } else { old = &softn->ipf_nat_map_rules; mask = NAT_MAP|NAT_MAPBLK; } KMALLOCS(newtab, ipnat_t **, newsize * sizeof(ipnat_t *)); if (newtab == NULL) { IPFERROR(60067); return (ENOMEM); } bzero((char *)newtab, newsize * sizeof(ipnat_t *)); WRITE_ENTER(&softc->ipf_nat); if (*old != NULL) { KFREES(*old, *t->ipft_pint * sizeof(ipnat_t **)); } *old = newtab; *t->ipft_pint = newsize; for (np = softn->ipf_nat_list; np != NULL; np = np->in_next) { if ((np->in_redir & mask) == 0) continue; if (np->in_redir & NAT_REDIRECT) { np->in_rnext = NULL; hv = np->in_hv[0] % newsize; for (npp = newtab + hv; *npp != NULL; ) npp = &(*npp)->in_rnext; np->in_prnext = npp; *npp = np; } if (np->in_redir & NAT_MAP) { np->in_mnext = NULL; hv = np->in_hv[1] % newsize; for (npp = newtab + hv; *npp != NULL; ) npp = &(*npp)->in_mnext; np->in_pmnext = npp; *npp = np; } } RWLOCK_EXIT(&softc->ipf_nat); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_hostmap_rehash */ /* Returns: int - 0 = success, else failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* t(I) - pointer to tunable */ /* p(I) - pointer to new tuning data */ /* */ /* Allocate and populate a new hash table that will contain a reference to */ /* all of the active IP# translations currently in place. */ /* ------------------------------------------------------------------------ */ int ipf_nat_hostmap_rehash(ipf_main_softc_t *softc, ipftuneable_t *t, ipftuneval_t *p) { ipf_nat_softc_t *softn = softc->ipf_nat_soft; hostmap_t *hm, **newtab; u_int newsize; u_int hv; newsize = p->ipftu_int; /* * In case there is nothing to do... */ if (newsize == *t->ipft_pint) return (0); KMALLOCS(newtab, hostmap_t **, newsize * sizeof(hostmap_t *)); if (newtab == NULL) { IPFERROR(60068); return (ENOMEM); } bzero((char *)newtab, newsize * sizeof(hostmap_t *)); WRITE_ENTER(&softc->ipf_nat); if (softn->ipf_hm_maptable != NULL) { KFREES(softn->ipf_hm_maptable, softn->ipf_nat_hostmap_sz * sizeof(hostmap_t *)); } softn->ipf_hm_maptable = newtab; softn->ipf_nat_hostmap_sz = newsize; for (hm = softn->ipf_hm_maplist; hm != NULL; hm = hm->hm_next) { hv = hm->hm_hv % softn->ipf_nat_hostmap_sz; hm->hm_hnext = softn->ipf_hm_maptable[hv]; hm->hm_phnext = softn->ipf_hm_maptable + hv; if (softn->ipf_hm_maptable[hv] != NULL) softn->ipf_hm_maptable[hv]->hm_phnext = &hm->hm_hnext; softn->ipf_hm_maptable[hv] = hm; } RWLOCK_EXIT(&softc->ipf_nat); return (0); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_add_tq */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* ------------------------------------------------------------------------ */ ipftq_t * ipf_nat_add_tq(ipf_main_softc_t *softc, int ttl) { ipf_nat_softc_t *softs = softc->ipf_nat_soft; return (ipf_addtimeoutqueue(softc, &softs->ipf_nat_utqe, ttl)); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_uncreate */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* This function is used to remove a NAT entry from the NAT table when we */ /* decide that the create was actually in error. It is thus assumed that */ /* fin_flx will have both FI_NATED and FI_NATNEW set. Because we're dealing */ /* with the translated packet (not the original), we have to reverse the */ /* lookup. 
Although doing the lookup is expensive (relatively speaking), it          */
/* is not anticipated that this will be a frequent occurrence for normal    */
/* traffic patterns.                                                        */
/* ------------------------------------------------------------------------ */
void
ipf_nat_uncreate(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_nat_softc_t *softn = softc->ipf_nat_soft;
	int nflags;
	nat_t *nat;

	switch (fin->fin_p)
	{
	case IPPROTO_TCP :
		nflags = IPN_TCP;
		break;
	case IPPROTO_UDP :
		nflags = IPN_UDP;
		break;
	default :
		nflags = 0;
		break;
	}

	WRITE_ENTER(&softc->ipf_nat);

	if (fin->fin_out == 0) {
		nat = ipf_nat_outlookup(fin, nflags, (u_int)fin->fin_p,
					fin->fin_dst, fin->fin_src);
	} else {
		nat = ipf_nat_inlookup(fin, nflags, (u_int)fin->fin_p,
				       fin->fin_src, fin->fin_dst);
	}

	if (nat != NULL) {
		NBUMPSIDE(fin->fin_out, ns_uncreate[0]);
		ipf_nat_delete(softc, nat, NL_DESTROY);
	} else {
		NBUMPSIDE(fin->fin_out, ns_uncreate[1]);
	}

	RWLOCK_EXIT(&softc->ipf_nat);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_cmp_rules                                           */
/* Returns:     int - 0 == success, else rules do not match.                */
/* Parameters:  n1(I) - first rule to compare                               */
/*              n2(I) - second rule to compare                              */
/*                                                                          */
/* Compare two rules using pointers to each rule.  A straight bcmp will not */
/* work as some fields (such as in_dst, in_pkts) actually do change once    */
/* the rule has been loaded into the kernel.  Whilst this function returns  */
/* various non-zero values, they are strictly to aid in debugging; callers  */
/* of this function should simply care if the result is zero or not.        */
/* ------------------------------------------------------------------------ */
static int
ipf_nat_cmp_rules(ipnat_t *n1, ipnat_t *n2)
{
	if (n1->in_size != n2->in_size)
		return (1);

	if (bcmp((char *)&n1->in_v, (char *)&n2->in_v,
		 offsetof(ipnat_t, in_ndst) - offsetof(ipnat_t, in_v)) != 0)
		return (2);

	if (bcmp((char *)&n1->in_tuc, (char *)&n2->in_tuc,
		 n1->in_size - offsetof(ipnat_t, in_tuc)) != 0)
		return (3);
	if (n1->in_ndst.na_atype != n2->in_ndst.na_atype)
		return (5);
	if (n1->in_ndst.na_function != n2->in_ndst.na_function)
		return (6);
	if (bcmp((char *)&n1->in_ndst.na_addr, (char *)&n2->in_ndst.na_addr,
		 sizeof(n1->in_ndst.na_addr)))
		return (7);
	if (n1->in_nsrc.na_atype != n2->in_nsrc.na_atype)
		return (8);
	if (n1->in_nsrc.na_function != n2->in_nsrc.na_function)
		return (9);
	if (bcmp((char *)&n1->in_nsrc.na_addr, (char *)&n2->in_nsrc.na_addr,
		 sizeof(n1->in_nsrc.na_addr)))
		return (10);
	if (n1->in_odst.na_atype != n2->in_odst.na_atype)
		return (11);
	if (n1->in_odst.na_function != n2->in_odst.na_function)
		return (12);
	if (bcmp((char *)&n1->in_odst.na_addr, (char *)&n2->in_odst.na_addr,
		 sizeof(n1->in_odst.na_addr)))
		return (13);
	if (n1->in_osrc.na_atype != n2->in_osrc.na_atype)
		return (14);
	if (n1->in_osrc.na_function != n2->in_osrc.na_function)
		return (15);
	if (bcmp((char *)&n1->in_osrc.na_addr, (char *)&n2->in_osrc.na_addr,
		 sizeof(n1->in_osrc.na_addr)))
		return (16);

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nat_rule_init                                           */
/* Returns:     int - 0 == success, else error number.
*/ /* Parameters: softc(I) - pointer to soft context main structure */ /* softn(I) - pointer to NAT context structure */ /* n(I) - first rule to compare */ /* */ /* ------------------------------------------------------------------------ */ static int ipf_nat_rule_init(ipf_main_softc_t *softc, ipf_nat_softc_t *softn, ipnat_t *n) { int error = 0; if ((n->in_flags & IPN_SIPRANGE) != 0) n->in_nsrcatype = FRI_RANGE; if ((n->in_flags & IPN_DIPRANGE) != 0) n->in_ndstatype = FRI_RANGE; if ((n->in_flags & IPN_SPLIT) != 0) n->in_ndstatype = FRI_SPLIT; if ((n->in_redir & (NAT_MAP|NAT_REWRITE|NAT_DIVERTUDP)) != 0) n->in_spnext = n->in_spmin; if ((n->in_redir & (NAT_REWRITE|NAT_DIVERTUDP)) != 0) { n->in_dpnext = n->in_dpmin; } else if (n->in_redir == NAT_REDIRECT) { n->in_dpnext = n->in_dpmin; } n->in_stepnext = 0; switch (n->in_v[0]) { case 4 : error = ipf_nat_ruleaddrinit(softc, softn, n); if (error != 0) return (error); break; #ifdef USE_INET6 case 6 : error = ipf_nat6_ruleaddrinit(softc, softn, n); if (error != 0) return (error); break; #endif default : break; } if (n->in_redir == (NAT_DIVERTUDP|NAT_MAP)) { /* * Prerecord whether or not the destination of the divert * is local or not to the interface the packet is going * to be sent out. */ n->in_dlocal = ipf_deliverlocal(softc, n->in_v[1], n->in_ifps[1], &n->in_ndstip6); } return (error); } /* ------------------------------------------------------------------------ */ /* Function: ipf_nat_rule_fini */ /* Returns: int - 0 == success, else rules do not match. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* n(I) - rule to work on */ /* */ /* This function is used to release any objects that were referenced during */ /* the rule initialisation. This is useful both when free'ing the rule and */ /* when handling ioctls that need to initialise these fields but not */ /* actually use them after the ioctl processing has finished. */ /* ------------------------------------------------------------------------ */ static void ipf_nat_rule_fini(ipf_main_softc_t *softc, ipnat_t *n) { if (n->in_odst.na_atype == FRI_LOOKUP && n->in_odst.na_ptr != NULL) ipf_lookup_deref(softc, n->in_odst.na_type, n->in_odst.na_ptr); if (n->in_osrc.na_atype == FRI_LOOKUP && n->in_osrc.na_ptr != NULL) ipf_lookup_deref(softc, n->in_osrc.na_type, n->in_osrc.na_ptr); if (n->in_ndst.na_atype == FRI_LOOKUP && n->in_ndst.na_ptr != NULL) ipf_lookup_deref(softc, n->in_ndst.na_type, n->in_ndst.na_ptr); if (n->in_nsrc.na_atype == FRI_LOOKUP && n->in_nsrc.na_ptr != NULL) ipf_lookup_deref(softc, n->in_nsrc.na_type, n->in_nsrc.na_ptr); if (n->in_divmp != NULL) FREE_MB_T(n->in_divmp); } diff --git a/sys/netpfil/ipfw/nat64/nat64_translate.c b/sys/netpfil/ipfw/nat64/nat64_translate.c index 2924a9b2d19a..393780c969fe 100644 --- a/sys/netpfil/ipfw/nat64/nat64_translate.c +++ b/sys/netpfil/ipfw/nat64/nat64_translate.c @@ -1,1721 +1,1721 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2019 Yandex LLC * Copyright (c) 2015-2019 Andrey V. Elsukov * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_ipstealth.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ip_fw_nat64.h" #include "nat64_translate.h" typedef int (*nat64_output_t)(struct ifnet *, struct mbuf *, struct sockaddr *, struct nat64_counters *, void *); typedef int (*nat64_output_one_t)(struct mbuf *, struct nat64_counters *, void *); static struct nhop_object *nat64_find_route4(struct sockaddr_in *, struct mbuf *); static struct nhop_object *nat64_find_route6(struct sockaddr_in6 *, struct mbuf *); static int nat64_output_one(struct mbuf *, struct nat64_counters *, void *); static int nat64_output(struct ifnet *, struct mbuf *, struct sockaddr *, struct nat64_counters *, void *); static int nat64_direct_output_one(struct mbuf *, struct nat64_counters *, void *); static int nat64_direct_output(struct ifnet *, struct mbuf *, struct sockaddr *, struct nat64_counters *, void *); struct nat64_methods { nat64_output_t output; nat64_output_one_t output_one; }; static const struct nat64_methods nat64_netisr = { .output = nat64_output, .output_one = nat64_output_one }; static const struct nat64_methods nat64_direct = { .output = nat64_direct_output, .output_one = nat64_direct_output_one }; /* These variables should be initialized explicitly on module loading */ VNET_DEFINE_STATIC(const struct nat64_methods *, nat64out); VNET_DEFINE_STATIC(const int *, nat64ipstealth); VNET_DEFINE_STATIC(const int *, nat64ip6stealth); #define V_nat64out VNET(nat64out) #define V_nat64ipstealth VNET(nat64ipstealth) #define V_nat64ip6stealth VNET(nat64ip6stealth) static const int stealth_on = 1; #ifndef IPSTEALTH static const int stealth_off = 0; #endif void nat64_set_output_method(int direct) { if (direct != 0) { V_nat64out = &nat64_direct; #ifdef IPSTEALTH /* Honor corresponding variables, if IPSTEALTH is defined */ V_nat64ipstealth = &V_ipstealth; V_nat64ip6stealth = &V_ip6stealth; #else /* otherwise we need to decrement HLIM/TTL for direct case */ V_nat64ipstealth = V_nat64ip6stealth = &stealth_off; #endif } else { V_nat64out = &nat64_netisr; /* Leave TTL/HLIM decrementing to forwarding code */ V_nat64ipstealth = V_nat64ip6stealth = &stealth_on; } } int nat64_get_output_method(void) { return (V_nat64out == &nat64_direct ? 
1: 0); } static void nat64_log(struct pfloghdr *logdata, struct mbuf *m, sa_family_t family) { logdata->dir = PF_OUT; logdata->af = family; ipfw_bpf_mtap2(logdata, PFLOG_HDRLEN, m); } static int nat64_direct_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct nat64_counters *stats, void *logdata) { int error; if (logdata != NULL) nat64_log(logdata, m, dst->sa_family); error = (*ifp->if_output)(ifp, m, dst, NULL); if (error != 0) NAT64STAT_INC(stats, oerrors); return (error); } static int nat64_direct_output_one(struct mbuf *m, struct nat64_counters *stats, void *logdata) { struct nhop_object *nh4 = NULL; struct nhop_object *nh6 = NULL; struct sockaddr_in6 dst6; struct sockaddr_in dst4; struct sockaddr *dst; struct ip6_hdr *ip6; struct ip *ip4; struct ifnet *ifp; int error; ip4 = mtod(m, struct ip *); error = 0; switch (ip4->ip_v) { case IPVERSION: dst4.sin_addr = ip4->ip_dst; nh4 = nat64_find_route4(&dst4, m); if (nh4 == NULL) { NAT64STAT_INC(stats, noroute4); error = EHOSTUNREACH; } else { ifp = nh4->nh_ifp; dst = (struct sockaddr *)&dst4; } break; case (IPV6_VERSION >> 4): ip6 = mtod(m, struct ip6_hdr *); dst6.sin6_addr = ip6->ip6_dst; nh6 = nat64_find_route6(&dst6, m); if (nh6 == NULL) { NAT64STAT_INC(stats, noroute6); error = EHOSTUNREACH; } else { ifp = nh6->nh_ifp; dst = (struct sockaddr *)&dst6; } break; default: m_freem(m); NAT64STAT_INC(stats, dropped); DPRINTF(DP_DROPS, "dropped due to unknown IP version"); return (EAFNOSUPPORT); } if (error != 0) { m_freem(m); return (EHOSTUNREACH); } if (logdata != NULL) nat64_log(logdata, m, dst->sa_family); error = (*ifp->if_output)(ifp, m, dst, NULL); if (error != 0) NAT64STAT_INC(stats, oerrors); return (error); } static int nat64_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct nat64_counters *stats, void *logdata) { struct ip *ip4; int ret, af; ip4 = mtod(m, struct ip *); switch (ip4->ip_v) { case IPVERSION: af = AF_INET; ret = NETISR_IP; break; case (IPV6_VERSION >> 4): af = AF_INET6; ret = NETISR_IPV6; break; default: m_freem(m); NAT64STAT_INC(stats, dropped); DPRINTF(DP_DROPS, "unknown IP version"); return (EAFNOSUPPORT); } if (logdata != NULL) nat64_log(logdata, m, af); if (m->m_pkthdr.rcvif == NULL) m->m_pkthdr.rcvif = V_loif; ret = netisr_queue(ret, m); if (ret != 0) NAT64STAT_INC(stats, oerrors); return (ret); } static int nat64_output_one(struct mbuf *m, struct nat64_counters *stats, void *logdata) { return (nat64_output(NULL, m, NULL, stats, logdata)); } /* * Check the given IPv6 prefix and length according to RFC6052: * The prefixes can only have one of the following lengths: * 32, 40, 48, 56, 64, or 96 (The Well-Known Prefix is 96 bits long). * Returns zero on success, otherwise EINVAL. 
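 * (Editorial note, not upstream text: under these rules
 * nat64_check_prefix6() accepts, say, 2001:db8::/32 or 64:ff9b::/96,
 * but rejects 64:ff9b::/64 because the Well-Known Prefix may only be
 * used at length 96, and rejects any prefix with non-zero bits 64..71.)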
*/ int nat64_check_prefixlen(int length) { switch (length) { case 32: case 40: case 48: case 56: case 64: case 96: return (0); } return (EINVAL); } int nat64_check_prefix6(const struct in6_addr *prefix, int length) { if (nat64_check_prefixlen(length) != 0) return (EINVAL); /* Well-known prefix has 96 prefix length */ if (IN6_IS_ADDR_WKPFX(prefix) && length != 96) return (EINVAL); /* Bits 64 to 71 must be set to zero */ if (prefix->__u6_addr.__u6_addr8[8] != 0) return (EINVAL); /* Some extra checks */ if (IN6_IS_ADDR_MULTICAST(prefix) || IN6_IS_ADDR_UNSPECIFIED(prefix) || IN6_IS_ADDR_LOOPBACK(prefix)) return (EINVAL); return (0); } int nat64_check_private_ip4(const struct nat64_config *cfg, in_addr_t ia) { if (cfg->flags & NAT64_ALLOW_PRIVATE) return (0); /* WKPFX must not be used to represent non-global IPv4 addresses */ if (cfg->flags & NAT64_WKPFX) { /* IN_PRIVATE */ if ((ia & htonl(0xff000000)) == htonl(0x0a000000) || (ia & htonl(0xfff00000)) == htonl(0xac100000) || (ia & htonl(0xffff0000)) == htonl(0xc0a80000)) return (1); /* * RFC 5735: * 192.0.0.0/24 - reserved for IETF protocol assignments * 192.88.99.0/24 - for use as 6to4 relay anycast addresses * 198.18.0.0/15 - for use in benchmark tests * 192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24 - for use * in documentation and example code */ if ((ia & htonl(0xffffff00)) == htonl(0xc0000000) || (ia & htonl(0xffffff00)) == htonl(0xc0586300) || (ia & htonl(0xfffffe00)) == htonl(0xc6120000) || (ia & htonl(0xffffff00)) == htonl(0xc0000200) || (ia & htonl(0xfffffe00)) == htonl(0xc6336400) || (ia & htonl(0xffffff00)) == htonl(0xcb007100)) return (1); } return (0); } /* * Embed @ia IPv4 address into @ip6 IPv6 address. * Place to embedding determined from prefix length @plen. */ void nat64_embed_ip4(struct in6_addr *ip6, int plen, in_addr_t ia) { switch (plen) { case 32: case 96: ip6->s6_addr32[plen / 32] = ia; break; case 40: case 48: case 56: /* * Preserve prefix bits. * Since suffix bits should be zero and reserved for future * use, we just overwrite the whole word, where they are. */ ip6->s6_addr32[1] &= 0xffffffff << (32 - plen % 32); #if BYTE_ORDER == BIG_ENDIAN ip6->s6_addr32[1] |= ia >> (plen % 32); ip6->s6_addr32[2] = ia << (24 - plen % 32); #elif BYTE_ORDER == LITTLE_ENDIAN ip6->s6_addr32[1] |= ia << (plen % 32); ip6->s6_addr32[2] = ia >> (24 - plen % 32); #endif break; case 64: #if BYTE_ORDER == BIG_ENDIAN ip6->s6_addr32[2] = ia >> 8; ip6->s6_addr32[3] = ia << 24; #elif BYTE_ORDER == LITTLE_ENDIAN ip6->s6_addr32[2] = ia << 8; ip6->s6_addr32[3] = ia >> 24; #endif break; default: panic("Wrong plen: %d", plen); }; /* * Bits 64 to 71 of the address are reserved for compatibility * with the host identifier format defined in the IPv6 addressing * architecture [RFC4291]. These bits MUST be set to zero. */ ip6->s6_addr8[8] = 0; } in_addr_t nat64_extract_ip4(const struct in6_addr *ip6, int plen) { in_addr_t ia; /* * According to RFC 6052 p2.2: * IPv4-embedded IPv6 addresses are composed of a variable-length * prefix, the embedded IPv4 address, and a variable length suffix. * The suffix bits are reserved for future extensions and SHOULD * be set to zero. 
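	 * A worked example (editorial note, not upstream text): with the
	 * Well-Known Prefix 64:ff9b::/96 the IPv4 address 192.0.2.33 is
	 * carried in the final 32 bits, so extracting it from
	 * 64:ff9b::c000:221 just reads s6_addr32[3]; the checks below
	 * reject addresses whose supposedly-zero suffix bits are set.
	 */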
*/ switch (plen) { case 32: if (ip6->s6_addr32[3] != 0 || ip6->s6_addr32[2] != 0) goto badip6; break; case 40: if (ip6->s6_addr32[3] != 0 || (ip6->s6_addr32[2] & htonl(0xff00ffff)) != 0) goto badip6; break; case 48: if (ip6->s6_addr32[3] != 0 || (ip6->s6_addr32[2] & htonl(0xff0000ff)) != 0) goto badip6; break; case 56: if (ip6->s6_addr32[3] != 0 || ip6->s6_addr8[8] != 0) goto badip6; break; case 64: if (ip6->s6_addr8[8] != 0 || (ip6->s6_addr32[3] & htonl(0x00ffffff)) != 0) goto badip6; }; switch (plen) { case 32: case 96: ia = ip6->s6_addr32[plen / 32]; break; case 40: case 48: case 56: #if BYTE_ORDER == BIG_ENDIAN ia = (ip6->s6_addr32[1] << (plen % 32)) | (ip6->s6_addr32[2] >> (24 - plen % 32)); #elif BYTE_ORDER == LITTLE_ENDIAN ia = (ip6->s6_addr32[1] >> (plen % 32)) | (ip6->s6_addr32[2] << (24 - plen % 32)); #endif break; case 64: #if BYTE_ORDER == BIG_ENDIAN ia = (ip6->s6_addr32[2] << 8) | (ip6->s6_addr32[3] >> 24); #elif BYTE_ORDER == LITTLE_ENDIAN ia = (ip6->s6_addr32[2] >> 8) | (ip6->s6_addr32[3] << 24); #endif break; default: return (0); }; if (nat64_check_ip4(ia) == 0) return (ia); DPRINTF(DP_GENERIC | DP_DROPS, "invalid destination address: %08x", ia); return (0); badip6: DPRINTF(DP_GENERIC | DP_DROPS, "invalid IPv4-embedded IPv6 address"); return (0); } /* * According to RFC 1624 the equation for incremental checksum update is: * HC' = ~(~HC + ~m + m') -- [Eqn. 3] * HC' = HC - ~m - m' -- [Eqn. 4] * So, when we are replacing IPv4 addresses to IPv6, we * can assume, that new bytes previously were zeros, and vise versa - * when we replacing IPv6 addresses to IPv4, now unused bytes become * zeros. The payload length in pseudo header has bigger size, but one * half of it should be zero. Using the equation 4 we get: * HC' = HC - (~m0 + m0') -- m0 is first changed word * HC' = (HC - (~m0 + m0')) - (~m1 + m1') -- m1 is second changed word * HC' = HC - ~m0 - m0' - ~m1 - m1' - ... = * = HC - sum(~m[i] + m'[i]) * * The function result should be used as follows: * IPv6 to IPv4: HC' = cksum_add(HC, result) * IPv4 to IPv6: HC' = cksum_add(HC, ~result) */ static uint16_t nat64_cksum_convert(struct ip6_hdr *ip6, struct ip *ip) { uint32_t sum; uint16_t *p; sum = ~ip->ip_src.s_addr >> 16; sum += ~ip->ip_src.s_addr & 0xffff; sum += ~ip->ip_dst.s_addr >> 16; sum += ~ip->ip_dst.s_addr & 0xffff; for (p = (uint16_t *)&ip6->ip6_src; p < (uint16_t *)(&ip6->ip6_src + 2); p++) sum += *p; while (sum >> 16) sum = (sum & 0xffff) + (sum >> 16); return (sum); } static void nat64_init_ip4hdr(const struct ip6_hdr *ip6, const struct ip6_frag *frag, uint16_t plen, uint8_t proto, struct ip *ip) { /* assume addresses are already initialized */ ip->ip_v = IPVERSION; ip->ip_hl = sizeof(*ip) >> 2; ip->ip_tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; ip->ip_len = htons(sizeof(*ip) + plen); ip->ip_ttl = ip6->ip6_hlim; if (*V_nat64ip6stealth == 0) ip->ip_ttl -= IPV6_HLIMDEC; ip->ip_sum = 0; ip->ip_p = (proto == IPPROTO_ICMPV6) ? 
IPPROTO_ICMP: proto; - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); if (frag != NULL) { ip->ip_off = htons(ntohs(frag->ip6f_offlg) >> 3); if (frag->ip6f_offlg & IP6F_MORE_FRAG) ip->ip_off |= htons(IP_MF); } else { ip->ip_off = htons(IP_DF); } ip->ip_sum = in_cksum_hdr(ip); } #define FRAGSZ(mtu) ((mtu) - sizeof(struct ip6_hdr) - sizeof(struct ip6_frag)) static NAT64NOINLINE int nat64_fragment6(struct nat64_counters *stats, struct ip6_hdr *ip6, struct mbufq *mq, struct mbuf *m, uint32_t mtu, uint16_t ip_id, uint16_t ip_off) { struct ip6_frag ip6f; struct mbuf *n; uint16_t hlen, len, offset; int plen; plen = ntohs(ip6->ip6_plen); hlen = sizeof(struct ip6_hdr); /* Fragmentation isn't needed */ if (ip_off == 0 && plen <= mtu - hlen) { M_PREPEND(m, hlen, M_NOWAIT); if (m == NULL) { NAT64STAT_INC(stats, nomem); return (ENOMEM); } bcopy(ip6, mtod(m, void *), hlen); if (mbufq_enqueue(mq, m) != 0) { m_freem(m); NAT64STAT_INC(stats, dropped); DPRINTF(DP_DROPS, "dropped due to mbufq overflow"); return (ENOBUFS); } return (0); } hlen += sizeof(struct ip6_frag); ip6f.ip6f_reserved = 0; ip6f.ip6f_nxt = ip6->ip6_nxt; ip6->ip6_nxt = IPPROTO_FRAGMENT; if (ip_off != 0) { /* * We have got an IPv4 fragment. * Use offset value and ip_id from original fragment. */ ip6f.ip6f_ident = htonl(ntohs(ip_id)); offset = (ntohs(ip_off) & IP_OFFMASK) << 3; NAT64STAT_INC(stats, ifrags); } else { /* The packet size exceeds interface MTU */ ip6f.ip6f_ident = htonl(ip6_randomid()); offset = 0; /* First fragment*/ } while (plen > 0 && m != NULL) { n = NULL; len = FRAGSZ(mtu) & ~7; if (len > plen) len = plen; ip6->ip6_plen = htons(len + sizeof(ip6f)); ip6f.ip6f_offlg = ntohs(offset); if (len < plen || (ip_off & htons(IP_MF)) != 0) ip6f.ip6f_offlg |= IP6F_MORE_FRAG; offset += len; plen -= len; if (plen > 0) { n = m_split(m, len, M_NOWAIT); if (n == NULL) goto fail; } M_PREPEND(m, hlen, M_NOWAIT); if (m == NULL) goto fail; bcopy(ip6, mtod(m, void *), sizeof(struct ip6_hdr)); bcopy(&ip6f, mtodo(m, sizeof(struct ip6_hdr)), sizeof(struct ip6_frag)); if (mbufq_enqueue(mq, m) != 0) goto fail; m = n; } NAT64STAT_ADD(stats, ofrags, mbufq_len(mq)); return (0); fail: if (m != NULL) m_freem(m); if (n != NULL) m_freem(n); mbufq_drain(mq); NAT64STAT_INC(stats, nomem); return (ENOMEM); } static struct nhop_object * nat64_find_route6(struct sockaddr_in6 *dst, struct mbuf *m) { struct nhop_object *nh; NET_EPOCH_ASSERT(); nh = fib6_lookup(M_GETFIB(m), &dst->sin6_addr, 0, NHR_NONE, 0); if (nh == NULL) return (NULL); if (nh->nh_flags & (NHF_BLACKHOLE | NHF_REJECT)) return (NULL); dst->sin6_family = AF_INET6; dst->sin6_len = sizeof(*dst); if (nh->nh_flags & NHF_GATEWAY) dst->sin6_addr = nh->gw6_sa.sin6_addr; dst->sin6_port = 0; dst->sin6_scope_id = 0; dst->sin6_flowinfo = 0; return (nh); } #define NAT64_ICMP6_PLEN 64 static NAT64NOINLINE void nat64_icmp6_reflect(struct mbuf *m, uint8_t type, uint8_t code, uint32_t mtu, struct nat64_counters *stats, void *logdata) { struct icmp6_hdr *icmp6; struct ip6_hdr *ip6, *oip6; struct mbuf *n; int len, plen, proto; len = 0; proto = nat64_getlasthdr(m, &len); if (proto < 0) { DPRINTF(DP_DROPS, "mbuf isn't contigious"); goto freeit; } /* * Do not send ICMPv6 in reply to ICMPv6 errors. 
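	 * (Editorial note, not upstream text: RFC 4443 gives error messages
	 * type values below 128 and ICMP6_ECHO_REQUEST is 128, so the
	 * icmp6_type < ICMP6_ECHO_REQUEST test below compactly matches
	 * every ICMPv6 error type.)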
*/ if (proto == IPPROTO_ICMPV6) { if (m->m_len < len + sizeof(*icmp6)) { DPRINTF(DP_DROPS, "mbuf isn't contigious"); goto freeit; } icmp6 = mtodo(m, len); if (icmp6->icmp6_type < ICMP6_ECHO_REQUEST || icmp6->icmp6_type == ND_REDIRECT) { DPRINTF(DP_DROPS, "do not send ICMPv6 in reply to " "ICMPv6 errors"); goto freeit; } /* * If there are extra headers between IPv6 and ICMPv6, * strip off them. */ if (len > sizeof(struct ip6_hdr)) { /* * NOTE: ipfw_chk already did m_pullup() and it is * expected that data is contigious from the start * of IPv6 header up to the end of ICMPv6 header. */ bcopy(mtod(m, caddr_t), mtodo(m, len - sizeof(struct ip6_hdr)), sizeof(struct ip6_hdr)); m_adj(m, len - sizeof(struct ip6_hdr)); } } /* if (icmp6_ratelimit(&ip6->ip6_src, type, code)) goto freeit; */ ip6 = mtod(m, struct ip6_hdr *); switch (type) { case ICMP6_DST_UNREACH: case ICMP6_PACKET_TOO_BIG: case ICMP6_TIME_EXCEEDED: case ICMP6_PARAM_PROB: break; default: goto freeit; } /* Calculate length of ICMPv6 payload */ len = (m->m_pkthdr.len > NAT64_ICMP6_PLEN) ? NAT64_ICMP6_PLEN: m->m_pkthdr.len; /* Create new ICMPv6 datagram */ plen = len + sizeof(struct icmp6_hdr); n = m_get2(sizeof(struct ip6_hdr) + plen + max_hdr, M_NOWAIT, MT_HEADER, M_PKTHDR); if (n == NULL) { NAT64STAT_INC(stats, nomem); m_freem(m); return; } /* * Move pkthdr from original mbuf. We should have initialized some * fields, because we can reinject this mbuf to netisr and it will * go through input path (it requires at least rcvif should be set). * Also do M_ALIGN() to reduce chances of need to allocate new mbuf * in the chain, when we will do M_PREPEND() or make some type of * tunneling. */ m_move_pkthdr(n, m); M_ALIGN(n, sizeof(struct ip6_hdr) + plen + max_hdr); n->m_len = n->m_pkthdr.len = sizeof(struct ip6_hdr) + plen; oip6 = mtod(n, struct ip6_hdr *); /* * Make IPv6 source address selection for reflected datagram. * nat64_check_ip6() doesn't allow scoped addresses, therefore * we use zero scopeid. */ if (in6_selectsrc_addr(M_GETFIB(n), &ip6->ip6_src, 0, n->m_pkthdr.rcvif, &oip6->ip6_src, NULL) != 0) { /* * Failed to find proper source address, drop the packet. 
*/ m_freem(n); goto freeit; } oip6->ip6_dst = ip6->ip6_src; oip6->ip6_nxt = IPPROTO_ICMPV6; oip6->ip6_flow = 0; oip6->ip6_vfc |= IPV6_VERSION; oip6->ip6_hlim = V_ip6_defhlim; oip6->ip6_plen = htons(plen); icmp6 = mtodo(n, sizeof(struct ip6_hdr)); icmp6->icmp6_cksum = 0; icmp6->icmp6_type = type; icmp6->icmp6_code = code; icmp6->icmp6_mtu = htonl(mtu); m_copydata(m, 0, len, mtodo(n, sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr))); icmp6->icmp6_cksum = in6_cksum(n, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), plen); m_freem(m); V_nat64out->output_one(n, stats, logdata); return; freeit: NAT64STAT_INC(stats, dropped); m_freem(m); } static struct nhop_object * nat64_find_route4(struct sockaddr_in *dst, struct mbuf *m) { struct nhop_object *nh; NET_EPOCH_ASSERT(); nh = fib4_lookup(M_GETFIB(m), dst->sin_addr, 0, NHR_NONE, 0); if (nh == NULL) return (NULL); if (nh->nh_flags & (NHF_BLACKHOLE | NHF_BROADCAST | NHF_REJECT)) return (NULL); dst->sin_family = AF_INET; dst->sin_len = sizeof(*dst); if (nh->nh_flags & NHF_GATEWAY) dst->sin_addr = nh->gw4_sa.sin_addr; dst->sin_port = 0; return (nh); } #define NAT64_ICMP_PLEN 64 static NAT64NOINLINE void nat64_icmp_reflect(struct mbuf *m, uint8_t type, uint8_t code, uint16_t mtu, struct nat64_counters *stats, void *logdata) { struct icmp *icmp; struct ip *ip, *oip; struct mbuf *n; int len, plen; ip = mtod(m, struct ip *); /* Do not send ICMP error if packet is not the first fragment */ if (ip->ip_off & ~ntohs(IP_MF|IP_DF)) { DPRINTF(DP_DROPS, "not first fragment"); goto freeit; } /* Do not send ICMP in reply to ICMP errors */ if (ip->ip_p == IPPROTO_ICMP) { if (m->m_len < (ip->ip_hl << 2)) { DPRINTF(DP_DROPS, "mbuf isn't contigious"); goto freeit; } icmp = mtodo(m, ip->ip_hl << 2); if (!ICMP_INFOTYPE(icmp->icmp_type)) { DPRINTF(DP_DROPS, "do not send ICMP in reply to " "ICMP errors"); goto freeit; } } switch (type) { case ICMP_UNREACH: case ICMP_TIMXCEED: case ICMP_PARAMPROB: break; default: goto freeit; } /* Calculate length of ICMP payload */ len = (m->m_pkthdr.len > NAT64_ICMP_PLEN) ? 
(ip->ip_hl << 2) + 8: m->m_pkthdr.len; /* Create new ICMPv4 datagram */ plen = len + sizeof(struct icmphdr) + sizeof(uint32_t); n = m_get2(sizeof(struct ip) + plen + max_hdr, M_NOWAIT, MT_HEADER, M_PKTHDR); if (n == NULL) { NAT64STAT_INC(stats, nomem); m_freem(m); return; } m_move_pkthdr(n, m); M_ALIGN(n, sizeof(struct ip) + plen + max_hdr); n->m_len = n->m_pkthdr.len = sizeof(struct ip) + plen; oip = mtod(n, struct ip *); oip->ip_v = IPVERSION; oip->ip_hl = sizeof(struct ip) >> 2; oip->ip_tos = 0; oip->ip_len = htons(n->m_pkthdr.len); oip->ip_ttl = V_ip_defttl; oip->ip_p = IPPROTO_ICMP; - ip_fillid(oip); + ip_fillid(oip, V_ip_random_id); oip->ip_off = htons(IP_DF); oip->ip_src = ip->ip_dst; oip->ip_dst = ip->ip_src; oip->ip_sum = 0; oip->ip_sum = in_cksum_hdr(oip); icmp = mtodo(n, sizeof(struct ip)); icmp->icmp_type = type; icmp->icmp_code = code; icmp->icmp_cksum = 0; icmp->icmp_pmvoid = 0; icmp->icmp_nextmtu = htons(mtu); m_copydata(m, 0, len, mtodo(n, sizeof(struct ip) + sizeof(struct icmphdr) + sizeof(uint32_t))); icmp->icmp_cksum = in_cksum_skip(n, sizeof(struct ip) + plen, sizeof(struct ip)); m_freem(m); V_nat64out->output_one(n, stats, logdata); return; freeit: NAT64STAT_INC(stats, dropped); m_freem(m); } /* Translate ICMP echo request/reply into ICMPv6 */ static void nat64_icmp_handle_echo(struct ip6_hdr *ip6, struct icmp6_hdr *icmp6, uint16_t id, uint8_t type) { uint16_t old; old = *(uint16_t *)icmp6; /* save type+code in one word */ icmp6->icmp6_type = type; /* Reflect ICMPv6 -> ICMPv4 type translation in the cksum */ icmp6->icmp6_cksum = cksum_adjust(icmp6->icmp6_cksum, old, *(uint16_t *)icmp6); if (id != 0) { old = icmp6->icmp6_id; icmp6->icmp6_id = id; /* Reflect ICMP id translation in the cksum */ icmp6->icmp6_cksum = cksum_adjust(icmp6->icmp6_cksum, old, id); } /* Reflect IPv6 pseudo header in the cksum */ icmp6->icmp6_cksum = ~in6_cksum_pseudo(ip6, ntohs(ip6->ip6_plen), IPPROTO_ICMPV6, ~icmp6->icmp6_cksum); } static NAT64NOINLINE struct mbuf * nat64_icmp_translate(struct mbuf *m, struct ip6_hdr *ip6, uint16_t icmpid, int offset, struct nat64_config *cfg) { struct ip ip; struct icmp *icmp; struct tcphdr *tcp; struct udphdr *udp; struct ip6_hdr *eip6; struct mbuf *n; uint32_t mtu; int len, hlen, plen; uint8_t type, code; if (m->m_len < offset + ICMP_MINLEN) m = m_pullup(m, offset + ICMP_MINLEN); if (m == NULL) { NAT64STAT_INC(&cfg->stats, nomem); return (m); } mtu = 0; icmp = mtodo(m, offset); /* RFC 7915 p4.2 */ switch (icmp->icmp_type) { case ICMP_ECHOREPLY: type = ICMP6_ECHO_REPLY; code = 0; break; case ICMP_UNREACH: type = ICMP6_DST_UNREACH; switch (icmp->icmp_code) { case ICMP_UNREACH_NET: case ICMP_UNREACH_HOST: case ICMP_UNREACH_SRCFAIL: case ICMP_UNREACH_NET_UNKNOWN: case ICMP_UNREACH_HOST_UNKNOWN: case ICMP_UNREACH_TOSNET: case ICMP_UNREACH_TOSHOST: code = ICMP6_DST_UNREACH_NOROUTE; break; case ICMP_UNREACH_PROTOCOL: type = ICMP6_PARAM_PROB; code = ICMP6_PARAMPROB_NEXTHEADER; break; case ICMP_UNREACH_PORT: code = ICMP6_DST_UNREACH_NOPORT; break; case ICMP_UNREACH_NEEDFRAG: type = ICMP6_PACKET_TOO_BIG; code = 0; /* XXX: needs an additional look */ mtu = max(IPV6_MMTU, ntohs(icmp->icmp_nextmtu) + 20); break; case ICMP_UNREACH_NET_PROHIB: case ICMP_UNREACH_HOST_PROHIB: case ICMP_UNREACH_FILTER_PROHIB: case ICMP_UNREACH_PRECEDENCE_CUTOFF: code = ICMP6_DST_UNREACH_ADMIN; break; default: DPRINTF(DP_DROPS, "Unsupported ICMP type %d, code %d", icmp->icmp_type, icmp->icmp_code); goto freeit; } break; case ICMP_TIMXCEED: type = ICMP6_TIME_EXCEEDED; code = icmp->icmp_code; 
break; case ICMP_ECHO: type = ICMP6_ECHO_REQUEST; code = 0; break; case ICMP_PARAMPROB: type = ICMP6_PARAM_PROB; switch (icmp->icmp_code) { case ICMP_PARAMPROB_ERRATPTR: case ICMP_PARAMPROB_LENGTH: code = ICMP6_PARAMPROB_HEADER; switch (icmp->icmp_pptr) { case 0: /* Version/IHL */ case 1: /* Type Of Service */ mtu = icmp->icmp_pptr; break; case 2: /* Total Length */ case 3: mtu = 4; /* Payload Length */ break; case 8: /* Time to Live */ mtu = 7; /* Hop Limit */ break; case 9: /* Protocol */ mtu = 6; /* Next Header */ break; case 12: /* Source address */ case 13: case 14: case 15: mtu = 8; break; case 16: /* Destination address */ case 17: case 18: case 19: mtu = 24; break; default: /* Silently drop */ DPRINTF(DP_DROPS, "Unsupported ICMP type %d," " code %d, pptr %d", icmp->icmp_type, icmp->icmp_code, icmp->icmp_pptr); goto freeit; } break; default: DPRINTF(DP_DROPS, "Unsupported ICMP type %d," " code %d, pptr %d", icmp->icmp_type, icmp->icmp_code, icmp->icmp_pptr); goto freeit; } break; default: DPRINTF(DP_DROPS, "Unsupported ICMP type %d, code %d", icmp->icmp_type, icmp->icmp_code); goto freeit; } /* * For echo request/reply we can use original payload, * but we need adjust icmp_cksum, because ICMPv6 cksum covers * IPv6 pseudo header and ICMPv6 types differs from ICMPv4. */ if (type == ICMP6_ECHO_REQUEST || type == ICMP6_ECHO_REPLY) { nat64_icmp_handle_echo(ip6, ICMP6(icmp), icmpid, type); return (m); } /* * For other types of ICMP messages we need to translate inner * IPv4 header to IPv6 header. * Assume ICMP src is the same as payload dst * E.g. we have ( GWsrc1 , NATIP1 ) in outer header * and ( NATIP1, Hostdst1 ) in ICMP copy header. * In that case, we already have map for NATIP1 and GWsrc1. * The only thing we need is to copy IPv6 map prefix to * Hostdst1. */ hlen = offset + ICMP_MINLEN; if (m->m_pkthdr.len < hlen + sizeof(struct ip) + ICMP_MINLEN) { DPRINTF(DP_DROPS, "Message is too short %d", m->m_pkthdr.len); goto freeit; } m_copydata(m, hlen, sizeof(struct ip), (char *)&ip); if (ip.ip_v != IPVERSION) { DPRINTF(DP_DROPS, "Wrong IP version %d", ip.ip_v); goto freeit; } hlen += ip.ip_hl << 2; /* Skip inner IP header */ if (nat64_check_ip4(ip.ip_src.s_addr) != 0 || nat64_check_ip4(ip.ip_dst.s_addr) != 0 || nat64_check_private_ip4(cfg, ip.ip_src.s_addr) != 0 || nat64_check_private_ip4(cfg, ip.ip_dst.s_addr) != 0) { DPRINTF(DP_DROPS, "IP addresses checks failed %04x -> %04x", ntohl(ip.ip_src.s_addr), ntohl(ip.ip_dst.s_addr)); goto freeit; } if (m->m_pkthdr.len < hlen + ICMP_MINLEN) { DPRINTF(DP_DROPS, "Message is too short %d", m->m_pkthdr.len); goto freeit; } #if 0 /* * Check that inner source matches the outer destination. * XXX: We need some method to convert IPv4 into IPv6 address here, * and compare IPv6 addresses. */ if (ip.ip_src.s_addr != nat64_get_ip4(&ip6->ip6_dst)) { DPRINTF(DP_GENERIC, "Inner source doesn't match destination ", "%04x vs %04x", ip.ip_src.s_addr, nat64_get_ip4(&ip6->ip6_dst)); goto freeit; } #endif /* * Create new mbuf for ICMPv6 datagram. * NOTE: len is data length just after inner IP header. 
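 * (RFC 4443 requires that an ICMPv6 error message not exceed the
 * minimum IPv6 MTU of 1280 octets; NAT64_ICMP6_PLEN is presumably
 * sized so that the truncation below keeps the result within that
 * bound.)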
*/
	len = m->m_pkthdr.len - hlen;
	if (sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) + len >
	    NAT64_ICMP6_PLEN)
		len = NAT64_ICMP6_PLEN - sizeof(struct icmp6_hdr) -
		    sizeof(struct ip6_hdr);
	plen = sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr) + len;
	n = m_get2(offset + plen + max_hdr, M_NOWAIT, MT_HEADER, M_PKTHDR);
	if (n == NULL) {
		NAT64STAT_INC(&cfg->stats, nomem);
		m_freem(m);
		return (NULL);
	}
	m_move_pkthdr(n, m);
	M_ALIGN(n, offset + plen + max_hdr);
	n->m_len = n->m_pkthdr.len = offset + plen;
	/* Adjust ip6_plen in outer header */
	ip6->ip6_plen = htons(plen);
	/* Construct new inner IPv6 header */
	eip6 = mtodo(n, offset + sizeof(struct icmp6_hdr));
	eip6->ip6_src = ip6->ip6_dst;
	/* Use the same prefix that we have in outer header */
	eip6->ip6_dst = ip6->ip6_src;
	MPASS(cfg->flags & NAT64_PLATPFX);
	nat64_embed_ip4(&eip6->ip6_dst, cfg->plat_plen, ip.ip_dst.s_addr);
	eip6->ip6_flow = htonl(ip.ip_tos << 20);
	eip6->ip6_vfc |= IPV6_VERSION;
	eip6->ip6_hlim = ip.ip_ttl;
	eip6->ip6_plen = htons(ntohs(ip.ip_len) - (ip.ip_hl << 2));
	eip6->ip6_nxt = (ip.ip_p == IPPROTO_ICMP) ? IPPROTO_ICMPV6: ip.ip_p;
	m_copydata(m, hlen, len, (char *)(eip6 + 1));
	/*
	 * We need to translate the source port in the inner ULP header,
	 * and adjust the ULP checksum.
	 */
	switch (ip.ip_p) {
	case IPPROTO_TCP:
		if (len < offsetof(struct tcphdr, th_sum))
			break;
		tcp = TCP(eip6 + 1);
		if (icmpid != 0) {
			tcp->th_sum = cksum_adjust(tcp->th_sum,
			    tcp->th_sport, icmpid);
			tcp->th_sport = icmpid;
		}
		tcp->th_sum = cksum_add(tcp->th_sum,
		    ~nat64_cksum_convert(eip6, &ip));
		break;
	case IPPROTO_UDP:
		if (len < offsetof(struct udphdr, uh_sum))
			break;
		udp = UDP(eip6 + 1);
		if (icmpid != 0) {
			udp->uh_sum = cksum_adjust(udp->uh_sum,
			    udp->uh_sport, icmpid);
			udp->uh_sport = icmpid;
		}
		udp->uh_sum = cksum_add(udp->uh_sum,
		    ~nat64_cksum_convert(eip6, &ip));
		break;
	case IPPROTO_ICMP:
		/*
		 * Check if this is an ICMP error message for an echo request
		 * that we sent, i.e. the ULP in the data containing the
		 * invoking packet is IPPROTO_ICMP and its type is ICMP_ECHO.
		 */
		icmp = (struct icmp *)(eip6 + 1);
		if (icmp->icmp_type != ICMP_ECHO) {
			m_freem(n);
			goto freeit;
		}
		/*
		 * To our client this original datagram should look like
		 * an ICMPv6 datagram of type ICMP6_ECHO_REQUEST.
		 * Thus we need to adjust icmp_cksum and convert the type
		 * from ICMP_ECHO to ICMP6_ECHO_REQUEST.
		 */
		nat64_icmp_handle_echo(eip6, ICMP6(icmp), icmpid,
		    ICMP6_ECHO_REQUEST);
	}
	m_freem(m);
	/* Convert ICMPv4 into ICMPv6 header */
	icmp = mtodo(n, offset);
	ICMP6(icmp)->icmp6_type = type;
	ICMP6(icmp)->icmp6_code = code;
	ICMP6(icmp)->icmp6_mtu = htonl(mtu);
	ICMP6(icmp)->icmp6_cksum = 0;
	ICMP6(icmp)->icmp6_cksum = cksum_add(
	    ~in6_cksum_pseudo(ip6, plen, IPPROTO_ICMPV6, 0),
	    in_cksum_skip(n, n->m_pkthdr.len, offset));
	return (n);
freeit:
	m_freem(m);
	NAT64STAT_INC(&cfg->stats, dropped);
	return (NULL);
}

int
nat64_getlasthdr(struct mbuf *m, int *offset)
{
	struct ip6_hdr *ip6;
	struct ip6_hbh *hbh;
	int proto, hlen;

	if (offset != NULL)
		hlen = *offset;
	else
		hlen = 0;
	if (m->m_len < hlen + sizeof(*ip6))
		return (-1);
	ip6 = mtodo(m, hlen);
	hlen += sizeof(*ip6);
	proto = ip6->ip6_nxt;
	/* Skip extension headers */
	while (proto == IPPROTO_HOPOPTS || proto == IPPROTO_ROUTING ||
	    proto == IPPROTO_DSTOPTS) {
		hbh = mtodo(m, hlen);
		/*
		 * We expect the mbuf to have contiguous data up to
		 * the upper level header.
		 */
		if (m->m_len < hlen)
			return (-1);
		/*
		 * We do not support the Jumbo Payload option (a zero
		 * ip6_plen together with a hop-by-hop header implies it,
		 * per RFC 2675), so return an error.
*/ if (proto == IPPROTO_HOPOPTS && ip6->ip6_plen == 0) return (-1); proto = hbh->ip6h_nxt; hlen += (hbh->ip6h_len + 1) << 3; } if (offset != NULL) *offset = hlen; return (proto); } int nat64_do_handle_ip4(struct mbuf *m, struct in6_addr *saddr, struct in6_addr *daddr, uint16_t lport, struct nat64_config *cfg, void *logdata) { struct nhop_object *nh; struct ip6_hdr ip6; struct sockaddr_in6 dst; struct ip *ip; struct mbufq mq; uint16_t ip_id, ip_off; uint16_t *csum; int plen, hlen; uint8_t proto; ip = mtod(m, struct ip*); if (*V_nat64ipstealth == 0 && ip->ip_ttl <= IPTTLDEC) { nat64_icmp_reflect(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, &cfg->stats, logdata); return (NAT64RETURN); } ip6.ip6_dst = *daddr; ip6.ip6_src = *saddr; hlen = ip->ip_hl << 2; plen = ntohs(ip->ip_len) - hlen; proto = ip->ip_p; /* Save ip_id and ip_off, both are in network byte order */ ip_id = ip->ip_id; ip_off = ip->ip_off & htons(IP_OFFMASK | IP_MF); /* Fragment length must be multiple of 8 octets */ if ((ip->ip_off & htons(IP_MF)) != 0 && (plen & 0x7) != 0) { nat64_icmp_reflect(m, ICMP_PARAMPROB, ICMP_PARAMPROB_LENGTH, 0, &cfg->stats, logdata); return (NAT64RETURN); } /* Fragmented ICMP is unsupported */ if (proto == IPPROTO_ICMP && ip_off != 0) { DPRINTF(DP_DROPS, "dropped due to fragmented ICMP"); NAT64STAT_INC(&cfg->stats, dropped); return (NAT64MFREE); } dst.sin6_addr = ip6.ip6_dst; nh = nat64_find_route6(&dst, m); if (nh == NULL) { NAT64STAT_INC(&cfg->stats, noroute6); nat64_icmp_reflect(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, &cfg->stats, logdata); return (NAT64RETURN); } if (nh->nh_mtu < plen + sizeof(ip6) && (ip->ip_off & htons(IP_DF)) != 0) { nat64_icmp_reflect(m, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, FRAGSZ(nh->nh_mtu) + sizeof(struct ip), &cfg->stats, logdata); return (NAT64RETURN); } ip6.ip6_flow = htonl(ip->ip_tos << 20); ip6.ip6_vfc |= IPV6_VERSION; ip6.ip6_hlim = ip->ip_ttl; if (*V_nat64ipstealth == 0) ip6.ip6_hlim -= IPTTLDEC; ip6.ip6_plen = htons(plen); ip6.ip6_nxt = (proto == IPPROTO_ICMP) ? IPPROTO_ICMPV6: proto; /* Handle delayed checksums if needed. */ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } /* Convert checksums. 
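 * The updates below use the incremental Internet checksum update of
 * RFC 1624 instead of re-summing the payload. A minimal sketch of the
 * per-word update that cksum_adjust() is assumed to implement
 * (RFC 1624, Eqn. 3: HC' = ~(~HC + ~m + m')):
 *
 *	static uint16_t
 *	cksum_update16(uint16_t hc, uint16_t m_old, uint16_t m_new)
 *	{
 *		uint32_t sum;
 *
 *		sum = (uint16_t)~hc + (uint16_t)~m_old + m_new;
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		return (~sum);
 *	}
 *
 * (the two folds add the end-around carries back in), while
 * nat64_cksum_convert() supplies the analogous delta between the
 * IPv6 and IPv4 pseudo-headers.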
*/
	switch (proto) {
	case IPPROTO_TCP:
		csum = &TCP(mtodo(m, hlen))->th_sum;
		if (lport != 0) {
			struct tcphdr *tcp = TCP(mtodo(m, hlen));
			*csum = cksum_adjust(*csum, tcp->th_dport, lport);
			tcp->th_dport = lport;
		}
		*csum = cksum_add(*csum, ~nat64_cksum_convert(&ip6, ip));
		break;
	case IPPROTO_UDP:
		csum = &UDP(mtodo(m, hlen))->uh_sum;
		if (lport != 0) {
			struct udphdr *udp = UDP(mtodo(m, hlen));
			*csum = cksum_adjust(*csum, udp->uh_dport, lport);
			udp->uh_dport = lport;
		}
		*csum = cksum_add(*csum, ~nat64_cksum_convert(&ip6, ip));
		break;
	case IPPROTO_ICMP:
		m = nat64_icmp_translate(m, &ip6, lport, hlen, cfg);
		if (m == NULL)	/* stats already accounted */
			return (NAT64RETURN);
	}

	m_adj(m, hlen);
	mbufq_init(&mq, 255);
	nat64_fragment6(&cfg->stats, &ip6, &mq, m, nh->nh_mtu, ip_id, ip_off);
	while ((m = mbufq_dequeue(&mq)) != NULL) {
		if (V_nat64out->output(nh->nh_ifp, m, (struct sockaddr *)&dst,
		    &cfg->stats, logdata) != 0)
			break;
		NAT64STAT_INC(&cfg->stats, opcnt46);
	}
	mbufq_drain(&mq);
	return (NAT64RETURN);
}

int
nat64_handle_icmp6(struct mbuf *m, int hlen, uint32_t aaddr, uint16_t aport,
    struct nat64_config *cfg, void *logdata)
{
	struct ip ip;
	struct icmp6_hdr *icmp6;
	struct ip6_frag *ip6f;
	struct ip6_hdr *ip6, *ip6i;
	uint32_t mtu;
	int plen, proto;
	uint8_t type, code;

	if (hlen == 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		if (nat64_check_ip6(&ip6->ip6_src) != 0 ||
		    nat64_check_ip6(&ip6->ip6_dst) != 0)
			return (NAT64SKIP);

		proto = nat64_getlasthdr(m, &hlen);
		if (proto != IPPROTO_ICMPV6) {
			DPRINTF(DP_DROPS,
			    "dropped due to mbuf isn't contiguous");
			NAT64STAT_INC(&cfg->stats, dropped);
			return (NAT64MFREE);
		}
	}

	/*
	 * Translate ICMPv6 type and code to ICMPv4 (RFC 7915).
	 * NOTE: ICMPv6 echo is handled by nat64_do_handle_ip6().
	 */
	icmp6 = mtodo(m, hlen);
	mtu = 0;
	switch (icmp6->icmp6_type) {
	case ICMP6_DST_UNREACH:
		type = ICMP_UNREACH;
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			code = ICMP_UNREACH_HOST;
			break;
		case ICMP6_DST_UNREACH_ADMIN:
			code = ICMP_UNREACH_HOST_PROHIB;
			break;
		case ICMP6_DST_UNREACH_NOPORT:
			code = ICMP_UNREACH_PORT;
			break;
		default:
			DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d,"
			    " code %d", icmp6->icmp6_type,
			    icmp6->icmp6_code);
			NAT64STAT_INC(&cfg->stats, dropped);
			return (NAT64MFREE);
		}
		break;
	case ICMP6_PACKET_TOO_BIG:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;
		mtu = ntohl(icmp6->icmp6_mtu);
		if (mtu < IPV6_MMTU) {
			DPRINTF(DP_DROPS, "Wrong MTU %d in ICMPv6 type %d,"
			    " code %d", mtu, icmp6->icmp6_type,
			    icmp6->icmp6_code);
			NAT64STAT_INC(&cfg->stats, dropped);
			return (NAT64MFREE);
		}
		/*
		 * Adjust MTU to reflect difference between
		 * IPv6 and IPv4 headers.
		 */
		mtu -= sizeof(struct ip6_hdr) - sizeof(struct ip);
		break;
	case ICMP6_TIME_EXCEEDED:
		type = ICMP_TIMXCEED;
		code = icmp6->icmp6_code;
		break;
	case ICMP6_PARAM_PROB:
		switch (icmp6->icmp6_code) {
		case ICMP6_PARAMPROB_HEADER:
			type = ICMP_PARAMPROB;
			code = ICMP_PARAMPROB_ERRATPTR;
			mtu = ntohl(icmp6->icmp6_pptr);
			switch (mtu) {
			case 0: /* Version/Traffic Class */
			case 1: /* Traffic Class/Flow Label */
				break;
			case 4: /* Payload Length */
			case 5:
				mtu = 2;
				break;
			case 6: /* Next Header */
				mtu = 9;
				break;
			case 7: /* Hop Limit */
				mtu = 8;
				break;
			default:
				if (mtu >= 8 && mtu <= 23) {
					mtu = 12; /* Source address */
					break;
				}
				if (mtu >= 24 && mtu <= 39) {
					mtu = 16; /* Destination address */
					break;
				}
				DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d,"
				    " code %d, pptr %d", icmp6->icmp6_type,
				    icmp6->icmp6_code, mtu);
				NAT64STAT_INC(&cfg->stats, dropped);
				return (NAT64MFREE);
			}
			break;
		case ICMP6_PARAMPROB_NEXTHEADER:
			type = ICMP_UNREACH;
			code = ICMP_UNREACH_PROTOCOL;
			break;
		default:
			DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d,"
			    " code %d, pptr %d", icmp6->icmp6_type,
			    icmp6->icmp6_code, ntohl(icmp6->icmp6_pptr));
			NAT64STAT_INC(&cfg->stats, dropped);
			return (NAT64MFREE);
		}
		break;
	default:
		DPRINTF(DP_DROPS, "Unsupported ICMPv6 type %d, code %d",
		    icmp6->icmp6_type, icmp6->icmp6_code);
		NAT64STAT_INC(&cfg->stats, dropped);
		return (NAT64MFREE);
	}

	hlen += sizeof(struct icmp6_hdr);
	if (m->m_pkthdr.len < hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN) {
		NAT64STAT_INC(&cfg->stats, dropped);
		DPRINTF(DP_DROPS, "Message is too short %d", m->m_pkthdr.len);
		return (NAT64MFREE);
	}
	/*
	 * We need at least ICMP_MINLEN bytes of original datagram payload
	 * to generate an ICMP message. It is nice that ICMP_MINLEN is equal
	 * to sizeof(struct ip6_frag). So, if the embedded datagram had a
	 * fragment header, we will not have to do m_pullup() again.
	 *
	 * What we have here:
	 * Outer header: (IPv6iGW, v4mapPRefix+v4exthost)
	 * Inner header: (v4mapPRefix+v4host, IPv6iHost) [sport, dport]
	 * We need to translate it to:
	 *
	 * Outer header: (alias_host, v4exthost)
	 * Inner header: (v4exthost, alias_host) [sport, alias_port]
	 *
	 * Assume the caller has checked that v4mapPRefix+v4host
	 * matches the configured prefix.
	 * The only two things we should be provided with are the mapping
	 * between IPv6iHost <> alias_host and between dport and alias_port.
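 *
 * For illustration (assuming the RFC 6052 well-known prefix
 * 64:ff9b::/96 as the configured PLAT prefix): v4mapPRefix+v4host
 * for the IPv4 host 192.0.2.1 would be 64:ff9b::c000:201, i.e. the
 * IPv4 address is embedded in the low-order 32 bits of the IPv6
 * address; nat64_embed_ip4() and nat64_extract_ip4() perform this
 * embedding and extraction for the configured prefix length.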
*/
	if (m->m_len < hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN)
		m = m_pullup(m, hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN);
	if (m == NULL) {
		NAT64STAT_INC(&cfg->stats, nomem);
		return (NAT64RETURN);
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6i = mtodo(m, hlen);
	ip6f = NULL;
	proto = ip6i->ip6_nxt;
	plen = ntohs(ip6i->ip6_plen);
	hlen += sizeof(struct ip6_hdr);
	if (proto == IPPROTO_FRAGMENT) {
		if (m->m_pkthdr.len < hlen + sizeof(struct ip6_frag) +
		    ICMP_MINLEN)
			goto fail;
		ip6f = mtodo(m, hlen);
		proto = ip6f->ip6f_nxt;
		plen -= sizeof(struct ip6_frag);
		hlen += sizeof(struct ip6_frag);
		/* Adjust MTU to reflect frag header size */
		if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG)
			mtu -= sizeof(struct ip6_frag);
	}
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) {
		DPRINTF(DP_DROPS, "Unsupported proto %d in the inner header",
		    proto);
		goto fail;
	}
	if (nat64_check_ip6(&ip6i->ip6_src) != 0 ||
	    nat64_check_ip6(&ip6i->ip6_dst) != 0) {
		DPRINTF(DP_DROPS, "Inner addresses do not pass the check");
		goto fail;
	}
	/* Check if outer dst is the same as inner src */
	if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6i->ip6_src)) {
		DPRINTF(DP_DROPS, "Inner src doesn't match outer dst");
		goto fail;
	}

	/* Now we need to make a fake IPv4 packet to generate ICMP message */
	ip.ip_dst.s_addr = aaddr;
	ip.ip_src.s_addr = nat64_extract_ip4(&ip6i->ip6_src, cfg->plat_plen);
	if (ip.ip_src.s_addr == 0)
		goto fail;
	/* XXX: Make fake ulp header */
	if (V_nat64out == &nat64_direct) /* init_ip4hdr will decrement it */
		ip6i->ip6_hlim += IPV6_HLIMDEC;
	nat64_init_ip4hdr(ip6i, ip6f, plen, proto, &ip);
	m_adj(m, hlen - sizeof(struct ip));
	bcopy(&ip, mtod(m, void *), sizeof(ip));
	nat64_icmp_reflect(m, type, code, (uint16_t)mtu, &cfg->stats,
	    logdata);
	return (NAT64RETURN);
fail:
	/*
	 * We must call m_freem() because the mbuf pointer could have
	 * been changed by m_pullup().
	 */
	m_freem(m);
	NAT64STAT_INC(&cfg->stats, dropped);
	return (NAT64RETURN);
}

int
nat64_do_handle_ip6(struct mbuf *m, uint32_t aaddr, uint16_t aport,
    struct nat64_config *cfg, void *logdata)
{
	struct ip ip;
	struct nhop_object *nh;
	struct sockaddr_in dst;
	struct ip6_frag *frag;
	struct ip6_hdr *ip6;
	struct icmp6_hdr *icmp6;
	uint16_t *csum;
	int plen, hlen, proto;

	/*
	 * XXX: we expect that ipfw_chk() did m_pullup() up to the upper
	 * level protocol's headers. Also we skip some checks that
	 * ip6_input(), ip6_forward(), ip6_fastfwd() and ipfw_chk() have
	 * already done.
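	 *
	 * Return convention, judging by how the callers use these values:
	 * NAT64SKIP means the packet was not handled and normal processing
	 * should continue, NAT64MFREE asks the caller to free the mbuf, and
	 * NAT64RETURN means the translator consumed the mbuf (translated,
	 * reflected as an ICMP error, or freed it).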
*/
	ip6 = mtod(m, struct ip6_hdr *);
	if (nat64_check_ip6(&ip6->ip6_src) != 0 ||
	    nat64_check_ip6(&ip6->ip6_dst) != 0) {
		return (NAT64SKIP);
	}

	/* Starting from this point we must not return zero */
	ip.ip_src.s_addr = aaddr;
	if (nat64_check_ip4(ip.ip_src.s_addr) != 0) {
		DPRINTF(DP_GENERIC | DP_DROPS, "invalid source address: %08x",
		    ip.ip_src.s_addr);
		NAT64STAT_INC(&cfg->stats, dropped);
		return (NAT64MFREE);
	}

	ip.ip_dst.s_addr = nat64_extract_ip4(&ip6->ip6_dst, cfg->plat_plen);
	if (ip.ip_dst.s_addr == 0) {
		NAT64STAT_INC(&cfg->stats, dropped);
		return (NAT64MFREE);
	}

	if (*V_nat64ip6stealth == 0 && ip6->ip6_hlim <= IPV6_HLIMDEC) {
		nat64_icmp6_reflect(m, ICMP6_TIME_EXCEEDED,
		    ICMP6_TIME_EXCEED_TRANSIT, 0, &cfg->stats, logdata);
		return (NAT64RETURN);
	}

	hlen = 0;
	plen = ntohs(ip6->ip6_plen);
	proto = nat64_getlasthdr(m, &hlen);
	if (proto < 0) {
		DPRINTF(DP_DROPS, "dropped due to mbuf isn't contiguous");
		NAT64STAT_INC(&cfg->stats, dropped);
		return (NAT64MFREE);
	}
	frag = NULL;
	if (proto == IPPROTO_FRAGMENT) {
		/* ipfw_chk should m_pullup() up to the frag header */
		if (m->m_len < hlen + sizeof(*frag)) {
			DPRINTF(DP_DROPS,
			    "dropped due to mbuf isn't contiguous");
			NAT64STAT_INC(&cfg->stats, dropped);
			return (NAT64MFREE);
		}
		frag = mtodo(m, hlen);
		proto = frag->ip6f_nxt;
		hlen += sizeof(*frag);
		/* Fragmented ICMPv6 is unsupported */
		if (proto == IPPROTO_ICMPV6) {
			DPRINTF(DP_DROPS, "dropped due to fragmented ICMPv6");
			NAT64STAT_INC(&cfg->stats, dropped);
			return (NAT64MFREE);
		}
		/* Fragment length must be multiple of 8 octets */
		if ((frag->ip6f_offlg & IP6F_MORE_FRAG) != 0 &&
		    ((plen + sizeof(struct ip6_hdr) - hlen) & 0x7) != 0) {
			nat64_icmp6_reflect(m, ICMP6_PARAM_PROB,
			    ICMP6_PARAMPROB_HEADER,
			    offsetof(struct ip6_hdr, ip6_plen), &cfg->stats,
			    logdata);
			return (NAT64RETURN);
		}
	}
	plen -= hlen - sizeof(struct ip6_hdr);
	if (plen < 0 || m->m_pkthdr.len < plen + hlen) {
		DPRINTF(DP_DROPS, "plen %d, pkthdr.len %d, hlen %d",
		    plen, m->m_pkthdr.len, hlen);
		NAT64STAT_INC(&cfg->stats, dropped);
		return (NAT64MFREE);
	}

	icmp6 = NULL;	/* Make gcc happy */
	if (proto == IPPROTO_ICMPV6) {
		icmp6 = mtodo(m, hlen);
		if (icmp6->icmp6_type != ICMP6_ECHO_REQUEST &&
		    icmp6->icmp6_type != ICMP6_ECHO_REPLY)
			return (nat64_handle_icmp6(m, hlen, aaddr, aport,
			    cfg, logdata));
	}
	dst.sin_addr.s_addr = ip.ip_dst.s_addr;
	nh = nat64_find_route4(&dst, m);
	if (nh == NULL) {
		NAT64STAT_INC(&cfg->stats, noroute4);
		nat64_icmp6_reflect(m, ICMP6_DST_UNREACH,
		    ICMP6_DST_UNREACH_NOROUTE, 0, &cfg->stats, logdata);
		return (NAT64RETURN);
	}
	if (nh->nh_mtu < plen + sizeof(ip)) {
		nat64_icmp6_reflect(m, ICMP6_PACKET_TOO_BIG, 0, nh->nh_mtu,
		    &cfg->stats, logdata);
		return (NAT64RETURN);
	}
	nat64_init_ip4hdr(ip6, frag, plen, proto, &ip);

	/* Handle delayed checksums if needed. */
	if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6) {
		in6_delayed_cksum(m, plen, hlen);
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
	}

	/* Convert checksums.
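	 * Only the pseudo-header contribution changes when the IPv6 header
	 * is replaced with an IPv4 one, so it is sufficient to add the
	 * difference returned by nat64_cksum_convert() and to patch the
	 * translated port with cksum_adjust(); see the RFC 1624 sketch in
	 * nat64_do_handle_ip4() above.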
*/ switch (proto) { case IPPROTO_TCP: csum = &TCP(mtodo(m, hlen))->th_sum; if (aport != 0) { struct tcphdr *tcp = TCP(mtodo(m, hlen)); *csum = cksum_adjust(*csum, tcp->th_sport, aport); tcp->th_sport = aport; } *csum = cksum_add(*csum, nat64_cksum_convert(ip6, &ip)); break; case IPPROTO_UDP: csum = &UDP(mtodo(m, hlen))->uh_sum; if (aport != 0) { struct udphdr *udp = UDP(mtodo(m, hlen)); *csum = cksum_adjust(*csum, udp->uh_sport, aport); udp->uh_sport = aport; } *csum = cksum_add(*csum, nat64_cksum_convert(ip6, &ip)); break; case IPPROTO_ICMPV6: /* Checksum in ICMPv6 covers pseudo header */ csum = &icmp6->icmp6_cksum; *csum = cksum_add(*csum, in6_cksum_pseudo(ip6, plen, IPPROTO_ICMPV6, 0)); /* Convert ICMPv6 types to ICMP */ proto = *(uint16_t *)icmp6; /* save old word for cksum_adjust */ if (icmp6->icmp6_type == ICMP6_ECHO_REQUEST) icmp6->icmp6_type = ICMP_ECHO; else /* ICMP6_ECHO_REPLY */ icmp6->icmp6_type = ICMP_ECHOREPLY; *csum = cksum_adjust(*csum, (uint16_t)proto, *(uint16_t *)icmp6); if (aport != 0) { uint16_t old_id = icmp6->icmp6_id; icmp6->icmp6_id = aport; *csum = cksum_adjust(*csum, old_id, aport); } break; }; m_adj(m, hlen - sizeof(ip)); bcopy(&ip, mtod(m, void *), sizeof(ip)); if (V_nat64out->output(nh->nh_ifp, m, (struct sockaddr *)&dst, &cfg->stats, logdata) == 0) NAT64STAT_INC(&cfg->stats, opcnt64); return (NAT64RETURN); } diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c index 98a2367b79b0..b2aaf3add25c 100644 --- a/sys/netpfil/pf/if_pfsync.c +++ b/sys/netpfil/pf/if_pfsync.c @@ -1,3281 +1,3281 @@ /*- * SPDX-License-Identifier: (BSD-2-Clause AND ISC) * * Copyright (c) 2002 Michael Shalayeff * Copyright (c) 2012 Gleb Smirnoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2009 David Gwynne * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ * * Revisions picked from OpenBSD after revision 1.110 import: * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input() * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates * 1.120, 1.175 - use monotonic time_uptime * 1.122 - reduce number of updates for non-TCP sessions * 1.125, 1.127 - rewrite merge or stale processing * 1.128 - cleanups * 1.146 - bzero() mbuf before sparsely filling it with data * 1.170 - SIOCSIFMTU checks * 1.126, 1.142 - deferred packets processing * 1.173 - correct expire time processing */ #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_pf.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x struct pfsync_bucket; struct pfsync_softc; union inet_template { struct ip ipv4; struct ip6_hdr ipv6; }; #define PFSYNC_MINPKT ( \ sizeof(union inet_template) + \ sizeof(struct pfsync_header) + \ sizeof(struct pfsync_subheader) ) static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *, struct pfsync_state_peer *); static int pfsync_in_clr(struct mbuf *, int, int, int, int); static int pfsync_in_ins(struct mbuf *, int, int, int, int); static int pfsync_in_iack(struct mbuf *, int, int, int, int); static int pfsync_in_upd(struct mbuf *, int, int, int, int); static int pfsync_in_upd_c(struct mbuf *, int, int, int, int); static int pfsync_in_ureq(struct mbuf *, int, int, int, int); static int pfsync_in_del_c(struct mbuf *, int, int, int, int); static int pfsync_in_bus(struct mbuf *, int, int, int, int); static int pfsync_in_tdb(struct mbuf *, int, int, int, int); static int pfsync_in_eof(struct mbuf *, int, int, int, int); static int pfsync_in_error(struct mbuf *, int, int, int, int); static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = { pfsync_in_clr, /* PFSYNC_ACT_CLR */ pfsync_in_ins, /* PFSYNC_ACT_INS_1301 */ pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ pfsync_in_upd, /* PFSYNC_ACT_UPD_1301 */ pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ pfsync_in_error, /* PFSYNC_ACT_DEL */ pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ pfsync_in_error, /* PFSYNC_ACT_INS_F */ pfsync_in_error, /* PFSYNC_ACT_DEL_F */ pfsync_in_bus, /* PFSYNC_ACT_BUS */ pfsync_in_tdb, /* PFSYNC_ACT_TDB */ pfsync_in_eof, /* PFSYNC_ACT_EOF */ pfsync_in_ins, /* PFSYNC_ACT_INS_1400 */ pfsync_in_upd, /* PFSYNC_ACT_UPD_1400 */ }; struct pfsync_q { void (*write)(struct pf_kstate *, void *); size_t len; u_int8_t action; }; /* We have the following sync queues */ enum pfsync_q_id { PFSYNC_Q_INS_1301, PFSYNC_Q_INS_1400, PFSYNC_Q_IACK, PFSYNC_Q_UPD_1301, PFSYNC_Q_UPD_1400, PFSYNC_Q_UPD_C, PFSYNC_Q_DEL_C, PFSYNC_Q_COUNT, }; /* Functions for building messages for given queue */ static void pfsync_out_state_1301(struct pf_kstate *, void *); static void 
pfsync_out_state_1400(struct pf_kstate *, void *); static void pfsync_out_iack(struct pf_kstate *, void *); static void pfsync_out_upd_c(struct pf_kstate *, void *); static void pfsync_out_del_c(struct pf_kstate *, void *); /* Attach those functions to queue */ static struct pfsync_q pfsync_qs[] = { { pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 }, { pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 }, { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, { pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 }, { pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 }, { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, { pfsync_out_del_c, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } }; /* Map queue to pf_kstate->sync_state */ static u_int8_t pfsync_qid_sstate[] = { PFSYNC_S_INS, /* PFSYNC_Q_INS_1301 */ PFSYNC_S_INS, /* PFSYNC_Q_INS_1400 */ PFSYNC_S_IACK, /* PFSYNC_Q_IACK */ PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1301 */ PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1400 */ PFSYNC_S_UPD_C, /* PFSYNC_Q_UPD_C */ PFSYNC_S_DEL_C, /* PFSYNC_Q_DEL_C */ }; /* Map pf_kstate->sync_state to queue */ static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t); static void pfsync_q_ins(struct pf_kstate *, int sync_state, bool); static void pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *); static void pfsync_update_state(struct pf_kstate *); static void pfsync_tx(struct pfsync_softc *, struct mbuf *); struct pfsync_upd_req_item { TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; struct pfsync_upd_req ur_msg; }; struct pfsync_deferral { struct pfsync_softc *pd_sc; TAILQ_ENTRY(pfsync_deferral) pd_entry; struct callout pd_tmo; struct pf_kstate *pd_st; struct mbuf *pd_m; }; struct pfsync_bucket { int b_id; struct pfsync_softc *b_sc; struct mtx b_mtx; struct callout b_tmo; int b_flags; #define PFSYNCF_BUCKET_PUSH 0x00000001 size_t b_len; TAILQ_HEAD(, pf_kstate) b_qs[PFSYNC_Q_COUNT]; TAILQ_HEAD(, pfsync_upd_req_item) b_upd_req_list; TAILQ_HEAD(, pfsync_deferral) b_deferrals; u_int b_deferred; uint8_t *b_plus; size_t b_pluslen; struct ifaltq b_snd; }; struct pfsync_softc { /* Configuration */ struct ifnet *sc_ifp; struct ifnet *sc_sync_if; struct ip_moptions sc_imo; struct ip6_moptions sc_im6o; struct sockaddr_storage sc_sync_peer; uint32_t sc_flags; uint8_t sc_maxupdates; union inet_template sc_template; struct mtx sc_mtx; uint32_t sc_version; /* Queued data */ struct pfsync_bucket *sc_buckets; /* Bulk update info */ struct mtx sc_bulk_mtx; uint32_t sc_ureq_sent; int sc_bulk_tries; uint32_t sc_ureq_received; int sc_bulk_hashid; uint64_t sc_bulk_stateid; uint32_t sc_bulk_creatorid; struct callout sc_bulk_tmo; struct callout sc_bulkfail_tmo; }; #define PFSYNC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define PFSYNC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define PFSYNC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define PFSYNC_BUCKET_LOCK(b) mtx_lock(&(b)->b_mtx) #define PFSYNC_BUCKET_UNLOCK(b) mtx_unlock(&(b)->b_mtx) #define PFSYNC_BUCKET_LOCK_ASSERT(b) mtx_assert(&(b)->b_mtx, MA_OWNED) #define PFSYNC_BLOCK(sc) mtx_lock(&(sc)->sc_bulk_mtx) #define PFSYNC_BUNLOCK(sc) mtx_unlock(&(sc)->sc_bulk_mtx) #define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED) #define PFSYNC_DEFER_TIMEOUT 20 static const char pfsyncname[] = "pfsync"; static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data"); VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL; #define V_pfsyncif 
VNET(pfsyncif) VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL; #define V_pfsync_swi_cookie VNET(pfsync_swi_cookie) VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie); #define V_pfsync_swi_ie VNET(pfsync_swi_ie) VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats); #define V_pfsyncstats VNET(pfsyncstats) VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW; #define V_pfsync_carp_adj VNET(pfsync_carp_adj) VNET_DEFINE_STATIC(unsigned int, pfsync_defer_timeout) = PFSYNC_DEFER_TIMEOUT; #define V_pfsync_defer_timeout VNET(pfsync_defer_timeout) static void pfsync_timeout(void *); static void pfsync_push(struct pfsync_bucket *); static void pfsync_push_all(struct pfsync_softc *); static void pfsyncintr(void *); static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *, struct in_mfilter *, struct in6_mfilter *); static void pfsync_multicast_cleanup(struct pfsync_softc *); static void pfsync_pointers_init(void); static void pfsync_pointers_uninit(void); static int pfsync_init(void); static void pfsync_uninit(void); static unsigned long pfsync_buckets; SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "PFSYNC"); SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(pfsyncstats), pfsyncstats, "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment"); SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN, &pfsync_buckets, 0, "Number of pfsync hash buckets"); SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(pfsync_defer_timeout), 0, "Deferred packet timeout (in ms)"); static int pfsync_clone_create(struct if_clone *, int, caddr_t); static void pfsync_clone_destroy(struct ifnet *); static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, struct pf_state_peer *); static int pfsyncoutput(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); static int pfsyncioctl(struct ifnet *, u_long, caddr_t); static int pfsync_defer(struct pf_kstate *, struct mbuf *); static void pfsync_undefer(struct pfsync_deferral *, int); static void pfsync_undefer_state_locked(struct pf_kstate *, int); static void pfsync_undefer_state(struct pf_kstate *, int); static void pfsync_defer_tmo(void *); static void pfsync_request_update(u_int32_t, u_int64_t); static bool pfsync_update_state_req(struct pf_kstate *); static void pfsync_drop_all(struct pfsync_softc *); static void pfsync_drop(struct pfsync_softc *, int); static void pfsync_sendout(int, int); static void pfsync_send_plus(void *, size_t); static void pfsync_bulk_start(void); static void pfsync_bulk_status(u_int8_t); static void pfsync_bulk_update(void *); static void pfsync_bulk_fail(void *); static void pfsync_detach_ifnet(struct ifnet *); static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *, struct pfsync_kstatus *); static int pfsync_kstatus_to_softc(struct pfsync_kstatus *, struct pfsync_softc *); #ifdef IPSEC static void pfsync_update_net_tdb(struct pfsync_tdb *); #endif static struct pfsync_bucket *pfsync_get_bucket(struct pfsync_softc *, struct pf_kstate *); #define PFSYNC_MAX_BULKTRIES 12 VNET_DEFINE(struct if_clone *, pfsync_cloner); #define V_pfsync_cloner VNET(pfsync_cloner) const struct in6_addr in6addr_linklocal_pfsync_group = {{{ 0xff, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0 }}}; static int 
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) { struct pfsync_softc *sc; struct ifnet *ifp; struct pfsync_bucket *b; int c; enum pfsync_q_id q; if (unit != 0) return (EINVAL); if (! pfsync_buckets) pfsync_buckets = mp_ncpus * 2; sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO); sc->sc_flags |= PFSYNCF_OK; sc->sc_maxupdates = 128; sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT; ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); if_initname(ifp, pfsyncname, unit); ifp->if_softc = sc; ifp->if_ioctl = pfsyncioctl; ifp->if_output = pfsyncoutput; ifp->if_type = IFT_PFSYNC; ifp->if_hdrlen = sizeof(struct pfsync_header); ifp->if_mtu = ETHERMTU; mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF); mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF); callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0); callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0); if_attach(ifp); bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets), M_PFSYNC, M_ZERO | M_WAITOK); for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF); b->b_id = c; b->b_sc = sc; b->b_len = PFSYNC_MINPKT; for (q = 0; q < PFSYNC_Q_COUNT; q++) TAILQ_INIT(&b->b_qs[q]); TAILQ_INIT(&b->b_upd_req_list); TAILQ_INIT(&b->b_deferrals); callout_init(&b->b_tmo, 1); b->b_snd.ifq_maxlen = ifqmaxlen; } V_pfsyncif = sc; return (0); } static void pfsync_clone_destroy(struct ifnet *ifp) { struct pfsync_softc *sc = ifp->if_softc; struct pfsync_bucket *b; int c, ret; for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; /* * At this stage, everything should have already been * cleared by pfsync_uninit(), and we have only to * drain callouts. */ PFSYNC_BUCKET_LOCK(b); while (b->b_deferred > 0) { struct pfsync_deferral *pd = TAILQ_FIRST(&b->b_deferrals); ret = callout_stop(&pd->pd_tmo); PFSYNC_BUCKET_UNLOCK(b); if (ret > 0) { pfsync_undefer(pd, 1); } else { callout_drain(&pd->pd_tmo); } PFSYNC_BUCKET_LOCK(b); } MPASS(b->b_deferred == 0); MPASS(TAILQ_EMPTY(&b->b_deferrals)); PFSYNC_BUCKET_UNLOCK(b); free(b->b_plus, M_PFSYNC); b->b_plus = NULL; b->b_pluslen = 0; callout_drain(&b->b_tmo); } callout_drain(&sc->sc_bulkfail_tmo); callout_drain(&sc->sc_bulk_tmo); if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); bpfdetach(ifp); if_detach(ifp); pfsync_drop_all(sc); if_free(ifp); pfsync_multicast_cleanup(sc); mtx_destroy(&sc->sc_mtx); mtx_destroy(&sc->sc_bulk_mtx); free(sc->sc_buckets, M_PFSYNC); free(sc, M_PFSYNC); V_pfsyncif = NULL; } static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, struct pf_state_peer *d) { if (s->scrub.scrub_flag && d->scrub == NULL) { d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO); if (d->scrub == NULL) return (ENOMEM); } return (0); } static int pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version) { struct pfsync_softc *sc = V_pfsyncif; #ifndef __NO_STRICT_ALIGNMENT struct pfsync_state_key key[2]; #endif struct pfsync_state_key *kw, *ks; struct pf_kstate *st = NULL; struct pf_state_key *skw = NULL, *sks = NULL; struct pf_krule *r = NULL; struct pfi_kkif *kif; struct pfi_kkif *rt_kif = NULL; struct pf_kpooladdr *rpool_first; int error; uint8_t rt = 0; PF_RULES_RASSERT(); if (sp->pfs_1301.creatorid == 0) { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: invalid creator id: %08x\n", __func__, ntohl(sp->pfs_1301.creatorid)); return (EINVAL); } if ((kif = 
pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: unknown interface: %s\n", __func__, sp->pfs_1301.ifname); if (flags & PFSYNC_SI_IOCTL) return (EINVAL); return (0); /* skip this state */ } /* * If the ruleset checksums match or the state is coming from the ioctl, * it's safe to associate the state with the rule of that number. */ if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) && (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) < pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) r = pf_main_ruleset.rules[ PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)]; else r = &V_pf_default_rule; /* * Check routing interface early on. Do it before allocating memory etc. * because there is a high chance there will be a lot more such states. */ switch (msg_version) { case PFSYNC_MSG_VERSION_1301: /* * On FreeBSD <= 13 the routing interface and routing operation * are not sent over pfsync. If the ruleset is identical, * though, we might be able to recover the routing information * from the local ruleset. */ if (r != &V_pf_default_rule) { struct pf_kpool *pool = &r->route; /* Backwards compatibility. */ if (TAILQ_EMPTY(&pool->list)) pool = &r->rdr; /* * The ruleset is identical, try to recover. If the rule * has a redirection pool with a single interface, there * is a chance that this interface is identical as on * the pfsync peer. If there's more than one interface, * give up, as we can't be sure that we will pick the * same one as the pfsync peer did. */ rpool_first = TAILQ_FIRST(&(pool->list)); if ((rpool_first == NULL) || (TAILQ_NEXT(rpool_first, entries) != NULL)) { DPFPRINTF(PF_DEBUG_MISC, ("%s: can't recover routing information " "because of empty or bad redirection pool\n", __func__)); return ((flags & PFSYNC_SI_IOCTL) ? EINVAL : 0); } rt = r->rt; rt_kif = rpool_first->kif; } else if (!PF_AZERO(&sp->pfs_1301.rt_addr, sp->pfs_1301.af)) { /* * Ruleset different, routing *supposedly* requested, * give up on recovering. */ DPFPRINTF(PF_DEBUG_MISC, ("%s: can't recover routing information " "because of different ruleset\n", __func__)); return ((flags & PFSYNC_SI_IOCTL) ? EINVAL : 0); } break; case PFSYNC_MSG_VERSION_1400: /* * On FreeBSD 14 and above we're not taking any chances. * We use the information synced to us. */ if (sp->pfs_1400.rt) { rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname); if (rt_kif == NULL) { DPFPRINTF(PF_DEBUG_MISC, ("%s: unknown route interface: %s\n", __func__, sp->pfs_1400.rt_ifname)); return ((flags & PFSYNC_SI_IOCTL) ? EINVAL : 0); } rt = sp->pfs_1400.rt; } break; } if ((r->max_states && counter_u64_fetch(r->states_cur) >= r->max_states)) goto cleanup; /* * XXXGL: consider M_WAITOK in ioctl path after. 
*/
	st = pf_alloc_state(M_NOWAIT);
	if (__predict_false(st == NULL))
		goto cleanup;

	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
		goto cleanup;
#ifndef	__NO_STRICT_ALIGNMENT
	bcopy(&sp->pfs_1301.key, key, sizeof(struct pfsync_state_key) * 2);
	kw = &key[PF_SK_WIRE];
	ks = &key[PF_SK_STACK];
#else
	kw = &sp->pfs_1301.key[PF_SK_WIRE];
	ks = &sp->pfs_1301.key[PF_SK_STACK];
#endif

	if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) ||
	    PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) ||
	    kw->port[0] != ks->port[0] ||
	    kw->port[1] != ks->port[1]) {
		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
		if (sks == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->pfs_1301.src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst))
		goto cleanup;

	/* Copy to state key(s). */
	skw->addr[0] = kw->addr[0];
	skw->addr[1] = kw->addr[1];
	skw->port[0] = kw->port[0];
	skw->port[1] = kw->port[1];
	skw->proto = sp->pfs_1301.proto;
	skw->af = sp->pfs_1301.af;
	if (sks != skw) {
		sks->addr[0] = ks->addr[0];
		sks->addr[1] = ks->addr[1];
		sks->port[0] = ks->port[0];
		sks->port[1] = ks->port[1];
		sks->proto = sp->pfs_1301.proto;
		sks->af = sp->pfs_1301.af;
	}

	/* copy to state */
	bcopy(&sp->pfs_1301.rt_addr, &st->act.rt_addr,
	    sizeof(st->act.rt_addr));
	st->creation = (time_uptime - ntohl(sp->pfs_1301.creation)) * 1000;
	st->expire = pf_get_uptime();
	if (sp->pfs_1301.expire) {
		uint32_t timeout;

		timeout = r->timeout[sp->pfs_1301.timeout];
		if (!timeout)
			timeout =
			    V_pf_default_rule.timeout[sp->pfs_1301.timeout];

		/* sp->expire may have been adaptively scaled by export. */
		st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000;
	}

	st->direction = sp->pfs_1301.direction;
	st->act.log = sp->pfs_1301.log;
	st->timeout = sp->pfs_1301.timeout;

	st->act.rt = rt;
	st->act.rt_kif = rt_kif;

	switch (msg_version) {
	case PFSYNC_MSG_VERSION_1301:
		st->state_flags = sp->pfs_1301.state_flags;
		/*
		 * In FreeBSD 13 pfsync lacks many attributes. Copy them
		 * from the rule if possible. If the rule can't be matched,
		 * clear any set options, as we can't recover their
		 * parameters.
		 */
		if (r == &V_pf_default_rule) {
			st->state_flags &= ~PFSTATE_SETMASK;
		} else {
			/*
			 * Similar to pf_rule_to_actions(). This code
			 * won't set the actions properly if they come
			 * from multiple "match" rules, as only the rule
			 * creating the state is sent over pfsync.
*/ st->act.qid = r->qid; st->act.pqid = r->pqid; st->act.rtableid = r->rtableid; if (r->scrub_flags & PFSTATE_SETTOS) st->act.set_tos = r->set_tos; st->act.min_ttl = r->min_ttl; st->act.max_mss = r->max_mss; st->state_flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID| PFSTATE_SETTOS|PFSTATE_SCRUB_TCP| PFSTATE_SETPRIO)); if (r->dnpipe || r->dnrpipe) { if (r->free_flags & PFRULE_DN_IS_PIPE) st->state_flags |= PFSTATE_DN_IS_PIPE; else st->state_flags &= ~PFSTATE_DN_IS_PIPE; } st->act.dnpipe = r->dnpipe; st->act.dnrpipe = r->dnrpipe; } break; case PFSYNC_MSG_VERSION_1400: st->state_flags = ntohs(sp->pfs_1400.state_flags); st->act.qid = ntohs(sp->pfs_1400.qid); st->act.pqid = ntohs(sp->pfs_1400.pqid); st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe); st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe); st->act.rtableid = ntohl(sp->pfs_1400.rtableid); st->act.min_ttl = sp->pfs_1400.min_ttl; st->act.set_tos = sp->pfs_1400.set_tos; st->act.max_mss = ntohs(sp->pfs_1400.max_mss); st->act.set_prio[0] = sp->pfs_1400.set_prio[0]; st->act.set_prio[1] = sp->pfs_1400.set_prio[1]; break; default: panic("%s: Unsupported pfsync_msg_version %d", __func__, msg_version); } st->id = sp->pfs_1301.id; st->creatorid = sp->pfs_1301.creatorid; pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src); pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst); st->rule = r; st->nat_rule = NULL; st->anchor = NULL; st->pfsync_time = time_uptime; st->sync_state = PFSYNC_S_NONE; if (!(flags & PFSYNC_SI_IOCTL)) st->state_flags |= PFSTATE_NOSYNC; if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0) goto cleanup_state; /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ counter_u64_add(r->states_cur, 1); counter_u64_add(r->states_tot, 1); if (!(flags & PFSYNC_SI_IOCTL)) { st->state_flags &= ~PFSTATE_NOSYNC; if (st->state_flags & PFSTATE_ACK) { struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK(b); pfsync_q_ins(st, PFSYNC_S_IACK, true); PFSYNC_BUCKET_UNLOCK(b); pfsync_push_all(sc); } } st->state_flags &= ~PFSTATE_ACK; PF_STATE_UNLOCK(st); return (0); cleanup: error = ENOMEM; if (skw == sks) sks = NULL; uma_zfree(V_pf_state_key_z, skw); uma_zfree(V_pf_state_key_z, sks); cleanup_state: /* pf_state_insert() frees the state keys. */ if (st) { st->timeout = PFTM_UNLINKED; /* appease an assert */ pf_free_state(st); } return (error); } #ifdef INET static int pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused) { struct pfsync_softc *sc = V_pfsyncif; struct mbuf *m = *mp; struct ip *ip = mtod(m, struct ip *); struct pfsync_header *ph; struct pfsync_subheader subh; int offset, len, flags = 0; int rv; uint16_t count; PF_RULES_RLOCK_TRACKER; *mp = NULL; V_pfsyncstats.pfsyncs_ipackets++; /* Verify that we have a sync interface configured. */ if (!sc || !sc->sc_sync_if || !V_pf_status.running || (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; /* verify that the packet came in on the right interface */ if (sc->sc_sync_if != m->m_pkthdr.rcvif) { V_pfsyncstats.pfsyncs_badif++; goto done; } if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); /* verify that the IP TTL is 255. 
*/ if (ip->ip_ttl != PFSYNC_DFLTTL) { V_pfsyncstats.pfsyncs_badttl++; goto done; } offset = ip->ip_hl << 2; if (m->m_pkthdr.len < offset + sizeof(*ph)) { V_pfsyncstats.pfsyncs_hdrops++; goto done; } if (offset + sizeof(*ph) > m->m_len) { if (m_pullup(m, offset + sizeof(*ph)) == NULL) { V_pfsyncstats.pfsyncs_hdrops++; return (IPPROTO_DONE); } ip = mtod(m, struct ip *); } ph = (struct pfsync_header *)((char *)ip + offset); /* verify the version */ if (ph->version != PFSYNC_VERSION) { V_pfsyncstats.pfsyncs_badver++; goto done; } len = ntohs(ph->len) + offset; if (m->m_pkthdr.len < len) { V_pfsyncstats.pfsyncs_badlen++; goto done; } /* * Trusting pf_chksum during packet processing, as well as seeking * in interface name tree, require holding PF_RULES_RLOCK(). */ PF_RULES_RLOCK(); if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) flags = PFSYNC_SI_CKSUM; offset += sizeof(*ph); while (offset <= len - sizeof(subh)) { m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); offset += sizeof(subh); if (subh.action >= PFSYNC_ACT_MAX) { V_pfsyncstats.pfsyncs_badact++; PF_RULES_RUNLOCK(); goto done; } count = ntohs(subh.count); V_pfsyncstats.pfsyncs_iacts[subh.action] += count; rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action); if (rv == -1) { PF_RULES_RUNLOCK(); return (IPPROTO_DONE); } offset += rv; } PF_RULES_RUNLOCK(); done: m_freem(m); return (IPPROTO_DONE); } #endif #ifdef INET6 static int pfsync6_input(struct mbuf **mp, int *offp __unused, int proto __unused) { struct pfsync_softc *sc = V_pfsyncif; struct mbuf *m = *mp; struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); struct pfsync_header *ph; struct pfsync_subheader subh; int offset, len, flags = 0; int rv; uint16_t count; PF_RULES_RLOCK_TRACKER; *mp = NULL; V_pfsyncstats.pfsyncs_ipackets++; /* Verify that we have a sync interface configured. */ if (!sc || !sc->sc_sync_if || !V_pf_status.running || (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; /* verify that the packet came in on the right interface */ if (sc->sc_sync_if != m->m_pkthdr.rcvif) { V_pfsyncstats.pfsyncs_badif++; goto done; } if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); /* verify that the IP TTL is 255. */ if (ip6->ip6_hlim != PFSYNC_DFLTTL) { V_pfsyncstats.pfsyncs_badttl++; goto done; } offset = sizeof(*ip6); if (m->m_pkthdr.len < offset + sizeof(*ph)) { V_pfsyncstats.pfsyncs_hdrops++; goto done; } if (offset + sizeof(*ph) > m->m_len) { if (m_pullup(m, offset + sizeof(*ph)) == NULL) { V_pfsyncstats.pfsyncs_hdrops++; return (IPPROTO_DONE); } ip6 = mtod(m, struct ip6_hdr *); } ph = (struct pfsync_header *)((char *)ip6 + offset); /* verify the version */ if (ph->version != PFSYNC_VERSION) { V_pfsyncstats.pfsyncs_badver++; goto done; } len = ntohs(ph->len) + offset; if (m->m_pkthdr.len < len) { V_pfsyncstats.pfsyncs_badlen++; goto done; } /* * Trusting pf_chksum during packet processing, as well as seeking * in interface name tree, require holding PF_RULES_RLOCK(). 
*/ PF_RULES_RLOCK(); if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) flags = PFSYNC_SI_CKSUM; offset += sizeof(*ph); while (offset <= len - sizeof(subh)) { m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); offset += sizeof(subh); if (subh.action >= PFSYNC_ACT_MAX) { V_pfsyncstats.pfsyncs_badact++; PF_RULES_RUNLOCK(); goto done; } count = ntohs(subh.count); V_pfsyncstats.pfsyncs_iacts[subh.action] += count; rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action); if (rv == -1) { PF_RULES_RUNLOCK(); return (IPPROTO_DONE); } offset += rv; } PF_RULES_RUNLOCK(); done: m_freem(m); return (IPPROTO_DONE); } #endif static int pfsync_in_clr(struct mbuf *m, int offset, int count, int flags, int action) { struct pfsync_clr *clr; struct mbuf *mp; int len = sizeof(*clr) * count; int i, offp; u_int32_t creatorid; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } clr = (struct pfsync_clr *)(mp->m_data + offp); for (i = 0; i < count; i++) { creatorid = clr[i].creatorid; if (clr[i].ifname[0] != '\0' && pfi_kkif_find(clr[i].ifname) == NULL) continue; for (int i = 0; i <= V_pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_kstate *s; relock: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (s->creatorid == creatorid) { s->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(s); goto relock; } } PF_HASHROW_UNLOCK(ih); } } return (len); } static int pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action) { struct mbuf *mp; union pfsync_state_union *sa, *sp; int i, offp, total_len, msg_version, msg_len; switch (action) { case PFSYNC_ACT_INS_1301: msg_len = sizeof(struct pfsync_state_1301); total_len = msg_len * count; msg_version = PFSYNC_MSG_VERSION_1301; break; case PFSYNC_ACT_INS_1400: msg_len = sizeof(struct pfsync_state_1400); total_len = msg_len * count; msg_version = PFSYNC_MSG_VERSION_1400; break; default: V_pfsyncstats.pfsyncs_badact++; return (-1); } mp = m_pulldown(m, offset, total_len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (union pfsync_state_union *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = (union pfsync_state_union *)((char *)sa + msg_len * i); /* Check for invalid values. */ if (sp->pfs_1301.timeout >= PFTM_MAX || sp->pfs_1301.src.state > PF_TCPS_PROXY_DST || sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST || sp->pfs_1301.direction > PF_OUT || (sp->pfs_1301.af != AF_INET && sp->pfs_1301.af != AF_INET6)) { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: invalid value\n", __func__); V_pfsyncstats.pfsyncs_badval++; continue; } if (pfsync_state_import(sp, flags, msg_version) == ENOMEM) /* Drop out, but process the rest of the actions. */ break; } return (total_len); } static int pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action) { struct pfsync_ins_ack *ia, *iaa; struct pf_kstate *st; struct mbuf *mp; int len = count * sizeof(*ia); int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); for (i = 0; i < count; i++) { ia = &iaa[i]; st = pf_find_state_byid(ia->id, ia->creatorid); if (st == NULL) continue; if (st->state_flags & PFSTATE_ACK) { pfsync_undefer_state(st, 0); } PF_STATE_UNLOCK(st); } /* * XXX this is not yet implemented, but we know the size of the * message so we can skip it. 
*/ return (count * sizeof(struct pfsync_ins_ack)); } static int pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src, struct pfsync_state_peer *dst) { int sync = 0; PF_STATE_LOCK_ASSERT(st); /* * The state should never go backwards except * for syn-proxy states. Neither should the * sequence window slide backwards. */ if ((st->src.state > src->state && (st->src.state < PF_TCPS_PROXY_SRC || src->state >= PF_TCPS_PROXY_SRC)) || (st->src.state == src->state && SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))) sync++; else pf_state_peer_ntoh(src, &st->src); if ((st->dst.state > dst->state) || (st->dst.state >= TCPS_SYN_SENT && SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))) sync++; else pf_state_peer_ntoh(dst, &st->dst); return (sync); } static int pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action) { struct pfsync_softc *sc = V_pfsyncif; union pfsync_state_union *sa, *sp; struct pf_kstate *st; struct mbuf *mp; int sync, offp, i, total_len, msg_len, msg_version; switch (action) { case PFSYNC_ACT_UPD_1301: msg_len = sizeof(struct pfsync_state_1301); total_len = msg_len * count; msg_version = PFSYNC_MSG_VERSION_1301; break; case PFSYNC_ACT_UPD_1400: msg_len = sizeof(struct pfsync_state_1400); total_len = msg_len * count; msg_version = PFSYNC_MSG_VERSION_1400; break; default: V_pfsyncstats.pfsyncs_badact++; return (-1); } mp = m_pulldown(m, offset, total_len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (union pfsync_state_union *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = (union pfsync_state_union *)((char *)sa + msg_len * i); /* check for invalid values */ if (sp->pfs_1301.timeout >= PFTM_MAX || sp->pfs_1301.src.state > PF_TCPS_PROXY_DST || sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pfsync_input: PFSYNC_ACT_UPD: " "invalid value\n"); } V_pfsyncstats.pfsyncs_badval++; continue; } st = pf_find_state_byid(sp->pfs_1301.id, sp->pfs_1301.creatorid); if (st == NULL) { /* insert the update */ if (pfsync_state_import(sp, flags, msg_version)) V_pfsyncstats.pfsyncs_badstate++; continue; } if (st->state_flags & PFSTATE_ACK) { pfsync_undefer_state(st, 1); } if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) sync = pfsync_upd_tcp(st, &sp->pfs_1301.src, &sp->pfs_1301.dst); else { sync = 0; /* * Non-TCP protocol state machine always go * forwards */ if (st->src.state > sp->pfs_1301.src.state) sync++; else pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src); if (st->dst.state > sp->pfs_1301.dst.state) sync++; else pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst); } if (sync < 2) { pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst); pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst); st->expire = pf_get_uptime(); st->timeout = sp->pfs_1301.timeout; } st->pfsync_time = time_uptime; if (sync) { V_pfsyncstats.pfsyncs_stale++; pfsync_update_state(st); PF_STATE_UNLOCK(st); pfsync_push_all(sc); continue; } PF_STATE_UNLOCK(st); } return (total_len); } static int pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags, int action) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_upd_c *ua, *up; struct pf_kstate *st; int len = count * sizeof(*up); int sync; struct mbuf *mp; int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } ua = (struct pfsync_upd_c *)(mp->m_data + offp); for (i = 0; i < count; i++) { up = &ua[i]; /* check for invalid values */ if (up->timeout >= PFTM_MAX || up->src.state > PF_TCPS_PROXY_DST || up->dst.state > 
PF_TCPS_PROXY_DST) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pfsync_input: " "PFSYNC_ACT_UPD_C: " "invalid value\n"); } V_pfsyncstats.pfsyncs_badval++; continue; } st = pf_find_state_byid(up->id, up->creatorid); if (st == NULL) { /* We don't have this state. Ask for it. */ PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); pfsync_request_update(up->creatorid, up->id); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); continue; } if (st->state_flags & PFSTATE_ACK) { pfsync_undefer_state(st, 1); } if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) sync = pfsync_upd_tcp(st, &up->src, &up->dst); else { sync = 0; /* * Non-TCP protocol state machine always go * forwards */ if (st->src.state > up->src.state) sync++; else pf_state_peer_ntoh(&up->src, &st->src); if (st->dst.state > up->dst.state) sync++; else pf_state_peer_ntoh(&up->dst, &st->dst); } if (sync < 2) { pfsync_alloc_scrub_memory(&up->dst, &st->dst); pf_state_peer_ntoh(&up->dst, &st->dst); st->expire = pf_get_uptime(); st->timeout = up->timeout; } st->pfsync_time = time_uptime; if (sync) { V_pfsyncstats.pfsyncs_stale++; pfsync_update_state(st); PF_STATE_UNLOCK(st); pfsync_push_all(sc); continue; } PF_STATE_UNLOCK(st); } return (len); } static int pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags, int action) { struct pfsync_upd_req *ur, *ura; struct mbuf *mp; int len = count * sizeof(*ur); int i, offp; struct pf_kstate *st; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } ura = (struct pfsync_upd_req *)(mp->m_data + offp); for (i = 0; i < count; i++) { ur = &ura[i]; if (ur->id == 0 && ur->creatorid == 0) pfsync_bulk_start(); else { st = pf_find_state_byid(ur->id, ur->creatorid); if (st == NULL) { V_pfsyncstats.pfsyncs_badstate++; continue; } if (st->state_flags & PFSTATE_NOSYNC) { PF_STATE_UNLOCK(st); continue; } pfsync_update_state_req(st); PF_STATE_UNLOCK(st); } } return (len); } static int pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags, int action) { struct mbuf *mp; struct pfsync_del_c *sa, *sp; struct pf_kstate *st; int len = count * sizeof(*sp); int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (struct pfsync_del_c *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = &sa[i]; st = pf_find_state_byid(sp->id, sp->creatorid); if (st == NULL) { V_pfsyncstats.pfsyncs_badstate++; continue; } st->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(st); } return (len); } static int pfsync_in_bus(struct mbuf *m, int offset, int count, int flags, int action) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bus *bus; struct mbuf *mp; int len = count * sizeof(*bus); int offp; PFSYNC_BLOCK(sc); /* If we're not waiting for a bulk update, who cares. 
*/ if (sc->sc_ureq_sent == 0) { PFSYNC_BUNLOCK(sc); return (len); } mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { PFSYNC_BUNLOCK(sc); V_pfsyncstats.pfsyncs_badlen++; return (-1); } bus = (struct pfsync_bus *)(mp->m_data + offp); switch (bus->status) { case PFSYNC_BUS_START: callout_reset(&sc->sc_bulkfail_tmo, 4 * hz + V_pf_limits[PF_LIMIT_STATES].limit / ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) / sizeof(union pfsync_state_union)), pfsync_bulk_fail, sc); if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received bulk update start\n"); break; case PFSYNC_BUS_END: if (time_uptime - ntohl(bus->endtime) >= sc->sc_ureq_sent) { /* that's it, we're happy */ sc->sc_ureq_sent = 0; sc->sc_bulk_tries = 0; callout_stop(&sc->sc_bulkfail_tmo); if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync bulk done"); sc->sc_flags |= PFSYNCF_OK; if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received valid " "bulk update end\n"); } else { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received invalid " "bulk update end: bad timestamp\n"); } break; } PFSYNC_BUNLOCK(sc); return (len); } static int pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags, int action) { int len = count * sizeof(struct pfsync_tdb); #if defined(IPSEC) struct pfsync_tdb *tp; struct mbuf *mp; int offp; int i; int s; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } tp = (struct pfsync_tdb *)(mp->m_data + offp); for (i = 0; i < count; i++) pfsync_update_net_tdb(&tp[i]); #endif return (len); } #if defined(IPSEC) /* Update an in-kernel tdb. Silently fail if no tdb is found. */ static void pfsync_update_net_tdb(struct pfsync_tdb *pt) { struct tdb *tdb; int s; /* check for invalid values */ if (ntohl(pt->spi) <= SPI_RESERVED_MAX || (pt->dst.sa.sa_family != AF_INET && pt->dst.sa.sa_family != AF_INET6)) goto bad; tdb = gettdb(pt->spi, &pt->dst, pt->sproto); if (tdb) { pt->rpl = ntohl(pt->rpl); pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes); /* Neither replay nor byte counter should ever decrease. */ if (pt->rpl < tdb->tdb_rpl || pt->cur_bytes < tdb->tdb_cur_bytes) { goto bad; } tdb->tdb_rpl = pt->rpl; tdb->tdb_cur_bytes = pt->cur_bytes; } return; bad: if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: " "invalid value\n"); V_pfsyncstats.pfsyncs_badstate++; return; } #endif static int pfsync_in_eof(struct mbuf *m, int offset, int count, int flags, int action) { /* check if we are at the right place in the packet */ if (offset != m->m_pkthdr.len) V_pfsyncstats.pfsyncs_badlen++; /* we're done. 
free and let the caller return */ m_freem(m); return (-1); } static int pfsync_in_error(struct mbuf *m, int offset, int count, int flags, int action) { V_pfsyncstats.pfsyncs_badact++; m_freem(m); return (-1); } static int pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *rt) { m_freem(m); return (0); } /* ARGSUSED */ static int pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct pfsync_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct pfsyncreq pfsyncr; size_t nvbuflen; int error; int c; switch (cmd) { case SIOCSIFFLAGS: PFSYNC_LOCK(sc); if (ifp->if_flags & IFF_UP) { ifp->if_drv_flags |= IFF_DRV_RUNNING; PFSYNC_UNLOCK(sc); pfsync_pointers_init(); } else { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; PFSYNC_UNLOCK(sc); pfsync_pointers_uninit(); } break; case SIOCSIFMTU: if (!sc->sc_sync_if || ifr->ifr_mtu <= PFSYNC_MINPKT || ifr->ifr_mtu > sc->sc_sync_if->if_mtu) return (EINVAL); if (ifr->ifr_mtu < ifp->if_mtu) { for (c = 0; c < pfsync_buckets; c++) { PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT) pfsync_sendout(1, c); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); } } ifp->if_mtu = ifr->ifr_mtu; break; case SIOCGETPFSYNC: bzero(&pfsyncr, sizeof(pfsyncr)); PFSYNC_LOCK(sc); if (sc->sc_sync_if) { strlcpy(pfsyncr.pfsyncr_syncdev, sc->sc_sync_if->if_xname, IFNAMSIZ); } pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr; pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates; pfsyncr.pfsyncr_defer = sc->sc_flags; PFSYNC_UNLOCK(sc); return (copyout(&pfsyncr, ifr_data_get_ptr(ifr), sizeof(pfsyncr))); case SIOCGETPFSYNCNV: { nvlist_t *nvl_syncpeer; nvlist_t *nvl = nvlist_create(0); if (nvl == NULL) return (ENOMEM); if (sc->sc_sync_if) nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname); nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates); nvlist_add_number(nvl, "flags", sc->sc_flags); nvlist_add_number(nvl, "version", sc->sc_version); if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL) nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer); void *packed = NULL; packed = nvlist_pack(nvl, &nvbuflen); if (packed == NULL) { free(packed, M_NVLIST); nvlist_destroy(nvl); return (ENOMEM); } if (nvbuflen > ifr->ifr_cap_nv.buf_length) { ifr->ifr_cap_nv.length = nvbuflen; ifr->ifr_cap_nv.buffer = NULL; free(packed, M_NVLIST); nvlist_destroy(nvl); return (EFBIG); } ifr->ifr_cap_nv.length = nvbuflen; error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen); nvlist_destroy(nvl); nvlist_destroy(nvl_syncpeer); free(packed, M_NVLIST); break; } case SIOCSETPFSYNC: { struct pfsync_kstatus status; if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) return (error); if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr, sizeof(pfsyncr)))) return (error); memset((char *)&status, 0, sizeof(struct pfsync_kstatus)); pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status); error = pfsync_kstatus_to_softc(&status, sc); return (error); } case SIOCSETPFSYNCNV: { struct pfsync_kstatus status; void *data; nvlist_t *nvl; if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) return (error); if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE) return (EINVAL); data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK); if ((error = copyin(ifr->ifr_cap_nv.buffer, data, ifr->ifr_cap_nv.length)) != 0) { free(data, M_TEMP); return (error); } if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) { free(data, M_TEMP); return (EINVAL); } memset((char *)&status, 0, 
sizeof(struct pfsync_kstatus)); pfsync_nvstatus_to_kstatus(nvl, &status); nvlist_destroy(nvl); free(data, M_TEMP); error = pfsync_kstatus_to_softc(&status, sc); return (error); } default: return (ENOTTY); } return (0); } static void pfsync_out_state_1301(struct pf_kstate *st, void *buf) { union pfsync_state_union *sp = buf; pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1301); } static void pfsync_out_state_1400(struct pf_kstate *st, void *buf) { union pfsync_state_union *sp = buf; pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1400); } static void pfsync_out_iack(struct pf_kstate *st, void *buf) { struct pfsync_ins_ack *iack = buf; iack->id = st->id; iack->creatorid = st->creatorid; } static void pfsync_out_upd_c(struct pf_kstate *st, void *buf) { struct pfsync_upd_c *up = buf; bzero(up, sizeof(*up)); up->id = st->id; pf_state_peer_hton(&st->src, &up->src); pf_state_peer_hton(&st->dst, &up->dst); up->creatorid = st->creatorid; up->timeout = st->timeout; } static void pfsync_out_del_c(struct pf_kstate *st, void *buf) { struct pfsync_del_c *dp = buf; dp->id = st->id; dp->creatorid = st->creatorid; st->state_flags |= PFSTATE_NOSYNC; } static void pfsync_drop_all(struct pfsync_softc *sc) { struct pfsync_bucket *b; int c; for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; PFSYNC_BUCKET_LOCK(b); pfsync_drop(sc, c); PFSYNC_BUCKET_UNLOCK(b); } } static void pfsync_drop(struct pfsync_softc *sc, int c) { struct pf_kstate *st, *next; struct pfsync_upd_req_item *ur; struct pfsync_bucket *b; enum pfsync_q_id q; b = &sc->sc_buckets[c]; PFSYNC_BUCKET_LOCK_ASSERT(b); for (q = 0; q < PFSYNC_Q_COUNT; q++) { if (TAILQ_EMPTY(&b->b_qs[q])) continue; TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) { KASSERT(st->sync_state == pfsync_qid_sstate[q], ("%s: st->sync_state %d == q %d", __func__, st->sync_state, q)); st->sync_state = PFSYNC_S_NONE; pf_release_state(st); } TAILQ_INIT(&b->b_qs[q]); } while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); free(ur, M_PFSYNC); } b->b_len = PFSYNC_MINPKT; free(b->b_plus, M_PFSYNC); b->b_plus = NULL; b->b_pluslen = 0; } static void pfsync_sendout(int schedswi, int c) { struct pfsync_softc *sc = V_pfsyncif; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; struct pfsync_header *ph; struct pfsync_subheader *subh; struct pf_kstate *st, *st_next; struct pfsync_upd_req_item *ur; struct pfsync_bucket *b = &sc->sc_buckets[c]; size_t len; int aflen, offset, count = 0; enum pfsync_q_id q; KASSERT(sc != NULL, ("%s: null sc", __func__)); KASSERT(b->b_len > PFSYNC_MINPKT, ("%s: sc_len %zu", __func__, b->b_len)); PFSYNC_BUCKET_LOCK_ASSERT(b); if (!bpf_peers_present(ifp->if_bpf) && sc->sc_sync_if == NULL) { pfsync_drop(sc, c); return; } m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); V_pfsyncstats.pfsyncs_onomem++; return; } m->m_data += max_linkhdr; bzero(m->m_data, b->b_len); len = b->b_len; /* build the ip header */ switch (sc->sc_sync_peer.ss_family) { #ifdef INET case AF_INET: { struct ip *ip; ip = mtod(m, struct ip *); bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip)); aflen = offset = sizeof(*ip); len -= sizeof(union inet_template) - sizeof(struct ip); ip->ip_len = htons(len); - ip_fillid(ip); + ip_fillid(ip, V_ip_random_id); break; } #endif #ifdef INET6 case AF_INET6: { struct ip6_hdr *ip6; ip6 = mtod(m, struct ip6_hdr *); bcopy(&sc->sc_template.ipv6, ip6, sizeof(*ip6)); aflen = offset = sizeof(*ip6); len -= sizeof(union inet_template) - 
sizeof(struct ip6_hdr); ip6->ip6_plen = htons(len); break; } #endif default: m_freem(m); pfsync_drop(sc, c); return; } m->m_len = m->m_pkthdr.len = len; /* build the pfsync header */ ph = (struct pfsync_header *)(m->m_data + offset); offset += sizeof(*ph); ph->version = PFSYNC_VERSION; ph->len = htons(len - aflen); bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); /* walk the queues */ for (q = 0; q < PFSYNC_Q_COUNT; q++) { if (TAILQ_EMPTY(&b->b_qs[q])) continue; subh = (struct pfsync_subheader *)(m->m_data + offset); offset += sizeof(*subh); count = 0; TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) { KASSERT(st->sync_state == pfsync_qid_sstate[q], ("%s: st->sync_state == q", __func__)); /* * XXXGL: some of the write methods do unlocked reads * of state data :( */ pfsync_qs[q].write(st, m->m_data + offset); offset += pfsync_qs[q].len; st->sync_state = PFSYNC_S_NONE; pf_release_state(st); count++; } TAILQ_INIT(&b->b_qs[q]); subh->action = pfsync_qs[q].action; subh->count = htons(count); V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count; } if (!TAILQ_EMPTY(&b->b_upd_req_list)) { subh = (struct pfsync_subheader *)(m->m_data + offset); offset += sizeof(*subh); count = 0; while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); bcopy(&ur->ur_msg, m->m_data + offset, sizeof(ur->ur_msg)); offset += sizeof(ur->ur_msg); free(ur, M_PFSYNC); count++; } subh->action = PFSYNC_ACT_UPD_REQ; subh->count = htons(count); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count; } /* has someone built a custom region for us to add? */ if (b->b_plus != NULL) { bcopy(b->b_plus, m->m_data + offset, b->b_pluslen); offset += b->b_pluslen; free(b->b_plus, M_PFSYNC); b->b_plus = NULL; b->b_pluslen = 0; } subh = (struct pfsync_subheader *)(m->m_data + offset); offset += sizeof(*subh); subh->action = PFSYNC_ACT_EOF; subh->count = htons(1); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++; /* we're done, let's put it on the wire */ if (bpf_peers_present(ifp->if_bpf)) { m->m_data += aflen; m->m_len = m->m_pkthdr.len = len - aflen; bpf_mtap(ifp->if_bpf, m); m->m_data -= aflen; m->m_len = m->m_pkthdr.len = len; } if (sc->sc_sync_if == NULL) { b->b_len = PFSYNC_MINPKT; m_freem(m); return; } if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); b->b_len = PFSYNC_MINPKT; if (!_IF_QFULL(&b->b_snd)) _IF_ENQUEUE(&b->b_snd, m); else { m_freem(m); if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); } if (schedswi) swi_sched(V_pfsync_swi_cookie, 0); } static void pfsync_insert_state(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); if (st->state_flags & PFSTATE_NOSYNC) return; if ((st->rule->rule_flag & PFRULE_NOSYNC) || st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) { st->state_flags |= PFSTATE_NOSYNC; return; } KASSERT(st->sync_state == PFSYNC_S_NONE, ("%s: st->sync_state %u", __func__, st->sync_state)); PFSYNC_BUCKET_LOCK(b); if (b->b_len == PFSYNC_MINPKT) callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); pfsync_q_ins(st, PFSYNC_S_INS, true); PFSYNC_BUCKET_UNLOCK(b); st->sync_updates = 0; } static int pfsync_defer(struct pf_kstate *st, struct mbuf *m) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_deferral *pd; struct pfsync_bucket *b; if (m->m_flags & (M_BCAST|M_MCAST)) return (0); if (sc == NULL) return (0); b = pfsync_get_bucket(sc, st); PFSYNC_LOCK(sc); if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) || !(sc->sc_flags &
PFSYNCF_DEFER)) { PFSYNC_UNLOCK(sc); return (0); } PFSYNC_BUCKET_LOCK(b); PFSYNC_UNLOCK(sc); if (b->b_deferred >= 128) pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0); pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT); if (pd == NULL) { PFSYNC_BUCKET_UNLOCK(b); return (0); } b->b_deferred++; m->m_flags |= M_SKIP_FIREWALL; st->state_flags |= PFSTATE_ACK; pd->pd_sc = sc; pd->pd_st = st; pf_ref_state(st); pd->pd_m = m; TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry); callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED); callout_reset(&pd->pd_tmo, (V_pfsync_defer_timeout * hz) / 1000, pfsync_defer_tmo, pd); pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); return (1); } static void pfsync_undefer(struct pfsync_deferral *pd, int drop) { struct pfsync_softc *sc = pd->pd_sc; struct mbuf *m = pd->pd_m; struct pf_kstate *st = pd->pd_st; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); b->b_deferred--; pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ free(pd, M_PFSYNC); pf_release_state(st); if (drop) m_freem(m); else { _IF_ENQUEUE(&b->b_snd, m); pfsync_push(b); } } static void pfsync_defer_tmo(void *arg) { struct epoch_tracker et; struct pfsync_deferral *pd = arg; struct pfsync_softc *sc = pd->pd_sc; struct mbuf *m = pd->pd_m; struct pf_kstate *st = pd->pd_st; struct pfsync_bucket *b; CURVNET_SET(sc->sc_ifp->if_vnet); b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); b->b_deferred--; pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ PFSYNC_BUCKET_UNLOCK(b); free(pd, M_PFSYNC); if (sc->sc_sync_if == NULL) { pf_release_state(st); m_freem(m); CURVNET_RESTORE(); return; } NET_EPOCH_ENTER(et); pfsync_tx(sc, m); pf_release_state(st); CURVNET_RESTORE(); NET_EPOCH_EXIT(et); } static void pfsync_undefer_state_locked(struct pf_kstate *st, int drop) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_deferral *pd; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) { if (pd->pd_st == st) { if (callout_stop(&pd->pd_tmo) > 0) pfsync_undefer(pd, drop); return; } } panic("%s: unable to find deferred state", __func__); } static void pfsync_undefer_state(struct pf_kstate *st, int drop) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK(b); pfsync_undefer_state_locked(st, drop); PFSYNC_BUCKET_UNLOCK(b); } static struct pfsync_bucket* pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st) { int c = PF_IDHASH(st) % pfsync_buckets; return &sc->sc_buckets[c]; } static void pfsync_update_state(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; bool sync = false, ref = true; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PF_STATE_LOCK_ASSERT(st); PFSYNC_BUCKET_LOCK(b); if (st->state_flags & PFSTATE_ACK) pfsync_undefer_state_locked(st, 0); if (st->state_flags & PFSTATE_NOSYNC) { if (st->sync_state != PFSYNC_S_NONE) pfsync_q_del(st, true, b); PFSYNC_BUCKET_UNLOCK(b); return; } if (b->b_len == PFSYNC_MINPKT) callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); switch (st->sync_state) { case PFSYNC_S_UPD_C: case PFSYNC_S_UPD: case PFSYNC_S_INS: /* we're already handling it */ if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) { st->sync_updates++; if (st->sync_updates >= sc->sc_maxupdates) sync = true; } break; case PFSYNC_S_IACK: pfsync_q_del(st, false, b); ref = false; /* FALLTHROUGH */ case PFSYNC_S_NONE: 
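/* * Not on any queue yet, or just removed from the IACK queue above * without dropping its reference: queue a compressed update and * restart the per-state update counter. */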
pfsync_q_ins(st, PFSYNC_S_UPD_C, ref); st->sync_updates = 0; break; default: panic("%s: unexpected sync state %d", __func__, st->sync_state); } if (sync || (time_uptime - st->pfsync_time) < 2) pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); } static void pfsync_request_update(u_int32_t creatorid, u_int64_t id) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = &sc->sc_buckets[0]; struct pfsync_upd_req_item *item; size_t nlen = sizeof(struct pfsync_upd_req); PFSYNC_BUCKET_LOCK_ASSERT(b); /* * This code does a bit of work to prevent multiple update requests * for the same state from being generated. It searches the current * subheader queue, but it doesn't look into the queue of already * packed datagrams. */ TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry) if (item->ur_msg.id == id && item->ur_msg.creatorid == creatorid) return; item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT); if (item == NULL) return; /* XXX stats */ item->ur_msg.id = id; item->ur_msg.creatorid = creatorid; if (TAILQ_EMPTY(&b->b_upd_req_list)) nlen += sizeof(struct pfsync_subheader); if (b->b_len + nlen > sc->sc_ifp->if_mtu) { pfsync_sendout(0, 0); nlen = sizeof(struct pfsync_subheader) + sizeof(struct pfsync_upd_req); } TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry); b->b_len += nlen; pfsync_push(b); } static bool pfsync_update_state_req(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; bool ref = true, full = false; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PF_STATE_LOCK_ASSERT(st); PFSYNC_BUCKET_LOCK(b); if (st->state_flags & PFSTATE_NOSYNC) { if (st->sync_state != PFSYNC_S_NONE) pfsync_q_del(st, true, b); PFSYNC_BUCKET_UNLOCK(b); return (full); } switch (st->sync_state) { case PFSYNC_S_UPD_C: case PFSYNC_S_IACK: pfsync_q_del(st, false, b); ref = false; /* FALLTHROUGH */ case PFSYNC_S_NONE: pfsync_q_ins(st, PFSYNC_S_UPD, ref); pfsync_push(b); break; case PFSYNC_S_INS: case PFSYNC_S_UPD: case PFSYNC_S_DEL_C: /* we're already handling it */ break; default: panic("%s: unexpected sync state %d", __func__, st->sync_state); } if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(union pfsync_state_union)) full = true; PFSYNC_BUCKET_UNLOCK(b); return (full); } static void pfsync_delete_state(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); bool ref = true; PFSYNC_BUCKET_LOCK(b); if (st->state_flags & PFSTATE_ACK) pfsync_undefer_state_locked(st, 1); if (st->state_flags & PFSTATE_NOSYNC) { if (st->sync_state != PFSYNC_S_NONE) pfsync_q_del(st, true, b); PFSYNC_BUCKET_UNLOCK(b); return; } if (b->b_len == PFSYNC_MINPKT) callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); switch (st->sync_state) { case PFSYNC_S_INS: /* We never got to tell the world so just forget about it.
*/ pfsync_q_del(st, true, b); break; case PFSYNC_S_UPD_C: case PFSYNC_S_UPD: case PFSYNC_S_IACK: pfsync_q_del(st, false, b); ref = false; /* FALLTHROUGH */ case PFSYNC_S_NONE: pfsync_q_ins(st, PFSYNC_S_DEL_C, ref); break; default: panic("%s: unexpected sync state %d", __func__, st->sync_state); } PFSYNC_BUCKET_UNLOCK(b); } static void pfsync_clear_states(u_int32_t creatorid, const char *ifname) { struct { struct pfsync_subheader subh; struct pfsync_clr clr; } __packed r; bzero(&r, sizeof(r)); r.subh.action = PFSYNC_ACT_CLR; r.subh.count = htons(1); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++; strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); r.clr.creatorid = creatorid; pfsync_send_plus(&r, sizeof(r)); } static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t sync_state) { struct pfsync_softc *sc = V_pfsyncif; switch (sync_state) { case PFSYNC_S_INS: switch (sc->sc_version) { case PFSYNC_MSG_VERSION_1301: return PFSYNC_Q_INS_1301; case PFSYNC_MSG_VERSION_1400: return PFSYNC_Q_INS_1400; } break; case PFSYNC_S_IACK: return PFSYNC_Q_IACK; case PFSYNC_S_UPD: switch (sc->sc_version) { case PFSYNC_MSG_VERSION_1301: return PFSYNC_Q_UPD_1301; case PFSYNC_MSG_VERSION_1400: return PFSYNC_Q_UPD_1400; } break; case PFSYNC_S_UPD_C: return PFSYNC_Q_UPD_C; case PFSYNC_S_DEL_C: return PFSYNC_Q_DEL_C; default: panic("%s: Unsupported st->sync_state 0x%02x", __func__, sync_state); } panic("%s: Unsupported pfsync_msg_version %d", __func__, sc->sc_version); } static void pfsync_q_ins(struct pf_kstate *st, int sync_state, bool ref) { enum pfsync_q_id q = pfsync_sstate_to_qid(sync_state); struct pfsync_softc *sc = V_pfsyncif; size_t nlen = pfsync_qs[q].len; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); KASSERT(st->sync_state == PFSYNC_S_NONE, ("%s: st->sync_state %u", __func__, st->sync_state)); KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu", b->b_len)); if (TAILQ_EMPTY(&b->b_qs[q])) nlen += sizeof(struct pfsync_subheader); if (b->b_len + nlen > sc->sc_ifp->if_mtu) { pfsync_sendout(1, b->b_id); nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; } b->b_len += nlen; st->sync_state = pfsync_qid_sstate[q]; TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list); if (ref) pf_ref_state(st); } static void pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b) { enum pfsync_q_id q; PFSYNC_BUCKET_LOCK_ASSERT(b); KASSERT(st->sync_state != PFSYNC_S_NONE, ("%s: st->sync_state != PFSYNC_S_NONE", __func__)); q = pfsync_sstate_to_qid(st->sync_state); b->b_len -= pfsync_qs[q].len; TAILQ_REMOVE(&b->b_qs[q], st, sync_list); st->sync_state = PFSYNC_S_NONE; if (unref) pf_release_state(st); if (TAILQ_EMPTY(&b->b_qs[q])) b->b_len -= sizeof(struct pfsync_subheader); } static void pfsync_bulk_start(void) { struct pfsync_softc *sc = V_pfsyncif; if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received bulk update request\n"); PFSYNC_BLOCK(sc); sc->sc_ureq_received = time_uptime; sc->sc_bulk_hashid = 0; sc->sc_bulk_stateid = 0; pfsync_bulk_status(PFSYNC_BUS_START); callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); PFSYNC_BUNLOCK(sc); } static void pfsync_bulk_update(void *arg) { struct pfsync_softc *sc = arg; struct pf_kstate *s; int i; PFSYNC_BLOCK_ASSERT(sc); CURVNET_SET(sc->sc_ifp->if_vnet); /* * Start with the last state from the previous invocation. * It may have gone away; in that case start from the * hash slot.
*/ s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid); if (s != NULL) i = PF_IDHASH(s); else i = sc->sc_bulk_hashid; for (; i <= V_pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; if (s != NULL) PF_HASHROW_ASSERT(ih); else { PF_HASHROW_LOCK(ih); s = LIST_FIRST(&ih->states); } for (; s; s = LIST_NEXT(s, entry)) { if (s->sync_state == PFSYNC_S_NONE && s->timeout < PFTM_MAX && s->pfsync_time <= sc->sc_ureq_received) { if (pfsync_update_state_req(s)) { /* We've filled a packet. */ sc->sc_bulk_hashid = i; sc->sc_bulk_stateid = s->id; sc->sc_bulk_creatorid = s->creatorid; PF_HASHROW_UNLOCK(ih); callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); goto full; } } } PF_HASHROW_UNLOCK(ih); } /* We're done. */ pfsync_bulk_status(PFSYNC_BUS_END); full: CURVNET_RESTORE(); } static void pfsync_bulk_status(u_int8_t status) { struct { struct pfsync_subheader subh; struct pfsync_bus bus; } __packed r; struct pfsync_softc *sc = V_pfsyncif; bzero(&r, sizeof(r)); r.subh.action = PFSYNC_ACT_BUS; r.subh.count = htons(1); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++; r.bus.creatorid = V_pf_status.hostid; r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); r.bus.status = status; pfsync_send_plus(&r, sizeof(r)); } static void pfsync_bulk_fail(void *arg) { struct pfsync_softc *sc = arg; struct pfsync_bucket *b = &sc->sc_buckets[0]; CURVNET_SET(sc->sc_ifp->if_vnet); PFSYNC_BLOCK_ASSERT(sc); if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { /* Try again */ callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, V_pfsyncif); PFSYNC_BUCKET_LOCK(b); pfsync_request_update(0, 0); PFSYNC_BUCKET_UNLOCK(b); } else { /* Pretend like the transfer was ok. */ sc->sc_ureq_sent = 0; sc->sc_bulk_tries = 0; PFSYNC_LOCK(sc); if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync bulk fail"); sc->sc_flags |= PFSYNCF_OK; PFSYNC_UNLOCK(sc); if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: failed to receive bulk update\n"); } CURVNET_RESTORE(); } static void pfsync_send_plus(void *plus, size_t pluslen) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = &sc->sc_buckets[0]; uint8_t *newplus; PFSYNC_BUCKET_LOCK(b); if (b->b_len + pluslen > sc->sc_ifp->if_mtu) pfsync_sendout(1, b->b_id); newplus = malloc(pluslen + b->b_pluslen, M_PFSYNC, M_NOWAIT); if (newplus == NULL) goto out; if (b->b_plus != NULL) { memcpy(newplus, b->b_plus, b->b_pluslen); free(b->b_plus, M_PFSYNC); } else { MPASS(b->b_pluslen == 0); } memcpy(newplus + b->b_pluslen, plus, pluslen); b->b_plus = newplus; b->b_pluslen += pluslen; b->b_len += pluslen; pfsync_sendout(1, b->b_id); out: PFSYNC_BUCKET_UNLOCK(b); } static void pfsync_timeout(void *arg) { struct pfsync_bucket *b = arg; CURVNET_SET(b->b_sc->sc_ifp->if_vnet); PFSYNC_BUCKET_LOCK(b); pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); CURVNET_RESTORE(); } static void pfsync_push(struct pfsync_bucket *b) { PFSYNC_BUCKET_LOCK_ASSERT(b); b->b_flags |= PFSYNCF_BUCKET_PUSH; swi_sched(V_pfsync_swi_cookie, 0); } static void pfsync_push_all(struct pfsync_softc *sc) { int c; struct pfsync_bucket *b; for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; PFSYNC_BUCKET_LOCK(b); pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); } } static void pfsync_tx(struct pfsync_softc *sc, struct mbuf *m) { struct ip *ip; int af, error = 0; ip = mtod(m, struct ip *); MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4)); af = ip->ip_v == IPVERSION ? 
AF_INET : AF_INET6; /* * We distinguish between a deferral packet and our * own pfsync packet based on the M_SKIP_FIREWALL * flag. This is XXX. */ switch (af) { #ifdef INET case AF_INET: if (m->m_flags & M_SKIP_FIREWALL) { error = ip_output(m, NULL, NULL, 0, NULL, NULL); } else { error = ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL); } break; #endif #ifdef INET6 case AF_INET6: if (m->m_flags & M_SKIP_FIREWALL) { error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); } else { error = ip6_output(m, NULL, NULL, 0, &sc->sc_im6o, NULL, NULL); } break; #endif } if (error == 0) V_pfsyncstats.pfsyncs_opackets++; else V_pfsyncstats.pfsyncs_oerrors++; } static void pfsyncintr(void *arg) { struct epoch_tracker et; struct pfsync_softc *sc = arg; struct pfsync_bucket *b; struct mbuf *m, *n; int c; NET_EPOCH_ENTER(et); CURVNET_SET(sc->sc_ifp->if_vnet); for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; PFSYNC_BUCKET_LOCK(b); if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) { pfsync_sendout(0, b->b_id); b->b_flags &= ~PFSYNCF_BUCKET_PUSH; } _IF_DEQUEUE_ALL(&b->b_snd, m); PFSYNC_BUCKET_UNLOCK(b); for (; m != NULL; m = n) { n = m->m_nextpkt; m->m_nextpkt = NULL; pfsync_tx(sc, m); } } CURVNET_RESTORE(); NET_EPOCH_EXIT(et); } static int pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, struct in_mfilter* imf, struct in6_mfilter* im6f) { #ifdef INET struct ip_moptions *imo = &sc->sc_imo; #endif #ifdef INET6 struct ip6_moptions *im6o = &sc->sc_im6o; struct sockaddr_in6 *syncpeer_sa6 = NULL; #endif if (!(ifp->if_flags & IFF_MULTICAST)) return (EADDRNOTAVAIL); switch (sc->sc_sync_peer.ss_family) { #ifdef INET case AF_INET: { int error; ip_mfilter_init(&imo->imo_head); imo->imo_multicast_vif = -1; if ((error = in_joingroup(ifp, &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL, &imf->imf_inm)) != 0) return (error); ip_mfilter_insert(&imo->imo_head, imf); imo->imo_multicast_ifp = ifp; imo->imo_multicast_ttl = PFSYNC_DFLTTL; imo->imo_multicast_loop = 0; break; } #endif #ifdef INET6 case AF_INET6: { int error; syncpeer_sa6 = (struct sockaddr_in6 *)&sc->sc_sync_peer; if ((error = in6_setscope(&syncpeer_sa6->sin6_addr, ifp, NULL))) return (error); ip6_mfilter_init(&im6o->im6o_head); if ((error = in6_joingroup(ifp, &syncpeer_sa6->sin6_addr, NULL, &(im6f->im6f_in6m), 0)) != 0) return (error); ip6_mfilter_insert(&im6o->im6o_head, im6f); im6o->im6o_multicast_ifp = ifp; im6o->im6o_multicast_hlim = PFSYNC_DFLTTL; im6o->im6o_multicast_loop = 0; break; } #endif } return (0); } static void pfsync_multicast_cleanup(struct pfsync_softc *sc) { #ifdef INET struct ip_moptions *imo = &sc->sc_imo; struct in_mfilter *imf; while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) { ip_mfilter_remove(&imo->imo_head, imf); in_leavegroup(imf->imf_inm, NULL); ip_mfilter_free(imf); } imo->imo_multicast_ifp = NULL; #endif #ifdef INET6 struct ip6_moptions *im6o = &sc->sc_im6o; struct in6_mfilter *im6f; while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) { ip6_mfilter_remove(&im6o->im6o_head, im6f); in6_leavegroup(im6f->im6f_in6m, NULL); ip6_mfilter_free(im6f); } im6o->im6o_multicast_ifp = NULL; #endif } void pfsync_detach_ifnet(struct ifnet *ifp) { struct pfsync_softc *sc = V_pfsyncif; if (sc == NULL) return; PFSYNC_LOCK(sc); if (sc->sc_sync_if == ifp) { /* We don't need multicast cleanup here, because the interface * is going away. We do need to ensure we don't try to do * cleanup later.
*/ ip_mfilter_init(&sc->sc_imo.imo_head); sc->sc_imo.imo_multicast_ifp = NULL; sc->sc_im6o.im6o_multicast_ifp = NULL; sc->sc_sync_if = NULL; } PFSYNC_UNLOCK(sc); } static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status) { struct sockaddr_storage sa; status->maxupdates = pfsyncr->pfsyncr_maxupdates; status->flags = pfsyncr->pfsyncr_defer; strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ); memset(&sa, 0, sizeof(sa)); if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) { struct sockaddr_in *in = (struct sockaddr_in *)&sa; in->sin_family = AF_INET; in->sin_len = sizeof(*in); in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr; } status->syncpeer = sa; return 0; } static int pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc) { struct ifnet *sifp; struct in_mfilter *imf = NULL; struct in6_mfilter *im6f = NULL; int error; int c; if ((status->maxupdates < 0) || (status->maxupdates > 255)) return (EINVAL); if (status->syncdev[0] == '\0') sifp = NULL; else if ((sifp = ifunit_ref(status->syncdev)) == NULL) return (EINVAL); switch (status->syncpeer.ss_family) { #ifdef INET case AF_UNSPEC: case AF_INET: { struct sockaddr_in *status_sin; status_sin = (struct sockaddr_in *)&(status->syncpeer); if (sifp != NULL) { if (status_sin->sin_addr.s_addr == 0 || status_sin->sin_addr.s_addr == htonl(INADDR_PFSYNC_GROUP)) { status_sin->sin_family = AF_INET; status_sin->sin_len = sizeof(*status_sin); status_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP); } if (IN_MULTICAST(ntohl(status_sin->sin_addr.s_addr))) { imf = ip_mfilter_alloc(M_WAITOK, 0, 0); } } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *status_sin6; status_sin6 = (struct sockaddr_in6*)&(status->syncpeer); if (sifp != NULL) { if (IN6_IS_ADDR_UNSPECIFIED(&status_sin6->sin6_addr) || IN6_ARE_ADDR_EQUAL(&status_sin6->sin6_addr, &in6addr_linklocal_pfsync_group)) { status_sin6->sin6_family = AF_INET6; status_sin6->sin6_len = sizeof(*status_sin6); status_sin6->sin6_addr = in6addr_linklocal_pfsync_group; } if (IN6_IS_ADDR_MULTICAST(&status_sin6->sin6_addr)) { im6f = ip6_mfilter_alloc(M_WAITOK, 0, 0); } } break; } #endif } PFSYNC_LOCK(sc); switch (status->version) { case PFSYNC_MSG_VERSION_UNSPECIFIED: sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT; break; case PFSYNC_MSG_VERSION_1301: case PFSYNC_MSG_VERSION_1400: sc->sc_version = status->version; break; default: PFSYNC_UNLOCK(sc); return (EINVAL); } switch (status->syncpeer.ss_family) { case AF_INET: { struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer); struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer; sc_sin->sin_family = AF_INET; sc_sin->sin_len = sizeof(*sc_sin); if (status_sin->sin_addr.s_addr == 0) { sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP); } else { sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr; } break; } case AF_INET6: { struct sockaddr_in6 *status_sin = (struct sockaddr_in6 *)&(status->syncpeer); struct sockaddr_in6 *sc_sin = (struct sockaddr_in6 *)&sc->sc_sync_peer; sc_sin->sin6_family = AF_INET6; sc_sin->sin6_len = sizeof(*sc_sin); if(IN6_IS_ADDR_UNSPECIFIED(&status_sin->sin6_addr)) { sc_sin->sin6_addr = in6addr_linklocal_pfsync_group; } else { sc_sin->sin6_addr = status_sin->sin6_addr; } break; } } sc->sc_maxupdates = status->maxupdates; if (status->flags & PFSYNCF_DEFER) { sc->sc_flags |= PFSYNCF_DEFER; V_pfsync_defer_ptr = pfsync_defer; } else { sc->sc_flags &= ~PFSYNCF_DEFER; V_pfsync_defer_ptr = NULL; } if (sifp == NULL) { if 
(sc->sc_sync_if) if_rele(sc->sc_sync_if); sc->sc_sync_if = NULL; pfsync_multicast_cleanup(sc); PFSYNC_UNLOCK(sc); return (0); } for (c = 0; c < pfsync_buckets; c++) { PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT && (sifp->if_mtu < sc->sc_ifp->if_mtu || (sc->sc_sync_if != NULL && sifp->if_mtu < sc->sc_sync_if->if_mtu) || sifp->if_mtu < MCLBYTES - sizeof(struct ip))) pfsync_sendout(1, c); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); } pfsync_multicast_cleanup(sc); if (((sc->sc_sync_peer.ss_family == AF_INET) && IN_MULTICAST(ntohl(((struct sockaddr_in *) &sc->sc_sync_peer)->sin_addr.s_addr))) || ((sc->sc_sync_peer.ss_family == AF_INET6) && IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6*) &sc->sc_sync_peer)->sin6_addr))) { error = pfsync_multicast_setup(sc, sifp, imf, im6f); if (error) { if_rele(sifp); PFSYNC_UNLOCK(sc); #ifdef INET if (imf != NULL) ip_mfilter_free(imf); #endif #ifdef INET6 if (im6f != NULL) ip6_mfilter_free(im6f); #endif return (error); } } if (sc->sc_sync_if) if_rele(sc->sc_sync_if); sc->sc_sync_if = sifp; switch (sc->sc_sync_peer.ss_family) { #ifdef INET case AF_INET: { struct ip *ip; ip = &sc->sc_template.ipv4; bzero(ip, sizeof(*ip)); ip->ip_v = IPVERSION; ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2; ip->ip_tos = IPTOS_LOWDELAY; /* len and id are set later. */ ip->ip_off = htons(IP_DF); ip->ip_ttl = PFSYNC_DFLTTL; ip->ip_p = IPPROTO_PFSYNC; ip->ip_src.s_addr = INADDR_ANY; ip->ip_dst = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr; break; } #endif #ifdef INET6 case AF_INET6: { struct ip6_hdr *ip6; ip6 = &sc->sc_template.ipv6; bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_hlim = PFSYNC_DFLTTL; ip6->ip6_nxt = IPPROTO_PFSYNC; ip6->ip6_dst = ((struct sockaddr_in6 *)&sc->sc_sync_peer)->sin6_addr; struct epoch_tracker et; NET_EPOCH_ENTER(et); in6_selectsrc_addr(if_getfib(sc->sc_sync_if), &ip6->ip6_dst, 0, sc->sc_sync_if, &ip6->ip6_src, NULL); NET_EPOCH_EXIT(et); break; } #endif } /* Request a full state table update. 
*/ if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(V_pfsync_carp_adj, "pfsync bulk start"); sc->sc_flags &= ~PFSYNCF_OK; if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: requesting bulk update\n"); PFSYNC_UNLOCK(sc); PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); pfsync_request_update(0, 0); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); PFSYNC_BLOCK(sc); sc->sc_ureq_sent = time_uptime; callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc); PFSYNC_BUNLOCK(sc); return (0); } static void pfsync_pointers_init(void) { PF_RULES_WLOCK(); V_pfsync_state_import_ptr = pfsync_state_import; V_pfsync_insert_state_ptr = pfsync_insert_state; V_pfsync_update_state_ptr = pfsync_update_state; V_pfsync_delete_state_ptr = pfsync_delete_state; V_pfsync_clear_states_ptr = pfsync_clear_states; V_pfsync_defer_ptr = pfsync_defer; PF_RULES_WUNLOCK(); } static void pfsync_pointers_uninit(void) { PF_RULES_WLOCK(); V_pfsync_state_import_ptr = NULL; V_pfsync_insert_state_ptr = NULL; V_pfsync_update_state_ptr = NULL; V_pfsync_delete_state_ptr = NULL; V_pfsync_clear_states_ptr = NULL; V_pfsync_defer_ptr = NULL; PF_RULES_WUNLOCK(); } static void vnet_pfsync_init(const void *unused __unused) { int error; V_pfsync_cloner = if_clone_simple(pfsyncname, pfsync_clone_create, pfsync_clone_destroy, 1); error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif, SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie); if (error) { if_clone_detach(V_pfsync_cloner); log(LOG_INFO, "swi_add() failed in %s\n", __func__); } pfsync_pointers_init(); } VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY, vnet_pfsync_init, NULL); static void vnet_pfsync_uninit(const void *unused __unused) { int ret __diagused; pfsync_pointers_uninit(); if_clone_detach(V_pfsync_cloner); ret = swi_remove(V_pfsync_swi_cookie); MPASS(ret == 0); ret = intr_event_destroy(V_pfsync_swi_ie); MPASS(ret == 0); } VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH, vnet_pfsync_uninit, NULL); static int pfsync_init(void) { int error; pfsync_detach_ifnet_ptr = pfsync_detach_ifnet; #ifdef INET error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL); if (error) return (error); #endif #ifdef INET6 error = ip6proto_register(IPPROTO_PFSYNC, pfsync6_input, NULL); if (error) { ipproto_unregister(IPPROTO_PFSYNC); return (error); } #endif return (0); } static void pfsync_uninit(void) { pfsync_detach_ifnet_ptr = NULL; #ifdef INET ipproto_unregister(IPPROTO_PFSYNC); #endif #ifdef INET6 ip6proto_unregister(IPPROTO_PFSYNC); #endif } static int pfsync_modevent(module_t mod, int type, void *data) { int error = 0; switch (type) { case MOD_LOAD: error = pfsync_init(); break; case MOD_UNLOAD: pfsync_uninit(); break; default: error = EINVAL; break; } return (error); } static moduledata_t pfsync_mod = { pfsyncname, pfsync_modevent, 0 }; #define PFSYNC_MODVER 1 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */ DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); MODULE_VERSION(pfsync, PFSYNC_MODVER); MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER); diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c index 349b10c346a7..e0b664772544 100644 --- a/sys/netpfil/pf/pf.c +++ b/sys/netpfil/pf/pf.c @@ -1,10674 +1,10674 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002 - 2008 Henning Brauer * Copyright (c) 2012 Gleb Smirnoff * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * * $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */ #include #include "opt_bpf.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_pf.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* dummynet */ #include #include #include #include #include #ifdef INET6 #include #include #include #include #include #include #include #endif /* INET6 */ #include #include #include #include #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x SDT_PROVIDER_DEFINE(pf); SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *", "struct pf_kstate *"); SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *", "struct pf_state_key_cmp *", "int", "struct pf_pdesc *", "struct pf_kstate *"); SDT_PROBE_DEFINE2(pf, ip, , bound_iface, "struct pf_kstate *", "struct pfi_kkif *"); SDT_PROBE_DEFINE4(pf, ip, route_to, entry, "struct mbuf *", "struct pf_pdesc *", "struct pf_kstate *", "struct ifnet *"); SDT_PROBE_DEFINE1(pf, ip, route_to, drop, "int"); SDT_PROBE_DEFINE2(pf, ip, route_to, output, "struct ifnet *", "int"); SDT_PROBE_DEFINE4(pf, ip6, route_to, entry, "struct mbuf *", "struct pf_pdesc *", "struct pf_kstate *", "struct ifnet *"); SDT_PROBE_DEFINE1(pf, ip6, route_to, drop, "int"); SDT_PROBE_DEFINE2(pf, ip6, route_to, output, "struct ifnet *", "int"); SDT_PROBE_DEFINE4(pf, sctp, multihome, test, "struct pfi_kkif *", "struct pf_krule *", "struct mbuf *", "int"); SDT_PROBE_DEFINE2(pf, sctp, multihome, add, "uint32_t", "struct pf_sctp_source *"); SDT_PROBE_DEFINE3(pf, sctp, multihome, remove, "uint32_t", "struct pf_kstate *", "struct pf_sctp_source *"); SDT_PROBE_DEFINE4(pf, sctp, multihome_scan, entry, "int", "int", "struct pf_pdesc *", "int"); 
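/* * DTrace SDT probes: the definitions above and below instrument pf's * rule tests, state lookups, route-to handling, SCTP multihome scanning, * Ethernet rule evaluation and state purging. */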
SDT_PROBE_DEFINE2(pf, sctp, multihome_scan, param, "uint16_t", "uint16_t"); SDT_PROBE_DEFINE2(pf, sctp, multihome_scan, ipv4, "struct in_addr *", "int"); SDT_PROBE_DEFINE2(pf, sctp, multihome_scan, ipv6, "struct in_addr6 *", "int"); SDT_PROBE_DEFINE3(pf, eth, test_rule, entry, "int", "struct ifnet *", "struct mbuf *"); SDT_PROBE_DEFINE2(pf, eth, test_rule, test, "int", "struct pf_keth_rule *"); SDT_PROBE_DEFINE3(pf, eth, test_rule, mismatch, "int", "struct pf_keth_rule *", "char *"); SDT_PROBE_DEFINE2(pf, eth, test_rule, match, "int", "struct pf_keth_rule *"); SDT_PROBE_DEFINE2(pf, eth, test_rule, final_match, "int", "struct pf_keth_rule *"); SDT_PROBE_DEFINE2(pf, purge, state, rowcount, "int", "size_t"); /* * Global variables */ /* state tables */ VNET_DEFINE(struct pf_altqqueue, pf_altqs[4]); VNET_DEFINE(struct pf_kpalist, pf_pabuf[3]); VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active); VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_active); VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive); VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_inactive); VNET_DEFINE(struct pf_kstatus, pf_status); VNET_DEFINE(u_int32_t, ticket_altqs_active); VNET_DEFINE(u_int32_t, ticket_altqs_inactive); VNET_DEFINE(int, altqs_inactive_open); VNET_DEFINE(u_int32_t, ticket_pabuf); VNET_DEFINE(SHA512_CTX, pf_tcp_secret_ctx); #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx) VNET_DEFINE(u_char, pf_tcp_secret[16]); #define V_pf_tcp_secret VNET(pf_tcp_secret) VNET_DEFINE(int, pf_tcp_secret_init); #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init) VNET_DEFINE(int, pf_tcp_iss_off); #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off) VNET_DECLARE(int, pf_vnet_active); #define V_pf_vnet_active VNET(pf_vnet_active) VNET_DEFINE_STATIC(uint32_t, pf_purge_idx); #define V_pf_purge_idx VNET(pf_purge_idx) #ifdef PF_WANT_32_TO_64_COUNTER VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter); #define V_pf_counter_periodic_iter VNET(pf_counter_periodic_iter) VNET_DEFINE(struct allrulelist_head, pf_allrulelist); VNET_DEFINE(size_t, pf_allrulecount); VNET_DEFINE(struct pf_krule *, pf_rulemarker); #endif struct pf_sctp_endpoint; RB_HEAD(pf_sctp_endpoints, pf_sctp_endpoint); struct pf_sctp_source { sa_family_t af; struct pf_addr addr; TAILQ_ENTRY(pf_sctp_source) entry; }; TAILQ_HEAD(pf_sctp_sources, pf_sctp_source); struct pf_sctp_endpoint { uint32_t v_tag; struct pf_sctp_sources sources; RB_ENTRY(pf_sctp_endpoint) entry; }; static int pf_sctp_endpoint_compare(struct pf_sctp_endpoint *a, struct pf_sctp_endpoint *b) { return (a->v_tag - b->v_tag); } RB_PROTOTYPE(pf_sctp_endpoints, pf_sctp_endpoint, entry, pf_sctp_endpoint_compare); RB_GENERATE(pf_sctp_endpoints, pf_sctp_endpoint, entry, pf_sctp_endpoint_compare); VNET_DEFINE_STATIC(struct pf_sctp_endpoints, pf_sctp_endpoints); #define V_pf_sctp_endpoints VNET(pf_sctp_endpoints) static struct mtx_padalign pf_sctp_endpoints_mtx; MTX_SYSINIT(pf_sctp_endpoints_mtx, &pf_sctp_endpoints_mtx, "SCTP endpoints", MTX_DEF); #define PF_SCTP_ENDPOINTS_LOCK() mtx_lock(&pf_sctp_endpoints_mtx) #define PF_SCTP_ENDPOINTS_UNLOCK() mtx_unlock(&pf_sctp_endpoints_mtx) /* * Queue for pf_intr() sends. 
*/ static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations"); struct pf_send_entry { STAILQ_ENTRY(pf_send_entry) pfse_next; struct mbuf *pfse_m; enum { PFSE_IP, PFSE_IP6, PFSE_ICMP, PFSE_ICMP6, } pfse_type; struct { int type; int code; int mtu; } icmpopts; }; STAILQ_HEAD(pf_send_head, pf_send_entry); VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue); #define V_pf_sendqueue VNET(pf_sendqueue) static struct mtx_padalign pf_sendqueue_mtx; MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF); #define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx) #define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx) /* * Queue for pf_overload_task() tasks. */ struct pf_overload_entry { SLIST_ENTRY(pf_overload_entry) next; struct pf_addr addr; sa_family_t af; uint8_t dir; struct pf_krule *rule; }; SLIST_HEAD(pf_overload_head, pf_overload_entry); VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue); #define V_pf_overloadqueue VNET(pf_overloadqueue) VNET_DEFINE_STATIC(struct task, pf_overloadtask); #define V_pf_overloadtask VNET(pf_overloadtask) static struct mtx_padalign pf_overloadqueue_mtx; MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx, "pf overload/flush queue", MTX_DEF); #define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx) #define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx) VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules); struct mtx_padalign pf_unlnkdrules_mtx; MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules", MTX_DEF); struct sx pf_config_lock; SX_SYSINIT(pf_config_lock, &pf_config_lock, "pf config"); struct mtx_padalign pf_table_stats_lock; MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats", MTX_DEF); VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z); #define V_pf_sources_z VNET(pf_sources_z) uma_zone_t pf_mtag_z; VNET_DEFINE(uma_zone_t, pf_state_z); VNET_DEFINE(uma_zone_t, pf_state_key_z); VNET_DEFINE(uma_zone_t, pf_udp_mapping_z); VNET_DEFINE(struct unrhdr64, pf_stateid); static void pf_src_tree_remove_state(struct pf_kstate *); static void pf_init_threshold(struct pf_threshold *, u_int32_t, u_int32_t); static void pf_add_threshold(struct pf_threshold *); static int pf_check_threshold(struct pf_threshold *); static void pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *, u_int16_t *, u_int16_t *, struct pf_addr *, u_int16_t, u_int8_t, sa_family_t, sa_family_t); static int pf_modulate_sack(struct pf_pdesc *, struct tcphdr *, struct pf_state_peer *); int pf_icmp_mapping(struct pf_pdesc *, u_int8_t, int *, u_int16_t *, u_int16_t *); static void pf_change_icmp(struct pf_addr *, u_int16_t *, struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t *, u_int16_t *, u_int16_t *, u_int16_t *, u_int8_t, sa_family_t); int pf_change_icmp_af(struct mbuf *, int, struct pf_pdesc *, struct pf_pdesc *, struct pf_addr *, struct pf_addr *, sa_family_t, sa_family_t); int pf_translate_icmp_af(int, void *); static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, sa_family_t, struct pf_krule *, int); static void pf_detach_state(struct pf_kstate *); static int pf_state_key_attach(struct pf_state_key *, struct pf_state_key *, struct pf_kstate *); static void pf_state_key_detach(struct pf_kstate *, int); static int pf_state_key_ctor(void *, int, void *, int); static u_int32_t pf_tcp_iss(struct pf_pdesc *); static __inline void pf_dummynet_flag_remove(struct mbuf *m, struct pf_mtag *pf_mtag); static int pf_dummynet(struct pf_pdesc *, struct pf_kstate *, struct pf_krule *, struct mbuf **); static int 
pf_dummynet_route(struct pf_pdesc *, struct pf_kstate *, struct pf_krule *, struct ifnet *, const struct sockaddr *, struct mbuf **); static int pf_test_eth_rule(int, struct pfi_kkif *, struct mbuf **); static int pf_test_rule(struct pf_krule **, struct pf_kstate **, struct pf_pdesc *, struct pf_krule **, struct pf_kruleset **, struct inpcb *); static int pf_create_state(struct pf_krule *, struct pf_krule *, struct pf_krule *, struct pf_pdesc *, struct pf_state_key *, struct pf_state_key *, int *, struct pf_kstate **, int, u_int16_t, u_int16_t, struct pf_krule_slist *, struct pf_udp_mapping *); static int pf_state_key_addr_setup(struct pf_pdesc *, struct pf_state_key_cmp *, int); static int pf_tcp_track_full(struct pf_kstate **, struct pf_pdesc *, u_short *, int *, struct pf_state_peer *, struct pf_state_peer *, u_int8_t, u_int8_t); static int pf_tcp_track_sloppy(struct pf_kstate **, struct pf_pdesc *, u_short *, struct pf_state_peer *, struct pf_state_peer *, u_int8_t, u_int8_t); static int pf_test_state(struct pf_kstate **, struct pf_pdesc *, u_short *); int pf_icmp_state_lookup(struct pf_state_key_cmp *, struct pf_pdesc *, struct pf_kstate **, u_int16_t, u_int16_t, int, int *, int, int); static int pf_test_state_icmp(struct pf_kstate **, struct pf_pdesc *, u_short *); static int pf_sctp_track(struct pf_kstate *, struct pf_pdesc *, u_short *); static void pf_sctp_multihome_detach_addr(const struct pf_kstate *); static void pf_sctp_multihome_delayed(struct pf_pdesc *, struct pfi_kkif *, struct pf_kstate *, int); static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, int, u_int16_t); static int pf_check_proto_cksum(struct mbuf *, int, int, u_int8_t, sa_family_t); static int pf_walk_option6(struct pf_pdesc *, struct ip6_hdr *, int, int, u_short *); static int pf_walk_header6(struct pf_pdesc *, struct ip6_hdr *, u_short *); static void pf_print_state_parts(struct pf_kstate *, struct pf_state_key *, struct pf_state_key *); static void pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t, bool, u_int8_t); static struct pf_kstate *pf_find_state(struct pfi_kkif *, const struct pf_state_key_cmp *, u_int); static bool pf_src_connlimit(struct pf_kstate *); static int pf_match_rcvif(struct mbuf *, struct pf_krule *); static void pf_counters_inc(int, struct pf_pdesc *, struct pf_kstate *, struct pf_krule *, struct pf_krule *); static void pf_log_matches(struct pf_pdesc *, struct pf_krule *, struct pf_krule *, struct pf_kruleset *, struct pf_krule_slist *); static void pf_overload_task(void *v, int pending); static u_short pf_insert_src_node(struct pf_ksrc_node *[PF_SN_MAX], struct pf_srchash *[PF_SN_MAX], struct pf_krule *, struct pf_addr *, sa_family_t, struct pf_addr *, struct pfi_kkif *, pf_sn_types_t); static u_int pf_purge_expired_states(u_int, int); static void pf_purge_unlinked_rules(void); static int pf_mtag_uminit(void *, int, int); static void pf_mtag_free(struct m_tag *); static void pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *, int, struct pf_state_key *); #ifdef INET static void pf_route(struct mbuf **, struct pf_krule *, struct ifnet *, struct pf_kstate *, struct pf_pdesc *, struct inpcb *); #endif /* INET */ #ifdef INET6 static void pf_change_a6(struct pf_addr *, u_int16_t *, struct pf_addr *, u_int8_t); static void pf_route6(struct mbuf **, struct pf_krule *, struct ifnet *, struct pf_kstate *, struct pf_pdesc *, struct inpcb *); #endif /* INET6 */ static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t); int in4_cksum(struct mbuf *m, u_int8_t 
nxt, int off, int len); extern int pf_end_threads; extern struct proc *pf_purge_proc; VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]); #define PACKET_UNDO_NAT(_m, _pd, _off, _s) \ do { \ struct pf_state_key *nk; \ if ((pd->dir) == PF_OUT) \ nk = (_s)->key[PF_SK_STACK]; \ else \ nk = (_s)->key[PF_SK_WIRE]; \ pf_packet_rework_nat(_m, _pd, _off, nk); \ } while (0) #define PACKET_LOOPED(pd) ((pd)->pf_mtag && \ (pd)->pf_mtag->flags & PF_MTAG_FLAG_PACKET_LOOPED) #define STATE_LOOKUP(k, s, pd) \ do { \ (s) = pf_find_state((pd->kif), (k), (pd->dir)); \ SDT_PROBE5(pf, ip, state, lookup, pd->kif, k, (pd->dir), pd, (s)); \ if ((s) == NULL) \ return (PF_DROP); \ if (PACKET_LOOPED(pd)) \ return (PF_PASS); \ } while (0) static struct pfi_kkif * BOUND_IFACE(struct pf_kstate *st, struct pf_pdesc *pd) { struct pfi_kkif *k = pd->kif; SDT_PROBE2(pf, ip, , bound_iface, st, k); /* Floating unless otherwise specified. */ if (! (st->rule->rule_flag & PFRULE_IFBOUND)) return (V_pfi_all); /* * Initially set to all, because we don't know what interface we'll be * sending this out when we create the state. */ if (st->rule->rt == PF_REPLYTO || (pd->af != pd->naf && st->direction == PF_IN)) return (V_pfi_all); /* * If this state is created based on another state (e.g. SCTP * multihome) always set it floating initially. We can't know for sure * what interface the actual traffic for this state will come in on. */ if (pd->related_rule) return (V_pfi_all); /* Don't overrule the interface for states created on incoming packets. */ if (st->direction == PF_IN) return (k); /* No route-to, so don't overrule. */ if (st->act.rt != PF_ROUTETO) return (k); /* Bind to the route-to interface. */ return (st->act.rt_kif); } #define STATE_INC_COUNTERS(s) \ do { \ struct pf_krule_item *mrm; \ counter_u64_add(s->rule->states_cur, 1); \ counter_u64_add(s->rule->states_tot, 1); \ if (s->anchor != NULL) { \ counter_u64_add(s->anchor->states_cur, 1); \ counter_u64_add(s->anchor->states_tot, 1); \ } \ if (s->nat_rule != NULL) { \ counter_u64_add(s->nat_rule->states_cur, 1);\ counter_u64_add(s->nat_rule->states_tot, 1);\ } \ SLIST_FOREACH(mrm, &s->match_rules, entry) { \ counter_u64_add(mrm->r->states_cur, 1); \ counter_u64_add(mrm->r->states_tot, 1); \ } \ } while (0) #define STATE_DEC_COUNTERS(s) \ do { \ struct pf_krule_item *mrm; \ if (s->nat_rule != NULL) \ counter_u64_add(s->nat_rule->states_cur, -1);\ if (s->anchor != NULL) \ counter_u64_add(s->anchor->states_cur, -1); \ counter_u64_add(s->rule->states_cur, -1); \ SLIST_FOREACH(mrm, &s->match_rules, entry) \ counter_u64_add(mrm->r->states_cur, -1); \ } while (0) MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures"); MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items"); VNET_DEFINE(struct pf_keyhash *, pf_keyhash); VNET_DEFINE(struct pf_idhash *, pf_idhash); VNET_DEFINE(struct pf_srchash *, pf_srchash); VNET_DEFINE(struct pf_udpendpointhash *, pf_udpendpointhash); VNET_DEFINE(struct pf_udpendpointmapping *, pf_udpendpointmapping); SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "pf(4)"); VNET_DEFINE(u_long, pf_hashmask); VNET_DEFINE(u_long, pf_srchashmask); VNET_DEFINE(u_long, pf_udpendpointhashmask); VNET_DEFINE_STATIC(u_long, pf_hashsize); #define V_pf_hashsize VNET(pf_hashsize) VNET_DEFINE_STATIC(u_long, pf_srchashsize); #define V_pf_srchashsize VNET(pf_srchashsize) VNET_DEFINE_STATIC(u_long, pf_udpendpointhashsize); #define V_pf_udpendpointhashsize VNET(pf_udpendpointhashsize) u_long pf_ioctl_maxcount = 65535; SYSCTL_ULONG(_net_pf, 
OID_AUTO, states_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable"); SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable"); SYSCTL_ULONG(_net_pf, OID_AUTO, udpendpoint_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(pf_udpendpointhashsize), 0, "Size of pf(4) endpoint hashtable"); SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN, &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call"); VNET_DEFINE(void *, pf_swi_cookie); VNET_DEFINE(struct intr_event *, pf_swi_ie); VNET_DEFINE(uint32_t, pf_hashseed); #define V_pf_hashseed VNET(pf_hashseed) static void pf_sctp_checksum(struct mbuf *m, int off) { uint32_t sum = 0; /* Zero out the checksum, to enable recalculation. */ m_copyback(m, off + offsetof(struct sctphdr, checksum), sizeof(sum), (caddr_t)&sum); sum = sctp_calculate_cksum(m, off); m_copyback(m, off + offsetof(struct sctphdr, checksum), sizeof(sum), (caddr_t)&sum); } int pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: if (a->addr32[0] > b->addr32[0]) return (1); if (a->addr32[0] < b->addr32[0]) return (-1); break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (a->addr32[3] > b->addr32[3]) return (1); if (a->addr32[3] < b->addr32[3]) return (-1); if (a->addr32[2] > b->addr32[2]) return (1); if (a->addr32[2] < b->addr32[2]) return (-1); if (a->addr32[1] > b->addr32[1]) return (1); if (a->addr32[1] < b->addr32[1]) return (-1); if (a->addr32[0] > b->addr32[0]) return (1); if (a->addr32[0] < b->addr32[0]) return (-1); break; #endif /* INET6 */ default: unhandled_af(af); } return (0); } static bool pf_is_loopback(sa_family_t af, struct pf_addr *addr) { switch (af) { #ifdef INET case AF_INET: return IN_LOOPBACK(ntohl(addr->v4.s_addr)); #endif case AF_INET6: return IN6_IS_ADDR_LOOPBACK(&addr->v6); default: unhandled_af(af); } } static void pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off, struct pf_state_key *nk) { switch (pd->proto) { case IPPROTO_TCP: { struct tcphdr *th = &pd->hdr.tcp; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 0, pd->af, pd->naf); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum, &th->th_sum, &nk->addr[pd->didx], nk->port[pd->didx], 0, pd->af, pd->naf); m_copyback(m, off, sizeof(*th), (caddr_t)th); break; } case IPPROTO_UDP: { struct udphdr *uh = &pd->hdr.udp; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum, &uh->uh_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, pd->af, pd->naf); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum, &uh->uh_sum, &nk->addr[pd->didx], nk->port[pd->didx], 1, pd->af, pd->naf); m_copyback(m, off, sizeof(*uh), (caddr_t)uh); break; } case IPPROTO_SCTP: { struct sctphdr *sh = &pd->hdr.sctp; uint16_t checksum = 0; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) { pf_change_ap(m, pd->src, &sh->src_port, pd->ip_sum, &checksum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, pd->af, pd->naf); } if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) { pf_change_ap(m, pd->dst, &sh->dest_port, pd->ip_sum, &checksum, &nk->addr[pd->didx], nk->port[pd->didx], 1, pd->af, pd->naf); } break; } case IPPROTO_ICMP: { 
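/* * ICMP has no ports; the query id is tracked in the port slot of the * state key, so rewrite the id and incrementally fix up the ICMP * checksum before falling through to the address rewrite below. */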
struct icmp *ih = &pd->hdr.icmp; if (nk->port[pd->sidx] != ih->icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( ih->icmp_cksum, ih->icmp_id, nk->port[pd->sidx], 0); ih->icmp_id = nk->port[pd->sidx]; pd->sport = &ih->icmp_id; m_copyback(m, off, ICMP_MINLEN, (caddr_t)ih); } /* FALLTHROUGH */ } default: if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) { switch (pd->af) { case AF_INET: pf_change_a(&pd->src->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); break; case AF_INET6: PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); break; default: unhandled_af(pd->af); } } if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) { switch (pd->af) { case AF_INET: pf_change_a(&pd->dst->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); break; case AF_INET6: PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); break; default: unhandled_af(pd->af); } } break; } } static __inline uint32_t pf_hashkey(const struct pf_state_key *sk) { uint32_t h; h = murmur3_32_hash32((const uint32_t *)sk, sizeof(struct pf_state_key_cmp)/sizeof(uint32_t), V_pf_hashseed); return (h & V_pf_hashmask); } __inline uint32_t pf_hashsrc(struct pf_addr *addr, sa_family_t af) { uint32_t h; switch (af) { case AF_INET: h = murmur3_32_hash32((uint32_t *)&addr->v4, sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed); break; case AF_INET6: h = murmur3_32_hash32((uint32_t *)&addr->v6, sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed); break; default: unhandled_af(af); } return (h & V_pf_srchashmask); } static inline uint32_t pf_hashudpendpoint(struct pf_udp_endpoint *endpoint) { uint32_t h; h = murmur3_32_hash32((uint32_t *)endpoint, sizeof(struct pf_udp_endpoint_cmp)/sizeof(uint32_t), V_pf_hashseed); return (h & V_pf_udpendpointhashmask); } #ifdef ALTQ static int pf_state_hash(struct pf_kstate *s) { u_int32_t hv = (intptr_t)s / sizeof(*s); hv ^= crc32(&s->src, sizeof(s->src)); hv ^= crc32(&s->dst, sizeof(s->dst)); if (hv == 0) hv = 1; return (hv); } #endif static __inline void pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate) { if (which == PF_PEER_DST || which == PF_PEER_BOTH) s->dst.state = newstate; if (which == PF_PEER_DST) return; if (s->src.state == newstate) return; if (s->creatorid == V_pf_status.hostid && s->key[PF_SK_STACK] != NULL && s->key[PF_SK_STACK]->proto == IPPROTO_TCP && !(TCPS_HAVEESTABLISHED(s->src.state) || s->src.state == TCPS_CLOSED) && (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED)) atomic_add_32(&V_pf_status.states_halfopen, -1); s->src.state = newstate; } #ifdef INET6 void pf_addrcpy(struct pf_addr *dst, const struct pf_addr *src, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: memcpy(&dst->v4, &src->v4, sizeof(dst->v4)); break; #endif /* INET */ case AF_INET6: memcpy(&dst->v6, &src->v6, sizeof(dst->v6)); break; } } #endif /* INET6 */ static void pf_init_threshold(struct pf_threshold *threshold, u_int32_t limit, u_int32_t seconds) { threshold->limit = limit * PF_THRESHOLD_MULT; threshold->seconds = seconds; threshold->count = 0; threshold->last = time_uptime; } static void pf_add_threshold(struct pf_threshold *threshold) { u_int32_t t = time_uptime, diff = t - threshold->last; if (diff >= threshold->seconds) threshold->count = 0; else threshold->count -= threshold->count * diff / threshold->seconds; threshold->count += PF_THRESHOLD_MULT; threshold->last = t; } static int pf_check_threshold(struct pf_threshold *threshold) { return (threshold->count > threshold->limit); } static bool pf_src_connlimit(struct pf_kstate *state) { struct pf_overload_entry *pfoe; struct 
pf_ksrc_node *src_node = state->sns[PF_SN_LIMIT]; bool limited = false; PF_STATE_LOCK_ASSERT(state); PF_SRC_NODE_LOCK(src_node); src_node->conn++; state->src.tcp_est = 1; pf_add_threshold(&src_node->conn_rate); if (state->rule->max_src_conn && state->rule->max_src_conn < src_node->conn) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1); limited = true; } if (state->rule->max_src_conn_rate.limit && pf_check_threshold(&src_node->conn_rate)) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1); limited = true; } if (!limited) goto done; /* Kill this state. */ state->timeout = PFTM_PURGE; pf_set_protostate(state, PF_PEER_BOTH, TCPS_CLOSED); if (state->rule->overload_tbl == NULL) goto done; /* Schedule overloading and flushing task. */ pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT); if (pfoe == NULL) goto done; /* too bad :( */ bcopy(&src_node->addr, &pfoe->addr, sizeof(pfoe->addr)); pfoe->af = state->key[PF_SK_WIRE]->af; pfoe->rule = state->rule; pfoe->dir = state->direction; PF_OVERLOADQ_LOCK(); SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next); PF_OVERLOADQ_UNLOCK(); taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask); done: PF_SRC_NODE_UNLOCK(src_node); return (limited); } static void pf_overload_task(void *v, int pending) { struct pf_overload_head queue; struct pfr_addr p; struct pf_overload_entry *pfoe, *pfoe1; uint32_t killed = 0; CURVNET_SET((struct vnet *)v); PF_OVERLOADQ_LOCK(); queue = V_pf_overloadqueue; SLIST_INIT(&V_pf_overloadqueue); PF_OVERLOADQ_UNLOCK(); bzero(&p, sizeof(p)); SLIST_FOREACH(pfoe, &queue, next) { counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1); if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("%s: blocking address ", __func__); pf_print_host(&pfoe->addr, 0, pfoe->af); printf("\n"); } p.pfra_af = pfoe->af; switch (pfoe->af) { #ifdef INET case AF_INET: p.pfra_net = 32; p.pfra_ip4addr = pfoe->addr.v4; break; #endif #ifdef INET6 case AF_INET6: p.pfra_net = 128; p.pfra_ip6addr = pfoe->addr.v6; break; #endif default: unhandled_af(pfoe->af); } PF_RULES_WLOCK(); pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second); PF_RULES_WUNLOCK(); } /* * Remove those entries that don't need flushing. */ SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1) if (pfoe->rule->flush == 0) { SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next); free(pfoe, M_PFTEMP); } else counter_u64_add( V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1); /* If nothing to flush, return. */ if (SLIST_EMPTY(&queue)) { CURVNET_RESTORE(); return; } for (int i = 0; i <= V_pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_state_key *sk; struct pf_kstate *s; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { sk = s->key[PF_SK_WIRE]; SLIST_FOREACH(pfoe, &queue, next) if (sk->af == pfoe->af && ((pfoe->rule->flush & PF_FLUSH_GLOBAL) || pfoe->rule == s->rule) && ((pfoe->dir == PF_OUT && PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) || (pfoe->dir == PF_IN && PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) { s->timeout = PFTM_PURGE; pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED); killed++; } } PF_HASHROW_UNLOCK(ih); } SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1) free(pfoe, M_PFTEMP); if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: %u states killed\n", __func__, killed); CURVNET_RESTORE(); } /* * If a node is found, it is always returned locked. If it is not found, locking is configurable.
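* The returnlocked argument selects whether the hash row lock is kept on a miss, so callers such as pf_insert_src_node() can insert a new node atomically.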
*/ struct pf_ksrc_node * pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af, struct pf_srchash **sh, pf_sn_types_t sn_type, bool returnlocked) { struct pf_ksrc_node *n; counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1); *sh = &V_pf_srchash[pf_hashsrc(src, af)]; PF_HASHROW_LOCK(*sh); LIST_FOREACH(n, &(*sh)->nodes, entry) if (n->rule == rule && n->af == af && n->type == sn_type && ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) || (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0))) break; if (n == NULL && !returnlocked) PF_HASHROW_UNLOCK(*sh); return (n); } bool pf_src_node_exists(struct pf_ksrc_node **sn, struct pf_srchash *sh) { struct pf_ksrc_node *cur; if ((*sn) == NULL) return (false); KASSERT(sh != NULL, ("%s: sh is NULL", __func__)); counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1); PF_HASHROW_LOCK(sh); LIST_FOREACH(cur, &(sh->nodes), entry) { if (cur == (*sn) && cur->expire != 1) /* Ignore nodes being killed */ return (true); } PF_HASHROW_UNLOCK(sh); (*sn) = NULL; return (false); } static void pf_free_src_node(struct pf_ksrc_node *sn) { for (int i = 0; i < 2; i++) { counter_u64_free(sn->bytes[i]); counter_u64_free(sn->packets[i]); } uma_zfree(V_pf_sources_z, sn); } static u_short pf_insert_src_node(struct pf_ksrc_node *sns[PF_SN_MAX], struct pf_srchash *snhs[PF_SN_MAX], struct pf_krule *rule, struct pf_addr *src, sa_family_t af, struct pf_addr *raddr, struct pfi_kkif *rkif, pf_sn_types_t sn_type) { u_short reason = 0; struct pf_krule *r_track = rule; struct pf_ksrc_node **sn = &(sns[sn_type]); struct pf_srchash **sh = &(snhs[sn_type]); KASSERT(sn_type != PF_SN_LIMIT || (raddr == NULL && rkif == NULL), ("%s: raddr and rkif must be NULL for PF_SN_LIMIT", __func__)); KASSERT(sn_type != PF_SN_LIMIT || (rule->rule_flag & PFRULE_SRCTRACK), ("%s: PF_SN_LIMIT only valid for rules with PFRULE_SRCTRACK", __func__)); /* * XXX: There could be a KASSERT for * sn_type == PF_SN_LIMIT || (pool->opts & PF_POOL_STICKYADDR) * but we'd need to pass pool *only* for this KASSERT. */ if ( (rule->rule_flag & PFRULE_SRCTRACK) && !(rule->rule_flag & PFRULE_RULESRCTRACK)) r_track = &V_pf_default_rule; /* * Request the sh to always be locked, as we might insert a new sn. 
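* Holding the row lock from the lookup through the insertion keeps the find-or-create sequence atomic.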
*/ if (*sn == NULL) *sn = pf_find_src_node(src, r_track, af, sh, sn_type, true); if (*sn == NULL) { PF_HASHROW_ASSERT(*sh); if (sn_type == PF_SN_LIMIT && rule->max_src_nodes && counter_u64_fetch(r_track->src_nodes[sn_type]) >= rule->max_src_nodes) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES], 1); reason = PFRES_SRCLIMIT; goto done; } (*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO); if ((*sn) == NULL) { reason = PFRES_MEMORY; goto done; } for (int i = 0; i < 2; i++) { (*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT); (*sn)->packets[i] = counter_u64_alloc(M_NOWAIT); if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) { pf_free_src_node(*sn); reason = PFRES_MEMORY; goto done; } } if (sn_type == PF_SN_LIMIT) pf_init_threshold(&(*sn)->conn_rate, rule->max_src_conn_rate.limit, rule->max_src_conn_rate.seconds); MPASS((*sn)->lock == NULL); (*sn)->lock = &(*sh)->lock; (*sn)->af = af; (*sn)->rule = r_track; PF_ACPY(&(*sn)->addr, src, af); if (raddr != NULL) PF_ACPY(&(*sn)->raddr, raddr, af); (*sn)->rkif = rkif; LIST_INSERT_HEAD(&(*sh)->nodes, *sn, entry); (*sn)->creation = time_uptime; (*sn)->ruletype = rule->action; (*sn)->type = sn_type; counter_u64_add(r_track->src_nodes[sn_type], 1); counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1); } else { if (sn_type == PF_SN_LIMIT && rule->max_src_states && (*sn)->states >= rule->max_src_states) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES], 1); reason = PFRES_SRCLIMIT; goto done; } } done: if (reason == 0) (*sn)->states++; else (*sn) = NULL; PF_HASHROW_UNLOCK(*sh); return (reason); } void pf_unlink_src_node(struct pf_ksrc_node *src) { PF_SRC_NODE_LOCK_ASSERT(src); LIST_REMOVE(src, entry); if (src->rule) counter_u64_add(src->rule->src_nodes[src->type], -1); } u_int pf_free_src_nodes(struct pf_ksrc_node_list *head) { struct pf_ksrc_node *sn, *tmp; u_int count = 0; LIST_FOREACH_SAFE(sn, head, entry, tmp) { pf_free_src_node(sn); count++; } counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count); return (count); } void pf_mtag_initialize(void) { pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) + sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL, UMA_ALIGN_PTR, 0); } /* Per-vnet data storage structures initialization. */ void pf_initialize(void) { struct pf_keyhash *kh; struct pf_idhash *ih; struct pf_srchash *sh; struct pf_udpendpointhash *uh; u_int i; if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize)) V_pf_hashsize = PF_HASHSIZ; if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize)) V_pf_srchashsize = PF_SRCHASHSIZ; if (V_pf_udpendpointhashsize == 0 || !powerof2(V_pf_udpendpointhashsize)) V_pf_udpendpointhashsize = PF_UDPENDHASHSIZ; V_pf_hashseed = arc4random(); /* States and state keys storage. 
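* States and keys are allocated from dedicated UMA zones; the hash tables below fall back to the compiled-in default sizes when the tuned sizes cannot be allocated with M_NOWAIT.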
*/ V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z; uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT); uma_zone_set_warning(V_pf_state_z, "PF states limit reached"); V_pf_state_key_z = uma_zcreate("pf state keys", sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_keyhash = mallocarray(V_pf_hashsize, sizeof(struct pf_keyhash), M_PFHASH, M_NOWAIT | M_ZERO); V_pf_idhash = mallocarray(V_pf_hashsize, sizeof(struct pf_idhash), M_PFHASH, M_NOWAIT | M_ZERO); if (V_pf_keyhash == NULL || V_pf_idhash == NULL) { printf("pf: Unable to allocate memory for " "state_hashsize %lu.\n", V_pf_hashsize); free(V_pf_keyhash, M_PFHASH); free(V_pf_idhash, M_PFHASH); V_pf_hashsize = PF_HASHSIZ; V_pf_keyhash = mallocarray(V_pf_hashsize, sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO); V_pf_idhash = mallocarray(V_pf_hashsize, sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO); } V_pf_hashmask = V_pf_hashsize - 1; for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; i++, kh++, ih++) { mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF); } /* Source nodes. */ V_pf_sources_z = uma_zcreate("pf source nodes", sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z; uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT); uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached"); V_pf_srchash = mallocarray(V_pf_srchashsize, sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO); if (V_pf_srchash == NULL) { printf("pf: Unable to allocate memory for " "source_hashsize %lu.\n", V_pf_srchashsize); V_pf_srchashsize = PF_SRCHASHSIZ; V_pf_srchash = mallocarray(V_pf_srchashsize, sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO); } V_pf_srchashmask = V_pf_srchashsize - 1; for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF); /* UDP endpoint mappings. */ V_pf_udp_mapping_z = uma_zcreate("pf UDP mappings", sizeof(struct pf_udp_mapping), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize, sizeof(struct pf_udpendpointhash), M_PFHASH, M_NOWAIT | M_ZERO); if (V_pf_udpendpointhash == NULL) { printf("pf: Unable to allocate memory for " "udpendpoint_hashsize %lu.\n", V_pf_udpendpointhashsize); V_pf_udpendpointhashsize = PF_UDPENDHASHSIZ; V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize, sizeof(struct pf_udpendpointhash), M_PFHASH, M_WAITOK | M_ZERO); } V_pf_udpendpointhashmask = V_pf_udpendpointhashsize - 1; for (i = 0, uh = V_pf_udpendpointhash; i <= V_pf_udpendpointhashmask; i++, uh++) { mtx_init(&uh->lock, "pf_udpendpointhash", NULL, MTX_DEF | MTX_DUPOK); } /* ALTQ */ TAILQ_INIT(&V_pf_altqs[0]); TAILQ_INIT(&V_pf_altqs[1]); TAILQ_INIT(&V_pf_altqs[2]); TAILQ_INIT(&V_pf_altqs[3]); TAILQ_INIT(&V_pf_pabuf[0]); TAILQ_INIT(&V_pf_pabuf[1]); TAILQ_INIT(&V_pf_pabuf[2]); V_pf_altqs_active = &V_pf_altqs[0]; V_pf_altq_ifs_active = &V_pf_altqs[1]; V_pf_altqs_inactive = &V_pf_altqs[2]; V_pf_altq_ifs_inactive = &V_pf_altqs[3]; /* Send & overload+flush queues. */ STAILQ_INIT(&V_pf_sendqueue); SLIST_INIT(&V_pf_overloadqueue); TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet); /* Unlinked, but may be referenced rules. 
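* Rules removed from the ruleset stay on this queue while states still reference them; pf_purge_unlinked_rules() frees them once those references are gone.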
*/ TAILQ_INIT(&V_pf_unlinked_rules); } void pf_mtag_cleanup(void) { uma_zdestroy(pf_mtag_z); } void pf_cleanup(void) { struct pf_keyhash *kh; struct pf_idhash *ih; struct pf_srchash *sh; struct pf_udpendpointhash *uh; struct pf_send_entry *pfse, *next; u_int i; for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; i++, kh++, ih++) { KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty", __func__)); KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty", __func__)); mtx_destroy(&kh->lock); mtx_destroy(&ih->lock); } free(V_pf_keyhash, M_PFHASH); free(V_pf_idhash, M_PFHASH); for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { KASSERT(LIST_EMPTY(&sh->nodes), ("%s: source node hash not empty", __func__)); mtx_destroy(&sh->lock); } free(V_pf_srchash, M_PFHASH); for (i = 0, uh = V_pf_udpendpointhash; i <= V_pf_udpendpointhashmask; i++, uh++) { KASSERT(LIST_EMPTY(&uh->endpoints), ("%s: udp endpoint hash not empty", __func__)); mtx_destroy(&uh->lock); } free(V_pf_udpendpointhash, M_PFHASH); STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) { m_freem(pfse->pfse_m); free(pfse, M_PFTEMP); } MPASS(RB_EMPTY(&V_pf_sctp_endpoints)); uma_zdestroy(V_pf_sources_z); uma_zdestroy(V_pf_state_z); uma_zdestroy(V_pf_state_key_z); uma_zdestroy(V_pf_udp_mapping_z); } static int pf_mtag_uminit(void *mem, int size, int how) { struct m_tag *t; t = (struct m_tag *)mem; t->m_tag_cookie = MTAG_ABI_COMPAT; t->m_tag_id = PACKET_TAG_PF; t->m_tag_len = sizeof(struct pf_mtag); t->m_tag_free = pf_mtag_free; return (0); } static void pf_mtag_free(struct m_tag *t) { uma_zfree(pf_mtag_z, t); } struct pf_mtag * pf_get_mtag(struct mbuf *m) { struct m_tag *mtag; if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL) return ((struct pf_mtag *)(mtag + 1)); mtag = uma_zalloc(pf_mtag_z, M_NOWAIT); if (mtag == NULL) return (NULL); bzero(mtag + 1, sizeof(struct pf_mtag)); m_tag_prepend(m, mtag); return ((struct pf_mtag *)(mtag + 1)); } static int pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s) { struct pf_keyhash *khs, *khw, *kh; struct pf_state_key *sk, *cur; struct pf_kstate *si, *olds = NULL; int idx; NET_EPOCH_ASSERT(); KASSERT(s->refs == 0, ("%s: state not pristine", __func__)); KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__)); KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__)); /* * We need to lock hash slots of both keys. To avoid deadlock * we always lock the slot with lower address first. Unlock order * isn't important. * * We also need to lock ID hash slot before dropping key * locks. On success we return with ID hash slot locked. */ if (skw == sks) { khs = khw = &V_pf_keyhash[pf_hashkey(skw)]; PF_HASHROW_LOCK(khs); } else { khs = &V_pf_keyhash[pf_hashkey(sks)]; khw = &V_pf_keyhash[pf_hashkey(skw)]; if (khs == khw) { PF_HASHROW_LOCK(khs); } else if (khs < khw) { PF_HASHROW_LOCK(khs); PF_HASHROW_LOCK(khw); } else { PF_HASHROW_LOCK(khw); PF_HASHROW_LOCK(khs); } } #define KEYS_UNLOCK() do { \ if (khs != khw) { \ PF_HASHROW_UNLOCK(khs); \ PF_HASHROW_UNLOCK(khw); \ } else \ PF_HASHROW_UNLOCK(khs); \ } while (0) /* * First run: start with wire key. */ sk = skw; kh = khw; idx = PF_SK_WIRE; MPASS(s->lock == NULL); s->lock = &V_pf_idhash[PF_IDHASH(s)].lock; keyattach: LIST_FOREACH(cur, &kh->keys, entry) if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0) break; if (cur != NULL) { /* Key exists. Check for same kif, if none, add to key. 
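* An existing state on the same kif with a matching direction (or the reversed direction for an af-to state) is a collision, except for the FIN_WAIT_2 workaround below.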
TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) { struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)]; PF_HASHROW_LOCK(ih); if (si->kif == s->kif && ((si->key[PF_SK_WIRE]->af == sk->af && si->direction == s->direction) || (si->key[PF_SK_WIRE]->af != si->key[PF_SK_STACK]->af && sk->af == si->key[PF_SK_STACK]->af && si->direction != s->direction))) { if (sk->proto == IPPROTO_TCP && si->src.state >= TCPS_FIN_WAIT_2 && si->dst.state >= TCPS_FIN_WAIT_2) { /* * New state matches an old >FIN_WAIT_2 * state. We can't drop key hash locks, * thus we can't unlink it properly. * * As a workaround we drop it into * TCPS_CLOSED state, schedule purge * ASAP and push it into the very end * of the slot TAILQ, so that it won't * conflict with our new state. */ pf_set_protostate(si, PF_PEER_BOTH, TCPS_CLOSED); si->timeout = PFTM_PURGE; olds = si; } else { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: %s key attach " "failed on %s: ", (idx == PF_SK_WIRE) ? "wire" : "stack", s->kif->pfik_name); pf_print_state_parts(s, (idx == PF_SK_WIRE) ? sk : NULL, (idx == PF_SK_STACK) ? sk : NULL); printf(", existing: "); pf_print_state_parts(si, (idx == PF_SK_WIRE) ? sk : NULL, (idx == PF_SK_STACK) ? sk : NULL); printf("\n"); } s->timeout = PFTM_UNLINKED; PF_HASHROW_UNLOCK(ih); KEYS_UNLOCK(); if (idx == PF_SK_WIRE) { uma_zfree(V_pf_state_key_z, skw); if (skw != sks) uma_zfree(V_pf_state_key_z, sks); } else { pf_detach_state(s); } return (EEXIST); /* collision! */ } } PF_HASHROW_UNLOCK(ih); } uma_zfree(V_pf_state_key_z, sk); s->key[idx] = cur; } else { LIST_INSERT_HEAD(&kh->keys, sk, entry); s->key[idx] = sk; } stateattach: /* List is sorted, if-bound states before floating. */ if (s->kif == V_pfi_all) TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]); else TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]); if (olds) { TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]); TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds, key_list[idx]); olds = NULL; } /* * Attach done. Now decide whether (and how) a second key * should be attached. */ if (sks == skw) { s->key[PF_SK_STACK] = s->key[PF_SK_WIRE]; idx = PF_SK_STACK; sks = NULL; goto stateattach; } else if (sks != NULL) { /* * Continue attaching with stack key. */ sk = sks; kh = khs; idx = PF_SK_STACK; sks = NULL; goto keyattach; } PF_STATE_LOCK(s); KEYS_UNLOCK(); KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL, ("%s failure", __func__)); return (0); #undef KEYS_UNLOCK } static void pf_detach_state(struct pf_kstate *s) { struct pf_state_key *sks = s->key[PF_SK_STACK]; struct pf_keyhash *kh; NET_EPOCH_ASSERT(); MPASS(s->timeout >= PFTM_MAX); pf_sctp_multihome_detach_addr(s); if ((s->state_flags & PFSTATE_PFLOW) && V_pflow_export_state_ptr) V_pflow_export_state_ptr(s); if (sks != NULL) { kh = &V_pf_keyhash[pf_hashkey(sks)]; PF_HASHROW_LOCK(kh); if (s->key[PF_SK_STACK] != NULL) pf_state_key_detach(s, PF_SK_STACK); /* * If both point to same key, then we are done.
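* Detaching the wire key under the same row lock saves a second hash lookup.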
*/ if (sks == s->key[PF_SK_WIRE]) { pf_state_key_detach(s, PF_SK_WIRE); PF_HASHROW_UNLOCK(kh); return; } PF_HASHROW_UNLOCK(kh); } if (s->key[PF_SK_WIRE] != NULL) { kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])]; PF_HASHROW_LOCK(kh); if (s->key[PF_SK_WIRE] != NULL) pf_state_key_detach(s, PF_SK_WIRE); PF_HASHROW_UNLOCK(kh); } } static void pf_state_key_detach(struct pf_kstate *s, int idx) { struct pf_state_key *sk = s->key[idx]; #ifdef INVARIANTS struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)]; PF_HASHROW_ASSERT(kh); #endif TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]); s->key[idx] = NULL; if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) { LIST_REMOVE(sk, entry); uma_zfree(V_pf_state_key_z, sk); } } static int pf_state_key_ctor(void *mem, int size, void *arg, int flags) { struct pf_state_key *sk = mem; bzero(sk, sizeof(struct pf_state_key_cmp)); TAILQ_INIT(&sk->states[PF_SK_WIRE]); TAILQ_INIT(&sk->states[PF_SK_STACK]); return (0); } static int pf_state_key_addr_setup(struct pf_pdesc *pd, struct pf_state_key_cmp *key, int multi) { struct pf_addr *saddr = pd->src; struct pf_addr *daddr = pd->dst; #ifdef INET6 struct nd_neighbor_solicit nd; struct pf_addr *target; u_short action, reason; if (pd->af == AF_INET || pd->proto != IPPROTO_ICMPV6) goto copy; switch (pd->hdr.icmp6.icmp6_type) { case ND_NEIGHBOR_SOLICIT: if (multi) return (-1); if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), &action, &reason, pd->af)) return (-1); target = (struct pf_addr *)&nd.nd_ns_target; daddr = target; break; case ND_NEIGHBOR_ADVERT: if (multi) return (-1); if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), &action, &reason, pd->af)) return (-1); target = (struct pf_addr *)&nd.nd_ns_target; saddr = target; if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) { key->addr[pd->didx].addr32[0] = 0; key->addr[pd->didx].addr32[1] = 0; key->addr[pd->didx].addr32[2] = 0; key->addr[pd->didx].addr32[3] = 0; daddr = NULL; /* overwritten */ } break; default: if (multi) { key->addr[pd->sidx].addr32[0] = IPV6_ADDR_INT32_MLL; key->addr[pd->sidx].addr32[1] = 0; key->addr[pd->sidx].addr32[2] = 0; key->addr[pd->sidx].addr32[3] = IPV6_ADDR_INT32_ONE; saddr = NULL; /* overwritten */ } } copy: #endif if (saddr) PF_ACPY(&key->addr[pd->sidx], saddr, pd->af); if (daddr) PF_ACPY(&key->addr[pd->didx], daddr, pd->af); return (0); } int pf_state_key_setup(struct pf_pdesc *pd, u_int16_t sport, u_int16_t dport, struct pf_state_key **sk, struct pf_state_key **nk) { *sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); if (*sk == NULL) return (ENOMEM); if (pf_state_key_addr_setup(pd, (struct pf_state_key_cmp *)*sk, 0)) { uma_zfree(V_pf_state_key_z, *sk); *sk = NULL; return (ENOMEM); } (*sk)->port[pd->sidx] = sport; (*sk)->port[pd->didx] = dport; (*sk)->proto = pd->proto; (*sk)->af = pd->af; *nk = pf_state_key_clone(*sk); if (*nk == NULL) { uma_zfree(V_pf_state_key_z, *sk); *sk = NULL; return (ENOMEM); } if (pd->af != pd->naf) { (*sk)->port[pd->sidx] = pd->osport; (*sk)->port[pd->didx] = pd->odport; (*nk)->af = pd->naf; /* * We're overwriting an address here, so there may be bits of an IPv6 * address left in here. Clear that out first.
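* (The nk key is refilled just below with addresses in the new family, pd->naf.)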
*/ bzero(&(*nk)->addr[0], sizeof((*nk)->addr[0])); bzero(&(*nk)->addr[1], sizeof((*nk)->addr[1])); if (pd->dir == PF_IN) { PF_ACPY(&(*nk)->addr[pd->didx], &pd->nsaddr, pd->naf); PF_ACPY(&(*nk)->addr[pd->sidx], &pd->ndaddr, pd->naf); (*nk)->port[pd->didx] = pd->nsport; (*nk)->port[pd->sidx] = pd->ndport; } else { PF_ACPY(&(*nk)->addr[pd->sidx], &pd->nsaddr, pd->naf); PF_ACPY(&(*nk)->addr[pd->didx], &pd->ndaddr, pd->naf); (*nk)->port[pd->sidx] = pd->nsport; (*nk)->port[pd->didx] = pd->ndport; } switch (pd->proto) { case IPPROTO_ICMP: (*nk)->proto = IPPROTO_ICMPV6; break; case IPPROTO_ICMPV6: (*nk)->proto = IPPROTO_ICMP; break; default: (*nk)->proto = pd->proto; } } return (0); } struct pf_state_key * pf_state_key_clone(const struct pf_state_key *orig) { struct pf_state_key *sk; sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); if (sk == NULL) return (NULL); bcopy(orig, sk, sizeof(struct pf_state_key_cmp)); return (sk); } int pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif, struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s) { struct pf_idhash *ih; struct pf_kstate *cur; int error; NET_EPOCH_ASSERT(); KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]), ("%s: sks not pristine", __func__)); KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]), ("%s: skw not pristine", __func__)); KASSERT(s->refs == 0, ("%s: state not pristine", __func__)); s->kif = kif; s->orig_kif = orig_kif; if (s->id == 0 && s->creatorid == 0) { s->id = alloc_unr64(&V_pf_stateid); s->id = htobe64(s->id); s->creatorid = V_pf_status.hostid; } /* Returns with ID locked on success. */ if ((error = pf_state_key_attach(skw, sks, s)) != 0) return (error); ih = &V_pf_idhash[PF_IDHASH(s)]; PF_HASHROW_ASSERT(ih); LIST_FOREACH(cur, &ih->states, entry) if (cur->id == s->id && cur->creatorid == s->creatorid) break; if (cur != NULL) { s->timeout = PFTM_UNLINKED; PF_HASHROW_UNLOCK(ih); if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: state ID collision: " "id: %016llx creatorid: %08x\n", (unsigned long long)be64toh(s->id), ntohl(s->creatorid)); } pf_detach_state(s); return (EEXIST); } LIST_INSERT_HEAD(&ih->states, s, entry); /* One for keys, one for ID hash. */ refcount_init(&s->refs, 2); pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1); if (V_pfsync_insert_state_ptr != NULL) V_pfsync_insert_state_ptr(s); /* Returns locked. */ return (0); } /* * Find state by ID: returns with locked row on success. */ struct pf_kstate * pf_find_state_byid(uint64_t id, uint32_t creatorid) { struct pf_idhash *ih; struct pf_kstate *s; pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1); ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))]; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) if (s->id == id && s->creatorid == creatorid) break; if (s == NULL) PF_HASHROW_UNLOCK(ih); return (s); } /* * Find state by key. * Returns with ID hash slot locked on success. */ static struct pf_kstate * pf_find_state(struct pfi_kkif *kif, const struct pf_state_key_cmp *key, u_int dir) { struct pf_keyhash *kh; struct pf_state_key *sk; struct pf_kstate *s; int idx; pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1); kh = &V_pf_keyhash[pf_hashkey((const struct pf_state_key *)key)]; PF_HASHROW_LOCK(kh); LIST_FOREACH(sk, &kh->keys, entry) if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0) break; if (sk == NULL) { PF_HASHROW_UNLOCK(kh); return (NULL); } idx = (dir == PF_IN ? 
PF_SK_WIRE : PF_SK_STACK); /* List is sorted, if-bound states before floating ones. */ TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) if (s->kif == V_pfi_all || s->kif == kif || s->orig_kif == kif) { PF_STATE_LOCK(s); PF_HASHROW_UNLOCK(kh); if (__predict_false(s->timeout >= PFTM_MAX)) { /* * State is either being processed by * pf_unlink_state() in another thread, or * is scheduled for immediate expiry. */ PF_STATE_UNLOCK(s); return (NULL); } return (s); } /* Look through the other list, in case of AF-TO */ idx = idx == PF_SK_WIRE ? PF_SK_STACK : PF_SK_WIRE; TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) { if (s->key[PF_SK_WIRE]->af == s->key[PF_SK_STACK]->af) continue; if (s->kif == V_pfi_all || s->kif == kif || s->orig_kif == kif) { PF_STATE_LOCK(s); PF_HASHROW_UNLOCK(kh); if (__predict_false(s->timeout >= PFTM_MAX)) { /* * State is either being processed by * pf_unlink_state() in another thread, or * is scheduled for immediate expiry. */ PF_STATE_UNLOCK(s); return (NULL); } return (s); } } PF_HASHROW_UNLOCK(kh); return (NULL); } /* * Returns with ID hash slot locked on success. */ struct pf_kstate * pf_find_state_all(const struct pf_state_key_cmp *key, u_int dir, int *more) { struct pf_keyhash *kh; struct pf_state_key *sk; struct pf_kstate *s, *ret = NULL; int idx, inout = 0; if (more != NULL) *more = 0; pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1); kh = &V_pf_keyhash[pf_hashkey((const struct pf_state_key *)key)]; PF_HASHROW_LOCK(kh); LIST_FOREACH(sk, &kh->keys, entry) if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0) break; if (sk == NULL) { PF_HASHROW_UNLOCK(kh); return (NULL); } switch (dir) { case PF_IN: idx = PF_SK_WIRE; break; case PF_OUT: idx = PF_SK_STACK; break; case PF_INOUT: idx = PF_SK_WIRE; inout = 1; break; default: panic("%s: dir %u", __func__, dir); } second_run: TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) { if (more == NULL) { PF_STATE_LOCK(s); PF_HASHROW_UNLOCK(kh); return (s); } if (ret) (*more)++; else { ret = s; PF_STATE_LOCK(s); } } if (inout == 1) { inout = 0; idx = PF_SK_STACK; goto second_run; } PF_HASHROW_UNLOCK(kh); return (ret); } /* * FIXME * This routine is inefficient -- locks the state only to unlock immediately on * return. * It is racy -- after the state is unlocked nothing stops other threads from * removing it.
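* Callers therefore get only a point-in-time answer and must tolerate the state disappearing right after the check.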
*/ bool pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir) { struct pf_kstate *s; s = pf_find_state_all(key, dir, NULL); if (s != NULL) { PF_STATE_UNLOCK(s); return (true); } return (false); } struct pf_udp_mapping * pf_udp_mapping_create(sa_family_t af, struct pf_addr *src_addr, uint16_t src_port, struct pf_addr *nat_addr, uint16_t nat_port) { struct pf_udp_mapping *mapping; mapping = uma_zalloc(V_pf_udp_mapping_z, M_NOWAIT | M_ZERO); if (mapping == NULL) return (NULL); PF_ACPY(&mapping->endpoints[0].addr, src_addr, af); mapping->endpoints[0].port = src_port; mapping->endpoints[0].af = af; mapping->endpoints[0].mapping = mapping; PF_ACPY(&mapping->endpoints[1].addr, nat_addr, af); mapping->endpoints[1].port = nat_port; mapping->endpoints[1].af = af; mapping->endpoints[1].mapping = mapping; refcount_init(&mapping->refs, 1); return (mapping); } int pf_udp_mapping_insert(struct pf_udp_mapping *mapping) { struct pf_udpendpointhash *h0, *h1; struct pf_udp_endpoint *endpoint; int ret = EEXIST; h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])]; h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])]; if (h0 == h1) { PF_HASHROW_LOCK(h0); } else if (h0 < h1) { PF_HASHROW_LOCK(h0); PF_HASHROW_LOCK(h1); } else { PF_HASHROW_LOCK(h1); PF_HASHROW_LOCK(h0); } LIST_FOREACH(endpoint, &h0->endpoints, entry) { if (bcmp(endpoint, &mapping->endpoints[0], sizeof(struct pf_udp_endpoint_cmp)) == 0) break; } if (endpoint != NULL) goto cleanup; LIST_FOREACH(endpoint, &h1->endpoints, entry) { if (bcmp(endpoint, &mapping->endpoints[1], sizeof(struct pf_udp_endpoint_cmp)) == 0) break; } if (endpoint != NULL) goto cleanup; LIST_INSERT_HEAD(&h0->endpoints, &mapping->endpoints[0], entry); LIST_INSERT_HEAD(&h1->endpoints, &mapping->endpoints[1], entry); ret = 0; cleanup: if (h0 != h1) { PF_HASHROW_UNLOCK(h0); PF_HASHROW_UNLOCK(h1); } else { PF_HASHROW_UNLOCK(h0); } return (ret); } void pf_udp_mapping_release(struct pf_udp_mapping *mapping) { /* refcount is synchronized on the source endpoint's row lock */ struct pf_udpendpointhash *h0, *h1; if (mapping == NULL) return; h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])]; PF_HASHROW_LOCK(h0); if (refcount_release(&mapping->refs)) { LIST_REMOVE(&mapping->endpoints[0], entry); PF_HASHROW_UNLOCK(h0); h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])]; PF_HASHROW_LOCK(h1); LIST_REMOVE(&mapping->endpoints[1], entry); PF_HASHROW_UNLOCK(h1); uma_zfree(V_pf_udp_mapping_z, mapping); } else { PF_HASHROW_UNLOCK(h0); } } struct pf_udp_mapping * pf_udp_mapping_find(struct pf_udp_endpoint_cmp *key) { struct pf_udpendpointhash *uh; struct pf_udp_endpoint *endpoint; uh = &V_pf_udpendpointhash[pf_hashudpendpoint((struct pf_udp_endpoint*)key)]; PF_HASHROW_LOCK(uh); LIST_FOREACH(endpoint, &uh->endpoints, entry) { if (bcmp(endpoint, key, sizeof(struct pf_udp_endpoint_cmp)) == 0 && bcmp(endpoint, &endpoint->mapping->endpoints[0], sizeof(struct pf_udp_endpoint_cmp)) == 0) break; } if (endpoint == NULL) { PF_HASHROW_UNLOCK(uh); return (NULL); } refcount_acquire(&endpoint->mapping->refs); PF_HASHROW_UNLOCK(uh); return (endpoint->mapping); } /* END state table stuff */ static void pf_send(struct pf_send_entry *pfse) { PF_SENDQ_LOCK(); STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next); PF_SENDQ_UNLOCK(); swi_sched(V_pf_swi_cookie, 0); } static bool pf_isforlocal(struct mbuf *m, int af) { switch (af) { #ifdef INET case AF_INET: { struct ip *ip = mtod(m, struct ip *); return (in_localip(ip->ip_dst)); } 
#endif #ifdef INET6 case AF_INET6: { struct ip6_hdr *ip6; struct in6_ifaddr *ia; ip6 = mtod(m, struct ip6_hdr *); ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false); if (ia == NULL) return (false); return (! (ia->ia6_flags & IN6_IFF_NOTREADY)); } #endif default: unhandled_af(af); } return (false); } int pf_icmp_mapping(struct pf_pdesc *pd, u_int8_t type, int *icmp_dir, u_int16_t *virtual_id, u_int16_t *virtual_type) { /* * ICMP types marked with PF_OUT are typically responses to * PF_IN, and will match states in the opposite direction. * PF_IN ICMP types need to match a state with that type. */ *icmp_dir = PF_OUT; /* Queries (and responses) */ switch (pd->af) { #ifdef INET case AF_INET: switch (type) { case ICMP_ECHO: *icmp_dir = PF_IN; case ICMP_ECHOREPLY: *virtual_type = ICMP_ECHO; *virtual_id = pd->hdr.icmp.icmp_id; break; case ICMP_TSTAMP: *icmp_dir = PF_IN; case ICMP_TSTAMPREPLY: *virtual_type = ICMP_TSTAMP; *virtual_id = pd->hdr.icmp.icmp_id; break; case ICMP_IREQ: *icmp_dir = PF_IN; case ICMP_IREQREPLY: *virtual_type = ICMP_IREQ; *virtual_id = pd->hdr.icmp.icmp_id; break; case ICMP_MASKREQ: *icmp_dir = PF_IN; case ICMP_MASKREPLY: *virtual_type = ICMP_MASKREQ; *virtual_id = pd->hdr.icmp.icmp_id; break; case ICMP_IPV6_WHEREAREYOU: *icmp_dir = PF_IN; case ICMP_IPV6_IAMHERE: *virtual_type = ICMP_IPV6_WHEREAREYOU; *virtual_id = 0; /* Nothing sane to match on! */ break; case ICMP_MOBILE_REGREQUEST: *icmp_dir = PF_IN; case ICMP_MOBILE_REGREPLY: *virtual_type = ICMP_MOBILE_REGREQUEST; *virtual_id = 0; /* Nothing sane to match on! */ break; case ICMP_ROUTERSOLICIT: *icmp_dir = PF_IN; case ICMP_ROUTERADVERT: *virtual_type = ICMP_ROUTERSOLICIT; *virtual_id = 0; /* Nothing sane to match on! */ break; /* These ICMP types map to other connections */ case ICMP_UNREACH: case ICMP_SOURCEQUENCH: case ICMP_REDIRECT: case ICMP_TIMXCEED: case ICMP_PARAMPROB: /* These will not be used, but set them anyway */ *icmp_dir = PF_IN; *virtual_type = type; *virtual_id = 0; HTONS(*virtual_type); return (1); /* These types match to another state */ /* * All remaining ICMP types get their own states, * and will only match in one direction. */ default: *icmp_dir = PF_IN; *virtual_type = type; *virtual_id = 0; break; } break; #endif /* INET */ #ifdef INET6 case AF_INET6: switch (type) { case ICMP6_ECHO_REQUEST: *icmp_dir = PF_IN; case ICMP6_ECHO_REPLY: *virtual_type = ICMP6_ECHO_REQUEST; *virtual_id = pd->hdr.icmp6.icmp6_id; break; case MLD_LISTENER_QUERY: case MLD_LISTENER_REPORT: { /* * Listener Report can be sent by clients * without an associated Listener Query. * In addition to that, when Report is sent as a * reply to a Query its source and destination * address are different. */ *icmp_dir = PF_IN; *virtual_type = MLD_LISTENER_QUERY; *virtual_id = 0; break; } case MLD_MTRACE: *icmp_dir = PF_IN; case MLD_MTRACE_RESP: *virtual_type = MLD_MTRACE; *virtual_id = 0; /* Nothing sane to match on! */ break; case ND_NEIGHBOR_SOLICIT: *icmp_dir = PF_IN; case ND_NEIGHBOR_ADVERT: { *virtual_type = ND_NEIGHBOR_SOLICIT; *virtual_id = 0; break; } /* * These ICMP types map to other connections. * ND_REDIRECT can't be in this list because the triggering * packet header is optional. 
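* The error types below are matched against the state of the embedded (triggering) packet rather than getting states of their own.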
*/ case ICMP6_DST_UNREACH: case ICMP6_PACKET_TOO_BIG: case ICMP6_TIME_EXCEEDED: case ICMP6_PARAM_PROB: /* These will not be used, but set them anyway */ *icmp_dir = PF_IN; *virtual_type = type; *virtual_id = 0; HTONS(*virtual_type); return (1); /* These types match to another state */ /* * All remaining ICMP6 types get their own states, * and will only match in one direction. */ default: *icmp_dir = PF_IN; *virtual_type = type; *virtual_id = 0; break; } break; #endif /* INET6 */ default: unhandled_af(pd->af); } HTONS(*virtual_type); return (0); /* These types match to their own state */ } void pf_intr(void *v) { struct epoch_tracker et; struct pf_send_head queue; struct pf_send_entry *pfse, *next; CURVNET_SET((struct vnet *)v); PF_SENDQ_LOCK(); queue = V_pf_sendqueue; STAILQ_INIT(&V_pf_sendqueue); PF_SENDQ_UNLOCK(); NET_EPOCH_ENTER(et); STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) { switch (pfse->pfse_type) { #ifdef INET case PFSE_IP: { if (pf_isforlocal(pfse->pfse_m, AF_INET)) { KASSERT(pfse->pfse_m->m_pkthdr.rcvif == V_loif, ("%s: rcvif != loif", __func__)); pfse->pfse_m->m_flags |= M_SKIP_FIREWALL; pfse->pfse_m->m_pkthdr.csum_flags |= CSUM_IP_VALID | CSUM_IP_CHECKED | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; pfse->pfse_m->m_pkthdr.csum_data = 0xffff; ip_input(pfse->pfse_m); } else { ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL); } break; } case PFSE_ICMP: icmp_error(pfse->pfse_m, pfse->icmpopts.type, pfse->icmpopts.code, 0, pfse->icmpopts.mtu); break; #endif /* INET */ #ifdef INET6 case PFSE_IP6: if (pf_isforlocal(pfse->pfse_m, AF_INET6)) { KASSERT(pfse->pfse_m->m_pkthdr.rcvif == V_loif, ("%s: rcvif != loif", __func__)); pfse->pfse_m->m_flags |= M_SKIP_FIREWALL | M_LOOP; pfse->pfse_m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; pfse->pfse_m->m_pkthdr.csum_data = 0xffff; ip6_input(pfse->pfse_m); } else { ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL, NULL); } break; case PFSE_ICMP6: icmp6_error(pfse->pfse_m, pfse->icmpopts.type, pfse->icmpopts.code, pfse->icmpopts.mtu); break; #endif /* INET6 */ default: panic("%s: unknown type", __func__); } free(pfse, M_PFTEMP); } NET_EPOCH_EXIT(et); CURVNET_RESTORE(); } #define pf_purge_thread_period (hz / 10) #ifdef PF_WANT_32_TO_64_COUNTER static void pf_status_counter_u64_periodic(void) { PF_RULES_RASSERT(); if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) { return; } for (int i = 0; i < FCNT_MAX; i++) { pf_counter_u64_periodic(&V_pf_status.fcounters[i]); } } static void pf_kif_counter_u64_periodic(void) { struct pfi_kkif *kif; size_t r, run; PF_RULES_RASSERT(); if (__predict_false(V_pf_allkifcount == 0)) { return; } if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) { return; } run = V_pf_allkifcount / 10; if (run < 5) run = 5; for (r = 0; r < run; r++) { kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist); if (kif == NULL) { LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); break; } LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]); pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]); } } } } } static void pf_rule_counter_u64_periodic(void) { struct pf_krule *rule; size_t r, run; PF_RULES_RASSERT(); if (__predict_false(V_pf_allrulecount == 0)) { return; } if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 
300)) != 0) { return; } run = V_pf_allrulecount / 10; if (run < 5) run = 5; for (r = 0; r < run; r++) { rule = LIST_NEXT(V_pf_rulemarker, allrulelist); if (rule == NULL) { LIST_REMOVE(V_pf_rulemarker, allrulelist); LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); break; } LIST_REMOVE(V_pf_rulemarker, allrulelist); LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist); pf_counter_u64_periodic(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_periodic(&rule->packets[i]); pf_counter_u64_periodic(&rule->bytes[i]); } } } static void pf_counter_u64_periodic_main(void) { PF_RULES_RLOCK_TRACKER; V_pf_counter_periodic_iter++; PF_RULES_RLOCK(); pf_counter_u64_critical_enter(); pf_status_counter_u64_periodic(); pf_kif_counter_u64_periodic(); pf_rule_counter_u64_periodic(); pf_counter_u64_critical_exit(); PF_RULES_RUNLOCK(); } #else #define pf_counter_u64_periodic_main() do { } while (0) #endif void pf_purge_thread(void *unused __unused) { struct epoch_tracker et; VNET_ITERATOR_DECL(vnet_iter); sx_xlock(&pf_end_lock); while (pf_end_threads == 0) { sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period); VNET_LIST_RLOCK(); NET_EPOCH_ENTER(et); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); /* Wait until V_pf_default_rule is initialized. */ if (V_pf_vnet_active == 0) { CURVNET_RESTORE(); continue; } pf_counter_u64_periodic_main(); /* * Process 1/interval fraction of the state * table every run. */ V_pf_purge_idx = pf_purge_expired_states(V_pf_purge_idx, V_pf_hashmask / (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10)); /* * Purge other expired types every * PFTM_INTERVAL seconds. */ if (V_pf_purge_idx == 0) { /* * Order is important: * - states and src nodes reference rules * - states and rules reference kifs */ pf_purge_expired_fragments(); pf_purge_expired_src_nodes(); pf_purge_unlinked_rules(); pfi_kkif_purge(); } CURVNET_RESTORE(); } NET_EPOCH_EXIT(et); VNET_LIST_RUNLOCK(); } pf_end_threads++; sx_xunlock(&pf_end_lock); kproc_exit(0); } void pf_unload_vnet_purge(void) { /* * To clean up all kifs and rules we need * two runs: the first one clears reference flags, * then pf_purge_expired_states() doesn't * raise them, and the second run frees. */ pf_purge_unlinked_rules(); pfi_kkif_purge(); /* * Now purge everything. */ pf_purge_expired_states(0, V_pf_hashmask); pf_purge_fragments(UINT_MAX); pf_purge_expired_src_nodes(); /* * Now all kifs & rules should be unreferenced, * thus should be successfully freed.
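* (Anything still referenced at this point would outlive the vnet teardown.)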
*/ pf_purge_unlinked_rules(); pfi_kkif_purge(); } u_int32_t pf_state_expires(const struct pf_kstate *state) { u_int32_t timeout; u_int32_t start; u_int32_t end; u_int32_t states; /* handle all PFTM_* > PFTM_MAX here */ if (state->timeout == PFTM_PURGE) return (time_uptime); KASSERT(state->timeout != PFTM_UNLINKED, ("pf_state_expires: timeout == PFTM_UNLINKED")); KASSERT((state->timeout < PFTM_MAX), ("pf_state_expires: timeout > PFTM_MAX")); timeout = state->rule->timeout[state->timeout]; if (!timeout) timeout = V_pf_default_rule.timeout[state->timeout]; start = state->rule->timeout[PFTM_ADAPTIVE_START]; if (start && state->rule != &V_pf_default_rule) { end = state->rule->timeout[PFTM_ADAPTIVE_END]; states = counter_u64_fetch(state->rule->states_cur); } else { start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START]; end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END]; states = V_pf_status.states; } if (end && states > start && start < end) { if (states < end) { timeout = (u_int64_t)timeout * (end - states) / (end - start); return ((state->expire / 1000) + timeout); } else return (time_uptime); } return ((state->expire / 1000) + timeout); } void pf_purge_expired_src_nodes(void) { struct pf_ksrc_node_list freelist; struct pf_srchash *sh; struct pf_ksrc_node *cur, *next; int i; LIST_INIT(&freelist); for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next) if (cur->states == 0 && cur->expire <= time_uptime) { pf_unlink_src_node(cur); LIST_INSERT_HEAD(&freelist, cur, entry); } else if (cur->rule != NULL) cur->rule->rule_ref |= PFRULE_REFS; PF_HASHROW_UNLOCK(sh); } pf_free_src_nodes(&freelist); V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z); } static void pf_src_tree_remove_state(struct pf_kstate *s) { uint32_t timeout; timeout = s->rule->timeout[PFTM_SRC_NODE] ? s->rule->timeout[PFTM_SRC_NODE] : V_pf_default_rule.timeout[PFTM_SRC_NODE]; for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++) { if (s->sns[sn_type] == NULL) continue; PF_SRC_NODE_LOCK(s->sns[sn_type]); if (sn_type == PF_SN_LIMIT && s->src.tcp_est) --(s->sns[sn_type]->conn); if (--(s->sns[sn_type]->states) == 0) s->sns[sn_type]->expire = time_uptime + timeout; PF_SRC_NODE_UNLOCK(s->sns[sn_type]); s->sns[sn_type] = NULL; } } /* * Unlink and potentially free a state. Function may be * called with ID hash row locked, but always returns * unlocked, since it needs to go through key hash locking. */ int pf_unlink_state(struct pf_kstate *s) { struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)]; NET_EPOCH_ASSERT(); PF_HASHROW_ASSERT(ih); if (s->timeout == PFTM_UNLINKED) { /* * State is being processed * by pf_unlink_state() in * another thread. */ PF_HASHROW_UNLOCK(ih); return (0); /* XXXGL: undefined actually */ } if (s->src.state == PF_TCPS_PROXY_DST) { /* XXX wire key the right one? */ pf_send_tcp(s->rule, s->key[PF_SK_WIRE]->af, &s->key[PF_SK_WIRE]->addr[1], &s->key[PF_SK_WIRE]->addr[0], s->key[PF_SK_WIRE]->port[1], s->key[PF_SK_WIRE]->port[0], s->src.seqhi, s->src.seqlo + 1, TH_RST|TH_ACK, 0, 0, 0, M_SKIP_FIREWALL, s->tag, 0, s->act.rtableid); } LIST_REMOVE(s, entry); pf_src_tree_remove_state(s); if (V_pfsync_delete_state_ptr != NULL) V_pfsync_delete_state_ptr(s); STATE_DEC_COUNTERS(s); s->timeout = PFTM_UNLINKED; /* Ensure we remove it from the list of halfopen states, if needed.
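* Moving both peers to TCPS_CLOSED lets pf_set_protostate() drop the V_pf_status.states_halfopen counter for a half-open TCP state.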
*/ if (s->key[PF_SK_STACK] != NULL && s->key[PF_SK_STACK]->proto == IPPROTO_TCP) pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED); PF_HASHROW_UNLOCK(ih); pf_detach_state(s); pf_udp_mapping_release(s->udp_mapping); /* pf_state_insert() initialises refs to 2 */ return (pf_release_staten(s, 2)); } struct pf_kstate * pf_alloc_state(int flags) { return (uma_zalloc(V_pf_state_z, flags | M_ZERO)); } void pf_free_state(struct pf_kstate *cur) { struct pf_krule_item *ri; KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur)); KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__, cur->timeout)); while ((ri = SLIST_FIRST(&cur->match_rules))) { SLIST_REMOVE_HEAD(&cur->match_rules, entry); free(ri, M_PF_RULE_ITEM); } pf_normalize_tcp_cleanup(cur); uma_zfree(V_pf_state_z, cur); pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1); } /* * Called only from pf_purge_thread(), thus serialized. */ static u_int pf_purge_expired_states(u_int i, int maxcheck) { struct pf_idhash *ih; struct pf_kstate *s; struct pf_krule_item *mrm; size_t count __unused; V_pf_status.states = uma_zone_get_cur(V_pf_state_z); /* * Go through hash and unlink states that expire now. */ while (maxcheck > 0) { count = 0; ih = &V_pf_idhash[i]; /* only take the lock if we expect to do work */ if (!LIST_EMPTY(&ih->states)) { relock: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (pf_state_expires(s) <= time_uptime) { V_pf_status.states -= pf_unlink_state(s); goto relock; } s->rule->rule_ref |= PFRULE_REFS; if (s->nat_rule != NULL) s->nat_rule->rule_ref |= PFRULE_REFS; if (s->anchor != NULL) s->anchor->rule_ref |= PFRULE_REFS; s->kif->pfik_flags |= PFI_IFLAG_REFS; SLIST_FOREACH(mrm, &s->match_rules, entry) mrm->r->rule_ref |= PFRULE_REFS; if (s->act.rt_kif) s->act.rt_kif->pfik_flags |= PFI_IFLAG_REFS; count++; } PF_HASHROW_UNLOCK(ih); } SDT_PROBE2(pf, purge, state, rowcount, i, count); /* Return when we hit end of hash. */ if (++i > V_pf_hashmask) { V_pf_status.states = uma_zone_get_cur(V_pf_state_z); return (0); } maxcheck--; } V_pf_status.states = uma_zone_get_cur(V_pf_state_z); return (i); } static void pf_purge_unlinked_rules(void) { struct pf_krulequeue tmpq; struct pf_krule *r, *r1; /* * If we have overloading task pending, then we'd * better skip purging this time. There is a tiny * probability that overloading task references * an already unlinked rule. */ PF_OVERLOADQ_LOCK(); if (!SLIST_EMPTY(&V_pf_overloadqueue)) { PF_OVERLOADQ_UNLOCK(); return; } PF_OVERLOADQ_UNLOCK(); /* * Do naive mark-and-sweep garbage collecting of old rules. * Reference flag is raised by pf_purge_expired_states() * and pf_purge_expired_src_nodes(). * * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK, * use a temporary queue. 
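* Candidates are collected under PF_UNLNKDRULES_LOCK and freed afterwards under PF_RULES_WLOCK, so the two locks are never held at the same time.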
*/ TAILQ_INIT(&tmpq); PF_UNLNKDRULES_LOCK(); TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) { if (!(r->rule_ref & PFRULE_REFS)) { TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries); TAILQ_INSERT_TAIL(&tmpq, r, entries); } else r->rule_ref &= ~PFRULE_REFS; } PF_UNLNKDRULES_UNLOCK(); if (!TAILQ_EMPTY(&tmpq)) { PF_CONFIG_LOCK(); PF_RULES_WLOCK(); TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) { TAILQ_REMOVE(&tmpq, r, entries); pf_free_rule(r); } PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); } } void pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: { u_int32_t a = ntohl(addr->addr32[0]); printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255, (a>>8)&255, a&255); if (p) { p = ntohs(p); printf(":%u", p); } break; } #endif /* INET */ #ifdef INET6 case AF_INET6: { u_int16_t b; u_int8_t i, curstart, curend, maxstart, maxend; curstart = curend = maxstart = maxend = 255; for (i = 0; i < 8; i++) { if (!addr->addr16[i]) { if (curstart == 255) curstart = i; curend = i; } else { if ((curend - curstart) > (maxend - maxstart)) { maxstart = curstart; maxend = curend; } curstart = curend = 255; } } if ((curend - curstart) > (maxend - maxstart)) { maxstart = curstart; maxend = curend; } for (i = 0; i < 8; i++) { if (i >= maxstart && i <= maxend) { if (i == 0) printf(":"); if (i == maxend) printf(":"); } else { b = ntohs(addr->addr16[i]); printf("%x", b); if (i < 7) printf(":"); } } if (p) { p = ntohs(p); printf("[%u]", p); } break; } #endif /* INET6 */ default: unhandled_af(af); } } void pf_print_state(struct pf_kstate *s) { pf_print_state_parts(s, NULL, NULL); } static void pf_print_state_parts(struct pf_kstate *s, struct pf_state_key *skwp, struct pf_state_key *sksp) { struct pf_state_key *skw, *sks; u_int8_t proto, dir; /* Do our best to fill these, but they're skipped if NULL */ skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL); sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL); proto = skw ? skw->proto : (sks ? sks->proto : 0); dir = s ? 
s->direction : 0; switch (proto) { case IPPROTO_IPV4: printf("IPv4"); break; case IPPROTO_IPV6: printf("IPv6"); break; case IPPROTO_TCP: printf("TCP"); break; case IPPROTO_UDP: printf("UDP"); break; case IPPROTO_ICMP: printf("ICMP"); break; case IPPROTO_ICMPV6: printf("ICMPv6"); break; default: printf("%u", proto); break; } switch (dir) { case PF_IN: printf(" in"); break; case PF_OUT: printf(" out"); break; } if (skw) { printf(" wire: "); pf_print_host(&skw->addr[0], skw->port[0], skw->af); printf(" "); pf_print_host(&skw->addr[1], skw->port[1], skw->af); } if (sks) { printf(" stack: "); if (sks != skw) { pf_print_host(&sks->addr[0], sks->port[0], sks->af); printf(" "); pf_print_host(&sks->addr[1], sks->port[1], sks->af); } else printf("-"); } if (s) { if (proto == IPPROTO_TCP) { printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo, s->src.seqhi, s->src.max_win, s->src.seqdiff); if (s->src.wscale && s->dst.wscale) printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK); printf("]"); printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo, s->dst.seqhi, s->dst.max_win, s->dst.seqdiff); if (s->src.wscale && s->dst.wscale) printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK); printf("]"); } printf(" %u:%u", s->src.state, s->dst.state); if (s->rule) printf(" @%d", s->rule->nr); } } void pf_print_flags(uint16_t f) { if (f) printf(" "); if (f & TH_FIN) printf("F"); if (f & TH_SYN) printf("S"); if (f & TH_RST) printf("R"); if (f & TH_PUSH) printf("P"); if (f & TH_ACK) printf("A"); if (f & TH_URG) printf("U"); if (f & TH_ECE) printf("E"); if (f & TH_CWR) printf("W"); if (f & TH_AE) printf("e"); } #define PF_SET_SKIP_STEPS(i) \ do { \ while (head[i] != cur) { \ head[i]->skip[i] = cur; \ head[i] = TAILQ_NEXT(head[i], entries); \ } \ } while (0) void pf_calc_skip_steps(struct pf_krulequeue *rules) { struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT]; int i; cur = TAILQ_FIRST(rules); prev = cur; for (i = 0; i < PF_SKIP_COUNT; ++i) head[i] = cur; while (cur != NULL) { if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) PF_SET_SKIP_STEPS(PF_SKIP_IFP); if (cur->direction != prev->direction) PF_SET_SKIP_STEPS(PF_SKIP_DIR); if (cur->af != prev->af) PF_SET_SKIP_STEPS(PF_SKIP_AF); if (cur->proto != prev->proto) PF_SET_SKIP_STEPS(PF_SKIP_PROTO); if (cur->src.neg != prev->src.neg || pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR); if (cur->dst.neg != prev->dst.neg || pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR); if (cur->src.port[0] != prev->src.port[0] || cur->src.port[1] != prev->src.port[1] || cur->src.port_op != prev->src.port_op) PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT); if (cur->dst.port[0] != prev->dst.port[0] || cur->dst.port[1] != prev->dst.port[1] || cur->dst.port_op != prev->dst.port_op) PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); prev = cur; cur = TAILQ_NEXT(cur, entries); } for (i = 0; i < PF_SKIP_COUNT; ++i) PF_SET_SKIP_STEPS(i); } int pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2) { if (aw1->type != aw2->type) return (1); switch (aw1->type) { case PF_ADDR_ADDRMASK: case PF_ADDR_RANGE: if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6)) return (1); if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6)) return (1); return (0); case PF_ADDR_DYNIFTL: return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt); case PF_ADDR_NONE: case PF_ADDR_NOROUTE: case PF_ADDR_URPFFAILED: return (0); case PF_ADDR_TABLE: return (aw1->p.tbl != aw2->p.tbl); default: printf("invalid address type: %d\n", aw1->type); return 
(1); } } /** * Checksum updates are a little complicated because the checksum in the TCP/UDP * header isn't always a full checksum. In some cases (i.e. output) it's a * pseudo-header checksum, which is a partial checksum over src/dst IP * addresses, protocol number and length. * * That means we have the following cases: * * Input or forwarding: we don't have TSO, the checksum fields are full * checksums, we need to update the checksum whenever we change anything. * * Output (i.e. the checksum is a pseudo-header checksum): * x The field being updated is src/dst address or affects the length of * the packet. We need to update the pseudo-header checksum (note that this * checksum is not ones' complement). * x Some other field is being modified (e.g. src/dst port numbers): We * don't have to update anything. **/ u_int16_t pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp) { u_int32_t x; x = cksum + old - new; x = (x + (x >> 16)) & 0xffff; /* optimise: eliminate a branch when not udp */ if (udp && cksum == 0x0000) return cksum; if (udp && x == 0x0000) x = 0xffff; return (u_int16_t)(x); } static void pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi, u_int8_t udp) { u_int16_t old = htons(hi ? (*f << 8) : *f); u_int16_t new = htons(hi ? ( v << 8) : v); if (*f == v) return; *f = v; if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) return; *cksum = pf_cksum_fixup(*cksum, old, new, udp); } void pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v, bool hi, u_int8_t udp) { u_int8_t *fb = (u_int8_t *)f; u_int8_t *vb = (u_int8_t *)&v; pf_patch_8(m, cksum, fb++, *vb++, hi, udp); pf_patch_8(m, cksum, fb++, *vb++, !hi, udp); } void pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v, bool hi, u_int8_t udp) { u_int8_t *fb = (u_int8_t *)f; u_int8_t *vb = (u_int8_t *)&v; pf_patch_8(m, cksum, fb++, *vb++, hi, udp); pf_patch_8(m, cksum, fb++, *vb++, !hi, udp); pf_patch_8(m, cksum, fb++, *vb++, hi, udp); pf_patch_8(m, cksum, fb++, *vb++, !hi, udp); } u_int16_t pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp) { if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) return (cksum); return (pf_cksum_fixup(cksum, old, new, udp)); } static void pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af, sa_family_t naf) { struct pf_addr ao; u_int16_t po; PF_ACPY(&ao, a, af); if (af == naf) PF_ACPY(a, an, af); if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) *pc = ~*pc; if (p == NULL) /* no port -> done. no cksum to worry about. */ return; po = *p; *p = pn; switch (af) { #ifdef INET case AF_INET: switch (naf) { case AF_INET: *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, ao.addr16[0], an->addr16[0], 0), ao.addr16[1], an->addr16[1], 0); *p = pn; *pc = pf_cksum_fixup(pf_cksum_fixup(*pc, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u); *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u); break; #ifdef INET6 case AF_INET6: *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u), 0, an->addr16[2], u), 0, an->addr16[3], u), 0, an->addr16[4], u), 0, an->addr16[5], u), 0, an->addr16[6], u), 0, an->addr16[7], u), po, pn, u); /* XXXKP TODO *ic checksum? 
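* (presumably the ICMP header checksum also needs updating when translating between address families; left open upstream)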
*/ break; #endif /* INET6 */ } break; #endif /* INET */ #ifdef INET6 case AF_INET6: switch (naf) { #ifdef INET case AF_INET: *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u), ao.addr16[2], 0, u), ao.addr16[3], 0, u), ao.addr16[4], 0, u), ao.addr16[5], 0, u), ao.addr16[6], 0, u), ao.addr16[7], 0, u), po, pn, u); /* XXXKP TODO *ic checksum? */ break; #endif /* INET */ case AF_INET6: *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*pc, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u), ao.addr16[2], an->addr16[2], u), ao.addr16[3], an->addr16[3], u), ao.addr16[4], an->addr16[4], u), ao.addr16[5], an->addr16[5], u), ao.addr16[6], an->addr16[6], u), ao.addr16[7], an->addr16[7], u); *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u); break; } break; #endif /* INET6 */ default: unhandled_af(af); } if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) { *pc = ~*pc; if (! *pc) *pc = 0xffff; } } /* Changes a u_int32_t. Uses a void * so there are no align restrictions */ void pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) { u_int32_t ao; memcpy(&ao, a, sizeof(ao)); memcpy(a, &an, sizeof(u_int32_t)); *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u), ao % 65536, an % 65536, u); } void pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp) { u_int32_t ao; memcpy(&ao, a, sizeof(ao)); memcpy(a, &an, sizeof(u_int32_t)); *c = pf_proto_cksum_fixup(m, pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp), ao % 65536, an % 65536, udp); } #ifdef INET6 static void pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) { struct pf_addr ao; PF_ACPY(&ao, a, AF_INET6); PF_ACPY(a, an, AF_INET6); *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*c, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u), ao.addr16[2], an->addr16[2], u), ao.addr16[3], an->addr16[3], u), ao.addr16[4], an->addr16[4], u), ao.addr16[5], an->addr16[5], u), ao.addr16[6], an->addr16[6], u), ao.addr16[7], an->addr16[7], u); } #endif /* INET6 */ static void pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af) { struct pf_addr oia, ooa; PF_ACPY(&oia, ia, af); if (oa) PF_ACPY(&ooa, oa, af); /* Change inner protocol port, fix inner protocol checksum. */ if (ip != NULL) { u_int16_t oip = *ip; u_int32_t opc; if (pc != NULL) opc = *pc; *ip = np; if (pc != NULL) *pc = pf_cksum_fixup(*pc, oip, *ip, u); *ic = pf_cksum_fixup(*ic, oip, *ip, 0); if (pc != NULL) *ic = pf_cksum_fixup(*ic, opc, *pc, 0); } /* Change inner ip address, fix inner ip and icmp checksums. 
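	 * The address lives in the packet quoted inside the ICMP error,
	 * so both the quoted header's checksum and the ICMP checksum
	 * that covers it need fixing up.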
*/ PF_ACPY(ia, na, af); switch (af) { #ifdef INET case AF_INET: { u_int32_t oh2c = *h2c; *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c, oia.addr16[0], ia->addr16[0], 0), oia.addr16[1], ia->addr16[1], 0); *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, oia.addr16[0], ia->addr16[0], 0), oia.addr16[1], ia->addr16[1], 0); *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0); break; } #endif /* INET */ #ifdef INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*ic, oia.addr16[0], ia->addr16[0], u), oia.addr16[1], ia->addr16[1], u), oia.addr16[2], ia->addr16[2], u), oia.addr16[3], ia->addr16[3], u), oia.addr16[4], ia->addr16[4], u), oia.addr16[5], ia->addr16[5], u), oia.addr16[6], ia->addr16[6], u), oia.addr16[7], ia->addr16[7], u); break; #endif /* INET6 */ } /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */ if (oa) { PF_ACPY(oa, na, af); switch (af) { #ifdef INET case AF_INET: *hc = pf_cksum_fixup(pf_cksum_fixup(*hc, ooa.addr16[0], oa->addr16[0], 0), ooa.addr16[1], oa->addr16[1], 0); break; #endif /* INET */ #ifdef INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*ic, ooa.addr16[0], oa->addr16[0], u), ooa.addr16[1], oa->addr16[1], u), ooa.addr16[2], oa->addr16[2], u), ooa.addr16[3], oa->addr16[3], u), ooa.addr16[4], oa->addr16[4], u), ooa.addr16[5], oa->addr16[5], u), ooa.addr16[6], oa->addr16[6], u), ooa.addr16[7], oa->addr16[7], u); break; #endif /* INET6 */ } } } int pf_translate_af(struct pf_pdesc *pd) { #if defined(INET) && defined(INET6) struct mbuf *mp; struct ip *ip4; struct ip6_hdr *ip6; struct icmp6_hdr *icmp; struct m_tag *mtag; struct pf_fragment_tag *ftag; int hlen; hlen = pd->naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6); /* trim the old header */ m_adj(pd->m, pd->off); /* prepend a new one */ M_PREPEND(pd->m, hlen, M_NOWAIT); if (pd->m == NULL) return (-1); switch (pd->naf) { case AF_INET: ip4 = mtod(pd->m, struct ip *); bzero(ip4, hlen); ip4->ip_v = IPVERSION; ip4->ip_hl = hlen >> 2; ip4->ip_tos = pd->tos; ip4->ip_len = htons(hlen + (pd->tot_len - pd->off)); - ip_fillid(ip4); + ip_fillid(ip4, V_ip_random_id); ip4->ip_ttl = pd->ttl; ip4->ip_p = pd->proto; ip4->ip_src = pd->nsaddr.v4; ip4->ip_dst = pd->ndaddr.v4; pd->src = (struct pf_addr *)&ip4->ip_src; pd->dst = (struct pf_addr *)&ip4->ip_dst; pd->off = sizeof(struct ip); break; case AF_INET6: ip6 = mtod(pd->m, struct ip6_hdr *); bzero(ip6, hlen); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_flow |= htonl((u_int32_t)pd->tos << 20); ip6->ip6_plen = htons(pd->tot_len - pd->off); ip6->ip6_nxt = pd->proto; if (!pd->ttl || pd->ttl > IPV6_DEFHLIM) ip6->ip6_hlim = IPV6_DEFHLIM; else ip6->ip6_hlim = pd->ttl; ip6->ip6_src = pd->nsaddr.v6; ip6->ip6_dst = pd->ndaddr.v6; pd->src = (struct pf_addr *)&ip6->ip6_src; pd->dst = (struct pf_addr *)&ip6->ip6_dst; pd->off = sizeof(struct ip6_hdr); /* * If we're dealing with a reassembled packet we need to adjust * the header length from the IPv4 header size to IPv6 header * size. 
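	 * The stored fragment metadata must also account for the IPv6
	 * fragment extension header that refragmentation will prepend.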
*/ mtag = m_tag_find(pd->m, PACKET_TAG_PF_REASSEMBLED, NULL); if (mtag) { ftag = (struct pf_fragment_tag *)(mtag + 1); ftag->ft_hdrlen = sizeof(*ip6); ftag->ft_maxlen -= sizeof(struct ip6_hdr) - sizeof(struct ip) + sizeof(struct ip6_frag); } break; default: return (-1); } /* recalculate icmp/icmp6 checksums */ if (pd->proto == IPPROTO_ICMP || pd->proto == IPPROTO_ICMPV6) { int off; if ((mp = m_pulldown(pd->m, hlen, sizeof(*icmp), &off)) == NULL) { pd->m = NULL; return (-1); } icmp = (struct icmp6_hdr *)(mp->m_data + off); icmp->icmp6_cksum = 0; icmp->icmp6_cksum = pd->naf == AF_INET ? in4_cksum(pd->m, 0, hlen, ntohs(ip4->ip_len) - hlen) : in6_cksum(pd->m, IPPROTO_ICMPV6, hlen, ntohs(ip6->ip6_plen)); } #endif /* INET && INET6 */ return (0); } int pf_change_icmp_af(struct mbuf *m, int off, struct pf_pdesc *pd, struct pf_pdesc *pd2, struct pf_addr *src, struct pf_addr *dst, sa_family_t af, sa_family_t naf) { #if defined(INET) && defined(INET6) struct mbuf *n = NULL; struct ip *ip4; struct ip6_hdr *ip6; int hlen, olen, mlen; if (af == naf || (af != AF_INET && af != AF_INET6) || (naf != AF_INET && naf != AF_INET6)) return (-1); /* split the mbuf chain on the inner ip/ip6 header boundary */ if ((n = m_split(m, off, M_NOWAIT)) == NULL) return (-1); /* old header */ olen = pd2->off - off; /* new header */ hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6); /* trim old header */ m_adj(n, olen); /* prepend a new one */ M_PREPEND(n, hlen, M_NOWAIT); if (n == NULL) return (-1); /* translate inner ip/ip6 header */ switch (naf) { case AF_INET: ip4 = mtod(n, struct ip *); bzero(ip4, sizeof(*ip4)); ip4->ip_v = IPVERSION; ip4->ip_hl = sizeof(*ip4) >> 2; ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - olen); - ip_fillid(ip4); + ip_fillid(ip4, V_ip_random_id); ip4->ip_off = htons(IP_DF); ip4->ip_ttl = pd2->ttl; if (pd2->proto == IPPROTO_ICMPV6) ip4->ip_p = IPPROTO_ICMP; else ip4->ip_p = pd2->proto; ip4->ip_src = src->v4; ip4->ip_dst = dst->v4; ip4->ip_sum = in_cksum(n, ip4->ip_hl << 2); break; case AF_INET6: ip6 = mtod(n, struct ip6_hdr *); bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_plen = htons(pd2->tot_len - olen); if (pd2->proto == IPPROTO_ICMP) ip6->ip6_nxt = IPPROTO_ICMPV6; else ip6->ip6_nxt = pd2->proto; if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM) ip6->ip6_hlim = IPV6_DEFHLIM; else ip6->ip6_hlim = pd2->ttl; ip6->ip6_src = src->v6; ip6->ip6_dst = dst->v6; break; default: unhandled_af(naf); } /* adjust payload offset and total packet length */ pd2->off += hlen - olen; pd->tot_len += hlen - olen; /* merge modified inner packet with the original header */ mlen = n->m_pkthdr.len; m_cat(m, n); m->m_pkthdr.len += mlen; #endif /* INET && INET6 */ return (0); } #define PTR_IP(field) (offsetof(struct ip, field)) #define PTR_IP6(field) (offsetof(struct ip6_hdr, field)) int pf_translate_icmp_af(int af, void *arg) { #if defined(INET) && defined(INET6) struct icmp *icmp4; struct icmp6_hdr *icmp6; u_int32_t mtu; int32_t ptr = -1; u_int8_t type; u_int8_t code; switch (af) { case AF_INET: icmp6 = arg; type = icmp6->icmp6_type; code = icmp6->icmp6_code; mtu = ntohl(icmp6->icmp6_mtu); switch (type) { case ICMP6_ECHO_REQUEST: type = ICMP_ECHO; break; case ICMP6_ECHO_REPLY: type = ICMP_ECHOREPLY; break; case ICMP6_DST_UNREACH: type = ICMP_UNREACH; switch (code) { case ICMP6_DST_UNREACH_NOROUTE: case ICMP6_DST_UNREACH_BEYONDSCOPE: case ICMP6_DST_UNREACH_ADDR: code = ICMP_UNREACH_HOST; break; case ICMP6_DST_UNREACH_ADMIN: code = ICMP_UNREACH_HOST_PROHIB; break; case ICMP6_DST_UNREACH_NOPORT: code = 
ICMP_UNREACH_PORT; break; default: return (-1); } break; case ICMP6_PACKET_TOO_BIG: type = ICMP_UNREACH; code = ICMP_UNREACH_NEEDFRAG; mtu -= 20; break; case ICMP6_TIME_EXCEEDED: type = ICMP_TIMXCEED; break; case ICMP6_PARAM_PROB: switch (code) { case ICMP6_PARAMPROB_HEADER: type = ICMP_PARAMPROB; code = ICMP_PARAMPROB_ERRATPTR; ptr = ntohl(icmp6->icmp6_pptr); if (ptr == PTR_IP6(ip6_vfc)) ; /* preserve */ else if (ptr == PTR_IP6(ip6_vfc) + 1) ptr = PTR_IP(ip_tos); else if (ptr == PTR_IP6(ip6_plen) || ptr == PTR_IP6(ip6_plen) + 1) ptr = PTR_IP(ip_len); else if (ptr == PTR_IP6(ip6_nxt)) ptr = PTR_IP(ip_p); else if (ptr == PTR_IP6(ip6_hlim)) ptr = PTR_IP(ip_ttl); else if (ptr >= PTR_IP6(ip6_src) && ptr < PTR_IP6(ip6_dst)) ptr = PTR_IP(ip_src); else if (ptr >= PTR_IP6(ip6_dst) && ptr < sizeof(struct ip6_hdr)) ptr = PTR_IP(ip_dst); else { return (-1); } break; case ICMP6_PARAMPROB_NEXTHEADER: type = ICMP_UNREACH; code = ICMP_UNREACH_PROTOCOL; break; default: return (-1); } break; default: return (-1); } if (icmp6->icmp6_type != type) { icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum, icmp6->icmp6_type, type, 0); icmp6->icmp6_type = type; } if (icmp6->icmp6_code != code) { icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum, icmp6->icmp6_code, code, 0); icmp6->icmp6_code = code; } if (icmp6->icmp6_mtu != htonl(mtu)) { icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum, htons(ntohl(icmp6->icmp6_mtu)), htons(mtu), 0); /* aligns well with an icmpv4 nextmtu */ icmp6->icmp6_mtu = htonl(mtu); } if (ptr >= 0 && icmp6->icmp6_pptr != htonl(ptr)) { icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum, htons(ntohl(icmp6->icmp6_pptr)), htons(ptr), 0); /* the icmpv4 pptr is a single byte, carried in the most significant byte here */ icmp6->icmp6_pptr = htonl(ptr << 24); } break; case AF_INET6: icmp4 = arg; type = icmp4->icmp_type; code = icmp4->icmp_code; mtu = ntohs(icmp4->icmp_nextmtu); switch (type) { case ICMP_ECHO: type = ICMP6_ECHO_REQUEST; break; case ICMP_ECHOREPLY: type = ICMP6_ECHO_REPLY; break; case ICMP_UNREACH: type = ICMP6_DST_UNREACH; switch (code) { case ICMP_UNREACH_NET: case ICMP_UNREACH_HOST: case ICMP_UNREACH_NET_UNKNOWN: case ICMP_UNREACH_HOST_UNKNOWN: case ICMP_UNREACH_ISOLATED: case ICMP_UNREACH_TOSNET: case ICMP_UNREACH_TOSHOST: code = ICMP6_DST_UNREACH_NOROUTE; break; case ICMP_UNREACH_PORT: code = ICMP6_DST_UNREACH_NOPORT; break; case ICMP_UNREACH_NET_PROHIB: case ICMP_UNREACH_HOST_PROHIB: case ICMP_UNREACH_FILTER_PROHIB: case ICMP_UNREACH_PRECEDENCE_CUTOFF: code = ICMP6_DST_UNREACH_ADMIN; break; case ICMP_UNREACH_PROTOCOL: type = ICMP6_PARAM_PROB; code = ICMP6_PARAMPROB_NEXTHEADER; ptr = offsetof(struct ip6_hdr, ip6_nxt); break; case ICMP_UNREACH_NEEDFRAG: type = ICMP6_PACKET_TOO_BIG; code = 0; mtu += 20; break; default: return (-1); } break; case ICMP_TIMXCEED: type = ICMP6_TIME_EXCEEDED; break; case ICMP_PARAMPROB: type = ICMP6_PARAM_PROB; switch (code) { case ICMP_PARAMPROB_ERRATPTR: code = ICMP6_PARAMPROB_HEADER; break; case ICMP_PARAMPROB_LENGTH: code = ICMP6_PARAMPROB_HEADER; break; default: return (-1); } ptr = icmp4->icmp_pptr; if (ptr == 0 || ptr == PTR_IP(ip_tos)) ; /* preserve */ else if (ptr == PTR_IP(ip_len) || ptr == PTR_IP(ip_len) + 1) ptr = PTR_IP6(ip6_plen); else if (ptr == PTR_IP(ip_ttl)) ptr = PTR_IP6(ip6_hlim); else if (ptr == PTR_IP(ip_p)) ptr = PTR_IP6(ip6_nxt); else if (ptr >= PTR_IP(ip_src) && ptr < PTR_IP(ip_dst)) ptr = PTR_IP6(ip6_src); else if (ptr >= PTR_IP(ip_dst) && ptr < sizeof(struct ip)) ptr = PTR_IP6(ip6_dst); else { return (-1); } break; default: return (-1); } if
(icmp4->icmp_type != type) { icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum, icmp4->icmp_type, type, 0); icmp4->icmp_type = type; } if (icmp4->icmp_code != code) { icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum, icmp4->icmp_code, code, 0); icmp4->icmp_code = code; } if (icmp4->icmp_nextmtu != htons(mtu)) { icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum, icmp4->icmp_nextmtu, htons(mtu), 0); icmp4->icmp_nextmtu = htons(mtu); } if (ptr >= 0 && icmp4->icmp_void != ptr) { icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum, htons(icmp4->icmp_pptr), htons(ptr), 0); icmp4->icmp_void = htonl(ptr); } break; default: unhandled_af(af); } #endif /* INET && INET6 */ return (0); } /* * Need to modulate the sequence numbers in the TCP SACK option * (credits to Krzysztof Pfaff for report and patch) */ static int pf_modulate_sack(struct pf_pdesc *pd, struct tcphdr *th, struct pf_state_peer *dst) { int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen; u_int8_t opts[TCP_MAXOLEN], *opt = opts; int copyback = 0, i, olen; struct sackblk sack; #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) if (hlen < TCPOLEN_SACKLEN || hlen > MAX_TCPOPTLEN || !pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, hlen, NULL, NULL, pd->af)) return 0; while (hlen >= TCPOLEN_SACKLEN) { size_t startoff = opt - opts; olen = opt[1]; switch (*opt) { case TCPOPT_EOL: /* FALLTHROUGH */ case TCPOPT_NOP: opt++; hlen--; break; case TCPOPT_SACK: if (olen > hlen) olen = hlen; if (olen >= TCPOLEN_SACKLEN) { for (i = 2; i + TCPOLEN_SACK <= olen; i += TCPOLEN_SACK) { memcpy(&sack, &opt[i], sizeof(sack)); pf_patch_32_unaligned(pd->m, &th->th_sum, &sack.start, htonl(ntohl(sack.start) - dst->seqdiff), PF_ALGNMNT(startoff), 0); pf_patch_32_unaligned(pd->m, &th->th_sum, &sack.end, htonl(ntohl(sack.end) - dst->seqdiff), PF_ALGNMNT(startoff), 0); memcpy(&opt[i], &sack, sizeof(sack)); } copyback = 1; } /* FALLTHROUGH */ default: if (olen < 2) olen = 2; hlen -= olen; opt += olen; } } if (copyback) m_copyback(pd->m, pd->off + sizeof(*th), thoptlen, (caddr_t)opts); return (copyback); } struct mbuf * pf_build_tcp(const struct pf_krule *r, sa_family_t af, const struct pf_addr *saddr, const struct pf_addr *daddr, u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid) { struct mbuf *m; int len, tlen; #ifdef INET struct ip *h = NULL; #endif /* INET */ #ifdef INET6 struct ip6_hdr *h6 = NULL; #endif /* INET6 */ struct tcphdr *th; char *opt; struct pf_mtag *pf_mtag; len = 0; th = NULL; /* maximum segment size tcp option */ tlen = sizeof(struct tcphdr); if (mss) tlen += 4; switch (af) { #ifdef INET case AF_INET: len = sizeof(struct ip) + tlen; break; #endif /* INET */ #ifdef INET6 case AF_INET6: len = sizeof(struct ip6_hdr) + tlen; break; #endif /* INET6 */ default: unhandled_af(af); } m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) return (NULL); #ifdef MAC mac_netinet_firewall_send(m); #endif if ((pf_mtag = pf_get_mtag(m)) == NULL) { m_freem(m); return (NULL); } m->m_flags |= mbuf_flags; pf_mtag->tag = mtag_tag; pf_mtag->flags = mtag_flags; if (rtableid >= 0) M_SETFIB(m, rtableid); #ifdef ALTQ if (r != NULL && r->qid) { pf_mtag->qid = r->qid; /* add hints for ecn */ pf_mtag->hdr = mtod(m, struct ip *); } #endif /* ALTQ */ m->m_data += max_linkhdr; m->m_pkthdr.len = m->m_len = len; /* The rest of the stack assumes a rcvif, so provide one. * This is a locally generated packet, so .. close enough. 
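	 * (V_loif is the per-VNET loopback interface.)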
*/ m->m_pkthdr.rcvif = V_loif; bzero(m->m_data, len); switch (af) { #ifdef INET case AF_INET: m->m_pkthdr.csum_flags |= CSUM_TCP; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); h = mtod(m, struct ip *); h->ip_p = IPPROTO_TCP; h->ip_len = htons(tlen); h->ip_v = 4; h->ip_hl = sizeof(*h) >> 2; h->ip_tos = IPTOS_LOWDELAY; h->ip_len = htons(len); h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0); h->ip_ttl = ttl ? ttl : V_ip_defttl; h->ip_sum = 0; h->ip_src.s_addr = saddr->v4.s_addr; h->ip_dst.s_addr = daddr->v4.s_addr; th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); th->th_sum = in_pseudo(h->ip_src.s_addr, h->ip_dst.s_addr, htons(len - sizeof(struct ip) + IPPROTO_TCP)); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); h6 = mtod(m, struct ip6_hdr *); /* IP header fields included in the TCP checksum */ h6->ip6_nxt = IPPROTO_TCP; h6->ip6_plen = htons(tlen); h6->ip6_vfc |= IPV6_VERSION; h6->ip6_hlim = V_ip6_defhlim; memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); th->th_sum = in6_cksum_pseudo(h6, len - sizeof(struct ip6_hdr), IPPROTO_TCP, 0); break; #endif /* INET6 */ } /* TCP header */ th->th_sport = sport; th->th_dport = dport; th->th_seq = htonl(seq); th->th_ack = htonl(ack); th->th_off = tlen >> 2; tcp_set_flags(th, tcp_flags); th->th_win = htons(win); if (mss) { opt = (char *)(th + 1); opt[0] = TCPOPT_MAXSEG; opt[1] = 4; HTONS(mss); memcpy((opt + 2), &mss, 2); } return (m); } static void pf_send_sctp_abort(sa_family_t af, struct pf_pdesc *pd, uint8_t ttl, int rtableid) { struct mbuf *m; #ifdef INET struct ip *h = NULL; #endif /* INET */ #ifdef INET6 struct ip6_hdr *h6 = NULL; #endif /* INET6 */ struct sctphdr *hdr; struct sctp_chunkhdr *chunk; struct pf_send_entry *pfse; int off = 0; MPASS(af == pd->af); m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) return; m->m_data += max_linkhdr; m->m_flags |= M_SKIP_FIREWALL; /* The rest of the stack assumes a rcvif, so provide one. * This is a locally generated packet, so .. close enough. */ m->m_pkthdr.rcvif = V_loif; /* IPv4|6 header */ switch (af) { #ifdef INET case AF_INET: bzero(m->m_data, sizeof(struct ip) + sizeof(*hdr) + sizeof(*chunk)); h = mtod(m, struct ip *); /* IPv4 header; SCTP uses a CRC32c, not a pseudo-header checksum */ h->ip_p = IPPROTO_SCTP; h->ip_len = htons(sizeof(*h) + sizeof(*hdr) + sizeof(*chunk)); h->ip_ttl = ttl ? ttl : V_ip_defttl; h->ip_src = pd->dst->v4; h->ip_dst = pd->src->v4; off += sizeof(struct ip); break; #endif /* INET */ #ifdef INET6 case AF_INET6: bzero(m->m_data, sizeof(struct ip6_hdr) + sizeof(*hdr) + sizeof(*chunk)); h6 = mtod(m, struct ip6_hdr *); /* IPv6 header */ h6->ip6_vfc |= IPV6_VERSION; h6->ip6_nxt = IPPROTO_SCTP; h6->ip6_plen = htons(sizeof(*h6) + sizeof(*hdr) + sizeof(*chunk)); h6->ip6_hlim = ttl ? ttl : V_ip6_defhlim; memcpy(&h6->ip6_src, &pd->dst->v6, sizeof(struct in6_addr)); memcpy(&h6->ip6_dst, &pd->src->v6, sizeof(struct in6_addr)); off += sizeof(struct ip6_hdr); break; #endif /* INET6 */ default: unhandled_af(af); } /* SCTP header */ hdr = mtodo(m, off); hdr->src_port = pd->hdr.sctp.dest_port; hdr->dest_port = pd->hdr.sctp.src_port; hdr->v_tag = pd->sctp_initiate_tag; hdr->checksum = 0; /* Abort chunk.
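	 * A minimal ABORT carries just the chunk header; no error causes
	 * are appended.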
*/ off += sizeof(struct sctphdr); chunk = mtodo(m, off); chunk->chunk_type = SCTP_ABORT_ASSOCIATION; chunk->chunk_length = htons(sizeof(*chunk)); /* SCTP checksum */ off += sizeof(*chunk); m->m_pkthdr.len = m->m_len = off; pf_sctp_checksum(m, off - sizeof(*hdr) - sizeof(*chunk)); if (rtableid >= 0) M_SETFIB(m, rtableid); /* Allocate outgoing queue entry, mbuf and mbuf tag. */ pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); if (pfse == NULL) { m_freem(m); return; } switch (af) { #ifdef INET case AF_INET: pfse->pfse_type = PFSE_IP; break; #endif /* INET */ #ifdef INET6 case AF_INET6: pfse->pfse_type = PFSE_IP6; break; #endif /* INET6 */ } pfse->pfse_m = m; pf_send(pfse); } void pf_send_tcp(const struct pf_krule *r, sa_family_t af, const struct pf_addr *saddr, const struct pf_addr *daddr, u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid) { struct pf_send_entry *pfse; struct mbuf *m; m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, tcp_flags, win, mss, ttl, mbuf_flags, mtag_tag, mtag_flags, rtableid); if (m == NULL) return; /* Allocate outgoing queue entry, mbuf and mbuf tag. */ pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); if (pfse == NULL) { m_freem(m); return; } switch (af) { #ifdef INET case AF_INET: pfse->pfse_type = PFSE_IP; break; #endif /* INET */ #ifdef INET6 case AF_INET6: pfse->pfse_type = PFSE_IP6; break; #endif /* INET6 */ default: unhandled_af(af); } pfse->pfse_m = m; pf_send(pfse); } static void pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd, struct pf_state_key *sk, struct tcphdr *th, u_int16_t bproto_sum, u_int16_t bip_sum, u_short *reason, int rtableid) { struct pf_addr * const saddr = pd->src; struct pf_addr * const daddr = pd->dst; /* undo NAT changes, if they have taken place */ if (nr != NULL) { PF_ACPY(saddr, &sk->addr[pd->sidx], pd->af); PF_ACPY(daddr, &sk->addr[pd->didx], pd->af); if (pd->sport) *pd->sport = sk->port[pd->sidx]; if (pd->dport) *pd->dport = sk->port[pd->didx]; if (pd->ip_sum) *pd->ip_sum = bip_sum; m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any); } if (pd->proto == IPPROTO_TCP && ((r->rule_flag & PFRULE_RETURNRST) || (r->rule_flag & PFRULE_RETURN)) && !(tcp_get_flags(th) & TH_RST)) { u_int32_t ack = ntohl(th->th_seq) + pd->p_len; if (pf_check_proto_cksum(pd->m, pd->off, pd->tot_len - pd->off, IPPROTO_TCP, pd->af)) REASON_SET(reason, PFRES_PROTCKSUM); else { if (tcp_get_flags(th) & TH_SYN) ack++; if (tcp_get_flags(th) & TH_FIN) ack++; pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, r->return_ttl, M_SKIP_FIREWALL, 0, 0, rtableid); } } else if (pd->proto == IPPROTO_SCTP && (r->rule_flag & PFRULE_RETURN)) { pf_send_sctp_abort(pd->af, pd, r->return_ttl, rtableid); } else if (pd->proto != IPPROTO_ICMP && pd->af == AF_INET && r->return_icmp) pf_send_icmp(pd->m, r->return_icmp >> 8, r->return_icmp & 255, pd->af, r, rtableid); else if (pd->proto != IPPROTO_ICMPV6 && pd->af == AF_INET6 && r->return_icmp6) pf_send_icmp(pd->m, r->return_icmp6 >> 8, r->return_icmp6 & 255, pd->af, r, rtableid); } static int pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m) { struct m_tag *mtag; u_int8_t mpcp; mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL); if (mtag == NULL) return (0); if (prio == PF_PRIO_ZERO) prio = 0; mpcp = *(uint8_t *)(mtag + 1); return (mpcp == prio); } static int 
pf_icmp_to_bandlim(uint8_t type) { switch (type) { case ICMP_ECHO: case ICMP_ECHOREPLY: return (BANDLIM_ICMP_ECHO); case ICMP_TSTAMP: case ICMP_TSTAMPREPLY: return (BANDLIM_ICMP_TSTAMP); case ICMP_UNREACH: default: return (BANDLIM_ICMP_UNREACH); } } static void pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af, struct pf_krule *r, int rtableid) { struct pf_send_entry *pfse; struct mbuf *m0; struct pf_mtag *pf_mtag; /* ICMP packet rate limitation. */ switch (af) { #ifdef INET6 case AF_INET6: if (icmp6_ratelimit(NULL, type, code)) return; break; #endif #ifdef INET case AF_INET: if (badport_bandlim(pf_icmp_to_bandlim(type)) != 0) return; break; #endif } /* Allocate outgoing queue entry, mbuf and mbuf tag. */ pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); if (pfse == NULL) return; if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) { free(pfse, M_PFTEMP); return; } if ((pf_mtag = pf_get_mtag(m0)) == NULL) { free(pfse, M_PFTEMP); return; } /* XXX: revisit */ m0->m_flags |= M_SKIP_FIREWALL; if (rtableid >= 0) M_SETFIB(m0, rtableid); #ifdef ALTQ if (r->qid) { pf_mtag->qid = r->qid; /* add hints for ecn */ pf_mtag->hdr = mtod(m0, struct ip *); } #endif /* ALTQ */ switch (af) { #ifdef INET case AF_INET: pfse->pfse_type = PFSE_ICMP; break; #endif /* INET */ #ifdef INET6 case AF_INET6: pfse->pfse_type = PFSE_ICMP6; break; #endif /* INET6 */ } pfse->pfse_m = m0; pfse->icmpopts.type = type; pfse->icmpopts.code = code; pf_send(pfse); } /* * Return 1 if the addresses a and b match (with mask m), otherwise return 0. * If n is 0, they match if they are equal. If n is != 0, they match if they * are different. */ int pf_match_addr(u_int8_t n, const struct pf_addr *a, const struct pf_addr *m, const struct pf_addr *b, sa_family_t af) { int match = 0; switch (af) { #ifdef INET case AF_INET: if (IN_ARE_MASKED_ADDR_EQUAL(a->v4, b->v4, m->v4)) match++; break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (IN6_ARE_MASKED_ADDR_EQUAL(&a->v6, &b->v6, &m->v6)) match++; break; #endif /* INET6 */ } if (match) { if (n) return (0); else return (1); } else { if (n) return (1); else return (0); } } /* * Return 1 if b <= a <= e, otherwise return 0. 
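	 * Addresses are compared one 32-bit word at a time in host byte
	 * order, most significant word first for IPv6.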
*/ int pf_match_addr_range(const struct pf_addr *b, const struct pf_addr *e, const struct pf_addr *a, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) || (ntohl(a->addr32[0]) > ntohl(e->addr32[0]))) return (0); break; #endif /* INET */ #ifdef INET6 case AF_INET6: { int i; /* check a >= b */ for (i = 0; i < 4; ++i) if (ntohl(a->addr32[i]) > ntohl(b->addr32[i])) break; else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i])) return (0); /* check a <= e */ for (i = 0; i < 4; ++i) if (ntohl(a->addr32[i]) < ntohl(e->addr32[i])) break; else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i])) return (0); break; } #endif /* INET6 */ } return (1); } static int pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) { switch (op) { case PF_OP_IRG: return ((p > a1) && (p < a2)); case PF_OP_XRG: return ((p < a1) || (p > a2)); case PF_OP_RRG: return ((p >= a1) && (p <= a2)); case PF_OP_EQ: return (p == a1); case PF_OP_NE: return (p != a1); case PF_OP_LT: return (p < a1); case PF_OP_LE: return (p <= a1); case PF_OP_GT: return (p > a1); case PF_OP_GE: return (p >= a1); } return (0); /* never reached */ } int pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) { NTOHS(a1); NTOHS(a2); NTOHS(p); return (pf_match(op, a1, a2, p)); } static int pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) { if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) return (0); return (pf_match(op, a1, a2, u)); } static int pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) { if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) return (0); return (pf_match(op, a1, a2, g)); } int pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag) { if (*tag == -1) *tag = mtag; return ((!r->match_tag_not && r->match_tag == *tag) || (r->match_tag_not && r->match_tag != *tag)); } static int pf_match_rcvif(struct mbuf *m, struct pf_krule *r) { struct ifnet *ifp = m->m_pkthdr.rcvif; struct pfi_kkif *kif; if (ifp == NULL) return (0); kif = (struct pfi_kkif *)ifp->if_pf_kif; if (kif == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test_via: kif == NULL, @%d via %s\n", r->nr, r->rcv_ifname)); return (0); } return (pfi_kkif_match(r->rcv_kif, kif)); } int pf_tag_packet(struct pf_pdesc *pd, int tag) { KASSERT(tag > 0, ("%s: tag %d", __func__, tag)); if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(pd->m)) == NULL)) return (ENOMEM); pd->pf_mtag->tag = tag; return (0); } #define PF_ANCHOR_STACKSIZE 32 struct pf_kanchor_stackframe { struct pf_kruleset *rs; struct pf_krule *r; /* XXX: + match bit */ struct pf_kanchor *child; }; /* * XXX: We rely on malloc(9) returning pointer aligned addresses. 
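	 * That keeps the low bits of each stacked rule pointer zero, so
	 * bit 0 is free to carry the PF_ANCHORSTACK_MATCH flag below.
	 */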
*/ #define PF_ANCHORSTACK_MATCH 0x00000001 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH) #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH) #define PF_ANCHOR_RULE(f) (struct pf_krule *) \ ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK) #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \ ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \ } while (0) void pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth, struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a) { struct pf_kanchor_stackframe *f; PF_RULES_RASSERT(); if (*depth >= PF_ANCHOR_STACKSIZE) { printf("%s: anchor stack overflow on %s\n", __func__, (*r)->anchor->name); *r = TAILQ_NEXT(*r, entries); return; } else if (*depth == 0 && a != NULL) *a = *r; f = stack + (*depth)++; f->rs = *rs; f->r = *r; if ((*r)->anchor_wildcard) { struct pf_kanchor_node *parent = &(*r)->anchor->children; if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) { *r = NULL; return; } *rs = &f->child->ruleset; } else { f->child = NULL; *rs = &(*r)->anchor->ruleset; } *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); } int pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth, struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a, int *match) { struct pf_kanchor_stackframe *f; struct pf_krule *fr; int quick = 0; PF_RULES_RASSERT(); do { if (*depth <= 0) break; f = stack + *depth - 1; fr = PF_ANCHOR_RULE(f); if (f->child != NULL) { f->child = RB_NEXT(pf_kanchor_node, &fr->anchor->children, f->child); if (f->child != NULL) { *rs = &f->child->ruleset; *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); if (*r == NULL) continue; else break; } } (*depth)--; if (*depth == 0 && a != NULL) *a = NULL; *rs = f->rs; if (match != NULL && *match > *depth) { *match = *depth; if (f->r->quick) quick = 1; } *r = TAILQ_NEXT(fr, entries); } while (*r == NULL); return (quick); } struct pf_keth_anchor_stackframe { struct pf_keth_ruleset *rs; struct pf_keth_rule *r; /* XXX: + match bit */ struct pf_keth_anchor *child; }; #define PF_ETH_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH) #define PF_ETH_ANCHOR_RULE(f) (struct pf_keth_rule *) \ ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK) #define PF_ETH_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \ ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \ } while (0) void pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth, struct pf_keth_ruleset **rs, struct pf_keth_rule **r, struct pf_keth_rule **a, int *match) { struct pf_keth_anchor_stackframe *f; NET_EPOCH_ASSERT(); if (match) *match = 0; if (*depth >= PF_ANCHOR_STACKSIZE) { printf("%s: anchor stack overflow on %s\n", __func__, (*r)->anchor->name); *r = TAILQ_NEXT(*r, entries); return; } else if (*depth == 0 && a != NULL) *a = *r; f = stack + (*depth)++; f->rs = *rs; f->r = *r; if ((*r)->anchor_wildcard) { struct pf_keth_anchor_node *parent = &(*r)->anchor->children; if ((f->child = RB_MIN(pf_keth_anchor_node, parent)) == NULL) { *r = NULL; return; } *rs = &f->child->ruleset; } else { f->child = NULL; *rs = &(*r)->anchor->ruleset; } *r = TAILQ_FIRST((*rs)->active.rules); } int pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth, struct pf_keth_ruleset **rs, struct pf_keth_rule **r, struct pf_keth_rule **a, int *match) { struct pf_keth_anchor_stackframe *f; struct pf_keth_rule *fr; int quick = 0; NET_EPOCH_ASSERT(); do { if (*depth <= 0) break; f = stack + *depth - 1; fr = PF_ETH_ANCHOR_RULE(f); if (f->child != NULL) { /* * This block traverses through * a 
wildcard anchor. */ if (match != NULL && *match) { /* * If any of "*" matched, then * "foo/ *" matched, mark frame * appropriately. */ PF_ETH_ANCHOR_SET_MATCH(f); *match = 0; } f->child = RB_NEXT(pf_keth_anchor_node, &fr->anchor->children, f->child); if (f->child != NULL) { *rs = &f->child->ruleset; *r = TAILQ_FIRST((*rs)->active.rules); if (*r == NULL) continue; else break; } } (*depth)--; if (*depth == 0 && a != NULL) *a = NULL; *rs = f->rs; if (PF_ETH_ANCHOR_MATCH(f) || (match != NULL && *match)) quick = fr->quick; *r = TAILQ_NEXT(fr, entries); } while (*r == NULL); return (quick); } #ifdef INET6 void pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); break; #endif /* INET */ case AF_INET6: naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); break; } } void pf_addr_inc(struct pf_addr *addr, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); break; #endif /* INET */ case AF_INET6: if (addr->addr32[3] == 0xffffffff) { addr->addr32[3] = 0; if (addr->addr32[2] == 0xffffffff) { addr->addr32[2] = 0; if (addr->addr32[1] == 0xffffffff) { addr->addr32[1] = 0; addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); } else addr->addr32[1] = htonl(ntohl(addr->addr32[1]) + 1); } else addr->addr32[2] = htonl(ntohl(addr->addr32[2]) + 1); } else addr->addr32[3] = htonl(ntohl(addr->addr32[3]) + 1); break; } } #endif /* INET6 */ void pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a) { /* * Modern rules use the same flags in rules as they do in states. */ a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID| PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO)); /* * Old-style scrub rules have different flags which need to be translated. 
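	 * (PFRULE_RANDOMID and PFRULE_SET_TOS are mapped onto their
	 * PFSTATE_* equivalents below.)
	 */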
*/ if (r->rule_flag & PFRULE_RANDOMID) a->flags |= PFSTATE_RANDOMID; if (r->scrub_flags & PFSTATE_SETTOS || r->rule_flag & PFRULE_SET_TOS ) { a->flags |= PFSTATE_SETTOS; a->set_tos = r->set_tos; } if (r->qid) a->qid = r->qid; if (r->pqid) a->pqid = r->pqid; if (r->rtableid >= 0) a->rtableid = r->rtableid; a->log |= r->log; if (r->min_ttl) a->min_ttl = r->min_ttl; if (r->max_mss) a->max_mss = r->max_mss; if (r->dnpipe) a->dnpipe = r->dnpipe; if (r->dnrpipe) a->dnrpipe = r->dnrpipe; if (r->dnpipe || r->dnrpipe) { if (r->free_flags & PFRULE_DN_IS_PIPE) a->flags |= PFSTATE_DN_IS_PIPE; else a->flags &= ~PFSTATE_DN_IS_PIPE; } if (r->scrub_flags & PFSTATE_SETPRIO) { a->set_prio[0] = r->set_prio[0]; a->set_prio[1] = r->set_prio[1]; } } int pf_socket_lookup(struct pf_pdesc *pd) { struct pf_addr *saddr, *daddr; u_int16_t sport, dport; struct inpcbinfo *pi; struct inpcb *inp; pd->lookup.uid = UID_MAX; pd->lookup.gid = GID_MAX; switch (pd->proto) { case IPPROTO_TCP: sport = pd->hdr.tcp.th_sport; dport = pd->hdr.tcp.th_dport; pi = &V_tcbinfo; break; case IPPROTO_UDP: sport = pd->hdr.udp.uh_sport; dport = pd->hdr.udp.uh_dport; pi = &V_udbinfo; break; default: return (-1); } if (pd->dir == PF_IN) { saddr = pd->src; daddr = pd->dst; } else { u_int16_t p; p = sport; sport = dport; dport = p; saddr = pd->dst; daddr = pd->src; } switch (pd->af) { #ifdef INET case AF_INET: inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4, dport, INPLOOKUP_RLOCKPCB, NULL, pd->m); if (inp == NULL) { inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4, dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL, pd->m); if (inp == NULL) return (-1); } break; #endif /* INET */ #ifdef INET6 case AF_INET6: inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6, dport, INPLOOKUP_RLOCKPCB, NULL, pd->m); if (inp == NULL) { inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6, dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL, pd->m); if (inp == NULL) return (-1); } break; #endif /* INET6 */ } INP_RLOCK_ASSERT(inp); pd->lookup.uid = inp->inp_cred->cr_uid; pd->lookup.gid = inp->inp_cred->cr_groups[0]; INP_RUNLOCK(inp); return (1); } u_int8_t pf_get_wscale(struct pf_pdesc *pd) { struct tcphdr *th = &pd->hdr.tcp; int hlen; u_int8_t hdr[60]; u_int8_t *opt, optlen; u_int8_t wscale = 0; hlen = th->th_off << 2; /* hlen <= sizeof(hdr) */ if (hlen <= sizeof(struct tcphdr)) return (0); if (!pf_pull_hdr(pd->m, pd->off, hdr, hlen, NULL, NULL, pd->af)) return (0); opt = hdr + sizeof(struct tcphdr); hlen -= sizeof(struct tcphdr); while (hlen >= 3) { switch (*opt) { case TCPOPT_EOL: case TCPOPT_NOP: ++opt; --hlen; break; case TCPOPT_WINDOW: wscale = opt[2]; if (wscale > TCP_MAX_WINSHIFT) wscale = TCP_MAX_WINSHIFT; wscale |= PF_WSCALE_FLAG; /* FALLTHROUGH */ default: optlen = opt[1]; if (optlen < 2) optlen = 2; hlen -= optlen; opt += optlen; break; } } return (wscale); } u_int16_t pf_get_mss(struct pf_pdesc *pd) { struct tcphdr *th = &pd->hdr.tcp; int hlen; u_int8_t hdr[60]; u_int8_t *opt, optlen; u_int16_t mss = V_tcp_mssdflt; hlen = th->th_off << 2; /* hlen <= sizeof(hdr) */ if (hlen <= sizeof(struct tcphdr)) return (0); if (!pf_pull_hdr(pd->m, pd->off, hdr, hlen, NULL, NULL, pd->af)) return (0); opt = hdr + sizeof(struct tcphdr); hlen -= sizeof(struct tcphdr); while (hlen >= TCPOLEN_MAXSEG) { switch (*opt) { case TCPOPT_EOL: case TCPOPT_NOP: ++opt; --hlen; break; case TCPOPT_MAXSEG: memcpy(&mss, (opt + 2), 2); NTOHS(mss); /* FALLTHROUGH */ default: optlen = opt[1]; if (optlen < 2) optlen = 2; hlen -= optlen; opt += optlen; break; } 
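		/* Keep scanning until fewer than TCPOLEN_MAXSEG option bytes remain. */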
} return (mss); } static u_int16_t pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer) { struct nhop_object *nh; #ifdef INET6 struct in6_addr dst6; uint32_t scopeid; #endif /* INET6 */ int hlen = 0; uint16_t mss = 0; NET_EPOCH_ASSERT(); switch (af) { #ifdef INET case AF_INET: hlen = sizeof(struct ip); nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0); if (nh != NULL) mss = nh->nh_mtu - hlen - sizeof(struct tcphdr); break; #endif /* INET */ #ifdef INET6 case AF_INET6: hlen = sizeof(struct ip6_hdr); in6_splitscope(&addr->v6, &dst6, &scopeid); nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0); if (nh != NULL) mss = nh->nh_mtu - hlen - sizeof(struct tcphdr); break; #endif /* INET6 */ } mss = max(V_tcp_mssdflt, mss); mss = min(mss, offer); mss = max(mss, 64); /* sanity - at least max opt space */ return (mss); } static u_int32_t pf_tcp_iss(struct pf_pdesc *pd) { SHA512_CTX ctx; union { uint8_t bytes[SHA512_DIGEST_LENGTH]; uint32_t words[1]; } digest; if (V_pf_tcp_secret_init == 0) { arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret)); SHA512_Init(&V_pf_tcp_secret_ctx); SHA512_Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret, sizeof(V_pf_tcp_secret)); V_pf_tcp_secret_init = 1; } ctx = V_pf_tcp_secret_ctx; SHA512_Update(&ctx, &pd->hdr.tcp.th_sport, sizeof(u_short)); SHA512_Update(&ctx, &pd->hdr.tcp.th_dport, sizeof(u_short)); switch (pd->af) { case AF_INET6: SHA512_Update(&ctx, &pd->src->v6, sizeof(struct in6_addr)); SHA512_Update(&ctx, &pd->dst->v6, sizeof(struct in6_addr)); break; case AF_INET: SHA512_Update(&ctx, &pd->src->v4, sizeof(struct in_addr)); SHA512_Update(&ctx, &pd->dst->v4, sizeof(struct in_addr)); break; } SHA512_Final(digest.bytes, &ctx); V_pf_tcp_iss_off += 4096; #define ISN_RANDOM_INCREMENT (4096 - 1) return (digest.words[0] + (arc4random() & ISN_RANDOM_INCREMENT) + V_pf_tcp_iss_off); #undef ISN_RANDOM_INCREMENT } static bool pf_match_eth_addr(const uint8_t *a, const struct pf_keth_rule_addr *r) { bool match = true; /* Always matches if not set */ if (! r->isset) return (!r->neg); for (int i = 0; i < ETHER_ADDR_LEN; i++) { if ((a[i] & r->mask[i]) != (r->addr[i] & r->mask[i])) { match = false; break; } } return (match ^ r->neg); } static int pf_match_eth_tag(struct mbuf *m, struct pf_keth_rule *r, int *tag, int mtag) { if (*tag == -1) *tag = mtag; return ((!r->match_tag_not && r->match_tag == *tag) || (r->match_tag_not && r->match_tag != *tag)); } static void pf_bridge_to(struct ifnet *ifp, struct mbuf *m) { /* If we don't have the interface drop the packet. 
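	 * The same goes for interface types we cannot transmit on
	 * directly; only Ethernet-like interfaces pass the check below.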
*/ if (ifp == NULL) { m_freem(m); return; } switch (ifp->if_type) { case IFT_ETHER: case IFT_XETHER: case IFT_L2VLAN: case IFT_BRIDGE: case IFT_IEEE8023ADLAG: break; default: m_freem(m); return; } ifp->if_transmit(ifp, m); } static int pf_test_eth_rule(int dir, struct pfi_kkif *kif, struct mbuf **m0) { #ifdef INET struct ip ip; #endif #ifdef INET6 struct ip6_hdr ip6; #endif struct mbuf *m = *m0; struct ether_header *e; struct pf_keth_rule *r, *rm, *a = NULL; struct pf_keth_ruleset *ruleset = NULL; struct pf_mtag *mtag; struct pf_keth_ruleq *rules; struct pf_addr *src = NULL, *dst = NULL; struct pfi_kkif *bridge_to; sa_family_t af = 0; uint16_t proto; int asd = 0, match = 0; int tag = -1; uint8_t action; struct pf_keth_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; MPASS(kif->pfik_ifp->if_vnet == curvnet); NET_EPOCH_ASSERT(); PF_RULES_RLOCK_TRACKER; SDT_PROBE3(pf, eth, test_rule, entry, dir, kif->pfik_ifp, m); mtag = pf_find_mtag(m); if (mtag != NULL && mtag->flags & PF_MTAG_FLAG_DUMMYNET) { /* Dummynet re-injects packets after they've * completed their delay. We've already * processed them, so pass unconditionally. */ /* But only once. We may see the packet multiple times (e.g. * PFIL_IN/PFIL_OUT). */ pf_dummynet_flag_remove(m, mtag); return (PF_PASS); } if (__predict_false(m->m_len < sizeof(struct ether_header)) && (m = *m0 = m_pullup(*m0, sizeof(struct ether_header))) == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test_eth_rule: m_len < sizeof(struct ether_header)" ", pullup failed\n")); return (PF_DROP); } e = mtod(m, struct ether_header *); proto = ntohs(e->ether_type); switch (proto) { #ifdef INET case ETHERTYPE_IP: { if (m_length(m, NULL) < (sizeof(struct ether_header) + sizeof(ip))) return (PF_DROP); af = AF_INET; m_copydata(m, sizeof(struct ether_header), sizeof(ip), (caddr_t)&ip); src = (struct pf_addr *)&ip.ip_src; dst = (struct pf_addr *)&ip.ip_dst; break; } #endif /* INET */ #ifdef INET6 case ETHERTYPE_IPV6: { if (m_length(m, NULL) < (sizeof(struct ether_header) + sizeof(ip6))) return (PF_DROP); af = AF_INET6; m_copydata(m, sizeof(struct ether_header), sizeof(ip6), (caddr_t)&ip6); src = (struct pf_addr *)&ip6.ip6_src; dst = (struct pf_addr *)&ip6.ip6_dst; break; } #endif /* INET6 */ } PF_RULES_RLOCK(); ruleset = V_pf_keth; rules = atomic_load_ptr(&ruleset->active.rules); for (r = TAILQ_FIRST(rules), rm = NULL; r != NULL;) { counter_u64_add(r->evaluations, 1); SDT_PROBE2(pf, eth, test_rule, test, r->nr, r); if (pfi_kkif_match(r->kif, kif) == r->ifnot) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "kif"); r = r->skip[PFE_SKIP_IFP].ptr; } else if (r->direction && r->direction != dir) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "dir"); r = r->skip[PFE_SKIP_DIR].ptr; } else if (r->proto && r->proto != proto) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "proto"); r = r->skip[PFE_SKIP_PROTO].ptr; } else if (! pf_match_eth_addr(e->ether_shost, &r->src)) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "src"); r = r->skip[PFE_SKIP_SRC_ADDR].ptr; } else if (! 
pf_match_eth_addr(e->ether_dhost, &r->dst)) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "dst"); r = r->skip[PFE_SKIP_DST_ADDR].ptr; } else if (src != NULL && PF_MISMATCHAW(&r->ipsrc.addr, src, af, r->ipsrc.neg, kif, M_GETFIB(m))) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "ip_src"); r = r->skip[PFE_SKIP_SRC_IP_ADDR].ptr; } else if (dst != NULL && PF_MISMATCHAW(&r->ipdst.addr, dst, af, r->ipdst.neg, kif, M_GETFIB(m))) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "ip_dst"); r = r->skip[PFE_SKIP_DST_IP_ADDR].ptr; } else if (r->match_tag && !pf_match_eth_tag(m, r, &tag, mtag ? mtag->tag : 0)) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "match_tag"); r = TAILQ_NEXT(r, entries); } else { if (r->tag) tag = r->tag; if (r->anchor == NULL) { /* Rule matches */ rm = r; SDT_PROBE2(pf, eth, test_rule, match, r->nr, r); if (r->quick) break; r = TAILQ_NEXT(r, entries); } else { pf_step_into_keth_anchor(anchor_stack, &asd, &ruleset, &r, &a, &match); } } if (r == NULL && pf_step_out_of_keth_anchor(anchor_stack, &asd, &ruleset, &r, &a, &match)) break; } r = rm; SDT_PROBE2(pf, eth, test_rule, final_match, (r != NULL ? r->nr : -1), r); /* Default to pass. */ if (r == NULL) { PF_RULES_RUNLOCK(); return (PF_PASS); } /* Execute action. */ counter_u64_add(r->packets[dir == PF_OUT], 1); counter_u64_add(r->bytes[dir == PF_OUT], m_length(m, NULL)); pf_update_timestamp(r); /* Shortcut. Don't tag if we're just going to drop anyway. */ if (r->action == PF_DROP) { PF_RULES_RUNLOCK(); return (PF_DROP); } if (tag > 0) { if (mtag == NULL) mtag = pf_get_mtag(m); if (mtag == NULL) { PF_RULES_RUNLOCK(); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } mtag->tag = tag; } if (r->qid != 0) { if (mtag == NULL) mtag = pf_get_mtag(m); if (mtag == NULL) { PF_RULES_RUNLOCK(); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } mtag->qid = r->qid; } action = r->action; bridge_to = r->bridge_to; /* Dummynet */ if (r->dnpipe) { struct ip_fw_args dnflow; /* Drop packet if dummynet is not loaded. */ if (ip_dn_io_ptr == NULL) { PF_RULES_RUNLOCK(); m_freem(m); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } if (mtag == NULL) mtag = pf_get_mtag(m); if (mtag == NULL) { PF_RULES_RUNLOCK(); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } bzero(&dnflow, sizeof(dnflow)); /* We don't have port numbers here, so we set 0. That means * that we'll be somewhat limited in distinguishing flows (i.e. * only based on IP addresses, not based on port numbers), but * it's better than nothing. */ dnflow.f_id.dst_port = 0; dnflow.f_id.src_port = 0; dnflow.f_id.proto = 0; dnflow.rule.info = r->dnpipe; dnflow.rule.info |= IPFW_IS_DUMMYNET; if (r->dnflags & PFRULE_DN_IS_PIPE) dnflow.rule.info |= IPFW_IS_PIPE; dnflow.f_id.extra = dnflow.rule.info; dnflow.flags = dir == PF_IN ? IPFW_ARGS_IN : IPFW_ARGS_OUT; dnflow.flags |= IPFW_ARGS_ETHER; dnflow.ifp = kif->pfik_ifp; switch (af) { case AF_INET: dnflow.f_id.addr_type = 4; dnflow.f_id.src_ip = src->v4.s_addr; dnflow.f_id.dst_ip = dst->v4.s_addr; break; case AF_INET6: dnflow.flags |= IPFW_ARGS_IP6; dnflow.f_id.addr_type = 6; dnflow.f_id.src_ip6 = src->v6; dnflow.f_id.dst_ip6 = dst->v6; break; } PF_RULES_RUNLOCK(); mtag->flags |= PF_MTAG_FLAG_DUMMYNET; ip_dn_io_ptr(m0, &dnflow); if (*m0 != NULL) pf_dummynet_flag_remove(m, mtag); } else { PF_RULES_RUNLOCK(); } if (action == PF_PASS && bridge_to) { pf_bridge_to(bridge_to->pfik_ifp, *m0); *m0 = NULL; /* We've eaten the packet. 
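		 * pf_bridge_to() has either transmitted or freed the mbuf,
		 * so clear the caller's pointer to prevent a double free.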
*/ } return (action); } #define PF_TEST_ATTRIB(t, a)\ do { \ if (t) { \ r = a; \ goto nextrule; \ } \ } while (0) static int pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pf_pdesc *pd, struct pf_krule **am, struct pf_kruleset **rsm, struct inpcb *inp) { struct pf_krule *nr = NULL; struct pf_krule *r, *a = NULL; struct pf_kruleset *ruleset = NULL; struct pf_krule_slist match_rules; struct pf_krule_item *ri; struct tcphdr *th = &pd->hdr.tcp; struct pf_state_key *sk = NULL, *nk = NULL; u_short reason, transerror; int rewrite = 0; int tag = -1; int asd = 0; int match = 0; int state_icmp = 0, icmp_dir; u_int16_t virtual_type, virtual_id; u_int16_t bproto_sum = 0, bip_sum = 0; u_int8_t icmptype = 0, icmpcode = 0; struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; struct pf_udp_mapping *udp_mapping = NULL; PF_RULES_RASSERT(); PF_ACPY(&pd->nsaddr, pd->src, pd->af); PF_ACPY(&pd->ndaddr, pd->dst, pd->af); SLIST_INIT(&match_rules); if (inp != NULL) { INP_LOCK_ASSERT(inp); pd->lookup.uid = inp->inp_cred->cr_uid; pd->lookup.gid = inp->inp_cred->cr_groups[0]; pd->lookup.done = 1; } switch (pd->virtual_proto) { case IPPROTO_TCP: pd->nsport = th->th_sport; pd->ndport = th->th_dport; break; case IPPROTO_UDP: pd->nsport = pd->hdr.udp.uh_sport; pd->ndport = pd->hdr.udp.uh_dport; break; case IPPROTO_SCTP: pd->nsport = pd->hdr.sctp.src_port; pd->ndport = pd->hdr.sctp.dest_port; break; #ifdef INET case IPPROTO_ICMP: MPASS(pd->af == AF_INET); icmptype = pd->hdr.icmp.icmp_type; icmpcode = pd->hdr.icmp.icmp_code; state_icmp = pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id, &virtual_type); if (icmp_dir == PF_IN) { pd->nsport = virtual_id; pd->ndport = virtual_type; } else { pd->nsport = virtual_type; pd->ndport = virtual_id; } break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: MPASS(pd->af == AF_INET6); icmptype = pd->hdr.icmp6.icmp6_type; icmpcode = pd->hdr.icmp6.icmp6_code; state_icmp = pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id, &virtual_type); if (icmp_dir == PF_IN) { pd->nsport = virtual_id; pd->ndport = virtual_type; } else { pd->nsport = virtual_type; pd->ndport = virtual_id; } break; #endif /* INET6 */ default: pd->nsport = pd->ndport = 0; break; } pd->osport = pd->nsport; pd->odport = pd->ndport; r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); /* check packet for BINAT/NAT/RDR */ transerror = pf_get_translation(pd, pd->off, &sk, &nk, anchor_stack, &nr, &udp_mapping); switch (transerror) { default: /* A translation error occurred. */ REASON_SET(&reason, transerror); goto cleanup; case PFRES_MAX: /* No match. 
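		 * pf_get_translation() found no binat/nat/rdr rule for this
		 * packet; continue to plain rule evaluation with the packet
		 * untranslated.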
*/ break; case PFRES_MATCH: KASSERT(sk != NULL, ("%s: null sk", __func__)); KASSERT(nk != NULL, ("%s: null nk", __func__)); if (nr->log) { PFLOG_PACKET(nr->action, PFRES_MATCH, nr, a, ruleset, pd, 1, NULL); } if (pd->ip_sum) bip_sum = *pd->ip_sum; switch (pd->proto) { case IPPROTO_TCP: bproto_sum = th->th_sum; if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) || nk->port[pd->sidx] != pd->nsport) { pf_change_ap(pd->m, pd->src, &th->th_sport, pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 0, pd->af, pd->naf); pd->sport = &th->th_sport; pd->nsport = th->th_sport; PF_ACPY(&pd->nsaddr, pd->src, pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], pd->af) || nk->port[pd->didx] != pd->ndport) { pf_change_ap(pd->m, pd->dst, &th->th_dport, pd->ip_sum, &th->th_sum, &nk->addr[pd->didx], nk->port[pd->didx], 0, pd->af, pd->naf); pd->dport = &th->th_dport; pd->ndport = th->th_dport; PF_ACPY(&pd->ndaddr, pd->dst, pd->af); } rewrite++; break; case IPPROTO_UDP: bproto_sum = pd->hdr.udp.uh_sum; if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) || nk->port[pd->sidx] != pd->nsport) { pf_change_ap(pd->m, pd->src, &pd->hdr.udp.uh_sport, pd->ip_sum, &pd->hdr.udp.uh_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, pd->af, pd->naf); pd->sport = &pd->hdr.udp.uh_sport; pd->nsport = pd->hdr.udp.uh_sport; PF_ACPY(&pd->nsaddr, pd->src, pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], pd->af) || nk->port[pd->didx] != pd->ndport) { pf_change_ap(pd->m, pd->dst, &pd->hdr.udp.uh_dport, pd->ip_sum, &pd->hdr.udp.uh_sum, &nk->addr[pd->didx], nk->port[pd->didx], 1, pd->af, pd->naf); pd->dport = &pd->hdr.udp.uh_dport; pd->ndport = pd->hdr.udp.uh_dport; PF_ACPY(&pd->ndaddr, pd->dst, pd->af); } rewrite++; break; case IPPROTO_SCTP: { uint16_t checksum = 0; if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) || nk->port[pd->sidx] != pd->nsport) { pf_change_ap(pd->m, pd->src, &pd->hdr.sctp.src_port, pd->ip_sum, &checksum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, pd->af, pd->naf); pd->sport = &pd->hdr.sctp.src_port; pd->nsport = pd->hdr.sctp.src_port; PF_ACPY(&pd->nsaddr, pd->src, pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], pd->af) || nk->port[pd->didx] != pd->ndport) { pf_change_ap(pd->m, pd->dst, &pd->hdr.sctp.dest_port, pd->ip_sum, &checksum, &nk->addr[pd->didx], nk->port[pd->didx], 1, pd->af, pd->naf); pd->dport = &pd->hdr.sctp.dest_port; pd->ndport = pd->hdr.sctp.dest_port; PF_ACPY(&pd->ndaddr, pd->dst, pd->af); } break; } #ifdef INET case IPPROTO_ICMP: if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], AF_INET)) { pf_change_a(&pd->src->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); PF_ACPY(&pd->nsaddr, pd->src, pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], AF_INET)) { pf_change_a(&pd->dst->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); PF_ACPY(&pd->ndaddr, pd->dst, pd->af); } if (virtual_type == htons(ICMP_ECHO) && nk->port[pd->sidx] != pd->hdr.icmp.icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( pd->hdr.icmp.icmp_cksum, pd->nsport, nk->port[pd->sidx], 0); pd->hdr.icmp.icmp_id = nk->port[pd->sidx]; pd->sport = &pd->hdr.icmp.icmp_id; } m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp); break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], AF_INET6)) { pf_change_a6(pd->src, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[pd->sidx], 0); PF_ACPY(&pd->nsaddr, pd->src, pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], AF_INET6)) { pf_change_a6(pd->dst, &pd->hdr.icmp6.icmp6_cksum, 
&nk->addr[pd->didx], 0); PF_ACPY(&pd->ndaddr, pd->dst, pd->af); } rewrite++; break; #endif /* INET6 */ default: switch (pd->af) { #ifdef INET case AF_INET: if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], AF_INET)) { pf_change_a(&pd->src->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); PF_ACPY(&pd->nsaddr, pd->src, pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], AF_INET)) { pf_change_a(&pd->dst->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); PF_ACPY(&pd->ndaddr, pd->dst, pd->af); } break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], AF_INET6)) { PF_ACPY(&pd->nsaddr, &nk->addr[pd->sidx], pd->af); PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); } if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], AF_INET6)) { PF_ACPY(&pd->ndaddr, &nk->addr[pd->didx], pd->af); PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); } break; #endif /* INET6 */ } break; } if (nr->natpass) r = NULL; } while (r != NULL) { if (pd->related_rule) { *rm = pd->related_rule; break; } pf_counter_u64_add(&r->evaluations, 1); PF_TEST_ATTRIB(pfi_kkif_match(r->kif, pd->kif) == r->ifnot, r->skip[PF_SKIP_IFP]); PF_TEST_ATTRIB(r->direction && r->direction != pd->dir, r->skip[PF_SKIP_DIR]); PF_TEST_ATTRIB(r->af && r->af != pd->af, r->skip[PF_SKIP_AF]); PF_TEST_ATTRIB(r->proto && r->proto != pd->proto, r->skip[PF_SKIP_PROTO]); PF_TEST_ATTRIB(PF_MISMATCHAW(&r->src.addr, &pd->nsaddr, pd->naf, r->src.neg, pd->kif, M_GETFIB(pd->m)), r->skip[PF_SKIP_SRC_ADDR]); PF_TEST_ATTRIB(PF_MISMATCHAW(&r->dst.addr, &pd->ndaddr, pd->af, r->dst.neg, NULL, M_GETFIB(pd->m)), r->skip[PF_SKIP_DST_ADDR]); switch (pd->virtual_proto) { case PF_VPROTO_FRAGMENT: /* tcp/udp only. port_op always 0 in other cases */ PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB((pd->proto == IPPROTO_TCP && r->flagset), TAILQ_NEXT(r, entries)); /* icmp only. type/code always 0 in other cases */ PF_TEST_ATTRIB((r->type || r->code), TAILQ_NEXT(r, entries)); /* tcp/udp only. {uid|gid}.op always 0 in other cases */ PF_TEST_ATTRIB((r->gid.op || r->uid.op), TAILQ_NEXT(r, entries)); break; case IPPROTO_TCP: PF_TEST_ATTRIB((r->flagset & tcp_get_flags(th)) != r->flags, TAILQ_NEXT(r, entries)); /* FALLTHROUGH */ case IPPROTO_SCTP: case IPPROTO_UDP: /* tcp/udp only. port_op always 0 in other cases */ PF_TEST_ATTRIB(r->src.port_op && !pf_match_port(r->src.port_op, r->src.port[0], r->src.port[1], pd->nsport), r->skip[PF_SKIP_SRC_PORT]); /* tcp/udp only. port_op always 0 in other cases */ PF_TEST_ATTRIB(r->dst.port_op && !pf_match_port(r->dst.port_op, r->dst.port[0], r->dst.port[1], pd->ndport), r->skip[PF_SKIP_DST_PORT]); /* tcp/udp only. uid.op always 0 in other cases */ PF_TEST_ATTRIB(r->uid.op && (pd->lookup.done || (pd->lookup.done = pf_socket_lookup(pd), 1)) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], pd->lookup.uid), TAILQ_NEXT(r, entries)); /* tcp/udp only. gid.op always 0 in other cases */ PF_TEST_ATTRIB(r->gid.op && (pd->lookup.done || (pd->lookup.done = pf_socket_lookup(pd), 1)) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], pd->lookup.gid), TAILQ_NEXT(r, entries)); break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: /* icmp only. type always 0 in other cases */ PF_TEST_ATTRIB(r->type && r->type != icmptype + 1, TAILQ_NEXT(r, entries)); /* icmp only.
code always 0 in other cases */ PF_TEST_ATTRIB(r->code && r->code != icmpcode + 1, TAILQ_NEXT(r, entries)); break; default: break; } PF_TEST_ATTRIB(r->tos && !(r->tos == pd->tos), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB(r->prio && !pf_match_ieee8021q_pcp(r->prio, pd->m), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB(r->prob && r->prob <= arc4random(), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB(r->match_tag && !pf_match_tag(pd->m, r, &tag, pd->pf_mtag ? pd->pf_mtag->tag : 0), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(pd->m, r) == r->rcvifnot), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT && pd->virtual_proto != PF_VPROTO_FRAGMENT), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB(r->os_fingerprint != PF_OSFP_ANY && (pd->virtual_proto != IPPROTO_TCP || !pf_osfp_match( pf_osfp_fingerprint(pd, th), r->os_fingerprint)), TAILQ_NEXT(r, entries)); if (r->tag) tag = r->tag; if (r->anchor == NULL) { if (r->action == PF_MATCH) { ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO); if (ri == NULL) { REASON_SET(&reason, PFRES_MEMORY); goto cleanup; } ri->r = r; SLIST_INSERT_HEAD(&match_rules, ri, entry); pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); pf_counter_u64_critical_exit(); pf_rule_to_actions(r, &pd->act); if (r->rule_flag & PFRULE_AFTO) pd->naf = r->naf; if (pd->af != pd->naf) { if (pf_get_transaddr_af(r, pd) == -1) { REASON_SET(&reason, PFRES_TRANSLATE); goto cleanup; } } if (r->log) PFLOG_PACKET(r->action, PFRES_MATCH, r, a, ruleset, pd, 1, NULL); } else { match = asd; *rm = r; *am = a; *rsm = ruleset; } if (pd->act.log & PF_LOG_MATCHES) pf_log_matches(pd, r, a, ruleset, &match_rules); if (r->quick) break; r = TAILQ_NEXT(r, entries); } else pf_step_into_anchor(anchor_stack, &asd, &ruleset, PF_RULESET_FILTER, &r, &a); nextrule: if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match)) break; } r = *rm; a = *am; ruleset = *rsm; REASON_SET(&reason, PFRES_MATCH); /* apply actions for last matching pass/block rule */ pf_rule_to_actions(r, &pd->act); if (r->rule_flag & PFRULE_AFTO) pd->naf = r->naf; if (pd->af != pd->naf) { if (pf_get_transaddr_af(r, pd) == -1) { REASON_SET(&reason, PFRES_TRANSLATE); goto cleanup; } } if (r->log) { if (rewrite) m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any); PFLOG_PACKET(r->action, reason, r, a, ruleset, pd, 1, NULL); } if (pd->act.log & PF_LOG_MATCHES) pf_log_matches(pd, r, a, ruleset, &match_rules); if (pd->virtual_proto != PF_VPROTO_FRAGMENT && (r->action == PF_DROP) && ((r->rule_flag & PFRULE_RETURNRST) || (r->rule_flag & PFRULE_RETURNICMP) || (r->rule_flag & PFRULE_RETURN))) { pf_return(r, nr, pd, sk, th, bproto_sum, bip_sum, &reason, r->rtableid); } if (r->action == PF_DROP) goto cleanup; if (tag > 0 && pf_tag_packet(pd, tag)) { REASON_SET(&reason, PFRES_MEMORY); goto cleanup; } if (pd->act.rtableid >= 0) M_SETFIB(pd->m, pd->act.rtableid); if (r->rt) { struct pf_ksrc_node *sn = NULL; struct pf_srchash *snh = NULL; struct pf_kpool *pool = &r->route; /* Backwards compatibility. */ if (TAILQ_EMPTY(&pool->list)) pool = &r->rdr; /* * Set act.rt here instead of in pf_rule_to_actions() because * it is applied only from the last pass rule.
	 */
	pd->act.rt = r->rt;
	/* Don't use REASON_SET, pf_map_addr increases the reason counters */
	reason = pf_map_addr_sn(pd->af, r, pd->src, &pd->act.rt_addr,
	    &pd->act.rt_kif, NULL, &sn, &snh, pool, PF_SN_ROUTE);
	if (reason != 0)
		goto cleanup;
	}

	if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
	   (!state_icmp && (r->keep_state || nr != NULL ||
	    (pd->flags & PFDESC_TCP_NORM)))) {
		int action;
		bool nat64;

		action = pf_create_state(r, nr, a, pd, nk, sk,
		    &rewrite, sm, tag, bproto_sum, bip_sum,
		    &match_rules, udp_mapping);
		if (action != PF_PASS) {
			pf_udp_mapping_release(udp_mapping);
			pd->act.log |= PF_LOG_FORCE;
			if (action == PF_DROP &&
			    (r->rule_flag & PFRULE_RETURN))
				pf_return(r, nr, pd, sk, th,
				    bproto_sum, bip_sum, &reason,
				    pd->act.rtableid);
			return (action);
		}

		nat64 = pd->af != pd->naf;
		if (nat64) {
			int ret;

			if (sk == NULL)
				sk = (*sm)->key[pd->dir == PF_IN ?
				    PF_SK_STACK : PF_SK_WIRE];
			if (nk == NULL)
				nk = (*sm)->key[pd->dir == PF_IN ?
				    PF_SK_WIRE : PF_SK_STACK];

			if (pd->dir == PF_IN) {
				ret = pf_translate(pd, &sk->addr[pd->didx],
				    sk->port[pd->didx], &sk->addr[pd->sidx],
				    sk->port[pd->sidx], virtual_type,
				    icmp_dir);
			} else {
				ret = pf_translate(pd, &sk->addr[pd->sidx],
				    sk->port[pd->sidx], &sk->addr[pd->didx],
				    sk->port[pd->didx], virtual_type,
				    icmp_dir);
			}

			if (ret < 0)
				goto cleanup;

			rewrite += ret;
		}
	} else {
		while ((ri = SLIST_FIRST(&match_rules))) {
			SLIST_REMOVE_HEAD(&match_rules, entry);
			free(ri, M_PF_RULE_ITEM);
		}

		uma_zfree(V_pf_state_key_z, sk);
		uma_zfree(V_pf_state_key_z, nk);

		pf_udp_mapping_release(udp_mapping);
	}

	/* copy back packet headers if we performed NAT operations */
	if (rewrite)
		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);

	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
	    pd->dir == PF_OUT &&
	    V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, pd->m))
		/*
		 * We want the state created, but we don't
		 * want to send this in case a partner
		 * firewall has to know about it to allow
		 * replies through it.
		 */
		return (PF_DEFER);

	if (rewrite && sk != NULL && nk != NULL && sk->af != nk->af) {
		return (PF_AFRT);
	} else
		return (PF_PASS);

cleanup:
	while ((ri = SLIST_FIRST(&match_rules))) {
		SLIST_REMOVE_HEAD(&match_rules, entry);
		free(ri, M_PF_RULE_ITEM);
	}

	uma_zfree(V_pf_state_key_z, sk);
	uma_zfree(V_pf_state_key_z, nk);
	pf_udp_mapping_release(udp_mapping);

	return (PF_DROP);
}

static int
pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
    struct pf_pdesc *pd, struct pf_state_key *nk, struct pf_state_key *sk,
    int *rewrite, struct pf_kstate **sm, int tag, u_int16_t bproto_sum,
    u_int16_t bip_sum, struct pf_krule_slist *match_rules,
    struct pf_udp_mapping *udp_mapping)
{
	struct pf_kstate	*s = NULL;
	struct pf_ksrc_node	*sns[PF_SN_MAX] = { NULL };
	/*
	 * XXXKS: The hash for PF_SN_LIMIT and PF_SN_ROUTE should be the same
	 * but for PF_SN_NAT it is different. Don't try optimizing it,
	 * just store all 3 hashes.
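	 * (sns[] and snhs[] are indexed by pf_sn_types_t, so each
	 * source-node type keeps its own node pointer and hash row.)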
*/ struct pf_srchash *snhs[PF_SN_MAX] = { NULL }; struct tcphdr *th = &pd->hdr.tcp; u_int16_t mss = V_tcp_mssdflt; u_short reason, sn_reason; struct pf_krule_item *ri; struct pf_kpool *pool_route = &r->route; /* check maximums */ if (r->max_states && (counter_u64_fetch(r->states_cur) >= r->max_states)) { counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1); REASON_SET(&reason, PFRES_MAXSTATES); goto csfailed; } /* src node for limits */ if ((r->rule_flag & PFRULE_SRCTRACK) && (sn_reason = pf_insert_src_node(sns, snhs, r, pd->src, pd->af, NULL, NULL, PF_SN_LIMIT)) != 0) { REASON_SET(&reason, sn_reason); goto csfailed; } /* src node for route-to rule */ if (TAILQ_EMPTY(&pool_route->list)) /* Backwards compatibility. */ pool_route = &r->rdr; if ((pool_route->opts & PF_POOL_STICKYADDR) && (sn_reason = pf_insert_src_node(sns, snhs, r, pd->src, pd->af, &pd->act.rt_addr, pd->act.rt_kif, PF_SN_ROUTE)) != 0) { REASON_SET(&reason, sn_reason); goto csfailed; } /* src node for translation rule */ if (nr != NULL && (nr->rdr.opts & PF_POOL_STICKYADDR) && (sn_reason = pf_insert_src_node(sns, snhs, nr, &sk->addr[pd->sidx], pd->af, &nk->addr[1], NULL, PF_SN_NAT)) != 0 ) { REASON_SET(&reason, sn_reason); goto csfailed; } s = pf_alloc_state(M_NOWAIT); if (s == NULL) { REASON_SET(&reason, PFRES_MEMORY); goto csfailed; } s->rule = r; s->nat_rule = nr; s->anchor = a; memcpy(&s->match_rules, match_rules, sizeof(s->match_rules)); memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions)); STATE_INC_COUNTERS(s); if (r->allow_opts) s->state_flags |= PFSTATE_ALLOWOPTS; if (r->rule_flag & PFRULE_STATESLOPPY) s->state_flags |= PFSTATE_SLOPPY; if (pd->flags & PFDESC_TCP_NORM) /* Set by old-style scrub rules */ s->state_flags |= PFSTATE_SCRUB_TCP; if ((r->rule_flag & PFRULE_PFLOW) || (nr != NULL && nr->rule_flag & PFRULE_PFLOW)) s->state_flags |= PFSTATE_PFLOW; s->act.log = pd->act.log & PF_LOG_ALL; s->sync_state = PFSYNC_S_NONE; s->state_flags |= pd->act.flags; /* Only needed for pfsync and state export */ if (nr != NULL) s->act.log |= nr->log & PF_LOG_ALL; switch (pd->proto) { case IPPROTO_TCP: s->src.seqlo = ntohl(th->th_seq); s->src.seqhi = s->src.seqlo + pd->p_len + 1; if ((tcp_get_flags(th) & (TH_SYN|TH_ACK)) == TH_SYN && r->keep_state == PF_STATE_MODULATE) { /* Generate sequence number modulator */ if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 0) s->src.seqdiff = 1; pf_change_proto_a(pd->m, &th->th_seq, &th->th_sum, htonl(s->src.seqlo + s->src.seqdiff), 0); *rewrite = 1; } else s->src.seqdiff = 0; if (tcp_get_flags(th) & TH_SYN) { s->src.seqhi++; s->src.wscale = pf_get_wscale(pd); } s->src.max_win = MAX(ntohs(th->th_win), 1); if (s->src.wscale & PF_WSCALE_MASK) { /* Remove scale factor from initial window */ int win = s->src.max_win; win += 1 << (s->src.wscale & PF_WSCALE_MASK); s->src.max_win = (win - 1) >> (s->src.wscale & PF_WSCALE_MASK); } if (tcp_get_flags(th) & TH_FIN) s->src.seqhi++; s->dst.seqhi = 1; s->dst.max_win = 1; pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT); pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED); s->timeout = PFTM_TCP_FIRST_PACKET; atomic_add_32(&V_pf_status.states_halfopen, 1); break; case IPPROTO_UDP: pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE); pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC); s->timeout = PFTM_UDP_FIRST_PACKET; break; case IPPROTO_SCTP: pf_set_protostate(s, PF_PEER_SRC, SCTP_COOKIE_WAIT); pf_set_protostate(s, PF_PEER_DST, SCTP_CLOSED); s->timeout = PFTM_SCTP_FIRST_PACKET; break; case IPPROTO_ICMP: #ifdef INET6 case IPPROTO_ICMPV6: #endif 
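		/* ICMP has no protocol state machine; only the timeout is set. */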
		s->timeout = PFTM_ICMP_FIRST_PACKET;
		break;
	default:
		pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE);
		pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
		s->timeout = PFTM_OTHER_FIRST_PACKET;
	}

	s->creation = s->expire = pf_get_uptime();

	if (pd->proto == IPPROTO_TCP) {
		if (s->state_flags & PFSTATE_SCRUB_TCP &&
		    pf_normalize_tcp_init(pd, th, &s->src, &s->dst)) {
			REASON_SET(&reason, PFRES_MEMORY);
			goto csfailed;
		}
		if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub &&
		    pf_normalize_tcp_stateful(pd, &reason, th, s,
		    &s->src, &s->dst, rewrite)) {
			/* This really shouldn't happen!!! */
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_normalize_tcp_stateful failed on first "
			    "pkt\n"));
			goto csfailed;
		}
	} else if (pd->proto == IPPROTO_SCTP) {
		if (pf_normalize_sctp_init(pd, &s->src, &s->dst))
			goto csfailed;
		if (! (pd->sctp_flags & (PFDESC_SCTP_INIT | PFDESC_SCTP_ADD_IP)))
			goto csfailed;
	}
	s->direction = pd->dir;

	/*
	 * sk/nk could already have been set up by pf_get_translation().
	 */
	if (nr == NULL) {
		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
		    __func__, nr, sk, nk));
		MPASS(pd->sport == NULL || (pd->osport == *pd->sport));
		MPASS(pd->dport == NULL || (pd->odport == *pd->dport));
		if (pf_state_key_setup(pd, pd->nsport, pd->ndport, &sk, &nk)) {
			goto csfailed;
		}
	} else
		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
		    __func__, nr, sk, nk));

	/* Swap sk/nk for PF_OUT. */
	if (pf_state_insert(BOUND_IFACE(s, pd), pd->kif,
	    (pd->dir == PF_IN) ? sk : nk,
	    (pd->dir == PF_IN) ? nk : sk, s)) {
		REASON_SET(&reason, PFRES_STATEINS);
		goto drop;
	} else
		*sm = s;

	/*
	 * Lock order is important: first state, then source node.
	 */
	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
		if (pf_src_node_exists(&sns[sn_type], snhs[sn_type])) {
			s->sns[sn_type] = sns[sn_type];
			PF_HASHROW_UNLOCK(snhs[sn_type]);
		}
	}

	if (tag > 0)
		s->tag = tag;
	if (pd->proto == IPPROTO_TCP && (tcp_get_flags(th) & (TH_SYN|TH_ACK)) ==
	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
		pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
		/* undo NAT changes, if they have taken place */
		if (nr != NULL) {
			struct pf_state_key *skt = s->key[PF_SK_WIRE];
			if (pd->dir == PF_OUT)
				skt = s->key[PF_SK_STACK];
			PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
			PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
			if (pd->sport)
				*pd->sport = skt->port[pd->sidx];
			if (pd->dport)
				*pd->dport = skt->port[pd->didx];
			if (pd->ip_sum)
				*pd->ip_sum = bip_sum;
			m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
		}
		s->src.seqhi = htonl(arc4random());
		/* Find mss option */
		int rtid = M_GETFIB(pd->m);
		mss = pf_get_mss(pd);
		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
		s->src.mss = mss;
		pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
		    TH_SYN|TH_ACK, 0, s->src.mss, 0, M_SKIP_FIREWALL, 0, 0,
		    pd->act.rtableid);
		REASON_SET(&reason, PFRES_SYNPROXY);
		return (PF_SYNPROXY_DROP);
	}

	s->udp_mapping = udp_mapping;

	return (PF_PASS);

csfailed:
	while ((ri = SLIST_FIRST(match_rules))) {
		SLIST_REMOVE_HEAD(match_rules, entry);
		free(ri, M_PF_RULE_ITEM);
	}

	uma_zfree(V_pf_state_key_z, sk);
	uma_zfree(V_pf_state_key_z, nk);

	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
		if (pf_src_node_exists(&sns[sn_type], snhs[sn_type])) {
			if (--sns[sn_type]->states == 0 &&
			    sns[sn_type]->expire == 0) {
				pf_unlink_src_node(sns[sn_type]);
				pf_free_src_node(sns[sn_type]);
				counter_u64_add(
				    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
			}
			PF_HASHROW_UNLOCK(snhs[sn_type]);
		}
	}

drop:
	if (s != NULL) {
		pf_src_tree_remove_state(s);
		s->timeout = PFTM_UNLINKED;
		STATE_DEC_COUNTERS(s);
		pf_free_state(s);
	}

	return (PF_DROP);
}

int
pf_translate(struct pf_pdesc *pd, struct pf_addr *saddr,
u_int16_t sport, struct pf_addr *daddr, u_int16_t dport, u_int16_t virtual_type, int icmp_dir) { /* * pf_translate() implements OpenBSD's "new" NAT approach. * We don't follow it, because it involves a breaking syntax change * (removing nat/rdr rules, moving it into regular pf rules.) * It also moves NAT processing to be done after normal rules evaluation * whereas in FreeBSD that's done before rules processing. * * We adopt the function only for nat64, and keep other NAT processing * before rules processing. */ int rewrite = 0; int afto = pd->af != pd->naf; MPASS(afto); switch (pd->proto) { case IPPROTO_TCP: if (afto || *pd->sport != sport) { pf_change_ap(pd->m, pd->src, pd->sport, pd->ip_sum, &pd->hdr.tcp.th_sum, saddr, sport, 0, pd->af, pd->naf); rewrite = 1; } if (afto || *pd->dport != dport) { pf_change_ap(pd->m, pd->dst, pd->dport, pd->ip_sum, &pd->hdr.tcp.th_sum, daddr, dport, 0, pd->af, pd->naf); rewrite = 1; } break; case IPPROTO_UDP: if (afto || *pd->sport != sport) { pf_change_ap(pd->m, pd->src, pd->sport, pd->ip_sum, &pd->hdr.udp.uh_sum, saddr, sport, 1, pd->af, pd->naf); rewrite = 1; } if (afto || *pd->dport != dport) { pf_change_ap(pd->m, pd->dst, pd->dport, pd->ip_sum, &pd->hdr.udp.uh_sum, daddr, dport, 1, pd->af, pd->naf); rewrite = 1; } break; case IPPROTO_SCTP: { uint16_t checksum = 0; if (afto || *pd->sport != sport) { pf_change_ap(pd->m, pd->src, pd->sport, pd->ip_sum, &checksum, saddr, sport, 1, pd->af, pd->naf); rewrite = 1; } if (afto || *pd->dport != dport) { pf_change_ap(pd->m, pd->dst, pd->dport, pd->ip_sum, &checksum, daddr, dport, 1, pd->af, pd->naf); rewrite = 1; } break; } #ifdef INET case IPPROTO_ICMP: /* pf_translate() is also used when logging invalid packets */ if (pd->af != AF_INET) return (0); if (afto) { if (pf_translate_icmp_af(AF_INET6, &pd->hdr.icmp)) return (-1); pd->proto = IPPROTO_ICMPV6; rewrite = 1; } if (virtual_type == htons(ICMP_ECHO)) { u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport; if (icmpid != pd->hdr.icmp.icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( pd->hdr.icmp.icmp_cksum, pd->hdr.icmp.icmp_id, icmpid, 0); pd->hdr.icmp.icmp_id = icmpid; /* XXX TODO copyback. */ rewrite = 1; } } break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: /* pf_translate() is also used when logging invalid packets */ if (pd->af != AF_INET6) return (0); if (afto) { /* ip_sum will be recalculated in pf_translate_af */ if (pf_translate_icmp_af(AF_INET, &pd->hdr.icmp6)) return (0); pd->proto = IPPROTO_ICMP; rewrite = 1; } break; #endif /* INET6 */ default: break; } return (rewrite); } static int pf_tcp_track_full(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason, int *copyback, struct pf_state_peer *src, struct pf_state_peer *dst, u_int8_t psrc, u_int8_t pdst) { struct tcphdr *th = &pd->hdr.tcp; u_int16_t win = ntohs(th->th_win); u_int32_t ack, end, data_end, seq, orig_seq; u_int8_t sws, dws; int ackskew; if (src->wscale && dst->wscale && !(tcp_get_flags(th) & TH_SYN)) { sws = src->wscale & PF_WSCALE_MASK; dws = dst->wscale & PF_WSCALE_MASK; } else sws = dws = 0; /* * Sequence tracking algorithm from Guido van Rooij's paper: * http://www.madison-gurkha.com/publications/tcp_filtering/ * tcp_filtering.ps */ orig_seq = seq = ntohl(th->th_seq); if (src->seqlo == 0) { /* First packet from this end. 
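		   (seqlo == 0 means nothing has been seen from this side yet.)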
Set its state */ if (((*state)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) && src->scrub == NULL) { if (pf_normalize_tcp_init(pd, th, src, dst)) { REASON_SET(reason, PFRES_MEMORY); return (PF_DROP); } } /* Deferred generation of sequence number modulator */ if (dst->seqdiff && !src->seqdiff) { /* use random iss for the TCP server */ while ((src->seqdiff = arc4random() - seq) == 0) ; ack = ntohl(th->th_ack) - dst->seqdiff; pf_change_proto_a(pd->m, &th->th_seq, &th->th_sum, htonl(seq + src->seqdiff), 0); pf_change_proto_a(pd->m, &th->th_ack, &th->th_sum, htonl(ack), 0); *copyback = 1; } else { ack = ntohl(th->th_ack); } end = seq + pd->p_len; if (tcp_get_flags(th) & TH_SYN) { end++; if (dst->wscale & PF_WSCALE_FLAG) { src->wscale = pf_get_wscale(pd); if (src->wscale & PF_WSCALE_FLAG) { /* Remove scale factor from initial * window */ sws = src->wscale & PF_WSCALE_MASK; win = ((u_int32_t)win + (1 << sws) - 1) >> sws; dws = dst->wscale & PF_WSCALE_MASK; } else { /* fixup other window */ dst->max_win = MIN(TCP_MAXWIN, (u_int32_t)dst->max_win << (dst->wscale & PF_WSCALE_MASK)); /* in case of a retrans SYN|ACK */ dst->wscale = 0; } } } data_end = end; if (tcp_get_flags(th) & TH_FIN) end++; src->seqlo = seq; if (src->state < TCPS_SYN_SENT) pf_set_protostate(*state, psrc, TCPS_SYN_SENT); /* * May need to slide the window (seqhi may have been set by * the crappy stack check or if we picked up the connection * after establishment) */ if (src->seqhi == 1 || SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) src->seqhi = end + MAX(1, dst->max_win << dws); if (win > src->max_win) src->max_win = win; } else { ack = ntohl(th->th_ack) - dst->seqdiff; if (src->seqdiff) { /* Modulate sequence numbers */ pf_change_proto_a(pd->m, &th->th_seq, &th->th_sum, htonl(seq + src->seqdiff), 0); pf_change_proto_a(pd->m, &th->th_ack, &th->th_sum, htonl(ack), 0); *copyback = 1; } end = seq + pd->p_len; if (tcp_get_flags(th) & TH_SYN) end++; data_end = end; if (tcp_get_flags(th) & TH_FIN) end++; } if ((tcp_get_flags(th) & TH_ACK) == 0) { /* Let it pass through the ack skew check */ ack = dst->seqlo; } else if ((ack == 0 && (tcp_get_flags(th) & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || /* broken tcp stacks do not set ack */ (dst->state < TCPS_SYN_SENT)) { /* * Many stacks (ours included) will set the ACK number in an * FIN|ACK if the SYN times out -- no sequence to ACK. */ ack = dst->seqlo; } if (seq == end) { /* Ease sequencing restrictions on no data packets */ seq = src->seqlo; data_end = end = seq; } ackskew = dst->seqlo - ack; /* * Need to demodulate the sequence numbers in any TCP SACK options * (Selective ACK). We could optionally validate the SACK values * against the current ACK window, either forwards or backwards, but * I'm not confident that SACK has been implemented properly * everywhere. It wouldn't surprise me if several stacks accidentally * SACK too far backwards of previously ACKed data. There really aren't * any security implications of bad SACKing unless the target stack * doesn't validate the option length correctly. Someone trying to * spoof into a TCP connection won't bother blindly sending SACK * options anyway. 
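	 * The demodulation below shifts each SACK edge by dst->seqdiff,
	 * matching the th_ack adjustment made above.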
*/ if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { if (pf_modulate_sack(pd, th, dst)) *copyback = 1; } #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ if (SEQ_GEQ(src->seqhi, data_end) && /* Last octet inside other's window space */ SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && /* Retrans: not more than one window back */ (ackskew >= -MAXACKWINDOW) && /* Acking not more than one reassembled fragment backwards */ (ackskew <= (MAXACKWINDOW << sws)) && /* Acking not more than one window forward */ ((tcp_get_flags(th) & TH_RST) == 0 || orig_seq == src->seqlo || (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) { /* Require an exact/+1 sequence match on resets when possible */ if (dst->scrub || src->scrub) { if (pf_normalize_tcp_stateful(pd, reason, th, *state, src, dst, copyback)) return (PF_DROP); } /* update max window */ if (src->max_win < win) src->max_win = win; /* synchronize sequencing */ if (SEQ_GT(end, src->seqlo)) src->seqlo = end; /* slide the window of what the other end can send */ if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) dst->seqhi = ack + MAX((win << sws), 1); /* update states */ if (tcp_get_flags(th) & TH_SYN) if (src->state < TCPS_SYN_SENT) pf_set_protostate(*state, psrc, TCPS_SYN_SENT); if (tcp_get_flags(th) & TH_FIN) if (src->state < TCPS_CLOSING) pf_set_protostate(*state, psrc, TCPS_CLOSING); if (tcp_get_flags(th) & TH_ACK) { if (dst->state == TCPS_SYN_SENT) { pf_set_protostate(*state, pdst, TCPS_ESTABLISHED); if (src->state == TCPS_ESTABLISHED && (*state)->sns[PF_SN_LIMIT] != NULL && pf_src_connlimit(*state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } } else if (dst->state == TCPS_CLOSING) pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2); } if (tcp_get_flags(th) & TH_RST) pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT); /* update expire time */ (*state)->expire = pf_get_uptime(); if (src->state >= TCPS_FIN_WAIT_2 && dst->state >= TCPS_FIN_WAIT_2) (*state)->timeout = PFTM_TCP_CLOSED; else if (src->state >= TCPS_CLOSING && dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_FIN_WAIT; else if (src->state < TCPS_ESTABLISHED || dst->state < TCPS_ESTABLISHED) (*state)->timeout = PFTM_TCP_OPENING; else if (src->state >= TCPS_CLOSING || dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_CLOSING; else (*state)->timeout = PFTM_TCP_ESTABLISHED; /* Fall through to PASS packet */ } else if ((dst->state < TCPS_SYN_SENT || dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) && SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) && /* Within a window forward of the originating packet */ SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { /* Within a window backward of the originating packet */ /* * This currently handles three situations: * 1) Stupid stacks will shotgun SYNs before their peer * replies. * 2) When PF catches an already established stream (the * firewall rebooted, the state table was flushed, routes * changed...) * 3) Packets get funky immediately after the connection * closes (this should catch Solaris spurious ACK|FINs * that web servers like to spew after a close) * * This must be a little more careful than the above code * since packet floods will also be caught here. We don't * update the TTL here to mitigate the damage of a packet * flood and so the same code can handle awkward establishment * and a loosened connection close. 
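	 * (MAXACKWINDOW gives 0xffff + 1500 = 67035 octets of slack in
	 * either direction here.)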
* In the establishment case, a correct peer response will * validate the connection, go through the normal state code * and keep updating the state TTL. */ if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: loose state match: "); pf_print_state(*state); pf_print_flags(tcp_get_flags(th)); printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, ackskew, (unsigned long long)(*state)->packets[0], (unsigned long long)(*state)->packets[1], pd->dir == PF_IN ? "in" : "out", pd->dir == (*state)->direction ? "fwd" : "rev"); } if (dst->scrub || src->scrub) { if (pf_normalize_tcp_stateful(pd, reason, th, *state, src, dst, copyback)) return (PF_DROP); } /* update max window */ if (src->max_win < win) src->max_win = win; /* synchronize sequencing */ if (SEQ_GT(end, src->seqlo)) src->seqlo = end; /* slide the window of what the other end can send */ if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) dst->seqhi = ack + MAX((win << sws), 1); /* * Cannot set dst->seqhi here since this could be a shotgunned * SYN and not an already established connection. */ if (tcp_get_flags(th) & TH_FIN) if (src->state < TCPS_CLOSING) pf_set_protostate(*state, psrc, TCPS_CLOSING); if (tcp_get_flags(th) & TH_RST) pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT); /* Fall through to PASS packet */ } else { if ((*state)->dst.state == TCPS_SYN_SENT && (*state)->src.state == TCPS_SYN_SENT) { /* Send RST for state mismatches during handshake */ if (!(tcp_get_flags(th) & TH_RST)) pf_send_tcp((*state)->rule, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), 0, TH_RST, 0, 0, (*state)->rule->return_ttl, M_SKIP_FIREWALL, 0, 0, (*state)->act.rtableid); src->seqlo = 0; src->seqhi = 1; src->max_win = 1; } else if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: BAD state: "); pf_print_state(*state); pf_print_flags(tcp_get_flags(th)); printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, ackskew, (unsigned long long)(*state)->packets[0], (unsigned long long)(*state)->packets[1], pd->dir == PF_IN ? "in" : "out", pd->dir == (*state)->direction ? "fwd" : "rev"); printf("pf: State failure on: %c %c %c %c | %c %c\n", SEQ_GEQ(src->seqhi, data_end) ? ' ' : '1', SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? ' ': '2', (ackskew >= -MAXACKWINDOW) ? ' ' : '3', (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) ?' ' :'5', SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' 
' :'6'); } REASON_SET(reason, PFRES_BADSTATE); return (PF_DROP); } return (PF_PASS); } static int pf_tcp_track_sloppy(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason, struct pf_state_peer *src, struct pf_state_peer *dst, u_int8_t psrc, u_int8_t pdst) { struct tcphdr *th = &pd->hdr.tcp; if (tcp_get_flags(th) & TH_SYN) if (src->state < TCPS_SYN_SENT) pf_set_protostate(*state, psrc, TCPS_SYN_SENT); if (tcp_get_flags(th) & TH_FIN) if (src->state < TCPS_CLOSING) pf_set_protostate(*state, psrc, TCPS_CLOSING); if (tcp_get_flags(th) & TH_ACK) { if (dst->state == TCPS_SYN_SENT) { pf_set_protostate(*state, pdst, TCPS_ESTABLISHED); if (src->state == TCPS_ESTABLISHED && (*state)->sns[PF_SN_LIMIT] != NULL && pf_src_connlimit(*state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } } else if (dst->state == TCPS_CLOSING) { pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2); } else if (src->state == TCPS_SYN_SENT && dst->state < TCPS_SYN_SENT) { /* * Handle a special sloppy case where we only see one * half of the connection. If there is a ACK after * the initial SYN without ever seeing a packet from * the destination, set the connection to established. */ pf_set_protostate(*state, PF_PEER_BOTH, TCPS_ESTABLISHED); dst->state = src->state = TCPS_ESTABLISHED; if ((*state)->sns[PF_SN_LIMIT] != NULL && pf_src_connlimit(*state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } } else if (src->state == TCPS_CLOSING && dst->state == TCPS_ESTABLISHED && dst->seqlo == 0) { /* * Handle the closing of half connections where we * don't see the full bidirectional FIN/ACK+ACK * handshake. */ pf_set_protostate(*state, pdst, TCPS_CLOSING); } } if (tcp_get_flags(th) & TH_RST) pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT); /* update expire time */ (*state)->expire = pf_get_uptime(); if (src->state >= TCPS_FIN_WAIT_2 && dst->state >= TCPS_FIN_WAIT_2) (*state)->timeout = PFTM_TCP_CLOSED; else if (src->state >= TCPS_CLOSING && dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_FIN_WAIT; else if (src->state < TCPS_ESTABLISHED || dst->state < TCPS_ESTABLISHED) (*state)->timeout = PFTM_TCP_OPENING; else if (src->state >= TCPS_CLOSING || dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_CLOSING; else (*state)->timeout = PFTM_TCP_ESTABLISHED; return (PF_PASS); } static int pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason) { struct pf_state_key *sk = (*state)->key[pd->didx]; struct tcphdr *th = &pd->hdr.tcp; if ((*state)->src.state == PF_TCPS_PROXY_SRC) { if (pd->dir != (*state)->direction) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } if (tcp_get_flags(th) & TH_SYN) { if (ntohl(th->th_seq) != (*state)->src.seqlo) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } pf_send_tcp((*state)->rule, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, (*state)->src.seqhi, ntohl(th->th_seq) + 1, TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, M_SKIP_FIREWALL, 0, 0, (*state)->act.rtableid); REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } else if ((tcp_get_flags(th) & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK || (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } else if ((*state)->sns[PF_SN_LIMIT] != NULL && pf_src_connlimit(*state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } else pf_set_protostate(*state, PF_PEER_SRC, PF_TCPS_PROXY_DST); } if ((*state)->src.state == PF_TCPS_PROXY_DST) { if (pd->dir == 
(*state)->direction) { if (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) != TH_ACK) || (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } (*state)->src.max_win = MAX(ntohs(th->th_win), 1); if ((*state)->dst.seqhi == 1) (*state)->dst.seqhi = htonl(arc4random()); pf_send_tcp((*state)->rule, pd->af, &sk->addr[pd->sidx], &sk->addr[pd->didx], sk->port[pd->sidx], sk->port[pd->didx], (*state)->dst.seqhi, 0, TH_SYN, 0, (*state)->src.mss, 0, (*state)->orig_kif->pfik_ifp == V_loif ? M_LOOP : 0, (*state)->tag, 0, (*state)->act.rtableid); REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } else if (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) != (TH_SYN|TH_ACK)) || (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } else { (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); (*state)->dst.seqlo = ntohl(th->th_seq); pf_send_tcp((*state)->rule, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), ntohl(th->th_seq) + 1, TH_ACK, (*state)->src.max_win, 0, 0, 0, (*state)->tag, 0, (*state)->act.rtableid); pf_send_tcp((*state)->rule, pd->af, &sk->addr[pd->sidx], &sk->addr[pd->didx], sk->port[pd->sidx], sk->port[pd->didx], (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, TH_ACK, (*state)->dst.max_win, 0, 0, M_SKIP_FIREWALL, 0, 0, (*state)->act.rtableid); (*state)->src.seqdiff = (*state)->dst.seqhi - (*state)->src.seqlo; (*state)->dst.seqdiff = (*state)->src.seqhi - (*state)->dst.seqlo; (*state)->src.seqhi = (*state)->src.seqlo + (*state)->dst.max_win; (*state)->dst.seqhi = (*state)->dst.seqlo + (*state)->src.max_win; (*state)->src.wscale = (*state)->dst.wscale = 0; pf_set_protostate(*state, PF_PEER_BOTH, TCPS_ESTABLISHED); REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } } return (PF_PASS); } static int pf_test_state(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason) { struct pf_state_key_cmp key; int copyback = 0; struct pf_state_peer *src, *dst; uint8_t psrc, pdst; int action = PF_PASS; bzero(&key, sizeof(key)); key.af = pd->af; key.proto = pd->virtual_proto; PF_ACPY(&key.addr[pd->sidx], pd->src, key.af); PF_ACPY(&key.addr[pd->didx], pd->dst, key.af); key.port[pd->sidx] = pd->osport; key.port[pd->didx] = pd->odport; STATE_LOOKUP(&key, *state, pd); if (pd->dir == (*state)->direction) { if (PF_REVERSED_KEY(*state, pd->af)) { src = &(*state)->dst; dst = &(*state)->src; psrc = PF_PEER_DST; pdst = PF_PEER_SRC; } else { src = &(*state)->src; dst = &(*state)->dst; psrc = PF_PEER_SRC; pdst = PF_PEER_DST; } } else { if (PF_REVERSED_KEY(*state, pd->af)) { src = &(*state)->src; dst = &(*state)->dst; psrc = PF_PEER_SRC; pdst = PF_PEER_DST; } else { src = &(*state)->dst; dst = &(*state)->src; psrc = PF_PEER_DST; pdst = PF_PEER_SRC; } } switch (pd->virtual_proto) { case IPPROTO_TCP: { struct tcphdr *th = &pd->hdr.tcp; if ((action = pf_synproxy(pd, state, reason)) != PF_PASS) return (action); if ((*state)->src.state >= TCPS_FIN_WAIT_2 && (*state)->dst.state >= TCPS_FIN_WAIT_2 && (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) == TH_SYN) || ((tcp_get_flags(th) & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK && pf_syncookie_check(pd) && pd->dir == PF_IN))) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: state reuse "); pf_print_state(*state); pf_print_flags(tcp_get_flags(th)); printf("\n"); } /* XXX make sure it's the same direction ?? 
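			 * Both peers are past FIN_WAIT_2 and a fresh SYN (or a
			 * valid syncookie ACK) arrived, so unlink the old state,
			 * drop this packet and let a retransmit create a new one.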
			 */
			pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
			pf_unlink_state(*state);
			*state = NULL;
			return (PF_DROP);
		}

		if ((*state)->state_flags & PFSTATE_SLOPPY) {
			if (pf_tcp_track_sloppy(state, pd, reason, src, dst,
			    psrc, pdst) == PF_DROP)
				return (PF_DROP);
		} else {
			int ret;

			ret = pf_tcp_track_full(state, pd, reason, &copyback,
			    src, dst, psrc, pdst);
			if (ret == PF_DROP)
				return (PF_DROP);
		}
		break;
	}
	case IPPROTO_UDP:
		/* update states */
		if (src->state < PFUDPS_SINGLE)
			pf_set_protostate(*state, psrc, PFUDPS_SINGLE);
		if (dst->state == PFUDPS_SINGLE)
			pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE);

		/* update expire time */
		(*state)->expire = pf_get_uptime();
		if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
			(*state)->timeout = PFTM_UDP_MULTIPLE;
		else
			(*state)->timeout = PFTM_UDP_SINGLE;
		break;
	case IPPROTO_SCTP:
		if ((src->state >= SCTP_SHUTDOWN_SENT || src->state == SCTP_CLOSED) &&
		    (dst->state >= SCTP_SHUTDOWN_SENT || dst->state == SCTP_CLOSED) &&
		    pd->sctp_flags & PFDESC_SCTP_INIT) {
			pf_set_protostate(*state, PF_PEER_BOTH, SCTP_CLOSED);
			pf_unlink_state(*state);
			*state = NULL;
			return (PF_DROP);
		}

		if (pf_sctp_track(*state, pd, reason) != PF_PASS)
			return (PF_DROP);

		/* Track state. */
		if (pd->sctp_flags & PFDESC_SCTP_INIT) {
			if (src->state < SCTP_COOKIE_WAIT) {
				pf_set_protostate(*state, psrc, SCTP_COOKIE_WAIT);
				(*state)->timeout = PFTM_SCTP_OPENING;
			}
		}
		if (pd->sctp_flags & PFDESC_SCTP_INIT_ACK) {
			MPASS(dst->scrub != NULL);
			if (dst->scrub->pfss_v_tag == 0)
				dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;
		}

		/*
		 * Bind to the correct interface if we're if-bound. For multihomed
		 * extra associations we don't know which interface that will be until
		 * here, so we've inserted the state on V_pf_all. Fix that now.
		 */
		if ((*state)->kif == V_pfi_all &&
		    (*state)->rule->rule_flag & PFRULE_IFBOUND)
			(*state)->kif = pd->kif;

		if (pd->sctp_flags & (PFDESC_SCTP_COOKIE | PFDESC_SCTP_HEARTBEAT_ACK)) {
			if (src->state < SCTP_ESTABLISHED) {
				pf_set_protostate(*state, psrc, SCTP_ESTABLISHED);
				(*state)->timeout = PFTM_SCTP_ESTABLISHED;
			}
		}
		if (pd->sctp_flags & (PFDESC_SCTP_SHUTDOWN |
		    PFDESC_SCTP_SHUTDOWN_COMPLETE)) {
			if (src->state < SCTP_SHUTDOWN_PENDING) {
				pf_set_protostate(*state, psrc, SCTP_SHUTDOWN_PENDING);
				(*state)->timeout = PFTM_SCTP_CLOSING;
			}
		}
		if (pd->sctp_flags & (PFDESC_SCTP_SHUTDOWN_COMPLETE | PFDESC_SCTP_ABORT)) {
			pf_set_protostate(*state, psrc, SCTP_CLOSED);
			(*state)->timeout = PFTM_SCTP_CLOSED;
		}

		(*state)->expire = pf_get_uptime();
		break;
	default:
		/* update states */
		if (src->state < PFOTHERS_SINGLE)
			pf_set_protostate(*state, psrc, PFOTHERS_SINGLE);
		if (dst->state == PFOTHERS_SINGLE)
			pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE);

		/* update expire time */
		(*state)->expire = pf_get_uptime();
		if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
			(*state)->timeout = PFTM_OTHER_MULTIPLE;
		else
			(*state)->timeout = PFTM_OTHER_SINGLE;
		break;
	}

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk;
		int afto, sidx, didx;

		if (PF_REVERSED_KEY(*state, pd->af))
			nk = (*state)->key[pd->sidx];
		else
			nk = (*state)->key[pd->didx];

		afto = pd->af != nk->af;

		if (afto && (*state)->direction == PF_IN) {
			sidx = pd->didx;
			didx = pd->sidx;
		} else {
			sidx = pd->sidx;
			didx = pd->didx;
		}

		if (afto || PF_ANEQ(pd->src, &nk->addr[sidx], pd->af) ||
		    nk->port[sidx] != pd->osport)
			pf_change_ap(pd->m, pd->src, pd->sport, pd->ip_sum,
			    pd->pcksum, &nk->addr[sidx], nk->port[sidx],
			    pd->virtual_proto == IPPROTO_UDP, pd->af,
nk->af); if (afto || PF_ANEQ(pd->dst, &nk->addr[didx], pd->af) || nk->port[didx] != pd->odport) pf_change_ap(pd->m, pd->dst, pd->dport, pd->ip_sum, pd->pcksum, &nk->addr[didx], nk->port[didx], pd->virtual_proto == IPPROTO_UDP, pd->af, nk->af); if (afto) { PF_ACPY(&pd->nsaddr, &nk->addr[sidx], nk->af); PF_ACPY(&pd->ndaddr, &nk->addr[didx], nk->af); pd->naf = nk->af; action = PF_AFRT; } copyback = 1; } if (copyback && pd->hdrlen > 0) m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any); return (action); } static int pf_sctp_track(struct pf_kstate *state, struct pf_pdesc *pd, u_short *reason) { struct pf_state_peer *src; if (pd->dir == state->direction) { if (PF_REVERSED_KEY(state, pd->af)) src = &state->dst; else src = &state->src; } else { if (PF_REVERSED_KEY(state, pd->af)) src = &state->src; else src = &state->dst; } if (src->scrub != NULL) { if (src->scrub->pfss_v_tag == 0) src->scrub->pfss_v_tag = pd->hdr.sctp.v_tag; else if (src->scrub->pfss_v_tag != pd->hdr.sctp.v_tag) return (PF_DROP); } return (PF_PASS); } static void pf_sctp_multihome_detach_addr(const struct pf_kstate *s) { struct pf_sctp_endpoint key; struct pf_sctp_endpoint *ep; struct pf_state_key *sks = s->key[PF_SK_STACK]; struct pf_sctp_source *i, *tmp; if (sks == NULL || sks->proto != IPPROTO_SCTP || s->dst.scrub == NULL) return; PF_SCTP_ENDPOINTS_LOCK(); key.v_tag = s->dst.scrub->pfss_v_tag; ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key); if (ep != NULL) { TAILQ_FOREACH_SAFE(i, &ep->sources, entry, tmp) { if (pf_addr_cmp(&i->addr, &s->key[PF_SK_WIRE]->addr[s->direction == PF_OUT], s->key[PF_SK_WIRE]->af) == 0) { SDT_PROBE3(pf, sctp, multihome, remove, key.v_tag, s, i); TAILQ_REMOVE(&ep->sources, i, entry); free(i, M_PFTEMP); break; } } if (TAILQ_EMPTY(&ep->sources)) { RB_REMOVE(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep); free(ep, M_PFTEMP); } } /* Other direction. */ key.v_tag = s->src.scrub->pfss_v_tag; ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key); if (ep != NULL) { TAILQ_FOREACH_SAFE(i, &ep->sources, entry, tmp) { if (pf_addr_cmp(&i->addr, &s->key[PF_SK_WIRE]->addr[s->direction == PF_IN], s->key[PF_SK_WIRE]->af) == 0) { SDT_PROBE3(pf, sctp, multihome, remove, key.v_tag, s, i); TAILQ_REMOVE(&ep->sources, i, entry); free(i, M_PFTEMP); break; } } if (TAILQ_EMPTY(&ep->sources)) { RB_REMOVE(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep); free(ep, M_PFTEMP); } } PF_SCTP_ENDPOINTS_UNLOCK(); } static void pf_sctp_multihome_add_addr(struct pf_pdesc *pd, struct pf_addr *a, uint32_t v_tag) { struct pf_sctp_endpoint key = { .v_tag = v_tag, }; struct pf_sctp_source *i; struct pf_sctp_endpoint *ep; PF_SCTP_ENDPOINTS_LOCK(); ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key); if (ep == NULL) { ep = malloc(sizeof(struct pf_sctp_endpoint), M_PFTEMP, M_NOWAIT); if (ep == NULL) { PF_SCTP_ENDPOINTS_UNLOCK(); return; } ep->v_tag = v_tag; TAILQ_INIT(&ep->sources); RB_INSERT(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep); } /* Avoid inserting duplicates. 
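	 * The packet's own source address is usually listed again in the
	 * INIT/INIT_ACK parameters, so the same address can show up twice.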
*/ TAILQ_FOREACH(i, &ep->sources, entry) { if (pf_addr_cmp(&i->addr, a, pd->af) == 0) { PF_SCTP_ENDPOINTS_UNLOCK(); return; } } i = malloc(sizeof(*i), M_PFTEMP, M_NOWAIT); if (i == NULL) { PF_SCTP_ENDPOINTS_UNLOCK(); return; } i->af = pd->af; memcpy(&i->addr, a, sizeof(*a)); TAILQ_INSERT_TAIL(&ep->sources, i, entry); SDT_PROBE2(pf, sctp, multihome, add, v_tag, i); PF_SCTP_ENDPOINTS_UNLOCK(); } static void pf_sctp_multihome_delayed(struct pf_pdesc *pd, struct pfi_kkif *kif, struct pf_kstate *s, int action) { struct pf_sctp_multihome_job *j, *tmp; struct pf_sctp_source *i; int ret __unused; struct pf_kstate *sm = NULL; struct pf_krule *ra = NULL; struct pf_krule *r = &V_pf_default_rule; struct pf_kruleset *rs = NULL; bool do_extra = true; PF_RULES_RLOCK_TRACKER; again: TAILQ_FOREACH_SAFE(j, &pd->sctp_multihome_jobs, next, tmp) { if (s == NULL || action != PF_PASS) goto free; /* Confirm we don't recurse here. */ MPASS(! (pd->sctp_flags & PFDESC_SCTP_ADD_IP)); switch (j->op) { case SCTP_ADD_IP_ADDRESS: { uint32_t v_tag = pd->sctp_initiate_tag; if (v_tag == 0) { if (s->direction == pd->dir) v_tag = s->src.scrub->pfss_v_tag; else v_tag = s->dst.scrub->pfss_v_tag; } /* * Avoid duplicating states. We'll already have * created a state based on the source address of * the packet, but SCTP endpoints may also list this * address again in the INIT(_ACK) parameters. */ if (pf_addr_cmp(&j->src, pd->src, pd->af) == 0) { break; } j->pd.sctp_flags |= PFDESC_SCTP_ADD_IP; PF_RULES_RLOCK(); sm = NULL; if (s->rule->rule_flag & PFRULE_ALLOW_RELATED) { j->pd.related_rule = s->rule; } ret = pf_test_rule(&r, &sm, &j->pd, &ra, &rs, NULL); PF_RULES_RUNLOCK(); SDT_PROBE4(pf, sctp, multihome, test, kif, r, j->pd.m, ret); if (ret != PF_DROP && sm != NULL) { /* Inherit v_tag values. */ if (sm->direction == s->direction) { sm->src.scrub->pfss_v_tag = s->src.scrub->pfss_v_tag; sm->dst.scrub->pfss_v_tag = s->dst.scrub->pfss_v_tag; } else { sm->src.scrub->pfss_v_tag = s->dst.scrub->pfss_v_tag; sm->dst.scrub->pfss_v_tag = s->src.scrub->pfss_v_tag; } PF_STATE_UNLOCK(sm); } else { /* If we try duplicate inserts? */ break; } /* Only add the address if we've actually allowed the state. */ pf_sctp_multihome_add_addr(pd, &j->src, v_tag); if (! do_extra) { break; } /* * We need to do this for each of our source addresses. * Find those based on the verification tag. */ struct pf_sctp_endpoint key = { .v_tag = pd->hdr.sctp.v_tag, }; struct pf_sctp_endpoint *ep; PF_SCTP_ENDPOINTS_LOCK(); ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key); if (ep == NULL) { PF_SCTP_ENDPOINTS_UNLOCK(); break; } MPASS(ep != NULL); TAILQ_FOREACH(i, &ep->sources, entry) { struct pf_sctp_multihome_job *nj; /* SCTP can intermingle IPv4 and IPv6. */ if (i->af != pd->af) continue; nj = malloc(sizeof(*nj), M_PFTEMP, M_NOWAIT | M_ZERO); if (! nj) { continue; } memcpy(&nj->pd, &j->pd, sizeof(j->pd)); memcpy(&nj->src, &j->src, sizeof(nj->src)); nj->pd.src = &nj->src; // New destination address! 
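				// (one cloned job per known peer source address)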
				memcpy(&nj->dst, &i->addr, sizeof(nj->dst));
				nj->pd.dst = &nj->dst;
				nj->pd.m = j->pd.m;
				nj->op = j->op;

				TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, nj, next);
			}
			PF_SCTP_ENDPOINTS_UNLOCK();
			break;
		}
		case SCTP_DEL_IP_ADDRESS: {
			struct pf_state_key_cmp key;
			uint8_t psrc;

			bzero(&key, sizeof(key));
			key.af = j->pd.af;
			key.proto = IPPROTO_SCTP;
			if (j->pd.dir == PF_IN) {	/* wire side, straight */
				PF_ACPY(&key.addr[0], j->pd.src, key.af);
				PF_ACPY(&key.addr[1], j->pd.dst, key.af);
				key.port[0] = j->pd.hdr.sctp.src_port;
				key.port[1] = j->pd.hdr.sctp.dest_port;
			} else {			/* stack side, reverse */
				PF_ACPY(&key.addr[1], j->pd.src, key.af);
				PF_ACPY(&key.addr[0], j->pd.dst, key.af);
				key.port[1] = j->pd.hdr.sctp.src_port;
				key.port[0] = j->pd.hdr.sctp.dest_port;
			}

			sm = pf_find_state(kif, &key, j->pd.dir);
			if (sm != NULL) {
				PF_STATE_LOCK_ASSERT(sm);
				if (j->pd.dir == sm->direction) {
					psrc = PF_PEER_SRC;
				} else {
					psrc = PF_PEER_DST;
				}
				pf_set_protostate(sm, psrc, SCTP_SHUTDOWN_PENDING);
				sm->timeout = PFTM_SCTP_CLOSING;
				PF_STATE_UNLOCK(sm);
			}
			break;
		}
		default:
			panic("Unknown op %#x", j->op);
		}

free:
		TAILQ_REMOVE(&pd->sctp_multihome_jobs, j, next);
		free(j, M_PFTEMP);
	}

	/* We may have inserted extra work while processing the list. */
	if (! TAILQ_EMPTY(&pd->sctp_multihome_jobs)) {
		do_extra = false;
		goto again;
	}
}

static int
pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
{
	int			 off = 0;
	struct pf_sctp_multihome_job	*job;

	SDT_PROBE4(pf, sctp, multihome_scan, entry, start, len, pd, op);

	while (off < len) {
		struct sctp_paramhdr h;

		if (!pf_pull_hdr(pd->m, start + off, &h, sizeof(h), NULL, NULL,
		    pd->af))
			return (PF_DROP);

		/* Parameters are at least 4 bytes. */
		if (ntohs(h.param_length) < 4)
			return (PF_DROP);

		SDT_PROBE2(pf, sctp, multihome_scan, param, ntohs(h.param_type),
		    ntohs(h.param_length));

		switch (ntohs(h.param_type)) {
		case SCTP_IPV4_ADDRESS: {
			struct in_addr t;

			if (ntohs(h.param_length) !=
			    (sizeof(struct sctp_paramhdr) + sizeof(t)))
				return (PF_DROP);

			if (!pf_pull_hdr(pd->m, start + off + sizeof(h), &t,
			    sizeof(t), NULL, NULL, pd->af))
				return (PF_DROP);

			if (in_nullhost(t))
				t.s_addr = pd->src->v4.s_addr;

			/*
			 * We hold the state lock (idhash) here, which means
			 * that we can't acquire the keyhash, or we'll get a
			 * LOR (and potentially double-lock things too). We also
			 * can't release the state lock here, so instead we'll
			 * enqueue this for async handling.
			 * There's a relatively small race here, in that a
			 * packet using the new addresses could arrive already,
			 * but that's just tough luck for it.
			 */
			job = malloc(sizeof(*job), M_PFTEMP, M_NOWAIT | M_ZERO);
			if (! job)
				return (PF_DROP);

			SDT_PROBE2(pf, sctp, multihome_scan, ipv4, &t, op);

			memcpy(&job->pd, pd, sizeof(*pd));

			// New source address!
			memcpy(&job->src, &t, sizeof(t));
			job->pd.src = &job->src;
			memcpy(&job->dst, pd->dst, sizeof(job->dst));
			job->pd.dst = &job->dst;
			job->pd.m = pd->m;
			job->op = op;

			TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
			break;
		}
#ifdef INET6
		case SCTP_IPV6_ADDRESS: {
			struct in6_addr t;

			if (ntohs(h.param_length) !=
			    (sizeof(struct sctp_paramhdr) + sizeof(t)))
				return (PF_DROP);

			if (!pf_pull_hdr(pd->m, start + off + sizeof(h), &t,
			    sizeof(t), NULL, NULL, pd->af))
				return (PF_DROP);

			if (memcmp(&t, &pd->src->v6, sizeof(t)) == 0)
				break;
			if (memcmp(&t, &in6addr_any, sizeof(t)) == 0)
				memcpy(&t, &pd->src->v6, sizeof(t));

			job = malloc(sizeof(*job), M_PFTEMP, M_NOWAIT | M_ZERO);
			if (!
job) return (PF_DROP); SDT_PROBE2(pf, sctp, multihome_scan, ipv6, &t, op); memcpy(&job->pd, pd, sizeof(*pd)); memcpy(&job->src, &t, sizeof(t)); job->pd.src = &job->src; memcpy(&job->dst, pd->dst, sizeof(job->dst)); job->pd.dst = &job->dst; job->pd.m = pd->m; job->op = op; TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next); break; } #endif case SCTP_ADD_IP_ADDRESS: { int ret; struct sctp_asconf_paramhdr ah; if (!pf_pull_hdr(pd->m, start + off, &ah, sizeof(ah), NULL, NULL, pd->af)) return (PF_DROP); ret = pf_multihome_scan(start + off + sizeof(ah), ntohs(ah.ph.param_length) - sizeof(ah), pd, SCTP_ADD_IP_ADDRESS); if (ret != PF_PASS) return (ret); break; } case SCTP_DEL_IP_ADDRESS: { int ret; struct sctp_asconf_paramhdr ah; if (!pf_pull_hdr(pd->m, start + off, &ah, sizeof(ah), NULL, NULL, pd->af)) return (PF_DROP); ret = pf_multihome_scan(start + off + sizeof(ah), ntohs(ah.ph.param_length) - sizeof(ah), pd, SCTP_DEL_IP_ADDRESS); if (ret != PF_PASS) return (ret); break; } default: break; } off += roundup(ntohs(h.param_length), 4); } return (PF_PASS); } int pf_multihome_scan_init(int start, int len, struct pf_pdesc *pd) { start += sizeof(struct sctp_init_chunk); len -= sizeof(struct sctp_init_chunk); return (pf_multihome_scan(start, len, pd, SCTP_ADD_IP_ADDRESS)); } int pf_multihome_scan_asconf(int start, int len, struct pf_pdesc *pd) { start += sizeof(struct sctp_asconf_chunk); len -= sizeof(struct sctp_asconf_chunk); return (pf_multihome_scan(start, len, pd, SCTP_ADD_IP_ADDRESS)); } int pf_icmp_state_lookup(struct pf_state_key_cmp *key, struct pf_pdesc *pd, struct pf_kstate **state, u_int16_t icmpid, u_int16_t type, int icmp_dir, int *iidx, int multi, int inner) { int direction = pd->dir; key->af = pd->af; key->proto = pd->proto; if (icmp_dir == PF_IN) { *iidx = pd->sidx; key->port[pd->sidx] = icmpid; key->port[pd->didx] = type; } else { *iidx = pd->didx; key->port[pd->sidx] = type; key->port[pd->didx] = icmpid; } if (pf_state_key_addr_setup(pd, key, multi)) return (PF_DROP); STATE_LOOKUP(key, *state, pd); if ((*state)->state_flags & PFSTATE_SLOPPY) return (-1); /* Is this ICMP message flowing in right direction? */ if ((*state)->key[PF_SK_WIRE]->af != (*state)->key[PF_SK_STACK]->af) direction = (pd->af == (*state)->key[PF_SK_WIRE]->af) ? PF_IN : PF_OUT; else direction = (*state)->direction; if ((*state)->rule->type && (((!inner && direction == pd->dir) || (inner && direction != pd->dir)) ? 
PF_IN : PF_OUT) != icmp_dir) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: icmp type %d in wrong direction (%d): ", ntohs(type), icmp_dir); pf_print_state(*state); printf("\n"); } PF_STATE_UNLOCK(*state); *state = NULL; return (PF_DROP); } return (-1); } static int pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason) { struct pf_addr *saddr = pd->src, *daddr = pd->dst; u_int16_t *icmpsum, virtual_id, virtual_type; u_int8_t icmptype, icmpcode; int icmp_dir, iidx, ret; struct pf_state_key_cmp key; #ifdef INET u_int16_t icmpid; #endif MPASS(*state == NULL); bzero(&key, sizeof(key)); switch (pd->proto) { #ifdef INET case IPPROTO_ICMP: icmptype = pd->hdr.icmp.icmp_type; icmpcode = pd->hdr.icmp.icmp_code; icmpid = pd->hdr.icmp.icmp_id; icmpsum = &pd->hdr.icmp.icmp_cksum; break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: icmptype = pd->hdr.icmp6.icmp6_type; icmpcode = pd->hdr.icmp6.icmp6_code; #ifdef INET icmpid = pd->hdr.icmp6.icmp6_id; #endif icmpsum = &pd->hdr.icmp6.icmp6_cksum; break; #endif /* INET6 */ default: panic("unhandled proto %d", pd->proto); } if (pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id, &virtual_type) == 0) { /* * ICMP query/reply message not related to a TCP/UDP/SCTP * packet. Search for an ICMP state. */ ret = pf_icmp_state_lookup(&key, pd, state, virtual_id, virtual_type, icmp_dir, &iidx, 0, 0); /* IPv6? try matching a multicast address */ if (ret == PF_DROP && pd->af == AF_INET6 && icmp_dir == PF_OUT) { MPASS(*state == NULL); ret = pf_icmp_state_lookup(&key, pd, state, virtual_id, virtual_type, icmp_dir, &iidx, 1, 0); } if (ret >= 0) { MPASS(*state == NULL); return (ret); } (*state)->expire = pf_get_uptime(); (*state)->timeout = PFTM_ICMP_ERROR_REPLY; /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk; int afto, sidx, didx; if (PF_REVERSED_KEY(*state, pd->af)) nk = (*state)->key[pd->sidx]; else nk = (*state)->key[pd->didx]; afto = pd->af != nk->af; if (afto && (*state)->direction == PF_IN) { sidx = pd->didx; didx = pd->sidx; iidx = !iidx; } else { sidx = pd->sidx; didx = pd->didx; } switch (pd->af) { #ifdef INET case AF_INET: #ifdef INET6 if (afto) { if (pf_translate_icmp_af(AF_INET6, &pd->hdr.icmp)) return (PF_DROP); pd->proto = IPPROTO_ICMPV6; } #endif if (!afto && PF_ANEQ(pd->src, &nk->addr[sidx], AF_INET)) pf_change_a(&saddr->v4.s_addr, pd->ip_sum, nk->addr[sidx].v4.s_addr, 0); if (!afto && PF_ANEQ(pd->dst, &nk->addr[didx], AF_INET)) pf_change_a(&daddr->v4.s_addr, pd->ip_sum, nk->addr[didx].v4.s_addr, 0); if (nk->port[iidx] != pd->hdr.icmp.icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( pd->hdr.icmp.icmp_cksum, icmpid, nk->port[iidx], 0); pd->hdr.icmp.icmp_id = nk->port[iidx]; } m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); break; #endif /* INET */ #ifdef INET6 case AF_INET6: #ifdef INET if (afto) { if (pf_translate_icmp_af(AF_INET, &pd->hdr.icmp6)) return (PF_DROP); pd->proto = IPPROTO_ICMP; } #endif if (!afto && PF_ANEQ(pd->src, &nk->addr[sidx], AF_INET6)) pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[sidx], 0); if (!afto && PF_ANEQ(pd->dst, &nk->addr[didx], AF_INET6)) pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[didx], 0); if (nk->port[iidx] != pd->hdr.icmp6.icmp6_id) pd->hdr.icmp6.icmp6_id = nk->port[iidx]; m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); break; #endif /* INET6 */ } if (afto) { PF_ACPY(&pd->nsaddr, &nk->addr[sidx], 
nk->af); PF_ACPY(&pd->ndaddr, &nk->addr[didx], nk->af); pd->naf = nk->af; return (PF_AFRT); } } return (PF_PASS); } else { /* * ICMP error message in response to a TCP/UDP packet. * Extract the inner TCP/UDP header and search for that state. */ struct pf_pdesc pd2; bzero(&pd2, sizeof pd2); #ifdef INET struct ip h2; #endif /* INET */ #ifdef INET6 struct ip6_hdr h2_6; #endif /* INET6 */ int ipoff2 = 0; pd2.af = pd->af; pd2.dir = pd->dir; /* Payload packet is from the opposite direction. */ pd2.sidx = (pd->dir == PF_IN) ? 1 : 0; pd2.didx = (pd->dir == PF_IN) ? 0 : 1; pd2.m = pd->m; pd2.kif = pd->kif; switch (pd->af) { #ifdef INET case AF_INET: /* offset of h2 in mbuf chain */ ipoff2 = pd->off + ICMP_MINLEN; if (!pf_pull_hdr(pd->m, ipoff2, &h2, sizeof(h2), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(ip)\n")); return (PF_DROP); } /* * ICMP error messages don't refer to non-first * fragments */ if (h2.ip_off & htons(IP_OFFMASK)) { REASON_SET(reason, PFRES_FRAG); return (PF_DROP); } /* offset of protocol header that follows h2 */ pd2.off = ipoff2 + (h2.ip_hl << 2); pd2.proto = h2.ip_p; pd2.tot_len = ntohs(h2.ip_len); pd2.src = (struct pf_addr *)&h2.ip_src; pd2.dst = (struct pf_addr *)&h2.ip_dst; pd2.ip_sum = &h2.ip_sum; break; #endif /* INET */ #ifdef INET6 case AF_INET6: ipoff2 = pd->off + sizeof(struct icmp6_hdr); if (!pf_pull_hdr(pd->m, ipoff2, &h2_6, sizeof(h2_6), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(ip6)\n")); return (PF_DROP); } pd2.off = ipoff2; if (pf_walk_header6(&pd2, &h2_6, reason) != PF_PASS) return (PF_DROP); pd2.tot_len = ntohs(h2_6.ip6_plen) + sizeof(struct ip6_hdr); pd2.src = (struct pf_addr *)&h2_6.ip6_src; pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; pd2.ip_sum = NULL; break; #endif /* INET6 */ default: unhandled_af(pd->af); } if (PF_ANEQ(pd->dst, pd2.src, pd->af)) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: BAD ICMP %d:%d outer dst: ", icmptype, icmpcode); pf_print_host(pd->src, 0, pd->af); printf(" -> "); pf_print_host(pd->dst, 0, pd->af); printf(" inner src: "); pf_print_host(pd2.src, 0, pd2.af); printf(" -> "); pf_print_host(pd2.dst, 0, pd2.af); printf("\n"); } REASON_SET(reason, PFRES_BADSTATE); return (PF_DROP); } switch (pd2.proto) { case IPPROTO_TCP: { struct tcphdr th; u_int32_t seq; struct pf_state_peer *src, *dst; u_int8_t dws; int copyback = 0; /* * Only the first 8 bytes of the TCP header can be * expected. Don't access any TCP header fields after * th_seq, an ackskew test is not possible. 
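		 * (RFC 792 only guarantees the IP header plus 8 octets of the
		 * offending datagram in an ICMP error message.)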
*/ if (!pf_pull_hdr(pd->m, pd2.off, &th, 8, NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(tcp)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_TCP; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[pd2.sidx] = th.th_sport; key.port[pd2.didx] = th.th_dport; STATE_LOOKUP(&key, *state, pd); if (pd->dir == (*state)->direction) { if (PF_REVERSED_KEY(*state, pd->af)) { src = &(*state)->src; dst = &(*state)->dst; } else { src = &(*state)->dst; dst = &(*state)->src; } } else { if (PF_REVERSED_KEY(*state, pd->af)) { src = &(*state)->dst; dst = &(*state)->src; } else { src = &(*state)->src; dst = &(*state)->dst; } } if (src->wscale && dst->wscale) dws = dst->wscale & PF_WSCALE_MASK; else dws = 0; /* Demodulate sequence number */ seq = ntohl(th.th_seq) - src->seqdiff; if (src->seqdiff) { pf_change_a(&th.th_seq, icmpsum, htonl(seq), 0); copyback = 1; } if (!((*state)->state_flags & PFSTATE_SLOPPY) && (!SEQ_GEQ(src->seqhi, seq) || !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: BAD ICMP %d:%d ", icmptype, icmpcode); pf_print_host(pd->src, 0, pd->af); printf(" -> "); pf_print_host(pd->dst, 0, pd->af); printf(" state: "); pf_print_state(*state); printf(" seq=%u\n", seq); } REASON_SET(reason, PFRES_BADSTATE); return (PF_DROP); } else { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: OK ICMP %d:%d ", icmptype, icmpcode); pf_print_host(pd->src, 0, pd->af); printf(" -> "); pf_print_host(pd->dst, 0, pd->af); printf(" state: "); pf_print_state(*state); printf(" seq=%u\n", seq); } } /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk; if (PF_REVERSED_KEY(*state, pd->af)) nk = (*state)->key[pd->sidx]; else nk = (*state)->key[pd->didx]; #if defined(INET) && defined(INET6) int afto, sidx, didx; afto = pd->af != nk->af; if (afto && (*state)->direction == PF_IN) { sidx = pd2.didx; didx = pd2.sidx; } else { sidx = pd2.sidx; didx = pd2.didx; } if (afto) { if (pf_translate_icmp_af(nk->af, &pd->hdr.icmp)) return (PF_DROP); m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (c_caddr_t)&pd->hdr.icmp6); if (pf_change_icmp_af(pd->m, ipoff2, pd, &pd2, &nk->addr[sidx], &nk->addr[didx], pd->af, nk->af)) return (PF_DROP); if (nk->af == AF_INET) pd->proto = IPPROTO_ICMP; else pd->proto = IPPROTO_ICMPV6; pf_change_ap(pd->m, pd2.src, &th.th_sport, pd->ip_sum, &th.th_sum, &nk->addr[pd2.sidx], nk->port[sidx], 1, pd->af, nk->af); pf_change_ap(pd->m, pd2.dst, &th.th_dport, pd->ip_sum, &th.th_sum, &nk->addr[pd2.didx], nk->port[didx], 1, pd->af, nk->af); m_copyback(pd2.m, pd2.off, 8, (c_caddr_t)&th); PF_ACPY(pd->src, &nk->addr[pd2.sidx], nk->af); PF_ACPY(pd->dst, &nk->addr[pd2.didx], nk->af); pd->naf = nk->af; return (PF_AFRT); } #endif if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != th.th_sport) pf_change_icmp(pd2.src, &th.th_sport, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != th.th_dport) pf_change_icmp(pd2.dst, &th.th_dport, saddr, &nk->addr[pd2.didx], nk->port[pd2.didx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); copyback = 1; } if (copyback) { switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t )&h2); 
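					/* both the ICMP header and the quoted IP header changed */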
break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(pd->m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ default: unhandled_af(pd->af); } m_copyback(pd->m, pd2.off, 8, (caddr_t)&th); } return (PF_PASS); break; } case IPPROTO_UDP: { struct udphdr uh; if (!pf_pull_hdr(pd->m, pd2.off, &uh, sizeof(uh), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(udp)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_UDP; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[pd2.sidx] = uh.uh_sport; key.port[pd2.didx] = uh.uh_dport; STATE_LOOKUP(&key, *state, pd); /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk; if (PF_REVERSED_KEY(*state, pd->af)) nk = (*state)->key[pd->sidx]; else nk = (*state)->key[pd->didx]; #if defined(INET) && defined(INET6) int afto, sidx, didx; afto = pd->af != nk->af; if (afto && (*state)->direction == PF_IN) { sidx = pd2.didx; didx = pd2.sidx; } else { sidx = pd2.sidx; didx = pd2.didx; } if (afto) { if (pf_translate_icmp_af(nk->af, &pd->hdr.icmp)) return (PF_DROP); m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (c_caddr_t)&pd->hdr.icmp6); if (pf_change_icmp_af(pd->m, ipoff2, pd, &pd2, &nk->addr[sidx], &nk->addr[didx], pd->af, nk->af)) return (PF_DROP); if (nk->af == AF_INET) pd->proto = IPPROTO_ICMP; else pd->proto = IPPROTO_ICMPV6; pf_change_ap(pd->m, pd2.src, &uh.uh_sport, pd->ip_sum, &uh.uh_sum, &nk->addr[pd2.sidx], nk->port[sidx], 1, pd->af, nk->af); pf_change_ap(pd->m, pd2.dst, &uh.uh_dport, pd->ip_sum, &uh.uh_sum, &nk->addr[pd2.didx], nk->port[didx], 1, pd->af, nk->af); m_copyback(pd2.m, pd2.off, sizeof(uh), (c_caddr_t)&uh); PF_ACPY(&pd->nsaddr, &nk->addr[pd2.sidx], nk->af); PF_ACPY(&pd->ndaddr, &nk->addr[pd2.didx], nk->af); pd->naf = nk->af; return (PF_AFRT); } #endif if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != uh.uh_sport) pf_change_icmp(pd2.src, &uh.uh_sport, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], &uh.uh_sum, pd2.ip_sum, icmpsum, pd->ip_sum, 1, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != uh.uh_dport) pf_change_icmp(pd2.dst, &uh.uh_dport, saddr, &nk->addr[pd2.didx], nk->port[pd2.didx], &uh.uh_sum, pd2.ip_sum, icmpsum, pd->ip_sum, 1, pd2.af); switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t)&h2); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(pd->m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ } m_copyback(pd->m, pd2.off, sizeof(uh), (caddr_t)&uh); } return (PF_PASS); break; } #ifdef INET case IPPROTO_SCTP: { struct sctphdr sh; struct pf_state_peer *src; int copyback = 0; if (! 
pf_pull_hdr(pd->m, pd2.off, &sh, sizeof(sh), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(sctp)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_SCTP; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[pd2.sidx] = sh.src_port; key.port[pd2.didx] = sh.dest_port; STATE_LOOKUP(&key, *state, pd); if (pd->dir == (*state)->direction) { if (PF_REVERSED_KEY(*state, pd->af)) src = &(*state)->src; else src = &(*state)->dst; } else { if (PF_REVERSED_KEY(*state, pd->af)) src = &(*state)->dst; else src = &(*state)->src; } if (src->scrub->pfss_v_tag != sh.v_tag) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message has incorrect " "SCTP v_tag\n")); return (PF_DROP); } /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk; if (PF_REVERSED_KEY(*state, pd->af)) nk = (*state)->key[pd->sidx]; else nk = (*state)->key[pd->didx]; #if defined(INET) && defined(INET6) int afto, sidx, didx; afto = pd->af != nk->af; if (afto && (*state)->direction == PF_IN) { sidx = pd2.didx; didx = pd2.sidx; } else { sidx = pd2.sidx; didx = pd2.didx; } if (afto) { if (pf_translate_icmp_af(nk->af, &pd->hdr.icmp)) return (PF_DROP); m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (c_caddr_t)&pd->hdr.icmp6); if (pf_change_icmp_af(pd->m, ipoff2, pd, &pd2, &nk->addr[sidx], &nk->addr[didx], pd->af, nk->af)) return (PF_DROP); if (nk->af == AF_INET) pd->proto = IPPROTO_ICMP; else pd->proto = IPPROTO_ICMPV6; sh.src_port = nk->port[sidx]; sh.dest_port = nk->port[didx]; m_copyback(pd2.m, pd2.off, sizeof(sh), (c_caddr_t)&sh); PF_ACPY(pd->src, &nk->addr[pd2.sidx], nk->af); PF_ACPY(pd->dst, &nk->addr[pd2.didx], nk->af); pd->naf = nk->af; return (PF_AFRT); } #endif if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != sh.src_port) pf_change_icmp(pd2.src, &sh.src_port, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != sh.dest_port) pf_change_icmp(pd2.dst, &sh.dest_port, saddr, &nk->addr[pd2.didx], nk->port[pd2.didx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); copyback = 1; } if (copyback) { switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t )&h2); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(pd->m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ } m_copyback(pd->m, pd2.off, sizeof(sh), (caddr_t)&sh); } return (PF_PASS); break; } case IPPROTO_ICMP: { struct icmp *iih = &pd2.hdr.icmp; if (pd2.af != AF_INET) { REASON_SET(reason, PFRES_NORM); return (PF_DROP); } if (!pf_pull_hdr(pd->m, pd2.off, iih, ICMP_MINLEN, NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(icmp)\n")); return (PF_DROP); } icmpid = iih->icmp_id; pf_icmp_mapping(&pd2, iih->icmp_type, &icmp_dir, &virtual_id, &virtual_type); ret = pf_icmp_state_lookup(&key, &pd2, state, virtual_id, virtual_type, icmp_dir, &iidx, 0, 1); if (ret >= 0) { MPASS(*state == NULL); return (ret); } /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk; if (PF_REVERSED_KEY(*state, pd->af)) nk =
(*state)->key[pd->sidx]; else nk = (*state)->key[pd->didx]; #if defined(INET) && defined(INET6) int afto, sidx, didx; afto = pd->af != nk->af; if (afto && (*state)->direction == PF_IN) { sidx = pd2.didx; didx = pd2.sidx; iidx = !iidx; } else { sidx = pd2.sidx; didx = pd2.didx; } if (afto) { if (nk->af != AF_INET6) return (PF_DROP); if (pf_translate_icmp_af(nk->af, &pd->hdr.icmp)) return (PF_DROP); m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (c_caddr_t)&pd->hdr.icmp6); if (pf_change_icmp_af(pd->m, ipoff2, pd, &pd2, &nk->addr[sidx], &nk->addr[didx], pd->af, nk->af)) return (PF_DROP); pd->proto = IPPROTO_ICMPV6; if (pf_translate_icmp_af(nk->af, iih)) return (PF_DROP); if (virtual_type == htons(ICMP_ECHO) && nk->port[iidx] != iih->icmp_id) iih->icmp_id = nk->port[iidx]; m_copyback(pd2.m, pd2.off, ICMP_MINLEN, (c_caddr_t)iih); PF_ACPY(&pd->nsaddr, &nk->addr[pd2.sidx], nk->af); PF_ACPY(&pd->ndaddr, &nk->addr[pd2.didx], nk->af); pd->naf = nk->af; return (PF_AFRT); } #endif if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || (virtual_type == htons(ICMP_ECHO) && nk->port[iidx] != iih->icmp_id)) pf_change_icmp(pd2.src, (virtual_type == htons(ICMP_ECHO)) ? &iih->icmp_id : NULL, daddr, &nk->addr[pd2.sidx], (virtual_type == htons(ICMP_ECHO)) ? nk->port[iidx] : 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af)) pf_change_icmp(pd2.dst, NULL, NULL, &nk->addr[pd2.didx], 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET); m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp); m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t)&h2); m_copyback(pd->m, pd2.off, ICMP_MINLEN, (caddr_t)iih); } return (PF_PASS); break; } #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: { struct icmp6_hdr *iih = &pd2.hdr.icmp6; if (pd2.af != AF_INET6) { REASON_SET(reason, PFRES_NORM); return (PF_DROP); } if (!pf_pull_hdr(pd->m, pd2.off, iih, sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(icmp6)\n")); return (PF_DROP); } pf_icmp_mapping(&pd2, iih->icmp6_type, &icmp_dir, &virtual_id, &virtual_type); ret = pf_icmp_state_lookup(&key, &pd2, state, virtual_id, virtual_type, icmp_dir, &iidx, 0, 1); /* IPv6?
try matching a multicast address */ if (ret == PF_DROP && pd2.af == AF_INET6 && icmp_dir == PF_OUT) { MPASS(*state == NULL); ret = pf_icmp_state_lookup(&key, &pd2, state, virtual_id, virtual_type, icmp_dir, &iidx, 1, 1); } if (ret >= 0) { MPASS(*state == NULL); return (ret); } /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk; if (PF_REVERSED_KEY(*state, pd->af)) nk = (*state)->key[pd->sidx]; else nk = (*state)->key[pd->didx]; #if defined(INET) && defined(INET6) int afto, sidx, didx; afto = pd->af != nk->af; if (afto && (*state)->direction == PF_IN) { sidx = pd2.didx; didx = pd2.sidx; iidx = !iidx; } else { sidx = pd2.sidx; didx = pd2.didx; } if (afto) { if (nk->af != AF_INET) return (PF_DROP); if (pf_translate_icmp_af(nk->af, &pd->hdr.icmp)) return (PF_DROP); m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (c_caddr_t)&pd->hdr.icmp6); if (pf_change_icmp_af(pd->m, ipoff2, pd, &pd2, &nk->addr[sidx], &nk->addr[didx], pd->af, nk->af)) return (PF_DROP); pd->proto = IPPROTO_ICMP; if (pf_translate_icmp_af(nk->af, iih)) return (PF_DROP); if (virtual_type == htons(ICMP6_ECHO_REQUEST) && nk->port[iidx] != iih->icmp6_id) iih->icmp6_id = nk->port[iidx]; m_copyback(pd2.m, pd2.off, sizeof(struct icmp6_hdr), (c_caddr_t)iih); PF_ACPY(&pd->nsaddr, &nk->addr[pd2.sidx], nk->af); PF_ACPY(&pd->ndaddr, &nk->addr[pd2.didx], nk->af); pd->naf = nk->af; return (PF_AFRT); } #endif if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || ((virtual_type == htons(ICMP6_ECHO_REQUEST)) && nk->port[pd2.sidx] != iih->icmp6_id)) pf_change_icmp(pd2.src, (virtual_type == htons(ICMP6_ECHO_REQUEST)) ? &iih->icmp6_id : NULL, daddr, &nk->addr[pd2.sidx], (virtual_type == htons(ICMP6_ECHO_REQUEST)) ? nk->port[iidx] : 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET6); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af)) pf_change_icmp(pd2.dst, NULL, NULL, &nk->addr[pd2.didx], 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET6); m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (caddr_t)&pd->hdr.icmp6); m_copyback(pd->m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); m_copyback(pd->m, pd2.off, sizeof(struct icmp6_hdr), (caddr_t)iih); } return (PF_PASS); break; } #endif /* INET6 */ default: { key.af = pd2.af; key.proto = pd2.proto; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[0] = key.port[1] = 0; STATE_LOOKUP(&key, *state, pd); /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af)) pf_change_icmp(pd2.src, NULL, daddr, &nk->addr[pd2.sidx], 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af)) pf_change_icmp(pd2.dst, NULL, saddr, &nk->addr[pd2.didx], 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp); m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t)&h2); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(pd->m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ } } return (PF_PASS); break; } } } } /* * ipoff and off are measured from the start of the mbuf chain. * h must be at "ipoff" on the mbuf chain.
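 *
 * Illustrative use (arguments assumed): copy a header that may be
 * unaligned or split across mbufs into a local buffer before
 * inspecting it:
 *
 *	struct tcphdr th;
 *
 *	if (pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason,
 *	    AF_INET) == NULL)
 *		return (PF_DROP);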
*/ void * pf_pull_hdr(const struct mbuf *m, int off, void *p, int len, u_short *actionp, u_short *reasonp, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: { const struct ip *h = mtod(m, struct ip *); u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; if (fragoff) { if (fragoff >= len) ACTION_SET(actionp, PF_PASS); else { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_FRAG); } return (NULL); } if (m->m_pkthdr.len < off + len || ntohs(h->ip_len) < off + len) { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_SHORT); return (NULL); } break; } #endif /* INET */ #ifdef INET6 case AF_INET6: { const struct ip6_hdr *h = mtod(m, struct ip6_hdr *); if (m->m_pkthdr.len < off + len || (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < (unsigned)(off + len)) { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_SHORT); return (NULL); } break; } #endif /* INET6 */ } m_copydata(m, off, len, p); return (p); } int pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif, int rtableid) { struct ifnet *ifp; /* * Skip check for addresses with embedded interface scope, * as they would always match anyway. */ if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6)) return (1); if (af != AF_INET && af != AF_INET6) return (0); if (kif == V_pfi_all) return (1); /* Skip checks for ipsec interfaces */ if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) return (1); ifp = (kif != NULL) ? kif->pfik_ifp : NULL; switch (af) { #ifdef INET6 case AF_INET6: return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE, ifp)); #endif #ifdef INET case AF_INET: return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE, ifp)); #endif } return (0); } #ifdef INET static void pf_route(struct mbuf **m, struct pf_krule *r, struct ifnet *oifp, struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp) { struct mbuf *m0, *m1, *md; struct route ro; const struct sockaddr *gw = &ro.ro_dst; struct sockaddr_in *dst; struct ip *ip; struct ifnet *ifp = NULL; int error = 0; uint16_t ip_len, ip_off; uint16_t tmp; int r_dir; bool skip_test = false; KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); SDT_PROBE4(pf, ip, route_to, entry, *m, pd, s, oifp); if (s) { r_dir = s->direction; } else { r_dir = r->direction; } KASSERT(pd->dir == PF_IN || pd->dir == PF_OUT || r_dir == PF_IN || r_dir == PF_OUT, ("%s: invalid direction", __func__)); if ((pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || pd->pf_mtag->routed++ > 3) { m0 = *m; *m = NULL; SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad_locked; } if (pd->act.rt_kif != NULL) ifp = pd->act.rt_kif->pfik_ifp; if (pd->act.rt == PF_DUPTO) { if ((pd->pf_mtag->flags & PF_MTAG_FLAG_DUPLICATED)) { if (s != NULL) { PF_STATE_UNLOCK(s); } if (ifp == oifp) { /* When the 2nd interface is not skipped */ return; } else { m0 = *m; *m = NULL; SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad; } } else { pd->pf_mtag->flags |= PF_MTAG_FLAG_DUPLICATED; if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) { if (s) PF_STATE_UNLOCK(s); return; } } } else { if ((pd->act.rt == PF_REPLYTO) == (r_dir == pd->dir)) { if (pd->af == pd->naf) { pf_dummynet(pd, s, r, m); if (s) PF_STATE_UNLOCK(s); return; } else { if (r_dir == PF_IN) { skip_test = true; } } } /* * If we're actually doing route-to and af-to and are in the * reply direction. */ if (pd->act.rt_kif && pd->act.rt_kif->pfik_ifp && pd->af != pd->naf) { if (pd->act.rt == PF_ROUTETO && r->naf != AF_INET) { /* Un-set ifp so we do a plain route lookup. 
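 * Otherwise the interface recorded for the original address
 * family would be reused for the translated family, which may
 * have no route to the new destination; with ifp == NULL the
 * fib4_lookup() below selects the outgoing interface instead.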
*/ ifp = NULL; } if (pd->act.rt == PF_REPLYTO && r->naf != AF_INET6) { /* Un-set ifp so we do a plain route lookup. */ ifp = NULL; } } m0 = *m; } ip = mtod(m0, struct ip *); bzero(&ro, sizeof(ro)); dst = (struct sockaddr_in *)&ro.ro_dst; dst->sin_family = AF_INET; dst->sin_len = sizeof(struct sockaddr_in); dst->sin_addr.s_addr = pd->act.rt_addr.v4.s_addr; if (s != NULL){ if (ifp == NULL && (pd->af != pd->naf)) { /* We're in the AFTO case. Do a route lookup. */ const struct nhop_object *nh; nh = fib4_lookup(M_GETFIB(*m), ip->ip_dst, 0, NHR_NONE, 0); if (nh) { ifp = nh->nh_ifp; /* Use the gateway if needed. */ if (nh->nh_flags & NHF_GATEWAY) { gw = &nh->gw_sa; ro.ro_flags |= RT_HAS_GW; } else { dst->sin_addr = ip->ip_dst; } /* * Bind to the correct interface if we're * if-bound. We don't know which interface * that will be until here, so we've inserted * the state on V_pf_all. Fix that now. */ if (s->kif == V_pfi_all && ifp != NULL && r->rule_flag & PFRULE_IFBOUND) s->kif = ifp->if_pf_kif; } } if (r->rule_flag & PFRULE_IFBOUND && pd->act.rt == PF_REPLYTO && s->kif == V_pfi_all) { s->kif = pd->act.rt_kif; s->orig_kif = oifp->if_pf_kif; } PF_STATE_UNLOCK(s); } if (ifp == NULL) { m0 = *m; *m = NULL; SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad; } if (pd->dir == PF_IN && !skip_test) { if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp, &pd->act) != PF_PASS) { SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad; } else if (m0 == NULL) { SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto done; } if (m0->m_len < sizeof(struct ip)) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: m0->m_len < sizeof(struct ip)\n", __func__)); SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad; } ip = mtod(m0, struct ip *); } if (ifp->if_flags & IFF_LOOPBACK) m0->m_flags |= M_SKIP_FIREWALL; ip_len = ntohs(ip->ip_len); ip_off = ntohs(ip->ip_off); /* Copied from FreeBSD 10.0-CURRENT ip_output. */ m0->m_pkthdr.csum_flags |= CSUM_IP; if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) { in_delayed_cksum(m0); m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) { pf_sctp_checksum(m0, (uint32_t)(ip->ip_hl << 2)); m0->m_pkthdr.csum_flags &= ~CSUM_SCTP; } if (pd->dir == PF_IN) { /* * Make sure dummynet gets the correct direction, in case it needs to * re-inject later. */ pd->dir = PF_OUT; /* * The following processing is actually the rest of the inbound processing, even * though we've marked it as outbound (so we don't look through dummynet) and it * happens after the outbound processing (pf_test(PF_OUT) above). * Swap the dummynet pipe numbers, because it's going to come to the wrong * conclusion about what direction it's processing, and we can't fix it or it * will re-inject incorrectly. Swapping the pipe numbers means that its incorrect * decision will pick the right pipe, and everything will mostly work as expected. */ tmp = pd->act.dnrpipe; pd->act.dnrpipe = pd->act.dnpipe; pd->act.dnpipe = tmp; } /* * If small enough for interface, or the interface will take * care of the fragmentation for us, we can just send directly. */ if (ip_len <= ifp->if_mtu || (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) { ip->ip_sum = 0; if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) { ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); m0->m_pkthdr.csum_flags &= ~CSUM_IP; } m_clrprotoflags(m0); /* Avoid confusing lower layers. 
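 * Protocol flags set during input processing have no meaning
 * for the interface the packet is about to leave through and
 * could be misinterpreted by its output path.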
*/ md = m0; error = pf_dummynet_route(pd, s, r, ifp, gw, &md); if (md != NULL) { error = (*ifp->if_output)(ifp, md, gw, &ro); SDT_PROBE2(pf, ip, route_to, output, ifp, error); } goto done; } /* Balk when DF bit is set or the interface didn't support TSO. */ if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { error = EMSGSIZE; KMOD_IPSTAT_INC(ips_cantfrag); if (pd->act.rt != PF_DUPTO) { if (s && s->nat_rule != NULL) PACKET_UNDO_NAT(m0, pd, (ip->ip_hl << 2) + (ip_off & IP_OFFMASK), s); icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, ifp->if_mtu); SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto done; } else { SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad; } } error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist); if (error) { SDT_PROBE1(pf, ip, route_to, drop, __LINE__); goto bad; } for (; m0; m0 = m1) { m1 = m0->m_nextpkt; m0->m_nextpkt = NULL; if (error == 0) { m_clrprotoflags(m0); md = m0; pd->pf_mtag = pf_find_mtag(md); error = pf_dummynet_route(pd, s, r, ifp, gw, &md); if (md != NULL) { error = (*ifp->if_output)(ifp, md, gw, &ro); SDT_PROBE2(pf, ip, route_to, output, ifp, error); } } else m_freem(m0); } if (error == 0) KMOD_IPSTAT_INC(ips_fragmented); done: if (pd->act.rt != PF_DUPTO) *m = NULL; return; bad_locked: if (s) PF_STATE_UNLOCK(s); bad: m_freem(m0); goto done; } #endif /* INET */ #ifdef INET6 static void pf_route6(struct mbuf **m, struct pf_krule *r, struct ifnet *oifp, struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp) { struct mbuf *m0, *md; struct m_tag *mtag; struct sockaddr_in6 dst; struct ip6_hdr *ip6; struct ifnet *ifp = NULL; int r_dir; bool skip_test = false; KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); SDT_PROBE4(pf, ip6, route_to, entry, *m, pd, s, oifp); if (s) { r_dir = s->direction; } else { r_dir = r->direction; } KASSERT(pd->dir == PF_IN || pd->dir == PF_OUT || r_dir == PF_IN || r_dir == PF_OUT, ("%s: invalid direction", __func__)); if ((pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || pd->pf_mtag->routed++ > 3) { m0 = *m; *m = NULL; SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto bad_locked; } if (pd->act.rt_kif != NULL) ifp = pd->act.rt_kif->pfik_ifp; if (pd->act.rt == PF_DUPTO) { if ((pd->pf_mtag->flags & PF_MTAG_FLAG_DUPLICATED)) { if (s != NULL) { PF_STATE_UNLOCK(s); } if (ifp == oifp) { /* When the 2nd interface is not skipped */ return; } else { m0 = *m; *m = NULL; SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto bad; } } else { pd->pf_mtag->flags |= PF_MTAG_FLAG_DUPLICATED; if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) { if (s) PF_STATE_UNLOCK(s); return; } } } else { if ((pd->act.rt == PF_REPLYTO) == (r_dir == pd->dir)) { if (pd->af == pd->naf) { pf_dummynet(pd, s, r, m); if (s) PF_STATE_UNLOCK(s); return; } else { if (r_dir == PF_IN) { skip_test = true; } } } /* * If we're actually doing route-to and af-to and are in the * reply direction. */ if (pd->act.rt_kif && pd->act.rt_kif->pfik_ifp && pd->af != pd->naf) { if (pd->act.rt == PF_ROUTETO && r->naf != AF_INET6) { /* Un-set ifp so we do a plain route lookup. */ ifp = NULL; } if (pd->act.rt == PF_REPLYTO && r->naf != AF_INET) { /* Un-set ifp so we do a plain route lookup. 
*/ ifp = NULL; } } m0 = *m; } ip6 = mtod(m0, struct ip6_hdr *); bzero(&dst, sizeof(dst)); dst.sin6_family = AF_INET6; dst.sin6_len = sizeof(dst); PF_ACPY((struct pf_addr *)&dst.sin6_addr, &pd->act.rt_addr, AF_INET6); if (s != NULL) { if (ifp == NULL && (pd->af != pd->naf)) { const struct nhop_object *nh; nh = fib6_lookup(M_GETFIB(*m), &ip6->ip6_dst, 0, NHR_NONE, 0); if (nh) { ifp = nh->nh_ifp; /* Use the gateway if needed. */ if (nh->nh_flags & NHF_GATEWAY) bcopy(&nh->gw6_sa.sin6_addr, &dst.sin6_addr, sizeof(dst.sin6_addr)); else dst.sin6_addr = ip6->ip6_dst; /* * Bind to the correct interface if we're * if-bound. We don't know which interface * that will be until here, so we've inserted * the state on V_pf_all. Fix that now. */ if (s->kif == V_pfi_all && ifp != NULL && r->rule_flag & PFRULE_IFBOUND) s->kif = ifp->if_pf_kif; } } if (r->rule_flag & PFRULE_IFBOUND && pd->act.rt == PF_REPLYTO && s->kif == V_pfi_all) { s->kif = pd->act.rt_kif; s->orig_kif = oifp->if_pf_kif; } PF_STATE_UNLOCK(s); } if (pd->af != pd->naf) { struct udphdr *uh = &pd->hdr.udp; if (pd->proto == IPPROTO_UDP && uh->uh_sum == 0) { uh->uh_sum = in6_cksum_pseudo(ip6, ntohs(uh->uh_ulen), IPPROTO_UDP, 0); m_copyback(m0, pd->off, sizeof(*uh), pd->hdr.any); } } if (ifp == NULL) { m0 = *m; *m = NULL; SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto bad; } if (pd->dir == PF_IN && !skip_test) { if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT, ifp, &m0, inp, &pd->act) != PF_PASS) { SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto bad; } else if (m0 == NULL) { SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto done; } if (m0->m_len < sizeof(struct ip6_hdr)) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: m0->m_len < sizeof(struct ip6_hdr)\n", __func__)); SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto bad; } ip6 = mtod(m0, struct ip6_hdr *); } if (ifp->if_flags & IFF_LOOPBACK) m0->m_flags |= M_SKIP_FIREWALL; if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 & ~ifp->if_hwassist) { uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6); in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr)); m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6; } if (pd->dir == PF_IN) { uint16_t tmp; /* * Make sure dummynet gets the correct direction, in case it needs to * re-inject later. */ pd->dir = PF_OUT; /* * The following processing is actually the rest of the inbound processing, even * though we've marked it as outbound (so we don't look through dummynet) and it * happens after the outbound processing (pf_test(PF_OUT) above). * Swap the dummynet pipe numbers, because it's going to come to the wrong * conclusion about what direction it's processing, and we can't fix it or it * will re-inject incorrectly. Swapping the pipe numbers means that its incorrect * decision will pick the right pipe, and everything will mostly work as expected. */ tmp = pd->act.dnrpipe; pd->act.dnrpipe = pd->act.dnpipe; pd->act.dnpipe = tmp; } /* * If the packet is too large for the outgoing interface, * send back an icmp6 error. 
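 *
 * IPv6 routers must not fragment packets in flight (RFC 8200), so,
 * for example (sizes assumed), a 1500 byte reassembled datagram
 * headed for a 1280 byte MTU tunnel triggers an ICMP6_PACKET_TOO_BIG
 * quoting ifp->if_mtu, and the sender is expected to lower its path
 * MTU estimate accordingly.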
*/ if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr)) dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index); mtag = m_tag_find(m0, PACKET_TAG_PF_REASSEMBLED, NULL); if (mtag != NULL) { int ret __sdt_used; ret = pf_refragment6(ifp, &m0, mtag, ifp, true); SDT_PROBE2(pf, ip6, route_to, output, ifp, ret); goto done; } if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { md = m0; pf_dummynet_route(pd, s, r, ifp, sintosa(&dst), &md); if (md != NULL) { int ret __sdt_used; ret = nd6_output_ifp(ifp, ifp, md, &dst, NULL); SDT_PROBE2(pf, ip6, route_to, output, ifp, ret); } } else { in6_ifstat_inc(ifp, ifs6_in_toobig); if (pd->act.rt != PF_DUPTO) { if (s && s->nat_rule != NULL) PACKET_UNDO_NAT(m0, pd, ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr), s); icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); } else { SDT_PROBE1(pf, ip6, route_to, drop, __LINE__); goto bad; } } done: if (pd->act.rt != PF_DUPTO) *m = NULL; return; bad_locked: if (s) PF_STATE_UNLOCK(s); bad: m_freem(m0); goto done; } #endif /* INET6 */ /* * FreeBSD supports cksum offloads for the following drivers. * em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4) * * CSUM_DATA_VALID | CSUM_PSEUDO_HDR : * network driver performed cksum including pseudo header, need to verify * csum_data * CSUM_DATA_VALID : * network driver performed cksum, needs to additional pseudo header * cksum computation with partial csum_data(i.e. lack of H/W support for * pseudo header, for instance sk(4) and possibly gem(4)) * * After validating the cksum of packet, set both flag CSUM_DATA_VALID and * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper * TCP/UDP layer. * Also, set csum_data to 0xffff to force cksum validation. */ static int pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af) { u_int16_t sum = 0; int hw_assist = 0; struct ip *ip; if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) return (1); if (m->m_pkthdr.len < off + len) return (1); switch (p) { case IPPROTO_TCP: if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { sum = m->m_pkthdr.csum_data; } else { ip = mtod(m, struct ip *); sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP)); } sum ^= 0xffff; ++hw_assist; } break; case IPPROTO_UDP: if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { sum = m->m_pkthdr.csum_data; } else { ip = mtod(m, struct ip *); sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_UDP)); } sum ^= 0xffff; ++hw_assist; } break; case IPPROTO_ICMP: #ifdef INET6 case IPPROTO_ICMPV6: #endif /* INET6 */ break; default: return (1); } if (!hw_assist) { switch (af) { case AF_INET: if (m->m_len < sizeof(struct ip)) return (1); sum = in4_cksum(m, (p == IPPROTO_ICMP ? 
0 : p), off, len); break; #ifdef INET6 case AF_INET6: if (m->m_len < sizeof(struct ip6_hdr)) return (1); sum = in6_cksum(m, p, off, len); break; #endif /* INET6 */ } } if (sum) { switch (p) { case IPPROTO_TCP: { KMOD_TCPSTAT_INC(tcps_rcvbadsum); break; } case IPPROTO_UDP: { KMOD_UDPSTAT_INC(udps_badsum); break; } #ifdef INET case IPPROTO_ICMP: { KMOD_ICMPSTAT_INC(icps_checksum); break; } #endif #ifdef INET6 case IPPROTO_ICMPV6: { KMOD_ICMP6STAT_INC(icp6s_checksum); break; } #endif /* INET6 */ } return (1); } else { if (p == IPPROTO_TCP || p == IPPROTO_UDP) { m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } } return (0); } static bool pf_pdesc_to_dnflow(const struct pf_pdesc *pd, const struct pf_krule *r, const struct pf_kstate *s, struct ip_fw_args *dnflow) { int dndir = r->direction; if (s && dndir == PF_INOUT) { dndir = s->direction; } else if (dndir == PF_INOUT) { /* Assume primary direction. Happens when we've set dnpipe in * the ethernet level code. */ dndir = pd->dir; } if (pd->pf_mtag->flags & PF_MTAG_FLAG_DUMMYNETED) return (false); memset(dnflow, 0, sizeof(*dnflow)); if (pd->dport != NULL) dnflow->f_id.dst_port = ntohs(*pd->dport); if (pd->sport != NULL) dnflow->f_id.src_port = ntohs(*pd->sport); if (pd->dir == PF_IN) dnflow->flags |= IPFW_ARGS_IN; else dnflow->flags |= IPFW_ARGS_OUT; if (pd->dir != dndir && pd->act.dnrpipe) { dnflow->rule.info = pd->act.dnrpipe; } else if (pd->dir == dndir && pd->act.dnpipe) { dnflow->rule.info = pd->act.dnpipe; } else { return (false); } dnflow->rule.info |= IPFW_IS_DUMMYNET; if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFSTATE_DN_IS_PIPE) dnflow->rule.info |= IPFW_IS_PIPE; dnflow->f_id.proto = pd->proto; dnflow->f_id.extra = dnflow->rule.info; switch (pd->naf) { case AF_INET: dnflow->f_id.addr_type = 4; dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr); dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr); break; case AF_INET6: dnflow->flags |= IPFW_ARGS_IP6; dnflow->f_id.addr_type = 6; dnflow->f_id.src_ip6 = pd->src->v6; dnflow->f_id.dst_ip6 = pd->dst->v6; break; } return (true); } int pf_test_eth(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) { struct pfi_kkif *kif; struct mbuf *m = *m0; M_ASSERTPKTHDR(m); MPASS(ifp->if_vnet == curvnet); NET_EPOCH_ASSERT(); if (!V_pf_status.running) return (PF_PASS); kif = (struct pfi_kkif *)ifp->if_pf_kif; if (kif == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: kif == NULL, if_xname %s\n", __func__, ifp->if_xname)); return (PF_DROP); } if (kif->pfik_flags & PFI_IFLAG_SKIP) return (PF_PASS); if (m->m_flags & M_SKIP_FIREWALL) return (PF_PASS); if (__predict_false(! M_WRITABLE(*m0))) { m = *m0 = m_unshare(*m0, M_NOWAIT); if (*m0 == NULL) return (PF_DROP); } /* Stateless! */ return (pf_test_eth_rule(dir, kif, m0)); } static __inline void pf_dummynet_flag_remove(struct mbuf *m, struct pf_mtag *pf_mtag) { struct m_tag *mtag; pf_mtag->flags &= ~PF_MTAG_FLAG_DUMMYNET; /* dummynet adds this tag, but pf does not need it, * and keeping it creates unexpected behavior, * e.g. in case of divert(4) usage right after dummynet. 
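 * Only the MTAG_IPFW_RULE tag is deleted below; the pf mtag
 * itself stays attached, with PF_MTAG_FLAG_DUMMYNET cleared
 * above.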
*/ mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL); if (mtag != NULL) m_tag_delete(m, mtag); } static int pf_dummynet(struct pf_pdesc *pd, struct pf_kstate *s, struct pf_krule *r, struct mbuf **m0) { return (pf_dummynet_route(pd, s, r, NULL, NULL, m0)); } static int pf_dummynet_route(struct pf_pdesc *pd, struct pf_kstate *s, struct pf_krule *r, struct ifnet *ifp, const struct sockaddr *sa, struct mbuf **m0) { struct ip_fw_args dnflow; NET_EPOCH_ASSERT(); if (pd->act.dnpipe == 0 && pd->act.dnrpipe == 0) return (0); if (ip_dn_io_ptr == NULL) { m_freem(*m0); *m0 = NULL; return (ENOMEM); } if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(*m0)) == NULL)) { m_freem(*m0); *m0 = NULL; return (ENOMEM); } if (ifp != NULL) { pd->pf_mtag->flags |= PF_MTAG_FLAG_ROUTE_TO; pd->pf_mtag->if_index = ifp->if_index; pd->pf_mtag->if_idxgen = ifp->if_idxgen; MPASS(sa != NULL); switch (sa->sa_family) { case AF_INET: memcpy(&pd->pf_mtag->dst, sa, sizeof(struct sockaddr_in)); break; case AF_INET6: memcpy(&pd->pf_mtag->dst, sa, sizeof(struct sockaddr_in6)); break; } } if (s != NULL && s->nat_rule != NULL && s->nat_rule->action == PF_RDR && ( #ifdef INET (pd->af == AF_INET && IN_LOOPBACK(ntohl(pd->dst->v4.s_addr))) || #endif (pd->af == AF_INET6 && IN6_IS_ADDR_LOOPBACK(&pd->dst->v6)))) { /* * If we're redirecting to loopback mark this packet * as being local. Otherwise it might get dropped * if dummynet re-injects. */ (*m0)->m_pkthdr.rcvif = V_loif; } if (pf_pdesc_to_dnflow(pd, r, s, &dnflow)) { pd->pf_mtag->flags |= PF_MTAG_FLAG_DUMMYNET; pd->pf_mtag->flags |= PF_MTAG_FLAG_DUMMYNETED; ip_dn_io_ptr(m0, &dnflow); if (*m0 != NULL) { pd->pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO; pf_dummynet_flag_remove(*m0, pd->pf_mtag); } } return (0); } #ifdef INET6 static int pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end, u_short *reason) { struct ip6_opt opt; struct ip6_opt_jumbo jumbo; while (off < end) { if (!pf_pull_hdr(pd->m, off, &opt.ip6o_type, sizeof(opt.ip6o_type), NULL, reason, AF_INET6)) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short opt type")); return (PF_DROP); } if (opt.ip6o_type == IP6OPT_PAD1) { off++; continue; } if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt), NULL, reason, AF_INET6)) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short opt")); return (PF_DROP); } if (off + sizeof(opt) + opt.ip6o_len > end) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 long opt")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } switch (opt.ip6o_type) { case IP6OPT_JUMBO: if (pd->jumbolen != 0) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 multiple jumbo")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } if (ntohs(h->ip6_plen) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 bad jumbo plen")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo), NULL, reason, AF_INET6)) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short jumbo")); return (PF_DROP); } memcpy(&pd->jumbolen, jumbo.ip6oj_jumbo_len, sizeof(pd->jumbolen)); pd->jumbolen = ntohl(pd->jumbolen); if (pd->jumbolen < IPV6_MAXPACKET) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short jumbolen")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } break; default: break; } off += sizeof(opt) + opt.ip6o_len; } return (PF_PASS); } int pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason) { struct ip6_frag frag; struct ip6_ext ext; struct ip6_rthdr rthdr; uint32_t end; int fraghdr_cnt = 0, rthdr_cnt = 0; pd->off += sizeof(struct ip6_hdr); end = pd->off + ntohs(h->ip6_plen); pd->fragoff = pd->extoff = pd->jumbolen = 0; pd->proto 
= h->ip6_nxt; for (;;) { switch (pd->proto) { case IPPROTO_FRAGMENT: if (fraghdr_cnt++) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 multiple fragment")); REASON_SET(reason, PFRES_FRAG); return (PF_DROP); } /* jumbo payload packets cannot be fragmented */ if (pd->jumbolen != 0) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 fragmented jumbo")); REASON_SET(reason, PFRES_FRAG); return (PF_DROP); } if (!pf_pull_hdr(pd->m, pd->off, &frag, sizeof(frag), NULL, reason, AF_INET6)) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short fragment")); return (PF_DROP); } /* stop walking over non initial fragments */ if (ntohs((frag.ip6f_offlg & IP6F_OFF_MASK)) != 0) { pd->fragoff = pd->off; return (PF_PASS); } /* RFC6946: reassemble only non atomic fragments */ if (frag.ip6f_offlg & IP6F_MORE_FRAG) pd->fragoff = pd->off; pd->off += sizeof(frag); pd->proto = frag.ip6f_nxt; break; case IPPROTO_ROUTING: if (rthdr_cnt++) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 multiple rthdr")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } /* fragments may be short */ if (pd->fragoff != 0 && end < pd->off + sizeof(rthdr)) { pd->off = pd->fragoff; pd->proto = IPPROTO_FRAGMENT; return (PF_PASS); } if (!pf_pull_hdr(pd->m, pd->off, &rthdr, sizeof(rthdr), NULL, reason, AF_INET6)) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short rthdr")); return (PF_DROP); } if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 rthdr0")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } /* FALLTHROUGH */ case IPPROTO_AH: case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext), NULL, reason, AF_INET6)) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short exthdr")); return (PF_DROP); } /* fragments may be short */ if (pd->fragoff != 0 && end < pd->off + sizeof(ext)) { pd->off = pd->fragoff; pd->proto = IPPROTO_FRAGMENT; return (PF_PASS); } /* reassembly needs the ext header before the frag */ if (pd->fragoff == 0) pd->extoff = pd->off; if (pd->proto == IPPROTO_HOPOPTS && pd->fragoff == 0) { if (pf_walk_option6(pd, h, pd->off + sizeof(ext), pd->off + (ext.ip6e_len + 1) * 8, reason) != PF_PASS) return (PF_DROP); if (ntohs(h->ip6_plen) == 0 && pd->jumbolen != 0) { DPFPRINTF(PF_DEBUG_MISC, ("IPv6 missing jumbo")); REASON_SET(reason, PFRES_IPOPTIONS); return (PF_DROP); } } if (pd->proto == IPPROTO_AH) pd->off += (ext.ip6e_len + 2) * 4; else pd->off += (ext.ip6e_len + 1) * 8; pd->proto = ext.ip6e_nxt; break; case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: case IPPROTO_ICMPV6: /* fragments may be short, ignore inner header then */ if (pd->fragoff != 0 && end < pd->off + (pd->proto == IPPROTO_TCP ? sizeof(struct tcphdr) : pd->proto == IPPROTO_UDP ? sizeof(struct udphdr) : pd->proto == IPPROTO_SCTP ? sizeof(struct sctphdr) : sizeof(struct icmp6_hdr))) { pd->off = pd->fragoff; pd->proto = IPPROTO_FRAGMENT; } /* FALLTHROUGH */ default: return (PF_PASS); } } } #endif static void pf_init_pdesc(struct pf_pdesc *pd, struct mbuf *m) { memset(pd, 0, sizeof(*pd)); pd->pf_mtag = pf_find_mtag(m); pd->m = m; } static int pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0, u_short *action, u_short *reason, struct pfi_kkif *kif, struct pf_rule_actions *default_actions) { pd->dir = dir; pd->kif = kif; pd->m = *m0; pd->sidx = (dir == PF_IN) ? 0 : 1; pd->didx = (dir == PF_IN) ? 
1 : 0; pd->af = pd->naf = af; TAILQ_INIT(&pd->sctp_multihome_jobs); if (default_actions != NULL) memcpy(&pd->act, default_actions, sizeof(pd->act)); if (pd->pf_mtag && pd->pf_mtag->dnpipe) { pd->act.dnpipe = pd->pf_mtag->dnpipe; pd->act.flags = pd->pf_mtag->dnflags; } switch (af) { #ifdef INET case AF_INET: { struct ip *h; if (__predict_false((*m0)->m_len < sizeof(struct ip)) && (pd->m = *m0 = m_pullup(*m0, sizeof(struct ip))) == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test: m_len < sizeof(struct ip), pullup failed\n")); *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } if (pf_normalize_ip(reason, pd) != PF_PASS) { /* We do IP header normalization and packet reassembly here */ *m0 = pd->m; *action = PF_DROP; return (-1); } *m0 = pd->m; h = mtod(pd->m, struct ip *); pd->off = h->ip_hl << 2; if (pd->off < (int)sizeof(*h)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->src = (struct pf_addr *)&h->ip_src; pd->dst = (struct pf_addr *)&h->ip_dst; pd->ip_sum = &h->ip_sum; pd->virtual_proto = pd->proto = h->ip_p; pd->tos = h->ip_tos & ~IPTOS_ECN_MASK; pd->ttl = h->ip_ttl; pd->tot_len = ntohs(h->ip_len); pd->act.rtableid = -1; pd->df = h->ip_off & htons(IP_DF); if (h->ip_hl > 5) /* has options */ pd->badopts++; if (h->ip_off & htons(IP_MF | IP_OFFMASK)) pd->virtual_proto = PF_VPROTO_FRAGMENT; break; } #endif #ifdef INET6 case AF_INET6: { struct ip6_hdr *h; if (__predict_false((*m0)->m_len < sizeof(struct ip6_hdr)) && (pd->m = *m0 = m_pullup(*m0, sizeof(struct ip6_hdr))) == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test6: m_len < sizeof(struct ip6_hdr)" ", pullup failed\n")); *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } h = mtod(pd->m, struct ip6_hdr *); pd->off = 0; if (pf_walk_header6(pd, h, reason) != PF_PASS) { *action = PF_DROP; return (-1); } h = mtod(pd->m, struct ip6_hdr *); pd->src = (struct pf_addr *)&h->ip6_src; pd->dst = (struct pf_addr *)&h->ip6_dst; pd->ip_sum = NULL; pd->tos = IPV6_DSCP(h); pd->ttl = h->ip6_hlim; pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); pd->virtual_proto = pd->proto = h->ip6_nxt; pd->act.rtableid = -1; if (pd->fragoff != 0) pd->virtual_proto = PF_VPROTO_FRAGMENT; /* * we do not support jumbogram. if we keep going, zero ip6_plen * will do something bad, so drop the packet for now. */ if (htons(h->ip6_plen) == 0) { *action = PF_DROP; return (-1); } /* We do IP header normalization and packet reassembly here */ if (pf_normalize_ip6(pd->fragoff, reason, pd) != PF_PASS) { *m0 = pd->m; *action = PF_DROP; return (-1); } *m0 = pd->m; if (pd->m == NULL) { /* packet sits in reassembly queue, no error */ *action = PF_PASS; return (-1); } /* Update pointers into the packet. */ h = mtod(pd->m, struct ip6_hdr *); pd->src = (struct pf_addr *)&h->ip6_src; pd->dst = (struct pf_addr *)&h->ip6_dst; pd->off = 0; if (pf_walk_header6(pd, h, reason) != PF_PASS) { *action = PF_DROP; return (-1); } if (m_tag_find(pd->m, PACKET_TAG_PF_REASSEMBLED, NULL) != NULL) { /* * Reassembly may have changed the next protocol from * fragment to something else, so update. 
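 *
 * For example (packet assumed): a first fragment arrives with
 * ip6_nxt == IPPROTO_FRAGMENT; reassembly above strips the
 * fragment header, and the second pf_walk_header6() pass then
 * reports the real payload protocol, e.g. IPPROTO_TCP.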
*/ pd->virtual_proto = pd->proto; MPASS(pd->fragoff == 0); } if (pd->fragoff != 0) pd->virtual_proto = PF_VPROTO_FRAGMENT; break; } #endif default: panic("pf_setup_pdesc called with illegal af %u", af); } switch (pd->virtual_proto) { case IPPROTO_TCP: { struct tcphdr *th = &pd->hdr.tcp; if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th), action, reason, af)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->hdrlen = sizeof(*th); pd->p_len = pd->tot_len - pd->off - (th->th_off << 2); pd->sport = &th->th_sport; pd->dport = &th->th_dport; pd->pcksum = &th->th_sum; break; } case IPPROTO_UDP: { struct udphdr *uh = &pd->hdr.udp; if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh), action, reason, af)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->hdrlen = sizeof(*uh); if (uh->uh_dport == 0 || ntohs(uh->uh_ulen) > pd->m->m_pkthdr.len - pd->off || ntohs(uh->uh_ulen) < sizeof(struct udphdr)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->sport = &uh->uh_sport; pd->dport = &uh->uh_dport; pd->pcksum = &uh->uh_sum; break; } case IPPROTO_SCTP: { if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.sctp, sizeof(pd->hdr.sctp), action, reason, af)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->hdrlen = sizeof(pd->hdr.sctp); pd->p_len = pd->tot_len - pd->off; pd->sport = &pd->hdr.sctp.src_port; pd->dport = &pd->hdr.sctp.dest_port; if (pd->hdr.sctp.src_port == 0 || pd->hdr.sctp.dest_port == 0) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } if (pf_scan_sctp(pd) != PF_PASS) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } /* * Placeholder. The SCTP checksum is 32-bits, but * pf_test_state() expects to update a 16-bit checksum. * Provide a dummy value which we'll subsequently ignore. 
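 *
 * SCTP protects packets with a 32-bit CRC32c rather than the
 * 16-bit one's complement Internet checksum used by TCP and
 * UDP, so the incremental 16-bit fixups do not apply; the CRC
 * is instead recomputed in full when the packet is modified
 * (see pf_sctp_checksum()).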
*/ pd->pcksum = &pd->sctp_dummy_sum; break; } case IPPROTO_ICMP: { if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp, ICMP_MINLEN, action, reason, af)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->hdrlen = ICMP_MINLEN; break; } #ifdef INET6 case IPPROTO_ICMPV6: { size_t icmp_hlen = sizeof(struct icmp6_hdr); if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen, action, reason, af)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } /* ICMP headers we look further into to match state */ switch (pd->hdr.icmp6.icmp6_type) { case MLD_LISTENER_QUERY: case MLD_LISTENER_REPORT: icmp_hlen = sizeof(struct mld_hdr); break; case ND_NEIGHBOR_SOLICIT: case ND_NEIGHBOR_ADVERT: icmp_hlen = sizeof(struct nd_neighbor_solicit); break; } if (icmp_hlen > sizeof(struct icmp6_hdr) && !pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen, action, reason, af)) { *action = PF_DROP; REASON_SET(reason, PFRES_SHORT); return (-1); } pd->hdrlen = icmp_hlen; pd->pcksum = &pd->hdr.icmp.icmp_cksum; break; } #endif } if (pd->sport) pd->osport = pd->nsport = *pd->sport; if (pd->dport) pd->odport = pd->ndport = *pd->dport; return (0); } static void pf_counters_inc(int action, struct pf_pdesc *pd, struct pf_kstate *s, struct pf_krule *r, struct pf_krule *a) { struct pf_krule *tr; int dir = pd->dir; int dirndx; pf_counter_u64_critical_enter(); pf_counter_u64_add_protected( &pd->kif->pfik_bytes[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS], pd->tot_len); pf_counter_u64_add_protected( &pd->kif->pfik_packets[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS], 1); if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) { dirndx = (dir == PF_OUT); pf_counter_u64_add_protected(&r->packets[dirndx], 1); pf_counter_u64_add_protected(&r->bytes[dirndx], pd->tot_len); pf_update_timestamp(r); if (a != NULL) { pf_counter_u64_add_protected(&a->packets[dirndx], 1); pf_counter_u64_add_protected(&a->bytes[dirndx], pd->tot_len); } if (s != NULL) { struct pf_krule_item *ri; if (s->nat_rule != NULL) { pf_counter_u64_add_protected(&s->nat_rule->packets[dirndx], 1); pf_counter_u64_add_protected(&s->nat_rule->bytes[dirndx], pd->tot_len); } /* * Source nodes are accessed unlocked here. * But since we are operating with stateful tracking * and the state is locked, those SNs could not have * been freed. */ for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++) { if (s->sns[sn_type] != NULL) { counter_u64_add( s->sns[sn_type]->packets[dirndx], 1); counter_u64_add( s->sns[sn_type]->bytes[dirndx], pd->tot_len); } } dirndx = (dir == s->direction) ? 0 : 1; s->packets[dirndx]++; s->bytes[dirndx] += pd->tot_len; SLIST_FOREACH(ri, &s->match_rules, entry) { pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1); pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd->tot_len); if (ri->r->src.addr.type == PF_ADDR_TABLE) pfr_update_stats(ri->r->src.addr.p.tbl, (s == NULL) ? pd->src : &s->key[(s->direction == PF_IN)]-> addr[(s->direction == PF_OUT)], pd->af, pd->tot_len, dir == PF_OUT, r->action == PF_PASS, ri->r->src.neg); if (ri->r->dst.addr.type == PF_ADDR_TABLE) pfr_update_stats(ri->r->dst.addr.p.tbl, (s == NULL) ? pd->dst : &s->key[(s->direction == PF_IN)]-> addr[(s->direction == PF_IN)], pd->af, pd->tot_len, dir == PF_OUT, r->action == PF_PASS, ri->r->dst.neg); } } tr = r; if (s != NULL && s->nat_rule != NULL && r == &V_pf_default_rule) tr = s->nat_rule; if (tr->src.addr.type == PF_ADDR_TABLE) pfr_update_stats(tr->src.addr.p.tbl, (s == NULL) ?
pd->src : &s->key[(s->direction == PF_IN)]-> addr[(s->direction == PF_OUT)], pd->af, pd->tot_len, dir == PF_OUT, r->action == PF_PASS, tr->src.neg); if (tr->dst.addr.type == PF_ADDR_TABLE) pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL) ? pd->dst : &s->key[(s->direction == PF_IN)]-> addr[(s->direction == PF_IN)], pd->af, pd->tot_len, dir == PF_OUT, r->action == PF_PASS, tr->dst.neg); } pf_counter_u64_critical_exit(); } static void pf_log_matches(struct pf_pdesc *pd, struct pf_krule *rm, struct pf_krule *am, struct pf_kruleset *ruleset, struct pf_krule_slist *matchrules) { struct pf_krule_item *ri; /* if this is the log(matches) rule, packet has been logged already */ if (rm->log & PF_LOG_MATCHES) return; SLIST_FOREACH(ri, matchrules, entry) if (ri->r->log & PF_LOG_MATCHES) PFLOG_PACKET(rm->action, PFRES_MATCH, rm, am, ruleset, pd, 1, ri->r); } #if defined(INET) || defined(INET6) int pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp, struct pf_rule_actions *default_actions) { struct pfi_kkif *kif; u_short action, reason = 0; struct m_tag *mtag; struct pf_krule *a = NULL, *r = &V_pf_default_rule; struct pf_kstate *s = NULL; struct pf_kruleset *ruleset = NULL; struct pf_pdesc pd; int use_2nd_queue = 0; uint16_t tag; PF_RULES_RLOCK_TRACKER; KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir)); M_ASSERTPKTHDR(*m0); if (!V_pf_status.running) return (PF_PASS); PF_RULES_RLOCK(); kif = (struct pfi_kkif *)ifp->if_pf_kif; if (__predict_false(kif == NULL)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); PF_RULES_RUNLOCK(); return (PF_DROP); } if (kif->pfik_flags & PFI_IFLAG_SKIP) { PF_RULES_RUNLOCK(); return (PF_PASS); } if ((*m0)->m_flags & M_SKIP_FIREWALL) { PF_RULES_RUNLOCK(); return (PF_PASS); } if (__predict_false(! M_WRITABLE(*m0))) { *m0 = m_unshare(*m0, M_NOWAIT); if (*m0 == NULL) { PF_RULES_RUNLOCK(); return (PF_DROP); } } pf_init_pdesc(&pd, *m0); if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_MTAG_FLAG_ROUTE_TO)) { pd.pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO; ifp = ifnet_byindexgen(pd.pf_mtag->if_index, pd.pf_mtag->if_idxgen); if (ifp == NULL || ifp->if_flags & IFF_DYING) { PF_RULES_RUNLOCK(); m_freem(*m0); *m0 = NULL; return (PF_PASS); } PF_RULES_RUNLOCK(); (ifp->if_output)(ifp, *m0, sintosa(&pd.pf_mtag->dst), NULL); *m0 = NULL; return (PF_PASS); } if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL && pd.pf_mtag->flags & PF_MTAG_FLAG_DUMMYNET) { /* Dummynet re-injects packets after they've * completed their delay. We've already * processed them, so pass unconditionally. */ /* But only once. We may see the packet multiple times (e.g. * PFIL_IN/PFIL_OUT). */ pf_dummynet_flag_remove(pd.m, pd.pf_mtag); PF_RULES_RUNLOCK(); return (PF_PASS); } if (pf_setup_pdesc(af, dir, &pd, m0, &action, &reason, kif, default_actions) == -1) { if (action != PF_PASS) pd.act.log |= PF_LOG_FORCE; goto done; } #ifdef INET if (af == AF_INET && dir == PF_OUT && pflags & PFIL_FWD && pd.df && (*m0)->m_pkthdr.len > ifp->if_mtu) { PF_RULES_RUNLOCK(); icmp_error(*m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, ifp->if_mtu); *m0 = NULL; return (PF_DROP); } #endif #ifdef INET6 /* * If we end up changing IP addresses (e.g. binat) the stack may get * confused and fail to send the icmp6 packet too big error. Just send * it here, before we do any NAT. 
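 *
 * Example (sizes assumed): forwarding a 1500 byte IPv6 packet
 * onto a link where IN6_LINKMTU() is 1280 while a binat rule
 * would rewrite the source address; generating the error here,
 * against the not-yet-translated addresses, keeps it routable
 * back to the original sender.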
*/ if (af == AF_INET6 && dir == PF_OUT && pflags & PFIL_FWD && IN6_LINKMTU(ifp) < pf_max_frag_size(*m0)) { PF_RULES_RUNLOCK(); icmp6_error(*m0, ICMP6_PACKET_TOO_BIG, 0, IN6_LINKMTU(ifp)); *m0 = NULL; return (PF_DROP); } #endif if (__predict_false(ip_divert_ptr != NULL) && ((mtag = m_tag_locate(pd.m, MTAG_PF_DIVERT, 0, NULL)) != NULL)) { struct pf_divert_mtag *dt = (struct pf_divert_mtag *)(mtag+1); if ((dt->idir == PF_DIVERT_MTAG_DIR_IN && dir == PF_IN) || (dt->idir == PF_DIVERT_MTAG_DIR_OUT && dir == PF_OUT)) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(pd.m)) == NULL)) { action = PF_DROP; goto done; } pd.pf_mtag->flags |= PF_MTAG_FLAG_PACKET_LOOPED; } if (pd.pf_mtag && pd.pf_mtag->flags & PF_MTAG_FLAG_FASTFWD_OURS_PRESENT) { pd.m->m_flags |= M_FASTFWD_OURS; pd.pf_mtag->flags &= ~PF_MTAG_FLAG_FASTFWD_OURS_PRESENT; } m_tag_delete(pd.m, mtag); mtag = m_tag_locate(pd.m, MTAG_IPFW_RULE, 0, NULL); if (mtag != NULL) m_tag_delete(pd.m, mtag); } switch (pd.virtual_proto) { case PF_VPROTO_FRAGMENT: /* * handle fragments that aren't reassembled by * normalization */ if (kif == NULL || r == NULL) /* pflog */ action = PF_DROP; else action = pf_test_rule(&r, &s, &pd, &a, &ruleset, inp); if (action != PF_PASS) REASON_SET(&reason, PFRES_FRAG); break; case IPPROTO_TCP: { /* Respond to SYN with a syncookie. */ if ((tcp_get_flags(&pd.hdr.tcp) & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN && pd.dir == PF_IN && pf_synflood_check(&pd)) { pf_syncookie_send(&pd); action = PF_DROP; break; } if ((tcp_get_flags(&pd.hdr.tcp) & TH_ACK) && pd.p_len == 0) use_2nd_queue = 1; action = pf_normalize_tcp(&pd); if (action == PF_DROP) goto done; action = pf_test_state(&s, &pd, &reason); if (action == PF_PASS || action == PF_AFRT) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule; a = s->anchor; } else if (s == NULL) { /* Validate remote SYN|ACK, re-create original SYN if * valid. 
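 *
 * Sketch of the syncookie path: the initial SYN was answered
 * by pf_syncookie_send() without creating state; once
 * pf_syncookie_validate() accepts the ACK completing the
 * handshake, pf_syncookie_recreate_syn() rebuilds the original
 * SYN and runs it through pf_test() below so state is created
 * by the ordinary ruleset.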
*/ if ((tcp_get_flags(&pd.hdr.tcp) & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK && pf_syncookie_validate(&pd) && pd.dir == PF_IN) { struct mbuf *msyn; msyn = pf_syncookie_recreate_syn(&pd); if (msyn == NULL) { action = PF_DROP; break; } action = pf_test(af, dir, pflags, ifp, &msyn, inp, &pd.act); m_freem(msyn); if (action != PF_PASS) break; action = pf_test_state(&s, &pd, &reason); if (action != PF_PASS || s == NULL) { action = PF_DROP; break; } s->src.seqhi = ntohl(pd.hdr.tcp.th_ack) - 1; s->src.seqlo = ntohl(pd.hdr.tcp.th_seq) - 1; pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_DST); action = pf_synproxy(&pd, &s, &reason); break; } else { action = pf_test_rule(&r, &s, &pd, &a, &ruleset, inp); } } break; } case IPPROTO_SCTP: action = pf_normalize_sctp(&pd); if (action == PF_DROP) goto done; /* fallthrough */ case IPPROTO_UDP: default: action = pf_test_state(&s, &pd, &reason); if (action == PF_PASS || action == PF_AFRT) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule; a = s->anchor; } else if (s == NULL) { action = pf_test_rule(&r, &s, &pd, &a, &ruleset, inp); } break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: { if (pd.virtual_proto == IPPROTO_ICMP && af != AF_INET) { action = PF_DROP; REASON_SET(&reason, PFRES_NORM); DPFPRINTF(PF_DEBUG_MISC, ("dropping IPv6 packet with ICMPv4 payload")); goto done; } if (pd.virtual_proto == IPPROTO_ICMPV6 && af != AF_INET6) { action = PF_DROP; REASON_SET(&reason, PFRES_NORM); DPFPRINTF(PF_DEBUG_MISC, ("pf: dropping IPv4 packet with ICMPv6 payload\n")); goto done; } action = pf_test_state_icmp(&s, &pd, &reason); if (action == PF_PASS || action == PF_AFRT) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule; a = s->anchor; } else if (s == NULL) action = pf_test_rule(&r, &s, &pd, &a, &ruleset, inp); break; } } done: PF_RULES_RUNLOCK(); if (pd.m == NULL) goto eat_pkt; if (action == PF_PASS && pd.badopts && !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { action = PF_DROP; REASON_SET(&reason, PFRES_IPOPTIONS); pd.act.log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: dropping packet with dangerous headers\n")); } if (s) { uint8_t log = pd.act.log; memcpy(&pd.act, &s->act, sizeof(struct pf_rule_actions)); pd.act.log |= log; tag = s->tag; } else { tag = r->tag; } if (tag > 0 && pf_tag_packet(&pd, tag)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } pf_scrub(&pd); if (pd.proto == IPPROTO_TCP && pd.act.max_mss) pf_normalize_mss(&pd); if (pd.act.rtableid >= 0) M_SETFIB(pd.m, pd.act.rtableid); if (pd.act.flags & PFSTATE_SETPRIO) { if (pd.tos & IPTOS_LOWDELAY) use_2nd_queue = 1; if (vlan_set_pcp(pd.m, pd.act.set_prio[use_2nd_queue])) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); pd.act.log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate 802.1q mtag\n")); } } #ifdef ALTQ if (action == PF_PASS && pd.act.qid) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(pd.m)) == NULL)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } else { if (s != NULL) pd.pf_mtag->qid_hash = pf_state_hash(s); if (use_2nd_queue || (pd.tos & IPTOS_LOWDELAY)) pd.pf_mtag->qid = pd.act.pqid; else pd.pf_mtag->qid = pd.act.qid; /* Add hints for ecn. */ pd.pf_mtag->hdr = mtod(pd.m, void *); } } #endif /* ALTQ */ /* * connections redirected to loopback should not match sockets * bound specifically to loopback due to security implications, * see tcp_input() and in_pcblookup_listen(). 
*/ if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule != NULL && (s->nat_rule->action == PF_RDR || s->nat_rule->action == PF_BINAT) && pf_is_loopback(af, pd.dst)) pd.m->m_flags |= M_SKIP_FIREWALL; if (af == AF_INET && __predict_false(ip_divert_ptr != NULL) && action == PF_PASS && r->divert.port && !PACKET_LOOPED(&pd)) { mtag = m_tag_alloc(MTAG_PF_DIVERT, 0, sizeof(struct pf_divert_mtag), M_NOWAIT | M_ZERO); if (mtag != NULL) { ((struct pf_divert_mtag *)(mtag+1))->port = ntohs(r->divert.port); ((struct pf_divert_mtag *)(mtag+1))->idir = (dir == PF_IN) ? PF_DIVERT_MTAG_DIR_IN : PF_DIVERT_MTAG_DIR_OUT; if (s) PF_STATE_UNLOCK(s); m_tag_prepend(pd.m, mtag); if (pd.m->m_flags & M_FASTFWD_OURS) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(pd.m)) == NULL)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); pd.act.log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate tag\n")); } else { pd.pf_mtag->flags |= PF_MTAG_FLAG_FASTFWD_OURS_PRESENT; pd.m->m_flags &= ~M_FASTFWD_OURS; } } ip_divert_ptr(*m0, dir == PF_IN); *m0 = NULL; return (action); } else { /* XXX: ipfw has the same behaviour! */ action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); pd.act.log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate divert tag\n")); } } /* XXX: Anybody working on it?! */ if (af == AF_INET6 && r->divert.port) printf("pf: divert(9) is not supported for IPv6\n"); /* this flag will need revising if the pkt is forwarded */ if (pd.pf_mtag) pd.pf_mtag->flags &= ~PF_MTAG_FLAG_PACKET_LOOPED; if (pd.act.log) { struct pf_krule *lr; struct pf_krule_item *ri; if (s != NULL && s->nat_rule != NULL && s->nat_rule->log & PF_LOG_ALL) lr = s->nat_rule; else lr = r; if (pd.act.log & PF_LOG_FORCE || lr->log & PF_LOG_ALL) PFLOG_PACKET(action, reason, lr, a, ruleset, &pd, (s == NULL), NULL); if (s) { SLIST_FOREACH(ri, &s->match_rules, entry) if (ri->r->log & PF_LOG_ALL) PFLOG_PACKET(action, reason, ri->r, a, ruleset, &pd, 0, NULL); } } pf_counters_inc(action, &pd, s, r, a); switch (action) { case PF_SYNPROXY_DROP: m_freem(*m0); case PF_DEFER: *m0 = NULL; action = PF_PASS; break; case PF_DROP: m_freem(*m0); *m0 = NULL; break; case PF_AFRT: if (pf_translate_af(&pd)) { if (!pd.m) *m0 = NULL; action = PF_DROP; break; } *m0 = pd.m; /* pf_translate_af may change pd.m */ #ifdef INET if (pd.naf == AF_INET) pf_route(m0, r, kif->pfik_ifp, s, &pd, inp); #endif #ifdef INET6 if (pd.naf == AF_INET6) pf_route6(m0, r, kif->pfik_ifp, s, &pd, inp); #endif *m0 = NULL; action = PF_PASS; goto out; break; default: if (pd.act.rt) { switch (af) { #ifdef INET case AF_INET: /* pf_route() returns unlocked. */ pf_route(m0, r, kif->pfik_ifp, s, &pd, inp); break; #endif #ifdef INET6 case AF_INET6: /* pf_route6() returns unlocked. */ pf_route6(m0, r, kif->pfik_ifp, s, &pd, inp); break; #endif } goto out; } if (pf_dummynet(&pd, s, r, m0) != 0) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } break; } eat_pkt: SDT_PROBE4(pf, ip, test, done, action, reason, r, s); if (s && action != PF_DROP) { if (!s->if_index_in && dir == PF_IN) s->if_index_in = ifp->if_index; else if (!s->if_index_out && dir == PF_OUT) s->if_index_out = ifp->if_index; } if (s) PF_STATE_UNLOCK(s); out: #ifdef INET6 /* If reassembled packet passed, create new fragments. */ if (af == AF_INET6 && action == PF_PASS && *m0 && dir == PF_OUT && (! 
(pflags & PF_PFIL_NOREFRAGMENT)) && (mtag = m_tag_find(pd.m, PACKET_TAG_PF_REASSEMBLED, NULL)) != NULL) action = pf_refragment6(ifp, m0, mtag, NULL, pflags & PFIL_FWD); #endif pf_sctp_multihome_delayed(&pd, kif, s, action); return (action); } #endif /* INET || INET6 */ diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c index 98539be8c6ce..fd72fec62a3b 100644 --- a/sys/netpfil/pf/pf_norm.c +++ b/sys/netpfil/pf/pf_norm.c @@ -1,2262 +1,2262 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2001 Niels Provos * Copyright 2011-2018 Alexander Bluhm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t	fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue			VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree			VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *, struct pf_frent *,
		    struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *, struct pf_frent *);
struct pf_frent	*pf_frent_previous(struct pf_fragment *, struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)
#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}
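/*
 * Illustrative numbers: with 1000 fragment entries allocated when
 * pf_flush_fragments() runs, goal is 1000 * 9 / 10 == 900, so whole
 * fragments are freed from the tail of V_pf_fragqueue (the least
 * recently used end) until at most 900 entries remain.
 */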
/*
 * Remove a fragment from the fragment queue, free its fragment entries,
 * and free the fragment itself.
 */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);

	/* Free all fragment entries */
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}

	return holes;
}

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}
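/*
 * Illustrative numbers: with PF_FRAG_ENTRY_POINTS == 16 the divisor
 * above is 0x10000 / 16 == 4096 octets per entry point, so a fragment
 * at byte offset 1480 maps to index 0, one at offset 9000 to index 2,
 * and the highest possible offset 65528 to index 15.
 */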
static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int	index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each one
	 * spawns 4096 octets.  We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frag, take the final one.  Assume
	 * that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frag, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frag, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frag is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		TAILQ_INIT(&frag->fr_queue);
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		if (frag->fr_af == AF_INET6)
			goto free_fragment;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len) {
			DPFPRINTF(("new frag overlapped\n"));
			goto drop_fragment;
		}
		DPFPRINTF(("frag head overlap %d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		if (aftercut < after->fe_len) {
			DPFPRINTF(("frag tail overlap %d", aftercut));
			m_adj(after->fe_m, aftercut);
			/* Fragment may switch queue as fe_off changes */
			pf_frent_remove(frag, after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			/* Insert into correct queue */
			if (pf_frent_insert(frag, after, prev)) {
				DPFPRINTF(("fragment requeue limit exceeded"));
				m_freem(after->fe_m);
				uma_zfree(V_pf_frent_z, after);
				/* There is no way to recover */
				goto free_fragment;
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		DPFPRINTF(("old frag overlapped\n"));
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

free_fragment:
	/*
	 * RFC 5722, Errata 3089:  When reassembling an IPv6 datagram, if one
	 * or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 */
	DPFPRINTF(("flush overlapping fragments\n"));
	pf_free_fragment(frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}
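/*
 * Trimming example for the IPv4 path above: if the queue already holds
 * a fragment covering bytes 0-1479 and a new one arrives claiming
 * 1472-2951, precut is 1480 - 1472 == 8, so m_adj() strips the first 8
 * payload bytes and the fragment is inserted as 1480-2951.  IPv6 never
 * trims: per RFC 5722 any overlap throws away the whole reassembly
 * queue.
 */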
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent;

	frent = TAILQ_FIRST(&frag->fr_queue);
	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	m = frent->fe_m;
	if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
		m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		if (frent->fe_len < m2->m_pkthdr.len)
			m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_free_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip		*ip = mtod(m, struct ip *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	struct pf_fragment_cmp	key;
	uint16_t		total, hdrlen;
	uint32_t		frag_id;
	uint16_t		maxlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		return (PF_PASS);  /* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL) {
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = 0;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */
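/*
 * Worked example for pf_reassemble(): three IPv4 fragments with 20 byte
 * headers and 1480 byte payloads arrive at offsets 0, 1480 and 2960,
 * the last one with IP_MF clear.  After the join, total is 2960 + 1480
 * == 4440, so ip_len becomes hdrlen + total == 4460 and ip_off is
 * cleared; both header changes are folded into ip_sum incrementally
 * with pf_cksum_fixup() instead of recomputing the whole checksum.
 */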
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) -
	    hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */
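/*
 * The PACKET_TAG_PF_REASSEMBLED tag created by the reassembly functions
 * above carries everything needed to undo the join on output: the
 * unfragmentable header length (ft_hdrlen), the offset of the last
 * extension header (ft_extoff), the largest fragment seen (ft_maxlen)
 * and the original fragment id (ft_id).
 */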
#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    struct ifnet *rt, bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error != 0) {
			m_freem(m);
			continue;
		}
		if (rt != NULL) {
			struct sockaddr_in6	dst;
			hdr = mtod(m, struct ip6_hdr *);

			bzero(&dst, sizeof(dst));
			dst.sin6_family = AF_INET6;
			dst.sin6_len = sizeof(dst);
			dst.sin6_addr = hdr->ip6_dst;

			nd6_output_ifp(rt, rt, m, &dst, NULL);
		} else if (forward) {
			MPASS(m->m_pkthdr.rcvif != NULL);
			ip6_forward(m, 0);
		} else {
			(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
			    NULL);
		}
	}

	return (action);
}
#endif /* INET6 */
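/*
 * Rounding example: a saved ft_maxlen of 1452 becomes 1452 & ~7 == 1448
 * above, so ip6_fragment() emits fragments whose payloads are multiples
 * of 8 octets, as required for all but the final fragment.
 */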
#ifdef INET
int
pf_normalize_ip(u_short *reason, struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip		*h = mtod(pd->m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 verdict;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching
	 *    scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in
	 * no-df mode */
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it. */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(&pd->m, pd->dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		if (pd->m == NULL)
			return (PF_DROP);

		h = mtod(pd->m, struct ip *);
		pd->tot_len = htons(h->ip_len);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off,
			    h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1, NULL);

	return (PF_DROP);
}
#endif
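/*
 * Note on units: IPv4 fragment offsets are carried in 8 octet units,
 * hence the "<< 3" when fragoff is computed above; an ip_off field with
 * low bits 185 describes a fragment starting at byte 185 * 8 == 1480 of
 * the original payload.
 */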
#ifdef INET6
int
pf_normalize_ip6(int off, u_short *reason, struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip6_hdr		*h;
	struct ip6_frag		 frag;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching
	 *    scrub rule
	 * XXX: Fragment reassembly always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&pd->src, AF_INET6,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&pd->dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason,
	    AF_INET6))
		return (PF_DROP);

	/* Offset now points to data portion. */
	off += sizeof(frag);

	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
		/* Returns PF_DROP or *m0 is NULL or completely reassembled
		 * mbuf. */
		if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) !=
		    PF_PASS)
			return (PF_DROP);
		if (pd->m == NULL)
			return (PF_DROP);
		h = mtod(pd->m, struct ip6_hdr *);
		pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	}

	return (PF_PASS);
}
#endif /* INET6 */

int
pf_normalize_tcp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
			    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
			    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
			    pf_osfp_fingerprint(pd, th), r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;
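	/*
	 * Implementation note: the 16 bit word at (&th->th_ack + 1) read
	 * below covers th_off, the reserved bits and th_flags, so capturing
	 * it before and after tcp_set_flags() lets pf_proto_cksum_fixup()
	 * repair th_sum incrementally instead of recomputing the checksum
	 * over the whole segment.
	 */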
	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum,
		    th->th_urp, 0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1, NULL);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
    struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(pd->m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((tcp_get_flags(th) & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL,
	    pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}
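/*
 * Sizing note: the on-stack hdr[60] buffers in pf_normalize_tcp_init()
 * above and pf_normalize_tcp_stateful() below hold the largest possible
 * TCP header: th_off is a 4 bit count of 32 bit words, so 15 * 4 == 60
 * octets, 40 of which may be options.
 */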
int
pf_normalize_tcp_stateful(struct pf_pdesc *pd, u_short *reason,
    struct tcphdr *th, struct pf_kstate *state, struct pf_state_peer *src,
    struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;
	size_t startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(pd->m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL,
	    pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */
				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >=
					    PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32_unaligned(pd->m,
						    &th->th_sum, &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff), 0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32_unaligned(pd->m,
						    &th->th_sum, &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff), 0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(pd->m, pd->off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr),
			    hdr + sizeof(struct tcphdr));
		}
	}
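	/*
	 * Modulation example: with pfss_ts_mod == 0x1234, an outgoing TSval
	 * of 1000 is rewritten above to (1000 + 0x1234) mod 2^32, and the
	 * peer's echoed TSecr has the same constant subtracted on the way
	 * back, hiding the real timestamp clock without breaking RTT
	 * measurement.
	 */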
	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
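	/*
	 * Where the 24 day figure comes from: a 1 kHz timestamp clock walks
	 * through its 2^32 tick space in about 49.7 days, so SEQ_GT()/
	 * SEQ_LT() comparisons stay unambiguous for half of that, roughly
	 * 24.8 days; TS_MAX_IDLE rounds this down to 24 days
	 * (24 * 24 * 60 * 60 seconds).
	 */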
	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */
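		/*
		 * Worked example: with ts_fudge == 30 and a packet arriving
		 * 2.5 s after the previous one, tsval_from_last is
		 * (2 + 30) * 1100 + 500000 / (1000000 / 1100) == 35200 + 550
		 * == 35750 ticks, the most a conforming 1 kHz (+10%) clock
		 * may have advanced without tripping the upper bound above.
		 */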
	} else if (!got_ts && (tcp_get_flags(th) & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub &&
	    (src->scrub->pfss_flags & (PFSS_TIMESTAMP|PFSS_DATA_TS|
	    PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}
int
pf_normalize_mss(struct pf_pdesc *pd)
{
	struct tcphdr	*th = &pd->hdr.tcp;
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;
	size_t		 startoff;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt <= 0 || cnt > MAX_TCPOPTLEN || !pf_pull_hdr(pd->m,
	    pd->off + sizeof(*th), opts, cnt, NULL, NULL, pd->af))
		return (0);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > pd->act.max_mss) {
				pf_patch_16_unaligned(pd->m,
				    &th->th_sum,
				    mss, htons(pd->act.max_mss),
				    PF_ALGNMNT(startoff),
				    0);
				m_copyback(pd->m, pd->off + sizeof(*th),
				    thoff - sizeof(*th), opts);
				m_copyback(pd->m, pd->off, sizeof(*th),
				    (caddr_t)th);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}
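/*
 * Clamping example: if a SYN carries MSS 1460 and the rule sets max-mss
 * 1440, the two MSS payload bytes are patched from 0x05b4 to 0x05a0 and
 * th_sum is adjusted by the same 16 bit delta via
 * pf_patch_16_unaligned(), so the segment stays checksum correct.
 */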
int
pf_scan_sctp(struct pf_pdesc *pd)
{
	struct sctp_chunkhdr ch = { };
	int chunk_off = sizeof(struct sctphdr);
	int chunk_start;
	int ret;

	while (pd->off + chunk_off < pd->tot_len) {
		if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch),
		    NULL, NULL, pd->af))
			return (PF_DROP);

		/* Length includes the header, this must be at least 4. */
		if (ntohs(ch.chunk_length) < 4)
			return (PF_DROP);

		chunk_start = chunk_off;
		chunk_off += roundup(ntohs(ch.chunk_length), 4);

		switch (ch.chunk_type) {
		case SCTP_INITIATION:
		case SCTP_INITIATION_ACK: {
			struct sctp_init_chunk init;

			if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
			    sizeof(init), NULL, NULL, pd->af))
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.3.2, "The Initiate Tag is
			 * allowed to have any value except 0."
			 */
			if (init.init.initiate_tag == 0)
				return (PF_DROP);
			if (init.init.num_inbound_streams == 0)
				return (PF_DROP);
			if (init.init.num_outbound_streams == 0)
				return (PF_DROP);
			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.1, INIT chunks MUST have zero
			 * verification tag.
			 */
			if (ch.chunk_type == SCTP_INITIATION &&
			    pd->hdr.sctp.v_tag != 0)
				return (PF_DROP);

			pd->sctp_initiate_tag = init.init.initiate_tag;

			if (ch.chunk_type == SCTP_INITIATION)
				pd->sctp_flags |= PFDESC_SCTP_INIT;
			else
				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;

			ret = pf_multihome_scan_init(pd->off + chunk_start,
			    ntohs(init.ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);

			break;
		}
		case SCTP_ABORT_ASSOCIATION:
			pd->sctp_flags |= PFDESC_SCTP_ABORT;
			break;
		case SCTP_SHUTDOWN:
		case SCTP_SHUTDOWN_ACK:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
			break;
		case SCTP_COOKIE_ECHO:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
			break;
		case SCTP_COOKIE_ACK:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
			break;
		case SCTP_DATA:
			pd->sctp_flags |= PFDESC_SCTP_DATA;
			break;
		case SCTP_HEARTBEAT_REQUEST:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
			break;
		case SCTP_HEARTBEAT_ACK:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
			break;
		case SCTP_ASCONF:
			pd->sctp_flags |= PFDESC_SCTP_ASCONF;

			ret = pf_multihome_scan_asconf(pd->off + chunk_start,
			    ntohs(ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);
			break;
		default:
			pd->sctp_flags |= PFDESC_SCTP_OTHER;
			break;
		}
	}

	/* Validate chunk lengths vs. packet length. */
	if (pd->off + chunk_off != pd->tot_len)
		return (PF_DROP);

	/*
	 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the
	 * only one in a packet.
	 */
	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_ABORT) &&
	    (pd->sctp_flags & PFDESC_SCTP_DATA)) {
		/*
		 * RFC4960 3.3.7: DATA chunks MUST NOT be
		 * bundled with ABORT.
		 */
		return (PF_DROP);
	}

	return (PF_PASS);
}
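/*
 * Padding example: a chunk_length of 22 advances chunk_off by
 * roundup(22, 4) == 24 octets in pf_scan_sctp() above, matching the
 * RFC 9260 rule that chunks start on 4 byte boundaries; the final
 * pd->off + chunk_off == pd->tot_len comparison then only passes when
 * the declared chunk lengths tile the packet exactly.
 */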
int
pf_normalize_sctp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct sctphdr	*sh = &pd->hdr.sctp;
	u_short		 reason;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
			    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
			    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT];
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present SCTP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify we're a multiple of 4 bytes long */
	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* INIT chunk needs to be the only chunk */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd,
		    1, NULL);

	return (PF_DROP);
}

#if defined(INET) || defined(INET6)
void
pf_scrub(struct pf_pdesc *pd)
{
	struct ip		*h = mtod(pd->m, struct ip *);
#ifdef INET6
	struct ip6_hdr		*h6 = mtod(pd->m, struct ip6_hdr *);
#endif

	/* Clear IP_DF if no-df was requested */
	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
	    h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET && pd->act.min_ttl &&
	    h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
#ifdef INET6
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET6 && pd->act.min_ttl &&
	    h6->ip6_hlim < pd->act.min_ttl)
		h6->ip6_hlim = pd->act.min_ttl;
#endif
	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		switch (pd->af) {
		case AF_INET: {
			u_int16_t	ov, nv;

			ov = *(u_int16_t *)h;
			h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
			nv = *(u_int16_t *)h;

			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
			break;
		}
#ifdef INET6
		case AF_INET6:
			h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
			h6->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h6)) << 20);
			break;
#endif
		}
	}

	/* random-id, but not for fragments */
#ifdef INET
	if (pd->af == AF_INET &&
	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

-		ip_fillid(h);
+		ip_fillid(h, V_ip_random_id);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
#endif
}
#endif