Index: head/sys/contrib/ipfilter/netinet/ip_compat.h =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_compat.h (revision 358557) +++ head/sys/contrib/ipfilter/netinet/ip_compat.h (revision 358558) @@ -1,1281 +1,1281 @@ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. * * @(#)ip_compat.h 1.8 1/14/96 * $FreeBSD$ * Id: ip_compat.h,v 2.142.2.57 2007/10/10 09:51:42 darrenr Exp $ */ #ifndef __IP_COMPAT_H__ #define __IP_COMPAT_H__ #ifndef __P # ifdef __STDC__ # define __P(x) x # else # define __P(x) () # endif #endif #ifndef __STDC__ # undef const # define const #endif #if defined(_KERNEL) || defined(KERNEL) || defined(__KERNEL__) # undef KERNEL # undef _KERNEL # undef __KERNEL__ # define KERNEL # define _KERNEL # define __KERNEL__ #endif #ifndef SOLARIS # if defined(sun) && defined(__SVR4) # define SOLARIS 1 # else # define SOLARIS 0 # endif #endif #if defined(__SVR4) # define index strchr # if !defined(_KERNEL) # define bzero(a,b) memset(a,0,b) # define bcmp memcmp # define bcopy(a,b,c) memmove(b,a,c) # endif #endif #ifndef LIFNAMSIZ # ifdef IF_NAMESIZE # define LIFNAMSIZ IF_NAMESIZE # else # ifdef IFNAMSIZ # define LIFNAMSIZ IFNAMSIZ # else # define LIFNAMSIZ 16 # endif # endif #endif # ifdef __STDC__ # define IPL_EXTERN(ep) ipl##ep # else # define IPL_EXTERN(ep) ipl/**/ep # endif /* * This is a workaround for troubles on FreeBSD and OpenBSD. */ # ifndef _KERNEL # define ADD_KERNEL # define _KERNEL # define KERNEL # endif # include # ifdef ADD_KERNEL # undef _KERNEL # undef KERNEL # endif #define NETBSD_GE_REV(x) (defined(__NetBSD_Version__) && \ (__NetBSD_Version__ >= (x))) #define NETBSD_GT_REV(x) (defined(__NetBSD_Version__) && \ (__NetBSD_Version__ > (x))) #define NETBSD_LT_REV(x) (defined(__NetBSD_Version__) && \ (__NetBSD_Version__ < (x))) #define FREEBSD_GE_REV(x) (defined(__FreeBSD_version) && \ (__FreeBSD_version >= (x))) #define FREEBSD_GT_REV(x) (defined(__FreeBSD_version) && \ (__FreeBSD_version > (x))) #define FREEBSD_LT_REV(x) (defined(__FreeBSD_version) && \ (__FreeBSD_version < (x))) #define BSD_GE_YEAR(x) (defined(BSD) && (BSD >= (x))) #define BSD_GT_YEAR(x) (defined(BSD) && (BSD > (x))) #define BSD_LT_YEAR(x) (defined(BSD) && (BSD < (x))) /* ----------------------------------------------------------------------- */ /* F R E E B S D */ /* ----------------------------------------------------------------------- */ # define HAS_SYS_MD5_H 1 # if defined(_KERNEL) # include "opt_bpf.h" # include "opt_inet6.h" # if defined(INET6) && !defined(USE_INET6) # define USE_INET6 # endif # else # if !defined(USE_INET6) && !defined(NOINET6) # define USE_INET6 # endif # endif # if defined(_KERNEL) # include # define p_cred td_ucred # define p_uid td_ucred->cr_ruid /* * When #define'd, the 5.2.1 kernel panics when used with the ftp proxy. * There may be other, safe, kernels but this is not extensively tested yet. 
*/ # define HAVE_M_PULLDOWN -# if !defined(IPFILTER_LKM) && (__FreeBSD_version >= 300000) +# if !defined(IPFILTER_LKM) && defined(__FreeBSD_version) # include "opt_ipfilter.h" # endif # define COPYIN(a,b,c) copyin((caddr_t)(a), (caddr_t)(b), (c)) # define COPYOUT(a,b,c) copyout((caddr_t)(a), (caddr_t)(b), (c)) # else # include # endif /* _KERNEL */ # include # include # include # include # define KRWLOCK_FILL_SZ 56 # define KMUTEX_FILL_SZ 56 # include # define KMUTEX_T struct mtx # define KRWLOCK_T struct rwlock #ifdef _KERNEL # define READ_ENTER(x) rw_rlock(&(x)->ipf_lk) # define WRITE_ENTER(x) rw_wlock(&(x)->ipf_lk) # define MUTEX_DOWNGRADE(x) rw_downgrade(&(x)->ipf_lk) # define MUTEX_TRY_UPGRADE(x) rw_try_upgrade(&(x)->ipf_lk) # define RWLOCK_INIT(x,y) rw_init(&(x)->ipf_lk, (y)) # define RW_DESTROY(x) rw_destroy(&(x)->ipf_lk) # define RWLOCK_EXIT(x) do { \ if (rw_wowned(&(x)->ipf_lk)) \ rw_wunlock(&(x)->ipf_lk); \ else \ rw_runlock(&(x)->ipf_lk); \ } while (0) # include # define GETKTIME(x) microtime((struct timeval *)x) # define if_addrlist if_addrhead # include # include # include # define USE_MUTEXES # define MUTEX_ENTER(x) mtx_lock(&(x)->ipf_lk) # define MUTEX_EXIT(x) mtx_unlock(&(x)->ipf_lk) # define MUTEX_INIT(x,y) mtx_init(&(x)->ipf_lk, (y), NULL,\ MTX_DEF) # define MUTEX_DESTROY(x) mtx_destroy(&(x)->ipf_lk) # define MUTEX_NUKE(x) bzero((x), sizeof(*(x))) /* * Whilst the sx(9) locks on FreeBSD have the right semantics and interface * for what we want to use them for, despite testing showing they work - * with a WITNESS kernel, it generates LOR messages. */ # include # define ATOMIC_INC(x) { mtx_lock(&softc->ipf_rw.ipf_lk); (x)++; \ mtx_unlock(&softc->ipf_rw.ipf_lk); } # define ATOMIC_DEC(x) { mtx_lock(&softc->ipf_rw.ipf_lk); (x)--; \ mtx_unlock(&softc->ipf_rw.ipf_lk); } # define ATOMIC_INCL(x) atomic_add_long(&(x), 1) # define ATOMIC_INC64(x) ATOMIC_INC(x) # define ATOMIC_INC32(x) atomic_add_32((u_int *)&(x), 1) # define ATOMIC_DECL(x) atomic_add_long(&(x), -1) # define ATOMIC_DEC64(x) ATOMIC_DEC(x) # define ATOMIC_DEC32(x) atomic_add_32((u_int *)&(x), -1) # define SPL_X(x) ; # define SPL_NET(x) ; # define SPL_IMP(x) ; # define SPL_SCHED(x) ; # define GET_MINOR dev2unit # define MSGDSIZE(m) mbufchainlen(m) # define M_LEN(m) (m)->m_len # define M_ADJ(m,x) m_adj(m, x) # define M_COPY(x) m_copym((x), 0, M_COPYALL, M_NOWAIT) # define M_DUP(m) m_dup(m, M_NOWAIT) # define IPF_PANIC(x,y) if (x) { printf y; panic("ipf_panic"); } typedef struct mbuf mb_t; #else /* !_KERNEL */ #ifndef _NET_IF_VAR_H_ /* * Userland emulation of struct ifnet. */ struct route; struct mbuf; struct ifnet { char if_xname[IFNAMSIZ]; STAILQ_HEAD(, ifaddr) if_addrlist; int (*if_output)(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); }; #endif /* _NET_IF_VAR_H_ */ #endif /* _KERNEL */ # define IFNAME(x) ((struct ifnet *)x)->if_xname # define COPYIFNAME(v, x, b) \ (void) strncpy(b, \ ((struct ifnet *)x)->if_xname, \ LIFNAMSIZ) typedef u_long ioctlcmd_t; typedef struct uio uio_t; typedef int minor_t; typedef u_int32_t u_32_t; # define U_32_T 1 /* ----------------------------------------------------------------------- */ /* G E N E R I C */ /* ----------------------------------------------------------------------- */ /* * For BSD kernels, if bpf is in the kernel, enable ipfilter to use bpf in * filter rules. 
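/*
 * Editor's note, not part of this commit: the hunk above replaces the
 * numeric test "__FreeBSD_version >= 300000" with a plain
 * defined(__FreeBSD_version) check, presumably because every FreeBSD
 * release this code still builds on is well past 3.0, so only the
 * "is this FreeBSD at all" half of the old test carried any meaning.
 * Below is a minimal sketch of how the FREEBSD_GE_REV()/NETBSD_GE_REV()
 * helpers defined earlier are meant to be used; the version numbers are
 * example values only.  (Strictly speaking, having defined() appear in
 * an #if via macro expansion is left undefined by the C standard, but
 * the compilers used on these platforms accept the idiom.)
 */
#if FREEBSD_GE_REV(1100000)
	/* code that may assume FreeBSD 11.0-or-newer kernel interfaces */
#endif
#if NETBSD_GE_REV(105180000)
	/* code that may assume a sufficiently new NetBSD
	   (value taken from a check used later in this header) */
#endif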
*/ #if !defined(IPFILTER_BPF) # if (defined(NBPF) && (NBPF > 0)) || (defined(DEV_BPF) && (DEV_BPF > 0)) || \ (defined(NBPFILTER) && (NBPFILTER > 0)) # define IPFILTER_BPF # endif #endif /* * Userland locking primitives */ #ifndef _KERNEL #if !defined(KMUTEX_FILL_SZ) # define KMUTEX_FILL_SZ 1 #endif #if !defined(KRWLOCK_FILL_SZ) # define KRWLOCK_FILL_SZ 1 #endif #endif typedef struct { char *eMm_owner; char *eMm_heldin; u_int eMm_magic; int eMm_held; int eMm_heldat; } eMmutex_t; typedef struct { char *eMrw_owner; char *eMrw_heldin; u_int eMrw_magic; short eMrw_read; short eMrw_write; int eMrw_heldat; } eMrwlock_t; typedef union { char _fill[KMUTEX_FILL_SZ]; #ifdef KMUTEX_T struct { KMUTEX_T ipf_slk; const char *ipf_lname; } ipf_lkun_s; #endif eMmutex_t ipf_emu; } ipfmutex_t; typedef union { char _fill[KRWLOCK_FILL_SZ]; #ifdef KRWLOCK_T struct { KRWLOCK_T ipf_slk; const char *ipf_lname; int ipf_sr; int ipf_sw; u_int ipf_magic; } ipf_lkun_s; #endif eMrwlock_t ipf_emu; } ipfrwlock_t; #define ipf_lk ipf_lkun_s.ipf_slk #define ipf_lname ipf_lkun_s.ipf_lname #define ipf_isr ipf_lkun_s.ipf_sr #define ipf_isw ipf_lkun_s.ipf_sw #define ipf_magic ipf_lkun_s.ipf_magic #if !defined(__GNUC__) || defined(__FreeBSD_version) # ifndef INLINE # define INLINE # endif #else # define INLINE __inline__ #endif #if defined(__FreeBSD_version) && defined(_KERNEL) CTASSERT(sizeof(ipfrwlock_t) == KRWLOCK_FILL_SZ); CTASSERT(sizeof(ipfmutex_t) == KMUTEX_FILL_SZ); #endif /* * In a non-kernel environment, there are a lot of macros that need to be * filled in to be null-ops or to point to some compatibility function, * somewhere in userland. */ #ifndef _KERNEL typedef struct mb_s { struct mb_s *mb_next; char *mb_data; void *mb_ifp; int mb_len; int mb_flags; u_long mb_buf[2048]; } mb_t; # undef m_next # define m_next mb_next # undef m_len # define m_len mb_len # undef m_flags # define m_flags mb_flags # undef m_data # define m_data mb_data # undef M_MCAST # define M_MCAST 0x01 # undef M_BCAST # define M_BCAST 0x02 # undef M_MBCAST # define M_MBCAST 0x04 # define MSGDSIZE(m) msgdsize(m) # define M_LEN(m) (m)->mb_len # define M_ADJ(m,x) (m)->mb_len += x # define M_COPY(m) dupmbt(m) # define M_DUP(m) dupmbt(m) # define GETKTIME(x) gettimeofday((struct timeval *)(x), NULL) # define MTOD(m, t) ((t)(m)->mb_data) # define FREE_MB_T(m) freembt(m) # define ALLOC_MB_T(m,l) (m) = allocmbt(l) # define PREP_MB_T(f, m) do { \ (m)->mb_next = *(f)->fin_mp; \ *(fin)->fin_mp = (m); \ (f)->fin_m = (m); \ } while (0) # define SLEEP(x,y) 1; # define WAKEUP(x,y) ; # define POLLWAKEUP(y) ; # define IPF_PANIC(x,y) ; # define PANIC(x,y) ; # define SPL_SCHED(x) ; # define SPL_NET(x) ; # define SPL_IMP(x) ; # define SPL_X(x) ; # define KMALLOC(a,b) (a) = (b)malloc(sizeof(*a)) # define KMALLOCS(a,b,c) (a) = (b)malloc(c) # define KFREE(x) free(x) # define KFREES(x,s) free(x) # define GETIFP(x, v) get_unit(x,v) # define GETIFMTU_4(x) 2048 # define GETIFMTU_6(x) 2048 # define COPYIN(a,b,c) bcopywrap((a), (b), (c)) # define COPYOUT(a,b,c) bcopywrap((a), (b), (c)) # define COPYDATA(m, o, l, b) bcopy(MTOD((mb_t *)m, char *) + (o), \ (b), (l)) # define COPYBACK(m, o, l, b) bcopy((b), \ MTOD((mb_t *)m, char *) + (o), \ (l)) # define UIOMOVE(a,b,c,d) ipfuiomove((caddr_t)a,b,c,d) extern void m_copydata __P((mb_t *, int, int, caddr_t)); extern int ipfuiomove __P((caddr_t, int, int, struct uio *)); extern int bcopywrap __P((void *, void *, size_t)); extern mb_t *allocmbt __P((size_t)); extern mb_t *dupmbt __P((mb_t *)); extern void freembt __P((mb_t *)); # define 
MUTEX_DESTROY(x) eMmutex_destroy(&(x)->ipf_emu, \ __FILE__, __LINE__) # define MUTEX_ENTER(x) eMmutex_enter(&(x)->ipf_emu, \ __FILE__, __LINE__) # define MUTEX_EXIT(x) eMmutex_exit(&(x)->ipf_emu, \ __FILE__, __LINE__) # define MUTEX_INIT(x,y) eMmutex_init(&(x)->ipf_emu, y, \ __FILE__, __LINE__) # define MUTEX_NUKE(x) bzero((x), sizeof(*(x))) # define MUTEX_DOWNGRADE(x) eMrwlock_downgrade(&(x)->ipf_emu, \ __FILE__, __LINE__) # define MUTEX_TRY_UPGRADE(x) eMrwlock_try_upgrade(&(x)->ipf_emu, \ __FILE__, __LINE__) # define READ_ENTER(x) eMrwlock_read_enter(&(x)->ipf_emu, \ __FILE__, __LINE__) # define RWLOCK_INIT(x, y) eMrwlock_init(&(x)->ipf_emu, y) # define RWLOCK_EXIT(x) eMrwlock_exit(&(x)->ipf_emu) # define RW_DESTROY(x) eMrwlock_destroy(&(x)->ipf_emu) # define WRITE_ENTER(x) eMrwlock_write_enter(&(x)->ipf_emu, \ __FILE__, \ __LINE__) # define USE_MUTEXES 1 extern void eMmutex_destroy __P((eMmutex_t *, char *, int)); extern void eMmutex_enter __P((eMmutex_t *, char *, int)); extern void eMmutex_exit __P((eMmutex_t *, char *, int)); extern void eMmutex_init __P((eMmutex_t *, char *, char *, int)); extern void eMrwlock_destroy __P((eMrwlock_t *)); extern void eMrwlock_exit __P((eMrwlock_t *)); extern void eMrwlock_init __P((eMrwlock_t *, char *)); extern void eMrwlock_read_enter __P((eMrwlock_t *, char *, int)); extern void eMrwlock_write_enter __P((eMrwlock_t *, char *, int)); extern void eMrwlock_downgrade __P((eMrwlock_t *, char *, int)); #endif extern mb_t *allocmbt(size_t); #define MAX_IPV4HDR ((0xf << 2) + sizeof(struct icmp) + sizeof(ip_t) + 8) #ifndef IP_OFFMASK # define IP_OFFMASK 0x1fff #endif /* * On BSD's use quad_t as a guarantee for getting at least a 64bit sized * object. */ #if !defined(__amd64__) && BSD_GT_YEAR(199306) # define USE_QUAD_T # define U_QUAD_T unsigned long long # define QUAD_T long long #else /* BSD > 199306 */ # if !defined(U_QUAD_T) # define U_QUAD_T u_long # define QUAD_T long # endif #endif /* BSD > 199306 */ #ifdef USE_INET6 # if defined(__NetBSD__) || defined(__FreeBSD__) # include # include # if defined(_KERNEL) # include # endif typedef struct ip6_hdr ip6_t; # endif #endif #ifndef MAX # define MAX(a,b) (((a) > (b)) ? 
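/*
 * Editor's sketch, not part of this commit: the MUTEX_xxx and RWLOCK_xxx
 * names resolve to mtx(9)/rw(9) in the FreeBSD kernel build above and to
 * the eMmutex/eMrwlock emulation just declared when built for userland,
 * so locking code can be written once for both.  The lock variable and
 * the function below are hypothetical.
 */
static ipfrwlock_t example_lock;		/* hypothetical lock */

static void
example_locked_walk(void)
{
	RWLOCK_INIT(&example_lock, "example rwlock");
	READ_ENTER(&example_lock);	/* rw_rlock() or eMrwlock_read_enter() */
	/* ... inspect some shared table ... */
	RWLOCK_EXIT(&example_lock);	/* picks read vs write unlock itself */
	RW_DESTROY(&example_lock);
}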
(a) : (b)) #endif #if defined(_KERNEL) # if defined(MENTAT) && !defined(INSTANCES) # define COPYDATA mb_copydata # define COPYBACK mb_copyback # else # define COPYDATA m_copydata # define COPYBACK m_copyback # endif # if (defined(__NetBSD_Version__) && (__NetBSD_Version__ < 105180000)) || \ defined(__FreeBSD__) # include # endif # if NETBSD_GE_REV(105180000) # include # else # include extern vm_map_t kmem_map; # endif # include # ifdef IPFILTER_M_IPFILTER # include MALLOC_DECLARE(M_IPFILTER); # define _M_IPF M_IPFILTER # else /* IPFILTER_M_IPFILTER */ # ifdef M_PFIL # define _M_IPF M_PFIL # else # ifdef M_IPFILTER # define _M_IPF M_IPFILTER # else # define _M_IPF M_TEMP # endif /* M_IPFILTER */ # endif /* M_PFIL */ # endif /* IPFILTER_M_IPFILTER */ # if !defined(KMALLOC) # define KMALLOC(a, b) (a) = (b)malloc(sizeof(*(a)), _M_IPF, M_NOWAIT) # endif # if !defined(KMALLOCS) # define KMALLOCS(a, b, c) (a) = (b)malloc((c), _M_IPF, M_NOWAIT) # endif # if !defined(KFREE) # define KFREE(x) free((x), _M_IPF) # endif # if !defined(KFREES) # define KFREES(x,s) free((x), _M_IPF) # endif # define UIOMOVE(a,b,c,d) uiomove((caddr_t)a,b,d) # define SLEEP(id, n) tsleep((id), PPAUSE|PCATCH, n, 0) # define WAKEUP(id,x) wakeup(id+x) # if !defined(POLLWAKEUP) # define POLLWAKEUP(x) selwakeup(softc->ipf_selwait+x) # endif # define GETIFP(n, v) ifunit(n) # define GETIFMTU_4(x) ((struct ifnet *)x)->if_mtu # define GETIFMTU_6(x) ((struct ifnet *)x)->if_mtu # if !defined(USE_MUTEXES) && !defined(SPL_NET) # define SPL_IMP(x) x = splimp() # define SPL_NET(x) x = splnet() # if !defined(SPL_SCHED) # define SPL_SCHED(x) x = splsched() # endif # define SPL_X(x) (void) splx(x) # endif /* !USE_MUTEXES */ # ifndef FREE_MB_T # define FREE_MB_T(m) m_freem(m) # endif # ifndef ALLOC_MB_T # ifdef MGETHDR # define ALLOC_MB_T(m,l) do { \ MGETHDR((m), M_NOWAIT, MT_HEADER); \ if ((m) != NULL) { \ (m)->m_len = (l); \ (m)->m_pkthdr.len = (l); \ } \ } while (0) # else # define ALLOC_MB_T(m,l) do { \ MGET((m), M_NOWAIT, MT_HEADER); \ if ((m) != NULL) { \ (m)->m_len = (l); \ (m)->m_pkthdr.len = (l); \ } \ } while (0) # endif # endif # ifndef PREP_MB_T # define PREP_MB_T(f, m) do { \ mb_t *_o = *(f)->fin_mp; \ (m)->m_next = _o; \ *(fin)->fin_mp = (m); \ if (_o->m_flags & M_PKTHDR) { \ (m)->m_pkthdr.len += \ _o->m_pkthdr.len; \ (m)->m_pkthdr.rcvif = \ _o->m_pkthdr.rcvif; \ } \ } while (0) # endif # ifndef M_DUP # ifdef M_COPYALL # define M_DUP(m) m_dup(m, 0, M_COPYALL, 0) # else # define M_DUP(m) m_dup(m) # endif # endif # ifndef MTOD # define MTOD(m,t) mtod(m,t) # endif # ifndef COPYIN # define COPYIN(a,b,c) (bcopy((caddr_t)(a), (caddr_t)(b), (c)), 0) # define COPYOUT(a,b,c) (bcopy((caddr_t)(a), (caddr_t)(b), (c)), 0) # endif # if SOLARIS && !defined(KMALLOC) # define KMALLOC(a,b) (a) = (b)new_kmem_alloc(sizeof(*(a)), \ KMEM_NOSLEEP) # define KMALLOCS(a,b,c) (a) = (b)new_kmem_alloc((c), KMEM_NOSLEEP) # endif # ifndef GET_MINOR # define GET_MINOR(x) dev2unit(x) # endif # define PANIC(x,y) if (x) panic y #endif /* _KERNEL */ #if !defined(IFNAME) && !defined(_KERNEL) # define IFNAME(x) get_ifname((struct ifnet *)x) #endif #ifndef COPYIFNAME # define NEED_FRGETIFNAME extern char *ipf_getifname __P((struct ifnet *, char *)); # define COPYIFNAME(v, x, b) \ ipf_getifname((struct ifnet *)x, b) #endif #ifndef ASSERT # ifdef _KERNEL # define ASSERT(x) # else # define ASSERT(x) do { if (!(x)) abort(); } while (0) # endif #endif #ifndef BCOPYIN # define BCOPYIN(a,b,c) (bcopy((caddr_t)(a), (caddr_t)(b), (c)), 0) # define BCOPYOUT(a,b,c) 
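/*
 * Editor's sketch, not part of this commit: minimal use of the kernel
 * mbuf wrappers defined above.  The helper and the 64-byte length are
 * hypothetical; ALLOC_MB_T() can hand back NULL because it allocates
 * with M_NOWAIT.
 */
static int
example_make_buf(mb_t **mp)			/* hypothetical helper */
{
	mb_t *m;

	ALLOC_MB_T(m, 64);		/* MGETHDR + m_len/m_pkthdr.len setup */
	if (m == NULL)
		return ENOBUFS;
	bzero(MTOD(m, char *), 64);	/* MTOD -> mtod() in the kernel build */
	*mp = m;			/* caller eventually does FREE_MB_T(m) */
	return 0;
}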
(bcopy((caddr_t)(a), (caddr_t)(b), (c)), 0) #endif /* * Because the ctype(3) posix definition, if used "safely" in code everywhere, * would mean all normal code that walks through strings needed casts. Yuck. */ #define ISALNUM(x) isalnum((u_char)(x)) #define ISALPHA(x) isalpha((u_char)(x)) #define ISDIGIT(x) isdigit((u_char)(x)) #define ISSPACE(x) isspace((u_char)(x)) #define ISUPPER(x) isupper((u_char)(x)) #define ISXDIGIT(x) isxdigit((u_char)(x)) #define ISLOWER(x) islower((u_char)(x)) #define TOUPPER(x) toupper((u_char)(x)) #define TOLOWER(x) tolower((u_char)(x)) /* * If mutexes aren't being used, turn all the mutex functions into null-ops. */ #if !defined(USE_MUTEXES) # define USE_SPL 1 # undef RW_DESTROY # undef MUTEX_INIT # undef MUTEX_NUKE # undef MUTEX_DESTROY # define MUTEX_ENTER(x) ; # define READ_ENTER(x) ; # define WRITE_ENTER(x) ; # define MUTEX_DOWNGRADE(x) ; # define MUTEX_TRY_UPGRADE(x) ; # define RWLOCK_INIT(x, y) ; # define RWLOCK_EXIT(x) ; # define RW_DESTROY(x) ; # define MUTEX_EXIT(x) ; # define MUTEX_INIT(x,y) ; # define MUTEX_DESTROY(x) ; # define MUTEX_NUKE(x) ; #endif /* !USE_MUTEXES */ #ifndef ATOMIC_INC # define ATOMIC_INC(x) (x)++ # define ATOMIC_DEC(x) (x)-- #endif #if defined(USE_SPL) && defined(_KERNEL) # define SPL_INT(x) int x #else # define SPL_INT(x) #endif /* * If there are no atomic operations for bit sizes defined, define them to all * use a generic one that works for all sizes. */ #ifndef ATOMIC_INCL # define ATOMIC_INCL ATOMIC_INC # define ATOMIC_INC64 ATOMIC_INC # define ATOMIC_INC32 ATOMIC_INC # define ATOMIC_DECL ATOMIC_DEC # define ATOMIC_DEC64 ATOMIC_DEC # define ATOMIC_DEC32 ATOMIC_DEC #endif #ifndef HDR_T_PRIVATE typedef struct tcphdr tcphdr_t; typedef struct udphdr udphdr_t; #endif typedef struct icmp icmphdr_t; typedef struct ip ip_t; typedef struct ether_header ether_header_t; typedef struct tcpiphdr tcpiphdr_t; #ifndef FR_GROUPLEN # define FR_GROUPLEN 16 #endif #ifndef offsetof # define offsetof(t,m) (size_t)((&((t *)0L)->m)) #endif #ifndef stsizeof # define stsizeof(t,m) sizeof(((t *)0L)->m) #endif /* * This set of macros has been brought about because on Tru64 it is not * possible to easily assign or examine values in a structure that are * bit fields. */ #ifndef IP_V # define IP_V(x) (x)->ip_v #endif #ifndef IP_V_A # define IP_V_A(x,y) (x)->ip_v = (y) #endif #ifndef IP_HL # define IP_HL(x) (x)->ip_hl #endif #ifndef IP_HL_A # define IP_HL_A(x,y) (x)->ip_hl = ((y) & 0xf) #endif #ifndef TCP_X2 # define TCP_X2(x) (x)->th_x2 #endif #ifndef TCP_X2_A # define TCP_X2_A(x,y) (x)->th_x2 = (y) #endif #ifndef TCP_OFF # define TCP_OFF(x) (x)->th_off #endif #ifndef TCP_OFF_A # define TCP_OFF_A(x,y) (x)->th_off = (y) #endif #define IPMINLEN(i, h) ((i)->ip_len >= (IP_HL(i) * 4 + sizeof(struct h))) #define TCPF_ALL (TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG|\ TH_ECN|TH_CWR) #if BSD_GE_YEAR(199306) && !defined(m_act) # define m_act m_nextpkt #endif /* * Security Options for Intenet Protocol (IPSO) as defined in RFC 1108. 
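/*
 * Editor's sketch, not part of this commit: the IP_V()/IP_HL() style
 * accessors above exist so the same source works on platforms (Tru64 is
 * named in the comment) where the header bit fields cannot be poked at
 * directly.  The helper below is hypothetical.
 */
static int
example_ipv4_hlen(ip_t *ip)
{
	int hlen;

	if (IP_V(ip) != 4)		/* not an IPv4 header */
		return -1;
	hlen = IP_HL(ip) << 2;		/* IP_HL() counts 32-bit words */
	if (hlen < (int)sizeof(ip_t))
		return -1;		/* malformed length */
	return hlen;
}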
* * Basic Option * * 00000001 - (Reserved 4) * 00111101 - Top Secret * 01011010 - Secret * 10010110 - Confidential * 01100110 - (Reserved 3) * 11001100 - (Reserved 2) * 10101011 - Unclassified * 11110001 - (Reserved 1) */ #define IPSO_CLASS_RES4 0x01 #define IPSO_CLASS_TOPS 0x3d #define IPSO_CLASS_SECR 0x5a #define IPSO_CLASS_CONF 0x96 #define IPSO_CLASS_RES3 0x66 #define IPSO_CLASS_RES2 0xcc #define IPSO_CLASS_UNCL 0xab #define IPSO_CLASS_RES1 0xf1 #define IPSO_AUTH_GENSER 0x80 #define IPSO_AUTH_ESI 0x40 #define IPSO_AUTH_SCI 0x20 #define IPSO_AUTH_NSA 0x10 #define IPSO_AUTH_DOE 0x08 #define IPSO_AUTH_UN 0x06 #define IPSO_AUTH_FTE 0x01 /* * IP option #defines */ #undef IPOPT_RR #define IPOPT_RR 7 #undef IPOPT_ZSU #define IPOPT_ZSU 10 /* ZSU */ #undef IPOPT_MTUP #define IPOPT_MTUP 11 /* MTUP */ #undef IPOPT_MTUR #define IPOPT_MTUR 12 /* MTUR */ #undef IPOPT_ENCODE #define IPOPT_ENCODE 15 /* ENCODE */ #undef IPOPT_TS #define IPOPT_TS 68 #undef IPOPT_TR #define IPOPT_TR 82 /* TR */ #undef IPOPT_SECURITY #define IPOPT_SECURITY 130 #undef IPOPT_LSRR #define IPOPT_LSRR 131 #undef IPOPT_E_SEC #define IPOPT_E_SEC 133 /* E-SEC */ #undef IPOPT_CIPSO #define IPOPT_CIPSO 134 /* CIPSO */ #undef IPOPT_SATID #define IPOPT_SATID 136 #ifndef IPOPT_SID # define IPOPT_SID IPOPT_SATID #endif #undef IPOPT_SSRR #define IPOPT_SSRR 137 #undef IPOPT_ADDEXT #define IPOPT_ADDEXT 147 /* ADDEXT */ #undef IPOPT_VISA #define IPOPT_VISA 142 /* VISA */ #undef IPOPT_IMITD #define IPOPT_IMITD 144 /* IMITD */ #undef IPOPT_EIP #define IPOPT_EIP 145 /* EIP */ #undef IPOPT_RTRALRT #define IPOPT_RTRALRT 148 /* RTRALRT */ #undef IPOPT_SDB #define IPOPT_SDB 149 #undef IPOPT_NSAPA #define IPOPT_NSAPA 150 #undef IPOPT_DPS #define IPOPT_DPS 151 #undef IPOPT_UMP #define IPOPT_UMP 152 #undef IPOPT_FINN #define IPOPT_FINN 205 /* FINN */ #undef IPOPT_AH #define IPOPT_AH 256+IPPROTO_AH # define ICMP_UNREACH_ADMIN_PROHIBIT ICMP_UNREACH_FILTER_PROHIB # define ICMP_UNREACH_FILTER ICMP_UNREACH_FILTER_PROHIB #ifndef IPVERSION # define IPVERSION 4 #endif #ifndef IPOPT_MINOFF # define IPOPT_MINOFF 4 #endif #ifndef IPOPT_COPIED # define IPOPT_COPIED(x) ((x)&0x80) #endif #ifndef IPOPT_EOL # define IPOPT_EOL 0 #endif #ifndef IPOPT_NOP # define IPOPT_NOP 1 #endif #ifndef IP_MF # define IP_MF ((u_short)0x2000) #endif #ifndef ETHERTYPE_IP # define ETHERTYPE_IP ((u_short)0x0800) #endif #ifndef TH_FIN # define TH_FIN 0x01 #endif #ifndef TH_SYN # define TH_SYN 0x02 #endif #ifndef TH_RST # define TH_RST 0x04 #endif #ifndef TH_PUSH # define TH_PUSH 0x08 #endif #ifndef TH_ACK # define TH_ACK 0x10 #endif #ifndef TH_URG # define TH_URG 0x20 #endif #undef TH_ACKMASK #define TH_ACKMASK (TH_FIN|TH_SYN|TH_RST|TH_ACK) #ifndef IPOPT_EOL # define IPOPT_EOL 0 #endif #ifndef IPOPT_NOP # define IPOPT_NOP 1 #endif #ifndef IPOPT_RR # define IPOPT_RR 7 #endif #ifndef IPOPT_TS # define IPOPT_TS 68 #endif #ifndef IPOPT_SECURITY # define IPOPT_SECURITY 130 #endif #ifndef IPOPT_LSRR # define IPOPT_LSRR 131 #endif #ifndef IPOPT_SATID # define IPOPT_SATID 136 #endif #ifndef IPOPT_SSRR # define IPOPT_SSRR 137 #endif #ifndef IPOPT_SECUR_UNCLASS # define IPOPT_SECUR_UNCLASS ((u_short)0x0000) #endif #ifndef IPOPT_SECUR_CONFID # define IPOPT_SECUR_CONFID ((u_short)0xf135) #endif #ifndef IPOPT_SECUR_EFTO # define IPOPT_SECUR_EFTO ((u_short)0x789a) #endif #ifndef IPOPT_SECUR_MMMM # define IPOPT_SECUR_MMMM ((u_short)0xbc4d) #endif #ifndef IPOPT_SECUR_RESTR # define IPOPT_SECUR_RESTR ((u_short)0xaf13) #endif #ifndef IPOPT_SECUR_SECRET # define IPOPT_SECUR_SECRET ((u_short)0xd788) 
#endif #ifndef IPOPT_SECUR_TOPSECRET # define IPOPT_SECUR_TOPSECRET ((u_short)0x6bc5) #endif #ifndef IPOPT_OLEN # define IPOPT_OLEN 1 #endif #ifndef IPPROTO_HOPOPTS # define IPPROTO_HOPOPTS 0 #endif #ifndef IPPROTO_IPIP # define IPPROTO_IPIP 4 #endif #ifndef IPPROTO_ENCAP # define IPPROTO_ENCAP 98 #endif #ifndef IPPROTO_IPV6 # define IPPROTO_IPV6 41 #endif #ifndef IPPROTO_ROUTING # define IPPROTO_ROUTING 43 #endif #ifndef IPPROTO_FRAGMENT # define IPPROTO_FRAGMENT 44 #endif #ifndef IPPROTO_GRE # define IPPROTO_GRE 47 /* GRE encaps RFC 1701 */ #endif #ifndef IPPROTO_ESP # define IPPROTO_ESP 50 #endif #ifndef IPPROTO_AH # define IPPROTO_AH 51 #endif #ifndef IPPROTO_ICMPV6 # define IPPROTO_ICMPV6 58 #endif #ifndef IPPROTO_NONE # define IPPROTO_NONE 59 #endif #ifndef IPPROTO_DSTOPTS # define IPPROTO_DSTOPTS 60 #endif #ifndef IPPROTO_MOBILITY # define IPPROTO_MOBILITY 135 #endif #ifndef ICMP_ROUTERADVERT # define ICMP_ROUTERADVERT 9 #endif #ifndef ICMP_ROUTERSOLICIT # define ICMP_ROUTERSOLICIT 10 #endif #ifndef ICMP6_DST_UNREACH # define ICMP6_DST_UNREACH 1 #endif #ifndef ICMP6_PACKET_TOO_BIG # define ICMP6_PACKET_TOO_BIG 2 #endif #ifndef ICMP6_TIME_EXCEEDED # define ICMP6_TIME_EXCEEDED 3 #endif #ifndef ICMP6_PARAM_PROB # define ICMP6_PARAM_PROB 4 #endif #ifndef ICMP6_ECHO_REQUEST # define ICMP6_ECHO_REQUEST 128 #endif #ifndef ICMP6_ECHO_REPLY # define ICMP6_ECHO_REPLY 129 #endif #ifndef ICMP6_MEMBERSHIP_QUERY # define ICMP6_MEMBERSHIP_QUERY 130 #endif #ifndef MLD6_LISTENER_QUERY # define MLD6_LISTENER_QUERY 130 #endif #ifndef ICMP6_MEMBERSHIP_REPORT # define ICMP6_MEMBERSHIP_REPORT 131 #endif #ifndef MLD6_LISTENER_REPORT # define MLD6_LISTENER_REPORT 131 #endif #ifndef ICMP6_MEMBERSHIP_REDUCTION # define ICMP6_MEMBERSHIP_REDUCTION 132 #endif #ifndef MLD6_LISTENER_DONE # define MLD6_LISTENER_DONE 132 #endif #ifndef ND_ROUTER_SOLICIT # define ND_ROUTER_SOLICIT 133 #endif #ifndef ND_ROUTER_ADVERT # define ND_ROUTER_ADVERT 134 #endif #ifndef ND_NEIGHBOR_SOLICIT # define ND_NEIGHBOR_SOLICIT 135 #endif #ifndef ND_NEIGHBOR_ADVERT # define ND_NEIGHBOR_ADVERT 136 #endif #ifndef ND_REDIRECT # define ND_REDIRECT 137 #endif #ifndef ICMP6_ROUTER_RENUMBERING # define ICMP6_ROUTER_RENUMBERING 138 #endif #ifndef ICMP6_WRUREQUEST # define ICMP6_WRUREQUEST 139 #endif #ifndef ICMP6_WRUREPLY # define ICMP6_WRUREPLY 140 #endif #ifndef ICMP6_FQDN_QUERY # define ICMP6_FQDN_QUERY 139 #endif #ifndef ICMP6_FQDN_REPLY # define ICMP6_FQDN_REPLY 140 #endif #ifndef ICMP6_NI_QUERY # define ICMP6_NI_QUERY 139 #endif #ifndef ICMP6_NI_REPLY # define ICMP6_NI_REPLY 140 #endif #ifndef MLD6_MTRACE_RESP # define MLD6_MTRACE_RESP 200 #endif #ifndef MLD6_MTRACE # define MLD6_MTRACE 201 #endif #ifndef ICMP6_HADISCOV_REQUEST # define ICMP6_HADISCOV_REQUEST 202 #endif #ifndef ICMP6_HADISCOV_REPLY # define ICMP6_HADISCOV_REPLY 203 #endif #ifndef ICMP6_MOBILEPREFIX_SOLICIT # define ICMP6_MOBILEPREFIX_SOLICIT 204 #endif #ifndef ICMP6_MOBILEPREFIX_ADVERT # define ICMP6_MOBILEPREFIX_ADVERT 205 #endif #ifndef ICMP6_MAXTYPE # define ICMP6_MAXTYPE 205 #endif #ifndef ICMP6_DST_UNREACH_NOROUTE # define ICMP6_DST_UNREACH_NOROUTE 0 #endif #ifndef ICMP6_DST_UNREACH_ADMIN # define ICMP6_DST_UNREACH_ADMIN 1 #endif #ifndef ICMP6_DST_UNREACH_NOTNEIGHBOR # define ICMP6_DST_UNREACH_NOTNEIGHBOR 2 #endif #ifndef ICMP6_DST_UNREACH_BEYONDSCOPE # define ICMP6_DST_UNREACH_BEYONDSCOPE 2 #endif #ifndef ICMP6_DST_UNREACH_ADDR # define ICMP6_DST_UNREACH_ADDR 3 #endif #ifndef ICMP6_DST_UNREACH_NOPORT # define ICMP6_DST_UNREACH_NOPORT 4 #endif #ifndef 
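/*
 * Editor's sketch, not part of this commit: a simplified, hypothetical
 * walk over IPv4 options using the IPOPT_xxx values above, here looking
 * for source routing.  The real filter builds an option bitmask
 * (fi_optmsk) instead; this only illustrates how the constants combine.
 */
static int
example_has_srcroute(u_char *opts, int optlen)	/* hypothetical */
{
	while (optlen > 0) {
		u_char type = opts[0];
		int olen;

		if (type == IPOPT_EOL)
			break;
		if (type == IPOPT_NOP) {
			olen = 1;
		} else {
			if (optlen < 2)
				break;
			olen = opts[IPOPT_OLEN];	/* length byte */
			if (olen < 2 || olen > optlen)
				break;			/* malformed */
			if (type == IPOPT_LSRR || type == IPOPT_SSRR)
				return 1;		/* source routed */
		}
		opts += olen;
		optlen -= olen;
	}
	return 0;
}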
ICMP6_TIME_EXCEED_TRANSIT # define ICMP6_TIME_EXCEED_TRANSIT 0 #endif #ifndef ICMP6_TIME_EXCEED_REASSEMBLY # define ICMP6_TIME_EXCEED_REASSEMBLY 1 #endif #ifndef ICMP6_NI_SUCCESS # define ICMP6_NI_SUCCESS 0 #endif #ifndef ICMP6_NI_REFUSED # define ICMP6_NI_REFUSED 1 #endif #ifndef ICMP6_NI_UNKNOWN # define ICMP6_NI_UNKNOWN 2 #endif #ifndef ICMP6_ROUTER_RENUMBERING_COMMAND # define ICMP6_ROUTER_RENUMBERING_COMMAND 0 #endif #ifndef ICMP6_ROUTER_RENUMBERING_RESULT # define ICMP6_ROUTER_RENUMBERING_RESULT 1 #endif #ifndef ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET # define ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET 255 #endif #ifndef ICMP6_PARAMPROB_HEADER # define ICMP6_PARAMPROB_HEADER 0 #endif #ifndef ICMP6_PARAMPROB_NEXTHEADER # define ICMP6_PARAMPROB_NEXTHEADER 1 #endif #ifndef ICMP6_PARAMPROB_OPTION # define ICMP6_PARAMPROB_OPTION 2 #endif #ifndef ICMP6_NI_SUBJ_IPV6 # define ICMP6_NI_SUBJ_IPV6 0 #endif #ifndef ICMP6_NI_SUBJ_FQDN # define ICMP6_NI_SUBJ_FQDN 1 #endif #ifndef ICMP6_NI_SUBJ_IPV4 # define ICMP6_NI_SUBJ_IPV4 2 #endif #ifndef MLD_MTRACE_RESP # define MLD_MTRACE_RESP 200 #endif #ifndef MLD_MTRACE # define MLD_MTRACE 201 #endif #ifndef MLD6_MTRACE_RESP # define MLD6_MTRACE_RESP MLD_MTRACE_RESP #endif #ifndef MLD6_MTRACE # define MLD6_MTRACE MLD_MTRACE #endif #if !defined(IPV6_FLOWINFO_MASK) # if (BYTE_ORDER == BIG_ENDIAN) || defined(_BIG_ENDIAN) # define IPV6_FLOWINFO_MASK 0x0fffffff /* flow info (28 bits) */ # else # if(BYTE_ORDER == LITTLE_ENDIAN) || !defined(_BIG_ENDIAN) # define IPV6_FLOWINFO_MASK 0xffffff0f /* flow info (28 bits) */ # endif /* LITTLE_ENDIAN */ # endif #endif #if !defined(IPV6_FLOWLABEL_MASK) # if (BYTE_ORDER == BIG_ENDIAN) || defined(_BIG_ENDIAN) # define IPV6_FLOWLABEL_MASK 0x000fffff /* flow label (20 bits) */ # else # if (BYTE_ORDER == LITTLE_ENDIAN) || !defined(_BIG_ENDIAN) # define IPV6_FLOWLABEL_MASK 0xffff0f00 /* flow label (20 bits) */ # endif /* LITTLE_ENDIAN */ # endif #endif /* * ECN is a new addition to TCP - RFC 2481 */ #ifndef TH_ECN # define TH_ECN 0x40 #endif #ifndef TH_CWR # define TH_CWR 0x80 #endif #define TH_ECNALL (TH_ECN|TH_CWR) /* * TCP States */ #define IPF_TCPS_LISTEN 0 /* listening for connection */ #define IPF_TCPS_SYN_SENT 1 /* active, have sent syn */ #define IPF_TCPS_SYN_RECEIVED 2 /* have send and received syn */ #define IPF_TCPS_HALF_ESTAB 3 /* for connections not fully "up" */ /* states < IPF_TCPS_ESTABLISHED are those where connections not established */ #define IPF_TCPS_ESTABLISHED 4 /* established */ #define IPF_TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */ /* states > IPF_TCPS_CLOSE_WAIT are those where user has closed */ #define IPF_TCPS_FIN_WAIT_1 6 /* have closed, sent fin */ #define IPF_TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */ #define IPF_TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */ /* states > IPF_TCPS_CLOSE_WAIT && < IPF_TCPS_FIN_WAIT_2 await ACK of FIN */ #define IPF_TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */ #define IPF_TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */ #define IPF_TCPS_CLOSED 11 /* closed */ #define IPF_TCP_NSTATES 12 #define TCP_MSL 120 #undef ICMP_MAX_UNREACH #define ICMP_MAX_UNREACH 14 #undef ICMP_MAXTYPE #define ICMP_MAXTYPE 18 #ifndef LOG_FTP # define LOG_FTP (11<<3) #endif #ifndef LOG_AUTHPRIV # define LOG_AUTHPRIV (10<<3) #endif #ifndef LOG_AUDIT # define LOG_AUDIT (13<<3) #endif #ifndef LOG_NTP # define LOG_NTP (12<<3) #endif #ifndef LOG_SECURITY # define LOG_SECURITY (13<<3) #endif #ifndef LOG_LFMT # define LOG_LFMT (14<<3) #endif #ifndef LOG_CONSOLE # define 
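/*
 * Editor's sketch, not part of this commit: the IPF_TCPS_xxx values above
 * are deliberately ordered so that plain range tests work, as the
 * interleaved comments describe.  The predicate below is hypothetical.
 */
static int
example_tcp_connection_up(int state)
{
	/* below ESTABLISHED: still handshaking; CLOSED: connection gone */
	return (state >= IPF_TCPS_ESTABLISHED && state < IPF_TCPS_CLOSED);
}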
LOG_CONSOLE (14<<3) #endif /* * ICMP error replies have an IP header (20 bytes), 8 bytes of ICMP data, * another IP header and then 64 bits of data, totalling 56. Of course, * the last 64 bits is dependent on that being available. */ #define ICMPERR_ICMPHLEN 8 #define ICMPERR_IPICMPHLEN (20 + 8) #define ICMPERR_MINPKTLEN (20 + 8 + 20) #define ICMPERR_MAXPKTLEN (20 + 8 + 20 + 8) #define ICMP6ERR_MINPKTLEN (40 + 8) #define ICMP6ERR_IPICMPHLEN (40 + 8 + 40) #ifndef MIN # define MIN(a,b) (((a)<(b))?(a):(b)) #endif #ifdef RESCUE # undef IPFILTER_BPF #endif #ifdef IPF_DEBUG # define DPRINT(x) printf x #else # define DPRINT(x) #endif #ifdef DTRACE_PROBE # ifdef _KERNEL # define DT(_n) DTRACE_PROBE(_n) # define DT1(_n,_a,_b) DTRACE_PROBE1(_n,_a,_b) # define DT2(_n,_a,_b,_c,_d) DTRACE_PROBE2(_n,_a,_b,_c,_d) # define DT3(_n,_a,_b,_c,_d,_e,_f) \ DTRACE_PROBE3(_n,_a,_b,_c,_d,_e,_f) # define DT4(_n,_a,_b,_c,_d,_e,_f,_g,_h) \ DTRACE_PROBE4(_n,_a,_b,_c,_d,_e,_f,_g,_h) # else # define DT(_n) # define DT1(_n,_a,_b) # define DT2(_n,_a,_b,_c,_d) # define DT3(_n,_a,_b,_c,_d,_e,_f) # define DT4(_n,_a,_b,_c,_d,_e,_f,_g,_h) # endif #else # define DT(_n) # define DT1(_n,_a,_b) # define DT2(_n,_a,_b,_c,_d) # define DT3(_n,_a,_b,_c,_d,_e,_f) # define DT4(_n,_a,_b,_c,_d,_e,_f,_g,_h) #endif struct ip6_routing { u_char ip6r_nxt; /* next header */ u_char ip6r_len; /* length in units of 8 octets */ u_char ip6r_type; /* always zero */ u_char ip6r_segleft; /* segments left */ u_32_t ip6r_reserved; /* reserved field */ }; #endif /* __IP_COMPAT_H__ */ Index: head/sys/contrib/ipfilter/netinet/ip_fil.h =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_fil.h (revision 358557) +++ head/sys/contrib/ipfilter/netinet/ip_fil.h (revision 358558) @@ -1,1909 +1,1908 @@ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. 
* * @(#)ip_fil.h 1.35 6/5/96 * $FreeBSD$ * Id: ip_fil.h,v 2.170.2.51 2007/10/10 09:48:03 darrenr Exp $ */ #ifndef __IP_FIL_H__ #define __IP_FIL_H__ # include #include "netinet/ip_compat.h" #include "netinet/ipf_rb.h" #if NETBSD_GE_REV(104040000) # include #endif #if defined(BSD) && defined(_KERNEL) # include #endif #ifndef SOLARIS # if defined(sun) && defined(__SVR4) # define SOLARIS 1 # else # define SOLARIS 0 # endif #endif #ifndef __P # ifdef __STDC__ # define __P(x) x # else # define __P(x) () # endif #endif #if defined(__STDC__) || defined(__GNUC__) # define SIOCADAFR _IOW('r', 60, struct ipfobj) # define SIOCRMAFR _IOW('r', 61, struct ipfobj) # define SIOCSETFF _IOW('r', 62, u_int) # define SIOCGETFF _IOR('r', 63, u_int) # define SIOCGETFS _IOWR('r', 64, struct ipfobj) # define SIOCIPFFL _IOWR('r', 65, int) # define SIOCIPFFB _IOR('r', 66, int) # define SIOCADIFR _IOW('r', 67, struct ipfobj) # define SIOCRMIFR _IOW('r', 68, struct ipfobj) # define SIOCSWAPA _IOR('r', 69, u_int) # define SIOCINAFR _IOW('r', 70, struct ipfobj) # define SIOCINIFR _IOW('r', 71, struct ipfobj) # define SIOCFRENB _IOW('r', 72, u_int) # define SIOCFRSYN _IOW('r', 73, u_int) # define SIOCFRZST _IOWR('r', 74, struct ipfobj) # define SIOCZRLST _IOWR('r', 75, struct ipfobj) # define SIOCAUTHW _IOWR('r', 76, struct ipfobj) # define SIOCAUTHR _IOWR('r', 77, struct ipfobj) # define SIOCSTAT1 _IOWR('r', 78, struct ipfobj) # define SIOCSTLCK _IOWR('r', 79, u_int) # define SIOCSTPUT _IOWR('r', 80, struct ipfobj) # define SIOCSTGET _IOWR('r', 81, struct ipfobj) # define SIOCSTGSZ _IOWR('r', 82, struct ipfobj) # define SIOCSTAT2 _IOWR('r', 83, struct ipfobj) # define SIOCSETLG _IOWR('r', 84, int) # define SIOCGETLG _IOWR('r', 85, int) # define SIOCFUNCL _IOWR('r', 86, struct ipfunc_resolve) # define SIOCIPFGETNEXT _IOWR('r', 87, struct ipfobj) # define SIOCIPFGET _IOWR('r', 88, struct ipfobj) # define SIOCIPFSET _IOWR('r', 89, struct ipfobj) # define SIOCIPFL6 _IOWR('r', 90, int) # define SIOCIPFITER _IOWR('r', 91, struct ipfobj) # define SIOCGENITER _IOWR('r', 92, struct ipfobj) # define SIOCGTABL _IOWR('r', 93, struct ipfobj) # define SIOCIPFDELTOK _IOWR('r', 94, int) # define SIOCLOOKUPITER _IOWR('r', 95, struct ipfobj) # define SIOCGTQTAB _IOWR('r', 96, struct ipfobj) # define SIOCMATCHFLUSH _IOWR('r', 97, struct ipfobj) # define SIOCIPFINTERROR _IOR('r', 98, int) #else # define SIOCADAFR _IOW(r, 60, struct ipfobj) # define SIOCRMAFR _IOW(r, 61, struct ipfobj) # define SIOCSETFF _IOW(r, 62, u_int) # define SIOCGETFF _IOR(r, 63, u_int) # define SIOCGETFS _IOWR(r, 64, struct ipfobj) # define SIOCIPFFL _IOWR(r, 65, int) # define SIOCIPFFB _IOR(r, 66, int) # define SIOCADIFR _IOW(r, 67, struct ipfobj) # define SIOCRMIFR _IOW(r, 68, struct ipfobj) # define SIOCSWAPA _IOR(r, 69, u_int) # define SIOCINAFR _IOW(r, 70, struct ipfobj) # define SIOCINIFR _IOW(r, 71, struct ipfobj) # define SIOCFRENB _IOW(r, 72, u_int) # define SIOCFRSYN _IOW(r, 73, u_int) # define SIOCFRZST _IOWR(r, 74, struct ipfobj) # define SIOCZRLST _IOWR(r, 75, struct ipfobj) # define SIOCAUTHW _IOWR(r, 76, struct ipfobj) # define SIOCAUTHR _IOWR(r, 77, struct ipfobj) # define SIOCSTAT1 _IOWR(r, 78, struct ipfobj) # define SIOCSTLCK _IOWR(r, 79, u_int) # define SIOCSTPUT _IOWR(r, 80, struct ipfobj) # define SIOCSTGET _IOWR(r, 81, struct ipfobj) # define SIOCSTGSZ _IOWR(r, 82, struct ipfobj) # define SIOCSTAT2 _IOWR(r, 83, struct ipfobj) # define SIOCSETLG _IOWR(r, 84, int) # define SIOCGETLG _IOWR(r, 85, int) # define SIOCFUNCL _IOWR(r, 86, struct 
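/*
 * Editor's sketch, not part of this commit: a minimal userland use of one
 * of the ioctls declared above.  "/dev/ipl" as the control device and the
 * helper name are assumptions of this sketch; error handling is terse.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_get_ipf_flags(u_int *flagsp)
{
	int fd, ret;

	fd = open("/dev/ipl", O_RDONLY);
	if (fd == -1)
		return -1;
	ret = ioctl(fd, SIOCGETFF, flagsp);	/* _IOR('r', 63, u_int) */
	close(fd);
	return ret;
}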
ipfunc_resolve) # define SIOCIPFGETNEXT _IOWR(r, 87, struct ipfobj) # define SIOCIPFGET _IOWR(r, 88, struct ipfobj) # define SIOCIPFSET _IOWR(r, 89, struct ipfobj) # define SIOCIPFL6 _IOWR(r, 90, int) # define SIOCIPFITER _IOWR(r, 91, struct ipfobj) # define SIOCGENITER _IOWR(r, 92, struct ipfobj) # define SIOCGTABL _IOWR(r, 93, struct ipfobj) # define SIOCIPFDELTOK _IOWR(r, 94, int) # define SIOCLOOKUPITER _IOWR(r, 95, struct ipfobj) # define SIOCGTQTAB _IOWR(r, 96, struct ipfobj) # define SIOCMATCHFLUSH _IOWR(r, 97, struct ipfobj) # define SIOCIPFINTERROR _IOR(r, 98, int) #endif #define SIOCADDFR SIOCADAFR #define SIOCDELFR SIOCRMAFR #define SIOCINSFR SIOCINAFR #define SIOCATHST SIOCSTAT1 #define SIOCGFRST SIOCSTAT2 struct ipscan; struct ifnet; struct ipf_main_softc_s; typedef int (* lookupfunc_t) __P((struct ipf_main_softc_s *, void *, int, void *, u_int)); /* * i6addr is used as a container for both IPv4 and IPv6 addresses, as well * as other types of objects, depending on its qualifier. */ typedef union i6addr { u_32_t i6[4]; struct in_addr in4; #ifdef USE_INET6 struct in6_addr in6; #endif void *vptr[2]; lookupfunc_t lptr[2]; struct { u_short type; u_short subtype; int name; } i6un; } i6addr_t; #define in4_addr in4.s_addr #define iplookupnum i6[1] #define iplookupname i6un.name #define iplookuptype i6un.type #define iplookupsubtype i6un.subtype /* * NOTE: These DO overlap the above on 64bit systems and this IS recognised. */ #define iplookupptr vptr[0] #define iplookupfunc lptr[1] #define I60(x) (((u_32_t *)(x))[0]) #define I61(x) (((u_32_t *)(x))[1]) #define I62(x) (((u_32_t *)(x))[2]) #define I63(x) (((u_32_t *)(x))[3]) #define HI60(x) ntohl(((u_32_t *)(x))[0]) #define HI61(x) ntohl(((u_32_t *)(x))[1]) #define HI62(x) ntohl(((u_32_t *)(x))[2]) #define HI63(x) ntohl(((u_32_t *)(x))[3]) #define IP6_EQ(a,b) ((I63(a) == I63(b)) && (I62(a) == I62(b)) && \ (I61(a) == I61(b)) && (I60(a) == I60(b))) #define IP6_NEQ(a,b) ((I63(a) != I63(b)) || (I62(a) != I62(b)) || \ (I61(a) != I61(b)) || (I60(a) != I60(b))) #define IP6_ISZERO(a) ((I60(a) | I61(a) | I62(a) | I63(a)) == 0) #define IP6_NOTZERO(a) ((I60(a) | I61(a) | I62(a) | I63(a)) != 0) #define IP6_ISONES(a) ((I63(a) == 0xffffffff) && (I62(a) == 0xffffffff) && \ (I61(a) == 0xffffffff) && (I60(a) == 0xffffffff)) #define IP6_GT(a,b) (ntohl(HI60(a)) > ntohl(HI60(b)) || \ (HI60(a) == HI60(b) && \ (ntohl(HI61(a)) > ntohl(HI61(b)) || \ (HI61(a) == HI61(b) && \ (ntohl(HI62(a)) > ntohl(HI62(b)) || \ (HI62(a) == HI62(b) && \ ntohl(HI63(a)) > ntohl(HI63(b)))))))) #define IP6_LT(a,b) (ntohl(HI60(a)) < ntohl(HI60(b)) || \ (HI60(a) == HI60(b) && \ (ntohl(HI61(a)) < ntohl(HI61(b)) || \ (HI61(a) == HI61(b) && \ (ntohl(HI62(a)) < ntohl(HI62(b)) || \ (HI62(a) == HI62(b) && \ ntohl(HI63(a)) < ntohl(HI63(b)))))))) #define NLADD(n,x) htonl(ntohl(n) + (x)) #define IP6_INC(a) \ do { u_32_t *_i6 = (u_32_t *)(a); \ _i6[3] = NLADD(_i6[3], 1); \ if (_i6[3] == 0) { \ _i6[2] = NLADD(_i6[2], 1); \ if (_i6[2] == 0) { \ _i6[1] = NLADD(_i6[1], 1); \ if (_i6[1] == 0) { \ _i6[0] = NLADD(_i6[0], 1); \ } \ } \ } \ } while (0) #define IP6_ADD(a,x,d) \ do { i6addr_t *_s = (i6addr_t *)(a); \ i6addr_t *_d = (i6addr_t *)(d); \ _d->i6[0] = NLADD(_s->i6[0], x); \ if (ntohl(_d->i6[0]) < ntohl(_s->i6[0])) { \ _d->i6[1] = NLADD(_d->i6[1], 1); \ if (ntohl(_d->i6[1]) < ntohl(_s->i6[1])) { \ _d->i6[2] = NLADD(_d->i6[2], 1); \ if (ntohl(_d->i6[2]) < ntohl(_s->i6[2])) { \ _d->i6[3] = NLADD(_d->i6[3], 1); \ } \ } \ } \ } while (0) #define IP6_AND(a,b,d) do { i6addr_t *_s1 = (i6addr_t *)(a); \ 
i6addr_t *_s2 = (i6addr_t *)(b); \ i6addr_t *_d = (i6addr_t *)(d); \ _d->i6[0] = _s1->i6[0] & _s2->i6[0]; \ _d->i6[1] = _s1->i6[1] & _s2->i6[1]; \ _d->i6[2] = _s1->i6[2] & _s2->i6[2]; \ _d->i6[3] = _s1->i6[3] & _s2->i6[3]; \ } while (0) #define IP6_ANDASSIGN(a,m) \ do { i6addr_t *_d = (i6addr_t *)(a); \ i6addr_t *_m = (i6addr_t *)(m); \ _d->i6[0] &= _m->i6[0]; \ _d->i6[1] &= _m->i6[1]; \ _d->i6[2] &= _m->i6[2]; \ _d->i6[3] &= _m->i6[3]; \ } while (0) #define IP6_MASKEQ(a,m,b) \ (((I60(a) & I60(m)) == I60(b)) && \ ((I61(a) & I61(m)) == I61(b)) && \ ((I62(a) & I62(m)) == I62(b)) && \ ((I63(a) & I63(m)) == I63(b))) #define IP6_MASKNEQ(a,m,b) \ (((I60(a) & I60(m)) != I60(b)) || \ ((I61(a) & I61(m)) != I61(b)) || \ ((I62(a) & I62(m)) != I62(b)) || \ ((I63(a) & I63(m)) != I63(b))) #define IP6_MERGE(a,b,c) \ do { i6addr_t *_d, *_s1, *_s2; \ _d = (i6addr_t *)(a); \ _s1 = (i6addr_t *)(b); \ _s2 = (i6addr_t *)(c); \ _d->i6[0] |= _s1->i6[0] & ~_s2->i6[0]; \ _d->i6[1] |= _s1->i6[1] & ~_s2->i6[1]; \ _d->i6[2] |= _s1->i6[2] & ~_s2->i6[2]; \ _d->i6[3] |= _s1->i6[3] & ~_s2->i6[3]; \ } while (0) #define IP6_MASK(a,b,c) \ do { i6addr_t *_d, *_s1, *_s2; \ _d = (i6addr_t *)(a); \ _s1 = (i6addr_t *)(b); \ _s2 = (i6addr_t *)(c); \ _d->i6[0] = _s1->i6[0] & ~_s2->i6[0]; \ _d->i6[1] = _s1->i6[1] & ~_s2->i6[1]; \ _d->i6[2] = _s1->i6[2] & ~_s2->i6[2]; \ _d->i6[3] = _s1->i6[3] & ~_s2->i6[3]; \ } while (0) #define IP6_SETONES(a) \ do { i6addr_t *_d = (i6addr_t *)(a); \ _d->i6[0] = 0xffffffff; \ _d->i6[1] = 0xffffffff; \ _d->i6[2] = 0xffffffff; \ _d->i6[3] = 0xffffffff; \ } while (0) typedef union ipso_u { u_short ipso_ripso[2]; u_32_t ipso_doi; } ipso_t; typedef struct fr_ip { u_32_t fi_v:4; /* IP version */ u_32_t fi_xx:4; /* spare */ u_32_t fi_tos:8; /* IP packet TOS */ u_32_t fi_ttl:8; /* IP packet TTL */ u_32_t fi_p:8; /* IP packet protocol */ u_32_t fi_optmsk; /* bitmask composed from IP options */ i6addr_t fi_src; /* source address from packet */ i6addr_t fi_dst; /* destination address from packet */ ipso_t fi_ipso; /* IP security options */ u_32_t fi_flx; /* packet flags */ u_32_t fi_tcpmsk; /* TCP options set/reset */ u_32_t fi_ports[2]; /* TCP ports */ u_char fi_tcpf; /* TCP flags */ u_char fi_sensitivity; u_char fi_xxx[2]; /* pad */ } fr_ip_t; /* * For use in fi_flx */ #define FI_TCPUDP 0x0001 /* TCP/UCP implied comparison*/ #define FI_OPTIONS 0x0002 #define FI_FRAG 0x0004 #define FI_SHORT 0x0008 #define FI_NATED 0x0010 #define FI_MULTICAST 0x0020 #define FI_BROADCAST 0x0040 #define FI_MBCAST 0x0080 #define FI_STATE 0x0100 #define FI_BADNAT 0x0200 #define FI_BAD 0x0400 #define FI_OOW 0x0800 /* Out of state window, else match */ #define FI_ICMPERR 0x1000 #define FI_FRAGBODY 0x2000 #define FI_BADSRC 0x4000 #define FI_LOWTTL 0x8000 #define FI_CMP 0x5cfe3 /* Not FI_FRAG,FI_NATED,FI_FRAGTAIL */ #define FI_ICMPCMP 0x0003 /* Flags we can check for ICMP error packets */ #define FI_WITH 0x5effe /* Not FI_TCPUDP */ #define FI_V6EXTHDR 0x10000 #define FI_COALESCE 0x20000 #define FI_NEWNAT 0x40000 #define FI_ICMPQUERY 0x80000 #define FI_ENCAP 0x100000 /* encap/decap with NAT */ #define FI_AH 0x200000 /* AH header present */ #define FI_DOCKSUM 0x10000000 /* Proxy wants L4 recalculation */ #define FI_NOCKSUM 0x20000000 /* don't do a L4 checksum validation */ #define FI_NOWILD 0x40000000 /* Do not do wildcard searches */ #define FI_IGNORE 0x80000000 #define fi_secmsk fi_ipso.ipso_ripso[0] #define fi_auth fi_ipso.ipso_ripso[1] #define fi_doi fi_ipso.ipso_doi #define fi_saddr fi_src.in4.s_addr #define fi_daddr 
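/*
 * Editor's sketch, not part of this commit: i6addr_t keeps an address as
 * four 32-bit words in network byte order, and the IP6_xxx helpers above
 * operate on that representation.  The values below are arbitrary test
 * data and the function is hypothetical.
 */
static void
example_i6addr(void)
{
	i6addr_t a, b;

	bzero(&a, sizeof(a));
	a.i6[3] = htonl(0xffffffffU);	/* low word all ones */
	b = a;
	IP6_INC(&b);			/* wraps i6[3], carries into i6[2] */
	/* IP6_EQ(&a, &b) is now false and IP6_GT(&b, &a) is true */
}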
fi_dst.in4.s_addr #define fi_srcnum fi_src.iplookupnum #define fi_dstnum fi_dst.iplookupnum #define fi_srcname fi_src.iplookupname #define fi_dstname fi_dst.iplookupname #define fi_srctype fi_src.iplookuptype #define fi_dsttype fi_dst.iplookuptype #define fi_srcsubtype fi_src.iplookupsubtype #define fi_dstsubtype fi_dst.iplookupsubtype #define fi_srcptr fi_src.iplookupptr #define fi_dstptr fi_dst.iplookupptr #define fi_srcfunc fi_src.iplookupfunc #define fi_dstfunc fi_dst.iplookupfunc /* * These are both used by the state and NAT code to indicate that one port or * the other should be treated as a wildcard. * NOTE: When updating, check bit masks in ip_state.h and update there too. */ #define SI_W_SPORT 0x00000100 #define SI_W_DPORT 0x00000200 #define SI_WILDP (SI_W_SPORT|SI_W_DPORT) #define SI_W_SADDR 0x00000400 #define SI_W_DADDR 0x00000800 #define SI_WILDA (SI_W_SADDR|SI_W_DADDR) #define SI_NEWFR 0x00001000 #define SI_CLONE 0x00002000 #define SI_CLONED 0x00004000 #define SI_NEWCLONE 0x00008000 typedef struct { u_short fda_ports[2]; u_char fda_tcpf; /* TCP header flags (SYN, ACK, etc) */ } frdat_t; typedef enum fr_breasons_e { FRB_BLOCKED = 0, FRB_LOGFAIL = 1, FRB_PPSRATE = 2, FRB_JUMBO = 3, FRB_MAKEFRIP = 4, FRB_STATEADD = 5, FRB_UPDATEIPID = 6, FRB_LOGFAIL2 = 7, FRB_DECAPFRIP = 8, FRB_AUTHNEW = 9, FRB_AUTHCAPTURE = 10, FRB_COALESCE = 11, FRB_PULLUP = 12, FRB_AUTHFEEDBACK = 13, FRB_BADFRAG = 14, FRB_NATV4 = 15, FRB_NATV6 = 16, } fr_breason_t; #define FRB_MAX_VALUE 16 typedef enum ipf_cksum_e { FI_CK_BAD = -1, FI_CK_NEEDED = 0, FI_CK_SUMOK = 1, FI_CK_L4PART = 2, FI_CK_L4FULL = 4 } ipf_cksum_t; typedef struct fr_info { void *fin_main_soft; void *fin_ifp; /* interface packet is `on' */ struct frentry *fin_fr; /* last matching rule */ int fin_out; /* in or out ? 1 == out, 0 == in */ fr_ip_t fin_fi; /* IP Packet summary */ frdat_t fin_dat; /* TCP/UDP ports, ICMP code/type */ int fin_dlen; /* length of data portion of packet */ int fin_plen; u_32_t fin_rule; /* rule # last matched */ u_short fin_hlen; /* length of IP header in bytes */ char fin_group[FR_GROUPLEN]; /* group number, -1 for none */ void *fin_dp; /* start of data past IP header */ /* * Fields after fin_dp aren't used for compression of log records. * fin_fi contains the IP version (fin_family) * fin_rule isn't included because adding a new rule can change it but * not change fin_fr. fin_rule is the rule number reported. * It isn't necessary to include fin_crc because that is checked * for explicitly, before calling bcmp. */ u_32_t fin_crc; /* Simple calculation for logging */ int fin_family; /* AF_INET, etc. 
*/ int fin_icode; /* ICMP error to return */ int fin_mtu; /* MTU input for ICMP need-frag */ int fin_rev; /* state only: 1 = reverse */ int fin_ipoff; /* # bytes from buffer start to hdr */ u_32_t fin_id; /* IP packet id field */ u_short fin_l4hlen; /* length of L4 header, if known */ u_short fin_off; int fin_depth; /* Group nesting depth */ int fin_error; /* Error code to return */ ipf_cksum_t fin_cksum; /* -1 = bad, 1 = good, 0 = not done */ fr_breason_t fin_reason; /* why auto blocked */ u_int fin_pktnum; void *fin_nattag; struct frdest *fin_dif; struct frdest *fin_tif; union { ip_t *fip_ip; #ifdef USE_INET6 ip6_t *fip_ip6; #endif } fin_ipu; mb_t **fin_mp; /* pointer to pointer to mbuf */ mb_t *fin_m; /* pointer to mbuf */ #ifdef MENTAT mb_t *fin_qfm; /* pointer to mblk where pkt starts */ void *fin_qpi; char fin_ifname[LIFNAMSIZ]; #endif void *fin_fraghdr; /* pointer to start of ipv6 frag hdr */ } fr_info_t; #define fin_ip fin_ipu.fip_ip #define fin_ip6 fin_ipu.fip_ip6 #define fin_v fin_fi.fi_v #define fin_p fin_fi.fi_p #define fin_flx fin_fi.fi_flx #define fin_optmsk fin_fi.fi_optmsk #define fin_secmsk fin_fi.fi_secmsk #define fin_doi fin_fi.fi_doi #define fin_auth fin_fi.fi_auth #define fin_src fin_fi.fi_src.in4 #define fin_saddr fin_fi.fi_saddr #define fin_dst fin_fi.fi_dst.in4 #define fin_daddr fin_fi.fi_daddr #define fin_data fin_fi.fi_ports #define fin_sport fin_fi.fi_ports[0] #define fin_dport fin_fi.fi_ports[1] #define fin_tcpf fin_fi.fi_tcpf #define fin_src6 fin_fi.fi_src #define fin_dst6 fin_fi.fi_dst #define fin_srcip6 fin_fi.fi_src.in6 #define fin_dstip6 fin_fi.fi_dst.in6 #define IPF_IN 0 #define IPF_OUT 1 typedef struct frentry *(*ipfunc_t) __P((fr_info_t *, u_32_t *)); typedef int (*ipfuncinit_t) __P((struct ipf_main_softc_s *, struct frentry *)); typedef struct ipfunc_resolve { char ipfu_name[32]; ipfunc_t ipfu_addr; ipfuncinit_t ipfu_init; ipfuncinit_t ipfu_fini; } ipfunc_resolve_t; /* * Size for compares on fr_info structures */ #define FI_CSIZE offsetof(fr_info_t, fin_icode) #define FI_LCSIZE offsetof(fr_info_t, fin_dp) /* * Size for copying cache fr_info structure */ #define FI_COPYSIZE offsetof(fr_info_t, fin_dp) /* * Structure for holding IPFilter's tag information */ #define IPFTAG_LEN 16 typedef struct { union { u_32_t iptu_num[4]; char iptu_tag[IPFTAG_LEN]; } ipt_un; int ipt_not; } ipftag_t; #define ipt_tag ipt_un.iptu_tag #define ipt_num ipt_un.iptu_num /* * Structure to define address for pool lookups. */ typedef struct { u_char adf_len; sa_family_t adf_family; u_char adf_xxx[2]; i6addr_t adf_addr; } addrfamily_t; RBI_LINK(ipf_rb, host_node_s); typedef struct host_node_s { RBI_FIELD(ipf_rb) hn_entry; addrfamily_t hn_addr; int hn_active; } host_node_t; typedef RBI_HEAD(ipf_rb, host_node_s) ipf_rb_head_t; typedef struct host_track_s { ipf_rb_head_t ht_root; int ht_max_nodes; int ht_max_per_node; int ht_netmask; int ht_cur_nodes; } host_track_t; typedef enum fr_dtypes_e { FRD_NORMAL = 0, FRD_DSTLIST } fr_dtypes_t; /* * This structure is used to hold information about the next hop for where * to forward a packet. */ typedef struct frdest { void *fd_ptr; addrfamily_t fd_addr; fr_dtypes_t fd_type; int fd_name; } frdest_t; #define fd_ip6 fd_addr.adf_addr #define fd_ip fd_ip6.in4 typedef enum fr_ctypes_e { FR_NONE = 0, FR_EQUAL, FR_NEQUAL, FR_LESST, FR_GREATERT, FR_LESSTE, FR_GREATERTE, FR_OUTRANGE, FR_INRANGE, FR_INCRANGE } fr_ctypes_t; /* * This structure holds information about a port comparison. 
*/ typedef struct frpcmp { fr_ctypes_t frp_cmp; /* data for port comparisons */ u_32_t frp_port; /* low port for <> and >< */ u_32_t frp_top; /* high port for <> and >< */ } frpcmp_t; /* * Structure containing all the relevant TCP/UDP things that can be checked in * a filter rule. */ typedef struct frtuc { u_char ftu_tcpfm; /* tcp flags mask */ u_char ftu_tcpf; /* tcp flags */ frpcmp_t ftu_src; /* source port */ frpcmp_t ftu_dst; /* destination port */ } frtuc_t; #define ftu_scmp ftu_src.frp_cmp #define ftu_dcmp ftu_dst.frp_cmp #define ftu_sport ftu_src.frp_port #define ftu_dport ftu_dst.frp_port #define ftu_stop ftu_src.frp_top #define ftu_dtop ftu_dst.frp_top #define FR_TCPFMAX 0x3f typedef enum fr_atypes_e { FRI_NONE = -1, /* For LHS of NAT */ FRI_NORMAL = 0, /* Normal address */ FRI_DYNAMIC, /* dynamic address */ FRI_LOOKUP, /* address is a pool # */ FRI_RANGE, /* address/mask is a range */ FRI_NETWORK, /* network address from if */ FRI_BROADCAST, /* broadcast address from if */ FRI_PEERADDR, /* Peer address for P-to-P */ FRI_NETMASKED, /* network address with netmask from if */ FRI_SPLIT, /* For NAT compatibility */ FRI_INTERFACE /* address is based on interface name */ } fr_atypes_t; /* * This structure makes up what is considered to be the IPFilter specific * matching components of a filter rule, as opposed to the data structures * used to define the result which are in frentry_t and not here. */ typedef struct fripf { fr_ip_t fri_ip; fr_ip_t fri_mip; /* mask structure */ u_short fri_icmpm; /* data for ICMP packets (mask) */ u_short fri_icmp; frtuc_t fri_tuc; fr_atypes_t fri_satype; /* addres type */ fr_atypes_t fri_datype; /* addres type */ int fri_sifpidx; /* doing dynamic addressing */ int fri_difpidx; /* index into fr_ifps[] to use when */ } fripf_t; #define fri_dlookup fri_mip.fi_dst #define fri_slookup fri_mip.fi_src #define fri_dstnum fri_mip.fi_dstnum #define fri_srcnum fri_mip.fi_srcnum #define fri_dstname fri_mip.fi_dstname #define fri_srcname fri_mip.fi_srcname #define fri_dstptr fri_mip.fi_dstptr #define fri_srcptr fri_mip.fi_srcptr typedef enum fr_rtypes_e { FR_T_NONE = 0, FR_T_IPF, /* IPF structures */ FR_T_BPFOPC, /* BPF opcode */ FR_T_CALLFUNC, /* callout to function in fr_func only */ FR_T_COMPIPF, /* compiled C code */ FR_T_IPFEXPR, /* IPF expression */ FR_T_BUILTIN = 0x40000000, /* rule is in kernel space */ FR_T_IPF_BUILTIN, FR_T_BPFOPC_BUILTIN, FR_T_CALLFUNC_BUILTIN, FR_T_COMPIPF_BUILTIN, FR_T_IPFEXPR_BUILTIN } fr_rtypes_t; typedef struct frentry * (* frentfunc_t) __P((fr_info_t *)); typedef struct frentry { ipfmutex_t fr_lock; struct frentry *fr_next; struct frentry **fr_pnext; struct frgroup *fr_grp; struct frgroup *fr_grphead; struct frgroup *fr_icmpgrp; struct ipscan *fr_isc; struct frentry *fr_dnext; /* 2 fr_die linked list pointers */ struct frentry **fr_pdnext; void *fr_ifas[4]; void *fr_ptr; /* for use with fr_arg */ int fr_comment; /* text comment for rule */ int fr_size; /* size of this structure */ int fr_ref; /* reference count */ int fr_statecnt; /* state count - for limit rules */ u_32_t fr_die; /* only used on loading the rule */ u_int fr_cksum; /* checksum on filter rules for performance */ /* * The line number from a file is here because we need to be able to * match the rule generated with ``grep rule ipf.conf | ipf -rf -'' * with the rule loaded using ``ipf -f ipf.conf'' - thus it can't be * on the other side of fr_func. 
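/*
 * Editor's sketch, not part of this commit: a frpcmp_t describes one port
 * test in a rule.  The evaluator below is hypothetical and only handles
 * the unambiguous comparisons; the range operators are interpreted by the
 * filter code itself (fil.c), not by anything in this header.
 */
static int
example_port_match(frpcmp_t *p, u_32_t port)
{
	switch (p->frp_cmp) {
	case FR_NONE:
		return 1;		/* rule has no port test */
	case FR_EQUAL:
		return port == p->frp_port;
	case FR_NEQUAL:
		return port != p->frp_port;
	case FR_LESST:
		return port < p->frp_port;
	case FR_GREATERT:
		return port > p->frp_port;
	default:
		return 0;		/* range forms not shown here */
	}
}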
*/ int fr_flineno; /* line number from conf file */ /* * These are only incremented when a packet matches this rule and * it is the last match */ U_QUAD_T fr_hits; U_QUAD_T fr_bytes; /* * For PPS rate limiting * fr_lpu is used to always have the same size for this field, * allocating 64bits for seconds and 32bits for milliseconds. */ union { struct timeval frp_lastpkt; char frp_bytes[12]; } fr_lpu; int fr_curpps; union { void *fru_data; char *fru_caddr; fripf_t *fru_ipf; frentfunc_t fru_func; } fr_dun; /* * Fields after this may not change whilst in the kernel. */ ipfunc_t fr_func; /* call this function */ int fr_dsize; int fr_pps; fr_rtypes_t fr_type; u_32_t fr_flags; /* per-rule flags && options (see below) */ u_32_t fr_logtag; /* user defined log tag # */ u_32_t fr_collect; /* collection number */ u_int fr_arg; /* misc. numeric arg for rule */ u_int fr_loglevel; /* syslog log facility + priority */ u_char fr_family; u_char fr_icode; /* return ICMP code */ int fr_group; /* group to which this rule belongs */ int fr_grhead; /* group # which this rule starts */ int fr_isctag; int fr_rpc; /* XID Filtering */ ipftag_t fr_nattag; /* * These are all options related to stateful filtering */ host_track_t fr_srctrack; int fr_nostatelog; int fr_statemax; /* max reference count */ int fr_icmphead; /* ICMP group for state options */ u_int fr_age[2]; /* non-TCP state timeouts */ /* * These are compared separately. */ int fr_ifnames[4]; frdest_t fr_tifs[2]; /* "to"/"reply-to" interface */ frdest_t fr_dif; /* duplicate packet interface */ /* * How big is the name buffer at the end? */ int fr_namelen; char fr_names[1]; } frentry_t; #define fr_lastpkt fr_lpu.frp_lastpkt #define fr_caddr fr_dun.fru_caddr #define fr_data fr_dun.fru_data #define fr_dfunc fr_dun.fru_func #define fr_ipf fr_dun.fru_ipf #define fr_ip fr_ipf->fri_ip #define fr_mip fr_ipf->fri_mip #define fr_icmpm fr_ipf->fri_icmpm #define fr_icmp fr_ipf->fri_icmp #define fr_tuc fr_ipf->fri_tuc #define fr_satype fr_ipf->fri_satype #define fr_datype fr_ipf->fri_datype #define fr_sifpidx fr_ipf->fri_sifpidx #define fr_difpidx fr_ipf->fri_difpidx #define fr_proto fr_ip.fi_p #define fr_mproto fr_mip.fi_p #define fr_ttl fr_ip.fi_ttl #define fr_mttl fr_mip.fi_ttl #define fr_tos fr_ip.fi_tos #define fr_mtos fr_mip.fi_tos #define fr_tcpfm fr_tuc.ftu_tcpfm #define fr_tcpf fr_tuc.ftu_tcpf #define fr_scmp fr_tuc.ftu_scmp #define fr_dcmp fr_tuc.ftu_dcmp #define fr_dport fr_tuc.ftu_dport #define fr_sport fr_tuc.ftu_sport #define fr_stop fr_tuc.ftu_stop #define fr_dtop fr_tuc.ftu_dtop #define fr_dst fr_ip.fi_dst.in4 #define fr_dst6 fr_ip.fi_dst #define fr_daddr fr_ip.fi_dst.in4.s_addr #define fr_src fr_ip.fi_src.in4 #define fr_src6 fr_ip.fi_src #define fr_saddr fr_ip.fi_src.in4.s_addr #define fr_dmsk fr_mip.fi_dst.in4 #define fr_dmsk6 fr_mip.fi_dst #define fr_dmask fr_mip.fi_dst.in4.s_addr #define fr_smsk fr_mip.fi_src.in4 #define fr_smsk6 fr_mip.fi_src #define fr_smask fr_mip.fi_src.in4.s_addr #define fr_dstnum fr_ip.fi_dstnum #define fr_srcnum fr_ip.fi_srcnum #define fr_dlookup fr_ip.fi_dst #define fr_slookup fr_ip.fi_src #define fr_dstname fr_ip.fi_dstname #define fr_srcname fr_ip.fi_srcname #define fr_dsttype fr_ip.fi_dsttype #define fr_srctype fr_ip.fi_srctype #define fr_dstsubtype fr_ip.fi_dstsubtype #define fr_srcsubtype fr_ip.fi_srcsubtype #define fr_dstptr fr_mip.fi_dstptr #define fr_srcptr fr_mip.fi_srcptr #define fr_dstfunc fr_mip.fi_dstfunc #define fr_srcfunc fr_mip.fi_srcfunc #define fr_optbits fr_ip.fi_optmsk #define fr_optmask fr_mip.fi_optmsk 
#define fr_secbits fr_ip.fi_secmsk #define fr_secmask fr_mip.fi_secmsk #define fr_authbits fr_ip.fi_auth #define fr_authmask fr_mip.fi_auth #define fr_doi fr_ip.fi_doi #define fr_doimask fr_mip.fi_doi #define fr_flx fr_ip.fi_flx #define fr_mflx fr_mip.fi_flx #define fr_ifa fr_ifas[0] #define fr_oifa fr_ifas[2] #define fr_tif fr_tifs[0] #define fr_rif fr_tifs[1] #define FR_NOLOGTAG 0 #define FR_CMPSIZ (offsetof(struct frentry, fr_ifnames) - \ offsetof(struct frentry, fr_func)) #define FR_NAME(_f, _n) (_f)->fr_names + (_f)->_n #define FR_NUM(_a) (sizeof(_a) / sizeof(*_a)) /* * fr_flags */ #define FR_BLOCK 0x00001 /* do not allow packet to pass */ #define FR_PASS 0x00002 /* allow packet to pass */ #define FR_AUTH 0x00003 /* use authentication */ #define FR_PREAUTH 0x00004 /* require preauthentication */ #define FR_ACCOUNT 0x00005 /* Accounting rule */ #define FR_SKIP 0x00006 /* skip rule */ #define FR_DECAPSULATE 0x00008 /* decapsulate rule */ #define FR_CALL 0x00009 /* call rule */ #define FR_CMDMASK 0x0000f #define FR_LOG 0x00010 /* Log */ #define FR_LOGB 0x00011 /* Log-fail */ #define FR_LOGP 0x00012 /* Log-pass */ #define FR_LOGMASK (FR_LOG|FR_CMDMASK) #define FR_CALLNOW 0x00020 /* call another function (fr_func) if matches */ #define FR_NOTSRCIP 0x00040 #define FR_NOTDSTIP 0x00080 #define FR_QUICK 0x00100 /* match & stop processing list */ #define FR_KEEPFRAG 0x00200 /* keep fragment information */ #define FR_KEEPSTATE 0x00400 /* keep `connection' state information */ #define FR_FASTROUTE 0x00800 /* bypass normal routing */ #define FR_RETRST 0x01000 /* Return TCP RST packet - reset connection */ #define FR_RETICMP 0x02000 /* Return ICMP unreachable packet */ #define FR_FAKEICMP 0x03000 /* Return ICMP unreachable with fake source */ #define FR_OUTQUE 0x04000 /* outgoing packets */ #define FR_INQUE 0x08000 /* ingoing packets */ #define FR_LOGBODY 0x10000 /* Log the body */ #define FR_LOGFIRST 0x20000 /* Log the first byte if state held */ #define FR_LOGORBLOCK 0x40000 /* block the packet if it can't be logged */ #define FR_STLOOSE 0x80000 /* loose state checking */ #define FR_FRSTRICT 0x100000 /* strict frag. 
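/*
 * Editor's sketch, not part of this commit: string data for a rule (its
 * interface names, group names and comment) lives in the fr_names[]
 * buffer at the end of frentry_t; fields such as fr_ifnames[] store
 * offsets into that buffer and FR_NAME() converts an offset back into a
 * pointer.  Treating -1 as "offset not set" is an assumption of this
 * sketch.
 */
static const char *
example_rule_ifname(frentry_t *fr)		/* hypothetical */
{
	if (fr->fr_ifnames[0] == -1)
		return "-";			/* not tied to an interface */
	return FR_NAME(fr, fr_ifnames[0]);	/* fr->fr_names + offset */
}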
cache */ #define FR_STSTRICT 0x200000 /* strict keep state */ #define FR_NEWISN 0x400000 /* new ISN for outgoing TCP */ #define FR_NOICMPERR 0x800000 /* do not match ICMP errors in state */ #define FR_STATESYNC 0x1000000 /* synchronize state to slave */ #define FR_COPIED 0x2000000 /* copied from user space */ #define FR_INACTIVE 0x4000000 /* only used when flush'ing rules */ #define FR_NOMATCH 0x8000000 /* no match occured */ /* 0x10000000 FF_LOGPASS */ /* 0x20000000 FF_LOGBLOCK */ /* 0x40000000 FF_LOGNOMATCH */ /* 0x80000000 FF_BLOCKNONIP */ #define FR_RETMASK (FR_RETICMP|FR_RETRST|FR_FAKEICMP) #define FR_ISBLOCK(x) (((x) & FR_CMDMASK) == FR_BLOCK) #define FR_ISPASS(x) (((x) & FR_CMDMASK) == FR_PASS) #define FR_ISAUTH(x) (((x) & FR_CMDMASK) == FR_AUTH) #define FR_ISPREAUTH(x) (((x) & FR_CMDMASK) == FR_PREAUTH) #define FR_ISACCOUNT(x) (((x) & FR_CMDMASK) == FR_ACCOUNT) #define FR_ISSKIP(x) (((x) & FR_CMDMASK) == FR_SKIP) #define FR_ISDECAPS(x) (((x) & FR_CMDMASK) == FR_DECAPSULATE) #define FR_ISNOMATCH(x) ((x) & FR_NOMATCH) #define FR_INOUT (FR_INQUE|FR_OUTQUE) /* * recognized flags for SIOCGETFF and SIOCSETFF, and get put in fr_flags */ #define FF_LOGPASS 0x10000000 #define FF_LOGBLOCK 0x20000000 #define FF_LOGNOMATCH 0x40000000 #define FF_LOGGING (FF_LOGPASS|FF_LOGBLOCK|FF_LOGNOMATCH) #define FF_BLOCKNONIP 0x80000000 /* Solaris2 Only */ /* * Structure that passes information on what/how to flush to the kernel. */ typedef struct ipfflush { int ipflu_how; int ipflu_arg; } ipfflush_t; /* * */ typedef struct ipfgetctl { u_int ipfg_min; /* min value */ u_int ipfg_current; /* current value */ u_int ipfg_max; /* max value */ u_int ipfg_default; /* default value */ u_int ipfg_steps; /* value increments */ char ipfg_name[40]; /* tag name for this control */ } ipfgetctl_t; typedef struct ipfsetctl { int ipfs_which; /* 0 = min 1 = current 2 = max 3 = default */ u_int ipfs_value; /* min value */ char ipfs_name[40]; /* tag name for this control */ } ipfsetctl_t; /* * Some of the statistics below are in their own counters, but most are kept * in this single structure so that they can all easily be collected and * copied back as required. 
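 *
 * Illustrative note (an assumption, not from the original text): the kernel
 * keeps one copy of these counters per direction, indexed by fin_out, e.g.
 *
 *	softc->ipf_stats[fin->fin_out].fr_block++;
 *
 * and both copies are returned together in the f_st[2] member of friostat
 * when userland collects them (see SIOCGETFS further below).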
*/ typedef struct ipf_statistics { u_long fr_icmp_coalesce; u_long fr_tcp_frag; u_long fr_tcp_pullup; u_long fr_tcp_short; u_long fr_tcp_small; u_long fr_tcp_bad_flags; u_long fr_udp_pullup; u_long fr_ip_freed; u_long fr_v6_ah_bad; u_long fr_v6_bad; u_long fr_v6_badfrag; u_long fr_v6_dst_bad; u_long fr_v6_esp_pullup; u_long fr_v6_ext_short; u_long fr_v6_ext_pullup; u_long fr_v6_ext_hlen; u_long fr_v6_frag_bad; u_long fr_v6_frag_pullup; u_long fr_v6_frag_size; u_long fr_v6_gre_pullup; u_long fr_v6_icmp6_pullup; u_long fr_v6_rh_bad; u_long fr_v6_badttl; /* TTL in packet doesn't reach minimum */ u_long fr_v4_ah_bad; u_long fr_v4_ah_pullup; u_long fr_v4_esp_pullup; u_long fr_v4_cipso_bad; u_long fr_v4_cipso_tlen; u_long fr_v4_gre_frag; u_long fr_v4_gre_pullup; u_long fr_v4_icmp_frag; u_long fr_v4_icmp_pullup; u_long fr_v4_badttl; /* TTL in packet doesn't reach minimum */ u_long fr_v4_badsrc; /* source received doesn't match route */ u_long fr_l4_badcksum; /* layer 4 header checksum failure */ u_long fr_badcoalesces; u_long fr_pass; /* packets allowed */ u_long fr_block; /* packets denied */ u_long fr_nom; /* packets which don't match any rule */ u_long fr_short; /* packets which are short */ u_long fr_ppkl; /* packets allowed and logged */ u_long fr_bpkl; /* packets denied and logged */ u_long fr_npkl; /* packets unmatched and logged */ u_long fr_ret; /* packets for which a return is sent */ u_long fr_acct; /* packets for which counting was performed */ u_long fr_bnfr; /* bad attempts to allocate fragment state */ u_long fr_nfr; /* new fragment state kept */ u_long fr_cfr; /* add new fragment state but complete pkt */ u_long fr_bads; /* bad attempts to allocate packet state */ u_long fr_ads; /* new packet state kept */ u_long fr_chit; /* cached hit */ u_long fr_cmiss; /* cached miss */ u_long fr_tcpbad; /* TCP checksum check failures */ u_long fr_pull[2]; /* good and bad pullup attempts */ u_long fr_bad; /* bad IP packets to the filter */ u_long fr_ipv6; /* IPv6 packets in/out */ u_long fr_ppshit; /* dropped because of pps ceiling */ u_long fr_ipud; /* IP id update failures */ u_long fr_blocked[FRB_MAX_VALUE + 1]; } ipf_statistics_t; /* * Log structure. Each packet header logged is prepended by one of these. * Following this in the log records read from the device will be an ipflog * structure which is then followed by any packet data. */ typedef struct iplog { u_32_t ipl_magic; u_int ipl_count; u_32_t ipl_seqnum; struct timeval ipl_time; size_t ipl_dsize; struct iplog *ipl_next; } iplog_t; #define ipl_sec ipl_time.tv_sec #define ipl_usec ipl_time.tv_usec #define IPL_MAGIC 0x49504c4d /* 'IPLM' */ #define IPL_MAGIC_NAT 0x49504c4e /* 'IPLN' */ #define IPL_MAGIC_STATE 0x49504c53 /* 'IPLS' */ #define IPLOG_SIZE sizeof(iplog_t) typedef struct ipflog { u_int fl_unit; u_32_t fl_rule; u_32_t fl_flags; u_32_t fl_lflags; u_32_t fl_logtag; ipftag_t fl_nattag; u_short fl_plen; /* extra data after hlen */ u_short fl_loglevel; /* syslog log level */ char fl_group[FR_GROUPLEN]; u_char fl_hlen; /* length of IP headers saved */ u_char fl_dir; u_char fl_breason; /* from fin_reason */ u_char fl_family; /* address family of packet logged */ char fl_ifname[LIFNAMSIZ]; } ipflog_t; #ifndef IPF_LOGGING # define IPF_LOGGING 0 #endif #ifndef IPF_DEFAULT_PASS # define IPF_DEFAULT_PASS FR_PASS #endif #define DEFAULT_IPFLOGSIZE 32768 #ifndef IPFILTER_LOGSIZE # define IPFILTER_LOGSIZE DEFAULT_IPFLOGSIZE #else # if IPFILTER_LOGSIZE < 8192 # error IPFILTER_LOGSIZE too small. 
Must be >= 8192 # endif #endif #define IPF_OPTCOPY 0x07ff00 /* bit mask of copied options */ /* * Device filenames for reading log information. Use ipf on Solaris2 because * ipl is already a name used by something else. */ #ifndef IPL_NAME # if SOLARIS # define IPL_NAME "/dev/ipf" # else # define IPL_NAME "/dev/ipl" # endif #endif /* * Pathnames for various IP Filter control devices. Used by LKM * and userland, so defined here. */ #define IPNAT_NAME "/dev/ipnat" #define IPSTATE_NAME "/dev/ipstate" #define IPAUTH_NAME "/dev/ipauth" #define IPSYNC_NAME "/dev/ipsync" #define IPSCAN_NAME "/dev/ipscan" #define IPLOOKUP_NAME "/dev/iplookup" #define IPL_LOGIPF 0 /* Minor device #'s for accessing logs */ #define IPL_LOGNAT 1 #define IPL_LOGSTATE 2 #define IPL_LOGAUTH 3 #define IPL_LOGSYNC 4 #define IPL_LOGSCAN 5 #define IPL_LOGLOOKUP 6 #define IPL_LOGCOUNT 7 #define IPL_LOGMAX 7 #define IPL_LOGSIZE IPL_LOGMAX + 1 #define IPL_LOGALL -1 #define IPL_LOGNONE -2 /* * For SIOCGETFS */ typedef struct friostat { ipf_statistics_t f_st[2]; frentry_t *f_ipf[2][2]; frentry_t *f_acct[2][2]; frentry_t *f_auth; struct frgroup *f_groups[IPL_LOGSIZE][2]; u_long f_froute[2]; u_long f_log_ok; u_long f_log_fail; u_long f_rb_no_mem; u_long f_rb_node_max; u_32_t f_ticks; int f_locks[IPL_LOGSIZE]; int f_defpass; /* default pass - from fr_pass */ int f_active; /* 1 or 0 - active rule set */ int f_running; /* 1 if running, else 0 */ int f_logging; /* 1 if enabled, else 0 */ int f_features; char f_version[32]; /* version string */ } friostat_t; #define f_fin f_ipf[0] #define f_fout f_ipf[1] #define f_acctin f_acct[0] #define f_acctout f_acct[1] #define IPF_FEAT_LKM 0x001 #define IPF_FEAT_LOG 0x002 #define IPF_FEAT_LOOKUP 0x004 #define IPF_FEAT_BPF 0x008 #define IPF_FEAT_COMPILED 0x010 #define IPF_FEAT_CKSUM 0x020 #define IPF_FEAT_SYNC 0x040 #define IPF_FEAT_SCAN 0x080 #define IPF_FEAT_IPV6 0x100 typedef struct optlist { u_short ol_val; int ol_bit; } optlist_t; /* * Group list structure. */ typedef struct frgroup { struct frgroup *fg_next; struct frentry *fg_head; struct frentry *fg_start; struct frgroup **fg_set; u_32_t fg_flags; int fg_ref; char fg_name[FR_GROUPLEN]; } frgroup_t; #define FG_NAME(g) (*(g)->fg_name == '\0' ? "" : (g)->fg_name) /* * Used by state and NAT tables */ typedef struct icmpinfo { u_short ici_id; u_short ici_seq; u_char ici_type; } icmpinfo_t; typedef struct udpinfo { u_short us_sport; u_short us_dport; } udpinfo_t; typedef struct tcpdata { u_32_t td_end; u_32_t td_maxend; u_32_t td_maxwin; u_32_t td_winscale; u_32_t td_maxseg; int td_winflags; } tcpdata_t; #define TCP_WSCALE_MAX 14 #define TCP_WSCALE_SEEN 0x00000001 #define TCP_WSCALE_FIRST 0x00000002 #define TCP_SACK_PERMIT 0x00000004 typedef struct tcpinfo { u_32_t ts_sport; u_32_t ts_dport; tcpdata_t ts_data[2]; } tcpinfo_t; /* * Structures to define a GRE header as seen in a packet. 
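 *
 * Illustrative use (an assumption, not from the original text): once a
 * grehdr_t has been pulled out of a packet, the flag bits and payload type
 * are reached through the gr_* shorthands defined below; gr_ptype is
 * assumed here to still be in network byte order, e.g.
 *
 *	grehdr_t gre;
 *	u_short ptype;
 *
 *	bcopy(fin->fin_dp, (char *)&gre, sizeof(gre));
 *	if (gre.gr_C == 0)
 *		ptype = ntohs(gre.gr_ptype);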
*/ struct grebits { #if defined(sparc) u_32_t grb_ver:3; u_32_t grb_flags:3; u_32_t grb_A:1; u_32_t grb_recur:1; u_32_t grb_s:1; u_32_t grb_S:1; u_32_t grb_K:1; u_32_t grb_R:1; u_32_t grb_C:1; #else u_32_t grb_C:1; u_32_t grb_R:1; u_32_t grb_K:1; u_32_t grb_S:1; u_32_t grb_s:1; u_32_t grb_recur:1; u_32_t grb_A:1; u_32_t grb_flags:3; u_32_t grb_ver:3; #endif u_short grb_ptype; }; typedef struct grehdr { union { struct grebits gru_bits; u_short gru_flags; } gr_un; u_short gr_len; u_short gr_call; } grehdr_t; #define gr_flags gr_un.gru_flags #define gr_bits gr_un.gru_bits #define gr_ptype gr_bits.grb_ptype #define gr_C gr_bits.grb_C #define gr_R gr_bits.grb_R #define gr_K gr_bits.grb_K #define gr_S gr_bits.grb_S #define gr_s gr_bits.grb_s #define gr_recur gr_bits.grb_recur #define gr_A gr_bits.grb_A #define gr_ver gr_bits.grb_ver /* * GRE information tracked by "keep state" */ typedef struct greinfo { u_short gs_call[2]; u_short gs_flags; u_short gs_ptype; } greinfo_t; #define GRE_REV(x) ((ntohs(x) >> 13) & 7) /* * Format of an Authentication header */ typedef struct authhdr { u_char ah_next; u_char ah_plen; u_short ah_reserved; u_32_t ah_spi; u_32_t ah_seq; /* Following the sequence number field is 0 or more bytes of */ /* authentication data, as specified by ah_plen - RFC 2402. */ } authhdr_t; /* * Timeout tail queue list member */ typedef struct ipftqent { struct ipftqent **tqe_pnext; struct ipftqent *tqe_next; struct ipftq *tqe_ifq; void *tqe_parent; /* pointer back to NAT/state struct */ u_32_t tqe_die; /* when this entriy is to die */ u_32_t tqe_touched; int tqe_flags; int tqe_state[2]; /* current state of this entry */ } ipftqent_t; #define TQE_RULEBASED 0x00000001 #define TQE_DELETE 0x00000002 /* * Timeout tail queue head for IPFilter */ typedef struct ipftq { ipfmutex_t ifq_lock; u_int ifq_ttl; ipftqent_t *ifq_head; ipftqent_t **ifq_tail; struct ipftq *ifq_next; struct ipftq **ifq_pnext; int ifq_ref; u_int ifq_flags; } ipftq_t; #define IFQF_USER 0x01 /* User defined aging */ #define IFQF_DELETE 0x02 /* Marked for deletion */ #define IFQF_PROXY 0x04 /* Timeout queue in use by a proxy */ #define IPFTQ_INIT(x,y,z) do { \ (x)->ifq_ttl = (y); \ (x)->ifq_head = NULL; \ (x)->ifq_ref = 1; \ (x)->ifq_tail = &(x)->ifq_head; \ MUTEX_INIT(&(x)->ifq_lock, (z)); \ } while (0) #define IPF_HZ_MULT 1 #define IPF_HZ_DIVIDE 2 /* How many times a second ipfilter */ /* checks its timeout queues. */ #define IPF_TTLVAL(x) (((x) / IPF_HZ_MULT) * IPF_HZ_DIVIDE) typedef int (*ipftq_delete_fn_t)(struct ipf_main_softc_s *, void *); /* * Object structure description. For passing through in ioctls. 
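 *
 * A minimal userland sketch of the convention (assumed usage, not from the
 * original text): any structure exchanged with the driver is wrapped in one
 * of these, e.g. fetching the filter statistics (IPFILTER_VERSION and
 * SIOCGETFS are defined elsewhere in this header):
 *
 *	friostat_t fio;
 *	ipfobj_t obj;
 *
 *	bzero((char *)&obj, sizeof(obj));
 *	obj.ipfo_rev = IPFILTER_VERSION;
 *	obj.ipfo_type = IPFOBJ_IPFSTAT;
 *	obj.ipfo_size = sizeof(fio);
 *	obj.ipfo_ptr = &fio;
 *	if (ioctl(fd, SIOCGETFS, &obj) == -1)
 *		perror("SIOCGETFS");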
*/ typedef struct ipfobj { u_32_t ipfo_rev; /* IPFilter version number */ u_32_t ipfo_size; /* size of object at ipfo_ptr */ void *ipfo_ptr; /* pointer to object */ int ipfo_type; /* type of object being pointed to */ int ipfo_offset; /* bytes from ipfo_ptr where to start */ int ipfo_retval; /* return value */ u_char ipfo_xxxpad[28]; /* reserved for future use */ } ipfobj_t; #define IPFOBJ_FRENTRY 0 /* struct frentry */ #define IPFOBJ_IPFSTAT 1 /* struct friostat */ #define IPFOBJ_IPFINFO 2 /* struct fr_info */ #define IPFOBJ_AUTHSTAT 3 /* struct fr_authstat */ #define IPFOBJ_FRAGSTAT 4 /* struct ipfrstat */ #define IPFOBJ_IPNAT 5 /* struct ipnat */ #define IPFOBJ_NATSTAT 6 /* struct natstat */ #define IPFOBJ_STATESAVE 7 /* struct ipstate_save */ #define IPFOBJ_NATSAVE 8 /* struct nat_save */ #define IPFOBJ_NATLOOKUP 9 /* struct natlookup */ #define IPFOBJ_IPSTATE 10 /* struct ipstate */ #define IPFOBJ_STATESTAT 11 /* struct ips_stat */ #define IPFOBJ_FRAUTH 12 /* struct frauth */ #define IPFOBJ_TUNEABLE 13 /* struct ipftune */ #define IPFOBJ_NAT 14 /* struct nat */ #define IPFOBJ_IPFITER 15 /* struct ipfruleiter */ #define IPFOBJ_GENITER 16 /* struct ipfgeniter */ #define IPFOBJ_GTABLE 17 /* struct ipftable */ #define IPFOBJ_LOOKUPITER 18 /* struct ipflookupiter */ #define IPFOBJ_STATETQTAB 19 /* struct ipftq * NSTATES */ #define IPFOBJ_IPFEXPR 20 #define IPFOBJ_PROXYCTL 21 /* strct ap_ctl */ #define IPFOBJ_FRIPF 22 /* structfripf */ #define IPFOBJ_COUNT 23 /* How many #defines are above this? */ typedef union ipftunevalptr { void *ipftp_void; u_long *ipftp_long; u_int *ipftp_int; u_short *ipftp_short; u_char *ipftp_char; u_long ipftp_offset; } ipftunevalptr_t; typedef union ipftuneval { u_long ipftu_long; u_int ipftu_int; u_short ipftu_short; u_char ipftu_char; } ipftuneval_t; struct ipftuneable; typedef int (* ipftunefunc_t) __P((struct ipf_main_softc_s *, struct ipftuneable *, ipftuneval_t *)); typedef struct ipftuneable { ipftunevalptr_t ipft_una; const char *ipft_name; u_long ipft_min; u_long ipft_max; int ipft_sz; int ipft_flags; struct ipftuneable *ipft_next; ipftunefunc_t ipft_func; } ipftuneable_t; #define ipft_addr ipft_una.ipftp_void #define ipft_plong ipft_una.ipftp_long #define ipft_pint ipft_una.ipftp_int #define ipft_pshort ipft_una.ipftp_short #define ipft_pchar ipft_una.ipftp_char #define IPFT_RDONLY 1 /* read-only */ #define IPFT_WRDISABLED 2 /* write when disabled only */ typedef struct ipftune { void *ipft_cookie; ipftuneval_t ipft_un; u_long ipft_min; u_long ipft_max; int ipft_sz; int ipft_flags; char ipft_name[80]; } ipftune_t; #define ipft_vlong ipft_un.ipftu_long #define ipft_vint ipft_un.ipftu_int #define ipft_vshort ipft_un.ipftu_short #define ipft_vchar ipft_un.ipftu_char /* * Hash table header */ #define IPFHASH(x,y) typedef struct { \ ipfrwlock_t ipfh_lock; \ struct x *ipfh_head; \ } y /* ** HPUX Port */ -#if !defined(CDEV_MAJOR) && defined (__FreeBSD_version) && \ - (__FreeBSD_version >= 220000) +#if !defined(CDEV_MAJOR) && defined (__FreeBSD_version) # define CDEV_MAJOR 79 #endif #ifdef _KERNEL # define FR_VERBOSE(verb_pr) # define FR_DEBUG(verb_pr) #else extern void ipfkdebug __P((char *, ...)); extern void ipfkverbose __P((char *, ...)); # define FR_VERBOSE(verb_pr) ipfkverbose verb_pr # define FR_DEBUG(verb_pr) ipfkdebug verb_pr #endif /* * */ typedef struct ipfruleiter { int iri_inout; char iri_group[FR_GROUPLEN]; int iri_active; int iri_nrules; int iri_v; /* No longer used (compatibility) */ frentry_t *iri_rule; } ipfruleiter_t; /* * Values for 
iri_inout */ #define F_IN 0 #define F_OUT 1 #define F_ACIN 2 #define F_ACOUT 3 typedef struct ipfgeniter { int igi_type; int igi_nitems; void *igi_data; } ipfgeniter_t; #define IPFGENITER_IPF 0 #define IPFGENITER_NAT 1 #define IPFGENITER_IPNAT 2 #define IPFGENITER_FRAG 3 #define IPFGENITER_AUTH 4 #define IPFGENITER_STATE 5 #define IPFGENITER_NATFRAG 6 #define IPFGENITER_HOSTMAP 7 #define IPFGENITER_LOOKUP 8 typedef struct ipftable { int ita_type; void *ita_table; } ipftable_t; #define IPFTABLE_BUCKETS 1 #define IPFTABLE_BUCKETS_NATIN 2 #define IPFTABLE_BUCKETS_NATOUT 3 typedef struct ipf_v4_masktab_s { u_32_t imt4_active[33]; int imt4_masks[33]; int imt4_max; } ipf_v4_masktab_t; typedef struct ipf_v6_masktab_s { i6addr_t imt6_active[129]; int imt6_masks[129]; int imt6_max; } ipf_v6_masktab_t; /* * */ typedef struct ipftoken { struct ipftoken *ipt_next; struct ipftoken **ipt_pnext; void *ipt_ctx; void *ipt_data; u_long ipt_die; int ipt_type; int ipt_uid; int ipt_subtype; int ipt_ref; int ipt_complete; } ipftoken_t; /* * */ typedef struct ipfexp { int ipfe_cmd; int ipfe_not; int ipfe_narg; int ipfe_size; int ipfe_arg0[1]; } ipfexp_t; /* * Currently support commands (ipfe_cmd) * 32bits is split up follows: * aabbcccc * aa = 0 = packet matching, 1 = meta data matching * bb = IP protocol number * cccc = command */ #define IPF_EXP_IP_PR 0x00000001 #define IPF_EXP_IP_ADDR 0x00000002 #define IPF_EXP_IP_SRCADDR 0x00000003 #define IPF_EXP_IP_DSTADDR 0x00000004 #define IPF_EXP_IP6_ADDR 0x00000005 #define IPF_EXP_IP6_SRCADDR 0x00000006 #define IPF_EXP_IP6_DSTADDR 0x00000007 #define IPF_EXP_TCP_FLAGS 0x00060001 #define IPF_EXP_TCP_PORT 0x00060002 #define IPF_EXP_TCP_SPORT 0x00060003 #define IPF_EXP_TCP_DPORT 0x00060004 #define IPF_EXP_UDP_PORT 0x00110002 #define IPF_EXP_UDP_SPORT 0x00110003 #define IPF_EXP_UDP_DPORT 0x00110004 #define IPF_EXP_IDLE_GT 0x01000001 #define IPF_EXP_TCP_STATE 0x01060002 #define IPF_EXP_END 0xffffffff #define ONE_DAY IPF_TTLVAL(1 * 86400) /* 1 day */ #define FIVE_DAYS (5 * ONE_DAY) typedef struct ipf_main_softc_s { struct ipf_main_softc_s *ipf_next; ipfmutex_t ipf_rw; ipfmutex_t ipf_timeoutlock; ipfrwlock_t ipf_mutex; ipfrwlock_t ipf_frag; ipfrwlock_t ipf_global; ipfrwlock_t ipf_tokens; ipfrwlock_t ipf_state; ipfrwlock_t ipf_nat; ipfrwlock_t ipf_natfrag; ipfrwlock_t ipf_poolrw; int ipf_dynamic_softc; int ipf_refcnt; int ipf_running; int ipf_flags; int ipf_active; int ipf_control_forwarding; int ipf_update_ipid; int ipf_chksrc; /* causes a system crash if enabled */ int ipf_pass; int ipf_minttl; int ipf_icmpminfragmtu; int ipf_interror; /* Should be in a struct that is per */ /* thread or process. Does not belong */ /* here but there's a lot more work */ /* in doing that properly. For now, */ /* it is squatting. 
*/ u_int ipf_tcpidletimeout; u_int ipf_tcpclosewait; u_int ipf_tcplastack; u_int ipf_tcptimewait; u_int ipf_tcptimeout; u_int ipf_tcpsynsent; u_int ipf_tcpsynrecv; u_int ipf_tcpclosed; u_int ipf_tcphalfclosed; u_int ipf_udptimeout; u_int ipf_udpacktimeout; u_int ipf_icmptimeout; u_int ipf_icmpacktimeout; u_int ipf_iptimeout; u_long ipf_ticks; u_long ipf_userifqs; u_long ipf_rb_no_mem; u_long ipf_rb_node_max; u_long ipf_frouteok[2]; ipftuneable_t *ipf_tuners; void *ipf_frag_soft; void *ipf_nat_soft; void *ipf_state_soft; void *ipf_auth_soft; void *ipf_proxy_soft; void *ipf_sync_soft; void *ipf_lookup_soft; void *ipf_log_soft; struct frgroup *ipf_groups[IPL_LOGSIZE][2]; frentry_t *ipf_rules[2][2]; frentry_t *ipf_acct[2][2]; frentry_t *ipf_rule_explist[2]; ipftoken_t *ipf_token_head; ipftoken_t **ipf_token_tail; #if defined(__FreeBSD_version) && defined(_KERNEL) struct callout ipf_slow_ch; #endif #if NETBSD_GE_REV(104040000) struct callout ipf_slow_ch; #endif #if SOLARIS timeout_id_t ipf_slow_ch; #endif #if defined(_KERNEL) # if SOLARIS struct pollhead ipf_poll_head[IPL_LOGSIZE]; void *ipf_dip; # if defined(INSTANCES) int ipf_get_loopback; u_long ipf_idnum; net_handle_t ipf_nd_v4; net_handle_t ipf_nd_v6; hook_t *ipf_hk_v4_in; hook_t *ipf_hk_v4_out; hook_t *ipf_hk_v4_nic; hook_t *ipf_hk_v6_in; hook_t *ipf_hk_v6_out; hook_t *ipf_hk_v6_nic; hook_t *ipf_hk_loop_v4_in; hook_t *ipf_hk_loop_v4_out; hook_t *ipf_hk_loop_v6_in; hook_t *ipf_hk_loop_v6_out; # endif # else struct selinfo ipf_selwait[IPL_LOGSIZE]; # endif #endif void *ipf_slow; ipf_statistics_t ipf_stats[2]; u_char ipf_iss_secret[32]; u_short ipf_ip_id; } ipf_main_softc_t; #define IPFERROR(_e) do { softc->ipf_interror = (_e); \ DT1(user_error, int, _e); \ } while (0) #ifndef _KERNEL extern int ipf_check __P((void *, struct ip *, int, struct ifnet *, int, mb_t **)); extern struct ifnet *get_unit __P((char *, int)); extern char *get_ifname __P((struct ifnet *)); extern int ipfioctl __P((ipf_main_softc_t *, int, ioctlcmd_t, caddr_t, int)); extern void m_freem __P((mb_t *)); extern size_t msgdsize __P((mb_t *)); extern int bcopywrap __P((void *, void *, size_t)); extern void ip_fillid(struct ip *); #else /* #ifndef _KERNEL */ # if defined(__NetBSD__) && defined(PFIL_HOOKS) extern void ipfilterattach __P((int)); # endif extern int ipl_enable __P((void)); extern int ipl_disable __P((void)); # ifdef MENTAT /* XXX MENTAT is always defined for Solaris */ extern int ipf_check __P((void *, struct ip *, int, struct ifnet *, int, void *, mblk_t **)); # if SOLARIS extern void ipf_prependmbt(fr_info_t *, mblk_t *); extern int ipfioctl __P((dev_t, int, intptr_t, int, cred_t *, int *)); # endif extern int ipf_qout __P((queue_t *, mblk_t *)); # else /* MENTAT */ /* XXX MENTAT is never defined for FreeBSD & NetBSD */ extern int ipf_check __P((void *, struct ip *, int, struct ifnet *, int, mb_t **)); extern int (*fr_checkp) __P((ip_t *, int, void *, int, mb_t **)); extern size_t mbufchainlen __P((mb_t *)); # ifdef IPFILTER_LKM extern int ipf_identify __P((char *)); # endif # if defined(__FreeBSD_version) extern int ipfioctl __P((struct cdev*, u_long, caddr_t, int, struct thread *)); # elif defined(__NetBSD__) extern int ipfioctl __P((dev_t, u_long, void *, int, struct lwp *)); # endif # endif /* MENTAT */ # if defined(__FreeBSD_version) extern int ipf_pfil_hook __P((void)); extern int ipf_pfil_unhook __P((void)); extern void ipf_event_reg __P((void)); extern void ipf_event_dereg __P((void)); # endif # if defined(INSTANCES) extern ipf_main_softc_t 
*ipf_find_softc __P((u_long)); extern int ipf_set_loopback __P((ipf_main_softc_t *, ipftuneable_t *, ipftuneval_t *)); # endif #endif /* #ifndef _KERNEL */ extern char *memstr __P((const char *, char *, size_t, size_t)); extern int count4bits __P((u_32_t)); #ifdef USE_INET6 extern int count6bits __P((u_32_t *)); #endif extern int frrequest __P((ipf_main_softc_t *, int, ioctlcmd_t, caddr_t, int, int)); extern char *getifname __P((struct ifnet *)); extern int ipfattach __P((ipf_main_softc_t *)); extern int ipfdetach __P((ipf_main_softc_t *)); extern u_short ipf_cksum __P((u_short *, int)); extern int copyinptr __P((ipf_main_softc_t *, void *, void *, size_t)); extern int copyoutptr __P((ipf_main_softc_t *, void *, void *, size_t)); extern int ipf_fastroute __P((mb_t *, mb_t **, fr_info_t *, frdest_t *)); extern int ipf_inject __P((fr_info_t *, mb_t *)); extern int ipf_inobj __P((ipf_main_softc_t *, void *, ipfobj_t *, void *, int)); extern int ipf_inobjsz __P((ipf_main_softc_t *, void *, void *, int , int)); extern int ipf_ioctlswitch __P((ipf_main_softc_t *, int, void *, ioctlcmd_t, int, int, void *)); extern int ipf_ipf_ioctl __P((ipf_main_softc_t *, caddr_t, ioctlcmd_t, int, int, void *)); extern int ipf_ipftune __P((ipf_main_softc_t *, ioctlcmd_t, void *)); extern int ipf_matcharray_load __P((ipf_main_softc_t *, caddr_t, ipfobj_t *, int **)); extern int ipf_matcharray_verify __P((int *, int)); extern int ipf_outobj __P((ipf_main_softc_t *, void *, void *, int)); extern int ipf_outobjk __P((ipf_main_softc_t *, ipfobj_t *, void *)); extern int ipf_outobjsz __P((ipf_main_softc_t *, void *, void *, int, int)); extern void *ipf_pullup __P((mb_t *, fr_info_t *, int)); extern int ipf_resolvedest __P((ipf_main_softc_t *, char *, struct frdest *, int)); extern int ipf_resolvefunc __P((ipf_main_softc_t *, void *)); extern void *ipf_resolvenic __P((ipf_main_softc_t *, char *, int)); extern int ipf_send_icmp_err __P((int, fr_info_t *, int)); extern int ipf_send_reset __P((fr_info_t *)); extern void ipf_apply_timeout __P((ipftq_t *, u_int)); extern ipftq_t *ipf_addtimeoutqueue __P((ipf_main_softc_t *, ipftq_t **, u_int)); extern void ipf_deletequeueentry __P((ipftqent_t *)); extern int ipf_deletetimeoutqueue __P((ipftq_t *)); extern void ipf_freetimeoutqueue __P((ipf_main_softc_t *, ipftq_t *)); extern void ipf_movequeue __P((u_long, ipftqent_t *, ipftq_t *, ipftq_t *)); extern void ipf_queueappend __P((u_long, ipftqent_t *, ipftq_t *, void *)); extern void ipf_queueback __P((u_long, ipftqent_t *)); extern int ipf_queueflush __P((ipf_main_softc_t *, ipftq_delete_fn_t, ipftq_t *, ipftq_t *, u_int *, int, int)); extern void ipf_queuefront __P((ipftqent_t *)); extern int ipf_settimeout_tcp __P((ipftuneable_t *, ipftuneval_t *, ipftq_t *)); extern int ipf_checkv4sum __P((fr_info_t *)); extern int ipf_checkl4sum __P((fr_info_t *)); extern int ipf_ifpfillv4addr __P((int, struct sockaddr_in *, struct sockaddr_in *, struct in_addr *, struct in_addr *)); extern int ipf_coalesce __P((fr_info_t *)); #ifdef USE_INET6 extern int ipf_checkv6sum __P((fr_info_t *)); extern int ipf_ifpfillv6addr __P((int, struct sockaddr_in6 *, struct sockaddr_in6 *, i6addr_t *, i6addr_t *)); #endif extern int ipf_tune_add __P((ipf_main_softc_t *, ipftuneable_t *)); extern int ipf_tune_add_array __P((ipf_main_softc_t *, ipftuneable_t *)); extern int ipf_tune_del __P((ipf_main_softc_t *, ipftuneable_t *)); extern int ipf_tune_del_array __P((ipf_main_softc_t *, ipftuneable_t *)); extern int ipf_tune_array_link __P((ipf_main_softc_t *, 
ipftuneable_t *)); extern int ipf_tune_array_unlink __P((ipf_main_softc_t *, ipftuneable_t *)); extern ipftuneable_t *ipf_tune_array_copy __P((void *, size_t, ipftuneable_t *)); extern int ipf_pr_pullup __P((fr_info_t *, int)); extern int ipf_flush __P((ipf_main_softc_t *, minor_t, int)); extern frgroup_t *ipf_group_add __P((ipf_main_softc_t *, char *, void *, u_32_t, minor_t, int)); extern void ipf_group_del __P((ipf_main_softc_t *, frgroup_t *, frentry_t *)); extern int ipf_derefrule __P((ipf_main_softc_t *, frentry_t **)); extern frgroup_t *ipf_findgroup __P((ipf_main_softc_t *, char *, minor_t, int, frgroup_t ***)); extern int ipf_log_init __P((void)); extern int ipf_log_bytesused __P((ipf_main_softc_t *, int)); extern int ipf_log_canread __P((ipf_main_softc_t *, int)); extern int ipf_log_clear __P((ipf_main_softc_t *, minor_t)); extern u_long ipf_log_failures __P((ipf_main_softc_t *, int)); extern int ipf_log_read __P((ipf_main_softc_t *, minor_t, uio_t *)); extern int ipf_log_items __P((ipf_main_softc_t *, int, fr_info_t *, void **, size_t *, int *, int)); extern u_long ipf_log_logok __P((ipf_main_softc_t *, int)); extern void ipf_log_unload __P((ipf_main_softc_t *)); extern int ipf_log_pkt __P((fr_info_t *, u_int)); extern frentry_t *ipf_acctpkt __P((fr_info_t *, u_32_t *)); extern u_short fr_cksum __P((fr_info_t *, ip_t *, int, void *)); extern void ipf_deinitialise __P((ipf_main_softc_t *)); extern int ipf_deliverlocal __P((ipf_main_softc_t *, int, void *, i6addr_t *)); extern frentry_t *ipf_dstgrpmap __P((fr_info_t *, u_32_t *)); extern void ipf_fixskip __P((frentry_t **, frentry_t *, int)); extern void ipf_forgetifp __P((ipf_main_softc_t *, void *)); extern frentry_t *ipf_getrulen __P((ipf_main_softc_t *, int, char *, u_32_t)); extern int ipf_ifpaddr __P((ipf_main_softc_t *, int, int, void *, i6addr_t *, i6addr_t *)); extern void ipf_inet_mask_add __P((int, ipf_v4_masktab_t *)); extern void ipf_inet_mask_del __P((int, ipf_v4_masktab_t *)); #ifdef USE_INET6 extern void ipf_inet6_mask_add __P((int, i6addr_t *, ipf_v6_masktab_t *)); extern void ipf_inet6_mask_del __P((int, i6addr_t *, ipf_v6_masktab_t *)); #endif extern int ipf_initialise __P((void)); extern int ipf_lock __P((caddr_t, int *)); extern int ipf_makefrip __P((int, ip_t *, fr_info_t *)); extern int ipf_matchtag __P((ipftag_t *, ipftag_t *)); extern int ipf_matchicmpqueryreply __P((int, icmpinfo_t *, struct icmp *, int)); extern u_32_t ipf_newisn __P((fr_info_t *)); extern u_int ipf_pcksum __P((fr_info_t *, int, u_int)); #ifdef USE_INET6 extern u_int ipf_pcksum6 __P((struct mbuf *, ip6_t *, u_int32_t, u_int32_t)); #endif extern void ipf_rule_expire __P((ipf_main_softc_t *)); extern int ipf_scanlist __P((fr_info_t *, u_32_t)); extern frentry_t *ipf_srcgrpmap __P((fr_info_t *, u_32_t *)); extern int ipf_tcpudpchk __P((fr_ip_t *, frtuc_t *)); extern int ipf_verifysrc __P((fr_info_t *fin)); extern int ipf_zerostats __P((ipf_main_softc_t *, char *)); extern int ipf_getnextrule __P((ipf_main_softc_t *, ipftoken_t *, void *)); extern int ipf_sync __P((ipf_main_softc_t *, void *)); extern int ipf_token_deref __P((ipf_main_softc_t *, ipftoken_t *)); extern void ipf_token_expire __P((ipf_main_softc_t *)); extern ipftoken_t *ipf_token_find __P((ipf_main_softc_t *, int, int, void *)); extern int ipf_token_del __P((ipf_main_softc_t *, int, int, void *)); extern void ipf_token_mark_complete __P((ipftoken_t *)); extern int ipf_genericiter __P((ipf_main_softc_t *, void *, int, void *)); #ifdef IPFILTER_LOOKUP extern void 
*ipf_resolvelookup __P((int, u_int, u_int, lookupfunc_t *)); #endif extern u_32_t ipf_random __P((void)); extern int ipf_main_load __P((void)); extern void *ipf_main_soft_create __P((void *)); extern void ipf_main_soft_destroy __P((ipf_main_softc_t *)); extern int ipf_main_soft_init __P((ipf_main_softc_t *)); extern int ipf_main_soft_fini __P((ipf_main_softc_t *)); extern int ipf_main_unload __P((void)); extern int ipf_load_all __P((void)); extern int ipf_unload_all __P((void)); extern void ipf_destroy_all __P((ipf_main_softc_t *)); extern ipf_main_softc_t *ipf_create_all __P((void *)); extern int ipf_init_all __P((ipf_main_softc_t *)); extern int ipf_fini_all __P((ipf_main_softc_t *)); extern void ipf_log_soft_destroy __P((ipf_main_softc_t *, void *)); extern void *ipf_log_soft_create __P((ipf_main_softc_t *)); extern int ipf_log_soft_init __P((ipf_main_softc_t *, void *)); extern int ipf_log_soft_fini __P((ipf_main_softc_t *, void *)); extern int ipf_log_main_load __P((void)); extern int ipf_log_main_unload __P((void)); extern char ipfilter_version[]; #ifdef USE_INET6 extern int icmptoicmp6types[ICMP_MAXTYPE+1]; extern int icmptoicmp6unreach[ICMP_MAX_UNREACH]; extern int icmpreplytype6[ICMP6_MAXTYPE + 1]; #endif #ifdef IPFILTER_COMPAT extern int ipf_in_compat __P((ipf_main_softc_t *, ipfobj_t *, void *,int)); extern int ipf_out_compat __P((ipf_main_softc_t *, ipfobj_t *, void *)); #endif extern int icmpreplytype4[ICMP_MAXTYPE + 1]; extern int ipf_ht_node_add __P((ipf_main_softc_t *, host_track_t *, int, i6addr_t *)); extern int ipf_ht_node_del __P((host_track_t *, int, i6addr_t *)); extern void ipf_rb_ht_flush __P((host_track_t *)); extern void ipf_rb_ht_freenode __P((host_node_t *, void *)); extern void ipf_rb_ht_init __P((host_track_t *)); #endif /* __IP_FIL_H__ */ Index: head/sys/contrib/ipfilter/netinet/ip_proxy.c =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_proxy.c (revision 358557) +++ head/sys/contrib/ipfilter/netinet/ip_proxy.c (revision 358558) @@ -1,1466 +1,1466 @@ /* $FreeBSD$ */ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. 
*/ #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #include #include #include #include #include # include #if !defined(_KERNEL) && !defined(__KERNEL__) # include # include # include # include # define _KERNEL # include # undef _KERNEL #endif # include #include #if defined(_KERNEL) #ifdef __FreeBSD_version # include # endif # include # if !defined(__SVR4) # include # endif #endif #if defined(_KERNEL) && defined(__FreeBSD_version) # include # include #else # include #endif #if defined(__SVR4) # include # ifdef _KERNEL # include # endif # include # include #endif -#if __FreeBSD_version >= 300000 +#ifdef __FreeBSD_version # include #endif #include #if defined(__FreeBSD_version) && defined(_KERNEL) #include #else #define CURVNET_SET(arg) #define CURVNET_RESTORE() #define VNET_DEFINE(_t, _v) _t _v #define VNET_DECLARE(_t, _v) extern _t _v #define VNET(arg) arg #endif #ifdef sun # include #endif #include #include #include # include #include #include #include #include "netinet/ip_compat.h" #include #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #if defined(__FreeBSD_version) # include #endif /* END OF INCLUDES */ #include "netinet/ip_ftp_pxy.c" #include "netinet/ip_tftp_pxy.c" #include "netinet/ip_rcmd_pxy.c" #include "netinet/ip_pptp_pxy.c" #if defined(_KERNEL) # include "netinet/ip_irc_pxy.c" # include "netinet/ip_raudio_pxy.c" # include "netinet/ip_netbios_pxy.c" #endif #include "netinet/ip_ipsec_pxy.c" #include "netinet/ip_rpcb_pxy.c" #if !defined(lint) static const char rcsid[] = "@(#)$Id$"; #endif #define AP_SESS_SIZE 53 static int ipf_proxy_fixseqack __P((fr_info_t *, ip_t *, ap_session_t *, int )); static aproxy_t *ipf_proxy_create_clone __P((ipf_main_softc_t *, aproxy_t *)); typedef struct ipf_proxy_softc_s { int ips_proxy_debug; int ips_proxy_session_size; ap_session_t **ips_sess_tab; ap_session_t *ips_sess_list; aproxy_t *ips_proxies; int ips_init_run; ipftuneable_t *ipf_proxy_tune; } ipf_proxy_softc_t; static ipftuneable_t ipf_proxy_tuneables[] = { { { (void *)offsetof(ipf_proxy_softc_t, ips_proxy_debug) }, "proxy_debug", 0, 0x1f, stsizeof(ipf_proxy_softc_t, ips_proxy_debug), 0, NULL, NULL }, { { NULL }, NULL, 0, 0, 0, 0, NULL, NULL} }; static aproxy_t *ap_proxylist = NULL; static aproxy_t ips_proxies[] = { #ifdef IPF_FTP_PROXY { NULL, NULL, "ftp", (char)IPPROTO_TCP, 0, 0, 0, ipf_p_ftp_main_load, ipf_p_ftp_main_unload, ipf_p_ftp_soft_create, ipf_p_ftp_soft_destroy, NULL, NULL, ipf_p_ftp_new, ipf_p_ftp_del, ipf_p_ftp_in, ipf_p_ftp_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_TFTP_PROXY { NULL, NULL, "tftp", (char)IPPROTO_UDP, 0, 0, 0, ipf_p_tftp_main_load, ipf_p_tftp_main_unload, ipf_p_tftp_soft_create, ipf_p_tftp_soft_destroy, NULL, NULL, ipf_p_tftp_new, ipf_p_tftp_del, ipf_p_tftp_in, ipf_p_tftp_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_IRC_PROXY { NULL, NULL, "irc", (char)IPPROTO_TCP, 0, 0, 0, ipf_p_irc_main_load, ipf_p_irc_main_unload, NULL, NULL, NULL, NULL, ipf_p_irc_new, NULL, NULL, ipf_p_irc_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_RCMD_PROXY { NULL, NULL, "rcmd", (char)IPPROTO_TCP, 0, 0, 0, ipf_p_rcmd_main_load, ipf_p_rcmd_main_unload, NULL, NULL, NULL, NULL, ipf_p_rcmd_new, ipf_p_rcmd_del, ipf_p_rcmd_in, ipf_p_rcmd_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_RAUDIO_PROXY { NULL, NULL, "raudio", (char)IPPROTO_TCP, 0, 0, 0, ipf_p_raudio_main_load, ipf_p_raudio_main_unload, NULL, NULL, NULL, NULL, 
ipf_p_raudio_new, NULL, ipf_p_raudio_in, ipf_p_raudio_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_MSNRPC_PROXY { NULL, NULL, "msnrpc", (char)IPPROTO_TCP, 0, 0, 0, ipf_p_msnrpc_init, ipf_p_msnrpc_fini, NULL, NULL, NULL, NULL, ipf_p_msnrpc_new, NULL, ipf_p_msnrpc_in, ipf_p_msnrpc_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_NETBIOS_PROXY { NULL, NULL, "netbios", (char)IPPROTO_UDP, 0, 0, 0, ipf_p_netbios_main_load, ipf_p_netbios_main_unload, NULL, NULL, NULL, NULL, NULL, NULL, NULL, ipf_p_netbios_out, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_IPSEC_PROXY { NULL, NULL, "ipsec", (char)IPPROTO_UDP, 0, 0, 0, NULL, NULL, ipf_p_ipsec_soft_create, ipf_p_ipsec_soft_destroy, ipf_p_ipsec_soft_init, ipf_p_ipsec_soft_fini, ipf_p_ipsec_new, ipf_p_ipsec_del, ipf_p_ipsec_inout, ipf_p_ipsec_inout, ipf_p_ipsec_match, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_DNS_PROXY { NULL, NULL, "dns", (char)IPPROTO_UDP, 0, 0, 0, NULL, NULL, ipf_p_dns_soft_create, ipf_p_dns_soft_destroy, NULL, NULL, ipf_p_dns_new, ipf_p_ipsec_del, ipf_p_dns_inout, ipf_p_dns_inout, ipf_p_dns_match, ipf_p_dns_ctl, NULL, NULL, NULL }, #endif #ifdef IPF_PPTP_PROXY { NULL, NULL, "pptp", (char)IPPROTO_TCP, 0, 0, 0, ipf_p_pptp_main_load, ipf_p_pptp_main_unload, NULL, NULL, NULL, NULL, ipf_p_pptp_new, ipf_p_pptp_del, ipf_p_pptp_inout, ipf_p_pptp_inout, NULL, NULL, NULL, NULL, NULL }, #endif #ifdef IPF_RPCB_PROXY # ifndef _KERNEL { NULL, NULL, "rpcbt", (char)IPPROTO_TCP, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, ipf_p_rpcb_new, ipf_p_rpcb_del, ipf_p_rpcb_in, ipf_p_rpcb_out, NULL, NULL, NULL, NULL, NULL }, # endif { NULL, NULL, "rpcbu", (char)IPPROTO_UDP, 0, 0, 0, ipf_p_rpcb_main_load, ipf_p_rpcb_main_unload, NULL, NULL, NULL, NULL, ipf_p_rpcb_new, ipf_p_rpcb_del, ipf_p_rpcb_in, ipf_p_rpcb_out, NULL, NULL, NULL, NULL, NULL }, #endif { NULL, NULL, "", '\0', 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL } }; /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_main_load */ /* Returns: int - 0 == success, else failure. */ /* Parameters: Nil */ /* */ /* Initialise hook for kernel application proxies. */ /* Call the initialise routine for all the compiled in kernel proxies. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_main_load() { aproxy_t *ap; for (ap = ips_proxies; ap->apr_p; ap++) { if (ap->apr_load != NULL) (*ap->apr_load)(); } return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_main_unload */ /* Returns: int - 0 == success, else failure. */ /* Parameters: Nil */ /* */ /* Unload hook for kernel application proxies. */ /* Call the finialise routine for all the compiled in kernel proxies. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_main_unload() { aproxy_t *ap; for (ap = ips_proxies; ap->apr_p; ap++) if (ap->apr_unload != NULL) (*ap->apr_unload)(); for (ap = ap_proxylist; ap; ap = ap->apr_next) if (ap->apr_unload != NULL) (*ap->apr_unload)(); return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_soft_create */ /* Returns: void * - */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Build the structure to hold all of the run time data to support proxies. 
*/ /* ------------------------------------------------------------------------ */ void * ipf_proxy_soft_create(softc) ipf_main_softc_t *softc; { ipf_proxy_softc_t *softp; aproxy_t *last; aproxy_t *apn; aproxy_t *ap; KMALLOC(softp, ipf_proxy_softc_t *); if (softp == NULL) return softp; bzero((char *)softp, sizeof(*softp)); #if defined(_KERNEL) softp->ips_proxy_debug = 0; #else softp->ips_proxy_debug = 2; #endif softp->ips_proxy_session_size = AP_SESS_SIZE; softp->ipf_proxy_tune = ipf_tune_array_copy(softp, sizeof(ipf_proxy_tuneables), ipf_proxy_tuneables); if (softp->ipf_proxy_tune == NULL) { ipf_proxy_soft_destroy(softc, softp); return NULL; } if (ipf_tune_array_link(softc, softp->ipf_proxy_tune) == -1) { ipf_proxy_soft_destroy(softc, softp); return NULL; } last = NULL; for (ap = ips_proxies; ap->apr_p; ap++) { apn = ipf_proxy_create_clone(softc, ap); if (apn == NULL) goto failed; if (last != NULL) last->apr_next = apn; else softp->ips_proxies = apn; last = apn; } for (ap = ips_proxies; ap != NULL; ap = ap->apr_next) { apn = ipf_proxy_create_clone(softc, ap); if (apn == NULL) goto failed; if (last != NULL) last->apr_next = apn; else softp->ips_proxies = apn; last = apn; } return softp; failed: ipf_proxy_soft_destroy(softc, softp); return NULL; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_soft_create */ /* Returns: void * - */ /* Parameters: softc(I) - pointer to soft context main structure */ /* orig(I) - pointer to proxy definition to copy */ /* */ /* This function clones a proxy definition given by orig and returns a */ /* a pointer to that copy. */ /* ------------------------------------------------------------------------ */ static aproxy_t * ipf_proxy_create_clone(softc, orig) ipf_main_softc_t *softc; aproxy_t *orig; { aproxy_t *apn; KMALLOC(apn, aproxy_t *); if (apn == NULL) return NULL; bcopy((char *)orig, (char *)apn, sizeof(*apn)); apn->apr_next = NULL; apn->apr_soft = NULL; if (apn->apr_create != NULL) { apn->apr_soft = (*apn->apr_create)(softc); if (apn->apr_soft == NULL) { KFREE(apn); return NULL; } } apn->apr_parent = orig; orig->apr_clones++; return apn; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_soft_create */ /* Returns: int - 0 == success, else failure. */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to proxy contect data */ /* */ /* Initialise the proxy context and walk through each of the proxies and */ /* call its initialisation function. This allows for proxies to do any */ /* local setup prior to actual use. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_soft_init(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_proxy_softc_t *softp; aproxy_t *ap; u_int size; int err; softp = arg; size = softp->ips_proxy_session_size * sizeof(ap_session_t *); KMALLOCS(softp->ips_sess_tab, ap_session_t **, size); if (softp->ips_sess_tab == NULL) return -1; bzero(softp->ips_sess_tab, size); for (ap = softp->ips_proxies; ap != NULL; ap = ap->apr_next) { if (ap->apr_init != NULL) { err = (*ap->apr_init)(softc, ap->apr_soft); if (err != 0) return -2; } } softp->ips_init_run = 1; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_soft_create */ /* Returns: int - 0 == success, else failure. 
*/ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to proxy contect data */ /* */ /* This function should always succeed. It is responsible for ensuring that */ /* the proxy context can be safely called when ipf_proxy_soft_destroy is */ /* called and suring all of the proxies have similarly been instructed. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_soft_fini(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_proxy_softc_t *softp = arg; aproxy_t *ap; for (ap = softp->ips_proxies; ap != NULL; ap = ap->apr_next) { if (ap->apr_fini != NULL) { (*ap->apr_fini)(softc, ap->apr_soft); } } if (softp->ips_sess_tab != NULL) { KFREES(softp->ips_sess_tab, softp->ips_proxy_session_size * sizeof(ap_session_t *)); softp->ips_sess_tab = NULL; } softp->ips_init_run = 0; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_soft_destroy */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to proxy contect data */ /* */ /* Free up all of the local data structures allocated during creation. */ /* ------------------------------------------------------------------------ */ void ipf_proxy_soft_destroy(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_proxy_softc_t *softp = arg; aproxy_t *ap; while ((ap = softp->ips_proxies) != NULL) { softp->ips_proxies = ap->apr_next; if (ap->apr_destroy != NULL) (*ap->apr_destroy)(softc, ap->apr_soft); ap->apr_parent->apr_clones--; KFREE(ap); } if (softp->ipf_proxy_tune != NULL) { ipf_tune_array_unlink(softc, softp->ipf_proxy_tune); KFREES(softp->ipf_proxy_tune, sizeof(ipf_proxy_tuneables)); softp->ipf_proxy_tune = NULL; } KFREE(softp); } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_flush */ /* Returns: Nil */ /* Parameters: arg(I) - pointer to proxy contect data */ /* how(I) - indicates the type of flush operation */ /* */ /* Walk through all of the proxies and pass on the flush command as either */ /* a flush or a clear. */ /* ------------------------------------------------------------------------ */ void ipf_proxy_flush(arg, how) void *arg; int how; { ipf_proxy_softc_t *softp = arg; aproxy_t *ap; switch (how) { case 0 : for (ap = softp->ips_proxies; ap; ap = ap->apr_next) if (ap->apr_flush != NULL) (*ap->apr_flush)(ap, how); break; case 1 : for (ap = softp->ips_proxies; ap; ap = ap->apr_next) if (ap->apr_clear != NULL) (*ap->apr_clear)(ap); break; default : break; } } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_add */ /* Returns: int - 0 == success, else failure. */ /* Parameters: ap(I) - pointer to proxy structure */ /* */ /* Dynamically add a new kernel proxy. Ensure that it is unique in the */ /* collection compiled in and dynamically added. 
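
   For example (an illustrative sketch with hypothetical names, not part of
   the original source), a loadable proxy module might describe itself in an
   aproxy_t and register it against the proxy context:

	static aproxy_t my_proxy;

	strncpy(my_proxy.apr_label, "myproxy", sizeof(my_proxy.apr_label));
	my_proxy.apr_p = IPPROTO_TCP;
	my_proxy.apr_new = my_proxy_new;	(hypothetical callbacks)
	my_proxy.apr_inpkt = my_proxy_in;
	error = ipf_proxy_add(softc->ipf_proxy_soft, &my_proxy);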
*/ /* ------------------------------------------------------------------------ */ int ipf_proxy_add(arg, ap) void *arg; aproxy_t *ap; { ipf_proxy_softc_t *softp = arg; aproxy_t *a; for (a = ips_proxies; a->apr_p; a++) if ((a->apr_p == ap->apr_p) && !strncmp(a->apr_label, ap->apr_label, sizeof(ap->apr_label))) { if (softp->ips_proxy_debug & 0x01) printf("ipf_proxy_add: %s/%d present (B)\n", a->apr_label, a->apr_p); return -1; } for (a = ap_proxylist; (a != NULL); a = a->apr_next) if ((a->apr_p == ap->apr_p) && !strncmp(a->apr_label, ap->apr_label, sizeof(ap->apr_label))) { if (softp->ips_proxy_debug & 0x01) printf("ipf_proxy_add: %s/%d present (D)\n", a->apr_label, a->apr_p); return -1; } ap->apr_next = ap_proxylist; ap_proxylist = ap; if (ap->apr_load != NULL) (*ap->apr_load)(); return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_ctl */ /* Returns: int - 0 == success, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to proxy context */ /* ctl(I) - pointer to proxy control structure */ /* */ /* Check to see if the proxy this control request has come through for */ /* exists, and if it does and it has a control function then invoke that */ /* control function. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_ctl(softc, arg, ctl) ipf_main_softc_t *softc; void *arg; ap_ctl_t *ctl; { ipf_proxy_softc_t *softp = arg; aproxy_t *a; int error; a = ipf_proxy_lookup(arg, ctl->apc_p, ctl->apc_label); if (a == NULL) { if (softp->ips_proxy_debug & 0x01) printf("ipf_proxy_ctl: can't find %s/%d\n", ctl->apc_label, ctl->apc_p); IPFERROR(80001); error = ESRCH; } else if (a->apr_ctl == NULL) { if (softp->ips_proxy_debug & 0x01) printf("ipf_proxy_ctl: no ctl function for %s/%d\n", ctl->apc_label, ctl->apc_p); IPFERROR(80002); error = ENXIO; } else { error = (*a->apr_ctl)(softc, a->apr_soft, ctl); if ((error != 0) && (softp->ips_proxy_debug & 0x02)) printf("ipf_proxy_ctl: %s/%d ctl error %d\n", a->apr_label, a->apr_p, error); } return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_del */ /* Returns: int - 0 == success, else failure. */ /* Parameters: ap(I) - pointer to proxy structure */ /* */ /* Delete a proxy that has been added dynamically from those available. */ /* If it is in use, return 1 (do not destroy NOW), not in use 0 or -1 */ /* if it cannot be matched. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_del(ap) aproxy_t *ap; { aproxy_t *a, **app; for (app = &ap_proxylist; ((a = *app) != NULL); app = &a->apr_next) { if (a == ap) { a->apr_flags |= APR_DELETE; if (ap->apr_ref == 0 && ap->apr_clones == 0) { *app = a->apr_next; return 0; } return 1; } } return -1; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_ok */ /* Returns: int - 1 == good match else not. */ /* Parameters: fin(I) - pointer to packet information */ /* tcp(I) - pointer to TCP/UDP header */ /* nat(I) - pointer to current NAT session */ /* */ /* This function extends the NAT matching to ensure that a packet that has */ /* arrived matches the proxy information attached to the NAT rule. Notably, */ /* if the proxy is scheduled to be deleted then packets will not match the */ /* rule even if the rule is still active. 
*/ /* ------------------------------------------------------------------------ */ int ipf_proxy_ok(fin, tcp, np) fr_info_t *fin; tcphdr_t *tcp; ipnat_t *np; { aproxy_t *apr = np->in_apr; u_short dport = np->in_odport; if ((apr == NULL) || (apr->apr_flags & APR_DELETE) || (fin->fin_p != apr->apr_p)) return 0; if ((tcp == NULL) && dport) return 0; return 1; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_ioctl */ /* Returns: int - 0 == success, else error */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* cmd(I) - ioctl command */ /* mode(I) - mode bits for device */ /* ctx(I) - pointer to context information */ /* */ /* ------------------------------------------------------------------------ */ int ipf_proxy_ioctl(softc, data, cmd, mode, ctx) ipf_main_softc_t *softc; caddr_t data; ioctlcmd_t cmd; int mode; void *ctx; { ap_ctl_t ctl; caddr_t ptr; int error; mode = mode; /* LINT */ switch (cmd) { case SIOCPROXY : error = ipf_inobj(softc, data, NULL, &ctl, IPFOBJ_PROXYCTL); if (error != 0) { return error; } ptr = NULL; if (ctl.apc_dsize > 0) { KMALLOCS(ptr, caddr_t, ctl.apc_dsize); if (ptr == NULL) { IPFERROR(80003); error = ENOMEM; } else { error = copyinptr(softc, ctl.apc_data, ptr, ctl.apc_dsize); if (error == 0) ctl.apc_data = ptr; } } else { ctl.apc_data = NULL; error = 0; } if (error == 0) error = ipf_proxy_ctl(softc, softc->ipf_proxy_soft, &ctl); if ((error != 0) && (ptr != NULL)) { KFREES(ptr, ctl.apc_dsize); } break; default : IPFERROR(80004); error = EINVAL; } return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_match */ /* Returns: int - 0 == success, else error */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to current NAT session */ /* */ /* If a proxy has a match function, call that to do extended packet */ /* matching. Whilst other parts of the NAT code are rather lenient when it */ /* comes to the quality of the packet that it will transform, the proxy */ /* matching is not because they need to work with data, not just headers. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_match(fin, nat) fr_info_t *fin; nat_t *nat; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_proxy_softc_t *softp = softc->ipf_proxy_soft; aproxy_t *apr; ipnat_t *ipn; int result; ipn = nat->nat_ptr; if (softp->ips_proxy_debug & 0x04) printf("ipf_proxy_match(%lx,%lx) aps %lx ptr %lx\n", (u_long)fin, (u_long)nat, (u_long)nat->nat_aps, (u_long)ipn); if ((fin->fin_flx & (FI_SHORT|FI_BAD)) != 0) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_match: flx 0x%x (BAD|SHORT)\n", fin->fin_flx); return -1; } apr = ipn->in_apr; if ((apr == NULL) || (apr->apr_flags & APR_DELETE)) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_match:apr %lx apr_flags 0x%x\n", (u_long)apr, apr ? 
apr->apr_flags : 0); return -1; } if (apr->apr_match != NULL) { result = (*apr->apr_match)(fin, nat->nat_aps, nat); if (result != 0) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_match: result %d\n", result); return -1; } } return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_new */ /* Returns: int - 0 == success, else error */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to current NAT session */ /* */ /* Allocate a new application proxy structure and fill it in with the */ /* relevant details. call the init function once complete, prior to */ /* returning. */ /* ------------------------------------------------------------------------ */ int ipf_proxy_new(fin, nat) fr_info_t *fin; nat_t *nat; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_proxy_softc_t *softp = softc->ipf_proxy_soft; register ap_session_t *aps; aproxy_t *apr; if (softp->ips_proxy_debug & 0x04) printf("ipf_proxy_new(%lx,%lx) \n", (u_long)fin, (u_long)nat); if ((nat->nat_ptr == NULL) || (nat->nat_aps != NULL)) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_new: nat_ptr %lx nat_aps %lx\n", (u_long)nat->nat_ptr, (u_long)nat->nat_aps); return -1; } apr = nat->nat_ptr->in_apr; if ((apr->apr_flags & APR_DELETE) || (fin->fin_p != apr->apr_p)) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_new: apr_flags 0x%x p %d/%d\n", apr->apr_flags, fin->fin_p, apr->apr_p); return -1; } KMALLOC(aps, ap_session_t *); if (!aps) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_new: malloc failed (%lu)\n", (u_long)sizeof(ap_session_t)); return -1; } bzero((char *)aps, sizeof(*aps)); aps->aps_data = NULL; aps->aps_apr = apr; aps->aps_psiz = 0; if (apr->apr_new != NULL) if ((*apr->apr_new)(apr->apr_soft, fin, aps, nat) == -1) { if ((aps->aps_data != NULL) && (aps->aps_psiz != 0)) { KFREES(aps->aps_data, aps->aps_psiz); } KFREE(aps); if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_new: new(%lx) failed\n", (u_long)apr->apr_new); return -1; } aps->aps_nat = nat; aps->aps_next = softp->ips_sess_list; softp->ips_sess_list = aps; nat->nat_aps = aps; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_check */ /* Returns: int - -1 == error, 0 == success */ /* Parameters: fin(I) - pointer to packet information */ /* nat(I) - pointer to current NAT session */ /* */ /* Check to see if a packet should be passed through an active proxy */ /* routine if one has been setup for it. We don't need to check the */ /* checksum here if IPFILTER_CKSUM is defined because if it is, a failed */ /* check causes FI_BAD to be set. 
*/ /* ------------------------------------------------------------------------ */ int ipf_proxy_check(fin, nat) fr_info_t *fin; nat_t *nat; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_proxy_softc_t *softp = softc->ipf_proxy_soft; #if SOLARIS && defined(_KERNEL) && defined(ICK_VALID) mb_t *m; #endif tcphdr_t *tcp = NULL; udphdr_t *udp = NULL; ap_session_t *aps; aproxy_t *apr; short adjlen; int dosum; ip_t *ip; short rv; int err; #if !defined(_KERNEL) || defined(MENTAT) u_32_t s1, s2, sd; #endif if (fin->fin_flx & FI_BAD) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_check: flx 0x%x (BAD)\n", fin->fin_flx); return -1; } #ifndef IPFILTER_CKSUM if ((fin->fin_out == 0) && (ipf_checkl4sum(fin) == -1)) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_check: l4 checksum failure %d\n", fin->fin_p); if (fin->fin_p == IPPROTO_TCP) softc->ipf_stats[fin->fin_out].fr_tcpbad++; return -1; } #endif aps = nat->nat_aps; if (aps != NULL) { /* * If there is data in this packet to be proxied then try and * get it all into the one buffer, else drop it. */ #if defined(MENTAT) || defined(HAVE_M_PULLDOWN) if ((fin->fin_dlen > 0) && !(fin->fin_flx & FI_COALESCE)) if (ipf_coalesce(fin) == -1) { if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_check: %s %x\n", "coalesce failed", fin->fin_flx); return -1; } #endif ip = fin->fin_ip; if (fin->fin_cksum > FI_CK_SUMOK) dosum = 0; else dosum = 1; switch (fin->fin_p) { case IPPROTO_TCP : tcp = (tcphdr_t *)fin->fin_dp; #if SOLARIS && defined(_KERNEL) && defined(ICK_VALID) m = fin->fin_qfm; if (dohwcksum && (m->b_ick_flag == ICK_VALID)) dosum = 0; #endif break; case IPPROTO_UDP : udp = (udphdr_t *)fin->fin_dp; break; default : break; } apr = aps->aps_apr; err = 0; if (fin->fin_out != 0) { if (apr->apr_outpkt != NULL) err = (*apr->apr_outpkt)(apr->apr_soft, fin, aps, nat); } else { if (apr->apr_inpkt != NULL) err = (*apr->apr_inpkt)(apr->apr_soft, fin, aps, nat); } rv = APR_EXIT(err); if (((softp->ips_proxy_debug & 0x08) && (rv != 0)) || (softp->ips_proxy_debug & 0x04)) printf("ipf_proxy_check: out %d err %x rv %d\n", fin->fin_out, err, rv); if (rv == 1) return -1; if (rv == 2) { ipf_proxy_deref(apr); nat->nat_aps = NULL; return -1; } /* * If err != 0 then the data size of the packet has changed * so we need to recalculate the header checksums for the * packet. */ adjlen = APR_INC(err); #if !defined(_KERNEL) || defined(MENTAT) s1 = LONG_SUM(fin->fin_plen - adjlen); s2 = LONG_SUM(fin->fin_plen); CALC_SUMD(s1, s2, sd); if ((err != 0) && (fin->fin_cksum < FI_CK_L4PART) && fin->fin_v == 4) ipf_fix_outcksum(0, &ip->ip_sum, sd, 0); #endif if (fin->fin_flx & FI_DOCKSUM) dosum = 1; /* * For TCP packets, we may need to adjust the sequence and * acknowledgement numbers to reflect changes in size of the * data stream. * * For both TCP and UDP, recalculate the layer 4 checksum, * regardless, as we can't tell (here) if data has been * changed or not. 
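 *
 * A worked illustration (an assumption, not from the original text): if a
 * proxy grows the payload by 6 bytes, APR_INC(err) yields an adjlen of 6,
 * fin_plen has already been increased by 6, the IP header checksum is
 * patched with the difference between the old and new length sums, and a
 * partial (pseudo-header only) layer 4 checksum is simply advanced by 6,
 * while a full checksum is recomputed with fr_cksum() below.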
*/ if (tcp != NULL) { err = ipf_proxy_fixseqack(fin, ip, aps, adjlen); if (fin->fin_cksum == FI_CK_L4PART) { u_short sum = ntohs(tcp->th_sum); sum += adjlen; tcp->th_sum = htons(sum); } else if (fin->fin_cksum < FI_CK_L4PART) { tcp->th_sum = fr_cksum(fin, ip, IPPROTO_TCP, tcp); } } else if ((udp != NULL) && (udp->uh_sum != 0)) { if (fin->fin_cksum == FI_CK_L4PART) { u_short sum = ntohs(udp->uh_sum); sum += adjlen; udp->uh_sum = htons(sum); } else if (dosum) { udp->uh_sum = fr_cksum(fin, ip, IPPROTO_UDP, udp); } } aps->aps_bytes += fin->fin_plen; aps->aps_pkts++; return 1; } return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_lookup */ /* Returns: int - -1 == error, 0 == success */ /* Parameters: arg(I) - pointer to proxy context information */ /* pr(I) - protocol number for proxy */ /* name(I) - proxy name */ /* */ /* Search for an proxy by the protocol it is being used with and its name. */ /* ------------------------------------------------------------------------ */ aproxy_t * ipf_proxy_lookup(arg, pr, name) void *arg; u_int pr; char *name; { ipf_proxy_softc_t *softp = arg; aproxy_t *ap; if (softp->ips_proxy_debug & 0x04) printf("ipf_proxy_lookup(%d,%s)\n", pr, name); for (ap = softp->ips_proxies; ap != NULL; ap = ap->apr_next) if ((ap->apr_p == pr) && !strncmp(name, ap->apr_label, sizeof(ap->apr_label))) { ap->apr_ref++; return ap; } if (softp->ips_proxy_debug & 0x08) printf("ipf_proxy_lookup: failed for %d/%s\n", pr, name); return NULL; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_deref */ /* Returns: Nil */ /* Parameters: ap(I) - pointer to proxy structure */ /* */ /* Drop the reference counter associated with the proxy. */ /* ------------------------------------------------------------------------ */ void ipf_proxy_deref(ap) aproxy_t *ap; { ap->apr_ref--; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_free */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* aps(I) - pointer to current proxy session */ /* Locks Held: ipf_nat_new, ipf_nat(W) */ /* */ /* Free up proxy session information allocated to be used with a NAT */ /* session. */ /* ------------------------------------------------------------------------ */ void ipf_proxy_free(softc, aps) ipf_main_softc_t *softc; ap_session_t *aps; { ipf_proxy_softc_t *softp = softc->ipf_proxy_soft; ap_session_t *a, **ap; aproxy_t *apr; if (!aps) return; for (ap = &softp->ips_sess_list; ((a = *ap) != NULL); ap = &a->aps_next) if (a == aps) { *ap = a->aps_next; break; } apr = aps->aps_apr; if ((apr != NULL) && (apr->apr_del != NULL)) (*apr->apr_del)(softc, aps); if ((aps->aps_data != NULL) && (aps->aps_psiz != 0)) KFREES(aps->aps_data, aps->aps_psiz); KFREE(aps); } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_fixseqack */ /* Returns: int - 2 if TCP ack/seq is changed, else 0 */ /* Parameters: fin(I) - pointer to packet information */ /* ip(I) - pointer to IP header */ /* nat(I) - pointer to current NAT session */ /* inc(I) - delta to apply to TCP sequence numbering */ /* */ /* Adjust the TCP sequence/acknowledge numbers in the TCP header based on */ /* whether or not the new header is past the point at which an adjustment */ /* occurred. This might happen because of (say) an FTP string being changed */ /* and the new string being a different length to the old. 
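
   As a worked illustration (not from the original text): if an outgoing
   FTP PORT string at sequence number S is rewritten 6 bytes longer, every
   later outbound sequence number beyond S must be shifted up by 6 and the
   acknowledgement numbers returned by the peer shifted back down by 6; the
   aps_seqoff/aps_ackoff and aps_seqmin/aps_ackmin fields below record
   exactly those offsets and the points at which they start to apply.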
*/ /* ------------------------------------------------------------------------ */ static int ipf_proxy_fixseqack(fin, ip, aps, inc) fr_info_t *fin; ip_t *ip; ap_session_t *aps; int inc; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_proxy_softc_t *softp = softc->ipf_proxy_soft; int sel, ch = 0, out, nlen; u_32_t seq1, seq2; tcphdr_t *tcp; short inc2; tcp = (tcphdr_t *)fin->fin_dp; out = fin->fin_out; /* * ip_len has already been adjusted by 'inc'. */ nlen = fin->fin_dlen; nlen -= (TCP_OFF(tcp) << 2); inc2 = inc; inc = (int)inc2; if (out != 0) { seq1 = (u_32_t)ntohl(tcp->th_seq); sel = aps->aps_sel[out]; /* switch to other set ? */ if ((aps->aps_seqmin[!sel] > aps->aps_seqmin[sel]) && (seq1 > aps->aps_seqmin[!sel])) { if (softp->ips_proxy_debug & 0x10) printf("proxy out switch set seq %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_seqmin[!sel]); sel = aps->aps_sel[out] = !sel; } if (aps->aps_seqoff[sel]) { seq2 = aps->aps_seqmin[sel] - aps->aps_seqoff[sel]; if (seq1 > seq2) { seq2 = aps->aps_seqoff[sel]; seq1 += seq2; tcp->th_seq = htonl(seq1); ch = 1; } } if (inc && (seq1 > aps->aps_seqmin[!sel])) { aps->aps_seqmin[sel] = seq1 + nlen - 1; aps->aps_seqoff[sel] = aps->aps_seqoff[sel] + inc; if (softp->ips_proxy_debug & 0x10) printf("proxy seq set %d at %x to %d + %d\n", sel, aps->aps_seqmin[sel], aps->aps_seqoff[sel], inc); } /***/ seq1 = ntohl(tcp->th_ack); sel = aps->aps_sel[1 - out]; /* switch to other set ? */ if ((aps->aps_ackmin[!sel] > aps->aps_ackmin[sel]) && (seq1 > aps->aps_ackmin[!sel])) { if (softp->ips_proxy_debug & 0x10) printf("proxy out switch set ack %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_ackmin[!sel]); sel = aps->aps_sel[1 - out] = !sel; } if (aps->aps_ackoff[sel] && (seq1 > aps->aps_ackmin[sel])) { seq2 = aps->aps_ackoff[sel]; tcp->th_ack = htonl(seq1 - seq2); ch = 1; } } else { seq1 = ntohl(tcp->th_seq); sel = aps->aps_sel[out]; /* switch to other set ? */ if ((aps->aps_ackmin[!sel] > aps->aps_ackmin[sel]) && (seq1 > aps->aps_ackmin[!sel])) { if (softp->ips_proxy_debug & 0x10) printf("proxy in switch set ack %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_ackmin[!sel]); sel = aps->aps_sel[out] = !sel; } if (aps->aps_ackoff[sel]) { seq2 = aps->aps_ackmin[sel] - aps->aps_ackoff[sel]; if (seq1 > seq2) { seq2 = aps->aps_ackoff[sel]; seq1 += seq2; tcp->th_seq = htonl(seq1); ch = 1; } } if (inc && (seq1 > aps->aps_ackmin[!sel])) { aps->aps_ackmin[!sel] = seq1 + nlen - 1; aps->aps_ackoff[!sel] = aps->aps_ackoff[sel] + inc; if (softp->ips_proxy_debug & 0x10) printf("proxy ack set %d at %x to %d + %d\n", !sel, aps->aps_seqmin[!sel], aps->aps_seqoff[sel], inc); } /***/ seq1 = ntohl(tcp->th_ack); sel = aps->aps_sel[1 - out]; /* switch to other set ? */ if ((aps->aps_seqmin[!sel] > aps->aps_seqmin[sel]) && (seq1 > aps->aps_seqmin[!sel])) { if (softp->ips_proxy_debug & 0x10) printf("proxy in switch set seq %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_seqmin[!sel]); sel = aps->aps_sel[1 - out] = !sel; } if (aps->aps_seqoff[sel] != 0) { if (softp->ips_proxy_debug & 0x10) printf("sel %d seqoff %d seq1 %x seqmin %x\n", sel, aps->aps_seqoff[sel], seq1, aps->aps_seqmin[sel]); if (seq1 > aps->aps_seqmin[sel]) { seq2 = aps->aps_seqoff[sel]; tcp->th_ack = htonl(seq1 - seq2); ch = 1; } } } if (softp->ips_proxy_debug & 0x10) printf("ipf_proxy_fixseqack: seq %u ack %u\n", (u_32_t)ntohl(tcp->th_seq), (u_32_t)ntohl(tcp->th_ack)); return ch ? 
2 : 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_rule_rev */ /* Returns: ipnat_t * - NULL = failure, else pointer to new rule */ /* Parameters: nat(I) - pointer to NAT session to create rule from */ /* */ /* This function creates a NAT rule that is based upon the reverse packet */ /* flow associated with this NAT session. Thus if this NAT session was */ /* created with a map rule then this function will create a rdr rule. */ /* Only address fields and network interfaces are assigned in this function */ /* and the address fields are formed such that an exact is required. If the */ /* original rule had a netmask, that is not replicated here not is it */ /* desired. The ultimate goal here is to create a NAT rule to support a NAT */ /* session being created that does not have a user configured rule. The */ /* classic example is supporting the FTP proxy, where a data channel needs */ /* to be setup, based on the addresses used for the control connection. In */ /* that case, this function is used to handle creating NAT rules to support */ /* data connections with the PORT and EPRT commands. */ /* ------------------------------------------------------------------------ */ ipnat_t * ipf_proxy_rule_rev(nat) nat_t *nat; { ipnat_t *old; ipnat_t *ipn; int size; old = nat->nat_ptr; size = old->in_size; KMALLOCS(ipn, ipnat_t *, size); if (ipn == NULL) return NULL; bzero((char *)ipn, size); ipn->in_use = 1; ipn->in_hits = 1; ipn->in_ippip = 1; ipn->in_apr = NULL; ipn->in_size = size; ipn->in_pr[0] = old->in_pr[1]; ipn->in_pr[1] = old->in_pr[0]; ipn->in_v[0] = old->in_v[1]; ipn->in_v[1] = old->in_v[0]; ipn->in_ifps[0] = old->in_ifps[1]; ipn->in_ifps[1] = old->in_ifps[0]; ipn->in_flags = (old->in_flags | IPN_PROXYRULE); ipn->in_nsrcip6 = nat->nat_odst6; ipn->in_osrcip6 = nat->nat_ndst6; if ((old->in_redir & NAT_REDIRECT) != 0) { ipn->in_redir = NAT_MAP; if (ipn->in_v[0] == 4) { ipn->in_snip = ntohl(nat->nat_odstaddr); ipn->in_dnip = ntohl(nat->nat_nsrcaddr); } else { #ifdef USE_INET6 ipn->in_snip6 = nat->nat_odst6; ipn->in_dnip6 = nat->nat_nsrc6; #endif } ipn->in_ndstip6 = nat->nat_nsrc6; ipn->in_odstip6 = nat->nat_osrc6; } else { ipn->in_redir = NAT_REDIRECT; if (ipn->in_v[0] == 4) { ipn->in_snip = ntohl(nat->nat_odstaddr); ipn->in_dnip = ntohl(nat->nat_osrcaddr); } else { #ifdef USE_INET6 ipn->in_snip6 = nat->nat_odst6; ipn->in_dnip6 = nat->nat_osrc6; #endif } ipn->in_ndstip6 = nat->nat_osrc6; ipn->in_odstip6 = nat->nat_nsrc6; } IP6_SETONES(&ipn->in_osrcmsk6); IP6_SETONES(&ipn->in_nsrcmsk6); IP6_SETONES(&ipn->in_odstmsk6); IP6_SETONES(&ipn->in_ndstmsk6); ipn->in_namelen = old->in_namelen; ipn->in_ifnames[0] = old->in_ifnames[1]; ipn->in_ifnames[1] = old->in_ifnames[0]; bcopy(old->in_names, ipn->in_names, ipn->in_namelen); MUTEX_INIT(&ipn->in_lock, "ipnat rev rule lock"); return ipn; } /* ------------------------------------------------------------------------ */ /* Function: ipf_proxy_rule_fwd */ /* Returns: ipnat_t * - NULL = failure, else pointer to new rule */ /* Parameters: nat(I) - pointer to NAT session to create rule from */ /* */ /* The purpose and rationale of this function is much the same as the above */ /* function, ipf_proxy_rule_rev, except that a rule is created that matches */ /* the same direction as that of the existing NAT session. Thus if this NAT */ /* session was created with a map rule then this function will also create */ /* a data structure to represent a map rule. 
Whereas ipf_proxy_rule_rev is */ /* used to support PORT/EPRT, this function supports PASV/EPSV. */ /* ------------------------------------------------------------------------ */ ipnat_t * ipf_proxy_rule_fwd(nat) nat_t *nat; { ipnat_t *old; ipnat_t *ipn; int size; old = nat->nat_ptr; size = old->in_size; KMALLOCS(ipn, ipnat_t *, size); if (ipn == NULL) return NULL; bzero((char *)ipn, size); ipn->in_use = 1; ipn->in_hits = 1; ipn->in_ippip = 1; ipn->in_apr = NULL; ipn->in_size = size; ipn->in_pr[0] = old->in_pr[0]; ipn->in_pr[1] = old->in_pr[1]; ipn->in_v[0] = old->in_v[0]; ipn->in_v[1] = old->in_v[1]; ipn->in_ifps[0] = nat->nat_ifps[0]; ipn->in_ifps[1] = nat->nat_ifps[1]; ipn->in_flags = (old->in_flags | IPN_PROXYRULE); ipn->in_nsrcip6 = nat->nat_nsrc6; ipn->in_osrcip6 = nat->nat_osrc6; ipn->in_ndstip6 = nat->nat_ndst6; ipn->in_odstip6 = nat->nat_odst6; ipn->in_redir = old->in_redir; if (ipn->in_v[0] == 4) { ipn->in_snip = ntohl(nat->nat_nsrcaddr); ipn->in_dnip = ntohl(nat->nat_ndstaddr); } else { #ifdef USE_INET6 ipn->in_snip6 = nat->nat_nsrc6; ipn->in_dnip6 = nat->nat_ndst6; #endif } IP6_SETONES(&ipn->in_osrcmsk6); IP6_SETONES(&ipn->in_nsrcmsk6); IP6_SETONES(&ipn->in_odstmsk6); IP6_SETONES(&ipn->in_ndstmsk6); ipn->in_namelen = old->in_namelen; ipn->in_ifnames[0] = old->in_ifnames[0]; ipn->in_ifnames[1] = old->in_ifnames[1]; bcopy(old->in_names, ipn->in_names, ipn->in_namelen); MUTEX_INIT(&ipn->in_lock, "ipnat fwd rule lock"); return ipn; } Index: head/sys/contrib/ipfilter/netinet/ip_state.c =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_state.c (revision 358557) +++ head/sys/contrib/ipfilter/netinet/ip_state.c (revision 358558) @@ -1,5394 +1,5394 @@ /* $FreeBSD$ */ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. * * Copyright 2008 Sun Microsystems. 
* * $Id$ */ #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #include #include #include #include #if defined(_KERNEL) && defined(__FreeBSD_version) && \ !defined(KLD_MODULE) #include "opt_inet6.h" #endif #if !defined(_KERNEL) && !defined(__KERNEL__) # include # include # include # define _KERNEL # include # undef _KERNEL #endif #if defined(_KERNEL) && defined(__FreeBSD_version) # include # include #else # include #endif #include # include #include #if defined(_KERNEL) # include # if !defined(__SVR4) # include # endif #endif #if defined(__SVR4) # include # include # ifdef _KERNEL # include # endif # include # include #endif #include #ifdef sun # include #endif #include #include #include #include # include #include #include #if !defined(_KERNEL) # include "ipf.h" #endif #include "netinet/ip_compat.h" #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_frag.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #include "netinet/ip_lookup.h" #include "netinet/ip_dstlist.h" #include "netinet/ip_sync.h" #ifdef USE_INET6 #include #endif -#if FREEBSD_GE_REV(300000) +#ifdef __FreeBSD_version # include # if defined(_KERNEL) && !defined(IPFILTER_LKM) # include # include # endif #endif /* END OF INCLUDES */ #if !defined(lint) static const char sccsid[] = "@(#)ip_state.c 1.8 6/5/96 (C) 1993-2000 Darren Reed"; static const char rcsid[] = "@(#)$Id$"; #endif static ipftuneable_t ipf_state_tuneables[] = { { { (void *)offsetof(ipf_state_softc_t, ipf_state_max) }, "state_max", 1, 0x7fffffff, stsizeof(ipf_state_softc_t, ipf_state_max), 0, NULL, NULL }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_size) }, "state_size", 1, 0x7fffffff, stsizeof(ipf_state_softc_t, ipf_state_size), 0, NULL, ipf_state_rehash }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_lock) }, "state_lock", 0, 1, stsizeof(ipf_state_softc_t, ipf_state_lock), IPFT_RDONLY, NULL, NULL }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_maxbucket) }, "state_maxbucket", 1, 0x7fffffff, stsizeof(ipf_state_softc_t, ipf_state_maxbucket), 0, NULL, NULL }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_logging) }, "state_logging",0, 1, stsizeof(ipf_state_softc_t, ipf_state_logging), 0, NULL, NULL }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_wm_high) }, "state_wm_high",2, 100, stsizeof(ipf_state_softc_t, ipf_state_wm_high), 0, NULL, NULL }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_wm_low) }, "state_wm_low", 1, 99, stsizeof(ipf_state_softc_t, ipf_state_wm_low), 0, NULL, NULL }, { { (void *)offsetof(ipf_state_softc_t, ipf_state_wm_freq) }, "state_wm_freq",2, 999999, stsizeof(ipf_state_softc_t, ipf_state_wm_freq), 0, NULL, NULL }, { { NULL }, NULL, 0, 0, 0, 0, NULL, NULL } }; #define SINCL(x) ATOMIC_INCL(softs->x) #define SBUMP(x) (softs->x)++ #define SBUMPD(x, y) do { (softs->x.y)++; DT(y); } while (0) #define SBUMPDX(x, y, z)do { (softs->x.y)++; DT(z); } while (0) #ifdef USE_INET6 static ipstate_t *ipf_checkicmp6matchingstate __P((fr_info_t *)); #endif static int ipf_allowstateicmp __P((fr_info_t *, ipstate_t *, i6addr_t *)); static ipstate_t *ipf_matchsrcdst __P((fr_info_t *, ipstate_t *, i6addr_t *, i6addr_t *, tcphdr_t *, u_32_t)); static ipstate_t *ipf_checkicmpmatchingstate __P((fr_info_t *)); static int ipf_state_flush_entry __P((ipf_main_softc_t *, void *)); static ips_stat_t *ipf_state_stats __P((ipf_main_softc_t *)); static int ipf_state_del __P((ipf_main_softc_t *, ipstate_t *, int)); static int ipf_state_remove 
__P((ipf_main_softc_t *, caddr_t)); static int ipf_state_match __P((ipstate_t *is1, ipstate_t *is2)); static int ipf_state_matchaddresses __P((ipstate_t *is1, ipstate_t *is2)); static int ipf_state_matchipv4addrs __P((ipstate_t *is1, ipstate_t *is2)); static int ipf_state_matchipv6addrs __P((ipstate_t *is1, ipstate_t *is2)); static int ipf_state_matchisps __P((ipstate_t *is1, ipstate_t *is2)); static int ipf_state_matchports __P((udpinfo_t *is1, udpinfo_t *is2)); static int ipf_state_matcharray __P((ipstate_t *, int *, u_long)); static void ipf_ipsmove __P((ipf_state_softc_t *, ipstate_t *, u_int)); static int ipf_state_tcp __P((ipf_main_softc_t *, ipf_state_softc_t *, fr_info_t *, tcphdr_t *, ipstate_t *)); static int ipf_tcpoptions __P((ipf_state_softc_t *, fr_info_t *, tcphdr_t *, tcpdata_t *)); static ipstate_t *ipf_state_clone __P((fr_info_t *, tcphdr_t *, ipstate_t *)); static void ipf_fixinisn __P((fr_info_t *, ipstate_t *)); static void ipf_fixoutisn __P((fr_info_t *, ipstate_t *)); static void ipf_checknewisn __P((fr_info_t *, ipstate_t *)); static int ipf_state_iter __P((ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *, ipfobj_t *)); static int ipf_state_gettable __P((ipf_main_softc_t *, ipf_state_softc_t *, char *)); static int ipf_state_tcpinwindow __P((struct fr_info *, struct tcpdata *, struct tcpdata *, tcphdr_t *, int)); static int ipf_state_getent __P((ipf_main_softc_t *, ipf_state_softc_t *, caddr_t)); static int ipf_state_putent __P((ipf_main_softc_t *, ipf_state_softc_t *, caddr_t)); #define ONE_DAY IPF_TTLVAL(1 * 86400) /* 1 day */ #define FIVE_DAYS (5 * ONE_DAY) #define DOUBLE_HASH(x) (((x) + softs->ipf_state_seed[(x) % \ softs->ipf_state_size]) % softs->ipf_state_size) /* ------------------------------------------------------------------------ */ /* Function: ipf_state_main_load */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: Nil */ /* */ /* A null-op function that exists as a placeholder so that the flow in */ /* other functions is obvious. */ /* ------------------------------------------------------------------------ */ int ipf_state_main_load() { return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_main_unload */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: Nil */ /* */ /* A null-op function that exists as a placeholder so that the flow in */ /* other functions is obvious. */ /* ------------------------------------------------------------------------ */ int ipf_state_main_unload() { return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_soft_create */ /* Returns: void * - NULL = failure, else pointer to soft context */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Create a new state soft context structure and populate it with the list */ /* of tunables and other default settings. 
*/ /* ------------------------------------------------------------------------ */ void * ipf_state_soft_create(softc) ipf_main_softc_t *softc; { ipf_state_softc_t *softs; KMALLOC(softs, ipf_state_softc_t *); if (softs == NULL) return NULL; bzero((char *)softs, sizeof(*softs)); softs->ipf_state_tune = ipf_tune_array_copy(softs, sizeof(ipf_state_tuneables), ipf_state_tuneables); if (softs->ipf_state_tune == NULL) { ipf_state_soft_destroy(softc, softs); return NULL; } if (ipf_tune_array_link(softc, softs->ipf_state_tune) == -1) { ipf_state_soft_destroy(softc, softs); return NULL; } #ifdef IPFILTER_LOG softs->ipf_state_logging = 1; #else softs->ipf_state_logging = 0; #endif softs->ipf_state_size = IPSTATE_SIZE, softs->ipf_state_maxbucket = 0; softs->ipf_state_wm_freq = IPF_TTLVAL(10); softs->ipf_state_max = IPSTATE_MAX; softs->ipf_state_wm_last = 0; softs->ipf_state_wm_high = 99; softs->ipf_state_wm_low = 90; softs->ipf_state_inited = 0; softs->ipf_state_lock = 0; softs->ipf_state_doflush = 0; return softs; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_soft_destroy */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to local context to use */ /* */ /* Undo only what we did in soft create: unlink and free the tunables and */ /* free the soft context structure itself. */ /* ------------------------------------------------------------------------ */ void ipf_state_soft_destroy(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_state_softc_t *softs = arg; if (softs->ipf_state_tune != NULL) { ipf_tune_array_unlink(softc, softs->ipf_state_tune); KFREES(softs->ipf_state_tune, sizeof(ipf_state_tuneables)); softs->ipf_state_tune = NULL; } KFREE(softs); } static void * ipf_state_seed_alloc(u_int state_size, u_int state_max) { u_int i; u_long *state_seed; KMALLOCS(state_seed, u_long *, state_size * sizeof(*state_seed)); if (state_seed == NULL) return NULL; for (i = 0; i < state_size; i++) { /* * XXX - ipf_state_seed[X] should be a random number of sorts. */ -#if FREEBSD_GE_REV(400000) +#ifdef __FreeBSD_version state_seed[i] = arc4random(); #else state_seed[i] = ((u_long)state_seed + i) * state_size; state_seed[i] ^= 0xa5a55a5a; state_seed[i] *= (u_long)state_seed; state_seed[i] ^= 0x5a5aa5a5; state_seed[i] *= state_max; #endif } return state_seed; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_soft_init */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to local context to use */ /* */ /* Initialise the state soft context structure so it is ready for use. 
*/ /* This involves: */ /* - allocating a hash table and zero'ing it out */ /* - building a secondary table of seeds for double hashing to make it more */ /* difficult to attempt to attack the hash table itself (for DoS) */ /* - initialise all of the timeout queues, including a table for TCP, some */ /* pairs of query/response for UDP and other IP protocols (typically the */ /* reply queue has a shorter timeout than the query) */ /* ------------------------------------------------------------------------ */ int ipf_state_soft_init(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_state_softc_t *softs = arg; int i; KMALLOCS(softs->ipf_state_table, ipstate_t **, softs->ipf_state_size * sizeof(ipstate_t *)); if (softs->ipf_state_table == NULL) return -1; bzero((char *)softs->ipf_state_table, softs->ipf_state_size * sizeof(ipstate_t *)); softs->ipf_state_seed = ipf_state_seed_alloc(softs->ipf_state_size, softs->ipf_state_max); if (softs->ipf_state_seed == NULL) return -2; KMALLOCS(softs->ipf_state_stats.iss_bucketlen, u_int *, softs->ipf_state_size * sizeof(u_int)); if (softs->ipf_state_stats.iss_bucketlen == NULL) return -3; bzero((char *)softs->ipf_state_stats.iss_bucketlen, softs->ipf_state_size * sizeof(u_int)); if (softs->ipf_state_maxbucket == 0) { for (i = softs->ipf_state_size; i > 0; i >>= 1) softs->ipf_state_maxbucket++; softs->ipf_state_maxbucket *= 2; } ipf_sttab_init(softc, softs->ipf_state_tcptq); softs->ipf_state_stats.iss_tcptab = softs->ipf_state_tcptq; softs->ipf_state_tcptq[IPF_TCP_NSTATES - 1].ifq_next = &softs->ipf_state_udptq; IPFTQ_INIT(&softs->ipf_state_udptq, softc->ipf_udptimeout, "ipftq udp tab"); softs->ipf_state_udptq.ifq_next = &softs->ipf_state_udpacktq; IPFTQ_INIT(&softs->ipf_state_udpacktq, softc->ipf_udpacktimeout, "ipftq udpack tab"); softs->ipf_state_udpacktq.ifq_next = &softs->ipf_state_icmptq; IPFTQ_INIT(&softs->ipf_state_icmptq, softc->ipf_icmptimeout, "ipftq icmp tab"); softs->ipf_state_icmptq.ifq_next = &softs->ipf_state_icmpacktq; IPFTQ_INIT(&softs->ipf_state_icmpacktq, softc->ipf_icmpacktimeout, "ipftq icmpack tab"); softs->ipf_state_icmpacktq.ifq_next = &softs->ipf_state_iptq; IPFTQ_INIT(&softs->ipf_state_iptq, softc->ipf_iptimeout, "ipftq iptimeout tab"); softs->ipf_state_iptq.ifq_next = &softs->ipf_state_pending; IPFTQ_INIT(&softs->ipf_state_pending, IPF_HZ_DIVIDE, "ipftq pending"); softs->ipf_state_pending.ifq_next = &softs->ipf_state_deletetq; IPFTQ_INIT(&softs->ipf_state_deletetq, 1, "ipftq delete"); softs->ipf_state_deletetq.ifq_next = NULL; MUTEX_INIT(&softs->ipf_stinsert, "ipf state insert mutex"); softs->ipf_state_wm_last = softc->ipf_ticks; softs->ipf_state_inited = 1; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_soft_fini */ /* Returns: int - 0 = success, -1 = failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* arg(I) - pointer to local context to use */ /* */ /* Release and destroy any resources acquired or initialised so that */ /* IPFilter can be unloaded or re-initialised. 
*/ /* ------------------------------------------------------------------------ */ int ipf_state_soft_fini(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_state_softc_t *softs = arg; ipftq_t *ifq, *ifqnext; ipstate_t *is; while ((is = softs->ipf_state_list) != NULL) ipf_state_del(softc, is, ISL_UNLOAD); /* * Proxy timeout queues are not cleaned here because although they * exist on the state list, appr_unload is called after * ipf_state_unload and the proxies actually are responsible for them * being created. Should the proxy timeouts have their own list? * There's no real justification as this is the only complication. */ for (ifq = softs->ipf_state_usertq; ifq != NULL; ifq = ifqnext) { ifqnext = ifq->ifq_next; if (ipf_deletetimeoutqueue(ifq) == 0) ipf_freetimeoutqueue(softc, ifq); } softs->ipf_state_stats.iss_inuse = 0; softs->ipf_state_stats.iss_active = 0; if (softs->ipf_state_inited == 1) { softs->ipf_state_inited = 0; ipf_sttab_destroy(softs->ipf_state_tcptq); MUTEX_DESTROY(&softs->ipf_state_udptq.ifq_lock); MUTEX_DESTROY(&softs->ipf_state_icmptq.ifq_lock); MUTEX_DESTROY(&softs->ipf_state_udpacktq.ifq_lock); MUTEX_DESTROY(&softs->ipf_state_icmpacktq.ifq_lock); MUTEX_DESTROY(&softs->ipf_state_iptq.ifq_lock); MUTEX_DESTROY(&softs->ipf_state_deletetq.ifq_lock); MUTEX_DESTROY(&softs->ipf_state_pending.ifq_lock); MUTEX_DESTROY(&softs->ipf_stinsert); } if (softs->ipf_state_table != NULL) { KFREES(softs->ipf_state_table, softs->ipf_state_size * sizeof(*softs->ipf_state_table)); softs->ipf_state_table = NULL; } if (softs->ipf_state_seed != NULL) { KFREES(softs->ipf_state_seed, softs->ipf_state_size * sizeof(*softs->ipf_state_seed)); softs->ipf_state_seed = NULL; } if (softs->ipf_state_stats.iss_bucketlen != NULL) { KFREES(softs->ipf_state_stats.iss_bucketlen, softs->ipf_state_size * sizeof(u_int)); softs->ipf_state_stats.iss_bucketlen = NULL; } return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_setlock */ /* Returns: Nil */ /* Parameters: arg(I) - pointer to local context to use */ /* tmp(I) - new value for lock */ /* */ /* Stub function that allows for external manipulation of ipf_state_lock */ /* ------------------------------------------------------------------------ */ void ipf_state_setlock(arg, tmp) void *arg; int tmp; { ipf_state_softc_t *softs = arg; softs->ipf_state_lock = tmp; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_stats */ /* Returns: ips_state_t* - pointer to state stats structure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Put all the current numbers and pointers into a single struct and return */ /* a pointer to it. 
*/ /* ------------------------------------------------------------------------ */ static ips_stat_t * ipf_state_stats(softc) ipf_main_softc_t *softc; { ipf_state_softc_t *softs = softc->ipf_state_soft; ips_stat_t *issp = &softs->ipf_state_stats; issp->iss_state_size = softs->ipf_state_size; issp->iss_state_max = softs->ipf_state_max; issp->iss_table = softs->ipf_state_table; issp->iss_list = softs->ipf_state_list; issp->iss_ticks = softc->ipf_ticks; #ifdef IPFILTER_LOGGING issp->iss_log_ok = ipf_log_logok(softc, IPF_LOGSTATE); issp->iss_log_fail = ipf_log_failures(softc, IPF_LOGSTATE); #else issp->iss_log_ok = 0; issp->iss_log_fail = 0; #endif return issp; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_remove */ /* Returns: int - 0 == success, != 0 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to state structure to delete from table */ /* */ /* Search for a state structure that matches the one passed, according to */ /* the IP addresses and other protocol specific information. */ /* ------------------------------------------------------------------------ */ static int ipf_state_remove(softc, data) ipf_main_softc_t *softc; caddr_t data; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t *sp, st; int error; sp = &st; error = ipf_inobj(softc, data, NULL, &st, IPFOBJ_IPSTATE); if (error) return EFAULT; WRITE_ENTER(&softc->ipf_state); for (sp = softs->ipf_state_list; sp; sp = sp->is_next) if ((sp->is_p == st.is_p) && (sp->is_v == st.is_v) && !bcmp((caddr_t)&sp->is_src, (caddr_t)&st.is_src, sizeof(st.is_src)) && !bcmp((caddr_t)&sp->is_dst, (caddr_t)&st.is_dst, sizeof(st.is_dst)) && !bcmp((caddr_t)&sp->is_ps, (caddr_t)&st.is_ps, sizeof(st.is_ps))) { ipf_state_del(softc, sp, ISL_REMOVE); RWLOCK_EXIT(&softc->ipf_state); return 0; } RWLOCK_EXIT(&softc->ipf_state); IPFERROR(100001); return ESRCH; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_ioctl */ /* Returns: int - 0 == success, != 0 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* data(I) - pointer to ioctl data */ /* cmd(I) - ioctl command integer */ /* mode(I) - file mode bits used with open */ /* uid(I) - uid of process making the ioctl call */ /* ctx(I) - pointer specific to context of the call */ /* */ /* Processes an ioctl call made to operate on the IP Filter state device. */ /* ------------------------------------------------------------------------ */ int ipf_state_ioctl(softc, data, cmd, mode, uid, ctx) ipf_main_softc_t *softc; caddr_t data; ioctlcmd_t cmd; int mode, uid; void *ctx; { ipf_state_softc_t *softs = softc->ipf_state_soft; int arg, ret, error = 0; SPL_INT(s); switch (cmd) { /* * Delete an entry from the state table. 
*/ case SIOCDELST : error = ipf_state_remove(softc, data); break; /* * Flush the state table */ case SIOCIPFFL : error = BCOPYIN(data, &arg, sizeof(arg)); if (error != 0) { IPFERROR(100002); error = EFAULT; } else { WRITE_ENTER(&softc->ipf_state); ret = ipf_state_flush(softc, arg, 4); RWLOCK_EXIT(&softc->ipf_state); error = BCOPYOUT(&ret, data, sizeof(ret)); if (error != 0) { IPFERROR(100003); error = EFAULT; } } break; #ifdef USE_INET6 case SIOCIPFL6 : error = BCOPYIN(data, &arg, sizeof(arg)); if (error != 0) { IPFERROR(100004); error = EFAULT; } else { WRITE_ENTER(&softc->ipf_state); ret = ipf_state_flush(softc, arg, 6); RWLOCK_EXIT(&softc->ipf_state); error = BCOPYOUT(&ret, data, sizeof(ret)); if (error != 0) { IPFERROR(100005); error = EFAULT; } } break; #endif case SIOCMATCHFLUSH : WRITE_ENTER(&softc->ipf_state); error = ipf_state_matchflush(softc, data); RWLOCK_EXIT(&softc->ipf_state); break; #ifdef IPFILTER_LOG /* * Flush the state log. */ case SIOCIPFFB : if (!(mode & FWRITE)) { IPFERROR(100008); error = EPERM; } else { int tmp; tmp = ipf_log_clear(softc, IPL_LOGSTATE); error = BCOPYOUT(&tmp, data, sizeof(tmp)); if (error != 0) { IPFERROR(100009); error = EFAULT; } } break; /* * Turn logging of state information on/off. */ case SIOCSETLG : if (!(mode & FWRITE)) { IPFERROR(100010); error = EPERM; } else { error = BCOPYIN(data, &softs->ipf_state_logging, sizeof(softs->ipf_state_logging)); if (error != 0) { IPFERROR(100011); error = EFAULT; } } break; /* * Return the current state of logging. */ case SIOCGETLG : error = BCOPYOUT(&softs->ipf_state_logging, data, sizeof(softs->ipf_state_logging)); if (error != 0) { IPFERROR(100012); error = EFAULT; } break; /* * Return the number of bytes currently waiting to be read. */ case FIONREAD : arg = ipf_log_bytesused(softc, IPL_LOGSTATE); error = BCOPYOUT(&arg, data, sizeof(arg)); if (error != 0) { IPFERROR(100013); error = EFAULT; } break; #endif /* * Get the current state statistics. */ case SIOCGETFS : error = ipf_outobj(softc, data, ipf_state_stats(softc), IPFOBJ_STATESTAT); break; /* * Lock/Unlock the state table. (Locking prevents any changes, which * means no packets match). */ case SIOCSTLCK : if (!(mode & FWRITE)) { IPFERROR(100014); error = EPERM; } else { error = ipf_lock(data, &softs->ipf_state_lock); } break; /* * Add an entry to the current state table. */ case SIOCSTPUT : if (!softs->ipf_state_lock || !(mode &FWRITE)) { IPFERROR(100015); error = EACCES; break; } error = ipf_state_putent(softc, softs, data); break; /* * Get a state table entry. 
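	 * (Note: both SIOCSTGET and SIOCSTPUT require the state table to
	 * have been locked first with SIOCSTLCK; while locked, no packets
	 * can match state, so entries can be saved and restored
	 * consistently.)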
*/ case SIOCSTGET : if (!softs->ipf_state_lock) { IPFERROR(100016); error = EACCES; break; } error = ipf_state_getent(softc, softs, data); break; /* * Return a copy of the hash table bucket lengths */ case SIOCSTAT1 : error = BCOPYOUT(softs->ipf_state_stats.iss_bucketlen, data, softs->ipf_state_size * sizeof(u_int)); if (error != 0) { IPFERROR(100017); error = EFAULT; } break; case SIOCGENITER : { ipftoken_t *token; ipfgeniter_t iter; ipfobj_t obj; error = ipf_inobj(softc, data, &obj, &iter, IPFOBJ_GENITER); if (error != 0) break; SPL_SCHED(s); token = ipf_token_find(softc, IPFGENITER_STATE, uid, ctx); if (token != NULL) { error = ipf_state_iter(softc, token, &iter, &obj); WRITE_ENTER(&softc->ipf_tokens); ipf_token_deref(softc, token); RWLOCK_EXIT(&softc->ipf_tokens); } else { IPFERROR(100018); error = ESRCH; } SPL_X(s); break; } case SIOCGTABL : error = ipf_state_gettable(softc, softs, data); break; case SIOCIPFDELTOK : error = BCOPYIN(data, &arg, sizeof(arg)); if (error != 0) { IPFERROR(100019); error = EFAULT; } else { SPL_SCHED(s); error = ipf_token_del(softc, arg, uid, ctx); SPL_X(s); } break; case SIOCGTQTAB : error = ipf_outobj(softc, data, softs->ipf_state_tcptq, IPFOBJ_STATETQTAB); break; default : IPFERROR(100020); error = EINVAL; break; } return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_getent */ /* Returns: int - 0 == success, != 0 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softs(I) - pointer to state context structure */ /* data(I) - pointer to state structure to retrieve from table*/ /* */ /* Copy out state information from the kernel to a user space process. If */ /* there is a filter rule associated with the state entry, copy that out */ /* as well. The entry to copy out is taken from the value of "ips_next" in */ /* the struct passed in and if not null and not found in the list of current*/ /* state entries, the retrieval fails. */ /* ------------------------------------------------------------------------ */ static int ipf_state_getent(softc, softs, data) ipf_main_softc_t *softc; ipf_state_softc_t *softs; caddr_t data; { ipstate_t *is, *isn; ipstate_save_t ips; int error; error = ipf_inobj(softc, data, NULL, &ips, IPFOBJ_STATESAVE); if (error) return EFAULT; READ_ENTER(&softc->ipf_state); isn = ips.ips_next; if (isn == NULL) { isn = softs->ipf_state_list; if (isn == NULL) { if (ips.ips_next == NULL) { RWLOCK_EXIT(&softc->ipf_state); IPFERROR(100021); return ENOENT; } return 0; } } else { /* * Make sure the pointer we're copying from exists in the * current list of entries. Security precaution to prevent * copying of random kernel data. 
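		 *
		 * (Userland walks the table by handing back the ips_next
		 * value from its previous SIOCSTGET call; if that entry has
		 * been deleted in the meantime, the scan below fails and
		 * ESRCH is returned instead of copying out a stale entry.)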
*/ for (is = softs->ipf_state_list; is; is = is->is_next) if (is == isn) break; if (!is) { RWLOCK_EXIT(&softc->ipf_state); IPFERROR(100022); return ESRCH; } } ips.ips_next = isn->is_next; bcopy((char *)isn, (char *)&ips.ips_is, sizeof(ips.ips_is)); ips.ips_rule = isn->is_rule; if (isn->is_rule != NULL) bcopy((char *)isn->is_rule, (char *)&ips.ips_fr, sizeof(ips.ips_fr)); RWLOCK_EXIT(&softc->ipf_state); error = ipf_outobj(softc, data, &ips, IPFOBJ_STATESAVE); return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_putent */ /* Returns: int - 0 == success, != 0 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softs(I) - pointer to state context structure */ /* data(I) - pointer to state information struct */ /* */ /* This function implements the SIOCSTPUT ioctl: insert a state entry into */ /* the state table. If the state info. includes a pointer to a filter rule */ /* then also add in an orphaned rule (will not show up in any "ipfstat -io" */ /* output. */ /* ------------------------------------------------------------------------ */ int ipf_state_putent(softc, softs, data) ipf_main_softc_t *softc; ipf_state_softc_t *softs; caddr_t data; { ipstate_t *is, *isn; ipstate_save_t ips; int error, out, i; frentry_t *fr; char *name; error = ipf_inobj(softc, data, NULL, &ips, IPFOBJ_STATESAVE); if (error != 0) return error; KMALLOC(isn, ipstate_t *); if (isn == NULL) { IPFERROR(100023); return ENOMEM; } bcopy((char *)&ips.ips_is, (char *)isn, sizeof(*isn)); bzero((char *)isn, offsetof(struct ipstate, is_pkts)); isn->is_sti.tqe_pnext = NULL; isn->is_sti.tqe_next = NULL; isn->is_sti.tqe_ifq = NULL; isn->is_sti.tqe_parent = isn; isn->is_ifp[0] = NULL; isn->is_ifp[1] = NULL; isn->is_ifp[2] = NULL; isn->is_ifp[3] = NULL; isn->is_sync = NULL; fr = ips.ips_rule; if (fr == NULL) { int inserr; READ_ENTER(&softc->ipf_state); inserr = ipf_state_insert(softc, isn, 0); MUTEX_EXIT(&isn->is_lock); RWLOCK_EXIT(&softc->ipf_state); return inserr; } if (isn->is_flags & SI_NEWFR) { KMALLOC(fr, frentry_t *); if (fr == NULL) { KFREE(isn); IPFERROR(100024); return ENOMEM; } bcopy((char *)&ips.ips_fr, (char *)fr, sizeof(*fr)); out = fr->fr_flags & FR_OUTQUE ? 1 : 0; isn->is_rule = fr; ips.ips_is.is_rule = fr; MUTEX_NUKE(&fr->fr_lock); MUTEX_INIT(&fr->fr_lock, "state filter rule lock"); /* * Look up all the interface names in the rule. */ for (i = 0; i < FR_NUM(fr->fr_ifnames); i++) { if (fr->fr_ifnames[i] == -1) { fr->fr_ifas[i] = NULL; continue; } name = FR_NAME(fr, fr_ifnames[i]); fr->fr_ifas[i] = ipf_resolvenic(softc, name, fr->fr_family); } for (i = 0; i < FR_NUM(isn->is_ifname); i++) { name = isn->is_ifname[i]; isn->is_ifp[i] = ipf_resolvenic(softc, name, isn->is_v); } fr->fr_ref = 0; fr->fr_dsize = 0; fr->fr_data = NULL; fr->fr_type = FR_T_NONE; (void) ipf_resolvedest(softc, fr->fr_names, &fr->fr_tifs[0], fr->fr_family); (void) ipf_resolvedest(softc, fr->fr_names, &fr->fr_tifs[1], fr->fr_family); (void) ipf_resolvedest(softc, fr->fr_names, &fr->fr_dif, fr->fr_family); /* * send a copy back to userland of what we ended up * to allow for verification. 
*/ error = ipf_outobj(softc, data, &ips, IPFOBJ_STATESAVE); if (error != 0) { KFREE(isn); MUTEX_DESTROY(&fr->fr_lock); KFREE(fr); IPFERROR(100025); return EFAULT; } READ_ENTER(&softc->ipf_state); error = ipf_state_insert(softc, isn, 0); MUTEX_EXIT(&isn->is_lock); RWLOCK_EXIT(&softc->ipf_state); } else { READ_ENTER(&softc->ipf_state); for (is = softs->ipf_state_list; is; is = is->is_next) if (is->is_rule == fr) { error = ipf_state_insert(softc, isn, 0); MUTEX_EXIT(&isn->is_lock); break; } if (is == NULL) { KFREE(isn); isn = NULL; } RWLOCK_EXIT(&softc->ipf_state); if (isn == NULL) { IPFERROR(100033); error = ESRCH; } } return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_insert */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* Parameters: is(I) - pointer to state structure */ /* rev(I) - flag indicating direction of packet */ /* */ /* Inserts a state structure into the hash table (for lookups) and the list */ /* of state entries (for enumeration). Resolves all of the interface names */ /* to pointers and adjusts running stats for the hash table as appropriate. */ /* */ /* This function can fail if the filter rule has had a population policy of */ /* IP addresses used with stateful filtering assigned to it. */ /* */ /* Locking: it is assumed that some kind of lock on ipf_state is held. */ /* Exits with is_lock initialised and held - *EVEN IF ERROR*. */ /* ------------------------------------------------------------------------ */ int ipf_state_insert(softc, is, rev) ipf_main_softc_t *softc; ipstate_t *is; int rev; { ipf_state_softc_t *softs = softc->ipf_state_soft; frentry_t *fr; u_int hv; int i; /* * Look up all the interface names in the state entry. */ for (i = 0; i < FR_NUM(is->is_ifp); i++) { if (is->is_ifp[i] != NULL) continue; is->is_ifp[i] = ipf_resolvenic(softc, is->is_ifname[i], is->is_v); } /* * If we could trust is_hv, then the modulus would not be needed, * but when running with IPFILTER_SYNC, this stops bad values. */ hv = is->is_hv % softs->ipf_state_size; /* TRACE is, hv */ is->is_hv = hv; /* * We need to get both of these locks...the first because it is * possible that once the insert is complete another packet might * come along, match the entry and want to update it. */ MUTEX_INIT(&is->is_lock, "ipf state entry"); MUTEX_ENTER(&is->is_lock); MUTEX_ENTER(&softs->ipf_stinsert); fr = is->is_rule; if (fr != NULL) { if ((fr->fr_srctrack.ht_max_nodes != 0) && (ipf_ht_node_add(softc, &fr->fr_srctrack, is->is_family, &is->is_src) == -1)) { SBUMPD(ipf_state_stats, iss_max_track); MUTEX_EXIT(&softs->ipf_stinsert); return -1; } MUTEX_ENTER(&fr->fr_lock); fr->fr_ref++; MUTEX_EXIT(&fr->fr_lock); fr->fr_statecnt++; } if (is->is_flags & (SI_WILDP|SI_WILDA)) { DT(iss_wild_plus_one); SINCL(ipf_state_stats.iss_wild); } SBUMP(ipf_state_stats.iss_proto[is->is_p]); SBUMP(ipf_state_stats.iss_active_proto[is->is_p]); /* * add into list table. 
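	 * The new entry goes on the head of both the global list and its
	 * hash bucket.  is_pnext and is_phnext record the address of the
	 * pointer that points at this entry, so it can later be unlinked
	 * without walking either list.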
	*/
	if (softs->ipf_state_list != NULL)
		softs->ipf_state_list->is_pnext = &is->is_next;
	is->is_pnext = &softs->ipf_state_list;
	is->is_next = softs->ipf_state_list;
	softs->ipf_state_list = is;

	if (softs->ipf_state_table[hv] != NULL)
		softs->ipf_state_table[hv]->is_phnext = &is->is_hnext;
	else
		softs->ipf_state_stats.iss_inuse++;
	is->is_phnext = softs->ipf_state_table + hv;
	is->is_hnext = softs->ipf_state_table[hv];
	softs->ipf_state_table[hv] = is;
	softs->ipf_state_stats.iss_bucketlen[hv]++;
	softs->ipf_state_stats.iss_active++;
	MUTEX_EXIT(&softs->ipf_stinsert);

	ipf_state_setqueue(softc, is, rev);

	return 0;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_state_matchipv4addrs                                     */
/* Returns:     int - 2 addresses match (strong match), 1 reverse match,    */
/*                    0 no match                                             */
/* Parameters:  is1, is2 - pointers to the states we are checking            */
/*                                                                           */
/* Matches IPv4 addresses.  A strong match is returned for the ICMP         */
/* protocol even when there is only a reverse match.                         */
/* ------------------------------------------------------------------------ */
static int
ipf_state_matchipv4addrs(is1, is2)
	ipstate_t *is1, *is2;
{
	int rv;

	if (is1->is_saddr == is2->is_saddr && is1->is_daddr == is2->is_daddr)
		rv = 2;
	else if (is1->is_saddr == is2->is_daddr &&
	    is1->is_daddr == is2->is_saddr) {
		/* force strong match for ICMP protocol */
		rv = (is1->is_p == IPPROTO_ICMP) ? 2 : 1;
	} else
		rv = 0;

	return (rv);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_state_matchipv6addrs                                     */
/* Returns:     int - 2 addresses match (strong match), 1 reverse match,    */
/*                    0 no match                                             */
/* Parameters:  is1, is2 - pointers to the states we are checking            */
/*                                                                           */
/* Matches IPv6 addresses.  A strong match is returned for the ICMPv6       */
/* protocol even when there is only a reverse match.                         */
/* ------------------------------------------------------------------------ */
static int
ipf_state_matchipv6addrs(is1, is2)
	ipstate_t *is1, *is2;
{
	int rv;

	if (IP6_EQ(&is1->is_src, &is2->is_src) &&
	    IP6_EQ(&is1->is_dst, &is2->is_dst))
		rv = 2;
	else if (IP6_EQ(&is1->is_src, &is2->is_dst) &&
	    IP6_EQ(&is1->is_dst, &is2->is_src)) {
		/* force strong match for ICMPv6 protocol */
		rv = (is1->is_p == IPPROTO_ICMPV6) ? 2 : 1;
	} else
		rv = 0;

	return (rv);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_state_matchaddresses                                     */
/* Returns:     int - 2 addresses match, 1 reverse match, zero no match     */
/* Parameters:  is1, is2 - pointers to the states we are checking            */
/*                                                                           */
/* Returns non-zero if the two pairs of addresses belong to a single        */
/* connection.  Suppose there are two endpoints:                             */
/*      endpoint1 1.1.1.1                                                    */
/*      endpoint2 1.1.1.2                                                    */
/*                                                                           */
/* The state is established by a packet flowing from .1 to .2, so we see:   */
/*      is1->src = 1.1.1.1                                                   */
/*      is1->dst = 1.1.1.2                                                   */
/* Now endpoint 1.1.1.2 sends its answer.  The lookup retrieves the is1     */
/* record created by the first packet and compares it with the temporary    */
/* is2 record, which is initialised as follows:                              */
/*      is2->src = 1.1.1.2                                                   */
/*      is2->dst = 1.1.1.1                                                   */
/* In this case 1 will be returned.                                          */
/*                                                                           */
/* The two records are treated as describing the same connection.  Records  */
/* are, of course, also the same if you pass identical arguments (i.e.
ipf_matchaddress(is1, is1) would return 2 */ /* ------------------------------------------------------------------------ */ static int ipf_state_matchaddresses(is1, is2) ipstate_t *is1, *is2; { int rv; if (is1->is_v == 4) { rv = ipf_state_matchipv4addrs(is1, is2); } else { rv = ipf_state_matchipv6addrs(is1, is2); } return (rv); } /* ------------------------------------------------------------------------ */ /* Function: ipf_matchports */ /* Returns: int - 2 match, 1 rverse match, 0 no match */ /* Parameters: ppairs1, ppairs - src, dst ports we want to match */ /* */ /* performs the same match for isps members as for addresses */ /* ------------------------------------------------------------------------ */ static int ipf_state_matchports(ppairs1, ppairs2) udpinfo_t *ppairs1, *ppairs2; { int rv; if (ppairs1->us_sport == ppairs2->us_sport && ppairs1->us_dport == ppairs2->us_dport) rv = 2; else if (ppairs1->us_sport == ppairs2->us_dport && ppairs1->us_dport == ppairs2->us_sport) rv = 1; else rv = 0; return (rv); } /* ------------------------------------------------------------------------ */ /* Function: ipf_matchisps */ /* Returns: int - nonzero if isps members match, 0 nomatch */ /* Parameters: is1, is2 - states we want to match */ /* */ /* performs the same match for isps members as for addresses */ /* ------------------------------------------------------------------------ */ static int ipf_state_matchisps(is1, is2) ipstate_t *is1, *is2; { int rv; if (is1->is_p == is2->is_p) { switch (is1->is_p) { case IPPROTO_TCP : case IPPROTO_UDP : case IPPROTO_GRE : /* greinfo_t can be also interprted as port pair */ rv = ipf_state_matchports(&is1->is_ps.is_us, &is2->is_ps.is_us); break; case IPPROTO_ICMP : case IPPROTO_ICMPV6 : /* force strong match for ICMP datagram. */ if (bcmp(&is1->is_ps, &is2->is_ps, sizeof(icmpinfo_t)) == 0) { rv = 2; } else { rv = 0; } break; default: rv = 0; } } else { rv = 0; } return (rv); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_match */ /* Returns: int - nonzero match, zero no match */ /* Parameters: is1, is2 - states we want to match */ /* */ /* ------------------------------------------------------------------------ */ static int ipf_state_match(is1, is2) ipstate_t *is1, *is2; { int rv; int amatch; int pomatch; if (bcmp(&is1->is_pass, &is2->is_pass, offsetof(struct ipstate, is_authmsk) - offsetof(struct ipstate, is_pass)) == 0) { pomatch = ipf_state_matchisps(is1, is2); amatch = ipf_state_matchaddresses(is1, is2); rv = (amatch != 0) && (amatch == pomatch); } else { rv = 0; } return (rv); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_add */ /* Returns: ipstate_t - 0 = success */ /* Parameters: softc(I) - pointer to soft context main structure */ /* fin(I) - pointer to packet information */ /* stsave(O) - pointer to place to save pointer to created */ /* state structure. */ /* flags(I) - flags to use when creating the structure */ /* */ /* Creates a new IP state structure from the packet information collected. */ /* Inserts it into the state table and appends to the bottom of the active */ /* list. If the capacity of the table has reached the maximum allowed then */ /* the call will fail and a flush is scheduled for the next timeout call. */ /* */ /* NOTE: The use of stsave to point to nat_state will result in memory */ /* corruption. 
It should only be used to point to objects that will */ /* either outlive this (not expired) or will deref the ip_state_t */ /* when they are deleted. */ /* ------------------------------------------------------------------------ */ int ipf_state_add(softc, fin, stsave, flags) ipf_main_softc_t *softc; fr_info_t *fin; ipstate_t **stsave; u_int flags; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t *is, ips; struct icmp *ic; u_int pass, hv; frentry_t *fr; tcphdr_t *tcp; frdest_t *fdp; int out; /* * If a locally created packet is trying to egress but it * does not match because of this lock, it is likely that * the policy will block it and return network unreachable further * up the stack. To mitigate this error, EAGAIN is returned instead, * telling the IP stack to try sending this packet again later. */ if (softs->ipf_state_lock) { SBUMPD(ipf_state_stats, iss_add_locked); fin->fin_error = EAGAIN; return -1; } if (fin->fin_flx & (FI_SHORT|FI_STATE|FI_FRAGBODY|FI_BAD)) { SBUMPD(ipf_state_stats, iss_add_bad); return -1; } if ((fin->fin_flx & FI_OOW) && !(fin->fin_tcpf & TH_SYN)) { SBUMPD(ipf_state_stats, iss_add_oow); return -1; } if ((softs->ipf_state_stats.iss_active * 100 / softs->ipf_state_max) > softs->ipf_state_wm_high) { softs->ipf_state_doflush = 1; } /* * If a "keep state" rule has reached the maximum number of references * to it, then schedule an automatic flush in case we can clear out * some "dead old wood". Note that because the lock isn't held on * fr it is possible that we could overflow. The cost of overflowing * is being ignored here as the number by which it can overflow is * a product of the number of simultaneous threads that could be * executing in here, so a limit of 100 won't result in 200, but could * result in 101 or 102. */ fr = fin->fin_fr; if (fr != NULL) { if ((softs->ipf_state_stats.iss_active >= softs->ipf_state_max) && (fr->fr_statemax == 0)) { SBUMPD(ipf_state_stats, iss_max); return 1; } if ((fr->fr_statemax != 0) && (fr->fr_statecnt >= fr->fr_statemax)) { SBUMPD(ipf_state_stats, iss_max_ref); return 2; } } is = &ips; if (fr == NULL) { pass = softc->ipf_flags; is->is_tag = FR_NOLOGTAG; } else { pass = fr->fr_flags; } ic = NULL; tcp = NULL; out = fin->fin_out; bzero((char *)is, sizeof(*is)); is->is_die = 1 + softc->ipf_ticks; /* * We want to check everything that is a property of this packet, * but we don't (automatically) care about its fragment status as * this may change. */ is->is_pass = pass; is->is_v = fin->fin_v; is->is_sec = fin->fin_secmsk; is->is_secmsk = 0xffff; is->is_auth = fin->fin_auth; is->is_authmsk = 0xffff; is->is_family = fin->fin_family; is->is_opt[0] = fin->fin_optmsk; is->is_optmsk[0] = 0xffffffff; if (is->is_v == 6) { is->is_opt[0] &= ~0x8; is->is_optmsk[0] &= ~0x8; } /* * Copy and calculate... */ hv = (is->is_p = fin->fin_fi.fi_p); is->is_src = fin->fin_fi.fi_src; hv += is->is_saddr; is->is_dst = fin->fin_fi.fi_dst; hv += is->is_daddr; #ifdef USE_INET6 if (fin->fin_v == 6) { /* * For ICMPv6, we check to see if the destination address is * a multicast address. If it is, do not include it in the * calculation of the hash because the correct reply will come * back from a real address, not a multicast address. */ if ((is->is_p == IPPROTO_ICMPV6) && IN6_IS_ADDR_MULTICAST(&is->is_dst.in6)) { /* * So you can do keep state with neighbour discovery. * * Here we could use the address from the neighbour * solicit message to put in the state structure and * we could use that without a wildcard flag too... 
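			 *
			 * Concretely (an illustrative case, not from the
			 * original comment): a neighbour solicitation goes to
			 * a solicited-node multicast address such as
			 * ff02::1:ffxx:xxxx, while the advertisement comes
			 * back from the target's unicast address.  Backing
			 * is_daddr out of the hash (hv -= is->is_daddr below)
			 * and setting SI_W_DADDR lets both packets land in
			 * the same bucket and match the same state entry.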
*/ flags |= SI_W_DADDR; hv -= is->is_daddr; } else { hv += is->is_dst.i6[1]; hv += is->is_dst.i6[2]; hv += is->is_dst.i6[3]; } hv += is->is_src.i6[1]; hv += is->is_src.i6[2]; hv += is->is_src.i6[3]; } #endif if ((fin->fin_v == 4) && (fin->fin_flx & (FI_MULTICAST|FI_BROADCAST|FI_MBCAST))) { flags |= SI_W_DADDR; hv -= is->is_daddr; } switch (is->is_p) { #ifdef USE_INET6 case IPPROTO_ICMPV6 : ic = fin->fin_dp; switch (ic->icmp_type) { case ICMP6_ECHO_REQUEST : hv += (is->is_icmp.ici_id = ic->icmp_id); /*FALLTHROUGH*/ case ICMP6_MEMBERSHIP_QUERY : case ND_ROUTER_SOLICIT : case ND_NEIGHBOR_SOLICIT : case ICMP6_NI_QUERY : is->is_icmp.ici_type = ic->icmp_type; break; default : SBUMPD(ipf_state_stats, iss_icmp6_notquery); return -2; } break; #endif case IPPROTO_ICMP : ic = fin->fin_dp; switch (ic->icmp_type) { case ICMP_ECHO : case ICMP_TSTAMP : case ICMP_IREQ : case ICMP_MASKREQ : is->is_icmp.ici_type = ic->icmp_type; hv += (is->is_icmp.ici_id = ic->icmp_id); break; default : SBUMPD(ipf_state_stats, iss_icmp_notquery); return -3; } break; #if 0 case IPPROTO_GRE : gre = fin->fin_dp; is->is_gre.gs_flags = gre->gr_flags; is->is_gre.gs_ptype = gre->gr_ptype; if (GRE_REV(is->is_gre.gs_flags) == 1) { is->is_call[0] = fin->fin_data[0]; is->is_call[1] = fin->fin_data[1]; } break; #endif case IPPROTO_TCP : tcp = fin->fin_dp; if (tcp->th_flags & TH_RST) { SBUMPD(ipf_state_stats, iss_tcp_rstadd); return -4; } /* TRACE is, flags, hv */ /* * The endian of the ports doesn't matter, but the ack and * sequence numbers do as we do mathematics on them later. */ is->is_sport = htons(fin->fin_data[0]); is->is_dport = htons(fin->fin_data[1]); if ((flags & (SI_W_DPORT|SI_W_SPORT)) == 0) { hv += is->is_sport; hv += is->is_dport; } /* TRACE is, flags, hv */ /* * If this is a real packet then initialise fields in the * state information structure from the TCP header information. */ is->is_maxdwin = 1; is->is_maxswin = ntohs(tcp->th_win); if (is->is_maxswin == 0) is->is_maxswin = 1; if ((fin->fin_flx & FI_IGNORE) == 0) { is->is_send = ntohl(tcp->th_seq) + fin->fin_dlen - (TCP_OFF(tcp) << 2) + ((tcp->th_flags & TH_SYN) ? 1 : 0) + ((tcp->th_flags & TH_FIN) ? 1 : 0); is->is_maxsend = is->is_send; /* * Window scale option is only present in * SYN/SYN-ACK packet. */ if ((tcp->th_flags & ~(TH_FIN|TH_ACK|TH_ECNALL)) == TH_SYN && (TCP_OFF(tcp) > (sizeof(tcphdr_t) >> 2))) { if (ipf_tcpoptions(softs, fin, tcp, &is->is_tcp.ts_data[0]) == -1) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_tcpoptions_th_fin_ack_ecnall, fr_info_t *, fin); } } if ((fin->fin_out != 0) && (pass & FR_NEWISN) != 0) { ipf_checknewisn(fin, is); ipf_fixoutisn(fin, is); } if ((tcp->th_flags & TH_OPENING) == TH_SYN) flags |= IS_TCPFSM; else { is->is_maxdwin = is->is_maxswin * 2; is->is_dend = ntohl(tcp->th_ack); is->is_maxdend = ntohl(tcp->th_ack); is->is_maxdwin *= 2; } } /* * If we're creating state for a starting connection, start * the timer on it as we'll never see an error if it fails * to connect. */ break; case IPPROTO_UDP : tcp = fin->fin_dp; is->is_sport = htons(fin->fin_data[0]); is->is_dport = htons(fin->fin_data[1]); if ((flags & (SI_W_DPORT|SI_W_SPORT)) == 0) { hv += tcp->th_dport; hv += tcp->th_sport; } break; default : break; } hv = DOUBLE_HASH(hv); is->is_hv = hv; /* * Look for identical state. 
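	 * hv has been salted through DOUBLE_HASH() above, i.e. roughly
	 * (hv + ipf_state_seed[hv % ipf_state_size]) % ipf_state_size with
	 * a seed table generated at initialisation time, which makes it
	 * hard for an attacker to aim large numbers of connections at a
	 * single bucket.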
*/ for (is = softs->ipf_state_table[hv % softs->ipf_state_size]; is != NULL; is = is->is_hnext) { if (ipf_state_match(&ips, is) == 1) break; } if (is != NULL) { SBUMPD(ipf_state_stats, iss_add_dup); return 3; } if (softs->ipf_state_stats.iss_bucketlen[hv] >= softs->ipf_state_maxbucket) { SBUMPD(ipf_state_stats, iss_bucket_full); return 4; } /* * No existing state; create new */ KMALLOC(is, ipstate_t *); if (is == NULL) { SBUMPD(ipf_state_stats, iss_nomem); return 5; } bcopy((char *)&ips, (char *)is, sizeof(*is)); is->is_flags = flags & IS_INHERITED; is->is_rulen = fin->fin_rule; is->is_rule = fr; /* * Do not do the modulus here, it is done in ipf_state_insert(). */ if (fr != NULL) { ipftq_t *tq; (void) strncpy(is->is_group, FR_NAME(fr, fr_group), FR_GROUPLEN); if (fr->fr_age[0] != 0) { tq = ipf_addtimeoutqueue(softc, &softs->ipf_state_usertq, fr->fr_age[0]); is->is_tqehead[0] = tq; is->is_sti.tqe_flags |= TQE_RULEBASED; } if (fr->fr_age[1] != 0) { tq = ipf_addtimeoutqueue(softc, &softs->ipf_state_usertq, fr->fr_age[1]); is->is_tqehead[1] = tq; is->is_sti.tqe_flags |= TQE_RULEBASED; } is->is_tag = fr->fr_logtag; } /* * It may seem strange to set is_ref to 2, but if stsave is not NULL * then a copy of the pointer is being stored somewhere else and in * the end, it will expect to be able to do something with it. */ is->is_me = stsave; if (stsave != NULL) { *stsave = is; is->is_ref = 2; } else { is->is_ref = 1; } is->is_pkts[0] = 0, is->is_bytes[0] = 0; is->is_pkts[1] = 0, is->is_bytes[1] = 0; is->is_pkts[2] = 0, is->is_bytes[2] = 0; is->is_pkts[3] = 0, is->is_bytes[3] = 0; if ((fin->fin_flx & FI_IGNORE) == 0) { is->is_pkts[out] = 1; fin->fin_pktnum = 1; is->is_bytes[out] = fin->fin_plen; is->is_flx[out][0] = fin->fin_flx & FI_CMP; is->is_flx[out][0] &= ~FI_OOW; } if (pass & FR_STLOOSE) is->is_flags |= IS_LOOSE; if (pass & FR_STSTRICT) is->is_flags |= IS_STRICT; if (pass & FR_STATESYNC) is->is_flags |= IS_STATESYNC; if (pass & FR_LOGFIRST) is->is_pass &= ~(FR_LOGFIRST|FR_LOG); READ_ENTER(&softc->ipf_state); if (ipf_state_insert(softc, is, fin->fin_rev) == -1) { RWLOCK_EXIT(&softc->ipf_state); /* * This is a bit more manual than it should be but * ipf_state_del cannot be called. */ MUTEX_EXIT(&is->is_lock); MUTEX_DESTROY(&is->is_lock); if (is->is_tqehead[0] != NULL) { if (ipf_deletetimeoutqueue(is->is_tqehead[0]) == 0) ipf_freetimeoutqueue(softc, is->is_tqehead[0]); is->is_tqehead[0] = NULL; } if (is->is_tqehead[1] != NULL) { if (ipf_deletetimeoutqueue(is->is_tqehead[1]) == 0) ipf_freetimeoutqueue(softc, is->is_tqehead[1]); is->is_tqehead[1] = NULL; } KFREE(is); return -1; } /* * Filling in the interface name is after the insert so that an * event (such as add/delete) of an interface that is referenced * by this rule will see this state entry. */ if (fr != NULL) { /* * The name '-' is special for network interfaces and causes * a NULL name to be present, always, allowing packets to * match it, regardless of their interface. 
*/ if ((fin->fin_ifp == NULL) || (fr->fr_ifnames[out << 1] != -1 && fr->fr_names[fr->fr_ifnames[out << 1] + 0] == '-' && fr->fr_names[fr->fr_ifnames[out << 1] + 1] == '\0')) { is->is_ifp[out << 1] = fr->fr_ifas[0]; strncpy(is->is_ifname[out << 1], FR_NAME(fr, fr_ifnames[0]), sizeof(fr->fr_ifnames[0])); } else { is->is_ifp[out << 1] = fin->fin_ifp; COPYIFNAME(fin->fin_v, fin->fin_ifp, is->is_ifname[out << 1]); } is->is_ifp[(out << 1) + 1] = fr->fr_ifas[1]; if (fr->fr_ifnames[1] != -1) { strncpy(is->is_ifname[(out << 1) + 1], FR_NAME(fr, fr_ifnames[1]), sizeof(fr->fr_ifnames[1])); } is->is_ifp[(1 - out) << 1] = fr->fr_ifas[2]; if (fr->fr_ifnames[2] != -1) { strncpy(is->is_ifname[((1 - out) << 1)], FR_NAME(fr, fr_ifnames[2]), sizeof(fr->fr_ifnames[2])); } is->is_ifp[((1 - out) << 1) + 1] = fr->fr_ifas[3]; if (fr->fr_ifnames[3] != -1) { strncpy(is->is_ifname[((1 - out) << 1) + 1], FR_NAME(fr, fr_ifnames[3]), sizeof(fr->fr_ifnames[3])); } } else { if (fin->fin_ifp != NULL) { is->is_ifp[out << 1] = fin->fin_ifp; COPYIFNAME(fin->fin_v, fin->fin_ifp, is->is_ifname[out << 1]); } } if (fin->fin_p == IPPROTO_TCP) { /* * If we're creating state for a starting connection, start the * timer on it as we'll never see an error if it fails to * connect. */ (void) ipf_tcp_age(&is->is_sti, fin, softs->ipf_state_tcptq, is->is_flags, 2); } MUTEX_EXIT(&is->is_lock); if ((is->is_flags & IS_STATESYNC) && ((is->is_flags & SI_CLONE) == 0)) is->is_sync = ipf_sync_new(softc, SMC_STATE, fin, is); if (softs->ipf_state_logging) ipf_state_log(softc, is, ISL_NEW); RWLOCK_EXIT(&softc->ipf_state); fin->fin_flx |= FI_STATE; if (fin->fin_flx & FI_FRAG) (void) ipf_frag_new(softc, fin, pass); fdp = &fr->fr_tifs[0]; if (fdp->fd_type == FRD_DSTLIST) { ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &is->is_tifs[0]); } else { bcopy(fdp, &is->is_tifs[0], sizeof(*fdp)); } fdp = &fr->fr_tifs[1]; if (fdp->fd_type == FRD_DSTLIST) { ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &is->is_tifs[1]); } else { bcopy(fdp, &is->is_tifs[1], sizeof(*fdp)); } fin->fin_tif = &is->is_tifs[fin->fin_rev]; fdp = &fr->fr_dif; if (fdp->fd_type == FRD_DSTLIST) { ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &is->is_dif); } else { bcopy(fdp, &is->is_dif, sizeof(*fdp)); } fin->fin_dif = &is->is_dif; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_tcpoptions */ /* Returns: int - 1 == packet matches state entry, 0 == it does not, */ /* -1 == packet has bad TCP options data */ /* Parameters: softs(I) - pointer to state context structure */ /* fin(I) - pointer to packet information */ /* tcp(I) - pointer to TCP packet header */ /* td(I) - pointer to TCP data held as part of the state */ /* */ /* Look after the TCP header for any options and deal with those that are */ /* present. Record details about those that we recogise. 
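*/
/* For example (illustrative only): a window scale option appears on the    */
/* wire as the three bytes 0x03 0x03 0x07 (kind 3, length 3, shift 7).      */
/* The parser below stores the shift count in td_winscale, clamping it to   */
/* TCP_WSCALE_MAX, and sets TCP_WSCALE_SEEN; an option with a malformed     */
/* length causes -1 to be returned and the packet to be flagged FI_BAD by   */
/* the callers.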
*/ /* ------------------------------------------------------------------------ */ static int ipf_tcpoptions(softs, fin, tcp, td) ipf_state_softc_t *softs; fr_info_t *fin; tcphdr_t *tcp; tcpdata_t *td; { int off, mlen, ol, i, len, retval; char buf[64], *s, opt; mb_t *m = NULL; len = (TCP_OFF(tcp) << 2); if (fin->fin_dlen < len) { SBUMPD(ipf_state_stats, iss_tcp_toosmall); return 0; } len -= sizeof(*tcp); off = fin->fin_plen - fin->fin_dlen + sizeof(*tcp) + fin->fin_ipoff; m = fin->fin_m; mlen = MSGDSIZE(m) - off; if (len > mlen) { len = mlen; retval = 0; } else { retval = 1; } COPYDATA(m, off, len, buf); for (s = buf; len > 0; ) { opt = *s; if (opt == TCPOPT_EOL) break; else if (opt == TCPOPT_NOP) ol = 1; else { if (len < 2) break; ol = (int)*(s + 1); if (ol < 2 || ol > len) break; /* * Extract the TCP options we are interested in out of * the header and store them in the the tcpdata struct. */ switch (opt) { case TCPOPT_WINDOW : if (ol == TCPOLEN_WINDOW) { i = (int)*(s + 2); if (i > TCP_WSCALE_MAX) i = TCP_WSCALE_MAX; else if (i < 0) i = 0; td->td_winscale = i; td->td_winflags |= TCP_WSCALE_SEEN| TCP_WSCALE_FIRST; } else retval = -1; break; case TCPOPT_MAXSEG : /* * So, if we wanted to set the TCP MAXSEG, * it should be done here... */ if (ol == TCPOLEN_MAXSEG) { i = (int)*(s + 2); i <<= 8; i += (int)*(s + 3); td->td_maxseg = i; } else retval = -1; break; case TCPOPT_SACK_PERMITTED : if (ol == TCPOLEN_SACK_PERMITTED) td->td_winflags |= TCP_SACK_PERMIT; else retval = -1; break; } } len -= ol; s += ol; } if (retval == -1) { SBUMPD(ipf_state_stats, iss_tcp_badopt); } return retval; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_tcp */ /* Returns: int - 1 == packet matches state entry, 0 == it does not */ /* Parameters: softc(I) - pointer to soft context main structure */ /* softs(I) - pointer to state context structure */ /* fin(I) - pointer to packet information */ /* tcp(I) - pointer to TCP packet header */ /* is(I) - pointer to master state structure */ /* */ /* Check to see if a packet with TCP headers fits within the TCP window. */ /* Change timeout depending on whether new packet is a SYN-ACK returning */ /* for a SYN or a RST or FIN which indicate time to close up shop. */ /* ------------------------------------------------------------------------ */ static int ipf_state_tcp(softc, softs, fin, tcp, is) ipf_main_softc_t *softc; ipf_state_softc_t *softs; fr_info_t *fin; tcphdr_t *tcp; ipstate_t *is; { tcpdata_t *fdata, *tdata; int source, ret, flags; source = !fin->fin_rev; if (((is->is_flags & IS_TCPFSM) != 0) && (source == 1) && (ntohs(is->is_sport) != fin->fin_data[0])) source = 0; fdata = &is->is_tcp.ts_data[!source]; tdata = &is->is_tcp.ts_data[source]; MUTEX_ENTER(&is->is_lock); /* * If a SYN packet is received for a connection that is on the way out * but hasn't yet departed then advance this session along the way. */ if ((tcp->th_flags & TH_OPENING) == TH_SYN) { if ((is->is_state[0] > IPF_TCPS_ESTABLISHED) && (is->is_state[1] > IPF_TCPS_ESTABLISHED)) { is->is_state[!source] = IPF_TCPS_CLOSED; ipf_movequeue(softc->ipf_ticks, &is->is_sti, is->is_sti.tqe_ifq, &softs->ipf_state_deletetq); MUTEX_EXIT(&is->is_lock); DT1(iss_tcp_closing, ipstate_t *, is); SBUMP(ipf_state_stats.iss_tcp_closing); return 0; } } if (is->is_flags & IS_LOOSE) ret = 1; else ret = ipf_state_tcpinwindow(fin, fdata, tdata, tcp, is->is_flags); if (ret > 0) { /* * Nearing end of connection, start timeout. 
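 *
 * ipf_tcp_age() (defined further below) returns 0 when the packet's flag
 * combination is rejected by the TCP state machine and non-zero when it
 * is accepted; internally a value of 2 means "accept but do not refresh
 * the timeout" and is folded to 1 before returning, so the check that
 * follows only needs to distinguish zero from non-zero.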
*/ ret = ipf_tcp_age(&is->is_sti, fin, softs->ipf_state_tcptq, is->is_flags, ret); if (ret == 0) { MUTEX_EXIT(&is->is_lock); DT2(iss_tcp_fsm, fr_info_t *, fin, ipstate_t *, is); SBUMP(ipf_state_stats.iss_tcp_fsm); return 0; } if (softs->ipf_state_logging > 4) ipf_state_log(softc, is, ISL_STATECHANGE); /* * set s0's as appropriate. Use syn-ack packet as it * contains both pieces of required information. */ /* * Window scale option is only present in SYN/SYN-ACK packet. * Compare with ~TH_FIN to mask out T/TCP setups. */ flags = tcp->th_flags & ~(TH_FIN|TH_ECNALL); if (flags == (TH_SYN|TH_ACK)) { is->is_s0[source] = ntohl(tcp->th_ack); is->is_s0[!source] = ntohl(tcp->th_seq) + 1; if ((TCP_OFF(tcp) > (sizeof(tcphdr_t) >> 2))) { if (ipf_tcpoptions(softs, fin, tcp, fdata) == -1) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_winscale_syn_ack, fr_info_t *, fin); } } if ((fin->fin_out != 0) && (is->is_pass & FR_NEWISN)) ipf_checknewisn(fin, is); } else if (flags == TH_SYN) { is->is_s0[source] = ntohl(tcp->th_seq) + 1; if ((TCP_OFF(tcp) > (sizeof(tcphdr_t) >> 2))) { if (ipf_tcpoptions(softs, fin, tcp, fdata) == -1) { fin->fin_flx |= FI_BAD; DT1(ipf_fi_bad_winscale_syn, fr_info_t *, fin); } } if ((fin->fin_out != 0) && (is->is_pass & FR_NEWISN)) ipf_checknewisn(fin, is); } ret = 1; } else { DT2(iss_tcp_oow, fr_info_t *, fin, ipstate_t *, is); SBUMP(ipf_state_stats.iss_tcp_oow); ret = 0; } MUTEX_EXIT(&is->is_lock); return ret; } /* ------------------------------------------------------------------------ */ /* Function: ipf_checknewisn */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* is(I) - pointer to master state structure */ /* */ /* Check to see if this TCP connection is expecting and needs a new */ /* sequence number for a particular direction of the connection. */ /* */ /* NOTE: This does not actually change the sequence numbers, only gets new */ /* one ready. */ /* ------------------------------------------------------------------------ */ static void ipf_checknewisn(fin, is) fr_info_t *fin; ipstate_t *is; { u_32_t sumd, old, new; tcphdr_t *tcp; int i; i = fin->fin_rev; tcp = fin->fin_dp; if (((i == 0) && !(is->is_flags & IS_ISNSYN)) || ((i == 1) && !(is->is_flags & IS_ISNACK))) { old = ntohl(tcp->th_seq); new = ipf_newisn(fin); is->is_isninc[i] = new - old; CALC_SUMD(old, new, sumd); is->is_sumd[i] = (sumd & 0xffff) + (sumd >> 16); is->is_flags |= ((i == 0) ? IS_ISNSYN : IS_ISNACK); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_tcpinwindow */ /* Returns: int - 1 == packet inside TCP "window", 0 == not inside. */ /* Parameters: fin(I) - pointer to packet information */ /* fdata(I) - pointer to tcp state informatio (forward) */ /* tdata(I) - pointer to tcp state informatio (reverse) */ /* tcp(I) - pointer to TCP packet header */ /* */ /* Given a packet has matched addresses and ports, check to see if it is */ /* within the TCP data window. In a show of generosity, allow packets that */ /* are within the window space behind the current sequence # as well. */ /* ------------------------------------------------------------------------ */ static int ipf_state_tcpinwindow(fin, fdata, tdata, tcp, flags) fr_info_t *fin; tcpdata_t *fdata, *tdata; tcphdr_t *tcp; int flags; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; tcp_seq seq, ack, end; int ackskew, tcpflags; u_32_t win, maxwin; int dsize, inseq; /* * Find difference between last checked packet and this packet. 
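 *
 * A hedged, worked example of the test performed below (numbers invented):
 * with fdata->td_end 1001, fdata->td_maxend 66536, tdata->td_end 5001 and
 * maxwin (tdata->td_maxwin) 8192, a packet carrying seq 1001, 500 bytes of
 * data and ack 5001 gives end = 1501 and ackskew = 0.  It satisfies all of
 * the window conditions (end is below td_maxend, seq is no more than maxwin
 * behind td_end and ackskew is within MAXACKWINDOW), so it is treated as
 * in sequence and fdata->td_end is advanced to 1501 on the way out.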
*/ tcpflags = tcp->th_flags; seq = ntohl(tcp->th_seq); ack = ntohl(tcp->th_ack); if (tcpflags & TH_SYN) win = ntohs(tcp->th_win); else win = ntohs(tcp->th_win) << fdata->td_winscale; /* * A window of 0 produces undesirable behaviour from this function. */ if (win == 0) win = 1; dsize = fin->fin_dlen - (TCP_OFF(tcp) << 2) + ((tcpflags & TH_SYN) ? 1 : 0) + ((tcpflags & TH_FIN) ? 1 : 0); /* * if window scaling is present, the scaling is only allowed * for windows not in the first SYN packet. In that packet the * window is 65535 to specify the largest window possible * for receivers not implementing the window scale option. * Currently, we do not assume TTCP here. That means that * if we see a second packet from a host (after the initial * SYN), we can assume that the receiver of the SYN did * already send back the SYN/ACK (and thus that we know if * the receiver also does window scaling) */ if (!(tcpflags & TH_SYN) && (fdata->td_winflags & TCP_WSCALE_FIRST)) { fdata->td_winflags &= ~TCP_WSCALE_FIRST; fdata->td_maxwin = win; } end = seq + dsize; if ((fdata->td_end == 0) && (!(flags & IS_TCPFSM) || ((tcpflags & TH_OPENING) == TH_OPENING))) { /* * Must be a (outgoing) SYN-ACK in reply to a SYN. */ fdata->td_end = end - 1; fdata->td_maxwin = 1; fdata->td_maxend = end + win; } if (!(tcpflags & TH_ACK)) { /* Pretend an ack was sent */ ack = tdata->td_end; } else if (((tcpflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) && (ack == 0)) { /* gross hack to get around certain broken tcp stacks */ ack = tdata->td_end; } maxwin = tdata->td_maxwin; ackskew = tdata->td_end - ack; /* * Strict sequencing only allows in-order delivery. */ if ((flags & IS_STRICT) != 0) { if (seq != fdata->td_end) { DT2(iss_tcp_struct, tcpdata_t *, fdata, int, seq); SBUMP(ipf_state_stats.iss_tcp_strict); fin->fin_flx |= FI_OOW; return 0; } } #define SEQ_GE(a,b) ((int)((a) - (b)) >= 0) #define SEQ_GT(a,b) ((int)((a) - (b)) > 0) inseq = 0; if ((SEQ_GE(fdata->td_maxend, end)) && (SEQ_GE(seq, fdata->td_end - maxwin)) && /* XXX what about big packets */ #define MAXACKWINDOW 66000 (-ackskew <= (MAXACKWINDOW)) && ( ackskew <= (MAXACKWINDOW << fdata->td_winscale))) { inseq = 1; /* * Microsoft Windows will send the next packet to the right of the * window if SACK is in use. */ } else if ((seq == fdata->td_maxend) && (ackskew == 0) && (fdata->td_winflags & TCP_SACK_PERMIT) && (tdata->td_winflags & TCP_SACK_PERMIT)) { DT2(iss_sinsack, tcpdata_t *, fdata, int, seq); SBUMP(ipf_state_stats.iss_winsack); inseq = 1; /* * Sometimes a TCP RST will be generated with only the ACK field * set to non-zero. */ } else if ((seq == 0) && (tcpflags == (TH_RST|TH_ACK)) && (ackskew >= -1) && (ackskew <= 1)) { inseq = 1; } else if (!(flags & IS_TCPFSM)) { int i; i = (fin->fin_rev << 1) + fin->fin_out; #if 0 if (is_pkts[i]0 == 0) { /* * Picking up a connection in the middle, the "next" * packet seen from a direction that is new should be * accepted, even if it appears out of sequence. */ inseq = 1; } else #endif if (!(fdata->td_winflags & (TCP_WSCALE_SEEN|TCP_WSCALE_FIRST))) { /* * No TCPFSM and no window scaling, so make some * extra guesses. */ if ((seq == fdata->td_maxend) && (ackskew == 0)) inseq = 1; else if (SEQ_GE(seq + maxwin, fdata->td_end - maxwin)) inseq = 1; } } /* TRACE(inseq, fdata, tdata, seq, end, ack, ackskew, win, maxwin) */ if (inseq) { /* if ackskew < 0 then this should be due to fragmented * packets. There is no way to know the length of the * total packet in advance. * We do know the total length from the fragment cache though. 
* Note however that there might be more sessions with * exactly the same source and destination parameters in the * state cache (and source and destination is the only stuff * that is saved in the fragment cache). Note further that * some TCP connections in the state cache are hashed with * sport and dport as well which makes it not worthwhile to * look for them. * Thus, when ackskew is negative but still seems to belong * to this session, we bump up the destinations end value. */ if (ackskew < 0) tdata->td_end = ack; /* update max window seen */ if (fdata->td_maxwin < win) fdata->td_maxwin = win; if (SEQ_GT(end, fdata->td_end)) fdata->td_end = end; if (SEQ_GE(ack + win, tdata->td_maxend)) tdata->td_maxend = ack + win; return 1; } SBUMP(ipf_state_stats.iss_oow); fin->fin_flx |= FI_OOW; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_clone */ /* Returns: ipstate_t* - NULL == cloning failed, */ /* else pointer to new state structure */ /* Parameters: fin(I) - pointer to packet information */ /* tcp(I) - pointer to TCP/UDP header */ /* is(I) - pointer to master state structure */ /* */ /* Create a "duplcate" state table entry from the master. */ /* ------------------------------------------------------------------------ */ static ipstate_t * ipf_state_clone(fin, tcp, is) fr_info_t *fin; tcphdr_t *tcp; ipstate_t *is; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t *clone; u_32_t send; if (softs->ipf_state_stats.iss_active == softs->ipf_state_max) { SBUMPD(ipf_state_stats, iss_max); softs->ipf_state_doflush = 1; return NULL; } KMALLOC(clone, ipstate_t *); if (clone == NULL) { SBUMPD(ipf_state_stats, iss_clone_nomem); return NULL; } bcopy((char *)is, (char *)clone, sizeof(*clone)); MUTEX_NUKE(&clone->is_lock); /* * It has not yet been placed on any timeout queue, so make sure * all of that data is zero'd out. */ clone->is_sti.tqe_pnext = NULL; clone->is_sti.tqe_next = NULL; clone->is_sti.tqe_ifq = NULL; clone->is_sti.tqe_parent = clone; clone->is_die = ONE_DAY + softc->ipf_ticks; clone->is_state[0] = 0; clone->is_state[1] = 0; send = ntohl(tcp->th_seq) + fin->fin_dlen - (TCP_OFF(tcp) << 2) + ((tcp->th_flags & TH_SYN) ? 1 : 0) + ((tcp->th_flags & TH_FIN) ? 
1 : 0); if (fin->fin_rev == 1) { clone->is_dend = send; clone->is_maxdend = send; clone->is_send = 0; clone->is_maxswin = 1; clone->is_maxdwin = ntohs(tcp->th_win); if (clone->is_maxdwin == 0) clone->is_maxdwin = 1; } else { clone->is_send = send; clone->is_maxsend = send; clone->is_dend = 0; clone->is_maxdwin = 1; clone->is_maxswin = ntohs(tcp->th_win); if (clone->is_maxswin == 0) clone->is_maxswin = 1; } clone->is_flags &= ~SI_CLONE; clone->is_flags |= SI_CLONED; if (ipf_state_insert(softc, clone, fin->fin_rev) == -1) { KFREE(clone); return NULL; } clone->is_ref = 1; if (clone->is_p == IPPROTO_TCP) { (void) ipf_tcp_age(&clone->is_sti, fin, softs->ipf_state_tcptq, clone->is_flags, 2); } MUTEX_EXIT(&clone->is_lock); if (is->is_flags & IS_STATESYNC) clone->is_sync = ipf_sync_new(softc, SMC_STATE, fin, clone); DT2(iss_clone, ipstate_t *, is, ipstate_t *, clone); SBUMP(ipf_state_stats.iss_cloned); return clone; } /* ------------------------------------------------------------------------ */ /* Function: ipf_matchsrcdst */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* is(I) - pointer to state structure */ /* src(I) - pointer to source address */ /* dst(I) - pointer to destination address */ /* tcp(I) - pointer to TCP/UDP header */ /* cmask(I) - mask of FI_* bits to check */ /* */ /* Match a state table entry against an IP packet. The logic below is that */ /* ret gets set to one if the match succeeds, else remains 0. If it is */ /* still 0 after the test. no match. */ /* ------------------------------------------------------------------------ */ static ipstate_t * ipf_matchsrcdst(fin, is, src, dst, tcp, cmask) fr_info_t *fin; ipstate_t *is; i6addr_t *src, *dst; tcphdr_t *tcp; u_32_t cmask; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; int ret = 0, rev, out, flags, flx = 0, idx; u_short sp, dp; u_32_t cflx; void *ifp; /* * If a connection is about to be deleted, no packets * are allowed to match it. */ if (is->is_sti.tqe_ifq == &softs->ipf_state_deletetq) return NULL; rev = IP6_NEQ(&is->is_dst, dst); ifp = fin->fin_ifp; out = fin->fin_out; flags = is->is_flags; sp = 0; dp = 0; if (tcp != NULL) { sp = htons(fin->fin_sport); dp = ntohs(fin->fin_dport); } if (!rev) { if (tcp != NULL) { if (!(flags & SI_W_SPORT) && (sp != is->is_sport)) rev = 1; else if (!(flags & SI_W_DPORT) && (dp != is->is_dport)) rev = 1; } } idx = (out << 1) + rev; /* * If the interface for this 'direction' is set, make sure it matches. * An interface name that is not set matches any, as does a name of *. */ if ((is->is_ifp[idx] == ifp) || (is->is_ifp[idx] == NULL && (*is->is_ifname[idx] == '\0' || *is->is_ifname[idx] == '-' || *is->is_ifname[idx] == '*'))) ret = 1; if (ret == 0) { DT2(iss_lookup_badifp, fr_info_t *, fin, ipstate_t *, is); SBUMP(ipf_state_stats.iss_lookup_badifp); /* TRACE is, out, rev, idx */ return NULL; } ret = 0; /* * Match addresses and ports. 
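 *
 * As a hedged illustration (addresses invented): an entry recorded with
 * is_src 10.0.0.1 port 1025 and is_dst 192.0.2.1 port 80 matches the
 * forward packet 10.0.0.1,1025 -> 192.0.2.1,80 with rev == 0 and the
 * reply 192.0.2.1,80 -> 10.0.0.1,1025 with rev == 1, where the source
 * and destination comparisons are simply swapped.  The SI_W_SADDR,
 * SI_W_DADDR, SI_W_SPORT and SI_W_DPORT bits let a field that was not
 * known when the entry was created match any value.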
*/ if (rev == 0) { if ((IP6_EQ(&is->is_dst, dst) || (flags & SI_W_DADDR)) && (IP6_EQ(&is->is_src, src) || (flags & SI_W_SADDR))) { if (tcp) { if ((sp == is->is_sport || flags & SI_W_SPORT) && (dp == is->is_dport || flags & SI_W_DPORT)) ret = 1; } else { ret = 1; } } } else { if ((IP6_EQ(&is->is_dst, src) || (flags & SI_W_DADDR)) && (IP6_EQ(&is->is_src, dst) || (flags & SI_W_SADDR))) { if (tcp) { if ((dp == is->is_sport || flags & SI_W_SPORT) && (sp == is->is_dport || flags & SI_W_DPORT)) ret = 1; } else { ret = 1; } } } if (ret == 0) { SBUMP(ipf_state_stats.iss_lookup_badport); DT2(iss_lookup_badport, fr_info_t *, fin, ipstate_t *, is); /* TRACE rev, is, sp, dp, src, dst */ return NULL; } /* * Whether or not this should be here, is questionable, but the aim * is to get this out of the main line. */ if (tcp == NULL) flags = is->is_flags & ~(SI_WILDP|SI_NEWFR|SI_CLONE|SI_CLONED); /* * Only one of the source or destination address can be flaged as a * wildcard. Fill in the missing address, if set. * For IPv6, if the address being copied in is multicast, then * don't reset the wild flag - multicast causes it to be set in the * first place! */ if ((flags & (SI_W_SADDR|SI_W_DADDR))) { fr_ip_t *fi = &fin->fin_fi; if ((flags & SI_W_SADDR) != 0) { if (rev == 0) { is->is_src = fi->fi_src; is->is_flags &= ~SI_W_SADDR; } else { if (!(fin->fin_flx & (FI_MULTICAST|FI_MBCAST))){ is->is_src = fi->fi_dst; is->is_flags &= ~SI_W_SADDR; } } } else if ((flags & SI_W_DADDR) != 0) { if (rev == 0) { if (!(fin->fin_flx & (FI_MULTICAST|FI_MBCAST))){ is->is_dst = fi->fi_dst; is->is_flags &= ~SI_W_DADDR; } } else { is->is_dst = fi->fi_src; is->is_flags &= ~SI_W_DADDR; } } if ((is->is_flags & (SI_WILDA|SI_WILDP)) == 0) { ATOMIC_DECL(softs->ipf_state_stats.iss_wild); } } flx = fin->fin_flx & cmask; cflx = is->is_flx[out][rev]; /* * Match up any flags set from IP options. */ if ((cflx && (flx != (cflx & cmask))) || ((fin->fin_optmsk & is->is_optmsk[rev]) != is->is_opt[rev]) || ((fin->fin_secmsk & is->is_secmsk) != is->is_sec) || ((fin->fin_auth & is->is_authmsk) != is->is_auth)) { SBUMPD(ipf_state_stats, iss_miss_mask); return NULL; } if ((fin->fin_flx & FI_IGNORE) != 0) { fin->fin_rev = rev; return is; } /* * Only one of the source or destination port can be flagged as a * wildcard. When filling it in, fill in a copy of the matched entry * if it has the cloning flag set. */ if ((flags & (SI_W_SPORT|SI_W_DPORT))) { if ((flags & SI_CLONE) != 0) { ipstate_t *clone; clone = ipf_state_clone(fin, tcp, is); if (clone == NULL) return NULL; is = clone; } else { ATOMIC_DECL(softs->ipf_state_stats.iss_wild); } if ((flags & SI_W_SPORT) != 0) { if (rev == 0) { is->is_sport = sp; is->is_send = ntohl(tcp->th_seq); } else { is->is_sport = dp; is->is_send = ntohl(tcp->th_ack); } is->is_maxsend = is->is_send + 1; } else if ((flags & SI_W_DPORT) != 0) { if (rev == 0) { is->is_dport = dp; is->is_dend = ntohl(tcp->th_ack); } else { is->is_dport = sp; is->is_dend = ntohl(tcp->th_seq); } is->is_maxdend = is->is_dend + 1; } is->is_flags &= ~(SI_W_SPORT|SI_W_DPORT); if ((flags & SI_CLONED) && softs->ipf_state_logging) ipf_state_log(softc, is, ISL_CLONE); } ret = -1; if (is->is_flx[out][rev] == 0) { is->is_flx[out][rev] = flx; if (rev == 1 && is->is_optmsk[1] == 0) { is->is_opt[1] = fin->fin_optmsk; is->is_optmsk[1] = 0xffffffff; if (is->is_v == 6) { is->is_opt[1] &= ~0x8; is->is_optmsk[1] &= ~0x8; } } } /* * Check if the interface name for this "direction" is set and if not, * fill it in. 
*/ if (is->is_ifp[idx] == NULL && (*is->is_ifname[idx] == '\0' || *is->is_ifname[idx] == '*')) { is->is_ifp[idx] = ifp; COPYIFNAME(fin->fin_v, ifp, is->is_ifname[idx]); } fin->fin_rev = rev; return is; } /* ------------------------------------------------------------------------ */ /* Function: ipf_checkicmpmatchingstate */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* */ /* If we've got an ICMP error message, using the information stored in the */ /* ICMP packet, look for a matching state table entry. */ /* */ /* If we return NULL then no lock on ipf_state is held. */ /* If we return non-null then a read-lock on ipf_state is held. */ /* ------------------------------------------------------------------------ */ static ipstate_t * ipf_checkicmpmatchingstate(fin) fr_info_t *fin; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t *is, **isp; i6addr_t dst, src; struct icmp *ic; u_short savelen; icmphdr_t *icmp; fr_info_t ofin; tcphdr_t *tcp; int type, len; u_char pr; ip_t *oip; u_int hv; /* * Does it at least have the return (basic) IP header ? * Is it an actual recognised ICMP error type? * Only a basic IP header (no options) should be with * an ICMP error header. */ if ((fin->fin_v != 4) || (fin->fin_hlen != sizeof(ip_t)) || (fin->fin_plen < ICMPERR_MINPKTLEN) || !(fin->fin_flx & FI_ICMPERR)) { SBUMPD(ipf_state_stats, iss_icmp_bad); return NULL; } ic = fin->fin_dp; type = ic->icmp_type; oip = (ip_t *)((char *)ic + ICMPERR_ICMPHLEN); /* * Check if the at least the old IP header (with options) and * 8 bytes of payload is present. */ if (fin->fin_plen < ICMPERR_MAXPKTLEN + ((IP_HL(oip) - 5) << 2)) { SBUMPDX(ipf_state_stats, iss_icmp_short, iss_icmp_short_1); return NULL; } /* * Sanity Checks. */ len = fin->fin_dlen - ICMPERR_ICMPHLEN; if ((len <= 0) || ((IP_HL(oip) << 2) > len)) { DT2(iss_icmp_len, fr_info_t *, fin, struct ip*, oip); SBUMPDX(ipf_state_stats, iss_icmp_short, iss_icmp_short_1); return NULL; } /* * Is the buffer big enough for all of it ? It's the size of the IP * header claimed in the encapsulated part which is of concern. It * may be too big to be in this buffer but not so big that it's * outside the ICMP packet, leading to TCP deref's causing problems. * This is possible because we don't know how big oip_hl is when we * do the pullup early in ipf_check() and thus can't guarantee it is * all here now. */ #ifdef _KERNEL { mb_t *m; m = fin->fin_m; # if defined(MENTAT) if ((char *)oip + len > (char *)m->b_wptr) { SBUMPDX(ipf_state_stats, iss_icmp_short, iss_icmp_short_2); return NULL; } # else if ((char *)oip + len > (char *)fin->fin_ip + m->m_len) { SBUMPDX(ipf_state_stats, iss_icmp_short, iss_icmp_short_3); return NULL; } # endif } #endif bcopy((char *)fin, (char *)&ofin, sizeof(*fin)); /* * in the IPv4 case we must zero the i6addr union otherwise * the IP6_EQ and IP6_NEQ macros produce the wrong results because * of the 'junk' in the unused part of the union */ bzero((char *)&src, sizeof(src)); bzero((char *)&dst, sizeof(dst)); /* * we make an fin entry to be able to feed it to * matchsrcdst note that not all fields are encessary * but this is the cleanest way. Note further we fill * in fin_mp such that if someone uses it we'll get * a kernel panic. ipf_matchsrcdst does not use this. * * watch out here, as ip is in host order and oip in network * order. Any change we make must be undone afterwards, like * oip->ip_len. 
*/ savelen = oip->ip_len; oip->ip_len = htons(len); ofin.fin_flx = FI_NOCKSUM; ofin.fin_v = 4; ofin.fin_ip = oip; ofin.fin_m = NULL; /* if dereferenced, panic XXX */ ofin.fin_mp = NULL; /* if dereferenced, panic XXX */ (void) ipf_makefrip(IP_HL(oip) << 2, oip, &ofin); ofin.fin_ifp = fin->fin_ifp; ofin.fin_out = !fin->fin_out; hv = (pr = oip->ip_p); src.in4 = oip->ip_src; hv += src.in4.s_addr; dst.in4 = oip->ip_dst; hv += dst.in4.s_addr; /* * Reset the short and bad flag here because in ipf_matchsrcdst() * the flags for the current packet (fin_flx) are compared against * those for the existing session. */ ofin.fin_flx &= ~(FI_BAD|FI_SHORT); /* * Put old values of ip_len back as we don't know * if we have to forward the packet or process it again. */ oip->ip_len = savelen; switch (oip->ip_p) { case IPPROTO_ICMP : /* * an ICMP error can only be generated as a result of an * ICMP query, not as the response on an ICMP error * * XXX theoretically ICMP_ECHOREP and the other reply's are * ICMP query's as well, but adding them here seems strange XXX */ if ((ofin.fin_flx & FI_ICMPERR) != 0) { DT1(iss_icmp_icmperr, fr_info_t *, &ofin); SBUMP(ipf_state_stats.iss_icmp_icmperr); return NULL; } /* * perform a lookup of the ICMP packet in the state table */ icmp = (icmphdr_t *)((char *)oip + (IP_HL(oip) << 2)); hv += icmp->icmp_id; hv = DOUBLE_HASH(hv); READ_ENTER(&softc->ipf_state); for (isp = &softs->ipf_state_table[hv]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; if ((is->is_p != pr) || (is->is_v != 4)) continue; if (is->is_pass & FR_NOICMPERR) continue; is = ipf_matchsrcdst(&ofin, is, &src, &dst, NULL, FI_ICMPCMP); if ((is != NULL) && !ipf_allowstateicmp(fin, is, &src)) return is; } RWLOCK_EXIT(&softc->ipf_state); SBUMPDX(ipf_state_stats, iss_icmp_miss, iss_icmp_miss_1); return NULL; case IPPROTO_TCP : case IPPROTO_UDP : break; default : SBUMPDX(ipf_state_stats, iss_icmp_miss, iss_icmp_miss_2); return NULL; } tcp = (tcphdr_t *)((char *)oip + (IP_HL(oip) << 2)); hv += tcp->th_dport;; hv += tcp->th_sport;; hv = DOUBLE_HASH(hv); READ_ENTER(&softc->ipf_state); for (isp = &softs->ipf_state_table[hv]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; /* * Only allow this icmp though if the * encapsulated packet was allowed through the * other way around. Note that the minimal amount * of info present does not allow for checking against * tcp internals such as seq and ack numbers. Only the * ports are known to be present and can be even if the * short flag is set. */ if ((is->is_p == pr) && (is->is_v == 4) && (is = ipf_matchsrcdst(&ofin, is, &src, &dst, tcp, FI_ICMPCMP))) { if (ipf_allowstateicmp(fin, is, &src) == 0) return is; } } RWLOCK_EXIT(&softc->ipf_state); SBUMPDX(ipf_state_stats, iss_icmp_miss, iss_icmp_miss_3); return NULL; } /* ------------------------------------------------------------------------ */ /* Function: ipf_allowstateicmp */ /* Returns: int - 1 = packet denied, 0 = packet allowed */ /* Parameters: fin(I) - pointer to packet information */ /* is(I) - pointer to state table entry */ /* src(I) - source address to check permission for */ /* */ /* For an ICMP packet that has so far matched a state table entry, check if */ /* there are any further refinements that might mean we want to block this */ /* packet. This code isn't specific to either IPv4 or IPv6. 
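 *
 * A hedged, worked example of the direction bookkeeping done inside
 * ipf_allowstateicmp(): if the ICMP error arrives inbound (fin_out == 0)
 * and the packet embedded in it has the same source address as is_src,
 * then backward == 0, so i == 2 and oi == 1, and the error is only let
 * through while is_icmppkts[2] does not exceed is_pkts[1], i.e. while no
 * more errors have been seen than packets that could have triggered them.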
*/ /* ------------------------------------------------------------------------ */ static int ipf_allowstateicmp(fin, is, src) fr_info_t *fin; ipstate_t *is; i6addr_t *src; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; frentry_t *savefr; frentry_t *fr; u_32_t ipass; int backward; int oi; int i; fr = is->is_rule; if (fr != NULL && fr->fr_icmpgrp != NULL) { savefr = fin->fin_fr; fin->fin_fr = fr->fr_icmpgrp->fg_start; ipass = ipf_scanlist(fin, softc->ipf_pass); fin->fin_fr = savefr; if (FR_ISBLOCK(ipass)) { SBUMPD(ipf_state_stats, iss_icmp_headblock); return 1; } } /* * i : the index of this packet (the icmp unreachable) * oi : the index of the original packet found in the * icmp header (i.e. the packet causing this icmp) * backward : original packet was backward compared to * the state */ backward = IP6_NEQ(&is->is_src, src); fin->fin_rev = !backward; i = (!backward << 1) + fin->fin_out; oi = (backward << 1) + !fin->fin_out; if (is->is_pass & FR_NOICMPERR) { SBUMPD(ipf_state_stats, iss_icmp_banned); return 1; } if (is->is_icmppkts[i] > is->is_pkts[oi]) { SBUMPD(ipf_state_stats, iss_icmp_toomany); return 1; } DT2(iss_icmp_hits, fr_info_t *, fin, ipstate_t *, is); SBUMP(ipf_state_stats.iss_icmp_hits); is->is_icmppkts[i]++; /* * we deliberately do not touch the timeouts * for the accompanying state table entry. * It remains to be seen if that is correct. XXX */ return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_ipsmove */ /* Returns: Nil */ /* Parameters: is(I) - pointer to state table entry */ /* hv(I) - new hash value for state table entry */ /* Write Locks: ipf_state */ /* */ /* Move a state entry from one position in the hash table to another. */ /* ------------------------------------------------------------------------ */ static void ipf_ipsmove(softs, is, hv) ipf_state_softc_t *softs; ipstate_t *is; u_int hv; { ipstate_t **isp; u_int hvm; hvm = is->is_hv; /* TRACE is, is_hv, hvm */ /* * Remove the hash from the old location... */ isp = is->is_phnext; if (is->is_hnext) is->is_hnext->is_phnext = isp; *isp = is->is_hnext; if (softs->ipf_state_table[hvm] == NULL) softs->ipf_state_stats.iss_inuse--; softs->ipf_state_stats.iss_bucketlen[hvm]--; /* * ...and put the hash in the new one. */ hvm = DOUBLE_HASH(hv); is->is_hv = hvm; /* TRACE is, hv, is_hv, hvm */ isp = &softs->ipf_state_table[hvm]; if (*isp) (*isp)->is_phnext = &is->is_hnext; else softs->ipf_state_stats.iss_inuse++; softs->ipf_state_stats.iss_bucketlen[hvm]++; is->is_phnext = isp; is->is_hnext = *isp; *isp = is; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_lookup */ /* Returns: ipstate_t* - NULL == no matching state found, */ /* else pointer to state information is returned */ /* Parameters: fin(I) - pointer to packet information */ /* tcp(I) - pointer to TCP/UDP header. */ /* ifqp(O) - pointer for storing tailq timeout */ /* */ /* Search the state table for a matching entry to the packet described by */ /* the contents of *fin. For certain protocols, when a match is found the */ /* timeout queue is also selected and stored in ifpq if it is non-NULL. */ /* */ /* If we return NULL then no lock on ipf_state is held. */ /* If we return non-null then a read-lock on ipf_state is held. 
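 *
 * A rough, hedged sketch of the bucket selection used below: hv starts
 * from the protocol number, adds the source and destination addresses
 * (plus the remaining address words for IPv6) and, for TCP and UDP, both
 * ports, and DOUBLE_HASH() folds that sum into an index into
 * ipf_state_table[].  If the first probe misses and wildcard entries
 * exist, the lookup is retried: the first retry drops the two ports from
 * the sum, the second rebuilds it from the protocol, a single address and
 * the ports (for entries created as replies to multicast or broadcast
 * packets).  Retries run under a write lock so that a matching wildcard
 * entry can be rehashed into its final bucket with ipf_ipsmove().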
*/ /* ------------------------------------------------------------------------ */ ipstate_t * ipf_state_lookup(fin, tcp, ifqp) fr_info_t *fin; tcphdr_t *tcp; ipftq_t **ifqp; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; u_int hv, hvm, pr, v, tryagain; ipstate_t *is, **isp; u_short dport, sport; i6addr_t src, dst; struct icmp *ic; ipftq_t *ifq; int oow; is = NULL; ifq = NULL; tcp = fin->fin_dp; ic = (struct icmp *)tcp; hv = (pr = fin->fin_fi.fi_p); src = fin->fin_fi.fi_src; dst = fin->fin_fi.fi_dst; hv += src.in4.s_addr; hv += dst.in4.s_addr; v = fin->fin_fi.fi_v; #ifdef USE_INET6 if (v == 6) { hv += fin->fin_fi.fi_src.i6[1]; hv += fin->fin_fi.fi_src.i6[2]; hv += fin->fin_fi.fi_src.i6[3]; if ((fin->fin_p == IPPROTO_ICMPV6) && IN6_IS_ADDR_MULTICAST(&fin->fin_fi.fi_dst.in6)) { hv -= dst.in4.s_addr; } else { hv += fin->fin_fi.fi_dst.i6[1]; hv += fin->fin_fi.fi_dst.i6[2]; hv += fin->fin_fi.fi_dst.i6[3]; } } #endif if ((v == 4) && (fin->fin_flx & (FI_MULTICAST|FI_BROADCAST|FI_MBCAST))) { if (fin->fin_out == 0) { hv -= src.in4.s_addr; } else { hv -= dst.in4.s_addr; } } /* TRACE fin_saddr, fin_daddr, hv */ /* * Search the hash table for matching packet header info. */ switch (pr) { #ifdef USE_INET6 case IPPROTO_ICMPV6 : tryagain = 0; if (v == 6) { if ((ic->icmp_type == ICMP6_ECHO_REQUEST) || (ic->icmp_type == ICMP6_ECHO_REPLY)) { hv += ic->icmp_id; } } READ_ENTER(&softc->ipf_state); icmp6again: hvm = DOUBLE_HASH(hv); for (isp = &softs->ipf_state_table[hvm]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; if ((is->is_p != pr) || (is->is_v != v)) continue; is = ipf_matchsrcdst(fin, is, &src, &dst, NULL, FI_CMP); if (is != NULL && ipf_matchicmpqueryreply(v, &is->is_icmp, ic, fin->fin_rev)) { if (fin->fin_rev) ifq = &softs->ipf_state_icmpacktq; else ifq = &softs->ipf_state_icmptq; break; } } if (is != NULL) { if ((tryagain != 0) && !(is->is_flags & SI_W_DADDR)) { hv += fin->fin_fi.fi_src.i6[0]; hv += fin->fin_fi.fi_src.i6[1]; hv += fin->fin_fi.fi_src.i6[2]; hv += fin->fin_fi.fi_src.i6[3]; ipf_ipsmove(softs, is, hv); MUTEX_DOWNGRADE(&softc->ipf_state); } break; } RWLOCK_EXIT(&softc->ipf_state); /* * No matching icmp state entry. Perhaps this is a * response to another state entry. * * XXX With some ICMP6 packets, the "other" address is already * in the packet, after the ICMP6 header, and this could be * used in place of the multicast address. However, taking * advantage of this requires some significant code changes * to handle the specific types where that is the case. 
*/ if ((softs->ipf_state_stats.iss_wild != 0) && ((fin->fin_flx & FI_NOWILD) == 0) && (v == 6) && (tryagain == 0)) { hv -= fin->fin_fi.fi_src.i6[0]; hv -= fin->fin_fi.fi_src.i6[1]; hv -= fin->fin_fi.fi_src.i6[2]; hv -= fin->fin_fi.fi_src.i6[3]; tryagain = 1; WRITE_ENTER(&softc->ipf_state); goto icmp6again; } is = ipf_checkicmp6matchingstate(fin); if (is != NULL) return is; break; #endif case IPPROTO_ICMP : if (v == 4) { hv += ic->icmp_id; } hv = DOUBLE_HASH(hv); READ_ENTER(&softc->ipf_state); for (isp = &softs->ipf_state_table[hv]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; if ((is->is_p != pr) || (is->is_v != v)) continue; is = ipf_matchsrcdst(fin, is, &src, &dst, NULL, FI_CMP); if ((is != NULL) && (ic->icmp_id == is->is_icmp.ici_id) && ipf_matchicmpqueryreply(v, &is->is_icmp, ic, fin->fin_rev)) { if (fin->fin_rev) ifq = &softs->ipf_state_icmpacktq; else ifq = &softs->ipf_state_icmptq; break; } } if (is == NULL) { RWLOCK_EXIT(&softc->ipf_state); } break; case IPPROTO_TCP : case IPPROTO_UDP : ifqp = NULL; sport = htons(fin->fin_data[0]); hv += sport; dport = htons(fin->fin_data[1]); hv += dport; oow = 0; tryagain = 0; READ_ENTER(&softc->ipf_state); retry_tcpudp: hvm = DOUBLE_HASH(hv); /* TRACE hv, hvm */ for (isp = &softs->ipf_state_table[hvm]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; if ((is->is_p != pr) || (is->is_v != v)) continue; fin->fin_flx &= ~FI_OOW; is = ipf_matchsrcdst(fin, is, &src, &dst, tcp, FI_CMP); if (is != NULL) { if (pr == IPPROTO_TCP) { if (!ipf_state_tcp(softc, softs, fin, tcp, is)) { oow |= fin->fin_flx & FI_OOW; continue; } } break; } } if (is != NULL) { if (tryagain && !(is->is_flags & (SI_CLONE|SI_WILDP|SI_WILDA))) { hv += dport; hv += sport; ipf_ipsmove(softs, is, hv); MUTEX_DOWNGRADE(&softc->ipf_state); } break; } RWLOCK_EXIT(&softc->ipf_state); if ((softs->ipf_state_stats.iss_wild != 0) && ((fin->fin_flx & FI_NOWILD) == 0)) { if (tryagain == 0) { hv -= dport; hv -= sport; } else if (tryagain == 1) { hv = fin->fin_fi.fi_p; /* * If we try to pretend this is a reply to a * multicast/broadcast packet then we need to * exclude part of the address from the hash * calculation. */ if (fin->fin_out == 0) { hv += src.in4.s_addr; } else { hv += dst.in4.s_addr; } hv += dport; hv += sport; } tryagain++; if (tryagain <= 2) { WRITE_ENTER(&softc->ipf_state); goto retry_tcpudp; } } fin->fin_flx |= oow; break; #if 0 case IPPROTO_GRE : gre = fin->fin_dp; if (GRE_REV(gre->gr_flags) == 1) { hv += gre->gr_call; } /* FALLTHROUGH */ #endif default : ifqp = NULL; hvm = DOUBLE_HASH(hv); READ_ENTER(&softc->ipf_state); for (isp = &softs->ipf_state_table[hvm]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; if ((is->is_p != pr) || (is->is_v != v)) continue; is = ipf_matchsrcdst(fin, is, &src, &dst, NULL, FI_CMP); if (is != NULL) { ifq = &softs->ipf_state_iptq; break; } } if (is == NULL) { RWLOCK_EXIT(&softc->ipf_state); } break; } if (is != NULL) { if (((is->is_sti.tqe_flags & TQE_RULEBASED) != 0) && (is->is_tqehead[fin->fin_rev] != NULL)) ifq = is->is_tqehead[fin->fin_rev]; if (ifq != NULL && ifqp != NULL) *ifqp = ifq; } else { SBUMP(ipf_state_stats.iss_lookup_miss); } return is; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_check */ /* Returns: frentry_t* - NULL == search failed, */ /* else pointer to rule for matching state */ /* Parameters: fin(I) - pointer to packet information */ /* passp(I) - pointer to filtering result flags */ /* */ /* Check if a packet is associated with an entry in the state table. 
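 *
 * A minimal, hedged sketch of a caller (the real call site is outside
 * this excerpt):
 *
 *	u_32_t pass = 0;
 *	frentry_t *fr = ipf_state_check(fin, &pass);
 *
 * On a hit, FI_STATE is set in fin->fin_flx, pass receives the entry's
 * result flags and the rule that created the entry is returned.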
*/ /* ------------------------------------------------------------------------ */ frentry_t * ipf_state_check(fin, passp) fr_info_t *fin; u_32_t *passp; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; ipftqent_t *tqe; ipstate_t *is; frentry_t *fr; tcphdr_t *tcp; ipftq_t *ifq; u_int pass; int inout; if (softs->ipf_state_lock || (softs->ipf_state_list == NULL)) return NULL; if (fin->fin_flx & (FI_SHORT|FI_FRAGBODY|FI_BAD)) { SBUMPD(ipf_state_stats, iss_check_bad); return NULL; } if ((fin->fin_flx & FI_TCPUDP) || (fin->fin_fi.fi_p == IPPROTO_ICMP) #ifdef USE_INET6 || (fin->fin_fi.fi_p == IPPROTO_ICMPV6) #endif ) tcp = fin->fin_dp; else tcp = NULL; ifq = NULL; /* * Search the hash table for matching packet header info. */ is = ipf_state_lookup(fin, tcp, &ifq); switch (fin->fin_p) { #ifdef USE_INET6 case IPPROTO_ICMPV6 : if (is != NULL) break; if (fin->fin_v == 6) { is = ipf_checkicmp6matchingstate(fin); } break; #endif case IPPROTO_ICMP : if (is != NULL) break; /* * No matching icmp state entry. Perhaps this is a * response to another state entry. */ is = ipf_checkicmpmatchingstate(fin); break; case IPPROTO_TCP : if (is == NULL) break; if (is->is_pass & FR_NEWISN) { if (fin->fin_out == 0) ipf_fixinisn(fin, is); else if (fin->fin_out == 1) ipf_fixoutisn(fin, is); } break; default : if (fin->fin_rev) ifq = &softs->ipf_state_udpacktq; else ifq = &softs->ipf_state_udptq; break; } if (is == NULL) { SBUMP(ipf_state_stats.iss_check_miss); return NULL; } fr = is->is_rule; if (fr != NULL) { if ((fin->fin_out == 0) && (fr->fr_nattag.ipt_num[0] != 0)) { if (fin->fin_nattag == NULL) { RWLOCK_EXIT(&softc->ipf_state); SBUMPD(ipf_state_stats, iss_check_notag); return NULL; } if (ipf_matchtag(&fr->fr_nattag, fin->fin_nattag)!=0) { RWLOCK_EXIT(&softc->ipf_state); SBUMPD(ipf_state_stats, iss_check_nattag); return NULL; } } (void) strncpy(fin->fin_group, FR_NAME(fr, fr_group), FR_GROUPLEN); fin->fin_icode = fr->fr_icode; } fin->fin_rule = is->is_rulen; fin->fin_fr = fr; /* * If this packet is a fragment and the rule says to track fragments, * then create a new fragment cache entry. */ if (fin->fin_flx & FI_FRAG && FR_ISPASS(is->is_pass) && is->is_pass & FR_KEEPFRAG) (void) ipf_frag_new(softc, fin, is->is_pass); /* * For TCP packets, ifq == NULL. For all others, check if this new * queue is different to the last one it was on and move it if so. */ tqe = &is->is_sti; if ((tqe->tqe_flags & TQE_RULEBASED) != 0) ifq = is->is_tqehead[fin->fin_rev]; MUTEX_ENTER(&is->is_lock); if (ifq != NULL) ipf_movequeue(softc->ipf_ticks, tqe, tqe->tqe_ifq, ifq); inout = (fin->fin_rev << 1) + fin->fin_out; is->is_pkts[inout]++; is->is_bytes[inout] += fin->fin_plen; fin->fin_pktnum = is->is_pkts[inout] + is->is_icmppkts[inout]; MUTEX_EXIT(&is->is_lock); pass = is->is_pass; if (is->is_flags & IS_STATESYNC) ipf_sync_update(softc, SMC_STATE, fin, is->is_sync); RWLOCK_EXIT(&softc->ipf_state); SBUMP(ipf_state_stats.iss_hits); fin->fin_dif = &is->is_dif; fin->fin_tif = &is->is_tifs[fin->fin_rev]; fin->fin_flx |= FI_STATE; if ((pass & FR_LOGFIRST) != 0) pass &= ~(FR_LOGFIRST|FR_LOG); *passp = pass; return fr; } /* ------------------------------------------------------------------------ */ /* Function: ipf_fixoutisn */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* is(I) - pointer to master state structure */ /* */ /* Called only for outbound packets, adjusts the sequence number and the */ /* TCP checksum to match that change. 
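 *
 * A hedged, worked example for entries created by FR_NEWISN rules
 * (numbers invented): if is_isninc[0] is 1000, an outbound packet in
 * direction 0 with th_seq 5000 leaves with th_seq 6000, and the peer's
 * reply carrying th_ack 6000 is rewritten back to 5000 by ipf_fixinisn()
 * on the way in.  The precomputed is_sumd[0] delta is applied to th_sum
 * in both functions so the TCP checksum never has to be recomputed.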
*/ /* ------------------------------------------------------------------------ */ static void ipf_fixoutisn(fin, is) fr_info_t *fin; ipstate_t *is; { tcphdr_t *tcp; int rev; u_32_t seq; tcp = fin->fin_dp; rev = fin->fin_rev; if ((is->is_flags & IS_ISNSYN) != 0) { if ((rev == 0) && (fin->fin_cksum < FI_CK_L4PART)) { seq = ntohl(tcp->th_seq); seq += is->is_isninc[0]; tcp->th_seq = htonl(seq); ipf_fix_outcksum(0, &tcp->th_sum, is->is_sumd[0], 0); } } if ((is->is_flags & IS_ISNACK) != 0) { if ((rev == 1) && (fin->fin_cksum < FI_CK_L4PART)) { seq = ntohl(tcp->th_seq); seq += is->is_isninc[1]; tcp->th_seq = htonl(seq); ipf_fix_outcksum(0, &tcp->th_sum, is->is_sumd[1], 0); } } } /* ------------------------------------------------------------------------ */ /* Function: ipf_fixinisn */ /* Returns: Nil */ /* Parameters: fin(I) - pointer to packet information */ /* is(I) - pointer to master state structure */ /* */ /* Called only for inbound packets, adjusts the acknowledge number and the */ /* TCP checksum to match that change. */ /* ------------------------------------------------------------------------ */ static void ipf_fixinisn(fin, is) fr_info_t *fin; ipstate_t *is; { tcphdr_t *tcp; int rev; u_32_t ack; tcp = fin->fin_dp; rev = fin->fin_rev; if ((is->is_flags & IS_ISNSYN) != 0) { if ((rev == 1) && (fin->fin_cksum < FI_CK_L4PART)) { ack = ntohl(tcp->th_ack); ack -= is->is_isninc[0]; tcp->th_ack = htonl(ack); ipf_fix_incksum(0, &tcp->th_sum, is->is_sumd[0], 0); } } if ((is->is_flags & IS_ISNACK) != 0) { if ((rev == 0) && (fin->fin_cksum < FI_CK_L4PART)) { ack = ntohl(tcp->th_ack); ack -= is->is_isninc[1]; tcp->th_ack = htonl(ack); ipf_fix_incksum(0, &tcp->th_sum, is->is_sumd[1], 0); } } } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_sync */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* ifp(I) - pointer to interface */ /* */ /* Walk through all state entries and if an interface pointer match is */ /* found then look it up again, based on its name in case the pointer has */ /* changed since last time. */ /* */ /* If ifp is passed in as being non-null then we are only doing updates for */ /* existing, matching, uses of it. */ /* ------------------------------------------------------------------------ */ void ipf_state_sync(softc, ifp) ipf_main_softc_t *softc; void *ifp; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t *is; int i; if (softc->ipf_running <= 0) return; WRITE_ENTER(&softc->ipf_state); if (softc->ipf_running <= 0) { RWLOCK_EXIT(&softc->ipf_state); return; } for (is = softs->ipf_state_list; is; is = is->is_next) { /* * Look up all the interface names in the state entry. */ for (i = 0; i < FR_NUM(is->is_ifp); i++) { if (ifp == NULL || ifp == is->is_ifp[i]) is->is_ifp[i] = ipf_resolvenic(softc, is->is_ifname[i], is->is_v); } } RWLOCK_EXIT(&softc->ipf_state); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_del */ /* Returns: int - 0 = deleted, else refernce count on active struct */ /* Parameters: softc(I) - pointer to soft context main structure */ /* is(I) - pointer to state structure to delete */ /* why(I) - if not 0, log reason why it was deleted */ /* Write Locks: ipf_state */ /* */ /* Deletes a state entry from the enumerated list as well as the hash table */ /* and timeout queue lists. Make adjustments to hash table statistics and */ /* global counters as required. 
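 *
 * Callers generally only care whether the entry was actually freed, e.g.
 *
 *	if (ipf_state_del(softc, is, ISL_FLUSH) == 0)
 *		removed++;
 *
 * A non-zero return means other references remain: the entry has already
 * been unhooked from the hash table and timeout queues, is accounted as
 * an orphan and is freed later, when the last reference is dropped.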
*/ /* ------------------------------------------------------------------------ */ static int ipf_state_del(softc, is, why) ipf_main_softc_t *softc; ipstate_t *is; int why; { ipf_state_softc_t *softs = softc->ipf_state_soft; int orphan = 1; frentry_t *fr; /* * Since we want to delete this, remove it from the state table, * where it can be found & used, first. */ if (is->is_phnext != NULL) { *is->is_phnext = is->is_hnext; if (is->is_hnext != NULL) is->is_hnext->is_phnext = is->is_phnext; if (softs->ipf_state_table[is->is_hv] == NULL) softs->ipf_state_stats.iss_inuse--; softs->ipf_state_stats.iss_bucketlen[is->is_hv]--; is->is_phnext = NULL; is->is_hnext = NULL; orphan = 0; } /* * Because ipf_state_stats.iss_wild is a count of entries in the state * table that have wildcard flags set, only decerement it once * and do it here. */ if (is->is_flags & (SI_WILDP|SI_WILDA)) { if (!(is->is_flags & SI_CLONED)) { ATOMIC_DECL(softs->ipf_state_stats.iss_wild); } is->is_flags &= ~(SI_WILDP|SI_WILDA); } /* * Next, remove it from the timeout queue it is in. */ if (is->is_sti.tqe_ifq != NULL) ipf_deletequeueentry(&is->is_sti); /* * If it is still in use by something else, do not go any further, * but note that at this point it is now an orphan. How can this * be? ipf_state_flush() calls ipf_delete() directly because it wants * to empty the table out and if something has a hold on a state * entry (such as ipfstat), it'll do the deref path that'll bring * us back here to do the real delete & free. */ MUTEX_ENTER(&is->is_lock); if (is->is_me != NULL) { *is->is_me = NULL; is->is_me = NULL; is->is_ref--; } is->is_ref--; if (is->is_ref > 0) { int refs; refs = is->is_ref; MUTEX_EXIT(&is->is_lock); if (!orphan) softs->ipf_state_stats.iss_orphan++; return refs; } fr = is->is_rule; is->is_rule = NULL; if (fr != NULL) { if (fr->fr_srctrack.ht_max_nodes != 0) { (void) ipf_ht_node_del(&fr->fr_srctrack, is->is_family, &is->is_src); } } ASSERT(is->is_ref == 0); MUTEX_EXIT(&is->is_lock); if (is->is_tqehead[0] != NULL) { if (ipf_deletetimeoutqueue(is->is_tqehead[0]) == 0) ipf_freetimeoutqueue(softc, is->is_tqehead[0]); } if (is->is_tqehead[1] != NULL) { if (ipf_deletetimeoutqueue(is->is_tqehead[1]) == 0) ipf_freetimeoutqueue(softc, is->is_tqehead[1]); } if (is->is_sync) ipf_sync_del_state(softc->ipf_sync_soft, is->is_sync); /* * Now remove it from the linked list of known states */ if (is->is_pnext != NULL) { *is->is_pnext = is->is_next; if (is->is_next != NULL) is->is_next->is_pnext = is->is_pnext; is->is_pnext = NULL; is->is_next = NULL; } if (softs->ipf_state_logging != 0 && why != 0) ipf_state_log(softc, is, why); if (is->is_p == IPPROTO_TCP) softs->ipf_state_stats.iss_fin++; else softs->ipf_state_stats.iss_expire++; if (orphan) softs->ipf_state_stats.iss_orphan--; if (fr != NULL) { fr->fr_statecnt--; (void) ipf_derefrule(softc, &fr); } softs->ipf_state_stats.iss_active_proto[is->is_p]--; MUTEX_DESTROY(&is->is_lock); KFREE(is); softs->ipf_state_stats.iss_active--; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_expire */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* */ /* Slowly expire held state for thingslike UDP and ICMP. The algorithm */ /* used here is to keep the queue sorted with the oldest things at the top */ /* and the youngest at the bottom. So if the top one doesn't need to be */ /* expired then neither will any under it. 
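 *
 * A small, invented example of the ordering this relies on: a queue whose
 * entries die at ticks 100, 250 and 900, scanned when ipf_ticks is 300,
 * has its first two entries deleted and the scan stops at the third,
 * because entries are only ever appended at the tail as they are created
 * or refreshed, so tqe_die never decreases from head to tail.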
*/ /* ------------------------------------------------------------------------ */ void ipf_state_expire(softc) ipf_main_softc_t *softc; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipftq_t *ifq, *ifqnext; ipftqent_t *tqe, *tqn; ipstate_t *is; SPL_INT(s); SPL_NET(s); WRITE_ENTER(&softc->ipf_state); for (ifq = softs->ipf_state_tcptq; ifq != NULL; ifq = ifq->ifq_next) for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); ) { if (tqe->tqe_die > softc->ipf_ticks) break; tqn = tqe->tqe_next; is = tqe->tqe_parent; ipf_state_del(softc, is, ISL_EXPIRE); } for (ifq = softs->ipf_state_usertq; ifq != NULL; ifq = ifqnext) { ifqnext = ifq->ifq_next; for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); ) { if (tqe->tqe_die > softc->ipf_ticks) break; tqn = tqe->tqe_next; is = tqe->tqe_parent; ipf_state_del(softc, is, ISL_EXPIRE); } } for (ifq = softs->ipf_state_usertq; ifq != NULL; ifq = ifqnext) { ifqnext = ifq->ifq_next; if (((ifq->ifq_flags & IFQF_DELETE) != 0) && (ifq->ifq_ref == 0)) { ipf_freetimeoutqueue(softc, ifq); } } if (softs->ipf_state_doflush) { (void) ipf_state_flush(softc, 2, 0); softs->ipf_state_doflush = 0; softs->ipf_state_wm_last = softc->ipf_ticks; } RWLOCK_EXIT(&softc->ipf_state); SPL_X(s); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_flush */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: softc(I) - pointer to soft context main structure */ /* which(I) - which flush action to perform */ /* proto(I) - which protocol to flush (0 == ALL) */ /* Write Locks: ipf_state */ /* */ /* Flush state tables. Three actions currently defined: */ /* which == 0 : flush all state table entries */ /* which == 1 : flush TCP connections which have started to close but are */ /* stuck for some reason. */ /* which == 2 : flush TCP connections which have been idle for a long time, */ /* starting at > 4 days idle and working back in successive half-*/ /* days to at most 12 hours old. If this fails to free enough */ /* slots then work backwards in half hour slots to 30 minutes. */ /* If that too fails, then work backwards in 30 second intervals */ /* for the last 30 minutes to at worst 30 seconds idle. */ /* ------------------------------------------------------------------------ */ int ipf_state_flush(softc, which, proto) ipf_main_softc_t *softc; int which, proto; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipftqent_t *tqe, *tqn; ipstate_t *is, **isp; ipftq_t *ifq; int removed; SPL_INT(s); removed = 0; SPL_NET(s); switch (which) { case 0 : SBUMP(ipf_state_stats.iss_flush_all); /* * Style 0 flush removes everything... */ for (isp = &softs->ipf_state_list; ((is = *isp) != NULL); ) { if ((proto != 0) && (is->is_v != proto)) { isp = &is->is_next; continue; } if (ipf_state_del(softc, is, ISL_FLUSH) == 0) removed++; else isp = &is->is_next; } break; case 1 : SBUMP(ipf_state_stats.iss_flush_closing); /* * Since we're only interested in things that are closing, * we can start with the appropriate timeout queue. */ for (ifq = softs->ipf_state_tcptq + IPF_TCPS_CLOSE_WAIT; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); ) { tqn = tqe->tqe_next; is = tqe->tqe_parent; if (is->is_p != IPPROTO_TCP) break; if (ipf_state_del(softc, is, ISL_FLUSH) == 0) removed++; } } /* * Also need to look through the user defined queues. 
*/ for (ifq = softs->ipf_state_usertq; ifq != NULL; ifq = ifq->ifq_next) { for (tqn = ifq->ifq_head; ((tqe = tqn) != NULL); ) { tqn = tqe->tqe_next; is = tqe->tqe_parent; if (is->is_p != IPPROTO_TCP) continue; if ((is->is_state[0] > IPF_TCPS_ESTABLISHED) && (is->is_state[1] > IPF_TCPS_ESTABLISHED)) { if (ipf_state_del(softc, is, ISL_FLUSH) == 0) removed++; } } } break; case 2 : break; /* * Args 5-11 correspond to flushing those particular states * for TCP connections. */ case IPF_TCPS_CLOSE_WAIT : case IPF_TCPS_FIN_WAIT_1 : case IPF_TCPS_CLOSING : case IPF_TCPS_LAST_ACK : case IPF_TCPS_FIN_WAIT_2 : case IPF_TCPS_TIME_WAIT : case IPF_TCPS_CLOSED : SBUMP(ipf_state_stats.iss_flush_queue); tqn = softs->ipf_state_tcptq[which].ifq_head; while (tqn != NULL) { tqe = tqn; tqn = tqe->tqe_next; is = tqe->tqe_parent; if (ipf_state_del(softc, is, ISL_FLUSH) == 0) removed++; } break; default : if (which < 30) break; SBUMP(ipf_state_stats.iss_flush_state); /* * Take a large arbitrary number to mean the number of seconds * for which which consider to be the maximum value we'll allow * the expiration to be. */ which = IPF_TTLVAL(which); for (isp = &softs->ipf_state_list; ((is = *isp) != NULL); ) { if ((proto == 0) || (is->is_v == proto)) { if (softc->ipf_ticks - is->is_touched > which) { if (ipf_state_del(softc, is, ISL_FLUSH) == 0) { removed++; continue; } } } isp = &is->is_next; } break; } if (which != 2) { SPL_X(s); return removed; } SBUMP(ipf_state_stats.iss_flush_timeout); /* * Asked to remove inactive entries because the table is full, try * again, 3 times, if first attempt failed with a different criteria * each time. The order tried in must be in decreasing age. * Another alternative is to implement random drop and drop N entries * at random until N have been freed up. */ if (softc->ipf_ticks - softs->ipf_state_wm_last > softs->ipf_state_wm_freq) { removed = ipf_queueflush(softc, ipf_state_flush_entry, softs->ipf_state_tcptq, softs->ipf_state_usertq, &softs->ipf_state_stats.iss_active, softs->ipf_state_size, softs->ipf_state_wm_low); softs->ipf_state_wm_last = softc->ipf_ticks; } SPL_X(s); return removed; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_flush_entry */ /* Returns: int - 0 = entry deleted, else not deleted */ /* Parameters: softc(I) - pointer to soft context main structure */ /* entry(I) - pointer to state structure to delete */ /* Write Locks: ipf_state */ /* */ /* This function is a stepping stone between ipf_queueflush() and */ /* ipf_state_del(). It is used so we can provide a uniform interface via */ /* the ipf_queueflush() function. */ /* ------------------------------------------------------------------------ */ static int ipf_state_flush_entry(softc, entry) ipf_main_softc_t *softc; void *entry; { return ipf_state_del(softc, entry, ISL_FLUSH); } /* ------------------------------------------------------------------------ */ /* Function: ipf_tcp_age */ /* Returns: int - 1 == state transition made, 0 == no change (rejected) */ /* Parameters: tqe(I) - pointer to timeout queue information */ /* fin(I) - pointer to packet information */ /* tqtab(I) - TCP timeout queue table this is in */ /* flags(I) - flags from state/NAT entry */ /* ok(I) - can we advance state */ /* */ /* Rewritten by Arjan de Vet , 2000-07-29: */ /* */ /* - (try to) base state transitions on real evidence only, */ /* i.e. packets that are sent and have been received by ipfilter; */ /* diagram 18.12 of TCP/IP volume 1 by W. Richard Stevens was used. 
*/ /* */ /* - deal with half-closed connections correctly; */ /* */ /* - store the state of the source in state[0] such that ipfstat */ /* displays the state as source/dest instead of dest/source; the calls */ /* to ipf_tcp_age have been changed accordingly. */ /* */ /* Internal Parameters: */ /* */ /* state[0] = state of source (host that initiated connection) */ /* state[1] = state of dest (host that accepted the connection) */ /* */ /* dir == 0 : a packet from source to dest */ /* dir == 1 : a packet from dest to source */ /* */ /* A typical procession for a connection is as follows: */ /* */ /* +--------------+-------------------+ */ /* | Side '0' | Side '1' | */ /* +--------------+-------------------+ */ /* | 0 -> 1 (SYN) | | */ /* | | 0 -> 2 (SYN-ACK) | */ /* | 1 -> 3 (ACK) | | */ /* | | 2 -> 4 (ACK-PUSH) | */ /* | 3 -> 4 (ACK) | | */ /* | ... | ... | */ /* | | 4 -> 6 (FIN-ACK) | */ /* | 4 -> 5 (ACK) | | */ /* | | 6 -> 6 (ACK-PUSH) | */ /* | 5 -> 5 (ACK) | | */ /* | 5 -> 8 (FIN) | | */ /* | | 6 -> 10 (ACK) | */ /* +--------------+-------------------+ */ /* */ /* Locking: it is assumed that the parent of the tqe structure is locked. */ /* ------------------------------------------------------------------------ */ int ipf_tcp_age(tqe, fin, tqtab, flags, ok) ipftqent_t *tqe; fr_info_t *fin; ipftq_t *tqtab; int flags, ok; { ipf_main_softc_t *softc = fin->fin_main_soft; int dlen, ostate, nstate, rval, dir; u_char tcpflags; tcphdr_t *tcp; tcp = fin->fin_dp; rval = 0; dir = fin->fin_rev; tcpflags = tcp->th_flags; dlen = fin->fin_dlen - (TCP_OFF(tcp) << 2); ostate = tqe->tqe_state[1 - dir]; nstate = tqe->tqe_state[dir]; if (tcpflags & TH_RST) { if (!(tcpflags & TH_PUSH) && !dlen) nstate = IPF_TCPS_CLOSED; else nstate = IPF_TCPS_CLOSE_WAIT; if (ostate <= IPF_TCPS_ESTABLISHED) { tqe->tqe_state[1 - dir] = IPF_TCPS_CLOSE_WAIT; } rval = 1; } else { switch (nstate) { case IPF_TCPS_LISTEN: /* 0 */ if ((tcpflags & TH_OPENING) == TH_OPENING) { /* * 'dir' received an S and sends SA in * response, LISTEN -> SYN_RECEIVED */ nstate = IPF_TCPS_SYN_RECEIVED; rval = 1; } else if ((tcpflags & TH_OPENING) == TH_SYN) { /* 'dir' sent S, LISTEN -> SYN_SENT */ nstate = IPF_TCPS_SYN_SENT; rval = 1; } /* * the next piece of code makes it possible to get * already established connections into the state table * after a restart or reload of the filter rules; this * does not work when a strict 'flags S keep state' is * used for tcp connections of course */ if (((flags & IS_TCPFSM) == 0) && ((tcpflags & TH_ACKMASK) == TH_ACK)) { /* * we saw an A, guess 'dir' is in ESTABLISHED * mode */ switch (ostate) { case IPF_TCPS_LISTEN : case IPF_TCPS_SYN_RECEIVED : nstate = IPF_TCPS_HALF_ESTAB; rval = 1; break; case IPF_TCPS_HALF_ESTAB : case IPF_TCPS_ESTABLISHED : nstate = IPF_TCPS_ESTABLISHED; rval = 1; break; default : break; } } /* * TODO: besides regular ACK packets we can have other * packets as well; it is yet to be determined how we * should initialize the states in those cases */ break; case IPF_TCPS_SYN_SENT: /* 1 */ if ((tcpflags & ~(TH_ECN|TH_CWR)) == TH_SYN) { /* * A retransmitted SYN packet. We do not reset * the timeout here to ipf_tcptimeout because a * connection connect timeout does not renew * after every packet that is sent. We need to * set rval so as to indicate the packet has * passed the check for its flags being valid * in the TCP FSM. Setting rval to 2 has the * result of not resetting the timeout. 
*/ rval = 2; } else if ((tcpflags & (TH_SYN|TH_FIN|TH_ACK)) == TH_ACK) { /* * we see an A from 'dir' which is in SYN_SENT * state: 'dir' sent an A in response to an SA * which it received, SYN_SENT -> ESTABLISHED */ nstate = IPF_TCPS_ESTABLISHED; rval = 1; } else if (tcpflags & TH_FIN) { /* * we see an F from 'dir' which is in SYN_SENT * state and wants to close its side of the * connection; SYN_SENT -> FIN_WAIT_1 */ nstate = IPF_TCPS_FIN_WAIT_1; rval = 1; } else if ((tcpflags & TH_OPENING) == TH_OPENING) { /* * we see an SA from 'dir' which is already in * SYN_SENT state, this means we have a * simultaneous open; SYN_SENT -> SYN_RECEIVED */ nstate = IPF_TCPS_SYN_RECEIVED; rval = 1; } break; case IPF_TCPS_SYN_RECEIVED: /* 2 */ if ((tcpflags & (TH_SYN|TH_FIN|TH_ACK)) == TH_ACK) { /* * we see an A from 'dir' which was in * SYN_RECEIVED state so it must now be in * established state, SYN_RECEIVED -> * ESTABLISHED */ nstate = IPF_TCPS_ESTABLISHED; rval = 1; } else if ((tcpflags & ~(TH_ECN|TH_CWR)) == TH_OPENING) { /* * We see an SA from 'dir' which is already in * SYN_RECEIVED state. */ rval = 2; } else if (tcpflags & TH_FIN) { /* * we see an F from 'dir' which is in * SYN_RECEIVED state and wants to close its * side of the connection; SYN_RECEIVED -> * FIN_WAIT_1 */ nstate = IPF_TCPS_FIN_WAIT_1; rval = 1; } break; case IPF_TCPS_HALF_ESTAB: /* 3 */ if (tcpflags & TH_FIN) { nstate = IPF_TCPS_FIN_WAIT_1; rval = 1; } else if ((tcpflags & TH_ACKMASK) == TH_ACK) { /* * If we've picked up a connection in mid * flight, we could be looking at a follow on * packet from the same direction as the one * that created this state. Recognise it but * do not advance the entire connection's * state. */ switch (ostate) { case IPF_TCPS_LISTEN : case IPF_TCPS_SYN_SENT : case IPF_TCPS_SYN_RECEIVED : rval = 1; break; case IPF_TCPS_HALF_ESTAB : case IPF_TCPS_ESTABLISHED : nstate = IPF_TCPS_ESTABLISHED; rval = 1; break; default : break; } } break; case IPF_TCPS_ESTABLISHED: /* 4 */ rval = 1; if (tcpflags & TH_FIN) { /* * 'dir' closed its side of the connection; * this gives us a half-closed connection; * ESTABLISHED -> FIN_WAIT_1 */ if (ostate == IPF_TCPS_FIN_WAIT_1) { nstate = IPF_TCPS_CLOSING; } else { nstate = IPF_TCPS_FIN_WAIT_1; } } else if (tcpflags & TH_ACK) { /* * an ACK, should we exclude other flags here? */ if (ostate == IPF_TCPS_FIN_WAIT_1) { /* * We know the other side did an active * close, so we are ACKing the recvd * FIN packet (does the window matching * code guarantee this?) and go into * CLOSE_WAIT state; this gives us a * half-closed connection */ nstate = IPF_TCPS_CLOSE_WAIT; } else if (ostate < IPF_TCPS_CLOSE_WAIT) { /* * still a fully established * connection reset timeout */ nstate = IPF_TCPS_ESTABLISHED; } } break; case IPF_TCPS_CLOSE_WAIT: /* 5 */ rval = 1; if (tcpflags & TH_FIN) { /* * application closed and 'dir' sent a FIN, * we're now going into LAST_ACK state */ nstate = IPF_TCPS_LAST_ACK; } else { /* * we remain in CLOSE_WAIT because the other * side has closed already and we did not * close our side yet; reset timeout */ nstate = IPF_TCPS_CLOSE_WAIT; } break; case IPF_TCPS_FIN_WAIT_1: /* 6 */ rval = 1; if ((tcpflags & TH_ACK) && ostate > IPF_TCPS_CLOSE_WAIT) { /* * if the other side is not active anymore * it has sent us a FIN packet that we are * ack'ing now with an ACK; this means both * sides have now closed the connection and * we go into TIME_WAIT */ /* * XXX: how do we know we really are ACKing * the FIN packet here? does the window code * guarantee that? 
*/ nstate = IPF_TCPS_LAST_ACK; } else { /* * we closed our side of the connection * already but the other side is still active * (ESTABLISHED/CLOSE_WAIT); continue with * this half-closed connection */ nstate = IPF_TCPS_FIN_WAIT_1; } break; case IPF_TCPS_CLOSING: /* 7 */ if ((tcpflags & (TH_FIN|TH_ACK)) == TH_ACK) { nstate = IPF_TCPS_TIME_WAIT; } rval = 1; break; case IPF_TCPS_LAST_ACK: /* 8 */ if (tcpflags & TH_ACK) { rval = 1; } /* * we cannot detect when we go out of LAST_ACK state * to CLOSED because that is based on the reception * of ACK packets; ipfilter can only detect that a * packet has been sent by a host */ break; case IPF_TCPS_FIN_WAIT_2: /* 9 */ /* NOT USED */ break; case IPF_TCPS_TIME_WAIT: /* 10 */ /* we're in 2MSL timeout now */ if (ostate == IPF_TCPS_LAST_ACK) { nstate = IPF_TCPS_CLOSED; rval = 1; } else { rval = 2; } break; case IPF_TCPS_CLOSED: /* 11 */ rval = 2; break; default : #if !defined(_KERNEL) abort(); #endif break; } } /* * If rval == 2 then do not update the queue position, but treat the * packet as being ok. */ if (rval == 2) rval = 1; else if (rval == 1) { if (ok) tqe->tqe_state[dir] = nstate; if ((tqe->tqe_flags & TQE_RULEBASED) == 0) ipf_movequeue(softc->ipf_ticks, tqe, tqe->tqe_ifq, tqtab + nstate); } return rval; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_log */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* is(I) - pointer to state structure */ /* type(I) - type of log entry to create */ /* */ /* Creates a state table log entry using the state structure and type info. */ /* passed in. Log packet/byte counts, source/destination address and other */ /* protocol specific information. */ /* ------------------------------------------------------------------------ */ void ipf_state_log(softc, is, type) ipf_main_softc_t *softc; struct ipstate *is; u_int type; { #ifdef IPFILTER_LOG struct ipslog ipsl; size_t sizes[1]; void *items[1]; int types[1]; /* * Copy information out of the ipstate_t structure and into the * structure used for logging. 
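* ICMP packet counts are folded into the per-direction packet totals below.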
*/ ipsl.isl_type = type; ipsl.isl_pkts[0] = is->is_pkts[0] + is->is_icmppkts[0]; ipsl.isl_bytes[0] = is->is_bytes[0]; ipsl.isl_pkts[1] = is->is_pkts[1] + is->is_icmppkts[1]; ipsl.isl_bytes[1] = is->is_bytes[1]; ipsl.isl_pkts[2] = is->is_pkts[2] + is->is_icmppkts[2]; ipsl.isl_bytes[2] = is->is_bytes[2]; ipsl.isl_pkts[3] = is->is_pkts[3] + is->is_icmppkts[3]; ipsl.isl_bytes[3] = is->is_bytes[3]; ipsl.isl_src = is->is_src; ipsl.isl_dst = is->is_dst; ipsl.isl_p = is->is_p; ipsl.isl_v = is->is_v; ipsl.isl_flags = is->is_flags; ipsl.isl_tag = is->is_tag; ipsl.isl_rulen = is->is_rulen; (void) strncpy(ipsl.isl_group, is->is_group, FR_GROUPLEN); if (ipsl.isl_p == IPPROTO_TCP || ipsl.isl_p == IPPROTO_UDP) { ipsl.isl_sport = is->is_sport; ipsl.isl_dport = is->is_dport; if (ipsl.isl_p == IPPROTO_TCP) { ipsl.isl_state[0] = is->is_state[0]; ipsl.isl_state[1] = is->is_state[1]; } } else if (ipsl.isl_p == IPPROTO_ICMP) { ipsl.isl_itype = is->is_icmp.ici_type; } else if (ipsl.isl_p == IPPROTO_ICMPV6) { ipsl.isl_itype = is->is_icmp.ici_type; } else { ipsl.isl_ps.isl_filler[0] = 0; ipsl.isl_ps.isl_filler[1] = 0; } items[0] = &ipsl; sizes[0] = sizeof(ipsl); types[0] = 0; (void) ipf_log_items(softc, IPL_LOGSTATE, NULL, items, sizes, types, 1); #endif } #ifdef USE_INET6 /* ------------------------------------------------------------------------ */ /* Function: ipf_checkicmp6matchingstate */ /* Returns: ipstate_t* - NULL == no match found, */ /* else pointer to matching state entry */ /* Parameters: fin(I) - pointer to packet information */ /* Locks: NULL == no locks, else Read Lock on ipf_state */ /* */ /* If we've got an ICMPv6 error message, using the information stored in */ /* the ICMPv6 packet, look for a matching state table entry. */ /* ------------------------------------------------------------------------ */ static ipstate_t * ipf_checkicmp6matchingstate(fin) fr_info_t *fin; { ipf_main_softc_t *softc = fin->fin_main_soft; ipf_state_softc_t *softs = softc->ipf_state_soft; struct icmp6_hdr *ic6, *oic; ipstate_t *is, **isp; u_short sport, dport; i6addr_t dst, src; u_short savelen; icmpinfo_t *ic; fr_info_t ofin; tcphdr_t *tcp; ip6_t *oip6; u_char pr; u_int hv; int type; /* * Does it at least have the return (basic) IP header ? * Is it an actual recognised ICMP error type? * Only a basic IP header (no options) should be with * an ICMP error header. */ if ((fin->fin_v != 6) || (fin->fin_plen < ICMP6ERR_MINPKTLEN) || !(fin->fin_flx & FI_ICMPERR)) { SBUMPD(ipf_state_stats, iss_icmp_bad); return NULL; } ic6 = fin->fin_dp; type = ic6->icmp6_type; oip6 = (ip6_t *)((char *)ic6 + ICMPERR_ICMPHLEN); if (fin->fin_plen < sizeof(*oip6)) { SBUMPD(ipf_state_stats, iss_icmp_short); return NULL; } bcopy((char *)fin, (char *)&ofin, sizeof(*fin)); ofin.fin_v = 6; ofin.fin_ifp = fin->fin_ifp; ofin.fin_out = !fin->fin_out; ofin.fin_m = NULL; /* if dereferenced, panic XXX */ ofin.fin_mp = NULL; /* if dereferenced, panic XXX */ /* * We make a fin entry to be able to feed it to * matchsrcdst. Note that not all fields are necessary * but this is the cleanest way. Note further we fill * in fin_mp such that if someone uses it we'll get * a kernel panic. ipf_matchsrcdst does not use this. * * watch out here, as ip is in host order and oip6 in network * order. Any change we make must be undone afterwards. 
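* (ip6_plen is saved in savelen and put back once ipf_makefrip has run.)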
*/ savelen = oip6->ip6_plen; oip6->ip6_plen = htons(fin->fin_dlen - ICMPERR_ICMPHLEN); ofin.fin_flx = FI_NOCKSUM; ofin.fin_ip = (ip_t *)oip6; (void) ipf_makefrip(sizeof(*oip6), (ip_t *)oip6, &ofin); ofin.fin_flx &= ~(FI_BAD|FI_SHORT); oip6->ip6_plen = savelen; pr = ofin.fin_p; /* * an ICMP error can never generate an ICMP error in response. */ if (ofin.fin_flx & FI_ICMPERR) { DT1(iss_icmp6_icmperr, fr_info_t *, &ofin); SBUMP(ipf_state_stats.iss_icmp6_icmperr); return NULL; } if (oip6->ip6_nxt == IPPROTO_ICMPV6) { oic = ofin.fin_dp; /* * an ICMP error can only be generated as a result of an * ICMP query, not as the response on an ICMP error * * XXX theoretically ICMP_ECHOREP and the other reply's are * ICMP query's as well, but adding them here seems strange XXX */ if (!(oic->icmp6_type & ICMP6_INFOMSG_MASK)) { DT1(iss_icmp6_notinfo, fr_info_t *, &ofin); SBUMP(ipf_state_stats.iss_icmp6_notinfo); return NULL; } /* * perform a lookup of the ICMP packet in the state table */ hv = (pr = oip6->ip6_nxt); src.in6 = oip6->ip6_src; hv += src.in4.s_addr; dst.in6 = oip6->ip6_dst; hv += dst.in4.s_addr; hv += oic->icmp6_id; hv += oic->icmp6_seq; hv = DOUBLE_HASH(hv); READ_ENTER(&softc->ipf_state); for (isp = &softs->ipf_state_table[hv]; ((is = *isp) != NULL); ) { ic = &is->is_icmp; isp = &is->is_hnext; if ((is->is_p == pr) && !(is->is_pass & FR_NOICMPERR) && (oic->icmp6_id == ic->ici_id) && (oic->icmp6_seq == ic->ici_seq) && (is = ipf_matchsrcdst(&ofin, is, &src, &dst, NULL, FI_ICMPCMP))) { /* * in the state table ICMP query's are stored * with the type of the corresponding ICMP * response. Correct here */ if (((ic->ici_type == ICMP6_ECHO_REPLY) && (oic->icmp6_type == ICMP6_ECHO_REQUEST)) || (ic->ici_type - 1 == oic->icmp6_type )) { if (!ipf_allowstateicmp(fin, is, &src)) return is; } } } RWLOCK_EXIT(&softc->ipf_state); SBUMPD(ipf_state_stats, iss_icmp6_miss); return NULL; } hv = (pr = oip6->ip6_nxt); src.in6 = oip6->ip6_src; hv += src.i6[0]; hv += src.i6[1]; hv += src.i6[2]; hv += src.i6[3]; dst.in6 = oip6->ip6_dst; hv += dst.i6[0]; hv += dst.i6[1]; hv += dst.i6[2]; hv += dst.i6[3]; tcp = NULL; switch (oip6->ip6_nxt) { case IPPROTO_TCP : case IPPROTO_UDP : tcp = (tcphdr_t *)(oip6 + 1); dport = tcp->th_dport; sport = tcp->th_sport; hv += dport; hv += sport; break; case IPPROTO_ICMPV6 : oic = (struct icmp6_hdr *)(oip6 + 1); hv += oic->icmp6_id; hv += oic->icmp6_seq; break; default : break; } hv = DOUBLE_HASH(hv); READ_ENTER(&softc->ipf_state); for (isp = &softs->ipf_state_table[hv]; ((is = *isp) != NULL); ) { isp = &is->is_hnext; /* * Only allow this icmp though if the * encapsulated packet was allowed through the * other way around. Note that the minimal amount * of info present does not allow for checking against * tcp internals such as seq and ack numbers. */ if ((is->is_p != pr) || (is->is_v != 6) || (is->is_pass & FR_NOICMPERR)) continue; is = ipf_matchsrcdst(&ofin, is, &src, &dst, tcp, FI_ICMPCMP); if ((is != NULL) && (ipf_allowstateicmp(fin, is, &src) == 0)) return is; } RWLOCK_EXIT(&softc->ipf_state); SBUMPD(ipf_state_stats, iss_icmp_miss); return NULL; } #endif /* ------------------------------------------------------------------------ */ /* Function: ipf_sttab_init */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* tqp(I) - pointer to an array of timeout queues for TCP */ /* */ /* Initialise the array of timeout queues for TCP. 
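Each queue's ifq_ttl is seeded from the matching ipf_tcp* tuneable in the soft context.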
*/ /* ------------------------------------------------------------------------ */ void ipf_sttab_init(softc, tqp) ipf_main_softc_t *softc; ipftq_t *tqp; { int i; for (i = IPF_TCP_NSTATES - 1; i >= 0; i--) { IPFTQ_INIT(&tqp[i], 0, "ipftq tcp tab"); tqp[i].ifq_next = tqp + i + 1; } tqp[IPF_TCP_NSTATES - 1].ifq_next = NULL; tqp[IPF_TCPS_CLOSED].ifq_ttl = softc->ipf_tcpclosed; tqp[IPF_TCPS_LISTEN].ifq_ttl = softc->ipf_tcptimeout; tqp[IPF_TCPS_SYN_SENT].ifq_ttl = softc->ipf_tcpsynsent; tqp[IPF_TCPS_SYN_RECEIVED].ifq_ttl = softc->ipf_tcpsynrecv; tqp[IPF_TCPS_ESTABLISHED].ifq_ttl = softc->ipf_tcpidletimeout; tqp[IPF_TCPS_CLOSE_WAIT].ifq_ttl = softc->ipf_tcphalfclosed; tqp[IPF_TCPS_FIN_WAIT_1].ifq_ttl = softc->ipf_tcphalfclosed; tqp[IPF_TCPS_CLOSING].ifq_ttl = softc->ipf_tcptimeout; tqp[IPF_TCPS_LAST_ACK].ifq_ttl = softc->ipf_tcplastack; tqp[IPF_TCPS_FIN_WAIT_2].ifq_ttl = softc->ipf_tcpclosewait; tqp[IPF_TCPS_TIME_WAIT].ifq_ttl = softc->ipf_tcptimewait; tqp[IPF_TCPS_HALF_ESTAB].ifq_ttl = softc->ipf_tcptimeout; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sttab_destroy */ /* Returns: Nil */ /* Parameters: tqp(I) - pointer to an array of timeout queues for TCP */ /* */ /* Do whatever is necessary to "destroy" each of the entries in the array */ /* of timeout queues for TCP. */ /* ------------------------------------------------------------------------ */ void ipf_sttab_destroy(tqp) ipftq_t *tqp; { int i; for (i = IPF_TCP_NSTATES - 1; i >= 0; i--) MUTEX_DESTROY(&tqp[i].ifq_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_deref */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* isp(I) - pointer to pointer to state table entry */ /* */ /* Decrement the reference counter for this state table entry and free it */ /* if there are no more things using it. */ /* */ /* This function is only called when cleaning up after increasing is_ref by */ /* one earlier in the 'code path' so if is_ref is 1 when entering, we do */ /* have an orphan, otherwise not. However there is a possible race between */ /* the entry being deleted via flushing with an ioctl call (that calls the */ /* delete function directly) and the tail end of packet processing so we */ /* need to grab is_lock before doing the check to synchronise the two code */ /* paths. */ /* */ /* When operating in userland (ipftest), we have no timers to clear a state */ /* entry. Therefore, we make a few simple tests before deleting an entry */ /* outright. We compare states on each side looking for a combination of */ /* TIME_WAIT (should really be FIN_WAIT_2?) and LAST_ACK. Then we factor */ /* in packet direction with the interface list to make sure we don't */ /* prematurely delete an entry on a final inbound packet that's we're also */ /* supposed to route elsewhere. 
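These extra checks are only compiled in when _KERNEL is not defined.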
*/ /* */ /* Internal parameters: */ /* state[0] = state of source (host that initiated connection) */ /* state[1] = state of dest (host that accepted the connection) */ /* */ /* dir == 0 : a packet from source to dest */ /* dir == 1 : a packet from dest to source */ /* ------------------------------------------------------------------------ */ void ipf_state_deref(softc, isp) ipf_main_softc_t *softc; ipstate_t **isp; { ipstate_t *is = *isp; is = *isp; *isp = NULL; MUTEX_ENTER(&is->is_lock); if (is->is_ref > 1) { is->is_ref--; MUTEX_EXIT(&is->is_lock); #ifndef _KERNEL if ((is->is_sti.tqe_state[0] > IPF_TCPS_ESTABLISHED) || (is->is_sti.tqe_state[1] > IPF_TCPS_ESTABLISHED)) { ipf_state_del(softc, is, ISL_EXPIRE); } #endif return; } MUTEX_EXIT(&is->is_lock); WRITE_ENTER(&softc->ipf_state); ipf_state_del(softc, is, ISL_ORPHAN); RWLOCK_EXIT(&softc->ipf_state); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_setqueue */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to soft context main structure */ /* is(I) - pointer to state structure */ /* rev(I) - forward(0) or reverse(1) direction */ /* Locks: ipf_state (read or write) */ /* */ /* Put the state entry on its default queue entry, using rev as a helped in */ /* determining which queue it should be placed on. */ /* ------------------------------------------------------------------------ */ void ipf_state_setqueue(softc, is, rev) ipf_main_softc_t *softc; ipstate_t *is; int rev; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipftq_t *oifq, *nifq; if ((is->is_sti.tqe_flags & TQE_RULEBASED) != 0) nifq = is->is_tqehead[rev]; else nifq = NULL; if (nifq == NULL) { switch (is->is_p) { #ifdef USE_INET6 case IPPROTO_ICMPV6 : if (rev == 1) nifq = &softs->ipf_state_icmpacktq; else nifq = &softs->ipf_state_icmptq; break; #endif case IPPROTO_ICMP : if (rev == 1) nifq = &softs->ipf_state_icmpacktq; else nifq = &softs->ipf_state_icmptq; break; case IPPROTO_TCP : nifq = softs->ipf_state_tcptq + is->is_state[rev]; break; case IPPROTO_UDP : if (rev == 1) nifq = &softs->ipf_state_udpacktq; else nifq = &softs->ipf_state_udptq; break; default : nifq = &softs->ipf_state_iptq; break; } } oifq = is->is_sti.tqe_ifq; /* * If it's currently on a timeout queue, move it from one queue to * another, else put it on the end of the newly determined queue. */ if (oifq != NULL) ipf_movequeue(softc->ipf_ticks, &is->is_sti, oifq, nifq); else ipf_queueappend(softc->ipf_ticks, &is->is_sti, nifq, is); return; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_iter */ /* Returns: int - 0 == success, else error */ /* Parameters: softc(I) - pointer to main soft context */ /* token(I) - pointer to ipftoken structure */ /* itp(I) - pointer to ipfgeniter structure */ /* obj(I) - pointer to data description structure */ /* */ /* This function handles the SIOCGENITER ioctl for the state tables and */ /* walks through the list of entries in the state table list (softs->ipf_state_list.) 
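A reference is taken on each entry handed back so the walk can resume from it on the next call.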
*/ /* ------------------------------------------------------------------------ */ static int ipf_state_iter(softc, token, itp, obj) ipf_main_softc_t *softc; ipftoken_t *token; ipfgeniter_t *itp; ipfobj_t *obj; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t *is, *next, zero; int error; if (itp->igi_data == NULL) { IPFERROR(100026); return EFAULT; } if (itp->igi_nitems < 1) { IPFERROR(100027); return ENOSPC; } if (itp->igi_type != IPFGENITER_STATE) { IPFERROR(100028); return EINVAL; } is = token->ipt_data; if (is == (void *)-1) { IPFERROR(100029); return ESRCH; } error = 0; obj->ipfo_type = IPFOBJ_IPSTATE; obj->ipfo_size = sizeof(ipstate_t); READ_ENTER(&softc->ipf_state); is = token->ipt_data; if (is == NULL) { next = softs->ipf_state_list; } else { next = is->is_next; } /* * If we find a state entry to use, bump its reference count so that * it can be used for is_next when we come back. */ if (next != NULL) { MUTEX_ENTER(&next->is_lock); next->is_ref++; MUTEX_EXIT(&next->is_lock); token->ipt_data = next; } else { bzero(&zero, sizeof(zero)); next = &zero; token->ipt_data = NULL; } if (next->is_next == NULL) ipf_token_mark_complete(token); RWLOCK_EXIT(&softc->ipf_state); obj->ipfo_ptr = itp->igi_data; error = ipf_outobjk(softc, obj, next); if (is != NULL) ipf_state_deref(softc, &is); return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_gettable */ /* Returns: int - 0 = success, else error */ /* Parameters: softc(I) - pointer to main soft context */ /* softs(I) - pointer to state context structure */ /* data(I) - pointer to ioctl data */ /* */ /* This function handles ioctl requests for tables of state information. */ /* At present the only table it deals with is the hash bucket statistics. */ /* ------------------------------------------------------------------------ */ static int ipf_state_gettable(softc, softs, data) ipf_main_softc_t *softc; ipf_state_softc_t *softs; char *data; { ipftable_t table; int error; error = ipf_inobj(softc, data, NULL, &table, IPFOBJ_GTABLE); if (error != 0) return error; if (table.ita_type != IPFTABLE_BUCKETS) { IPFERROR(100031); return EINVAL; } error = COPYOUT(softs->ipf_state_stats.iss_bucketlen, table.ita_table, softs->ipf_state_size * sizeof(u_int)); if (error != 0) { IPFERROR(100032); error = EFAULT; } return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_setpending */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to main soft context */ /* is(I) - pointer to state structure */ /* Locks: ipf_state (read or write) */ /* */ /* Put the state entry on to the pending queue - this queue has a very */ /* short lifetime where items are put that can't be deleted straight away */ /* because of locking issues but we want to delete them ASAP, anyway. 
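Any back-pointer recorded in is_me is also cleared here and the reference it held is dropped.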
*/ /* ------------------------------------------------------------------------ */ void ipf_state_setpending(softc, is) ipf_main_softc_t *softc; ipstate_t *is; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipftq_t *oifq; oifq = is->is_sti.tqe_ifq; if (oifq != NULL) ipf_movequeue(softc->ipf_ticks, &is->is_sti, oifq, &softs->ipf_state_pending); else ipf_queueappend(softc->ipf_ticks, &is->is_sti, &softs->ipf_state_pending, is); MUTEX_ENTER(&is->is_lock); if (is->is_me != NULL) { *is->is_me = NULL; is->is_me = NULL; is->is_ref--; } MUTEX_EXIT(&is->is_lock); } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_matchflush */ /* Returns: Nil */ /* Parameters: softc(I) - pointer to main soft context */ /* data(I) - pointer to state structure */ /* Locks: ipf_state (read or write) */ /* */ /* Flush all entries from the list of state entries that match the */ /* properties in the array loaded. */ /* ------------------------------------------------------------------------ */ int ipf_state_matchflush(softc, data) ipf_main_softc_t *softc; caddr_t data; { ipf_state_softc_t *softs = softc->ipf_state_soft; int *array, flushed, error; ipstate_t *state, *statenext; ipfobj_t obj; error = ipf_matcharray_load(softc, data, &obj, &array); if (error != 0) return error; flushed = 0; for (state = softs->ipf_state_list; state != NULL; state = statenext) { statenext = state->is_next; if (ipf_state_matcharray(state, array, softc->ipf_ticks) == 0) { ipf_state_del(softc, state, ISL_FLUSH); flushed++; } } obj.ipfo_retval = flushed; error = BCOPYOUT(&obj, data, sizeof(obj)); KFREES(array, array[0] * sizeof(*array)); return error; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_matcharray */ /* Returns: int - 0 = no match, 1 = match */ /* Parameters: state(I) - pointer to state structure */ /* array(I) - pointer to ipf matching expression */ /* ticks(I) - current value of ipfilter tick timer */ /* Locks: ipf_state (read or write) */ /* */ /* Compare a state entry with the match array passed in and return a value */ /* to indicate whether or not the matching was successful. */ /* ------------------------------------------------------------------------ */ static int ipf_state_matcharray(state, array, ticks) ipstate_t *state; int *array; u_long ticks; { int i, n, *x, rv, p; ipfexp_t *e; rv = 0; n = array[0]; x = array + 1; for (; n > 0; x += 3 + x[3], rv = 0) { e = (ipfexp_t *)x; n -= e->ipfe_size; if (x[0] == IPF_EXP_END) break; /* * If we need to match the protocol and that doesn't match, * don't even both with the instruction array. 
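* (The protocol to compare against is carried in the top 16 bits of ipfe_cmd.)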
*/ p = e->ipfe_cmd >> 16; if ((p != 0) && (p != state->is_p)) break; switch (e->ipfe_cmd) { case IPF_EXP_IP_PR : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (state->is_p == e->ipfe_arg0[i]); } break; case IPF_EXP_IP_SRCADDR : if (state->is_v != 4) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((state->is_saddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; case IPF_EXP_IP_DSTADDR : if (state->is_v != 4) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((state->is_daddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; case IPF_EXP_IP_ADDR : if (state->is_v != 4) break; for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= ((state->is_saddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]) || ((state->is_daddr & e->ipfe_arg0[i * 2 + 1]) == e->ipfe_arg0[i * 2]); } break; #ifdef USE_INET6 case IPF_EXP_IP6_SRCADDR : if (state->is_v != 6) break; for (i = 0; !rv && i < x[3]; i++) { rv |= IP6_MASKEQ(&state->is_src.in6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]); } break; case IPF_EXP_IP6_DSTADDR : if (state->is_v != 6) break; for (i = 0; !rv && i < x[3]; i++) { rv |= IP6_MASKEQ(&state->is_dst.in6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]); } break; case IPF_EXP_IP6_ADDR : if (state->is_v != 6) break; for (i = 0; !rv && i < x[3]; i++) { rv |= IP6_MASKEQ(&state->is_src.in6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]) || IP6_MASKEQ(&state->is_dst.in6, &e->ipfe_arg0[i * 8 + 4], &e->ipfe_arg0[i * 8]); } break; #endif case IPF_EXP_UDP_PORT : case IPF_EXP_TCP_PORT : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (state->is_sport == e->ipfe_arg0[i]) || (state->is_dport == e->ipfe_arg0[i]); } break; case IPF_EXP_UDP_SPORT : case IPF_EXP_TCP_SPORT : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (state->is_sport == e->ipfe_arg0[i]); } break; case IPF_EXP_UDP_DPORT : case IPF_EXP_TCP_DPORT : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (state->is_dport == e->ipfe_arg0[i]); } break; case IPF_EXP_TCP_STATE : for (i = 0; !rv && i < e->ipfe_narg; i++) { rv |= (state->is_state[0] == e->ipfe_arg0[i]) || (state->is_state[1] == e->ipfe_arg0[i]); } break; case IPF_EXP_IDLE_GT : rv |= (ticks - state->is_touched > e->ipfe_arg0[0]); break; } /* * Factor in doing a negative match. */ rv ^= e->ipfe_not; if (rv == 0) break; } return rv; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_settimeout */ /* Returns: int 0 = success, else failure */ /* Parameters: softc(I) - pointer to main soft context */ /* t(I) - pointer to tuneable being changed */ /* p(I) - pointer to the new value */ /* */ /* Sets a timeout value for one of the many timeout queues. We find the */ /* correct queue using a somewhat manual process of comparing the timeout */ /* names for each specific value available and calling ipf_apply_timeout on */ /* that queue so that all of the items on it are updated accordingly. */ /* ------------------------------------------------------------------------ */ int ipf_state_settimeout(softc, t, p) struct ipf_main_softc_s *softc; ipftuneable_t *t; ipftuneval_t *p; { ipf_state_softc_t *softs = softc->ipf_state_soft; /* * In case there is nothing to do... 
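* i.e. the tuneable already holds the value being set.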
*/ if (*t->ipft_pint == p->ipftu_int) return 0; if (!strncmp(t->ipft_name, "tcp_", 4)) return ipf_settimeout_tcp(t, p, softs->ipf_state_tcptq); if (!strcmp(t->ipft_name, "udp_timeout")) { ipf_apply_timeout(&softs->ipf_state_udptq, p->ipftu_int); } else if (!strcmp(t->ipft_name, "udp_ack_timeout")) { ipf_apply_timeout(&softs->ipf_state_udpacktq, p->ipftu_int); } else if (!strcmp(t->ipft_name, "icmp_timeout")) { ipf_apply_timeout(&softs->ipf_state_icmptq, p->ipftu_int); } else if (!strcmp(t->ipft_name, "icmp_ack_timeout")) { ipf_apply_timeout(&softs->ipf_state_icmpacktq, p->ipftu_int); } else if (!strcmp(t->ipft_name, "ip_timeout")) { ipf_apply_timeout(&softs->ipf_state_iptq, p->ipftu_int); } else { IPFERROR(100034); return ESRCH; } /* * Update the tuneable being set. */ *t->ipft_pint = p->ipftu_int; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_rehash */ /* Returns: int 0 = success, else failure */ /* Parameters: softc(I) - pointer to main soft context */ /* t(I) - pointer to tuneable being changed */ /* p(I) - pointer to the new value */ /* */ /* To change the size of the state hash table at runtime, a new table has */ /* to be allocated and then all of the existing entries put in it, bumping */ /* up the bucketlength for it as we go along. */ /* ------------------------------------------------------------------------ */ int ipf_state_rehash(softc, t, p) ipf_main_softc_t *softc; ipftuneable_t *t; ipftuneval_t *p; { ipf_state_softc_t *softs = softc->ipf_state_soft; ipstate_t **newtab, *is; u_long *newseed; u_int *bucketlens; u_int maxbucket; u_int newsize; u_int hv; int i; newsize = p->ipftu_int; /* * In case there is nothing to do... */ if (newsize == softs->ipf_state_size) return 0; KMALLOCS(newtab, ipstate_t **, newsize * sizeof(ipstate_t *)); if (newtab == NULL) { IPFERROR(100035); return ENOMEM; } KMALLOCS(bucketlens, u_int *, newsize * sizeof(u_int)); if (bucketlens == NULL) { KFREES(newtab, newsize * sizeof(*softs->ipf_state_table)); IPFERROR(100036); return ENOMEM; } newseed = ipf_state_seed_alloc(newsize, softs->ipf_state_max); if (newseed == NULL) { KFREES(bucketlens, newsize * sizeof(*bucketlens)); KFREES(newtab, newsize * sizeof(*newtab)); IPFERROR(100037); return ENOMEM; } for (maxbucket = 0, i = newsize; i > 0; i >>= 1) maxbucket++; maxbucket *= 2; bzero((char *)newtab, newsize * sizeof(ipstate_t *)); bzero((char *)bucketlens, newsize * sizeof(u_int)); WRITE_ENTER(&softc->ipf_state); if (softs->ipf_state_table != NULL) { KFREES(softs->ipf_state_table, softs->ipf_state_size * sizeof(*softs->ipf_state_table)); } softs->ipf_state_table = newtab; if (softs->ipf_state_seed != NULL) { KFREES(softs->ipf_state_seed, softs->ipf_state_size * sizeof(*softs->ipf_state_seed)); } softs->ipf_state_seed = newseed; if (softs->ipf_state_stats.iss_bucketlen != NULL) { KFREES(softs->ipf_state_stats.iss_bucketlen, softs->ipf_state_size * sizeof(u_int)); } softs->ipf_state_stats.iss_bucketlen = bucketlens; softs->ipf_state_maxbucket = maxbucket; softs->ipf_state_size = newsize; /* * Walk through the entire list of state table entries and put them * in the new state table, somewhere. Because we have a new table, * we need to restart the counter of how many chains are in use. 
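* Each entry keeps its stored is_hv value; only the modulus over the new table size changes.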
*/ softs->ipf_state_stats.iss_inuse = 0; for (is = softs->ipf_state_list; is != NULL; is = is->is_next) { is->is_hnext = NULL; is->is_phnext = NULL; hv = is->is_hv % softs->ipf_state_size; if (softs->ipf_state_table[hv] != NULL) softs->ipf_state_table[hv]->is_phnext = &is->is_hnext; else softs->ipf_state_stats.iss_inuse++; is->is_phnext = softs->ipf_state_table + hv; is->is_hnext = softs->ipf_state_table[hv]; softs->ipf_state_table[hv] = is; softs->ipf_state_stats.iss_bucketlen[hv]++; } RWLOCK_EXIT(&softc->ipf_state); return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_state_add_tq */ /* Returns: ipftq_t * - NULL = failure, else pointer to new timeout */ /* queue */ /* Parameters: softc(I) - pointer to main soft context */ /* ttl(I) - pointer to the ttl for the new queue */ /* */ /* Request a pointer to a timeout queue that has a ttl as given by the */ /* value being passed in. The timeout queue is added tot the list of those */ /* used internally for stateful filtering. */ /* ------------------------------------------------------------------------ */ ipftq_t * ipf_state_add_tq(softc, ttl) ipf_main_softc_t *softc; int ttl; { ipf_state_softc_t *softs = softc->ipf_state_soft; return ipf_addtimeoutqueue(softc, &softs->ipf_state_usertq, ttl); } #ifndef _KERNEL /* * Display the built up state table rules and mapping entries. */ void ipf_state_dump(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_state_softc_t *softs = arg; ipstate_t *ips; printf("List of active state sessions:\n"); for (ips = softs->ipf_state_list; ips != NULL; ) ips = printstate(ips, opts & (OPT_DEBUG|OPT_VERBOSE), softc->ipf_ticks); } #endif Index: head/sys/contrib/ipfilter/netinet/ip_sync.c =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_sync.c (revision 358557) +++ head/sys/contrib/ipfilter/netinet/ip_sync.c (revision 358558) @@ -1,1461 +1,1461 @@ /* $FreeBSD$ */ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. 
*/ #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #include #include #include #include #if !defined(_KERNEL) && !defined(__KERNEL__) # include # include # include # define _KERNEL # define KERNEL # include # undef _KERNEL # undef KERNEL #else # include # if !defined(__SVR4) # include # endif # include -# if __FreeBSD_version >= 500000 +# ifdef __FreeBSD_version # include # endif #endif #if defined(__NetBSD__) && (__NetBSD_Version__ >= 104000000) # include #endif #if defined(_KERNEL) && defined(__FreeBSD_version) # include # include #else # include #endif #include # include #include #if defined(__SVR4) # include # include # ifdef _KERNEL # include # endif # include # include #endif #include #ifdef sun # include #endif #include #include #include #include # include # include #include #include #include "netinet/ip_compat.h" #include #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_frag.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #include "netinet/ip_sync.h" #ifdef USE_INET6 #include #endif #if defined(__FreeBSD_version) # include # if defined(_KERNEL) && !defined(IPFILTER_LKM) # include # include # endif #endif /* END OF INCLUDES */ #if !defined(lint) static const char rcsid[] = "@(#)$Id$"; #endif #define SYNC_STATETABSZ 256 #define SYNC_NATTABSZ 256 typedef struct ipf_sync_softc_s { ipfmutex_t ipf_syncadd; ipfmutex_t ipsl_mutex; ipfrwlock_t ipf_syncstate; ipfrwlock_t ipf_syncnat; #if SOLARIS && defined(_KERNEL) kcondvar_t ipslwait; #endif synclist_t **syncstatetab; synclist_t **syncnattab; synclogent_t *synclog; syncupdent_t *syncupd; u_int ipf_sync_num; u_int ipf_sync_wrap; u_int sl_idx; /* next available sync log entry */ u_int su_idx; /* next available sync update entry */ u_int sl_tail; /* next sync log entry to read */ u_int su_tail; /* next sync update entry to read */ int ipf_sync_log_sz; int ipf_sync_nat_tab_sz; int ipf_sync_state_tab_sz; int ipf_sync_debug; int ipf_sync_events; u_32_t ipf_sync_lastwakeup; int ipf_sync_wake_interval; int ipf_sync_event_high_wm; int ipf_sync_queue_high_wm; int ipf_sync_inited; } ipf_sync_softc_t; static int ipf_sync_flush_table __P((ipf_sync_softc_t *, int, synclist_t **)); static void ipf_sync_wakeup __P((ipf_main_softc_t *)); static void ipf_sync_del __P((ipf_sync_softc_t *, synclist_t *)); static void ipf_sync_poll_wakeup __P((ipf_main_softc_t *)); static int ipf_sync_nat __P((ipf_main_softc_t *, synchdr_t *, void *)); static int ipf_sync_state __P((ipf_main_softc_t *, synchdr_t *, void *)); # if !defined(sparc) && !defined(__hppa) void ipf_sync_tcporder __P((int, struct tcpdata *)); void ipf_sync_natorder __P((int, struct nat *)); void ipf_sync_storder __P((int, struct ipstate *)); # endif void * ipf_sync_soft_create(softc) ipf_main_softc_t *softc; { ipf_sync_softc_t *softs; KMALLOC(softs, ipf_sync_softc_t *); if (softs == NULL) { IPFERROR(110024); return NULL; } bzero((char *)softs, sizeof(*softs)); softs->ipf_sync_log_sz = SYNCLOG_SZ; softs->ipf_sync_nat_tab_sz = SYNC_STATETABSZ; softs->ipf_sync_state_tab_sz = SYNC_STATETABSZ; softs->ipf_sync_event_high_wm = SYNCLOG_SZ * 100 / 90; /* 90% */ softs->ipf_sync_queue_high_wm = SYNCLOG_SZ * 100 / 90; /* 90% */ return softs; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_init */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: Nil */ /* */ /* Initialise all of the locks required for the sync code and initialise */ /* any 
data structures, as required. */ /* ------------------------------------------------------------------------ */ int ipf_sync_soft_init(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_sync_softc_t *softs = arg; KMALLOCS(softs->synclog, synclogent_t *, softs->ipf_sync_log_sz * sizeof(*softs->synclog)); if (softs->synclog == NULL) return -1; bzero((char *)softs->synclog, softs->ipf_sync_log_sz * sizeof(*softs->synclog)); KMALLOCS(softs->syncupd, syncupdent_t *, softs->ipf_sync_log_sz * sizeof(*softs->syncupd)); if (softs->syncupd == NULL) return -2; bzero((char *)softs->syncupd, softs->ipf_sync_log_sz * sizeof(*softs->syncupd)); KMALLOCS(softs->syncstatetab, synclist_t **, softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab)); if (softs->syncstatetab == NULL) return -3; bzero((char *)softs->syncstatetab, softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab)); KMALLOCS(softs->syncnattab, synclist_t **, softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab)); if (softs->syncnattab == NULL) return -3; bzero((char *)softs->syncnattab, softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab)); softs->ipf_sync_num = 1; softs->ipf_sync_wrap = 0; softs->sl_idx = 0; softs->su_idx = 0; softs->sl_tail = 0; softs->su_tail = 0; softs->ipf_sync_events = 0; softs->ipf_sync_lastwakeup = 0; # if SOLARIS && defined(_KERNEL) cv_init(&softs->ipslwait, "ipsl condvar", CV_DRIVER, NULL); # endif RWLOCK_INIT(&softs->ipf_syncstate, "add things to state sync table"); RWLOCK_INIT(&softs->ipf_syncnat, "add things to nat sync table"); MUTEX_INIT(&softs->ipf_syncadd, "add things to sync table"); MUTEX_INIT(&softs->ipsl_mutex, "read ring lock"); softs->ipf_sync_inited = 1; return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_unload */ /* Returns: int - 0 == success, -1 == failure */ /* Parameters: Nil */ /* */ /* Destroy the locks created when initialising and free any memory in use */ /* with the synchronisation tables. */ /* ------------------------------------------------------------------------ */ int ipf_sync_soft_fini(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_sync_softc_t *softs = arg; if (softs->syncnattab != NULL) { ipf_sync_flush_table(softs, softs->ipf_sync_nat_tab_sz, softs->syncnattab); KFREES(softs->syncnattab, softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab)); softs->syncnattab = NULL; } if (softs->syncstatetab != NULL) { ipf_sync_flush_table(softs, softs->ipf_sync_state_tab_sz, softs->syncstatetab); KFREES(softs->syncstatetab, softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab)); softs->syncstatetab = NULL; } if (softs->syncupd != NULL) { KFREES(softs->syncupd, softs->ipf_sync_log_sz * sizeof(*softs->syncupd)); softs->syncupd = NULL; } if (softs->synclog != NULL) { KFREES(softs->synclog, softs->ipf_sync_log_sz * sizeof(*softs->synclog)); softs->synclog = NULL; } if (softs->ipf_sync_inited == 1) { MUTEX_DESTROY(&softs->ipsl_mutex); MUTEX_DESTROY(&softs->ipf_syncadd); RW_DESTROY(&softs->ipf_syncnat); RW_DESTROY(&softs->ipf_syncstate); softs->ipf_sync_inited = 0; } return 0; } void ipf_sync_soft_destroy(softc, arg) ipf_main_softc_t *softc; void *arg; { ipf_sync_softc_t *softs = arg; KFREE(softs); } # if !defined(sparc) /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_tcporder */ /* Returns: Nil */ /* Parameters: way(I) - direction of byte order conversion. */ /* td(IO) - pointer to data to be converted. 
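(way != 0 converts to network byte order; way == 0 converts back to host order.)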
*/ /* */ /* Do byte swapping on values in the TCP state information structure that */ /* need to be used at both ends by the host in their native byte order. */ /* ------------------------------------------------------------------------ */ void ipf_sync_tcporder(way, td) int way; tcpdata_t *td; { if (way) { td->td_maxwin = htons(td->td_maxwin); td->td_end = htonl(td->td_end); td->td_maxend = htonl(td->td_maxend); } else { td->td_maxwin = ntohs(td->td_maxwin); td->td_end = ntohl(td->td_end); td->td_maxend = ntohl(td->td_maxend); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_natorder */ /* Returns: Nil */ /* Parameters: way(I) - direction of byte order conversion. */ /* nat(IO) - pointer to data to be converted. */ /* */ /* Do byte swapping on values in the NAT data structure that need to be */ /* used at both ends by the host in their native byte order. */ /* ------------------------------------------------------------------------ */ void ipf_sync_natorder(way, n) int way; nat_t *n; { if (way) { n->nat_age = htonl(n->nat_age); n->nat_flags = htonl(n->nat_flags); n->nat_ipsumd = htonl(n->nat_ipsumd); n->nat_use = htonl(n->nat_use); n->nat_dir = htonl(n->nat_dir); } else { n->nat_age = ntohl(n->nat_age); n->nat_flags = ntohl(n->nat_flags); n->nat_ipsumd = ntohl(n->nat_ipsumd); n->nat_use = ntohl(n->nat_use); n->nat_dir = ntohl(n->nat_dir); } } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_storder */ /* Returns: Nil */ /* Parameters: way(I) - direction of byte order conversion. */ /* ips(IO) - pointer to data to be converted. */ /* */ /* Do byte swapping on values in the IP state data structure that need to */ /* be used at both ends by the host in their native byte order. 
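The embedded per-direction TCP data is converted via ipf_sync_tcporder.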
*/ /* ------------------------------------------------------------------------ */ void ipf_sync_storder(way, ips) int way; ipstate_t *ips; { ipf_sync_tcporder(way, &ips->is_tcp.ts_data[0]); ipf_sync_tcporder(way, &ips->is_tcp.ts_data[1]); if (way) { ips->is_hv = htonl(ips->is_hv); ips->is_die = htonl(ips->is_die); ips->is_pass = htonl(ips->is_pass); ips->is_flags = htonl(ips->is_flags); ips->is_opt[0] = htonl(ips->is_opt[0]); ips->is_opt[1] = htonl(ips->is_opt[1]); ips->is_optmsk[0] = htonl(ips->is_optmsk[0]); ips->is_optmsk[1] = htonl(ips->is_optmsk[1]); ips->is_sec = htons(ips->is_sec); ips->is_secmsk = htons(ips->is_secmsk); ips->is_auth = htons(ips->is_auth); ips->is_authmsk = htons(ips->is_authmsk); ips->is_s0[0] = htonl(ips->is_s0[0]); ips->is_s0[1] = htonl(ips->is_s0[1]); ips->is_smsk[0] = htons(ips->is_smsk[0]); ips->is_smsk[1] = htons(ips->is_smsk[1]); } else { ips->is_hv = ntohl(ips->is_hv); ips->is_die = ntohl(ips->is_die); ips->is_pass = ntohl(ips->is_pass); ips->is_flags = ntohl(ips->is_flags); ips->is_opt[0] = ntohl(ips->is_opt[0]); ips->is_opt[1] = ntohl(ips->is_opt[1]); ips->is_optmsk[0] = ntohl(ips->is_optmsk[0]); ips->is_optmsk[1] = ntohl(ips->is_optmsk[1]); ips->is_sec = ntohs(ips->is_sec); ips->is_secmsk = ntohs(ips->is_secmsk); ips->is_auth = ntohs(ips->is_auth); ips->is_authmsk = ntohs(ips->is_authmsk); ips->is_s0[0] = ntohl(ips->is_s0[0]); ips->is_s0[1] = ntohl(ips->is_s0[1]); ips->is_smsk[0] = ntohl(ips->is_smsk[0]); ips->is_smsk[1] = ntohl(ips->is_smsk[1]); } } # else /* !defined(sparc) */ # define ipf_sync_tcporder(x,y) # define ipf_sync_natorder(x,y) # define ipf_sync_storder(x,y) # endif /* !defined(sparc) */ /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_write */ /* Returns: int - 0 == success, else error value. */ /* Parameters: uio(I) - pointer to information about data to write */ /* */ /* Moves data from user space into the kernel and uses it for updating data */ /* structures in the state/NAT tables. 
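Each record is a synchdr_t followed by sm_len bytes of payload; EAGAIN is returned if either part has not yet arrived in full.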
*/ /* ------------------------------------------------------------------------ */ int ipf_sync_write(softc, uio) ipf_main_softc_t *softc; struct uio *uio; { ipf_sync_softc_t *softs = softc->ipf_sync_soft; synchdr_t sh; /* * THIS MUST BE SUFFICIENT LARGE TO STORE * ANY POSSIBLE DATA TYPE */ char data[2048]; int err = 0; # if BSD_GE_YEAR(199306) || defined(__FreeBSD__) uio->uio_rw = UIO_WRITE; # endif /* Try to get bytes */ while (uio->uio_resid > 0) { if (uio->uio_resid >= sizeof(sh)) { err = UIOMOVE(&sh, sizeof(sh), UIO_WRITE, uio); if (err) { if (softs->ipf_sync_debug > 2) printf("uiomove(header) failed: %d\n", err); return err; } /* convert to host order */ sh.sm_magic = ntohl(sh.sm_magic); sh.sm_len = ntohl(sh.sm_len); sh.sm_num = ntohl(sh.sm_num); if (softs->ipf_sync_debug > 8) printf("[%d] Read v:%d p:%d cmd:%d table:%d rev:%d len:%d magic:%x\n", sh.sm_num, sh.sm_v, sh.sm_p, sh.sm_cmd, sh.sm_table, sh.sm_rev, sh.sm_len, sh.sm_magic); if (sh.sm_magic != SYNHDRMAGIC) { if (softs->ipf_sync_debug > 2) printf("uiomove(header) invalid %s\n", "magic"); IPFERROR(110001); return EINVAL; } if (sh.sm_v != 4 && sh.sm_v != 6) { if (softs->ipf_sync_debug > 2) printf("uiomove(header) invalid %s\n", "protocol"); IPFERROR(110002); return EINVAL; } if (sh.sm_cmd > SMC_MAXCMD) { if (softs->ipf_sync_debug > 2) printf("uiomove(header) invalid %s\n", "command"); IPFERROR(110003); return EINVAL; } if (sh.sm_table > SMC_MAXTBL) { if (softs->ipf_sync_debug > 2) printf("uiomove(header) invalid %s\n", "table"); IPFERROR(110004); return EINVAL; } } else { /* unsufficient data, wait until next call */ if (softs->ipf_sync_debug > 2) printf("uiomove(header) insufficient data"); IPFERROR(110005); return EAGAIN; } /* * We have a header, so try to read the amount of data * needed for the request */ /* not supported */ if (sh.sm_len == 0) { if (softs->ipf_sync_debug > 2) printf("uiomove(data zero length %s\n", "not supported"); IPFERROR(110006); return EINVAL; } if (uio->uio_resid >= sh.sm_len) { err = UIOMOVE(data, sh.sm_len, UIO_WRITE, uio); if (err) { if (softs->ipf_sync_debug > 2) printf("uiomove(data) failed: %d\n", err); return err; } if (softs->ipf_sync_debug > 7) printf("uiomove(data) %d bytes read\n", sh.sm_len); if (sh.sm_table == SMC_STATE) err = ipf_sync_state(softc, &sh, data); else if (sh.sm_table == SMC_NAT) err = ipf_sync_nat(softc, &sh, data); if (softs->ipf_sync_debug > 7) printf("[%d] Finished with error %d\n", sh.sm_num, err); } else { /* insufficient data, wait until next call */ if (softs->ipf_sync_debug > 2) printf("uiomove(data) %s %d bytes, got %d\n", "insufficient data, need", sh.sm_len, (int)uio->uio_resid); IPFERROR(110007); return EAGAIN; } } /* no more data */ return 0; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_read */ /* Returns: int - 0 == success, else error value. */ /* Parameters: uio(O) - pointer to information about where to store data */ /* */ /* This function is called when a user program wants to read some data */ /* for pending state/NAT updates. If no data is available, the caller is */ /* put to sleep, pending a wakeup from the "lower half" of this code. 
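Pending log entries are copied out ahead of pending update entries.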
*/ /* ------------------------------------------------------------------------ */ int ipf_sync_read(softc, uio) ipf_main_softc_t *softc; struct uio *uio; { ipf_sync_softc_t *softs = softc->ipf_sync_soft; syncupdent_t *su; synclogent_t *sl; int err = 0; if ((uio->uio_resid & 3) || (uio->uio_resid < 8)) { IPFERROR(110008); return EINVAL; } # if BSD_GE_YEAR(199306) || defined(__FreeBSD__) uio->uio_rw = UIO_READ; # endif MUTEX_ENTER(&softs->ipsl_mutex); while ((softs->sl_tail == softs->sl_idx) && (softs->su_tail == softs->su_idx)) { # if defined(_KERNEL) # if SOLARIS if (!cv_wait_sig(&softs->ipslwait, &softs->ipsl_mutex.ipf_lk)) { MUTEX_EXIT(&softs->ipsl_mutex); IPFERROR(110009); return EINTR; } # else MUTEX_EXIT(&softs->ipsl_mutex); err = SLEEP(&softs->sl_tail, "ipl sleep"); if (err) { IPFERROR(110012); return EINTR; } MUTEX_ENTER(&softs->ipsl_mutex); # endif /* SOLARIS */ # endif /* _KERNEL */ } while ((softs->sl_tail < softs->sl_idx) && (uio->uio_resid > sizeof(*sl))) { sl = softs->synclog + softs->sl_tail++; MUTEX_EXIT(&softs->ipsl_mutex); err = UIOMOVE(sl, sizeof(*sl), UIO_READ, uio); if (err != 0) goto goterror; MUTEX_ENTER(&softs->ipsl_mutex); } while ((softs->su_tail < softs->su_idx) && (uio->uio_resid > sizeof(*su))) { su = softs->syncupd + softs->su_tail; softs->su_tail++; MUTEX_EXIT(&softs->ipsl_mutex); err = UIOMOVE(su, sizeof(*su), UIO_READ, uio); if (err != 0) goto goterror; MUTEX_ENTER(&softs->ipsl_mutex); if (su->sup_hdr.sm_sl != NULL) su->sup_hdr.sm_sl->sl_idx = -1; } if (softs->sl_tail == softs->sl_idx) softs->sl_tail = softs->sl_idx = 0; if (softs->su_tail == softs->su_idx) softs->su_tail = softs->su_idx = 0; MUTEX_EXIT(&softs->ipsl_mutex); goterror: return err; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_state */ /* Returns: int - 0 == success, else error value. */ /* Parameters: sp(I) - pointer to sync packet data header */ /* uio(I) - pointer to user data for further information */ /* */ /* Updates the state table according to information passed in the sync */ /* header. As required, more data is fetched from the uio structure but */ /* varies depending on the contents of the sync header. This function can */ /* create a new state entry or update one. Deletion is left to the state */ /* structures being timed out correctly. */ /* ------------------------------------------------------------------------ */ static int ipf_sync_state(softc, sp, data) ipf_main_softc_t *softc; synchdr_t *sp; void *data; { ipf_sync_softc_t *softs = softc->ipf_sync_soft; synctcp_update_t su; ipstate_t *is, sn; synclist_t *sl; frentry_t *fr; u_int hv; int err = 0; hv = sp->sm_num & (softs->ipf_sync_state_tab_sz - 1); switch (sp->sm_cmd) { case SMC_CREATE : bcopy(data, &sn, sizeof(sn)); KMALLOC(is, ipstate_t *); if (is == NULL) { IPFERROR(110013); err = ENOMEM; break; } KMALLOC(sl, synclist_t *); if (sl == NULL) { IPFERROR(110014); err = ENOMEM; KFREE(is); break; } bzero((char *)is, offsetof(ipstate_t, is_die)); bcopy((char *)&sn.is_die, (char *)&is->is_die, sizeof(*is) - offsetof(ipstate_t, is_die)); ipf_sync_storder(0, is); /* * We need to find the same rule on the slave as was used on * the master to create this state entry. 
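* The lookup below is by group name and rule number (is_group, is_rulen).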
*/ READ_ENTER(&softc->ipf_mutex); fr = ipf_getrulen(softc, IPL_LOGIPF, sn.is_group, sn.is_rulen); if (fr != NULL) { MUTEX_ENTER(&fr->fr_lock); fr->fr_ref++; fr->fr_statecnt++; MUTEX_EXIT(&fr->fr_lock); } RWLOCK_EXIT(&softc->ipf_mutex); if (softs->ipf_sync_debug > 4) printf("[%d] Filter rules = %p\n", sp->sm_num, fr); is->is_rule = fr; is->is_sync = sl; sl->sl_idx = -1; sl->sl_ips = is; bcopy(sp, &sl->sl_hdr, sizeof(struct synchdr)); WRITE_ENTER(&softs->ipf_syncstate); WRITE_ENTER(&softc->ipf_state); sl->sl_pnext = softs->syncstatetab + hv; sl->sl_next = softs->syncstatetab[hv]; if (softs->syncstatetab[hv] != NULL) softs->syncstatetab[hv]->sl_pnext = &sl->sl_next; softs->syncstatetab[hv] = sl; MUTEX_DOWNGRADE(&softs->ipf_syncstate); ipf_state_insert(softc, is, sp->sm_rev); /* * Do not initialise the interface pointers for the state * entry as the full complement of interface names may not * be present. * * Put this state entry on its timeout queue. */ /*fr_setstatequeue(is, sp->sm_rev);*/ break; case SMC_UPDATE : bcopy(data, &su, sizeof(su)); if (softs->ipf_sync_debug > 4) printf("[%d] Update age %lu state %d/%d \n", sp->sm_num, su.stu_age, su.stu_state[0], su.stu_state[1]); READ_ENTER(&softs->ipf_syncstate); for (sl = softs->syncstatetab[hv]; (sl != NULL); sl = sl->sl_next) if (sl->sl_hdr.sm_num == sp->sm_num) break; if (sl == NULL) { if (softs->ipf_sync_debug > 1) printf("[%d] State not found - can't update\n", sp->sm_num); RWLOCK_EXIT(&softs->ipf_syncstate); IPFERROR(110015); err = ENOENT; break; } READ_ENTER(&softc->ipf_state); if (softs->ipf_sync_debug > 6) printf("[%d] Data from state v:%d p:%d cmd:%d table:%d rev:%d\n", sp->sm_num, sl->sl_hdr.sm_v, sl->sl_hdr.sm_p, sl->sl_hdr.sm_cmd, sl->sl_hdr.sm_table, sl->sl_hdr.sm_rev); is = sl->sl_ips; MUTEX_ENTER(&is->is_lock); switch (sp->sm_p) { case IPPROTO_TCP : /* XXX FV --- shouldn't we do ntohl/htonl???? XXX */ is->is_send = su.stu_data[0].td_end; is->is_maxsend = su.stu_data[0].td_maxend; is->is_maxswin = su.stu_data[0].td_maxwin; is->is_state[0] = su.stu_state[0]; is->is_dend = su.stu_data[1].td_end; is->is_maxdend = su.stu_data[1].td_maxend; is->is_maxdwin = su.stu_data[1].td_maxwin; is->is_state[1] = su.stu_state[1]; break; default : break; } if (softs->ipf_sync_debug > 6) printf("[%d] Setting timers for state\n", sp->sm_num); ipf_state_setqueue(softc, is, sp->sm_rev); MUTEX_EXIT(&is->is_lock); break; default : IPFERROR(110016); err = EINVAL; break; } if (err == 0) { RWLOCK_EXIT(&softc->ipf_state); RWLOCK_EXIT(&softs->ipf_syncstate); } if (softs->ipf_sync_debug > 6) printf("[%d] Update completed with error %d\n", sp->sm_num, err); return err; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_del */ /* Returns: Nil */ /* Parameters: sl(I) - pointer to synclist object to delete */ /* */ /* Deletes an object from the synclist. */ /* ------------------------------------------------------------------------ */ static void ipf_sync_del(softs, sl) ipf_sync_softc_t *softs; synclist_t *sl; { *sl->sl_pnext = sl->sl_next; if (sl->sl_next != NULL) sl->sl_next->sl_pnext = sl->sl_pnext; if (sl->sl_idx != -1) softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_del_state */ /* Returns: Nil */ /* Parameters: sl(I) - pointer to synclist object to delete */ /* */ /* Deletes an object from the synclist state table and free's its memory. 
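The state sync lock is held for the unlink; the memory is freed after it is dropped.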
*/ /* ------------------------------------------------------------------------ */ void ipf_sync_del_state(arg, sl) void *arg; synclist_t *sl; { ipf_sync_softc_t *softs = arg; WRITE_ENTER(&softs->ipf_syncstate); ipf_sync_del(softs, sl); RWLOCK_EXIT(&softs->ipf_syncstate); KFREE(sl); } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_del_nat */ /* Returns: Nil */ /* Parameters: sl(I) - pointer to synclist object to delete */ /* */ /* Deletes an object from the synclist nat table and free's its memory. */ /* ------------------------------------------------------------------------ */ void ipf_sync_del_nat(arg, sl) void *arg; synclist_t *sl; { ipf_sync_softc_t *softs = arg; WRITE_ENTER(&softs->ipf_syncnat); ipf_sync_del(softs, sl); RWLOCK_EXIT(&softs->ipf_syncnat); KFREE(sl); } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_nat */ /* Returns: int - 0 == success, else error value. */ /* Parameters: sp(I) - pointer to sync packet data header */ /* uio(I) - pointer to user data for further information */ /* */ /* Updates the NAT table according to information passed in the sync */ /* header. As required, more data is fetched from the uio structure but */ /* varies depending on the contents of the sync header. This function can */ /* create a new NAT entry or update one. Deletion is left to the NAT */ /* structures being timed out correctly. */ /* ------------------------------------------------------------------------ */ static int ipf_sync_nat(softc, sp, data) ipf_main_softc_t *softc; synchdr_t *sp; void *data; { ipf_sync_softc_t *softs = softc->ipf_sync_soft; syncupdent_t su; nat_t *n, *nat; synclist_t *sl; u_int hv = 0; int err = 0; READ_ENTER(&softs->ipf_syncnat); switch (sp->sm_cmd) { case SMC_CREATE : KMALLOC(n, nat_t *); if (n == NULL) { IPFERROR(110017); err = ENOMEM; break; } KMALLOC(sl, synclist_t *); if (sl == NULL) { IPFERROR(110018); err = ENOMEM; KFREE(n); break; } nat = (nat_t *)data; bzero((char *)n, offsetof(nat_t, nat_age)); bcopy((char *)&nat->nat_age, (char *)&n->nat_age, sizeof(*n) - offsetof(nat_t, nat_age)); ipf_sync_natorder(0, n); n->nat_sync = sl; n->nat_rev = sl->sl_rev; sl->sl_idx = -1; sl->sl_ipn = n; sl->sl_num = ntohl(sp->sm_num); WRITE_ENTER(&softc->ipf_nat); sl->sl_pnext = softs->syncnattab + hv; sl->sl_next = softs->syncnattab[hv]; if (softs->syncnattab[hv] != NULL) softs->syncnattab[hv]->sl_pnext = &sl->sl_next; softs->syncnattab[hv] = sl; (void) ipf_nat_insert(softc, softc->ipf_nat_soft, n); RWLOCK_EXIT(&softc->ipf_nat); break; case SMC_UPDATE : bcopy(data, &su, sizeof(su)); for (sl = softs->syncnattab[hv]; (sl != NULL); sl = sl->sl_next) if (sl->sl_hdr.sm_num == sp->sm_num) break; if (sl == NULL) { IPFERROR(110019); err = ENOENT; break; } READ_ENTER(&softc->ipf_nat); nat = sl->sl_ipn; nat->nat_rev = sl->sl_rev; MUTEX_ENTER(&nat->nat_lock); ipf_nat_setqueue(softc, softc->ipf_nat_soft, nat); MUTEX_EXIT(&nat->nat_lock); RWLOCK_EXIT(&softc->ipf_nat); break; default : IPFERROR(110020); err = EINVAL; break; } RWLOCK_EXIT(&softs->ipf_syncnat); return err; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_new */ /* Returns: synclist_t* - NULL == failure, else pointer to new synclist */ /* data structure. 
*/ /* Parameters: tab(I) - type of synclist_t to create */ /* fin(I) - pointer to packet information */ /* ptr(I) - pointer to owning object */ /* */ /* Creates a new sync table entry and notifies any sleepers that it's there */ /* waiting to be processed. */ /* ------------------------------------------------------------------------ */ synclist_t * ipf_sync_new(softc, tab, fin, ptr) ipf_main_softc_t *softc; int tab; fr_info_t *fin; void *ptr; { ipf_sync_softc_t *softs = softc->ipf_sync_soft; synclist_t *sl, *ss; synclogent_t *sle; u_int hv, sz; if (softs->sl_idx == softs->ipf_sync_log_sz) return NULL; KMALLOC(sl, synclist_t *); if (sl == NULL) return NULL; MUTEX_ENTER(&softs->ipf_syncadd); /* * Get a unique number for this synclist_t. The number is only meant * to be unique for the lifetime of the structure and may be reused * later. */ softs->ipf_sync_num++; if (softs->ipf_sync_num == 0) { softs->ipf_sync_num = 1; softs->ipf_sync_wrap++; } /* * Use the synch number of the object as the hash key. Should end up * with relatively even distribution over time. * XXX - an attacker could lunch an DoS attack, of sorts, if they are * the only one causing new table entries by only keeping open every * nth connection they make, where n is a value in the interval * [0, SYNC_STATETABSZ-1]. */ switch (tab) { case SMC_STATE : hv = softs->ipf_sync_num & (softs->ipf_sync_state_tab_sz - 1); while (softs->ipf_sync_wrap != 0) { for (ss = softs->syncstatetab[hv]; ss; ss = ss->sl_next) if (ss->sl_hdr.sm_num == softs->ipf_sync_num) break; if (ss == NULL) break; softs->ipf_sync_num++; hv = softs->ipf_sync_num & (softs->ipf_sync_state_tab_sz - 1); } sl->sl_pnext = softs->syncstatetab + hv; sl->sl_next = softs->syncstatetab[hv]; softs->syncstatetab[hv] = sl; break; case SMC_NAT : hv = softs->ipf_sync_num & (softs->ipf_sync_nat_tab_sz - 1); while (softs->ipf_sync_wrap != 0) { for (ss = softs->syncnattab[hv]; ss; ss = ss->sl_next) if (ss->sl_hdr.sm_num == softs->ipf_sync_num) break; if (ss == NULL) break; softs->ipf_sync_num++; hv = softs->ipf_sync_num & (softs->ipf_sync_nat_tab_sz - 1); } sl->sl_pnext = softs->syncnattab + hv; sl->sl_next = softs->syncnattab[hv]; softs->syncnattab[hv] = sl; break; default : break; } sl->sl_num = softs->ipf_sync_num; MUTEX_EXIT(&softs->ipf_syncadd); sl->sl_magic = htonl(SYNHDRMAGIC); sl->sl_v = fin->fin_v; sl->sl_p = fin->fin_p; sl->sl_cmd = SMC_CREATE; sl->sl_idx = -1; sl->sl_table = tab; sl->sl_rev = fin->fin_rev; if (tab == SMC_STATE) { sl->sl_ips = ptr; sz = sizeof(*sl->sl_ips); } else if (tab == SMC_NAT) { sl->sl_ipn = ptr; sz = sizeof(*sl->sl_ipn); } else { ptr = NULL; sz = 0; } sl->sl_len = sz; /* * Create the log entry to be read by a user daemon. When it has been * finished and put on the queue, send a signal to wakeup any waiters. 
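* The sm_num and sm_len header fields are stored in network byte order in the log entry.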
*/ MUTEX_ENTER(&softs->ipf_syncadd); sle = softs->synclog + softs->sl_idx++; bcopy((char *)&sl->sl_hdr, (char *)&sle->sle_hdr, sizeof(sle->sle_hdr)); sle->sle_hdr.sm_num = htonl(sle->sle_hdr.sm_num); sle->sle_hdr.sm_len = htonl(sle->sle_hdr.sm_len); if (ptr != NULL) { bcopy((char *)ptr, (char *)&sle->sle_un, sz); if (tab == SMC_STATE) { ipf_sync_storder(1, &sle->sle_un.sleu_ips); } else if (tab == SMC_NAT) { ipf_sync_natorder(1, &sle->sle_un.sleu_ipn); } } MUTEX_EXIT(&softs->ipf_syncadd); ipf_sync_wakeup(softc); return sl; } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_update */ /* Returns: Nil */ /* Parameters: tab(I) - type of synclist_t to create */ /* fin(I) - pointer to packet information */ /* sl(I) - pointer to synchronisation object */ /* */ /* For outbound packets, only, create an sync update record for the user */ /* process to read. */ /* ------------------------------------------------------------------------ */ void ipf_sync_update(softc, tab, fin, sl) ipf_main_softc_t *softc; int tab; fr_info_t *fin; synclist_t *sl; { ipf_sync_softc_t *softs = softc->ipf_sync_soft; synctcp_update_t *st; syncupdent_t *slu; ipstate_t *ips; nat_t *nat; ipfrwlock_t *lock; if (fin->fin_out == 0 || sl == NULL) return; if (tab == SMC_STATE) { lock = &softs->ipf_syncstate; } else { lock = &softs->ipf_syncnat; } READ_ENTER(lock); if (sl->sl_idx == -1) { MUTEX_ENTER(&softs->ipf_syncadd); slu = softs->syncupd + softs->su_idx; sl->sl_idx = softs->su_idx++; MUTEX_EXIT(&softs->ipf_syncadd); bcopy((char *)&sl->sl_hdr, (char *)&slu->sup_hdr, sizeof(slu->sup_hdr)); slu->sup_hdr.sm_magic = htonl(SYNHDRMAGIC); slu->sup_hdr.sm_sl = sl; slu->sup_hdr.sm_cmd = SMC_UPDATE; slu->sup_hdr.sm_table = tab; slu->sup_hdr.sm_num = htonl(sl->sl_num); slu->sup_hdr.sm_len = htonl(sizeof(struct synctcp_update)); slu->sup_hdr.sm_rev = fin->fin_rev; # if 0 if (fin->fin_p == IPPROTO_TCP) { st->stu_len[0] = 0; st->stu_len[1] = 0; } # endif } else slu = softs->syncupd + sl->sl_idx; /* * Only TCP has complex timeouts, others just use default timeouts. * For TCP, we only need to track the connection state and window. */ if (fin->fin_p == IPPROTO_TCP) { st = &slu->sup_tcp; if (tab == SMC_STATE) { ips = sl->sl_ips; st->stu_age = htonl(ips->is_die); st->stu_data[0].td_end = ips->is_send; st->stu_data[0].td_maxend = ips->is_maxsend; st->stu_data[0].td_maxwin = ips->is_maxswin; st->stu_state[0] = ips->is_state[0]; st->stu_data[1].td_end = ips->is_dend; st->stu_data[1].td_maxend = ips->is_maxdend; st->stu_data[1].td_maxwin = ips->is_maxdwin; st->stu_state[1] = ips->is_state[1]; } else if (tab == SMC_NAT) { nat = sl->sl_ipn; st->stu_age = htonl(nat->nat_age); } } RWLOCK_EXIT(lock); ipf_sync_wakeup(softc); } /* ------------------------------------------------------------------------ */ /* Function: ipf_sync_flush_table */ /* Returns: int - number of entries freed by flushing table */ /* Parameters: tabsize(I) - size of the array pointed to by table */ /* table(I) - pointer to sync table to empty */ /* */ /* Walk through a table of sync entries and free each one. It is assumed */ /* that some lock is held so that nobody else tries to access the table */ /* during this cleanup. 
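The is_sync/nat_sync back-pointers of the owning entries are cleared as each item is freed.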
/* ------------------------------------------------------------------------ */
static int
ipf_sync_flush_table(softs, tabsize, table)
	ipf_sync_softc_t *softs;
	int tabsize;
	synclist_t **table;
{
	synclist_t *sl;
	int i, items;

	items = 0;

	for (i = 0; i < tabsize; i++) {
		while ((sl = table[i]) != NULL) {
			switch (sl->sl_table) {
			case SMC_STATE :
				if (sl->sl_ips != NULL)
					sl->sl_ips->is_sync = NULL;
				break;
			case SMC_NAT :
				if (sl->sl_ipn != NULL)
					sl->sl_ipn->nat_sync = NULL;
				break;
			}
			if (sl->sl_next != NULL)
				sl->sl_next->sl_pnext = sl->sl_pnext;
			table[i] = sl->sl_next;
			if (sl->sl_idx != -1)
				softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
			KFREE(sl);
			items++;
		}
	}

	return items;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_ioctl                                              */
/* Returns:     int - 0 == success, != 0 == failure                         */
/* Parameters:  data(I) - pointer to ioctl data                             */
/*              cmd(I)  - ioctl command integer                             */
/*              mode(I) - file mode bits used with open                     */
/*                                                                          */
/* This function handles the SIOCIPFFL ioctl, which is used to flush the    */
/* sync log or one of the sync tables; all other ioctls return EINVAL.      */
/* ------------------------------------------------------------------------ */
int
ipf_sync_ioctl(softc, data, cmd, mode, uid, ctx)
	ipf_main_softc_t *softc;
	caddr_t data;
	ioctlcmd_t cmd;
	int mode, uid;
	void *ctx;
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
	int error, i;
	SPL_INT(s);

	switch (cmd)
	{
	case SIOCIPFFL:
		error = BCOPYIN(data, &i, sizeof(i));
		if (error != 0) {
			IPFERROR(110023);
			error = EFAULT;
			break;
		}

		switch (i)
		{
		case SMC_RLOG :
			SPL_NET(s);
			MUTEX_ENTER(&softs->ipsl_mutex);
			i = (softs->sl_tail - softs->sl_idx) +
			    (softs->su_tail - softs->su_idx);
			softs->sl_idx = 0;
			softs->su_idx = 0;
			softs->sl_tail = 0;
			softs->su_tail = 0;
			MUTEX_EXIT(&softs->ipsl_mutex);
			SPL_X(s);
			break;

		case SMC_NAT :
			SPL_NET(s);
			WRITE_ENTER(&softs->ipf_syncnat);
			i = ipf_sync_flush_table(softs, SYNC_NATTABSZ,
						 softs->syncnattab);
			RWLOCK_EXIT(&softs->ipf_syncnat);
			SPL_X(s);
			break;

		case SMC_STATE :
			SPL_NET(s);
			WRITE_ENTER(&softs->ipf_syncstate);
			i = ipf_sync_flush_table(softs, SYNC_STATETABSZ,
						 softs->syncstatetab);
			RWLOCK_EXIT(&softs->ipf_syncstate);
			SPL_X(s);
			break;
		}

		error = BCOPYOUT(&i, data, sizeof(i));
		if (error != 0) {
			IPFERROR(110022);
			error = EFAULT;
		}
		break;

	default :
		IPFERROR(110021);
		error = EINVAL;
		break;
	}

	return error;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_canread                                            */
/* Returns:     int - 0 == success, != 0 == failure                         */
/* Parameters:  Nil                                                         */
/*                                                                          */
/* This function provides input to the poll handler about whether or not    */
/* there is data waiting to be read from the /dev/ipsync device.            */
/* ------------------------------------------------------------------------ */
int
ipf_sync_canread(arg)
	void *arg;
{
	ipf_sync_softc_t *softs = arg;

	return !((softs->sl_tail == softs->sl_idx) &&
		 (softs->su_tail == softs->su_idx));
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_canwrite                                           */
/* Returns:     int - 1 == can always write                                 */
/* Parameters:  Nil                                                         */
/*                                                                          */
/* This function lets the poll handler know that it is always ready and     */
/* willing to accept write events.                                          */
/* XXX Maybe this should return false if the sync table is full?            */
/* ------------------------------------------------------------------------ */
int
ipf_sync_canwrite(arg)
	void *arg;
{
	return 1;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_wakeup                                             */
/* Parameters:  Nil                                                         */
/* Returns:     Nil                                                         */
/*                                                                          */
/* This function implements the heuristics that decide how often to         */
/* generate a poll wakeup for programs that are waiting for information     */
/* about when they can do a read on /dev/ipsync.                            */
/*                                                                          */
/* There are three different considerations here:                           */
/* - do not keep a program waiting too long: ipf_sync_wake_interval is the  */
/*   maximum number of ipf ticks to let pass by;                            */
/* - do not let the queue of outstanding things to generate notifies for    */
/*   get too full (ipf_sync_queue_high_wm is the high water mark);          */
/* - do not let too many events get collapsed in before deciding that the   */
/*   other host(s) need an update (ipf_sync_event_high_wm is the high water */
/*   mark for this counter.)                                                */
/* ------------------------------------------------------------------------ */
static void
ipf_sync_wakeup(softc)
	ipf_main_softc_t *softc;
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;

	softs->ipf_sync_events++;
	if ((softc->ipf_ticks >
	     softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval) ||
	    (softs->ipf_sync_events > softs->ipf_sync_event_high_wm) ||
	    ((softs->sl_tail - softs->sl_idx) >
	     softs->ipf_sync_queue_high_wm) ||
	    ((softs->su_tail - softs->su_idx) >
	     softs->ipf_sync_queue_high_wm)) {
		ipf_sync_poll_wakeup(softc);
	}
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_poll_wakeup                                        */
/* Parameters:  Nil                                                         */
/* Returns:     Nil                                                         */
/*                                                                          */
/* Deliver a poll wakeup and reset counters for two of the three heuristics */
/* ------------------------------------------------------------------------ */
static void
ipf_sync_poll_wakeup(softc)
	ipf_main_softc_t *softc;
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;

	softs->ipf_sync_events = 0;
	softs->ipf_sync_lastwakeup = softc->ipf_ticks;

# ifdef _KERNEL
#  if SOLARIS
	MUTEX_ENTER(&softs->ipsl_mutex);
	cv_signal(&softs->ipslwait);
	MUTEX_EXIT(&softs->ipsl_mutex);
	pollwakeup(&softc->ipf_poll_head[IPL_LOGSYNC], POLLIN|POLLRDNORM);
#  else
	WAKEUP(&softs->sl_tail, 0);
	POLLWAKEUP(IPL_LOGSYNC);
#  endif
# endif
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_expire                                             */
/* Parameters:  Nil                                                         */
/* Returns:     Nil                                                         */
/*                                                                          */
/* This is the function called every ipf_tick.  It implements one of the    */
/* three heuristics above *IF* there are events waiting.                    */
/* ------------------------------------------------------------------------ */
void
ipf_sync_expire(softc)
	ipf_main_softc_t *softc;
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;

	if ((softs->ipf_sync_events > 0) &&
	    (softc->ipf_ticks >
	     softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval)) {
		ipf_sync_poll_wakeup(softc);
	}
}
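/* ------------------------------------------------------------------------ */
/* The wakeups above exist to unblock a userland process sleeping in        */
/* poll(2) on /dev/ipsync.  The fragment below is an illustrative sketch    */
/* only, compiled out with #if 0, and not part of ipfilter: it assumes      */
/* nothing beyond the standard open(2)/poll(2)/read(2) interfaces; the      */
/* buffer size is an arbitrary assumption and the parsing of the            */
/* synclogent/syncupdent records read from the device is omitted.           */
/* ------------------------------------------------------------------------ */
#if 0
#include <sys/types.h>

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	char buf[8192];		/* arbitrary read buffer (assumption) */
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/ipsync", O_RDONLY);
	if (fd == -1) {
		perror("open /dev/ipsync");
		exit(1);
	}

	pfd.fd = fd;
	pfd.events = POLLIN | POLLRDNORM;

	for (;;) {
		/* Block until ipf_sync_poll_wakeup() posts a wakeup. */
		if (poll(&pfd, 1, -1) == -1) {
			perror("poll");
			break;
		}
		if (pfd.revents & (POLLIN | POLLRDNORM)) {
			n = read(fd, buf, sizeof(buf));
			if (n <= 0)
				break;
			/* Parse synclogent_t/syncupdent_t records here. */
		}
	}
	close(fd);
	return 0;
}
#endif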