Index: sys/net/pfvar.h
===================================================================
--- sys/net/pfvar.h
+++ sys/net/pfvar.h
@@ -819,8 +819,8 @@
 #ifdef _KERNEL
 /* pfsync */
 typedef int	pfsync_state_import_t(struct pfsync_state *, u_int8_t);
-typedef void	pfsync_insert_state_t(struct pf_state *);
-typedef void	pfsync_update_state_t(struct pf_state *);
+typedef int	pfsync_insert_state_t(struct pf_state *);
+typedef int	pfsync_update_state_t(struct pf_state *);
 typedef void	pfsync_delete_state_t(struct pf_state *);
 typedef void	pfsync_clear_states_t(u_int32_t, const char *);
 typedef int	pfsync_defer_t(struct pf_state *, struct mbuf *);
Index: sys/netpfil/pf/if_pfsync.c
===================================================================
--- sys/netpfil/pf/if_pfsync.c
+++ sys/netpfil/pf/if_pfsync.c
@@ -66,6 +66,8 @@
 #include "opt_inet6.h"
 #include "opt_pf.h"
 
+#include <ck_ring.h>
+
 #include <sys/param.h>
 #include <sys/bus.h>
 #include <sys/endian.h>
@@ -144,16 +146,16 @@
 };
 
 struct pfsync_q {
-	void		(*write)(struct pf_state *, void *);
+	int		(*write)(struct pf_state *, void *);
 	size_t		len;
 	u_int8_t	action;
 };
 
 /* we have one of these for every PFSYNC_S_ */
-static void	pfsync_out_state(struct pf_state *, void *);
-static void	pfsync_out_iack(struct pf_state *, void *);
-static void	pfsync_out_upd_c(struct pf_state *, void *);
-static void	pfsync_out_del(struct pf_state *, void *);
+static int	pfsync_out_state(struct pf_state *, void *);
+static int	pfsync_out_iack(struct pf_state *, void *);
+static int	pfsync_out_upd_c(struct pf_state *, void *);
+static int	pfsync_out_del(struct pf_state *, void *);
 
 static struct pfsync_q pfsync_qs[] = {
 	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
@@ -166,7 +168,7 @@
 
 static void	pfsync_q_ins(struct pf_state *, int, bool);
 static void	pfsync_q_del(struct pf_state *, bool);
-static void	pfsync_update_state(struct pf_state *);
+static int	pfsync_update_state(struct pf_state *);
 
 struct pfsync_upd_req_item {
 	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
@@ -183,6 +185,17 @@
 	struct mbuf			*pd_m;
 };
 
+struct pfsync_msg {
+	struct pf_state		*st;
+	struct mbuf		*m;
+	int			 type;
+#define	PFSYNC_MSG_INSERT	0x01
+#define	PFSYNC_MSG_DEFER	0x02
+#define	PFSYNC_MSG_UNDEFER	0x03
+#define	PFSYNC_MSG_UPDATE	0x04
+#define	PFSYNC_MSG_DELETE	0x05
+};
+
 struct pfsync_softc {
 	/* Configuration */
 	struct ifnet		*sc_ifp;
@@ -200,6 +213,10 @@
 
 	/* Queued data */
 	size_t			sc_len;
+	ck_ring_t		*sc_msgs;
+	ck_ring_buffer_t	*sc_msgs_buffer;
+	uma_zone_t		 sc_msg_zone;
+
 	TAILQ_HEAD(, pf_state)			sc_qs[PFSYNC_S_COUNT];
 	TAILQ_HEAD(, pfsync_upd_req_item)	sc_upd_req_list;
 	TAILQ_HEAD(, pfsync_deferral)		sc_deferrals;
@@ -233,6 +250,8 @@
 #define	V_pfsyncif		VNET(pfsyncif)
 VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
 #define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
+VNET_DEFINE_STATIC(void *, pfsync_msg_swi_cookie) = NULL;
+#define	V_pfsync_msg_swi_cookie	VNET(pfsync_msg_swi_cookie)
 VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
 #define	V_pfsyncstats		VNET(pfsyncstats)
 VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
@@ -249,12 +268,16 @@
 static int	pfsync_init(void);
 static void	pfsync_uninit(void);
 
+static unsigned long pfsync_max_msgs;
+
 SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
 SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(pfsyncstats), pfsyncstats,
     "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
 SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
     &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
+SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_max_msgs, CTLFLAG_RDTUN,
+    &pfsync_max_msgs, 0, "Maximum in-flight internal messages");
 
 static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
 static void	pfsync_clone_destroy(struct ifnet *);
@@ -265,6 +288,7 @@
 static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
 
 static int	pfsync_defer(struct pf_state *, struct mbuf *);
+static void	pfsync_defer_now(struct pf_state *st, struct mbuf *m);
 static void	pfsync_undefer(struct pfsync_deferral *, int);
 static void	pfsync_undefer_state(struct pf_state *, int);
 static void	pfsync_defer_tmo(void *);
@@ -281,7 +305,10 @@
 static void	pfsync_bulk_update(void *);
 static void	pfsync_bulk_fail(void *);
 
+static void	pfsync_delete_state_now(struct pfsync_softc *,
+		    struct pf_state *st);
 static void	pfsync_detach_ifnet(struct ifnet *);
+
 #ifdef IPSEC
 static void	pfsync_update_net_tdb(struct pfsync_tdb *);
 #endif
@@ -291,6 +318,141 @@
 VNET_DEFINE(struct if_clone *, pfsync_cloner);
 #define	V_pfsync_cloner	VNET(pfsync_cloner)
 
+static void
+pfsync_update_now(struct pfsync_softc *sc, struct pf_state *st)
+{
+	bool sync = false, ref = true;
+
+	if (st->state_flags & PFSTATE_ACK)
+		pfsync_undefer_state(st, 0);
+	if (st->state_flags & PFSTATE_NOSYNC) {
+		if (st->sync_state != PFSYNC_S_NONE)
+			pfsync_q_del(st, true);
+		return;
+	}
+
+	if (sc->sc_len == PFSYNC_MINPKT)
+		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);
+
+	/* Do this when we get a PFSYNC_MSG_UPDATE. */
+	switch (st->sync_state) {
+	case PFSYNC_S_UPD_C:
+	case PFSYNC_S_UPD:
+	case PFSYNC_S_INS:
+		/* we're already handling it */
+
+		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
+			st->sync_updates++;
+			if (st->sync_updates >= sc->sc_maxupdates)
+				sync = true;
+		}
+		break;
+
+	case PFSYNC_S_IACK:
+		pfsync_q_del(st, false);
+		ref = false;
+		/* FALLTHROUGH */
+
+	case PFSYNC_S_NONE:
+		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
+		st->sync_updates = 0;
+		break;
+
+	default:
+		panic("%s: unexpected sync state %d", __func__, st->sync_state);
+	}
+
+	if (sync || (time_uptime - st->pfsync_time) < 2)
+		pfsync_push(sc);
+}
+
+static void
+pfsync_msg_process(struct pfsync_softc *sc, struct pfsync_msg *msg)
+{
+	struct pf_state *st = msg->st;
+	struct mbuf *m = msg->m;
+
+	KASSERT(sc, ("sc == NULL"));
+	KASSERT(msg, ("msg == NULL"));
+	PFSYNC_LOCK_ASSERT(sc);
+
+	/* The state may have expired by the time we get here.
+	 * Deleting doesn't need the key, so that's safe to do. */
+	if (st->timeout >= PFTM_MAX && msg->type != PFSYNC_MSG_DELETE) {
+		pf_release_state(st);
+		return;
+	}
+
+	switch (msg->type)
+	{
+	case PFSYNC_MSG_INSERT:
+		if (sc->sc_len == PFSYNC_MINPKT)
+			callout_reset(&sc->sc_tmo, 1 * hz,
+			    pfsync_timeout, V_pfsyncif);
+
+		pfsync_q_ins(st, PFSYNC_S_INS, true);
+		st->sync_updates = 0;
+		break;
+	case PFSYNC_MSG_DEFER:
+		pfsync_defer_now(st, m);
+		break;
+	case PFSYNC_MSG_UNDEFER:
+		pfsync_undefer_state(st, 0);
+		break;
+	case PFSYNC_MSG_UPDATE:
+		pfsync_update_now(sc, st);
+		break;
+	case PFSYNC_MSG_DELETE:
+		pfsync_delete_state_now(sc, st);
+		break;
+	default:
+		KASSERT(false, ("Unknown message type %d", msg->type));
+		break;
+	}
+
+	/* Let go of the temporary reference we held so we could process the
+	 * state. */
+	pf_release_state(st);
+}
+
+static void
+pfsync_msg_intr(void *arg)
+{
+	struct pfsync_softc *sc = (struct pfsync_softc *)arg;
+	struct pfsync_msg *msg;
+
+	CURVNET_SET(sc->sc_ifp->if_vnet);
+	PFSYNC_LOCK(sc);
+
+	/* Process all of the messages we can. */
+	while (ck_ring_dequeue_mpmc(sc->sc_msgs, sc->sc_msgs_buffer, &msg)) {
+		pfsync_msg_process(sc, msg);
+		uma_zfree(sc->sc_msg_zone, msg);
+	}
+
+	PFSYNC_UNLOCK(sc);
+	CURVNET_RESTORE();
+}
+
+static bool
+pfsync_msg_enqueue(struct pfsync_softc *sc, struct pfsync_msg *msg)
+{
+	KASSERT(msg->type != 0, ("msg type not set"));
+
+	/* Ensure we don't lose the state before we can process it. */
+	pf_ref_state(msg->st);
+
+	if (! ck_ring_enqueue_mpmc(sc->sc_msgs, sc->sc_msgs_buffer, msg)) {
+		pf_release_state(msg->st);
+		return (false);
+	}
+
+	/* Ensure that we process these messages. */
+	swi_sched(V_pfsync_msg_swi_cookie, 0);
+
+	return (true);
+}
+
 static int
 pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
 {
@@ -304,6 +466,25 @@
 
 	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
 	sc->sc_flags |= PFSYNCF_OK;
 
+	/* ck_ring size must be a power of two, and we need a few entries to
+	 * work at all. */
+	if (pfsync_max_msgs < 4)
+		pfsync_max_msgs = 4096;
+	pfsync_max_msgs = 1 << (flsl(pfsync_max_msgs) - 1);
+
+	sc->sc_msgs = malloc(sizeof(ck_ring_t), M_PFSYNC, M_WAITOK);
+	sc->sc_msgs_buffer = malloc(sizeof(ck_ring_buffer_t) * pfsync_max_msgs,
+	    M_PFSYNC, M_WAITOK);
+	ck_ring_init(sc->sc_msgs, pfsync_max_msgs);
+	sc->sc_msg_zone = uma_zcreate("pfsync msg", sizeof(struct pfsync_msg),
+	    NULL, NULL, NULL, NULL, 0, 0);
+	if (! sc->sc_msg_zone) {
+		free(sc->sc_msgs, M_PFSYNC);
+		free(sc->sc_msgs_buffer, M_PFSYNC);
+		free(sc, M_PFSYNC);
+		return (ENOMEM);
+	}
+
 	for (q = 0; q < PFSYNC_S_COUNT; q++)
 		TAILQ_INIT(&sc->sc_qs[q]);
@@ -345,6 +526,7 @@
 pfsync_clone_destroy(struct ifnet *ifp)
 {
 	struct pfsync_softc *sc = ifp->if_softc;
+	struct pfsync_msg *msg;
 
 	/*
 	 * At this stage, everything should have already been
@@ -376,8 +558,18 @@
 	bpfdetach(ifp);
 	if_detach(ifp);
 
+	/* Clean up queued messages */
+	while (ck_ring_dequeue_mpmc(sc->sc_msgs, sc->sc_msgs_buffer, &msg)) {
+		pf_release_state(msg->st);
+		uma_zfree(sc->sc_msg_zone, msg);
+	}
+
 	pfsync_drop(sc);
+	free(sc->sc_msgs, M_PFSYNC);
+	free(sc->sc_msgs_buffer, M_PFSYNC);
+	uma_zdestroy(sc->sc_msg_zone);
+
 	if_free(ifp);
 
 	if (sc->sc_imo.imo_membership)
 		pfsync_multicast_cleanup(sc);
@@ -1438,24 +1630,38 @@
 	return (0);
 }
 
-static void
+static int
 pfsync_out_state(struct pf_state *st, void *buf)
 {
 	struct pfsync_state *sp = buf;
 
+	PF_STATE_LOCK(st);
+
+	/* We need to check again here, because it might have timed out by now.
+	 * Now we can be sure, because we hold the lock. */
+	if (st->timeout >= PFTM_MAX) {
+		PF_STATE_UNLOCK(st);
+		return (ETIMEDOUT);
+	}
+
 	pfsync_state_export(sp, st);
+	PF_STATE_UNLOCK(st);
+
+	return (0);
 }
 
-static void
+static int
 pfsync_out_iack(struct pf_state *st, void *buf)
 {
 	struct pfsync_ins_ack *iack = buf;
 
 	iack->id = st->id;
	iack->creatorid = st->creatorid;
+
+	return (0);
 }
 
-static void
+static int
 pfsync_out_upd_c(struct pf_state *st, void *buf)
 {
 	struct pfsync_upd_c *up = buf;
@@ -1466,9 +1672,11 @@
 	pf_state_peer_hton(&st->dst, &up->dst);
 	up->creatorid = st->creatorid;
 	up->timeout = st->timeout;
+
+	return (0);
 }
 
-static void
+static int
 pfsync_out_del(struct pf_state *st, void *buf)
 {
 	struct pfsync_del_c *dp = buf;
@@ -1476,6 +1684,8 @@
 	dp->id = st->id;
 	dp->creatorid = st->creatorid;
 	st->state_flags |= PFSTATE_NOSYNC;
+
+	return (0);
 }
 
 static void
@@ -1575,8 +1785,8 @@
 			 * XXXGL: some of write methods do unlocked reads
 			 * of state data :(
 			 */
-			pfsync_qs[q].write(st, m->m_data + offset);
-			offset += pfsync_qs[q].len;
+			if (! pfsync_qs[q].write(st, m->m_data + offset))
+				offset += pfsync_qs[q].len;
 			st->sync_state = PFSYNC_S_NONE;
 			pf_release_state(st);
 			count++;
@@ -1655,56 +1865,89 @@
 	swi_sched(V_pfsync_swi_cookie, 0);
 }
 
-static void
+static int
 pfsync_insert_state(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_msg *msg;
 
 	if (st->state_flags & PFSTATE_NOSYNC)
-		return;
+		return (0);
 
 	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
 	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
 		st->state_flags |= PFSTATE_NOSYNC;
-		return;
+		return (0);
 	}
 
 	KASSERT(st->sync_state == PFSYNC_S_NONE,
 	    ("%s: st->sync_state %u", __func__, st->sync_state));
 
-	PFSYNC_LOCK(sc);
-	if (sc->sc_len == PFSYNC_MINPKT)
-		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);
+	msg = uma_zalloc(sc->sc_msg_zone, M_NOWAIT | M_ZERO);
+	if (! msg)
+		return (ENOMEM);
 
-	pfsync_q_ins(st, PFSYNC_S_INS, true);
-	PFSYNC_UNLOCK(sc);
+	msg->st = st;
+	msg->type = PFSYNC_MSG_INSERT;
+
+	if (! pfsync_msg_enqueue(sc, msg)) {
+		uma_zfree(sc->sc_msg_zone, msg);
+		return (ENOBUFS);
+	}
 
-	st->sync_updates = 0;
+	return (0);
 }
 
 static int
 pfsync_defer(struct pf_state *st, struct mbuf *m)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
-	struct pfsync_deferral *pd;
+	struct pfsync_msg *msg;
 
 	if (m->m_flags & (M_BCAST|M_MCAST))
 		return (0);
 
-	PFSYNC_LOCK(sc);
-
 	if (sc == NULL || !(sc->sc_ifp->if_flags & IFF_DRV_RUNNING) ||
 	    !(sc->sc_flags & PFSYNCF_DEFER)) {
-		PFSYNC_UNLOCK(sc);
 		return (0);
 	}
 
-	if (sc->sc_deferred >= 128)
+	msg = uma_zalloc(sc->sc_msg_zone, M_NOWAIT | M_ZERO);
+	if (! msg)
+		return (0);
+
+	msg->st = st;
+	msg->m = m;
+	msg->type = PFSYNC_MSG_DEFER;
+
+	pf_ref_state(st);
+
+	if (! pfsync_msg_enqueue(sc, msg)) {
+		uma_zfree(sc->sc_msg_zone, msg);
+		pf_release_state(st);
+		return (0);
+	}
+
+	return (1);
+}
+
+static void
+pfsync_defer_now(struct pf_state *st, struct mbuf *m)
+{
+	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_deferral *pd;
+
+	PFSYNC_LOCK_ASSERT(sc);
+
+	if (sc->sc_deferred >= 128)
 		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
 
 	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
-	if (pd == NULL)
-		return (0);
+	if (pd == NULL) {
+		pf_release_state(st);
+		m_freem(m);
+		return;
+	}
 	sc->sc_deferred++;
 
 	m->m_flags |= M_SKIP_FIREWALL;
@@ -1721,8 +1964,6 @@
 
 	callout_reset(&pd->pd_tmo, 10, pfsync_defer_tmo, pd);
 	pfsync_push(sc);
-
-	return (1);
 }
 
 static void
@@ -1767,6 +2008,7 @@
 	free(pd, M_PFSYNC);
 	PFSYNC_UNLOCK(sc);
 
+	m->m_flags |= M_SKIP_FIREWALL;
 	ip_output(m, NULL, NULL, 0, NULL, NULL);
 
 	pf_release_state(st);
@@ -1793,58 +2035,27 @@
 	panic("%s: unable to find deferred state", __func__);
 }
 
-static void
+static int
 pfsync_update_state(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
-	bool sync = false, ref = true;
+	struct pfsync_msg *msg;
 
 	PF_STATE_LOCK_ASSERT(st);
-	PFSYNC_LOCK(sc);
 
-	if (st->state_flags & PFSTATE_ACK)
-		pfsync_undefer_state(st, 0);
-	if (st->state_flags & PFSTATE_NOSYNC) {
-		if (st->sync_state != PFSYNC_S_NONE)
-			pfsync_q_del(st, true);
-		PFSYNC_UNLOCK(sc);
-		return;
-	}
-
-	if (sc->sc_len == PFSYNC_MINPKT)
-		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);
-
-	switch (st->sync_state) {
-	case PFSYNC_S_UPD_C:
-	case PFSYNC_S_UPD:
-	case PFSYNC_S_INS:
-		/* we're already handling it */
-
-		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
-			st->sync_updates++;
-			if (st->sync_updates >= sc->sc_maxupdates)
-				sync = true;
-		}
-		break;
-
-	case PFSYNC_S_IACK:
-		pfsync_q_del(st, false);
-		ref = false;
-		/* FALLTHROUGH */
+	msg = uma_zalloc(sc->sc_msg_zone, M_NOWAIT | M_ZERO);
+	if (! msg)
+		return (PFRES_MEMORY);
 
-	case PFSYNC_S_NONE:
-		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
-		st->sync_updates = 0;
-		break;
+	msg->st = st;
+	msg->type = PFSYNC_MSG_UPDATE;
 
-	default:
-		panic("%s: unexpected sync state %d", __func__, st->sync_state);
+	if (! pfsync_msg_enqueue(sc, msg)) {
+		uma_zfree(sc->sc_msg_zone, msg);
+		return (PFRES_CONGEST);
 	}
 
-	if (sync || (time_uptime - st->pfsync_time) < 2)
-		pfsync_push(sc);
-
-	PFSYNC_UNLOCK(sc);
+	return (0);
 }
 
 static void
@@ -1932,15 +2143,37 @@
 pfsync_delete_state(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_msg *msg;
+
+	msg = uma_zalloc(sc->sc_msg_zone, M_NOWAIT | M_ZERO);
+	if (! msg) {
+		PFSYNC_LOCK(sc);
+		pfsync_delete_state_now(sc, st);
+		PFSYNC_UNLOCK(sc);
+		return;
+	}
+
+	msg->st = st;
+	msg->type = PFSYNC_MSG_DELETE;
+
+	if (! pfsync_msg_enqueue(sc, msg)) {
+		uma_zfree(sc->sc_msg_zone, msg);
+
+		PFSYNC_LOCK(sc);
+		pfsync_delete_state_now(sc, st);
+		PFSYNC_UNLOCK(sc);
+	}
+}
+
+static void
+pfsync_delete_state_now(struct pfsync_softc *sc, struct pf_state *st)
+{
 	bool ref = true;
 
-	PFSYNC_LOCK(sc);
 	if (st->state_flags & PFSTATE_ACK)
 		pfsync_undefer_state(st, 1);
 	if (st->state_flags & PFSTATE_NOSYNC) {
 		if (st->sync_state != PFSYNC_S_NONE)
 			pfsync_q_del(st, true);
-		PFSYNC_UNLOCK(sc);
 		return;
 	}
 
@@ -1967,8 +2200,6 @@
 	default:
 		panic("%s: unexpected sync state %d", __func__, st->sync_state);
 	}
-
-	PFSYNC_UNLOCK(sc);
 }
 
 static void
@@ -2225,6 +2456,7 @@
 {
 	struct pfsync_softc *sc = arg;
 	struct mbuf *m, *n;
+	int ret;
 
 	CURVNET_SET(sc->sc_ifp->if_vnet);
 
@@ -2246,10 +2478,16 @@
 		 * own pfsync packet based on M_SKIP_FIREWALL
 		 * flag. This is XXX.
 		 */
-		if (m->m_flags & M_SKIP_FIREWALL)
-			ip_output(m, NULL, NULL, 0, NULL, NULL);
-		else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
-		    NULL) == 0)
+		if (m->m_flags & M_SKIP_FIREWALL) {
+			ret = ip_output(m, NULL, NULL, 0, NULL, NULL);
+		}
+		else {
+			m->m_flags |= M_SKIP_FIREWALL;
+			ret = ip_output(m, NULL, NULL, IP_RAWOUTPUT,
+			    &sc->sc_imo, NULL);
+		}
+
+		if (ret == 0)
 			V_pfsyncstats.pfsyncs_opackets++;
 		else
 			V_pfsyncstats.pfsyncs_oerrors++;
@@ -2373,6 +2611,13 @@
 		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
 	}
 
+	error = swi_add(NULL, pfsyncname, pfsync_msg_intr, V_pfsyncif,
+	    SWI_NET, INTR_TYPE_NET | INTR_MPSAFE, &V_pfsync_msg_swi_cookie);
+	if (error) {
+		if_clone_detach(V_pfsync_cloner);
+		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
+	}
+
 	pfsync_pointers_init();
 }
 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
@@ -2386,6 +2631,7 @@
 
 	if_clone_detach(V_pfsync_cloner);
 	swi_remove(V_pfsync_swi_cookie);
+	swi_remove(V_pfsync_msg_swi_cookie);
 }
 
 VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
Index: sys/netpfil/pf/pf.c
===================================================================
--- sys/netpfil/pf/pf.c
+++ sys/netpfil/pf/pf.c
@@ -1268,8 +1268,14 @@
 	refcount_init(&s->refs, 2);
 	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
 
-	if (V_pfsync_insert_state_ptr != NULL)
-		V_pfsync_insert_state_ptr(s);
+	if (V_pfsync_insert_state_ptr != NULL) {
+		if (V_pfsync_insert_state_ptr(s)) {
+			LIST_REMOVE(s, entry);
+			PF_HASHROW_UNLOCK(ih);
+			pf_detach_state(s);
+			return (ENOMEM);
+		}
+	}
 
 	/* Returns locked. */
 	return (0);
@@ -5897,6 +5903,7 @@
 	struct pf_ruleset	*ruleset = NULL;
 	struct pf_pdesc		pd;
 	int			off, dirndx, pqid = 0;
+	int			err;
 
 	PF_RULES_RLOCK_TRACKER;
 
@@ -5996,8 +6003,13 @@
 		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
 		    &reason);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6025,8 +6037,13 @@
 		}
 		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6048,8 +6065,13 @@
 		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
 		    &reason);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6071,8 +6093,13 @@
 	default:
 		action = pf_test_state_other(&s, dir, kif, m, &pd);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6286,6 +6313,7 @@
 	struct pf_ruleset	*ruleset = NULL;
 	struct pf_pdesc		pd;
 	int			off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0;
+	int			err;
 
 	PF_RULES_RLOCK_TRACKER;
 	M_ASSERTPKTHDR(m);
@@ -6435,8 +6463,13 @@
 		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
 		    &reason);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6464,8 +6497,13 @@
 		}
 		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6494,8 +6532,13 @@
 		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
 		    &reason);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
@@ -6508,8 +6551,13 @@
 	default:
 		action = pf_test_state_other(&s, dir, kif, m, &pd);
 		if (action == PF_PASS) {
-			if (V_pfsync_update_state_ptr != NULL)
-				V_pfsync_update_state_ptr(s);
+			if (V_pfsync_update_state_ptr != NULL) {
+				if ((err = V_pfsync_update_state_ptr(s))) {
+					action = PF_DROP;
+					REASON_SET(&reason, err);
+					goto done;
+				}
+			}
 			r = s->rule.ptr;
 			a = s->anchor.ptr;
 			log = s->log;
Index: sys/netpfil/pf/pf_ioctl.c
===================================================================
--- sys/netpfil/pf/pf_ioctl.c
+++ sys/netpfil/pf/pf_ioctl.c
@@ -3727,6 +3727,9 @@
 void
 pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
 {
+
+	KASSERT(st->timeout < PFTM_MAX, ("Expired state"));
+
 	bzero(sp, sizeof(struct pfsync_state));
 
 	/* copy from state key */