sys/netpfil/pf/if_pfsync.c
Context not available.

 #include <sys/mutex.h>
 #include <sys/priv.h>
 #include <sys/protosw.h>
+#include <sys/smp.h>
 #include <sys/socket.h>
 #include <sys/sockio.h>
 #include <sys/sysctl.h>

Context not available.

 	sizeof(struct pfsync_header) + \
 	sizeof(struct pfsync_subheader) )

+struct pfsync_bucket;
+
 struct pfsync_pkt {
 	struct ip *ip;
 	struct in_addr src;

Context not available.

 };

 static void pfsync_q_ins(struct pf_state *, int, bool);
-static void pfsync_q_del(struct pf_state *, bool);
+static void pfsync_q_del(struct pf_state *, bool, struct pfsync_bucket *);
 static void pfsync_update_state(struct pf_state *);

Context not available.

 	struct mbuf *pd_m;
 };

+struct pfsync_softc;
+struct pfsync_bucket
+{
+	int b_id;
+	struct pfsync_softc *b_sc;
+	struct mtx b_mtx;
+	struct callout b_tmo;
+	int b_flags;
+#define	PFSYNCF_BUCKET_PUSH	0x00000001
+
+	size_t b_len;
+	TAILQ_HEAD(, pf_state) b_qs[PFSYNC_S_COUNT];
+	TAILQ_HEAD(, pfsync_upd_req_item) b_upd_req_list;
+	TAILQ_HEAD(, pfsync_deferral) b_deferrals;
+	u_int b_deferred;
+	void *b_plus;
+	size_t b_pluslen;
+
+	struct ifaltq b_snd;
+};
+
 struct pfsync_softc {
 	/* Configuration */
 	struct ifnet *sc_ifp;

Context not available.

 	uint32_t sc_flags;
 #define	PFSYNCF_OK		0x00000001
 #define	PFSYNCF_DEFER		0x00000002
-#define	PFSYNCF_PUSH		0x00000004
 	uint8_t sc_maxupdates;
 	struct ip sc_template;
-	struct callout sc_tmo;
 	struct mtx sc_mtx;

 	/* Queued data */
-	size_t sc_len;
-	TAILQ_HEAD(, pf_state) sc_qs[PFSYNC_S_COUNT];
-	TAILQ_HEAD(, pfsync_upd_req_item) sc_upd_req_list;
-	TAILQ_HEAD(, pfsync_deferral) sc_deferrals;
-	u_int sc_deferred;
-	void *sc_plus;
-	size_t sc_pluslen;
+	struct pfsync_bucket *sc_buckets;

 	/* Bulk update info */
 	struct mtx sc_bulk_mtx;

Context not available.

 #define PFSYNC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
 #define PFSYNC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)

+#define PFSYNC_BUCKET_LOCK(b) mtx_lock(&(b)->b_mtx)
+#define PFSYNC_BUCKET_UNLOCK(b) mtx_unlock(&(b)->b_mtx)
+#define PFSYNC_BUCKET_LOCK_ASSERT(b) mtx_assert(&(b)->b_mtx, MA_OWNED)
+
 #define PFSYNC_BLOCK(sc) mtx_lock(&(sc)->sc_bulk_mtx)
 #define PFSYNC_BUNLOCK(sc) mtx_unlock(&(sc)->sc_bulk_mtx)
 #define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

Context not available.

 #define V_pfsync_carp_adj VNET(pfsync_carp_adj)

 static void pfsync_timeout(void *);
-static void pfsync_push(struct pfsync_softc *);
+static void pfsync_push(struct pfsync_bucket *);
+static void pfsync_push_all(struct pfsync_softc *);
 static void pfsyncintr(void *);
 static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
 	    void *);

Context not available.

 static int pfsync_init(void);
 static void pfsync_uninit(void);

+static unsigned long pfsync_buckets;
+
 SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
 SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(pfsyncstats), pfsyncstats,
     "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
 SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
     &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
+SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
+    &pfsync_buckets, 0, "Number of pfsync hash buckets");
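Note: pfsync_buckets is declared CTLFLAG_RDTUN, so the bucket count can only be set as a boot-time tunable (e.g. net.pfsync.pfsync_buckets="16" in /boot/loader.conf; the value here is hypothetical). When it is left unset, pfsync_clone_create() below defaults it to mp_ncpus * 2.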
 static int pfsync_clone_create(struct if_clone *, int, caddr_t);
 static void pfsync_clone_destroy(struct ifnet *);

Context not available.

 static void pfsync_defer_tmo(void *);

 static void pfsync_request_update(u_int32_t, u_int64_t);
-static void pfsync_update_state_req(struct pf_state *);
+static bool pfsync_update_state_req(struct pf_state *);

 static void pfsync_drop(struct pfsync_softc *);
-static void pfsync_sendout(int);
+static void pfsync_sendout(int, int);
 static void pfsync_send_plus(void *, size_t);

 static void pfsync_bulk_start(void);

Context not available.

 #ifdef IPSEC
 static void pfsync_update_net_tdb(struct pfsync_tdb *);
 #endif
+static struct pfsync_bucket *pfsync_get_bucket(struct pfsync_softc *,
+	    struct pf_state *);

 #define PFSYNC_MAX_BULKTRIES 12

Context not available.

 {
 	struct pfsync_softc *sc;
 	struct ifnet *ifp;
-	int q;
+	struct pfsync_bucket *b;
+	int c, q;

 	if (unit != 0)
 		return (EINVAL);

-	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
-	sc->sc_flags |= PFSYNCF_OK;
-	for (q = 0; q < PFSYNC_S_COUNT; q++)
-		TAILQ_INIT(&sc->sc_qs[q]);
-	TAILQ_INIT(&sc->sc_upd_req_list);
-	TAILQ_INIT(&sc->sc_deferrals);
-	sc->sc_len = PFSYNC_MINPKT;
+	if (! pfsync_buckets)
+		pfsync_buckets = mp_ncpus * 2;
+
+	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
 	sc->sc_maxupdates = 128;

 	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);

Context not available.

 	ifp->if_ioctl = pfsyncioctl;
 	ifp->if_output = pfsyncoutput;
 	ifp->if_type = IFT_PFSYNC;
-	ifp->if_snd.ifq_maxlen = ifqmaxlen;
 	ifp->if_hdrlen = sizeof(struct pfsync_header);
 	ifp->if_mtu = ETHERMTU;
 	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
 	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
-	callout_init(&sc->sc_tmo, 1);
 	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
 	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

Context not available.

 	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

+	sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
+	    M_PFSYNC, M_ZERO | M_WAITOK);
+	for (c = 0; c < pfsync_buckets; c++) {
+		b = &sc->sc_buckets[c];
+		mtx_init(&b->b_mtx, pfsyncname, NULL, MTX_DEF);
+
+		b->b_id = c;
+		b->b_sc = sc;
+		b->b_len = PFSYNC_MINPKT;
+
+		for (q = 0; q < PFSYNC_S_COUNT; q++)
+			TAILQ_INIT(&b->b_qs[q]);
+
+		TAILQ_INIT(&b->b_upd_req_list);
+		TAILQ_INIT(&b->b_deferrals);
+
+		callout_init(&b->b_tmo, 1);
+
+		b->b_snd.ifq_maxlen = ifqmaxlen;
+	}
+
 	V_pfsyncif = sc;

 	return (0);

Context not available.

 pfsync_clone_destroy(struct ifnet *ifp)
 {
 	struct pfsync_softc *sc = ifp->if_softc;
+	struct pfsync_bucket *b;
+	int c;

-	/*
-	 * At this stage, everything should have already been
-	 * cleared by pfsync_uninit(), and we have only to
-	 * drain callouts.
-	 */
-	while (sc->sc_deferred > 0) {
-		struct pfsync_deferral *pd = TAILQ_FIRST(&sc->sc_deferrals);
-
-		TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
-		sc->sc_deferred--;
-		if (callout_stop(&pd->pd_tmo) > 0) {
-			pf_release_state(pd->pd_st);
-			m_freem(pd->pd_m);
-			free(pd, M_PFSYNC);
-		} else {
-			pd->pd_refs++;
-			callout_drain(&pd->pd_tmo);
-			free(pd, M_PFSYNC);
-		}
-	}
-
-	callout_drain(&sc->sc_tmo);
+	for (c = 0; c < pfsync_buckets; c++) {
+		b = &sc->sc_buckets[c];
+		/*
+		 * At this stage, everything should have already been
+		 * cleared by pfsync_uninit(), and we have only to
+		 * drain callouts.
+		 */
+		while (b->b_deferred > 0) {
+			struct pfsync_deferral *pd =
+			    TAILQ_FIRST(&b->b_deferrals);
+
+			TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
+			b->b_deferred--;
+			if (callout_stop(&pd->pd_tmo) > 0) {
+				pf_release_state(pd->pd_st);
+				m_freem(pd->pd_m);
+				free(pd, M_PFSYNC);
+			} else {
+				pd->pd_refs++;
+				callout_drain(&pd->pd_tmo);
+				free(pd, M_PFSYNC);
+			}
+		}
+		callout_drain(&b->b_tmo);
+	}
 	callout_drain(&sc->sc_bulkfail_tmo);
 	callout_drain(&sc->sc_bulk_tmo);

Context not available.

 	pfsync_multicast_cleanup(sc);
 	mtx_destroy(&sc->sc_mtx);
 	mtx_destroy(&sc->sc_bulk_mtx);
+
+	free(sc->sc_buckets, M_PFSYNC);
 	free(sc, M_PFSYNC);

 	V_pfsyncif = NULL;

Context not available.

 		st->state_flags &= ~PFSTATE_NOSYNC;
 		if (st->state_flags & PFSTATE_ACK) {
 			pfsync_q_ins(st, PFSYNC_S_IACK, true);
-			pfsync_push(sc);
+			pfsync_push_all(sc);
 		}
 	}
 	st->state_flags &= ~PFSTATE_ACK;

Context not available.

 			continue;

 		if (st->state_flags & PFSTATE_ACK) {
-			PFSYNC_LOCK(V_pfsyncif);
 			pfsync_undefer_state(st, 0);
-			PFSYNC_UNLOCK(V_pfsyncif);
 		}
 		PF_STATE_UNLOCK(st);
 	}

Context not available.

 		}

 		if (st->state_flags & PFSTATE_ACK) {
-			PFSYNC_LOCK(sc);
 			pfsync_undefer_state(st, 1);
-			PFSYNC_UNLOCK(sc);
 		}
 		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)

Context not available.

 			pfsync_update_state(st);
 			PF_STATE_UNLOCK(st);
-			PFSYNC_LOCK(sc);
-			pfsync_push(sc);
-			PFSYNC_UNLOCK(sc);
+			pfsync_push_all(sc);
 			continue;
 		}
 		PF_STATE_UNLOCK(st);

Context not available.

 		st = pf_find_state_byid(up->id, up->creatorid);
 		if (st == NULL) {
 			/* We don't have this state. Ask for it. */
-			PFSYNC_LOCK(sc);
+			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
 			pfsync_request_update(up->creatorid, up->id);
-			PFSYNC_UNLOCK(sc);
+			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
 			continue;
 		}

 		if (st->state_flags & PFSTATE_ACK) {
-			PFSYNC_LOCK(sc);
 			pfsync_undefer_state(st, 1);
-			PFSYNC_UNLOCK(sc);
 		}
 		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)

Context not available.

 			pfsync_update_state(st);
 			PF_STATE_UNLOCK(st);
-			PFSYNC_LOCK(sc);
-			pfsync_push(sc);
-			PFSYNC_UNLOCK(sc);
+			pfsync_push_all(sc);
 			continue;
 		}
 		PF_STATE_UNLOCK(st);

Context not available.

 	struct ifreq *ifr = (struct ifreq *)data;
 	struct pfsyncreq pfsyncr;
 	int error;
+	int c;

 	switch (cmd) {
 	case SIOCSIFFLAGS:

Context not available.

 		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
 			return (EINVAL);
 		if (ifr->ifr_mtu < ifp->if_mtu) {
-			PFSYNC_LOCK(sc);
-			if (sc->sc_len > PFSYNC_MINPKT)
-				pfsync_sendout(1);
-			PFSYNC_UNLOCK(sc);
+			for (c = 0; c < pfsync_buckets; c++) {
+				PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
+				if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
+					pfsync_sendout(1, c);

eri: Is this respecting at all the ifqmaxlen that is set when the interface is created?

kp: Yes, in pfsync_sendout() there's a check (_IF_QFULL). There's no change here, other than that the queue is now per-bucket (but so is the maxlen).
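For reference, the enqueue path in pfsync_sendout() as changed by this revision (shown further down in this diff) still bounds the queue; each bucket's b_snd.ifq_maxlen is initialized to ifqmaxlen in pfsync_clone_create():

	if (!_IF_QFULL(&b->b_snd))
		_IF_ENQUEUE(&b->b_snd, m);
	else {
		m_freem(m);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
	}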
+				PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
+			}
 		}
 		ifp->if_mtu = ifr->ifr_mtu;
 		break;

Context not available.

 			break;
 		}

-		if (sc->sc_len > PFSYNC_MINPKT &&
-		    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
-		    (sc->sc_sync_if != NULL &&
-		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
-		    sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
-			pfsync_sendout(1);
+		for (c = 0; c < pfsync_buckets; c++) {
+			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
+			if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
+			    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
+			    (sc->sc_sync_if != NULL &&
+			    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
+			    sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
+				pfsync_sendout(1, c);

eri: Same here, is the ifqmaxlen being honored?

+			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
+		}

 		if (imo->imo_membership)
 			pfsync_multicast_cleanup(sc);

Context not available.

 			sc->sc_flags &= ~PFSYNCF_OK;
 			if (V_pf_status.debug >= PF_DEBUG_MISC)
 				printf("pfsync: requesting bulk update\n");
-			pfsync_request_update(0, 0);
 			PFSYNC_UNLOCK(sc);
+
+			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
+			pfsync_request_update(0, 0);
+			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);

 			PFSYNC_BLOCK(sc);
 			sc->sc_ureq_sent = time_uptime;
 			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,

Context not available.

 {
 	struct pf_state *st, *next;
 	struct pfsync_upd_req_item *ur;
-	int q;
+	struct pfsync_bucket *b;
+	int c, q;

-	for (q = 0; q < PFSYNC_S_COUNT; q++) {
-		if (TAILQ_EMPTY(&sc->sc_qs[q]))
-			continue;
-
-		TAILQ_FOREACH_SAFE(st, &sc->sc_qs[q], sync_list, next) {
-			KASSERT(st->sync_state == q,
-				("%s: st->sync_state == q",
-					__func__));
-			st->sync_state = PFSYNC_S_NONE;
-			pf_release_state(st);
-		}
-		TAILQ_INIT(&sc->sc_qs[q]);
-	}
-
-	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
-		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
-		free(ur, M_PFSYNC);
-	}
-
-	sc->sc_plus = NULL;
-	sc->sc_len = PFSYNC_MINPKT;
+	for (c = 0; c < pfsync_buckets; c++) {
+		b = &sc->sc_buckets[c];
+		for (q = 0; q < PFSYNC_S_COUNT; q++) {
+			if (TAILQ_EMPTY(&b->b_qs[q]))
+				continue;
+
+			TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
+				KASSERT(st->sync_state == q,
+					("%s: st->sync_state == q",
+						__func__));
+				st->sync_state = PFSYNC_S_NONE;
+				pf_release_state(st);
+			}
+			TAILQ_INIT(&b->b_qs[q]);
+		}
+
+		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
+			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
+			free(ur, M_PFSYNC);
+		}
+
+		b->b_len = PFSYNC_MINPKT;
+		b->b_plus = NULL;
+	}
 }

 static void
-pfsync_sendout(int schedswi)
+pfsync_sendout(int schedswi, int c)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
 	struct ifnet *ifp = sc->sc_ifp;

Context not available.

 	struct pfsync_subheader *subh;
 	struct pf_state *st, *st_next;
 	struct pfsync_upd_req_item *ur;
+	struct pfsync_bucket *b = &sc->sc_buckets[c];
 	int offset;
 	int q, count = 0;

 	KASSERT(sc != NULL, ("%s: null sc", __func__));
-	KASSERT(sc->sc_len > PFSYNC_MINPKT,
-	    ("%s: sc_len %zu", __func__, sc->sc_len));
-	PFSYNC_LOCK_ASSERT(sc);
+	KASSERT(b->b_len > PFSYNC_MINPKT,
+	    ("%s: sc_len %zu", __func__, b->b_len));
+	PFSYNC_BUCKET_LOCK_ASSERT(b);

 	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
 		pfsync_drop(sc);
 		return;
 	}

-	m = m_get2(max_linkhdr + sc->sc_len, M_NOWAIT, MT_DATA, M_PKTHDR);
+	m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
 	if (m == NULL) {
 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
 		V_pfsyncstats.pfsyncs_onomem++;
 		return;
 	}
 	m->m_data += max_linkhdr;
-	m->m_len = m->m_pkthdr.len = sc->sc_len;
+	m->m_len = m->m_pkthdr.len = b->b_len;

 	/* build the ip header */
 	ip = (struct ip *)m->m_data;

Context not available.

 	offset += sizeof(*ph);

 	ph->version = PFSYNC_VERSION;
-	ph->len = htons(sc->sc_len - sizeof(*ip));
+	ph->len = htons(b->b_len - sizeof(*ip));
 	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);

 	/* walk the queues */
 	for (q = 0; q < PFSYNC_S_COUNT; q++) {
-		if (TAILQ_EMPTY(&sc->sc_qs[q]))
+		if (TAILQ_EMPTY(&b->b_qs[q]))
 			continue;

 		subh = (struct pfsync_subheader *)(m->m_data + offset);
 		offset += sizeof(*subh);

 		count = 0;
-		TAILQ_FOREACH_SAFE(st, &sc->sc_qs[q], sync_list, st_next) {
+		TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
 			KASSERT(st->sync_state == q,
 				("%s: st->sync_state == q",
 					__func__));

Context not available.

 			pf_release_state(st);
 			count++;
 		}
-		TAILQ_INIT(&sc->sc_qs[q]);
+		TAILQ_INIT(&b->b_qs[q]);

 		bzero(subh, sizeof(*subh));
 		subh->action = pfsync_qs[q].action;

Context not available.

 		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
 	}

-	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
+	if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
 		subh = (struct pfsync_subheader *)(m->m_data + offset);
 		offset += sizeof(*subh);

 		count = 0;
-		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
-			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
+		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
+			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);

 			bcopy(&ur->ur_msg, m->m_data + offset,
 			    sizeof(ur->ur_msg));

Context not available.

 	}

 	/* has someone built a custom region for us to add? */
-	if (sc->sc_plus != NULL) {
-		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
-		offset += sc->sc_pluslen;
-
-		sc->sc_plus = NULL;
+	if (b->b_plus != NULL) {
+		bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
+		offset += b->b_pluslen;
+
+		b->b_plus = NULL;
 	}

 	subh = (struct pfsync_subheader *)(m->m_data + offset);

Context not available.

 	/* we're done, let's put it on the wire */
 	if (ifp->if_bpf) {
 		m->m_data += sizeof(*ip);
-		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
+		m->m_len = m->m_pkthdr.len = b->b_len - sizeof(*ip);
 		BPF_MTAP(ifp, m);
 		m->m_data -= sizeof(*ip);
-		m->m_len = m->m_pkthdr.len = sc->sc_len;
+		m->m_len = m->m_pkthdr.len = b->b_len;
 	}

 	if (sc->sc_sync_if == NULL) {
-		sc->sc_len = PFSYNC_MINPKT;
+		b->b_len = PFSYNC_MINPKT;
 		m_freem(m);
 		return;
 	}

 	if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
 	if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
-	sc->sc_len = PFSYNC_MINPKT;
+	b->b_len = PFSYNC_MINPKT;

-	if (!_IF_QFULL(&sc->sc_ifp->if_snd))
-		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
+	if (!_IF_QFULL(&b->b_snd))
+		_IF_ENQUEUE(&b->b_snd, m);
 	else {
 		m_freem(m);
 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);

Context not available.

 pfsync_insert_state(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

 	if (st->state_flags & PFSTATE_NOSYNC)
 		return;

Context not available.

 	KASSERT(st->sync_state == PFSYNC_S_NONE,
 		("%s: st->sync_state %u", __func__, st->sync_state));

-	PFSYNC_LOCK(sc);
-	if (sc->sc_len == PFSYNC_MINPKT)
-		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);
+	PFSYNC_BUCKET_LOCK(b);
+	if (b->b_len == PFSYNC_MINPKT)
+		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

 	pfsync_q_ins(st, PFSYNC_S_INS, true);
-	PFSYNC_UNLOCK(sc);
+	PFSYNC_BUCKET_UNLOCK(b);

 	st->sync_updates = 0;
 }

Context not available.

 {
 	struct pfsync_softc *sc = V_pfsyncif;
 	struct pfsync_deferral *pd;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

 	if (m->m_flags & (M_BCAST|M_MCAST))
 		return (0);

Context not available.

 		return (0);
 	}

-	if (sc->sc_deferred >= 128)
-		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
+	if (b->b_deferred >= 128)
+		pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);

 	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
 	if (pd == NULL)
 		return (0);
-	sc->sc_deferred++;
+	b->b_deferred++;

 	m->m_flags |= M_SKIP_FIREWALL;
 	st->state_flags |= PFSTATE_ACK;

Context not available.

 	pf_ref_state(st);
 	pd->pd_m = m;

-	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
-	callout_init_mtx(&pd->pd_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED);
+	TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
+	callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
 	callout_reset(&pd->pd_tmo, 10, pfsync_defer_tmo, pd);

-	pfsync_push(sc);
+	pfsync_push(b);

 	return (1);
 }

Context not available.

 	struct pfsync_softc *sc = pd->pd_sc;
 	struct mbuf *m = pd->pd_m;
 	struct pf_state *st = pd->pd_st;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK_ASSERT(b);

-	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
-	sc->sc_deferred--;
+	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
+	b->b_deferred--;
 	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
 	free(pd, M_PFSYNC);
 	pf_release_state(st);

Context not available.

 	if (drop)
 		m_freem(m);
 	else {
-		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
-		pfsync_push(sc);
+		_IF_ENQUEUE(&b->b_snd, m);
+		pfsync_push(b);
 	}
 }

Context not available.

 	struct pfsync_softc *sc = pd->pd_sc;
 	struct mbuf *m = pd->pd_m;
 	struct pf_state *st = pd->pd_st;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK_ASSERT(b);

 	CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);

-	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
-	sc->sc_deferred--;
+	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
+	b->b_deferred--;
 	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
 	if (pd->pd_refs == 0)
 		free(pd, M_PFSYNC);

Context not available.

 {
 	struct pfsync_softc *sc = V_pfsyncif;
 	struct pfsync_deferral *pd;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK(b);

-	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
+	TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
 		if (pd->pd_st == st) {
 			if (callout_stop(&pd->pd_tmo) > 0)
 				pfsync_undefer(pd, drop);
+
+			PFSYNC_BUCKET_UNLOCK(b);
 			return;
 		}
 	}
+	PFSYNC_BUCKET_UNLOCK(b);

 	panic("%s: unable to find deferred state", __func__);
 }

+static struct pfsync_bucket*
+pfsync_get_bucket(struct pfsync_softc *sc, struct pf_state *st)
+{
+	int c = PF_IDHASH(st) % pfsync_buckets;
+	return &sc->sc_buckets[c];
+}
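Since PF_IDHASH() depends only on the state's ID, a given state always hashes to the same bucket, so all sync-queue and deferral handling for that state is serialized on a single bucket mutex. A minimal sketch of the locking pattern the callers below follow (names as in this diff):

	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK(b);
	/* operate on b->b_qs[], b->b_deferrals, b->b_len for this state */
	PFSYNC_BUCKET_UNLOCK(b);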
 static void
 pfsync_update_state(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
 	bool sync = false, ref = true;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

 	PF_STATE_LOCK_ASSERT(st);
-	PFSYNC_LOCK(sc);
+	PFSYNC_BUCKET_LOCK(b);

 	if (st->state_flags & PFSTATE_ACK)
 		pfsync_undefer_state(st, 0);
 	if (st->state_flags & PFSTATE_NOSYNC) {
 		if (st->sync_state != PFSYNC_S_NONE)
-			pfsync_q_del(st, true);
-		PFSYNC_UNLOCK(sc);
+			pfsync_q_del(st, true, b);
+		PFSYNC_BUCKET_UNLOCK(b);
 		return;
 	}

-	if (sc->sc_len == PFSYNC_MINPKT)
-		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);
+	if (b->b_len == PFSYNC_MINPKT)
+		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

 	switch (st->sync_state) {
 	case PFSYNC_S_UPD_C:

Context not available.

 		break;

 	case PFSYNC_S_IACK:
-		pfsync_q_del(st, false);
+		pfsync_q_del(st, false, b);
 		ref = false;
 		/* FALLTHROUGH */

Context not available.

 	}

 	if (sync || (time_uptime - st->pfsync_time) < 2)
-		pfsync_push(sc);
+		pfsync_push(b);

-	PFSYNC_UNLOCK(sc);
+	PFSYNC_BUCKET_UNLOCK(b);
 }

 static void
 pfsync_request_update(u_int32_t creatorid, u_int64_t id)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_bucket *b = &sc->sc_buckets[0];
 	struct pfsync_upd_req_item *item;
 	size_t nlen = sizeof(struct pfsync_upd_req);

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK_ASSERT(b);

 	/*
 	 * This code does a bit to prevent multiple update requests for the
 	 * same state being generated. It searches current subheader queue,
 	 * but it doesn't lookup into queue of already packed datagrams.
 	 */
-	TAILQ_FOREACH(item, &sc->sc_upd_req_list, ur_entry)
+	TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
 		if (item->ur_msg.id == id &&
 		    item->ur_msg.creatorid == creatorid)
 			return;

Context not available.

 	item->ur_msg.id = id;
 	item->ur_msg.creatorid = creatorid;

-	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
+	if (TAILQ_EMPTY(&b->b_upd_req_list))
 		nlen += sizeof(struct pfsync_subheader);

-	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
-		pfsync_sendout(1);
+	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
+		pfsync_sendout(1, 0);

 		nlen = sizeof(struct pfsync_subheader) +
 		    sizeof(struct pfsync_upd_req);
 	}

-	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
-	sc->sc_len += nlen;
+	TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
+	b->b_len += nlen;
 }

-static void
+static bool
 pfsync_update_state_req(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
-	bool ref = true;
+	bool ref = true, full = false;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

 	PF_STATE_LOCK_ASSERT(st);
-	PFSYNC_LOCK(sc);
+	PFSYNC_BUCKET_LOCK(b);

 	if (st->state_flags & PFSTATE_NOSYNC) {
 		if (st->sync_state != PFSYNC_S_NONE)
-			pfsync_q_del(st, true);
-		PFSYNC_UNLOCK(sc);
-		return;
+			pfsync_q_del(st, true, b);
+		PFSYNC_BUCKET_UNLOCK(b);
+		return (full);
 	}

 	switch (st->sync_state) {
 	case PFSYNC_S_UPD_C:
 	case PFSYNC_S_IACK:
-		pfsync_q_del(st, false);
+		pfsync_q_del(st, false, b);
 		ref = false;
 		/* FALLTHROUGH */

 	case PFSYNC_S_NONE:
 		pfsync_q_ins(st, PFSYNC_S_UPD, ref);
-		pfsync_push(sc);
+		pfsync_push(b);
 		break;

 	case PFSYNC_S_INS:

Context not available.

 		panic("%s: unexpected sync state %d", __func__, st->sync_state);
 	}

-	PFSYNC_UNLOCK(sc);
+	if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(struct pfsync_state))
+		full = true;
+
+	PFSYNC_BUCKET_UNLOCK(b);
+
+	return (full);
 }

 static void
 pfsync_delete_state(struct pf_state *st)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 	bool ref = true;

-	PFSYNC_LOCK(sc);
+	PFSYNC_BUCKET_LOCK(b);
 	if (st->state_flags & PFSTATE_ACK)
 		pfsync_undefer_state(st, 1);
 	if (st->state_flags & PFSTATE_NOSYNC) {
 		if (st->sync_state != PFSYNC_S_NONE)
-			pfsync_q_del(st, true);
-		PFSYNC_UNLOCK(sc);
+			pfsync_q_del(st, true, b);
+		PFSYNC_BUCKET_UNLOCK(b);
 		return;
 	}

-	if (sc->sc_len == PFSYNC_MINPKT)
-		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);
+	if (b->b_len == PFSYNC_MINPKT)
+		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

 	switch (st->sync_state) {
 	case PFSYNC_S_INS:
 		/* We never got to tell the world so just forget about it. */
-		pfsync_q_del(st, true);
+		pfsync_q_del(st, true, b);
 		break;

 	case PFSYNC_S_UPD_C:
 	case PFSYNC_S_UPD:
 	case PFSYNC_S_IACK:
-		pfsync_q_del(st, false);
+		pfsync_q_del(st, false, b);
 		ref = false;
 		/* FALLTHROUGH */

Context not available.

 		panic("%s: unexpected sync state %d", __func__, st->sync_state);
 	}

-	PFSYNC_UNLOCK(sc);
+	PFSYNC_BUCKET_UNLOCK(b);
 }

 static void
 pfsync_clear_states(u_int32_t creatorid, const char *ifname)
 {
-	struct pfsync_softc *sc = V_pfsyncif;
 	struct {
 		struct pfsync_subheader subh;
 		struct pfsync_clr clr;

Context not available.

 	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
 	r.clr.creatorid = creatorid;

-	PFSYNC_LOCK(sc);
 	pfsync_send_plus(&r, sizeof(r));
-	PFSYNC_UNLOCK(sc);
 }

 static void

Context not available.

 {
 	struct pfsync_softc *sc = V_pfsyncif;
 	size_t nlen = pfsync_qs[q].len;
+	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK_ASSERT(b);

 	KASSERT(st->sync_state == PFSYNC_S_NONE,
 		("%s: st->sync_state %u", __func__, st->sync_state));
-	KASSERT(sc->sc_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
-	    sc->sc_len));
+	KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
+	    b->b_len));

-	if (TAILQ_EMPTY(&sc->sc_qs[q]))
+	if (TAILQ_EMPTY(&b->b_qs[q]))
 		nlen += sizeof(struct pfsync_subheader);

-	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
-		pfsync_sendout(1);
+	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
+		pfsync_sendout(1, b->b_id);

 		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
 	}

-	sc->sc_len += nlen;
-	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
+	b->b_len += nlen;
+	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
 	st->sync_state = q;
 	if (ref)
 		pf_ref_state(st);
 }

 static void
-pfsync_q_del(struct pf_state *st, bool unref)
+pfsync_q_del(struct pf_state *st, bool unref, struct pfsync_bucket *b)
 {
-	struct pfsync_softc *sc = V_pfsyncif;
 	int q = st->sync_state;

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK_ASSERT(b);
 	KASSERT(st->sync_state != PFSYNC_S_NONE,
 		("%s: st->sync_state != PFSYNC_S_NONE", __func__));

-	sc->sc_len -= pfsync_qs[q].len;
-	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
+	b->b_len -= pfsync_qs[q].len;
+	TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
 	st->sync_state = PFSYNC_S_NONE;
 	if (unref)
 		pf_release_state(st);

-	if (TAILQ_EMPTY(&sc->sc_qs[q]))
-		sc->sc_len -= sizeof(struct pfsync_subheader);
+	if (TAILQ_EMPTY(&b->b_qs[q]))
+		b->b_len -= sizeof(struct pfsync_subheader);
 }

 static void

Context not available.

 	}

 	for (; s; s = LIST_NEXT(s, entry)) {
-		if (sent > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
-		    sizeof(struct pfsync_state)) {
-			/* We've filled a packet. */
-			sc->sc_bulk_hashid = i;
-			sc->sc_bulk_stateid = s->id;
-			sc->sc_bulk_creatorid = s->creatorid;
-			PF_HASHROW_UNLOCK(ih);
-			callout_reset(&sc->sc_bulk_tmo, 1,
-			    pfsync_bulk_update, sc);
-			goto full;
-		}
-
 		if (s->sync_state == PFSYNC_S_NONE &&
 		    s->timeout < PFTM_MAX &&
 		    s->pfsync_time <= sc->sc_ureq_received) {
-			pfsync_update_state_req(s);
+			if (pfsync_update_state_req(s)) {
+				/* We've filled a packet. */
+				sc->sc_bulk_hashid = i;
+				sc->sc_bulk_stateid = s->id;
+				sc->sc_bulk_creatorid = s->creatorid;
+				PF_HASHROW_UNLOCK(ih);
+				callout_reset(&sc->sc_bulk_tmo, 1,
+				    pfsync_bulk_update, sc);
+				goto full;
+			}
 			sent++;
 		}
 	}

Context not available.

 	/* We're done. */
 	pfsync_bulk_status(PFSYNC_BUS_END);

 full:
 	CURVNET_RESTORE();
 }

Context not available.

 	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
 	r.bus.status = status;

-	PFSYNC_LOCK(sc);
 	pfsync_send_plus(&r, sizeof(r));
-	PFSYNC_UNLOCK(sc);
 }

 static void
 pfsync_bulk_fail(void *arg)
 {
 	struct pfsync_softc *sc = arg;
+	struct pfsync_bucket *b = &sc->sc_buckets[0];

 	CURVNET_SET(sc->sc_ifp->if_vnet);

Context not available.

 		/* Try again */
 		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
 		    pfsync_bulk_fail, V_pfsyncif);

-		PFSYNC_LOCK(sc);
+		PFSYNC_BUCKET_LOCK(b);
 		pfsync_request_update(0, 0);
-		PFSYNC_UNLOCK(sc);
+		PFSYNC_BUCKET_UNLOCK(b);
 	} else {
 		/* Pretend like the transfer was ok. */
 		sc->sc_ureq_sent = 0;

Context not available.

 pfsync_send_plus(void *plus, size_t pluslen)
 {
 	struct pfsync_softc *sc = V_pfsyncif;
+	struct pfsync_bucket *b = &sc->sc_buckets[0];

-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK(b);

-	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu)
-		pfsync_sendout(1);
+	if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
+		pfsync_sendout(1, b->b_id);

-	sc->sc_plus = plus;
-	sc->sc_len += (sc->sc_pluslen = pluslen);
+	b->b_plus = plus;
+	b->b_len += (b->b_pluslen = pluslen);

-	pfsync_sendout(1);
+	pfsync_sendout(1, b->b_id);
+	PFSYNC_BUCKET_UNLOCK(b);
 }

 static void
 pfsync_timeout(void *arg)
 {
-	struct pfsync_softc *sc = arg;
+	struct pfsync_bucket *b = arg;

-	CURVNET_SET(sc->sc_ifp->if_vnet);
-	PFSYNC_LOCK(sc);
-	pfsync_push(sc);
-	PFSYNC_UNLOCK(sc);
+	CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
+	PFSYNC_BUCKET_LOCK(b);
+	pfsync_push(b);
+	PFSYNC_BUCKET_UNLOCK(b);
 	CURVNET_RESTORE();
 }

 static void
-pfsync_push(struct pfsync_softc *sc)
+pfsync_push(struct pfsync_bucket *b)
 {
-	PFSYNC_LOCK_ASSERT(sc);
+	PFSYNC_BUCKET_LOCK_ASSERT(b);

-	sc->sc_flags |= PFSYNCF_PUSH;
+	b->b_flags |= PFSYNCF_BUCKET_PUSH;
 	swi_sched(V_pfsync_swi_cookie, 0);
 }

+static void
+pfsync_push_all(struct pfsync_softc *sc)
+{
+	int c;
+	struct pfsync_bucket *b;
+
+	for (c = 0; c < pfsync_buckets; c++) {
+		b = &sc->sc_buckets[c];
+
+		PFSYNC_BUCKET_LOCK(b);
+		pfsync_push(b);
+		PFSYNC_BUCKET_UNLOCK(b);
+	}
+}
+
 static void
 pfsyncintr(void *arg)
 {
 	struct pfsync_softc *sc = arg;
+	struct pfsync_bucket *b;
 	struct mbuf *m, *n;
+	int c;

 	CURVNET_SET(sc->sc_ifp->if_vnet);

-	PFSYNC_LOCK(sc);
-	if ((sc->sc_flags & PFSYNCF_PUSH) && sc->sc_len > PFSYNC_MINPKT) {
-		pfsync_sendout(0);
-		sc->sc_flags &= ~PFSYNCF_PUSH;
-	}
-	_IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);
-	PFSYNC_UNLOCK(sc);
-
-	for (; m != NULL; m = n) {
-
-		n = m->m_nextpkt;
-		m->m_nextpkt = NULL;
-
-		/*
-		 * We distinguish between a deferral packet and our
-		 * own pfsync packet based on M_SKIP_FIREWALL
-		 * flag. This is XXX.
-		 */
-		if (m->m_flags & M_SKIP_FIREWALL)
-			ip_output(m, NULL, NULL, 0, NULL, NULL);
-		else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
-		    NULL) == 0)
-			V_pfsyncstats.pfsyncs_opackets++;
-		else
-			V_pfsyncstats.pfsyncs_oerrors++;
+	for (c = 0; c < pfsync_buckets; c++) {
+		b = &sc->sc_buckets[c];
+
+		PFSYNC_BUCKET_LOCK(b);
+		if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
+			pfsync_sendout(0, b->b_id);
+			b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
+		}
+		_IF_DEQUEUE_ALL(&b->b_snd, m);
+		PFSYNC_BUCKET_UNLOCK(b);
+
+		for (; m != NULL; m = n) {
+			n = m->m_nextpkt;
+			m->m_nextpkt = NULL;
+
+			/*
+			 * We distinguish between a deferral packet and our
+			 * own pfsync packet based on M_SKIP_FIREWALL
+			 * flag. This is XXX.
+			 */
+			if (m->m_flags & M_SKIP_FIREWALL)
+				ip_output(m, NULL, NULL, 0, NULL, NULL);
+			else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
+			    NULL) == 0)
+				V_pfsyncstats.pfsyncs_opackets++;
+			else
+				V_pfsyncstats.pfsyncs_oerrors++;
+		}
 	}
 	CURVNET_RESTORE();
 }

Context not available.