diff --git a/lib/libpfctl/libpfctl.c b/lib/libpfctl/libpfctl.c index f34f83cfb57c..c75f9ab12889 100644 --- a/lib/libpfctl/libpfctl.c +++ b/lib/libpfctl/libpfctl.c @@ -1,1506 +1,1507 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libpfctl.h" const char* PFCTL_SYNCOOKIES_MODE_NAMES[] = { "never", "always", "adaptive" }; static int _pfctl_clear_states(int , const struct pfctl_kill *, unsigned int *, uint64_t); static int pfctl_do_ioctl(int dev, uint cmd, size_t size, nvlist_t **nvl) { struct pfioc_nv nv; void *data; size_t nvlen; int ret; data = nvlist_pack(*nvl, &nvlen); retry: nv.data = malloc(size); memcpy(nv.data, data, nvlen); free(data); nv.len = nvlen; nv.size = size; ret = ioctl(dev, cmd, &nv); if (ret == -1 && errno == ENOSPC) { size *= 2; free(nv.data); goto retry; } nvlist_destroy(*nvl); *nvl = NULL; if (ret == 0) { *nvl = nvlist_unpack(nv.data, nv.len, 0); if (*nvl == NULL) { free(nv.data); return (EIO); } } else { ret = errno; } free(nv.data); return (ret); } static void pf_nvuint_8_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint8_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void pf_nvuint_16_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint16_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void pf_nvuint_32_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint32_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void pf_nvuint_64_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint64_t 
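/*
 * [Editorial sketch -- not part of this patch.]  pfctl_do_ioctl() above
 * packs the request nvlist, submits it with the caller's initial buffer
 * size, and doubles the buffer on ENOSPC until the kernel's reply fits.
 * On success it replaces *nvl with the unpacked reply; on failure *nvl is
 * destroyed and an errno value is returned.  A hypothetical caller
 * following that contract (the ioctl and size here are illustrative only):
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	int error;
 *
 *	nvlist_add_string(nvl, "anchor", "");
 *	error = pfctl_do_ioctl(dev, DIOCGETETHRULES, 1024, &nvl);
 *	if (error == 0) {
 *		printf("%ju rules\n",
 *		    (uintmax_t)nvlist_get_number(nvl, "nr"));
 *		nvlist_destroy(nvl);
 *	}
 */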
*numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void _pfctl_get_status_counters(const nvlist_t *nvl, struct pfctl_status_counters *counters) { const uint64_t *ids, *counts; const char *const *names; size_t id_len, counter_len, names_len; ids = nvlist_get_number_array(nvl, "ids", &id_len); counts = nvlist_get_number_array(nvl, "counters", &counter_len); names = nvlist_get_string_array(nvl, "names", &names_len); assert(id_len == counter_len); assert(counter_len == names_len); TAILQ_INIT(counters); for (size_t i = 0; i < id_len; i++) { struct pfctl_status_counter *c; c = malloc(sizeof(*c)); c->id = ids[i]; c->counter = counts[i]; c->name = strdup(names[i]); TAILQ_INSERT_TAIL(counters, c, entry); } } struct pfctl_status * pfctl_get_status(int dev) { struct pfctl_status *status; nvlist_t *nvl; size_t len; const void *chksum; status = calloc(1, sizeof(*status)); if (status == NULL) return (NULL); nvl = nvlist_create(0); if (pfctl_do_ioctl(dev, DIOCGETSTATUSNV, 4096, &nvl)) { free(status); return (NULL); } status->running = nvlist_get_bool(nvl, "running"); status->since = nvlist_get_number(nvl, "since"); status->debug = nvlist_get_number(nvl, "debug"); status->hostid = ntohl(nvlist_get_number(nvl, "hostid")); status->states = nvlist_get_number(nvl, "states"); status->src_nodes = nvlist_get_number(nvl, "src_nodes"); status->syncookies_active = nvlist_get_bool(nvl, "syncookies_active"); + status->reass = nvlist_get_number(nvl, "reass"); strlcpy(status->ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); chksum = nvlist_get_binary(nvl, "chksum", &len); assert(len == PF_MD5_DIGEST_LENGTH); memcpy(status->pf_chksum, chksum, len); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "counters"), &status->counters); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "lcounters"), &status->lcounters); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "fcounters"), &status->fcounters); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "scounters"), &status->scounters); pf_nvuint_64_array(nvl, "pcounters", 2 * 2 * 3, (uint64_t *)status->pcounters, NULL); pf_nvuint_64_array(nvl, "bcounters", 2 * 2, (uint64_t *)status->bcounters, NULL); nvlist_destroy(nvl); return (status); } void pfctl_free_status(struct pfctl_status *status) { struct pfctl_status_counter *c, *tmp; TAILQ_FOREACH_SAFE(c, &status->counters, entry, tmp) { free(c->name); free(c); } TAILQ_FOREACH_SAFE(c, &status->lcounters, entry, tmp) { free(c->name); free(c); } TAILQ_FOREACH_SAFE(c, &status->fcounters, entry, tmp) { free(c->name); free(c); } TAILQ_FOREACH_SAFE(c, &status->scounters, entry, tmp) { free(c->name); free(c); } free(status); } static void pfctl_nv_add_addr(nvlist_t *nvparent, const char *name, const struct pf_addr *addr) { nvlist_t *nvl = nvlist_create(0); nvlist_add_binary(nvl, "addr", addr, sizeof(*addr)); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *addr) { size_t len; const void *data; data = nvlist_get_binary(nvl, "addr", &len); assert(len == sizeof(struct pf_addr)); memcpy(addr, data, len); } static void pfctl_nv_add_addr_wrap(nvlist_t *nvparent, const char *name, const struct pf_addr_wrap *addr) { nvlist_t *nvl = nvlist_create(0); nvlist_add_number(nvl, "type", addr->type); nvlist_add_number(nvl, "iflags", addr->iflags); if (addr->type == PF_ADDR_DYNIFTL) 
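/*
 * [Editorial sketch -- not part of this patch.]  Consuming the status API,
 * including the "reass" field this change starts exporting; the counter
 * collections are plain TAILQs of pfctl_status_counter.  Hypothetical
 * caller, with dev an open /dev/pf descriptor:
 *
 *	struct pfctl_status *status = pfctl_get_status(dev);
 *	struct pfctl_status_counter *c;
 *
 *	if (status == NULL)
 *		err(1, "pfctl_get_status");
 *	printf("running: %d reass: %u\n", status->running, status->reass);
 *	TAILQ_FOREACH(c, &status->counters, entry)
 *		printf("%s: %ju\n", c->name, (uintmax_t)c->counter);
 *	pfctl_free_status(status);
 */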
nvlist_add_string(nvl, "ifname", addr->v.ifname); if (addr->type == PF_ADDR_TABLE) nvlist_add_string(nvl, "tblname", addr->v.tblname); pfctl_nv_add_addr(nvl, "addr", &addr->v.a.addr); pfctl_nv_add_addr(nvl, "mask", &addr->v.a.mask); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr) { bzero(addr, sizeof(*addr)); addr->type = nvlist_get_number(nvl, "type"); addr->iflags = nvlist_get_number(nvl, "iflags"); if (addr->type == PF_ADDR_DYNIFTL) { strlcpy(addr->v.ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); addr->p.dyncnt = nvlist_get_number(nvl, "dyncnt"); } if (addr->type == PF_ADDR_TABLE) { strlcpy(addr->v.tblname, nvlist_get_string(nvl, "tblname"), PF_TABLE_NAME_SIZE); addr->p.tblcnt = nvlist_get_number(nvl, "tblcnt"); } pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"), &addr->v.a.addr); pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"), &addr->v.a.mask); } static void pfctl_nv_add_rule_addr(nvlist_t *nvparent, const char *name, const struct pf_rule_addr *addr) { uint64_t ports[2]; nvlist_t *nvl = nvlist_create(0); pfctl_nv_add_addr_wrap(nvl, "addr", &addr->addr); ports[0] = addr->port[0]; ports[1] = addr->port[1]; nvlist_add_number_array(nvl, "port", ports, 2); nvlist_add_number(nvl, "neg", addr->neg); nvlist_add_number(nvl, "port_op", addr->port_op); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr) { pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"), &addr->addr); pf_nvuint_16_array(nvl, "port", 2, addr->port, NULL); addr->neg = nvlist_get_number(nvl, "neg"); addr->port_op = nvlist_get_number(nvl, "port_op"); } static void pfctl_nv_add_mape(nvlist_t *nvparent, const char *name, const struct pf_mape_portset *mape) { nvlist_t *nvl = nvlist_create(0); nvlist_add_number(nvl, "offset", mape->offset); nvlist_add_number(nvl, "psidlen", mape->psidlen); nvlist_add_number(nvl, "psid", mape->psid); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pfctl_nv_add_pool(nvlist_t *nvparent, const char *name, const struct pfctl_pool *pool) { uint64_t ports[2]; nvlist_t *nvl = nvlist_create(0); nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key)); pfctl_nv_add_addr(nvl, "counter", &pool->counter); nvlist_add_number(nvl, "tblidx", pool->tblidx); ports[0] = pool->proxy_port[0]; ports[1] = pool->proxy_port[1]; nvlist_add_number_array(nvl, "proxy_port", ports, 2); nvlist_add_number(nvl, "opts", pool->opts); pfctl_nv_add_mape(nvl, "mape", &pool->mape); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape) { mape->offset = nvlist_get_number(nvl, "offset"); mape->psidlen = nvlist_get_number(nvl, "psidlen"); mape->psid = nvlist_get_number(nvl, "psid"); } static void pf_nvpool_to_pool(const nvlist_t *nvl, struct pfctl_pool *pool) { size_t len; const void *data; data = nvlist_get_binary(nvl, "key", &len); assert(len == sizeof(pool->key)); memcpy(&pool->key, data, len); pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"), &pool->counter); pool->tblidx = nvlist_get_number(nvl, "tblidx"); pf_nvuint_16_array(nvl, "proxy_port", 2, pool->proxy_port, NULL); pool->opts = nvlist_get_number(nvl, "opts"); if (nvlist_exists_nvlist(nvl, "mape")) pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"), &pool->mape); } static void pfctl_nv_add_uid(nvlist_t *nvparent, const char *name, const struct 
pf_rule_uid *uid) { uint64_t uids[2]; nvlist_t *nvl = nvlist_create(0); uids[0] = uid->uid[0]; uids[1] = uid->uid[1]; nvlist_add_number_array(nvl, "uid", uids, 2); nvlist_add_number(nvl, "op", uid->op); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid) { pf_nvuint_32_array(nvl, "uid", 2, uid->uid, NULL); uid->op = nvlist_get_number(nvl, "op"); } static void pfctl_nv_add_divert(nvlist_t *nvparent, const char *name, const struct pfctl_rule *r) { nvlist_t *nvl = nvlist_create(0); pfctl_nv_add_addr(nvl, "addr", &r->divert.addr); nvlist_add_number(nvl, "port", r->divert.port); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvdivert_to_divert(const nvlist_t *nvl, struct pfctl_rule *rule) { pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"), &rule->divert.addr); rule->divert.port = nvlist_get_number(nvl, "port"); } static void pf_nvrule_to_rule(const nvlist_t *nvl, struct pfctl_rule *rule) { const uint64_t *skip; const char *const *labels; size_t skipcount, labelcount; rule->nr = nvlist_get_number(nvl, "nr"); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"), &rule->src); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"), &rule->dst); skip = nvlist_get_number_array(nvl, "skip", &skipcount); assert(skip); assert(skipcount == PF_SKIP_COUNT); for (int i = 0; i < PF_SKIP_COUNT; i++) rule->skip[i].nr = skip[i]; labels = nvlist_get_string_array(nvl, "labels", &labelcount); assert(labelcount <= PF_RULE_MAX_LABEL_COUNT); for (size_t i = 0; i < labelcount; i++) strlcpy(rule->label[i], labels[i], PF_RULE_LABEL_SIZE); rule->ridentifier = nvlist_get_number(nvl, "ridentifier"); strlcpy(rule->ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); strlcpy(rule->qname, nvlist_get_string(nvl, "qname"), PF_QNAME_SIZE); strlcpy(rule->pqname, nvlist_get_string(nvl, "pqname"), PF_QNAME_SIZE); strlcpy(rule->tagname, nvlist_get_string(nvl, "tagname"), PF_TAG_NAME_SIZE); strlcpy(rule->match_tagname, nvlist_get_string(nvl, "match_tagname"), PF_TAG_NAME_SIZE); strlcpy(rule->overload_tblname, nvlist_get_string(nvl, "overload_tblname"), PF_TABLE_NAME_SIZE); pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"), &rule->rpool); rule->evaluations = nvlist_get_number(nvl, "evaluations"); pf_nvuint_64_array(nvl, "packets", 2, rule->packets, NULL); pf_nvuint_64_array(nvl, "bytes", 2, rule->bytes, NULL); if (nvlist_exists_number(nvl, "timestamp")) { rule->last_active_timestamp = nvlist_get_number(nvl, "timestamp"); } rule->os_fingerprint = nvlist_get_number(nvl, "os_fingerprint"); rule->rtableid = nvlist_get_number(nvl, "rtableid"); pf_nvuint_32_array(nvl, "timeout", PFTM_MAX, rule->timeout, NULL); rule->max_states = nvlist_get_number(nvl, "max_states"); rule->max_src_nodes = nvlist_get_number(nvl, "max_src_nodes"); rule->max_src_states = nvlist_get_number(nvl, "max_src_states"); rule->max_src_conn = nvlist_get_number(nvl, "max_src_conn"); rule->max_src_conn_rate.limit = nvlist_get_number(nvl, "max_src_conn_rate.limit"); rule->max_src_conn_rate.seconds = nvlist_get_number(nvl, "max_src_conn_rate.seconds"); rule->qid = nvlist_get_number(nvl, "qid"); rule->pqid = nvlist_get_number(nvl, "pqid"); rule->dnpipe = nvlist_get_number(nvl, "dnpipe"); rule->dnrpipe = nvlist_get_number(nvl, "dnrpipe"); rule->free_flags = nvlist_get_number(nvl, "dnflags"); rule->prob = nvlist_get_number(nvl, "prob"); rule->cuid = nvlist_get_number(nvl, "cuid"); rule->cpid = nvlist_get_number(nvl, "cpid"); rule->return_icmp = 
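/*
 * [Editorial sketch -- not part of this patch.]  pf_nvrule_to_rule() is the
 * decode half of pfctl_get_rule()/pfctl_get_clear_rule() further down.  A
 * hypothetical walk over the main filter ruleset (assuming PF_PASS selects
 * the filter rules and "" the main anchor, as pfctl does):
 *
 *	struct pfctl_rules_info info;
 *	struct pfctl_rule rule;
 *	char anchor_call[MAXPATHLEN];
 *
 *	if (pfctl_get_rules_info(dev, &info, PF_PASS, "") != 0)
 *		err(1, "DIOCGETRULES");
 *	for (uint32_t nr = 0; nr < info.nr; nr++) {
 *		if (pfctl_get_rule(dev, nr, info.ticket, "", PF_PASS,
 *		    &rule, anchor_call) != 0)
 *			err(1, "DIOCGETRULENV");
 *		printf("%u: evaluations %ju\n", rule.nr,
 *		    (uintmax_t)rule.evaluations);
 *	}
 */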
nvlist_get_number(nvl, "return_icmp"); rule->return_icmp6 = nvlist_get_number(nvl, "return_icmp6"); rule->max_mss = nvlist_get_number(nvl, "max_mss"); rule->scrub_flags = nvlist_get_number(nvl, "scrub_flags"); pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"), &rule->uid); pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "gid"), (struct pf_rule_uid *)&rule->gid); rule->rule_flag = nvlist_get_number(nvl, "rule_flag"); rule->action = nvlist_get_number(nvl, "action"); rule->direction = nvlist_get_number(nvl, "direction"); rule->log = nvlist_get_number(nvl, "log"); rule->logif = nvlist_get_number(nvl, "logif"); rule->quick = nvlist_get_number(nvl, "quick"); rule->ifnot = nvlist_get_number(nvl, "ifnot"); rule->match_tag_not = nvlist_get_number(nvl, "match_tag_not"); rule->natpass = nvlist_get_number(nvl, "natpass"); rule->keep_state = nvlist_get_number(nvl, "keep_state"); rule->af = nvlist_get_number(nvl, "af"); rule->proto = nvlist_get_number(nvl, "proto"); rule->type = nvlist_get_number(nvl, "type"); rule->code = nvlist_get_number(nvl, "code"); rule->flags = nvlist_get_number(nvl, "flags"); rule->flagset = nvlist_get_number(nvl, "flagset"); rule->min_ttl = nvlist_get_number(nvl, "min_ttl"); rule->allow_opts = nvlist_get_number(nvl, "allow_opts"); rule->rt = nvlist_get_number(nvl, "rt"); rule->return_ttl = nvlist_get_number(nvl, "return_ttl"); rule->tos = nvlist_get_number(nvl, "tos"); rule->set_tos = nvlist_get_number(nvl, "set_tos"); rule->anchor_relative = nvlist_get_number(nvl, "anchor_relative"); rule->anchor_wildcard = nvlist_get_number(nvl, "anchor_wildcard"); rule->flush = nvlist_get_number(nvl, "flush"); rule->prio = nvlist_get_number(nvl, "prio"); pf_nvuint_8_array(nvl, "set_prio", 2, rule->set_prio, NULL); pf_nvdivert_to_divert(nvlist_get_nvlist(nvl, "divert"), rule); rule->states_cur = nvlist_get_number(nvl, "states_cur"); rule->states_tot = nvlist_get_number(nvl, "states_tot"); rule->src_nodes = nvlist_get_number(nvl, "src_nodes"); } static void pfctl_nveth_addr_to_eth_addr(const nvlist_t *nvl, struct pfctl_eth_addr *addr) { static const u_int8_t EMPTY_MAC[ETHER_ADDR_LEN] = { 0 }; size_t len; const void *data; data = nvlist_get_binary(nvl, "addr", &len); assert(len == sizeof(addr->addr)); memcpy(addr->addr, data, sizeof(addr->addr)); data = nvlist_get_binary(nvl, "mask", &len); assert(len == sizeof(addr->mask)); memcpy(addr->mask, data, sizeof(addr->mask)); addr->neg = nvlist_get_bool(nvl, "neg"); /* To make checks for 'is this address set?' easier. 
*/ addr->isset = memcmp(addr->addr, EMPTY_MAC, ETHER_ADDR_LEN) != 0; } static nvlist_t * pfctl_eth_addr_to_nveth_addr(const struct pfctl_eth_addr *addr) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_bool(nvl, "neg", addr->neg); nvlist_add_binary(nvl, "addr", &addr->addr, ETHER_ADDR_LEN); nvlist_add_binary(nvl, "mask", &addr->mask, ETHER_ADDR_LEN); return (nvl); } static void pfctl_nveth_rule_to_eth_rule(const nvlist_t *nvl, struct pfctl_eth_rule *rule) { rule->nr = nvlist_get_number(nvl, "nr"); rule->quick = nvlist_get_bool(nvl, "quick"); strlcpy(rule->ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); rule->ifnot = nvlist_get_bool(nvl, "ifnot"); rule->direction = nvlist_get_number(nvl, "direction"); rule->proto = nvlist_get_number(nvl, "proto"); strlcpy(rule->match_tagname, nvlist_get_string(nvl, "match_tagname"), PF_TAG_NAME_SIZE); rule->match_tag = nvlist_get_number(nvl, "match_tag"); rule->match_tag_not = nvlist_get_bool(nvl, "match_tag_not"); pfctl_nveth_addr_to_eth_addr(nvlist_get_nvlist(nvl, "src"), &rule->src); pfctl_nveth_addr_to_eth_addr(nvlist_get_nvlist(nvl, "dst"), &rule->dst); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "ipsrc"), &rule->ipsrc); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "ipdst"), &rule->ipdst); rule->evaluations = nvlist_get_number(nvl, "evaluations"); rule->packets[0] = nvlist_get_number(nvl, "packets-in"); rule->packets[1] = nvlist_get_number(nvl, "packets-out"); rule->bytes[0] = nvlist_get_number(nvl, "bytes-in"); rule->bytes[1] = nvlist_get_number(nvl, "bytes-out"); if (nvlist_exists_number(nvl, "timestamp")) { rule->last_active_timestamp = nvlist_get_number(nvl, "timestamp"); } strlcpy(rule->qname, nvlist_get_string(nvl, "qname"), PF_QNAME_SIZE); strlcpy(rule->tagname, nvlist_get_string(nvl, "tagname"), PF_TAG_NAME_SIZE); rule->dnpipe = nvlist_get_number(nvl, "dnpipe"); rule->dnflags = nvlist_get_number(nvl, "dnflags"); rule->anchor_relative = nvlist_get_number(nvl, "anchor_relative"); rule->anchor_wildcard = nvlist_get_number(nvl, "anchor_wildcard"); strlcpy(rule->bridge_to, nvlist_get_string(nvl, "bridge_to"), IFNAMSIZ); rule->action = nvlist_get_number(nvl, "action"); } int pfctl_get_eth_rulesets_info(int dev, struct pfctl_eth_rulesets_info *ri, const char *path) { nvlist_t *nvl; int ret; bzero(ri, sizeof(*ri)); nvl = nvlist_create(0); nvlist_add_string(nvl, "path", path); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULESETS, 256, &nvl)) != 0) return (ret); ri->nr = nvlist_get_number(nvl, "nr"); nvlist_destroy(nvl); return (0); } int pfctl_get_eth_ruleset(int dev, const char *path, int nr, struct pfctl_eth_ruleset_info *ri) { nvlist_t *nvl; int ret; bzero(ri, sizeof(*ri)); nvl = nvlist_create(0); nvlist_add_string(nvl, "path", path); nvlist_add_number(nvl, "nr", nr); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULESET, 1024, &nvl)) != 0) return (ret); ri->nr = nvlist_get_number(nvl, "nr"); strlcpy(ri->path, nvlist_get_string(nvl, "path"), MAXPATHLEN); strlcpy(ri->name, nvlist_get_string(nvl, "name"), PF_ANCHOR_NAME_SIZE); return (0); } int pfctl_get_eth_rules_info(int dev, struct pfctl_eth_rules_info *rules, const char *path) { nvlist_t *nvl; int ret; bzero(rules, sizeof(*rules)); nvl = nvlist_create(0); nvlist_add_string(nvl, "anchor", path); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULES, 1024, &nvl)) != 0) return (ret); rules->nr = nvlist_get_number(nvl, "nr"); rules->ticket = nvlist_get_number(nvl, "ticket"); nvlist_destroy(nvl); return (0); } int pfctl_get_eth_rule(int dev, uint32_t nr, uint32_t 
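/*
 * [Editorial sketch -- not part of this patch.]  The DIOCGETETHRULES and
 * DIOCGETETHRULE wrappers above and below pair up for enumeration: the
 * first returns a rule count plus a ticket, the second fetches each rule
 * by number under that ticket.  Hypothetically:
 *
 *	struct pfctl_eth_rules_info info;
 *	struct pfctl_eth_rule rule;
 *
 *	if (pfctl_get_eth_rules_info(dev, &info, "") != 0)
 *		err(1, "DIOCGETETHRULES");
 *	for (uint32_t nr = 0; nr < info.nr; nr++) {
 *		if (pfctl_get_eth_rule(dev, nr, info.ticket, "", &rule,
 *		    false, NULL) != 0)
 *			err(1, "DIOCGETETHRULE");
 *		printf("%u: %s\n", rule.nr, rule.ifname);
 *	}
 */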
ticket, const char *path, struct pfctl_eth_rule *rule, bool clear, char *anchor_call) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); nvlist_add_string(nvl, "anchor", path); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_number(nvl, "nr", nr); nvlist_add_bool(nvl, "clear", clear); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULE, 4096, &nvl)) != 0) return (ret); pfctl_nveth_rule_to_eth_rule(nvl, rule); if (anchor_call) strlcpy(anchor_call, nvlist_get_string(nvl, "anchor_call"), MAXPATHLEN); nvlist_destroy(nvl); return (0); } int pfctl_add_eth_rule(int dev, const struct pfctl_eth_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket) { struct pfioc_nv nv; nvlist_t *nvl, *addr; void *packed; int error = 0; size_t size; nvl = nvlist_create(0); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_string(nvl, "anchor", anchor); nvlist_add_string(nvl, "anchor_call", anchor_call); nvlist_add_number(nvl, "nr", r->nr); nvlist_add_bool(nvl, "quick", r->quick); nvlist_add_string(nvl, "ifname", r->ifname); nvlist_add_bool(nvl, "ifnot", r->ifnot); nvlist_add_number(nvl, "direction", r->direction); nvlist_add_number(nvl, "proto", r->proto); nvlist_add_string(nvl, "match_tagname", r->match_tagname); nvlist_add_bool(nvl, "match_tag_not", r->match_tag_not); addr = pfctl_eth_addr_to_nveth_addr(&r->src); if (addr == NULL) { nvlist_destroy(nvl); return (ENOMEM); } nvlist_add_nvlist(nvl, "src", addr); nvlist_destroy(addr); addr = pfctl_eth_addr_to_nveth_addr(&r->dst); if (addr == NULL) { nvlist_destroy(nvl); return (ENOMEM); } nvlist_add_nvlist(nvl, "dst", addr); nvlist_destroy(addr); pfctl_nv_add_rule_addr(nvl, "ipsrc", &r->ipsrc); pfctl_nv_add_rule_addr(nvl, "ipdst", &r->ipdst); nvlist_add_string(nvl, "qname", r->qname); nvlist_add_string(nvl, "tagname", r->tagname); nvlist_add_number(nvl, "dnpipe", r->dnpipe); nvlist_add_number(nvl, "dnflags", r->dnflags); nvlist_add_string(nvl, "bridge_to", r->bridge_to); nvlist_add_number(nvl, "action", r->action); packed = nvlist_pack(nvl, &size); if (packed == NULL) { nvlist_destroy(nvl); return (ENOMEM); } nv.len = size; nv.size = size; nv.data = packed; if (ioctl(dev, DIOCADDETHRULE, &nv) != 0) error = errno; free(packed); nvlist_destroy(nvl); return (error); } int pfctl_add_rule(int dev, const struct pfctl_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket, uint32_t pool_ticket) { struct pfioc_nv nv; uint64_t timeouts[PFTM_MAX]; uint64_t set_prio[2]; nvlist_t *nvl, *nvlr; size_t labelcount; int ret; nvl = nvlist_create(0); nvlr = nvlist_create(0); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_number(nvl, "pool_ticket", pool_ticket); nvlist_add_string(nvl, "anchor", anchor); nvlist_add_string(nvl, "anchor_call", anchor_call); nvlist_add_number(nvlr, "nr", r->nr); pfctl_nv_add_rule_addr(nvlr, "src", &r->src); pfctl_nv_add_rule_addr(nvlr, "dst", &r->dst); labelcount = 0; while (r->label[labelcount][0] != 0 && labelcount < PF_RULE_MAX_LABEL_COUNT) { nvlist_append_string_array(nvlr, "labels", r->label[labelcount]); labelcount++; } nvlist_add_number(nvlr, "ridentifier", r->ridentifier); nvlist_add_string(nvlr, "ifname", r->ifname); nvlist_add_string(nvlr, "qname", r->qname); nvlist_add_string(nvlr, "pqname", r->pqname); nvlist_add_string(nvlr, "tagname", r->tagname); nvlist_add_string(nvlr, "match_tagname", r->match_tagname); nvlist_add_string(nvlr, "overload_tblname", r->overload_tblname); pfctl_nv_add_pool(nvlr, "rpool", &r->rpool); nvlist_add_number(nvlr, "os_fingerprint", r->os_fingerprint); nvlist_add_number(nvlr, 
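/*
 * [Editorial sketch -- not part of this patch.]  pfctl_add_eth_rule() above
 * takes a ruleset ticket; pfctl obtains one via a DIOCXBEGIN transaction on
 * PF_RULESET_ETH (compare pfctl_clear_eth_rules() below) before loading
 * rules.  A minimal, hypothetical insertion given such a ticket:
 *
 *	struct pfctl_eth_rule r = { 0 };
 *
 *	r.action = PF_PASS;
 *	strlcpy(r.ifname, "em0", sizeof(r.ifname));
 *	if (pfctl_add_eth_rule(dev, &r, "", "", ticket) != 0)
 *		err(1, "DIOCADDETHRULE");
 */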
"rtableid", r->rtableid); for (int i = 0; i < PFTM_MAX; i++) timeouts[i] = r->timeout[i]; nvlist_add_number_array(nvlr, "timeout", timeouts, PFTM_MAX); nvlist_add_number(nvlr, "max_states", r->max_states); nvlist_add_number(nvlr, "max_src_nodes", r->max_src_nodes); nvlist_add_number(nvlr, "max_src_states", r->max_src_states); nvlist_add_number(nvlr, "max_src_conn", r->max_src_conn); nvlist_add_number(nvlr, "max_src_conn_rate.limit", r->max_src_conn_rate.limit); nvlist_add_number(nvlr, "max_src_conn_rate.seconds", r->max_src_conn_rate.seconds); nvlist_add_number(nvlr, "dnpipe", r->dnpipe); nvlist_add_number(nvlr, "dnrpipe", r->dnrpipe); nvlist_add_number(nvlr, "dnflags", r->free_flags); nvlist_add_number(nvlr, "prob", r->prob); nvlist_add_number(nvlr, "cuid", r->cuid); nvlist_add_number(nvlr, "cpid", r->cpid); nvlist_add_number(nvlr, "return_icmp", r->return_icmp); nvlist_add_number(nvlr, "return_icmp6", r->return_icmp6); nvlist_add_number(nvlr, "max_mss", r->max_mss); nvlist_add_number(nvlr, "scrub_flags", r->scrub_flags); pfctl_nv_add_uid(nvlr, "uid", &r->uid); pfctl_nv_add_uid(nvlr, "gid", (const struct pf_rule_uid *)&r->gid); nvlist_add_number(nvlr, "rule_flag", r->rule_flag); nvlist_add_number(nvlr, "action", r->action); nvlist_add_number(nvlr, "direction", r->direction); nvlist_add_number(nvlr, "log", r->log); nvlist_add_number(nvlr, "logif", r->logif); nvlist_add_number(nvlr, "quick", r->quick); nvlist_add_number(nvlr, "ifnot", r->ifnot); nvlist_add_number(nvlr, "match_tag_not", r->match_tag_not); nvlist_add_number(nvlr, "natpass", r->natpass); nvlist_add_number(nvlr, "keep_state", r->keep_state); nvlist_add_number(nvlr, "af", r->af); nvlist_add_number(nvlr, "proto", r->proto); nvlist_add_number(nvlr, "type", r->type); nvlist_add_number(nvlr, "code", r->code); nvlist_add_number(nvlr, "flags", r->flags); nvlist_add_number(nvlr, "flagset", r->flagset); nvlist_add_number(nvlr, "min_ttl", r->min_ttl); nvlist_add_number(nvlr, "allow_opts", r->allow_opts); nvlist_add_number(nvlr, "rt", r->rt); nvlist_add_number(nvlr, "return_ttl", r->return_ttl); nvlist_add_number(nvlr, "tos", r->tos); nvlist_add_number(nvlr, "set_tos", r->set_tos); nvlist_add_number(nvlr, "anchor_relative", r->anchor_relative); nvlist_add_number(nvlr, "anchor_wildcard", r->anchor_wildcard); nvlist_add_number(nvlr, "flush", r->flush); nvlist_add_number(nvlr, "prio", r->prio); set_prio[0] = r->set_prio[0]; set_prio[1] = r->set_prio[1]; nvlist_add_number_array(nvlr, "set_prio", set_prio, 2); pfctl_nv_add_divert(nvlr, "divert", r); nvlist_add_nvlist(nvl, "rule", nvlr); nvlist_destroy(nvlr); /* Now do the call. 
*/ nv.data = nvlist_pack(nvl, &nv.len); nv.size = nv.len; ret = ioctl(dev, DIOCADDRULENV, &nv); if (ret == -1) ret = errno; free(nv.data); nvlist_destroy(nvl); return (ret); } int pfctl_get_rules_info(int dev, struct pfctl_rules_info *rules, uint32_t ruleset, const char *path) { struct pfioc_rule pr; int ret; bzero(&pr, sizeof(pr)); if (strlcpy(pr.anchor, path, sizeof(pr.anchor)) >= sizeof(pr.anchor)) return (E2BIG); pr.rule.action = ruleset; ret = ioctl(dev, DIOCGETRULES, &pr); if (ret != 0) return (ret); rules->nr = pr.nr; rules->ticket = pr.ticket; return (0); } int pfctl_get_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call) { return (pfctl_get_clear_rule(dev, nr, ticket, anchor, ruleset, rule, anchor_call, false)); } int pfctl_get_clear_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call, bool clear) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); if (nvl == 0) return (ENOMEM); nvlist_add_number(nvl, "nr", nr); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_string(nvl, "anchor", anchor); nvlist_add_number(nvl, "ruleset", ruleset); if (clear) nvlist_add_bool(nvl, "clear_counter", true); if ((ret = pfctl_do_ioctl(dev, DIOCGETRULENV, 8192, &nvl)) != 0) return (ret); pf_nvrule_to_rule(nvlist_get_nvlist(nvl, "rule"), rule); if (anchor_call) strlcpy(anchor_call, nvlist_get_string(nvl, "anchor_call"), MAXPATHLEN); nvlist_destroy(nvl); return (0); } int pfctl_set_keepcounters(int dev, bool keep) { struct pfioc_nv nv; nvlist_t *nvl; int ret; nvl = nvlist_create(0); nvlist_add_bool(nvl, "keep_counters", keep); nv.data = nvlist_pack(nvl, &nv.len); nv.size = nv.len; nvlist_destroy(nvl); ret = ioctl(dev, DIOCKEEPCOUNTERS, &nv); free(nv.data); return (ret); } static void pfctl_nv_add_state_cmp(nvlist_t *nvl, const char *name, const struct pfctl_state_cmp *cmp) { nvlist_t *nv; nv = nvlist_create(0); nvlist_add_number(nv, "id", cmp->id); nvlist_add_number(nv, "creatorid", htonl(cmp->creatorid)); nvlist_add_number(nv, "direction", cmp->direction); nvlist_add_nvlist(nvl, name, nv); nvlist_destroy(nv); } static void pf_state_key_export_to_state_key(struct pfctl_state_key *ps, const struct pf_state_key_export *s) { bcopy(s->addr, ps->addr, sizeof(ps->addr[0]) * 2); ps->port[0] = s->port[0]; ps->port[1] = s->port[1]; } static void pf_state_peer_export_to_state_peer(struct pfctl_state_peer *ps, const struct pf_state_peer_export *s) { /* Ignore scrub. 
*/ ps->seqlo = s->seqlo; ps->seqhi = s->seqhi; ps->seqdiff = s->seqdiff; /* Ignore max_win & mss */ ps->state = s->state; ps->wscale = s->wscale; } static void pf_state_export_to_state(struct pfctl_state *ps, const struct pf_state_export *s) { assert(s->version >= PF_STATE_VERSION); ps->id = s->id; strlcpy(ps->ifname, s->ifname, sizeof(ps->ifname)); strlcpy(ps->orig_ifname, s->orig_ifname, sizeof(ps->orig_ifname)); pf_state_key_export_to_state_key(&ps->key[0], &s->key[0]); pf_state_key_export_to_state_key(&ps->key[1], &s->key[1]); pf_state_peer_export_to_state_peer(&ps->src, &s->src); pf_state_peer_export_to_state_peer(&ps->dst, &s->dst); bcopy(&s->rt_addr, &ps->rt_addr, sizeof(ps->rt_addr)); ps->rule = ntohl(s->rule); ps->anchor = ntohl(s->anchor); ps->nat_rule = ntohl(s->nat_rule); ps->creation = ntohl(s->creation); ps->expire = ntohl(s->expire); ps->packets[0] = s->packets[0]; ps->packets[1] = s->packets[1]; ps->bytes[0] = s->bytes[0]; ps->bytes[1] = s->bytes[1]; ps->creatorid = ntohl(s->creatorid); ps->key[0].proto = s->proto; ps->key[1].proto = s->proto; ps->key[0].af = s->af; ps->key[1].af = s->af; ps->direction = s->direction; ps->state_flags = s->state_flags; ps->sync_flags = s->sync_flags; } int pfctl_get_states(int dev, struct pfctl_states *states) { struct pfioc_states_v2 ps; struct pf_state_export *p; char *inbuf = NULL, *newinbuf = NULL; unsigned int len = 0; int i, error; bzero(&ps, sizeof(ps)); ps.ps_req_version = PF_STATE_VERSION; bzero(states, sizeof(*states)); TAILQ_INIT(&states->states); for (;;) { ps.ps_len = len; if (len) { newinbuf = realloc(inbuf, len); if (newinbuf == NULL) return (ENOMEM); ps.ps_buf = inbuf = newinbuf; } if ((error = ioctl(dev, DIOCGETSTATESV2, &ps)) < 0) { free(inbuf); return (error); } if (ps.ps_len + sizeof(struct pfioc_states_v2) < len) break; if (len == 0 && ps.ps_len == 0) goto out; if (len == 0 && ps.ps_len != 0) len = ps.ps_len; if (ps.ps_len == 0) goto out; /* no states */ len *= 2; } p = ps.ps_states; for (i = 0; i < ps.ps_len; i += sizeof(*p), p++) { struct pfctl_state *s = malloc(sizeof(*s)); if (s == NULL) { pfctl_free_states(states); error = ENOMEM; goto out; } pf_state_export_to_state(s, p); TAILQ_INSERT_TAIL(&states->states, s, entry); } out: free(inbuf); return (error); } void pfctl_free_states(struct pfctl_states *states) { struct pfctl_state *s, *tmp; TAILQ_FOREACH_SAFE(s, &states->states, entry, tmp) { free(s); } bzero(states, sizeof(*states)); } static int _pfctl_clear_states(int dev, const struct pfctl_kill *kill, unsigned int *killed, uint64_t ioctlval) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); pfctl_nv_add_state_cmp(nvl, "cmp", &kill->cmp); nvlist_add_number(nvl, "af", kill->af); nvlist_add_number(nvl, "proto", kill->proto); pfctl_nv_add_rule_addr(nvl, "src", &kill->src); pfctl_nv_add_rule_addr(nvl, "dst", &kill->dst); pfctl_nv_add_rule_addr(nvl, "rt_addr", &kill->rt_addr); nvlist_add_string(nvl, "ifname", kill->ifname); nvlist_add_string(nvl, "label", kill->label); nvlist_add_bool(nvl, "kill_match", kill->kill_match); if ((ret = pfctl_do_ioctl(dev, ioctlval, 1024, &nvl)) != 0) return (ret); if (killed) *killed = nvlist_get_number(nvl, "killed"); nvlist_destroy(nvl); return (ret); } int pfctl_clear_states(int dev, const struct pfctl_kill *kill, unsigned int *killed) { return (_pfctl_clear_states(dev, kill, killed, DIOCCLRSTATESNV)); } int pfctl_kill_states(int dev, const struct pfctl_kill *kill, unsigned int *killed) { return (_pfctl_clear_states(dev, kill, killed, DIOCKILLSTATESNV)); } int pfctl_clear_rules(int dev, 
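/*
 * [Editorial sketch -- not part of this patch.]  pfctl_get_states() above
 * sizes its buffer by probing DIOCGETSTATESV2, doubling until the export
 * fits, and hands back a TAILQ of decoded states.  Hypothetical dump:
 *
 *	struct pfctl_states states;
 *	struct pfctl_state *s;
 *
 *	if (pfctl_get_states(dev, &states) != 0)
 *		err(1, "DIOCGETSTATESV2");
 *	TAILQ_FOREACH(s, &states.states, entry)
 *		printf("%s proto %u flags %#x\n", s->ifname,
 *		    s->key[0].proto, s->state_flags);
 *	pfctl_free_states(&states);
 */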
const char *anchorname) { struct pfioc_trans trans; struct pfioc_trans_e transe[2]; int ret; bzero(&trans, sizeof(trans)); bzero(&transe, sizeof(transe)); transe[0].rs_num = PF_RULESET_SCRUB; if (strlcpy(transe[0].anchor, anchorname, sizeof(transe[0].anchor)) >= sizeof(transe[0].anchor)) return (E2BIG); transe[1].rs_num = PF_RULESET_FILTER; if (strlcpy(transe[1].anchor, anchorname, sizeof(transe[1].anchor)) >= sizeof(transe[1].anchor)) return (E2BIG); trans.size = 2; trans.esize = sizeof(transe[0]); trans.array = transe; ret = ioctl(dev, DIOCXBEGIN, &trans); if (ret != 0) return (ret); return ioctl(dev, DIOCXCOMMIT, &trans); } int pfctl_clear_nat(int dev, const char *anchorname) { struct pfioc_trans trans; struct pfioc_trans_e transe[3]; int ret; bzero(&trans, sizeof(trans)); bzero(&transe, sizeof(transe)); transe[0].rs_num = PF_RULESET_NAT; if (strlcpy(transe[0].anchor, anchorname, sizeof(transe[0].anchor)) >= sizeof(transe[0].anchor)) return (E2BIG); transe[1].rs_num = PF_RULESET_BINAT; if (strlcpy(transe[1].anchor, anchorname, sizeof(transe[1].anchor)) >= sizeof(transe[0].anchor)) return (E2BIG); transe[2].rs_num = PF_RULESET_RDR; if (strlcpy(transe[2].anchor, anchorname, sizeof(transe[2].anchor)) >= sizeof(transe[2].anchor)) return (E2BIG); trans.size = 3; trans.esize = sizeof(transe[0]); trans.array = transe; ret = ioctl(dev, DIOCXBEGIN, &trans); if (ret != 0) return (ret); return ioctl(dev, DIOCXCOMMIT, &trans); } int pfctl_clear_eth_rules(int dev, const char *anchorname) { struct pfioc_trans trans; struct pfioc_trans_e transe; int ret; bzero(&trans, sizeof(trans)); bzero(&transe, sizeof(transe)); transe.rs_num = PF_RULESET_ETH; if (strlcpy(transe.anchor, anchorname, sizeof(transe.anchor)) >= sizeof(transe.anchor)) return (E2BIG); trans.size = 1; trans.esize = sizeof(transe); trans.array = &transe; ret = ioctl(dev, DIOCXBEGIN, &trans); if (ret != 0) return (ret); return ioctl(dev, DIOCXCOMMIT, &trans); } static int pfctl_get_limit(int dev, const int index, uint *limit) { struct pfioc_limit pl; bzero(&pl, sizeof(pl)); pl.index = index; if (ioctl(dev, DIOCGETLIMIT, &pl) == -1) return (errno); *limit = pl.limit; return (0); } int pfctl_set_syncookies(int dev, const struct pfctl_syncookies *s) { struct pfioc_nv nv; nvlist_t *nvl; int ret; uint state_limit; uint64_t lim, hi, lo; ret = pfctl_get_limit(dev, PF_LIMIT_STATES, &state_limit); if (ret != 0) return (ret); lim = state_limit; hi = lim * s->highwater / 100; lo = lim * s->lowwater / 100; if (lo == hi) hi++; nvl = nvlist_create(0); nvlist_add_bool(nvl, "enabled", s->mode != PFCTL_SYNCOOKIES_NEVER); nvlist_add_bool(nvl, "adaptive", s->mode == PFCTL_SYNCOOKIES_ADAPTIVE); nvlist_add_number(nvl, "highwater", hi); nvlist_add_number(nvl, "lowwater", lo); nv.data = nvlist_pack(nvl, &nv.len); nv.size = nv.len; nvlist_destroy(nvl); nvl = NULL; ret = ioctl(dev, DIOCSETSYNCOOKIES, &nv); free(nv.data); return (ret); } int pfctl_get_syncookies(int dev, struct pfctl_syncookies *s) { nvlist_t *nvl; int ret; uint state_limit; bool enabled, adaptive; ret = pfctl_get_limit(dev, PF_LIMIT_STATES, &state_limit); if (ret != 0) return (ret); bzero(s, sizeof(*s)); nvl = nvlist_create(0); if ((ret = pfctl_do_ioctl(dev, DIOCGETSYNCOOKIES, 256, &nvl)) != 0) return (errno); enabled = nvlist_get_bool(nvl, "enabled"); adaptive = nvlist_get_bool(nvl, "adaptive"); if (enabled) { if (adaptive) s->mode = PFCTL_SYNCOOKIES_ADAPTIVE; else s->mode = PFCTL_SYNCOOKIES_ALWAYS; } else { s->mode = PFCTL_SYNCOOKIES_NEVER; } s->highwater = nvlist_get_number(nvl, "highwater") * 
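/*
 * [Editorial sketch -- not part of this patch.]  pfctl_set_syncookies()
 * above converts the caller's percentage watermarks into absolute state
 * counts against PF_LIMIT_STATES, nudging highwater up if the two collide,
 * and pfctl_get_syncookies() converts back to percentages.  Hypothetical
 * configuration (highwater/lowwater are percentages of the state limit):
 *
 *	struct pfctl_syncookies cfg = {
 *		.mode = PFCTL_SYNCOOKIES_ADAPTIVE,
 *		.highwater = 25,
 *		.lowwater = 12,
 *	};
 *
 *	if (pfctl_set_syncookies(dev, &cfg) != 0)
 *		err(1, "DIOCSETSYNCOOKIES");
 */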
100 / state_limit; s->lowwater = nvlist_get_number(nvl, "lowwater") * 100 / state_limit; nvlist_destroy(nvl); return (0); } int pfctl_table_add_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *nadd, int flags) { struct pfioc_table io; if (tbl == NULL || size < 0 || (size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = size; if (ioctl(dev, DIOCRADDADDRS, &io)) return (errno); if (nadd != NULL) *nadd = io.pfrio_nadd; return (0); } int pfctl_table_del_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *ndel, int flags) { struct pfioc_table io; if (tbl == NULL || size < 0 || (size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = size; if (ioctl(dev, DIOCRDELADDRS, &io)) return (errno); if (ndel != NULL) *ndel = io.pfrio_ndel; return (0); } int pfctl_table_set_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *size2, int *nadd, int *ndel, int *nchange, int flags) { struct pfioc_table io; if (tbl == NULL || size < 0 || (size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = size; io.pfrio_size2 = (size2 != NULL) ? *size2 : 0; if (ioctl(dev, DIOCRSETADDRS, &io)) return (-1); if (nadd != NULL) *nadd = io.pfrio_nadd; if (ndel != NULL) *ndel = io.pfrio_ndel; if (nchange != NULL) *nchange = io.pfrio_nchange; if (size2 != NULL) *size2 = io.pfrio_size2; return (0); } int pfctl_table_get_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int *size, int flags) { struct pfioc_table io; if (tbl == NULL || size == NULL || *size < 0 || (*size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = *size; if (ioctl(dev, DIOCRGETADDRS, &io)) return (-1); *size = io.pfrio_size; return (0); } diff --git a/lib/libpfctl/libpfctl.h b/lib/libpfctl/libpfctl.h index 684e73c1815d..1a07b74dc10f 100644 --- a/lib/libpfctl/libpfctl.h +++ b/lib/libpfctl/libpfctl.h @@ -1,419 +1,420 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _PFCTL_IOCTL_H_ #define _PFCTL_IOCTL_H_ #include struct pfctl_anchor; struct pfctl_eth_anchor; struct pfctl_status_counter { uint64_t id; uint64_t counter; char *name; TAILQ_ENTRY(pfctl_status_counter) entry; }; TAILQ_HEAD(pfctl_status_counters, pfctl_status_counter); struct pfctl_status { bool running; uint32_t since; uint32_t debug; uint32_t hostid; uint64_t states; uint64_t src_nodes; char ifname[IFNAMSIZ]; uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; bool syncookies_active; + uint32_t reass; struct pfctl_status_counters counters; struct pfctl_status_counters lcounters; struct pfctl_status_counters fcounters; struct pfctl_status_counters scounters; uint64_t pcounters[2][2][3]; uint64_t bcounters[2][2]; }; struct pfctl_eth_rulesets_info { uint32_t nr; }; struct pfctl_eth_rules_info { uint32_t nr; uint32_t ticket; }; struct pfctl_eth_addr { uint8_t addr[ETHER_ADDR_LEN]; uint8_t mask[ETHER_ADDR_LEN]; bool neg; bool isset; }; struct pfctl_eth_rule { uint32_t nr; bool quick; /* Filter */ char ifname[IFNAMSIZ]; uint8_t ifnot; uint8_t direction; uint16_t proto; struct pfctl_eth_addr src, dst; struct pf_rule_addr ipsrc, ipdst; char match_tagname[PF_TAG_NAME_SIZE]; uint16_t match_tag; bool match_tag_not; /* Stats */ uint64_t evaluations; uint64_t packets[2]; uint64_t bytes[2]; time_t last_active_timestamp; /* Action */ char qname[PF_QNAME_SIZE]; char tagname[PF_TAG_NAME_SIZE]; uint16_t dnpipe; uint32_t dnflags; char bridge_to[IFNAMSIZ]; uint8_t action; struct pfctl_eth_anchor *anchor; uint8_t anchor_relative; uint8_t anchor_wildcard; TAILQ_ENTRY(pfctl_eth_rule) entries; }; TAILQ_HEAD(pfctl_eth_rules, pfctl_eth_rule); struct pfctl_eth_ruleset_info { uint32_t nr; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; }; struct pfctl_eth_ruleset { struct pfctl_eth_rules rules; struct pfctl_eth_anchor *anchor; }; struct pfctl_eth_anchor { struct pfctl_eth_anchor *parent; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pfctl_eth_ruleset ruleset; int refcnt; /* anchor rules */ int match; /* XXX: used for pfctl black magic */ }; struct pfctl_pool { struct pf_palist list; struct pf_pooladdr *cur; struct pf_poolhashkey key; struct pf_addr counter; struct pf_mape_portset mape; int tblidx; uint16_t proxy_port[2]; uint8_t opts; }; struct pfctl_rules_info { uint32_t nr; uint32_t ticket; }; struct pfctl_rule { struct pf_rule_addr src; struct pf_rule_addr dst; union pf_rule_ptr skip[PF_SKIP_COUNT]; char label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; uint32_t ridentifier; char ifname[IFNAMSIZ]; char qname[PF_QNAME_SIZE]; char pqname[PF_QNAME_SIZE]; char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; char overload_tblname[PF_TABLE_NAME_SIZE]; TAILQ_ENTRY(pfctl_rule) entries; struct pfctl_pool rpool; uint64_t evaluations; uint64_t packets[2]; uint64_t bytes[2]; time_t last_active_timestamp; struct pfi_kif *kif; struct pfctl_anchor *anchor; struct pfr_ktable *overload_tbl; pf_osfp_t os_fingerprint; int rtableid; uint32_t 
timeout[PFTM_MAX]; uint32_t max_states; uint32_t max_src_nodes; uint32_t max_src_states; uint32_t max_src_conn; struct { uint32_t limit; uint32_t seconds; } max_src_conn_rate; uint32_t qid; uint32_t pqid; uint16_t dnpipe; uint16_t dnrpipe; uint32_t free_flags; uint32_t nr; uint32_t prob; uid_t cuid; pid_t cpid; uint64_t states_cur; uint64_t states_tot; uint64_t src_nodes; uint16_t return_icmp; uint16_t return_icmp6; uint16_t max_mss; uint16_t tag; uint16_t match_tag; uint16_t scrub_flags; struct pf_rule_uid uid; struct pf_rule_gid gid; uint32_t rule_flag; uint8_t action; uint8_t direction; uint8_t log; uint8_t logif; uint8_t quick; uint8_t ifnot; uint8_t match_tag_not; uint8_t natpass; uint8_t keep_state; sa_family_t af; uint8_t proto; uint8_t type; uint8_t code; uint8_t flags; uint8_t flagset; uint8_t min_ttl; uint8_t allow_opts; uint8_t rt; uint8_t return_ttl; uint8_t tos; uint8_t set_tos; uint8_t anchor_relative; uint8_t anchor_wildcard; uint8_t flush; uint8_t prio; uint8_t set_prio[2]; struct { struct pf_addr addr; uint16_t port; } divert; }; TAILQ_HEAD(pfctl_rulequeue, pfctl_rule); struct pfctl_ruleset { struct { struct pfctl_rulequeue queues[2]; struct { struct pfctl_rulequeue *ptr; struct pfctl_rule **ptr_array; uint32_t rcount; uint32_t ticket; int open; } active, inactive; } rules[PF_RULESET_MAX]; struct pfctl_anchor *anchor; uint32_t tticket; int tables; int topen; }; RB_HEAD(pfctl_anchor_global, pfctl_anchor); RB_HEAD(pfctl_anchor_node, pfctl_anchor); struct pfctl_anchor { RB_ENTRY(pfctl_anchor) entry_global; RB_ENTRY(pfctl_anchor) entry_node; struct pfctl_anchor *parent; struct pfctl_anchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pfctl_ruleset ruleset; int refcnt; /* anchor rules */ int match; /* XXX: used for pfctl black magic */ }; RB_PROTOTYPE(pfctl_anchor_global, pfctl_anchor, entry_global, pf_anchor_compare); RB_PROTOTYPE(pfctl_anchor_node, pfctl_anchor, entry_node, pf_anchor_compare); struct pfctl_state_cmp { uint64_t id; uint32_t creatorid; uint8_t direction; }; struct pfctl_kill { struct pfctl_state_cmp cmp; sa_family_t af; int proto; struct pf_rule_addr src; struct pf_rule_addr dst; struct pf_rule_addr rt_addr; char ifname[IFNAMSIZ]; char label[PF_RULE_LABEL_SIZE]; bool kill_match; }; struct pfctl_state_peer { uint32_t seqlo; uint32_t seqhi; uint32_t seqdiff; uint8_t state; uint8_t wscale; }; struct pfctl_state_key { struct pf_addr addr[2]; uint16_t port[2]; sa_family_t af; uint8_t proto; }; struct pfctl_state { TAILQ_ENTRY(pfctl_state) entry; uint64_t id; uint32_t creatorid; uint8_t direction; struct pfctl_state_peer src; struct pfctl_state_peer dst; uint32_t rule; uint32_t anchor; uint32_t nat_rule; struct pf_addr rt_addr; struct pfctl_state_key key[2]; /* addresses stack and wire */ char ifname[IFNAMSIZ]; char orig_ifname[IFNAMSIZ]; uint64_t packets[2]; uint64_t bytes[2]; uint32_t creation; uint32_t expire; uint32_t pfsync_time; - uint8_t state_flags; + uint16_t state_flags; uint32_t sync_flags; }; TAILQ_HEAD(pfctl_statelist, pfctl_state); struct pfctl_states { struct pfctl_statelist states; size_t count; }; enum pfctl_syncookies_mode { PFCTL_SYNCOOKIES_NEVER, PFCTL_SYNCOOKIES_ALWAYS, PFCTL_SYNCOOKIES_ADAPTIVE }; extern const char* PFCTL_SYNCOOKIES_MODE_NAMES[]; struct pfctl_syncookies { enum pfctl_syncookies_mode mode; uint8_t highwater; /* Percent */ uint8_t lowwater; /* Percent */ }; struct pfctl_status* pfctl_get_status(int dev); void pfctl_free_status(struct pfctl_status *status); int pfctl_get_eth_rulesets_info(int dev, 
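/*
 * [Editorial sketch -- not part of this patch.]  The pfctl_table_*()
 * wrappers declared below map directly onto the DIOCR* table ioctls.
 * Hypothetical use of pfctl_table_add_addrs() to insert one IPv4 address
 * into a table named "badhosts" (pfr_table/pfr_addr field names are from
 * net/pfvar.h, not from this patch):
 *
 *	struct pfr_table tbl = { 0 };
 *	struct pfr_addr a = { 0 };
 *	int nadd = 0;
 *
 *	strlcpy(tbl.pfrt_name, "badhosts", sizeof(tbl.pfrt_name));
 *	a.pfra_af = AF_INET;
 *	a.pfra_net = 32;
 *	inet_pton(AF_INET, "192.0.2.1", &a.pfra_ip4addr);
 *	if (pfctl_table_add_addrs(dev, &tbl, &a, 1, &nadd, 0) != 0)
 *		err(1, "DIOCRADDADDRS");
 *	printf("added %d\n", nadd);
 */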
struct pfctl_eth_rulesets_info *ri, const char *path); int pfctl_get_eth_ruleset(int dev, const char *path, int nr, struct pfctl_eth_ruleset_info *ri); int pfctl_get_eth_rules_info(int dev, struct pfctl_eth_rules_info *rules, const char *path); int pfctl_get_eth_rule(int dev, uint32_t nr, uint32_t ticket, const char *path, struct pfctl_eth_rule *rule, bool clear, char *anchor_call); int pfctl_add_eth_rule(int dev, const struct pfctl_eth_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket); int pfctl_get_rules_info(int dev, struct pfctl_rules_info *rules, uint32_t ruleset, const char *path); int pfctl_get_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call); int pfctl_get_clear_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call, bool clear); int pfctl_add_rule(int dev, const struct pfctl_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket, uint32_t pool_ticket); int pfctl_set_keepcounters(int dev, bool keep); int pfctl_get_states(int dev, struct pfctl_states *states); void pfctl_free_states(struct pfctl_states *states); int pfctl_clear_states(int dev, const struct pfctl_kill *kill, unsigned int *killed); int pfctl_kill_states(int dev, const struct pfctl_kill *kill, unsigned int *killed); int pfctl_clear_rules(int dev, const char *anchorname); int pfctl_clear_nat(int dev, const char *anchorname); int pfctl_clear_eth_rules(int dev, const char *anchorname); int pfctl_set_syncookies(int dev, const struct pfctl_syncookies *s); int pfctl_get_syncookies(int dev, struct pfctl_syncookies *s); int pfctl_table_add_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *nadd, int flags); int pfctl_table_del_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *ndel, int flags); int pfctl_table_set_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *size2, int *nadd, int *ndel, int *nchange, int flags); int pfctl_table_get_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int *size, int flags); #endif diff --git a/sbin/pfctl/parse.y b/sbin/pfctl/parse.y index 6f9494828d53..e5629f9fcd5f 100644 --- a/sbin/pfctl/parse.y +++ b/sbin/pfctl/parse.y @@ -1,7089 +1,7162 @@ /* $OpenBSD: parse.y,v 1.554 2008/10/17 12:59:53 henning Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Markus Friedl. All rights reserved. * Copyright (c) 2001 Daniel Hartmeier. All rights reserved. * Copyright (c) 2001 Theo de Raadt. All rights reserved. * Copyright (c) 2002,2003 Henning Brauer. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ %{ #include __FBSDID("$FreeBSD$"); #define PFIOC_USE_LATEST #include #include #include #ifdef __FreeBSD__ #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pfctl_parser.h" #include "pfctl.h" static struct pfctl *pf = NULL; static int debug = 0; static int rulestate = 0; static u_int16_t returnicmpdefault = (ICMP_UNREACH << 8) | ICMP_UNREACH_PORT; static u_int16_t returnicmp6default = (ICMP6_DST_UNREACH << 8) | ICMP6_DST_UNREACH_NOPORT; static int blockpolicy = PFRULE_DROP; static int failpolicy = PFRULE_DROP; static int require_order = 1; static int default_statelock; static TAILQ_HEAD(files, file) files = TAILQ_HEAD_INITIALIZER(files); static struct file { TAILQ_ENTRY(file) entry; FILE *stream; char *name; int lineno; int errors; } *file; struct file *pushfile(const char *, int); int popfile(void); int check_file_secrecy(int, const char *); int yyparse(void); int yylex(void); int yyerror(const char *, ...); int kw_cmp(const void *, const void *); int lookup(char *); int lgetc(int); int lungetc(int); int findeol(void); static TAILQ_HEAD(symhead, sym) symhead = TAILQ_HEAD_INITIALIZER(symhead); struct sym { TAILQ_ENTRY(sym) entry; int used; int persist; char *nam; char *val; }; int symset(const char *, const char *, int); char *symget(const char *); int atoul(char *, u_long *); enum { PFCTL_STATE_NONE, PFCTL_STATE_OPTION, PFCTL_STATE_ETHER, PFCTL_STATE_SCRUB, PFCTL_STATE_QUEUE, PFCTL_STATE_NAT, PFCTL_STATE_FILTER }; struct node_etherproto { u_int16_t proto; struct node_etherproto *next; struct node_etherproto *tail; }; struct node_proto { u_int8_t proto; struct node_proto *next; struct node_proto *tail; }; struct node_port { u_int16_t port[2]; u_int8_t op; struct node_port *next; struct node_port *tail; }; struct node_uid { uid_t uid[2]; u_int8_t op; struct node_uid *next; struct node_uid *tail; }; struct node_gid { gid_t gid[2]; u_int8_t op; struct node_gid *next; struct node_gid *tail; }; struct node_icmp { u_int8_t code; u_int8_t type; u_int8_t proto; struct node_icmp *next; struct node_icmp *tail; }; enum { PF_STATE_OPT_MAX, PF_STATE_OPT_NOSYNC, PF_STATE_OPT_SRCTRACK, PF_STATE_OPT_MAX_SRC_STATES, PF_STATE_OPT_MAX_SRC_CONN, PF_STATE_OPT_MAX_SRC_CONN_RATE, PF_STATE_OPT_MAX_SRC_NODES, PF_STATE_OPT_OVERLOAD, PF_STATE_OPT_STATELOCK, PF_STATE_OPT_TIMEOUT, PF_STATE_OPT_SLOPPY, }; enum { PF_SRCTRACK_NONE, PF_SRCTRACK, PF_SRCTRACK_GLOBAL, PF_SRCTRACK_RULE }; struct node_state_opt { int type; union { u_int32_t max_states; u_int32_t max_src_states; u_int32_t max_src_conn; struct { u_int32_t limit; u_int32_t seconds; } max_src_conn_rate; struct { u_int8_t flush; char tblname[PF_TABLE_NAME_SIZE]; } overload; u_int32_t max_src_nodes; u_int8_t src_track; u_int32_t statelock; struct { int number; u_int32_t seconds; } timeout; } data; struct node_state_opt *next; struct 
node_state_opt *tail; }; struct peer { struct node_host *host; struct node_port *port; }; static struct node_queue { char queue[PF_QNAME_SIZE]; char parent[PF_QNAME_SIZE]; char ifname[IFNAMSIZ]; int scheduler; struct node_queue *next; struct node_queue *tail; } *queues = NULL; struct node_qassign { char *qname; char *pqname; }; static struct filter_opts { int marker; -#define FOM_FLAGS 0x01 -#define FOM_ICMP 0x02 -#define FOM_TOS 0x04 -#define FOM_KEEP 0x08 -#define FOM_SRCTRACK 0x10 +#define FOM_FLAGS 0x0001 +#define FOM_ICMP 0x0002 +#define FOM_TOS 0x0004 +#define FOM_KEEP 0x0008 +#define FOM_SRCTRACK 0x0010 +#define FOM_MINTTL 0x0020 +#define FOM_MAXMSS 0x0040 +#define FOM_AFTO 0x0080 /* not yet implemmented */ +#define FOM_SETTOS 0x0100 +#define FOM_SCRUB_TCP 0x0200 #define FOM_SETPRIO 0x0400 +#define FOM_ONCE 0x1000 /* not yet implemmented */ #define FOM_PRIO 0x2000 +#define FOM_SETDELAY 0x4000 +#define FOM_FRAGCACHE 0x8000 /* does not exist in OpenBSD */ struct node_uid *uid; struct node_gid *gid; struct { u_int8_t b1; u_int8_t b2; u_int16_t w; u_int16_t w2; } flags; struct node_icmp *icmpspec; u_int32_t tos; u_int32_t prob; u_int32_t ridentifier; struct { int action; struct node_state_opt *options; } keep; int fragment; int allowopts; char *label[PF_RULE_MAX_LABEL_COUNT]; int labelcount; struct node_qassign queues; char *tag; char *match_tag; u_int8_t match_tag_not; u_int16_t dnpipe; u_int16_t dnrpipe; u_int32_t free_flags; u_int rtableid; u_int8_t prio; u_int8_t set_prio[2]; struct { struct node_host *addr; u_int16_t port; } divert; + /* new-style scrub opts */ + int nodf; + int minttl; + int settos; + int randomid; + int max_mss; } filter_opts; static struct antispoof_opts { char *label[PF_RULE_MAX_LABEL_COUNT]; int labelcount; u_int32_t ridentifier; u_int rtableid; } antispoof_opts; static struct scrub_opts { int marker; -#define SOM_MINTTL 0x01 -#define SOM_MAXMSS 0x02 -#define SOM_FRAGCACHE 0x04 -#define SOM_SETTOS 0x08 int nodf; int minttl; int maxmss; int settos; int fragcache; int randomid; int reassemble_tcp; char *match_tag; u_int8_t match_tag_not; u_int rtableid; } scrub_opts; static struct queue_opts { int marker; #define QOM_BWSPEC 0x01 #define QOM_SCHEDULER 0x02 #define QOM_PRIORITY 0x04 #define QOM_TBRSIZE 0x08 #define QOM_QLIMIT 0x10 struct node_queue_bw queue_bwspec; struct node_queue_opt scheduler; int priority; unsigned int tbrsize; int qlimit; } queue_opts; static struct table_opts { int flags; int init_addr; struct node_tinithead init_nodes; } table_opts; static struct pool_opts { int marker; #define POM_TYPE 0x01 #define POM_STICKYADDRESS 0x02 u_int8_t opts; int type; int staticport; struct pf_poolhashkey *key; struct pf_mape_portset mape; } pool_opts; static struct codel_opts codel_opts; static struct node_hfsc_opts hfsc_opts; static struct node_fairq_opts fairq_opts; static struct node_state_opt *keep_state_defaults = NULL; static struct pfctl_watermarks syncookie_opts; int disallow_table(struct node_host *, const char *); int disallow_urpf_failed(struct node_host *, const char *); int disallow_alias(struct node_host *, const char *); int rule_consistent(struct pfctl_rule *, int); int filter_consistent(struct pfctl_rule *, int); int nat_consistent(struct pfctl_rule *); int rdr_consistent(struct pfctl_rule *); int process_tabledef(char *, struct table_opts *); void expand_label_str(char *, size_t, const char *, const char *); void expand_label_if(const char *, char *, size_t, const char *); void expand_label_addr(const char *, char *, size_t, u_int8_t, struct 
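/*
 * [Editorial sketch -- not part of this patch.]  The FOM_* markers above
 * are rewritten as 16-bit constants to make room for the new scrub-style
 * option bits (FOM_MINTTL, FOM_MAXMSS, FOM_SETTOS, FOM_SCRUB_TCP,
 * FOM_SETDELAY, FOM_FRAGCACHE), matching the new nodf/minttl/settos/
 * randomid/max_mss fields added to filter_opts.  They are tested as a
 * plain bitmask when the rule is assembled, along the lines of:
 *
 *	if (filter_opts.marker & FOM_SETTOS)
 *		r.set_tos = filter_opts.settos;
 *	if (filter_opts.marker & FOM_MINTTL)
 *		r.min_ttl = filter_opts.minttl;
 *	if (filter_opts.marker & FOM_MAXMSS)
 *		r.max_mss = filter_opts.max_mss;
 */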
pf_rule_addr *); void expand_label_port(const char *, char *, size_t, struct pf_rule_addr *); void expand_label_proto(const char *, char *, size_t, u_int8_t); void expand_label_nr(const char *, char *, size_t, struct pfctl_rule *); void expand_eth_rule(struct pfctl_eth_rule *, struct node_if *, struct node_etherproto *, struct node_mac *, struct node_mac *, struct node_host *, struct node_host *, const char *, const char *); void expand_rule(struct pfctl_rule *, struct node_if *, struct node_host *, struct node_proto *, struct node_os *, struct node_host *, struct node_port *, struct node_host *, struct node_port *, struct node_uid *, struct node_gid *, struct node_icmp *, const char *); int expand_altq(struct pf_altq *, struct node_if *, struct node_queue *, struct node_queue_bw bwspec, struct node_queue_opt *); int expand_queue(struct pf_altq *, struct node_if *, struct node_queue *, struct node_queue_bw, struct node_queue_opt *); int expand_skip_interface(struct node_if *); int check_rulestate(int); int getservice(char *); int rule_label(struct pfctl_rule *, char *s[PF_RULE_MAX_LABEL_COUNT]); int rt_tableid_max(void); void mv_rules(struct pfctl_ruleset *, struct pfctl_ruleset *); void mv_eth_rules(struct pfctl_eth_ruleset *, struct pfctl_eth_ruleset *); void decide_address_family(struct node_host *, sa_family_t *); void remove_invalid_hosts(struct node_host **, sa_family_t *); int invalid_redirect(struct node_host *, sa_family_t); u_int16_t parseicmpspec(char *, sa_family_t); int kw_casecmp(const void *, const void *); int map_tos(char *string, int *); struct node_mac* node_mac_from_string(const char *); struct node_mac* node_mac_from_string_masklen(const char *, int); struct node_mac* node_mac_from_string_mask(const char *, const char *); static TAILQ_HEAD(loadanchorshead, loadanchors) loadanchorshead = TAILQ_HEAD_INITIALIZER(loadanchorshead); struct loadanchors { TAILQ_ENTRY(loadanchors) entries; char *anchorname; char *filename; }; typedef struct { union { int64_t number; double probability; int i; char *string; u_int rtableid; struct { u_int8_t b1; u_int8_t b2; u_int16_t w; u_int16_t w2; } b; struct range { int a; int b; int t; } range; struct node_if *interface; struct node_proto *proto; struct node_etherproto *etherproto; struct node_icmp *icmp; struct node_host *host; struct node_os *os; struct node_port *port; struct node_uid *uid; struct node_gid *gid; struct node_state_opt *state_opt; struct peer peer; struct { struct peer src, dst; struct node_os *src_os; } fromto; struct { struct node_mac *src; struct node_mac *dst; } etherfromto; struct node_mac *mac; struct { struct node_mac *mac; } etheraddr; char *bridge_to; struct { struct node_host *host; u_int8_t rt; u_int8_t pool_opts; sa_family_t af; struct pf_poolhashkey *key; } route; struct redirection { struct node_host *host; struct range rport; } *redirection; struct { int action; struct node_state_opt *options; } keep_state; struct { u_int8_t log; u_int8_t logif; u_int8_t quick; } logquick; struct { int neg; char *name; } tagged; struct pf_poolhashkey *hashkey; struct node_queue *queue; struct node_queue_opt queue_options; struct node_queue_bw queue_bwspec; struct node_qassign qassign; struct filter_opts filter_opts; struct antispoof_opts antispoof_opts; struct queue_opts queue_opts; struct scrub_opts scrub_opts; struct table_opts table_opts; struct pool_opts pool_opts; struct node_hfsc_opts hfsc_opts; struct node_fairq_opts fairq_opts; struct codel_opts codel_opts; struct pfctl_watermarks *watermarks; } v; int lineno; } 
YYSTYPE; #define PPORT_RANGE 1 #define PPORT_STAR 2 int parseport(char *, struct range *r, int); #define DYNIF_MULTIADDR(addr) ((addr).type == PF_ADDR_DYNIFTL && \ (!((addr).iflags & PFI_AFLAG_NOALIAS) || \ !isdigit((addr).v.ifname[strlen((addr).v.ifname)-1]))) %} %token PASS BLOCK MATCH SCRUB RETURN IN OS OUT LOG QUICK ON FROM TO FLAGS %token RETURNRST RETURNICMP RETURNICMP6 PROTO INET INET6 ALL ANY ICMPTYPE %token ICMP6TYPE CODE KEEP MODULATE STATE PORT RDR NAT BINAT ARROW NODF %token MINTTL ERROR ALLOWOPTS FASTROUTE FILENAME ROUTETO DUPTO REPLYTO NO LABEL %token NOROUTE URPFFAILED FRAGMENT USER GROUP MAXMSS MAXIMUM TTL TOS DROP TABLE %token REASSEMBLE ANCHOR NATANCHOR RDRANCHOR BINATANCHOR %token SET OPTIMIZATION TIMEOUT LIMIT LOGINTERFACE BLOCKPOLICY FAILPOLICY %token RANDOMID REQUIREORDER SYNPROXY FINGERPRINTS NOSYNC DEBUG SKIP HOSTID %token ANTISPOOF FOR INCLUDE KEEPCOUNTERS SYNCOOKIES L3 %token ETHER %token BITMASK RANDOM SOURCEHASH ROUNDROBIN STATICPORT PROBABILITY MAPEPORTSET %token ALTQ CBQ CODEL PRIQ HFSC FAIRQ BANDWIDTH TBRSIZE LINKSHARE REALTIME %token UPPERLIMIT QUEUE PRIORITY QLIMIT HOGS BUCKETS RTABLE TARGET INTERVAL %token DNPIPE DNQUEUE RIDENTIFIER %token LOAD RULESET_OPTIMIZATION PRIO %token STICKYADDRESS MAXSRCSTATES MAXSRCNODES SOURCETRACK GLOBAL RULE %token MAXSRCCONN MAXSRCCONNRATE OVERLOAD FLUSH SLOPPY %token TAGGED TAG IFBOUND FLOATING STATEPOLICY STATEDEFAULTS ROUTE SETTOS %token DIVERTTO DIVERTREPLY BRIDGE_TO %token <v.string> STRING %token <v.number> NUMBER %token <v.i> PORTBINARY %type <v.interface> interface if_list if_item_not if_item %type <v.number> number icmptype icmp6type uid gid -%type <v.number> tos not yesno +%type <v.number> tos not yesno optnodf %type <v.probability> probability %type <v.i> no dir af fragcache optimizer syncookie_val %type <v.i> sourcetrack flush unaryop statelock %type <v.i> etherprotoval %type <v.b> action nataction natpasslog scrubaction %type <v.b> flags flag blockspec prio %type <v.range> portplain portstar portrange %type <v.hashkey> hashkey %type <v.proto> proto proto_list proto_item %type <v.number> protoval %type <v.icmp> icmpspec %type <v.icmp> icmp_list icmp_item %type <v.icmp> icmp6_list icmp6_item %type <v.number> reticmpspec reticmp6spec %type <v.fromto> fromto l3fromto %type <v.peer> ipportspec from to %type <v.host> ipspec toipspec xhost host dynaddr host_list %type <v.host> redir_host_list redirspec %type <v.host> route_host route_host_list routespec %type <v.os> os xos os_list %type <v.port> portspec port_list port_item %type <v.uid> uids uid_list uid_item %type <v.gid> gids gid_list gid_item %type <v.route> route %type <v.redirection> redirection redirpool %type <v.string> label stringall tag anchorname %type <v.string> string varstring numberstring %type <v.keep_state> keep %type <v.state_opt> state_opt_spec state_opt_list state_opt_item %type <v.logquick> logquick quick log logopts logopt %type <v.interface> antispoof_ifspc antispoof_iflst antispoof_if %type <v.qassign> qname etherqname %type <v.queue> qassign qassign_list qassign_item %type <v.queue_options> scheduler %type <v.number> cbqflags_list cbqflags_item %type <v.number> priqflags_list priqflags_item %type <v.hfsc_opts> hfscopts_list hfscopts_item hfsc_opts %type <v.fairq_opts> fairqopts_list fairqopts_item fairq_opts %type <v.codel_opts> codelopts_list codelopts_item codel_opts %type <v.queue_bwspec> bandwidth %type <v.filter_opts> filter_opts filter_opt filter_opts_l etherfilter_opts etherfilter_opt etherfilter_opts_l %type <v.filter_opts> filter_sets filter_set filter_sets_l %type <v.antispoof_opts> antispoof_opts antispoof_opt antispoof_opts_l %type <v.queue_opts> queue_opts queue_opt queue_opts_l %type <v.scrub_opts> scrub_opts scrub_opt scrub_opts_l %type <v.table_opts> table_opts table_opt table_opts_l %type <v.pool_opts> pool_opts pool_opt pool_opts_l %type <v.tagged> tagged %type <v.rtableid> rtable %type <v.watermarks> syncookie_opts %type <v.etherproto> etherproto etherproto_list etherproto_item %type <v.etherfromto> etherfromto %type <v.etheraddr> etherfrom etherto %type <v.bridge_to> bridge %type <v.mac> xmac mac mac_list macspec %% ruleset : /* empty */ | ruleset include '\n' | ruleset '\n' | ruleset option '\n' | ruleset etherrule '\n' | ruleset
etheranchorrule '\n' | ruleset scrubrule '\n' | ruleset natrule '\n' | ruleset binatrule '\n' | ruleset pfrule '\n' | ruleset anchorrule '\n' | ruleset loadrule '\n' | ruleset altqif '\n' | ruleset queuespec '\n' | ruleset varset '\n' | ruleset antispoof '\n' | ruleset tabledef '\n' | '{' fakeanchor '}' '\n'; | ruleset error '\n' { file->errors++; } ; include : INCLUDE STRING { struct file *nfile; if ((nfile = pushfile($2, 0)) == NULL) { yyerror("failed to include file %s", $2); free($2); YYERROR; } free($2); file = nfile; lungetc('\n'); } ; /* * apply to previously specified rule: must be careful to note * what that is: pf or nat or binat or rdr */ fakeanchor : fakeanchor '\n' | fakeanchor anchorrule '\n' | fakeanchor binatrule '\n' | fakeanchor natrule '\n' | fakeanchor pfrule '\n' | fakeanchor error '\n' ; optimizer : string { if (!strcmp($1, "none")) $$ = 0; else if (!strcmp($1, "basic")) $$ = PF_OPTIMIZE_BASIC; else if (!strcmp($1, "profile")) $$ = PF_OPTIMIZE_BASIC | PF_OPTIMIZE_PROFILE; else { yyerror("unknown ruleset-optimization %s", $1); YYERROR; } } ; -option : SET OPTIMIZATION STRING { +optnodf : /* empty */ { $$ = 0; } + | NODF { $$ = 1; } + ; + +option : SET REASSEMBLE yesno optnodf { + if (check_rulestate(PFCTL_STATE_OPTION)) + YYERROR; + pfctl_set_reassembly(pf, $3, $4); + } + | SET OPTIMIZATION STRING { if (check_rulestate(PFCTL_STATE_OPTION)) { free($3); YYERROR; } if (pfctl_set_optimization(pf, $3) != 0) { yyerror("unknown optimization %s", $3); free($3); YYERROR; } free($3); } | SET RULESET_OPTIMIZATION optimizer { if (!(pf->opts & PF_OPT_OPTIMIZE)) { pf->opts |= PF_OPT_OPTIMIZE; pf->optimize = $3; } } | SET TIMEOUT timeout_spec | SET TIMEOUT '{' optnl timeout_list '}' | SET LIMIT limit_spec | SET LIMIT '{' optnl limit_list '}' | SET LOGINTERFACE stringall { if (check_rulestate(PFCTL_STATE_OPTION)) { free($3); YYERROR; } if (pfctl_set_logif(pf, $3) != 0) { yyerror("error setting loginterface %s", $3); free($3); YYERROR; } free($3); } | SET HOSTID number { if ($3 == 0 || $3 > UINT_MAX) { yyerror("hostid must be non-zero"); YYERROR; } if (pfctl_set_hostid(pf, $3) != 0) { yyerror("error setting hostid %08x", $3); YYERROR; } } | SET BLOCKPOLICY DROP { if (pf->opts & PF_OPT_VERBOSE) printf("set block-policy drop\n"); if (check_rulestate(PFCTL_STATE_OPTION)) YYERROR; blockpolicy = PFRULE_DROP; } | SET BLOCKPOLICY RETURN { if (pf->opts & PF_OPT_VERBOSE) printf("set block-policy return\n"); if (check_rulestate(PFCTL_STATE_OPTION)) YYERROR; blockpolicy = PFRULE_RETURN; } | SET FAILPOLICY DROP { if (pf->opts & PF_OPT_VERBOSE) printf("set fail-policy drop\n"); if (check_rulestate(PFCTL_STATE_OPTION)) YYERROR; failpolicy = PFRULE_DROP; } | SET FAILPOLICY RETURN { if (pf->opts & PF_OPT_VERBOSE) printf("set fail-policy return\n"); if (check_rulestate(PFCTL_STATE_OPTION)) YYERROR; failpolicy = PFRULE_RETURN; } | SET REQUIREORDER yesno { if (pf->opts & PF_OPT_VERBOSE) printf("set require-order %s\n", $3 == 1 ?
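/*
 * Usage sketch for the "SET REASSEMBLE yesno optnodf" production added
 * above (illustrative, not part of this diff): pf.conf now accepts
 *
 *	set reassemble yes
 *	set reassemble yes no-df
 *
 * assuming the lexer maps "no-df" to the NODF token as it does for the
 * scrub option; both values are handed to the kernel via
 * pfctl_set_reassembly(pf, $3, $4).
 */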
"yes" : "no"); require_order = $3; } | SET FINGERPRINTS STRING { if (pf->opts & PF_OPT_VERBOSE) printf("set fingerprints \"%s\"\n", $3); if (check_rulestate(PFCTL_STATE_OPTION)) { free($3); YYERROR; } if (!pf->anchor->name[0]) { if (pfctl_file_fingerprints(pf->dev, pf->opts, $3)) { yyerror("error loading " "fingerprints %s", $3); free($3); YYERROR; } } free($3); } | SET STATEPOLICY statelock { if (pf->opts & PF_OPT_VERBOSE) switch ($3) { case 0: printf("set state-policy floating\n"); break; case PFRULE_IFBOUND: printf("set state-policy if-bound\n"); break; } default_statelock = $3; } | SET DEBUG STRING { if (check_rulestate(PFCTL_STATE_OPTION)) { free($3); YYERROR; } if (pfctl_set_debug(pf, $3) != 0) { yyerror("error setting debuglevel %s", $3); free($3); YYERROR; } free($3); } | SET SKIP interface { if (expand_skip_interface($3) != 0) { yyerror("error setting skip interface(s)"); YYERROR; } } | SET STATEDEFAULTS state_opt_list { if (keep_state_defaults != NULL) { yyerror("cannot redefine state-defaults"); YYERROR; } keep_state_defaults = $3; } | SET KEEPCOUNTERS { pf->keep_counters = true; } | SET SYNCOOKIES syncookie_val syncookie_opts { if (pfctl_cfg_syncookies(pf, $3, $4)) { yyerror("error setting syncookies"); YYERROR; } } ; syncookie_val : STRING { if (!strcmp($1, "never")) $$ = PFCTL_SYNCOOKIES_NEVER; else if (!strcmp($1, "adaptive")) $$ = PFCTL_SYNCOOKIES_ADAPTIVE; else if (!strcmp($1, "always")) $$ = PFCTL_SYNCOOKIES_ALWAYS; else { yyerror("illegal value for syncookies"); YYERROR; } } ; syncookie_opts : /* empty */ { $$ = NULL; } | { memset(&syncookie_opts, 0, sizeof(syncookie_opts)); } '(' syncookie_opt_l ')' { $$ = &syncookie_opts; } ; syncookie_opt_l : syncookie_opt_l comma syncookie_opt | syncookie_opt ; syncookie_opt : STRING STRING { double val; char *cp; val = strtod($2, &cp); if (cp == NULL || strcmp(cp, "%")) YYERROR; if (val <= 0 || val > 100) { yyerror("illegal percentage value"); YYERROR; } if (!strcmp($1, "start")) { syncookie_opts.hi = val; } else if (!strcmp($1, "end")) { syncookie_opts.lo = val; } else { yyerror("illegal syncookie option"); YYERROR; } } ; stringall : STRING { $$ = $1; } | ALL { if (($$ = strdup("all")) == NULL) { err(1, "stringall: strdup"); } } ; string : STRING string { if (asprintf(&$$, "%s %s", $1, $2) == -1) err(1, "string: asprintf"); free($1); free($2); } | STRING ; varstring : numberstring varstring { if (asprintf(&$$, "%s %s", $1, $2) == -1) err(1, "string: asprintf"); free($1); free($2); } | numberstring ; numberstring : NUMBER { char *s; if (asprintf(&s, "%lld", (long long)$1) == -1) { yyerror("string: asprintf"); YYERROR; } $$ = s; } | STRING ; varset : STRING '=' varstring { char *s = $1; if (pf->opts & PF_OPT_VERBOSE) printf("%s = \"%s\"\n", $1, $3); while (*s++) { if (isspace((unsigned char)*s)) { yyerror("macro name cannot contain " "whitespace"); YYERROR; } } if (symset($1, $3, 0) == -1) err(1, "cannot store variable %s", $1); free($1); free($3); } ; anchorname : STRING { $$ = $1; } | /* empty */ { $$ = NULL; } ; pfa_anchorlist : /* empty */ | pfa_anchorlist '\n' | pfa_anchorlist pfrule '\n' | pfa_anchorlist anchorrule '\n' ; pfa_anchor : '{' { char ta[PF_ANCHOR_NAME_SIZE]; struct pfctl_ruleset *rs; /* stepping into a brace anchor */ pf->asd++; pf->bn++; /* * Anchor contents are parsed before the anchor rule * production completes, so we don't know the real * location yet. Create a holding ruleset in the root; * contents will be moved afterwards. 
*/ snprintf(ta, PF_ANCHOR_NAME_SIZE, "_%d", pf->bn); rs = pf_find_or_create_ruleset(ta); if (rs == NULL) err(1, "pfa_anchor: pf_find_or_create_ruleset"); pf->astack[pf->asd] = rs->anchor; pf->anchor = rs->anchor; } '\n' pfa_anchorlist '}' { pf->alast = pf->anchor; pf->asd--; pf->anchor = pf->astack[pf->asd]; } | /* empty */ ; anchorrule : ANCHOR anchorname dir quick interface af proto fromto filter_opts pfa_anchor { struct pfctl_rule r; struct node_proto *proto; if (check_rulestate(PFCTL_STATE_FILTER)) { if ($2) free($2); YYERROR; } if ($2 && ($2[0] == '_' || strstr($2, "/_") != NULL)) { free($2); yyerror("anchor names beginning with '_' " "are reserved for internal use"); YYERROR; } memset(&r, 0, sizeof(r)); if (pf->astack[pf->asd + 1]) { if ($2 && strchr($2, '/') != NULL) { free($2); yyerror("anchor paths containing '/' " "cannot be used for inline anchors."); YYERROR; } /* Move inline rules into relative location. */ pfctl_anchor_setup(&r, &pf->astack[pf->asd]->ruleset, $2 ? $2 : pf->alast->name); if (r.anchor == NULL) err(1, "anchorrule: unable to " "create ruleset"); if (pf->alast != r.anchor) { if (r.anchor->match) { yyerror("inline anchor '%s' " "already exists", r.anchor->name); YYERROR; } mv_rules(&pf->alast->ruleset, &r.anchor->ruleset); } pf_remove_if_empty_ruleset(&pf->alast->ruleset); pf->alast = r.anchor; } else { if (!$2) { yyerror("anchors without explicit " "rules must specify a name"); YYERROR; } } r.direction = $3; r.quick = $4.quick; r.af = $6; r.prob = $9.prob; r.rtableid = $9.rtableid; r.ridentifier = $9.ridentifier; if ($9.tag) if (strlcpy(r.tagname, $9.tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } if ($9.match_tag) if (strlcpy(r.match_tagname, $9.match_tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } r.match_tag_not = $9.match_tag_not; if (rule_label(&r, $9.label)) YYERROR; for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) free($9.label[i]); r.flags = $9.flags.b1; r.flagset = $9.flags.b2; if (($9.flags.b1 & $9.flags.b2) != $9.flags.b1) { yyerror("flags always false"); YYERROR; } if ($9.flags.b1 || $9.flags.b2 || $8.src_os) { for (proto = $7; proto != NULL && proto->proto != IPPROTO_TCP; proto = proto->next) ; /* nothing */ if (proto == NULL && $7 != NULL) { if ($9.flags.b1 || $9.flags.b2) yyerror( "flags only apply to tcp"); if ($8.src_os) yyerror( "OS fingerprinting only " "applies to tcp"); YYERROR; } } r.tos = $9.tos; if ($9.keep.action) { yyerror("cannot specify state handling " "on anchors"); YYERROR; } if ($9.match_tag) if (strlcpy(r.match_tagname, $9.match_tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } r.match_tag_not = $9.match_tag_not; if ($9.marker & FOM_PRIO) { if ($9.prio == 0) r.prio = PF_PRIO_ZERO; else r.prio = $9.prio; } if ($9.marker & FOM_SETPRIO) { r.set_prio[0] = $9.set_prio[0]; r.set_prio[1] = $9.set_prio[1]; r.scrub_flags |= PFSTATE_SETPRIO; } decide_address_family($8.src.host, &r.af); decide_address_family($8.dst.host, &r.af); expand_rule(&r, $5, NULL, $7, $8.src_os, $8.src.host, $8.src.port, $8.dst.host, $8.dst.port, $9.uid, $9.gid, $9.icmpspec, pf->astack[pf->asd + 1] ? 
pf->alast->name : $2); free($2); pf->astack[pf->asd + 1] = NULL; } | NATANCHOR string interface af proto fromto rtable { struct pfctl_rule r; if (check_rulestate(PFCTL_STATE_NAT)) { free($2); YYERROR; } memset(&r, 0, sizeof(r)); r.action = PF_NAT; r.af = $4; r.rtableid = $7; decide_address_family($6.src.host, &r.af); decide_address_family($6.dst.host, &r.af); expand_rule(&r, $3, NULL, $5, $6.src_os, $6.src.host, $6.src.port, $6.dst.host, $6.dst.port, 0, 0, 0, $2); free($2); } | RDRANCHOR string interface af proto fromto rtable { struct pfctl_rule r; if (check_rulestate(PFCTL_STATE_NAT)) { free($2); YYERROR; } memset(&r, 0, sizeof(r)); r.action = PF_RDR; r.af = $4; r.rtableid = $7; decide_address_family($6.src.host, &r.af); decide_address_family($6.dst.host, &r.af); if ($6.src.port != NULL) { yyerror("source port parameter not supported" " in rdr-anchor"); YYERROR; } if ($6.dst.port != NULL) { if ($6.dst.port->next != NULL) { yyerror("destination port list " "expansion not supported in " "rdr-anchor"); YYERROR; } else if ($6.dst.port->op != PF_OP_EQ) { yyerror("destination port operators" " not supported in rdr-anchor"); YYERROR; } r.dst.port[0] = $6.dst.port->port[0]; r.dst.port[1] = $6.dst.port->port[1]; r.dst.port_op = $6.dst.port->op; } expand_rule(&r, $3, NULL, $5, $6.src_os, $6.src.host, $6.src.port, $6.dst.host, $6.dst.port, 0, 0, 0, $2); free($2); } | BINATANCHOR string interface af proto fromto rtable { struct pfctl_rule r; if (check_rulestate(PFCTL_STATE_NAT)) { free($2); YYERROR; } memset(&r, 0, sizeof(r)); r.action = PF_BINAT; r.af = $4; r.rtableid = $7; if ($5 != NULL) { if ($5->next != NULL) { yyerror("proto list expansion" " not supported in binat-anchor"); YYERROR; } r.proto = $5->proto; free($5); } if ($6.src.host != NULL || $6.src.port != NULL || $6.dst.host != NULL || $6.dst.port != NULL) { yyerror("fromto parameter not supported" " in binat-anchor"); YYERROR; } decide_address_family($6.src.host, &r.af); decide_address_family($6.dst.host, &r.af); pfctl_append_rule(pf, &r, $2); free($2); } ; loadrule : LOAD ANCHOR string FROM string { struct loadanchors *loadanchor; if (strlen(pf->anchor->name) + 1 + strlen($3) >= MAXPATHLEN) { yyerror("anchorname %s too long, max %u\n", $3, MAXPATHLEN - 1); free($3); YYERROR; } loadanchor = calloc(1, sizeof(struct loadanchors)); if (loadanchor == NULL) err(1, "loadrule: calloc"); if ((loadanchor->anchorname = malloc(MAXPATHLEN)) == NULL) err(1, "loadrule: malloc"); if (pf->anchor->name[0]) snprintf(loadanchor->anchorname, MAXPATHLEN, "%s/%s", pf->anchor->name, $3); else strlcpy(loadanchor->anchorname, $3, MAXPATHLEN); if ((loadanchor->filename = strdup($5)) == NULL) err(1, "loadrule: strdup"); TAILQ_INSERT_TAIL(&loadanchorshead, loadanchor, entries); free($3); free($5); }; scrubaction : no SCRUB { $$.b2 = $$.w = 0; if ($1) $$.b1 = PF_NOSCRUB; else $$.b1 = PF_SCRUB; } ; etherrule : ETHER action dir quick interface bridge etherproto etherfromto l3fromto etherfilter_opts { struct pfctl_eth_rule r; bzero(&r, sizeof(r)); if (check_rulestate(PFCTL_STATE_ETHER)) YYERROR; r.action = $2.b1; r.direction = $3; r.quick = $4.quick; if ($10.tag != NULL) memcpy(&r.tagname, $10.tag, sizeof(r.tagname)); if ($10.match_tag) if (strlcpy(r.match_tagname, $10.match_tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } r.match_tag_not = $10.match_tag_not; if ($10.queues.qname != NULL) memcpy(&r.qname, $10.queues.qname, sizeof(r.qname)); r.dnpipe = $10.dnpipe; r.dnflags = $10.free_flags; 
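/*
 * Illustrative pf.conf lines accepted by the "etherrule" production
 * above (a sketch; option spellings assumed from the grammar, see
 * pf.conf(5) for the authoritative syntax):
 *
 *	ether block in on em0 from 00:11:22:33:44:55
 *	ether pass on em0 l3 from 192.0.2.0/24 tag WEB
 *
 * The optional l3fromto part restricts matching by inner IP address or
 * table; tag/dnpipe/dnqueue behave as in the IP-level filter rules.
 */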
expand_eth_rule(&r, $5, $7, $8.src, $8.dst, $9.src.host, $9.dst.host, $6, ""); } ; etherpfa_anchorlist : /* empty */ | etherpfa_anchorlist '\n' | etherpfa_anchorlist etherrule '\n' | etherpfa_anchorlist etheranchorrule '\n' ; etherpfa_anchor : '{' { char ta[PF_ANCHOR_NAME_SIZE]; struct pfctl_eth_ruleset *rs; /* stepping into a brace anchor */ pf->asd++; pf->bn++; /* create a holding ruleset in the root */ snprintf(ta, PF_ANCHOR_NAME_SIZE, "_%d", pf->bn); rs = pf_find_or_create_eth_ruleset(ta); if (rs == NULL) err(1, "etherpfa_anchor: pf_find_or_create_eth_ruleset"); pf->eastack[pf->asd] = rs->anchor; pf->eanchor = rs->anchor; } '\n' etherpfa_anchorlist '}' { pf->ealast = pf->eanchor; pf->asd--; pf->eanchor = pf->eastack[pf->asd]; } | /* empty */ ; etheranchorrule : ETHER ANCHOR anchorname dir quick interface etherproto etherfromto l3fromto etherpfa_anchor { struct pfctl_eth_rule r; if (check_rulestate(PFCTL_STATE_ETHER)) { free($3); YYERROR; } if ($3 && ($3[0] == '_' || strstr($3, "/_") != NULL)) { free($3); yyerror("anchor names beginning with '_' " "are reserved for internal use"); YYERROR; } memset(&r, 0, sizeof(r)); if (pf->eastack[pf->asd + 1]) { if ($3 && strchr($3, '/') != NULL) { free($3); yyerror("anchor paths containing '/' " "cannot be used for inline anchors."); YYERROR; } /* Move inline rules into relative location. */ pfctl_eth_anchor_setup(pf, &r, &pf->eastack[pf->asd]->ruleset, $3 ? $3 : pf->ealast->name); if (r.anchor == NULL) err(1, "etheranchorrule: unable to " "create ruleset"); if (pf->ealast != r.anchor) { if (r.anchor->match) { yyerror("inline anchor '%s' " "already exists", r.anchor->name); YYERROR; } mv_eth_rules(&pf->ealast->ruleset, &r.anchor->ruleset); } pf_remove_if_empty_eth_ruleset(&pf->ealast->ruleset); pf->ealast = r.anchor; } else { if (!$3) { yyerror("anchors without explicit " "rules must specify a name"); YYERROR; } } r.direction = $4; r.quick = $5.quick; expand_eth_rule(&r, $6, $7, $8.src, $8.dst, $9.src.host, $9.dst.host, NULL, pf->eastack[pf->asd + 1] ?
pf->ealast->name : $3); free($3); pf->eastack[pf->asd + 1] = NULL; } ; etherfilter_opts : { bzero(&filter_opts, sizeof filter_opts); } etherfilter_opts_l { $$ = filter_opts; } | /* empty */ { bzero(&filter_opts, sizeof filter_opts); $$ = filter_opts; } ; etherfilter_opts_l : etherfilter_opts_l etherfilter_opt | etherfilter_opt etherfilter_opt : etherqname { if (filter_opts.queues.qname) { yyerror("queue cannot be redefined"); YYERROR; } filter_opts.queues = $1; } | TAG string { filter_opts.tag = $2; } | not TAGGED string { filter_opts.match_tag = $3; filter_opts.match_tag_not = $1; } | DNPIPE number { filter_opts.dnpipe = $2; filter_opts.free_flags |= PFRULE_DN_IS_PIPE; } | DNQUEUE number { filter_opts.dnpipe = $2; filter_opts.free_flags |= PFRULE_DN_IS_QUEUE; } ; bridge : /* empty */ { $$ = NULL; } | BRIDGE_TO STRING { $$ = strdup($2); } ; scrubrule : scrubaction dir logquick interface af proto fromto scrub_opts { struct pfctl_rule r; if (check_rulestate(PFCTL_STATE_SCRUB)) YYERROR; memset(&r, 0, sizeof(r)); r.action = $1.b1; r.direction = $2; r.log = $3.log; r.logif = $3.logif; if ($3.quick) { yyerror("scrub rules do not support 'quick'"); YYERROR; } r.af = $5; if ($8.nodf) r.rule_flag |= PFRULE_NODF; if ($8.randomid) r.rule_flag |= PFRULE_RANDOMID; if ($8.reassemble_tcp) { if (r.direction != PF_INOUT) { yyerror("reassemble tcp rules can not " "specify direction"); YYERROR; } r.rule_flag |= PFRULE_REASSEMBLE_TCP; } if ($8.minttl) r.min_ttl = $8.minttl; if ($8.maxmss) r.max_mss = $8.maxmss; - if ($8.marker & SOM_SETTOS) { + if ($8.marker & FOM_SETTOS) { r.rule_flag |= PFRULE_SET_TOS; r.set_tos = $8.settos; } if ($8.fragcache) r.rule_flag |= $8.fragcache; if ($8.match_tag) if (strlcpy(r.match_tagname, $8.match_tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } r.match_tag_not = $8.match_tag_not; r.rtableid = $8.rtableid; expand_rule(&r, $4, NULL, $6, $7.src_os, $7.src.host, $7.src.port, $7.dst.host, $7.dst.port, NULL, NULL, NULL, ""); } ; scrub_opts : { bzero(&scrub_opts, sizeof scrub_opts); scrub_opts.rtableid = -1; } scrub_opts_l { $$ = scrub_opts; } | /* empty */ { bzero(&scrub_opts, sizeof scrub_opts); scrub_opts.rtableid = -1; $$ = scrub_opts; } ; -scrub_opts_l : scrub_opts_l scrub_opt +scrub_opts_l : scrub_opts_l comma scrub_opt | scrub_opt ; scrub_opt : NODF { if (scrub_opts.nodf) { yyerror("no-df cannot be respecified"); YYERROR; } scrub_opts.nodf = 1; } | MINTTL NUMBER { - if (scrub_opts.marker & SOM_MINTTL) { + if (scrub_opts.marker & FOM_MINTTL) { yyerror("min-ttl cannot be respecified"); YYERROR; } if ($2 < 0 || $2 > 255) { yyerror("illegal min-ttl value %d", $2); YYERROR; } - scrub_opts.marker |= SOM_MINTTL; + scrub_opts.marker |= FOM_MINTTL; scrub_opts.minttl = $2; } | MAXMSS NUMBER { - if (scrub_opts.marker & SOM_MAXMSS) { + if (scrub_opts.marker & FOM_MAXMSS) { yyerror("max-mss cannot be respecified"); YYERROR; } if ($2 < 0 || $2 > 65535) { yyerror("illegal max-mss value %d", $2); YYERROR; } - scrub_opts.marker |= SOM_MAXMSS; + scrub_opts.marker |= FOM_MAXMSS; scrub_opts.maxmss = $2; } | SETTOS tos { - if (scrub_opts.marker & SOM_SETTOS) { + if (scrub_opts.marker & FOM_SETTOS) { yyerror("set-tos cannot be respecified"); YYERROR; } - scrub_opts.marker |= SOM_SETTOS; + scrub_opts.marker |= FOM_SETTOS; scrub_opts.settos = $2; } | fragcache { - if (scrub_opts.marker & SOM_FRAGCACHE) { + if (scrub_opts.marker & FOM_FRAGCACHE) { yyerror("fragcache cannot be respecified"); YYERROR; } - scrub_opts.marker |= 
SOM_FRAGCACHE; + scrub_opts.marker |= FOM_FRAGCACHE; scrub_opts.fragcache = $1; } | REASSEMBLE STRING { if (strcasecmp($2, "tcp") != 0) { yyerror("scrub reassemble supports only tcp, " "not '%s'", $2); free($2); YYERROR; } free($2); if (scrub_opts.reassemble_tcp) { yyerror("reassemble tcp cannot be respecified"); YYERROR; } scrub_opts.reassemble_tcp = 1; } | RANDOMID { if (scrub_opts.randomid) { yyerror("random-id cannot be respecified"); YYERROR; } scrub_opts.randomid = 1; } | RTABLE NUMBER { if ($2 < 0 || $2 > rt_tableid_max()) { yyerror("invalid rtable id"); YYERROR; } scrub_opts.rtableid = $2; } | not TAGGED string { scrub_opts.match_tag = $3; scrub_opts.match_tag_not = $1; } ; fragcache : FRAGMENT REASSEMBLE { $$ = 0; /* default */ } | FRAGMENT NO REASSEMBLE { $$ = PFRULE_FRAGMENT_NOREASS; } ; antispoof : ANTISPOOF logquick antispoof_ifspc af antispoof_opts { struct pfctl_rule r; struct node_host *h = NULL, *hh; struct node_if *i, *j; if (check_rulestate(PFCTL_STATE_FILTER)) YYERROR; for (i = $3; i; i = i->next) { bzero(&r, sizeof(r)); r.action = PF_DROP; r.direction = PF_IN; r.log = $2.log; r.logif = $2.logif; r.quick = $2.quick; r.af = $4; r.ridentifier = $5.ridentifier; if (rule_label(&r, $5.label)) YYERROR; r.rtableid = $5.rtableid; j = calloc(1, sizeof(struct node_if)); if (j == NULL) err(1, "antispoof: calloc"); if (strlcpy(j->ifname, i->ifname, sizeof(j->ifname)) >= sizeof(j->ifname)) { free(j); yyerror("interface name too long"); YYERROR; } j->not = 1; if (i->dynamic) { h = calloc(1, sizeof(*h)); if (h == NULL) err(1, "address: calloc"); h->addr.type = PF_ADDR_DYNIFTL; set_ipmask(h, 128); if (strlcpy(h->addr.v.ifname, i->ifname, sizeof(h->addr.v.ifname)) >= sizeof(h->addr.v.ifname)) { free(h); yyerror( "interface name too long"); YYERROR; } hh = malloc(sizeof(*hh)); if (hh == NULL) err(1, "address: malloc"); bcopy(h, hh, sizeof(*hh)); h->addr.iflags = PFI_AFLAG_NETWORK; } else { h = ifa_lookup(j->ifname, PFI_AFLAG_NETWORK); hh = NULL; } if (h != NULL) expand_rule(&r, j, NULL, NULL, NULL, h, NULL, NULL, NULL, NULL, NULL, NULL, ""); if ((i->ifa_flags & IFF_LOOPBACK) == 0) { bzero(&r, sizeof(r)); r.action = PF_DROP; r.direction = PF_IN; r.log = $2.log; r.logif = $2.logif; r.quick = $2.quick; r.af = $4; r.ridentifier = $5.ridentifier; if (rule_label(&r, $5.label)) YYERROR; r.rtableid = $5.rtableid; if (hh != NULL) h = hh; else h = ifa_lookup(i->ifname, 0); if (h != NULL) expand_rule(&r, NULL, NULL, NULL, NULL, h, NULL, NULL, NULL, NULL, NULL, NULL, ""); } else free(hh); } for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) free($5.label[i]); } ; antispoof_ifspc : FOR antispoof_if { $$ = $2; } | FOR '{' optnl antispoof_iflst '}' { $$ = $4; } ; antispoof_iflst : antispoof_if optnl { $$ = $1; } | antispoof_iflst comma antispoof_if optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; antispoof_if : if_item { $$ = $1; } | '(' if_item ')' { $2->dynamic = 1; $$ = $2; } ; antispoof_opts : { bzero(&antispoof_opts, sizeof antispoof_opts); antispoof_opts.rtableid = -1; } antispoof_opts_l { $$ = antispoof_opts; } | /* empty */ { bzero(&antispoof_opts, sizeof antispoof_opts); antispoof_opts.rtableid = -1; $$ = antispoof_opts; } ; antispoof_opts_l : antispoof_opts_l antispoof_opt | antispoof_opt ; antispoof_opt : label { if (antispoof_opts.labelcount >= PF_RULE_MAX_LABEL_COUNT) { yyerror("label can only be used %d times", PF_RULE_MAX_LABEL_COUNT); YYERROR; } antispoof_opts.label[antispoof_opts.labelcount++] = $1; } | RIDENTIFIER number { antispoof_opts.ridentifier = $2; } | RTABLE NUMBER { if 
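/*
 * Sketch of classic scrub rules parsed by the scrub_opt productions
 * above, which now share the FOM_* markers with filter rules; with the
 * "scrub_opts_l comma scrub_opt" change, options may also be
 * comma-separated (assuming the usual optional "comma" production):
 *
 *	scrub in all fragment reassemble min-ttl 15 max-mss 1400
 *	scrub on em0 random-id, set-tos lowdelay
 */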
($2 < 0 || $2 > rt_tableid_max()) { yyerror("invalid rtable id"); YYERROR; } antispoof_opts.rtableid = $2; } ; not : '!' { $$ = 1; } | /* empty */ { $$ = 0; } ; tabledef : TABLE '<' STRING '>' table_opts { struct node_host *h, *nh; struct node_tinit *ti, *nti; if (strlen($3) >= PF_TABLE_NAME_SIZE) { yyerror("table name too long, max %d chars", PF_TABLE_NAME_SIZE - 1); free($3); YYERROR; } if (pf->loadopt & PFCTL_FLAG_TABLE) if (process_tabledef($3, &$5)) { free($3); YYERROR; } free($3); for (ti = SIMPLEQ_FIRST(&$5.init_nodes); ti != SIMPLEQ_END(&$5.init_nodes); ti = nti) { if (ti->file) free(ti->file); for (h = ti->host; h != NULL; h = nh) { nh = h->next; free(h); } nti = SIMPLEQ_NEXT(ti, entries); free(ti); } } ; table_opts : { bzero(&table_opts, sizeof table_opts); SIMPLEQ_INIT(&table_opts.init_nodes); } table_opts_l { $$ = table_opts; } | /* empty */ { bzero(&table_opts, sizeof table_opts); SIMPLEQ_INIT(&table_opts.init_nodes); $$ = table_opts; } ; table_opts_l : table_opts_l table_opt | table_opt ; table_opt : STRING { if (!strcmp($1, "const")) table_opts.flags |= PFR_TFLAG_CONST; else if (!strcmp($1, "persist")) table_opts.flags |= PFR_TFLAG_PERSIST; else if (!strcmp($1, "counters")) table_opts.flags |= PFR_TFLAG_COUNTERS; else { yyerror("invalid table option '%s'", $1); free($1); YYERROR; } free($1); } | '{' optnl '}' { table_opts.init_addr = 1; } | '{' optnl host_list '}' { struct node_host *n; struct node_tinit *ti; for (n = $3; n != NULL; n = n->next) { switch (n->addr.type) { case PF_ADDR_ADDRMASK: continue; /* ok */ case PF_ADDR_RANGE: yyerror("address ranges are not " "permitted inside tables"); break; case PF_ADDR_DYNIFTL: yyerror("dynamic addresses are not " "permitted inside tables"); break; case PF_ADDR_TABLE: yyerror("tables cannot contain tables"); break; case PF_ADDR_NOROUTE: yyerror("\"no-route\" is not permitted " "inside tables"); break; case PF_ADDR_URPFFAILED: yyerror("\"urpf-failed\" is not " "permitted inside tables"); break; default: yyerror("unknown address type %d", n->addr.type); } YYERROR; } if (!(ti = calloc(1, sizeof(*ti)))) err(1, "table_opt: calloc"); ti->host = $3; SIMPLEQ_INSERT_TAIL(&table_opts.init_nodes, ti, entries); table_opts.init_addr = 1; } | FILENAME STRING { struct node_tinit *ti; if (!(ti = calloc(1, sizeof(*ti)))) err(1, "table_opt: calloc"); ti->file = $2; SIMPLEQ_INSERT_TAIL(&table_opts.init_nodes, ti, entries); table_opts.init_addr = 1; } ; altqif : ALTQ interface queue_opts QUEUE qassign { struct pf_altq a; if (check_rulestate(PFCTL_STATE_QUEUE)) YYERROR; memset(&a, 0, sizeof(a)); if ($3.scheduler.qtype == ALTQT_NONE) { yyerror("no scheduler specified!"); YYERROR; } a.scheduler = $3.scheduler.qtype; a.qlimit = $3.qlimit; a.tbrsize = $3.tbrsize; if ($5 == NULL && $3.scheduler.qtype != ALTQT_CODEL) { yyerror("no child queues specified"); YYERROR; } if (expand_altq(&a, $2, $5, $3.queue_bwspec, &$3.scheduler)) YYERROR; } ; queuespec : QUEUE STRING interface queue_opts qassign { struct pf_altq a; if (check_rulestate(PFCTL_STATE_QUEUE)) { free($2); YYERROR; } memset(&a, 0, sizeof(a)); if (strlcpy(a.qname, $2, sizeof(a.qname)) >= sizeof(a.qname)) { yyerror("queue name too long (max " "%d chars)", PF_QNAME_SIZE-1); free($2); YYERROR; } free($2); if ($4.tbrsize) { yyerror("cannot specify tbrsize for queue"); YYERROR; } if ($4.priority > 255) { yyerror("priority out of range: max 255"); YYERROR; } a.priority = $4.priority; a.qlimit = $4.qlimit; a.scheduler = $4.scheduler.qtype; if (expand_queue(&a, $3, $5, $4.queue_bwspec, &$4.scheduler)) { 
yyerror("errors in queue definition"); YYERROR; } } ; queue_opts : { bzero(&queue_opts, sizeof queue_opts); queue_opts.priority = DEFAULT_PRIORITY; queue_opts.qlimit = DEFAULT_QLIMIT; queue_opts.scheduler.qtype = ALTQT_NONE; queue_opts.queue_bwspec.bw_percent = 100; } queue_opts_l { $$ = queue_opts; } | /* empty */ { bzero(&queue_opts, sizeof queue_opts); queue_opts.priority = DEFAULT_PRIORITY; queue_opts.qlimit = DEFAULT_QLIMIT; queue_opts.scheduler.qtype = ALTQT_NONE; queue_opts.queue_bwspec.bw_percent = 100; $$ = queue_opts; } ; queue_opts_l : queue_opts_l queue_opt | queue_opt ; queue_opt : BANDWIDTH bandwidth { if (queue_opts.marker & QOM_BWSPEC) { yyerror("bandwidth cannot be respecified"); YYERROR; } queue_opts.marker |= QOM_BWSPEC; queue_opts.queue_bwspec = $2; } | PRIORITY NUMBER { if (queue_opts.marker & QOM_PRIORITY) { yyerror("priority cannot be respecified"); YYERROR; } if ($2 < 0 || $2 > 255) { yyerror("priority out of range: max 255"); YYERROR; } queue_opts.marker |= QOM_PRIORITY; queue_opts.priority = $2; } | QLIMIT NUMBER { if (queue_opts.marker & QOM_QLIMIT) { yyerror("qlimit cannot be respecified"); YYERROR; } if ($2 < 0 || $2 > 65535) { yyerror("qlimit out of range: max 65535"); YYERROR; } queue_opts.marker |= QOM_QLIMIT; queue_opts.qlimit = $2; } | scheduler { if (queue_opts.marker & QOM_SCHEDULER) { yyerror("scheduler cannot be respecified"); YYERROR; } queue_opts.marker |= QOM_SCHEDULER; queue_opts.scheduler = $1; } | TBRSIZE NUMBER { if (queue_opts.marker & QOM_TBRSIZE) { yyerror("tbrsize cannot be respecified"); YYERROR; } if ($2 < 0 || $2 > UINT_MAX) { yyerror("tbrsize too big: max %u", UINT_MAX); YYERROR; } queue_opts.marker |= QOM_TBRSIZE; queue_opts.tbrsize = $2; } ; bandwidth : STRING { double bps; char *cp; $$.bw_percent = 0; bps = strtod($1, &cp); if (cp != NULL) { if (strlen(cp) > 1) { char *cu = cp + 1; if (!strcmp(cu, "Bit") || !strcmp(cu, "B") || !strcmp(cu, "bit") || !strcmp(cu, "b")) { *cu = 0; } } if (!strcmp(cp, "b")) ; /* nothing */ else if (!strcmp(cp, "K")) bps *= 1000; else if (!strcmp(cp, "M")) bps *= 1000 * 1000; else if (!strcmp(cp, "G")) bps *= 1000 * 1000 * 1000; else if (!strcmp(cp, "%")) { if (bps < 0 || bps > 100) { yyerror("bandwidth spec " "out of range"); free($1); YYERROR; } $$.bw_percent = bps; bps = 0; } else { yyerror("unknown unit %s", cp); free($1); YYERROR; } } free($1); $$.bw_absolute = (u_int64_t)bps; } | NUMBER { if ($1 < 0 || $1 >= LLONG_MAX) { yyerror("bandwidth number too big"); YYERROR; } $$.bw_percent = 0; $$.bw_absolute = $1; } ; scheduler : CBQ { $$.qtype = ALTQT_CBQ; $$.data.cbq_opts.flags = 0; } | CBQ '(' cbqflags_list ')' { $$.qtype = ALTQT_CBQ; $$.data.cbq_opts.flags = $3; } | PRIQ { $$.qtype = ALTQT_PRIQ; $$.data.priq_opts.flags = 0; } | PRIQ '(' priqflags_list ')' { $$.qtype = ALTQT_PRIQ; $$.data.priq_opts.flags = $3; } | HFSC { $$.qtype = ALTQT_HFSC; bzero(&$$.data.hfsc_opts, sizeof(struct node_hfsc_opts)); } | HFSC '(' hfsc_opts ')' { $$.qtype = ALTQT_HFSC; $$.data.hfsc_opts = $3; } | FAIRQ { $$.qtype = ALTQT_FAIRQ; bzero(&$$.data.fairq_opts, sizeof(struct node_fairq_opts)); } | FAIRQ '(' fairq_opts ')' { $$.qtype = ALTQT_FAIRQ; $$.data.fairq_opts = $3; } | CODEL { $$.qtype = ALTQT_CODEL; bzero(&$$.data.codel_opts, sizeof(struct codel_opts)); } | CODEL '(' codel_opts ')' { $$.qtype = ALTQT_CODEL; $$.data.codel_opts = $3; } ; cbqflags_list : cbqflags_item { $$ |= $1; } | cbqflags_list comma cbqflags_item { $$ |= $3; } ; cbqflags_item : STRING { if (!strcmp($1, "default")) $$ = CBQCLF_DEFCLASS; else if 
(!strcmp($1, "borrow")) $$ = CBQCLF_BORROW; else if (!strcmp($1, "red")) $$ = CBQCLF_RED; else if (!strcmp($1, "ecn")) $$ = CBQCLF_RED|CBQCLF_ECN; else if (!strcmp($1, "rio")) $$ = CBQCLF_RIO; else if (!strcmp($1, "codel")) $$ = CBQCLF_CODEL; else { yyerror("unknown cbq flag \"%s\"", $1); free($1); YYERROR; } free($1); } ; priqflags_list : priqflags_item { $$ |= $1; } | priqflags_list comma priqflags_item { $$ |= $3; } ; priqflags_item : STRING { if (!strcmp($1, "default")) $$ = PRCF_DEFAULTCLASS; else if (!strcmp($1, "red")) $$ = PRCF_RED; else if (!strcmp($1, "ecn")) $$ = PRCF_RED|PRCF_ECN; else if (!strcmp($1, "rio")) $$ = PRCF_RIO; else if (!strcmp($1, "codel")) $$ = PRCF_CODEL; else { yyerror("unknown priq flag \"%s\"", $1); free($1); YYERROR; } free($1); } ; hfsc_opts : { bzero(&hfsc_opts, sizeof(struct node_hfsc_opts)); } hfscopts_list { $$ = hfsc_opts; } ; hfscopts_list : hfscopts_item | hfscopts_list comma hfscopts_item ; hfscopts_item : LINKSHARE bandwidth { if (hfsc_opts.linkshare.used) { yyerror("linkshare already specified"); YYERROR; } hfsc_opts.linkshare.m2 = $2; hfsc_opts.linkshare.used = 1; } | LINKSHARE '(' bandwidth comma NUMBER comma bandwidth ')' { if ($5 < 0 || $5 > INT_MAX) { yyerror("timing in curve out of range"); YYERROR; } if (hfsc_opts.linkshare.used) { yyerror("linkshare already specified"); YYERROR; } hfsc_opts.linkshare.m1 = $3; hfsc_opts.linkshare.d = $5; hfsc_opts.linkshare.m2 = $7; hfsc_opts.linkshare.used = 1; } | REALTIME bandwidth { if (hfsc_opts.realtime.used) { yyerror("realtime already specified"); YYERROR; } hfsc_opts.realtime.m2 = $2; hfsc_opts.realtime.used = 1; } | REALTIME '(' bandwidth comma NUMBER comma bandwidth ')' { if ($5 < 0 || $5 > INT_MAX) { yyerror("timing in curve out of range"); YYERROR; } if (hfsc_opts.realtime.used) { yyerror("realtime already specified"); YYERROR; } hfsc_opts.realtime.m1 = $3; hfsc_opts.realtime.d = $5; hfsc_opts.realtime.m2 = $7; hfsc_opts.realtime.used = 1; } | UPPERLIMIT bandwidth { if (hfsc_opts.upperlimit.used) { yyerror("upperlimit already specified"); YYERROR; } hfsc_opts.upperlimit.m2 = $2; hfsc_opts.upperlimit.used = 1; } | UPPERLIMIT '(' bandwidth comma NUMBER comma bandwidth ')' { if ($5 < 0 || $5 > INT_MAX) { yyerror("timing in curve out of range"); YYERROR; } if (hfsc_opts.upperlimit.used) { yyerror("upperlimit already specified"); YYERROR; } hfsc_opts.upperlimit.m1 = $3; hfsc_opts.upperlimit.d = $5; hfsc_opts.upperlimit.m2 = $7; hfsc_opts.upperlimit.used = 1; } | STRING { if (!strcmp($1, "default")) hfsc_opts.flags |= HFCF_DEFAULTCLASS; else if (!strcmp($1, "red")) hfsc_opts.flags |= HFCF_RED; else if (!strcmp($1, "ecn")) hfsc_opts.flags |= HFCF_RED|HFCF_ECN; else if (!strcmp($1, "rio")) hfsc_opts.flags |= HFCF_RIO; else if (!strcmp($1, "codel")) hfsc_opts.flags |= HFCF_CODEL; else { yyerror("unknown hfsc flag \"%s\"", $1); free($1); YYERROR; } free($1); } ; fairq_opts : { bzero(&fairq_opts, sizeof(struct node_fairq_opts)); } fairqopts_list { $$ = fairq_opts; } ; fairqopts_list : fairqopts_item | fairqopts_list comma fairqopts_item ; fairqopts_item : LINKSHARE bandwidth { if (fairq_opts.linkshare.used) { yyerror("linkshare already specified"); YYERROR; } fairq_opts.linkshare.m2 = $2; fairq_opts.linkshare.used = 1; } | LINKSHARE '(' bandwidth number bandwidth ')' { if (fairq_opts.linkshare.used) { yyerror("linkshare already specified"); YYERROR; } fairq_opts.linkshare.m1 = $3; fairq_opts.linkshare.d = $4; fairq_opts.linkshare.m2 = $5; fairq_opts.linkshare.used = 1; } | HOGS bandwidth { 
fairq_opts.hogs_bw = $2; } | BUCKETS number { fairq_opts.nbuckets = $2; } | STRING { if (!strcmp($1, "default")) fairq_opts.flags |= FARF_DEFAULTCLASS; else if (!strcmp($1, "red")) fairq_opts.flags |= FARF_RED; else if (!strcmp($1, "ecn")) fairq_opts.flags |= FARF_RED|FARF_ECN; else if (!strcmp($1, "rio")) fairq_opts.flags |= FARF_RIO; else if (!strcmp($1, "codel")) fairq_opts.flags |= FARF_CODEL; else { yyerror("unknown fairq flag \"%s\"", $1); free($1); YYERROR; } free($1); } ; codel_opts : { bzero(&codel_opts, sizeof(struct codel_opts)); } codelopts_list { $$ = codel_opts; } ; codelopts_list : codelopts_item | codelopts_list comma codelopts_item ; codelopts_item : INTERVAL number { if (codel_opts.interval) { yyerror("interval already specified"); YYERROR; } codel_opts.interval = $2; } | TARGET number { if (codel_opts.target) { yyerror("target already specified"); YYERROR; } codel_opts.target = $2; } | STRING { if (!strcmp($1, "ecn")) codel_opts.ecn = 1; else { yyerror("unknown codel option \"%s\"", $1); free($1); YYERROR; } free($1); } ; qassign : /* empty */ { $$ = NULL; } | qassign_item { $$ = $1; } | '{' optnl qassign_list '}' { $$ = $3; } ; qassign_list : qassign_item optnl { $$ = $1; } | qassign_list comma qassign_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; qassign_item : STRING { $$ = calloc(1, sizeof(struct node_queue)); if ($$ == NULL) err(1, "qassign_item: calloc"); if (strlcpy($$->queue, $1, sizeof($$->queue)) >= sizeof($$->queue)) { yyerror("queue name '%s' too long (max " "%d chars)", $1, sizeof($$->queue)-1); free($1); free($$); YYERROR; } free($1); $$->next = NULL; $$->tail = $$; } ; pfrule : action dir logquick interface route af proto fromto filter_opts { struct pfctl_rule r; struct node_state_opt *o; struct node_proto *proto; int srctrack = 0; int statelock = 0; int adaptive = 0; int defaults = 0; if (check_rulestate(PFCTL_STATE_FILTER)) YYERROR; memset(&r, 0, sizeof(r)); r.action = $1.b1; switch ($1.b2) { case PFRULE_RETURNRST: r.rule_flag |= PFRULE_RETURNRST; r.return_ttl = $1.w; break; case PFRULE_RETURNICMP: r.rule_flag |= PFRULE_RETURNICMP; r.return_icmp = $1.w; r.return_icmp6 = $1.w2; break; case PFRULE_RETURN: r.rule_flag |= PFRULE_RETURN; r.return_icmp = $1.w; r.return_icmp6 = $1.w2; break; } r.direction = $2; r.log = $3.log; r.logif = $3.logif; r.quick = $3.quick; r.prob = $9.prob; r.rtableid = $9.rtableid; + if ($9.nodf) + r.scrub_flags |= PFSTATE_NODF; + if ($9.randomid) + r.scrub_flags |= PFSTATE_RANDOMID; + if ($9.minttl) + r.min_ttl = $9.minttl; + if ($9.max_mss) + r.max_mss = $9.max_mss; + if ($9.marker & FOM_SETTOS) { + r.scrub_flags |= PFSTATE_SETTOS; + r.set_tos = $9.settos; + } + if ($9.marker & FOM_SCRUB_TCP) + r.scrub_flags |= PFSTATE_SCRUB_TCP; + if ($9.marker & FOM_PRIO) { if ($9.prio == 0) r.prio = PF_PRIO_ZERO; else r.prio = $9.prio; } if ($9.marker & FOM_SETPRIO) { r.set_prio[0] = $9.set_prio[0]; r.set_prio[1] = $9.set_prio[1]; r.scrub_flags |= PFSTATE_SETPRIO; } r.af = $6; if ($9.tag) if (strlcpy(r.tagname, $9.tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } if ($9.match_tag) if (strlcpy(r.match_tagname, $9.match_tag, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } r.match_tag_not = $9.match_tag_not; if (rule_label(&r, $9.label)) YYERROR; for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) free($9.label[i]); r.ridentifier = $9.ridentifier; r.flags = $9.flags.b1; r.flagset = $9.flags.b2; if 
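/*
 * This is the core of the change (example only): scrub options can now
 * be attached to pass/match rules, turning into PFSTATE_* scrub_flags
 * on the rule instead of requiring a separate scrub rule:
 *
 *	pass in proto tcp scrub (no-df, random-id, max-mss 1440)
 *	match in all scrub (min-ttl 64, reassemble tcp)
 */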
(($9.flags.b1 & $9.flags.b2) != $9.flags.b1) { yyerror("flags always false"); YYERROR; } if ($9.flags.b1 || $9.flags.b2 || $8.src_os) { for (proto = $7; proto != NULL && proto->proto != IPPROTO_TCP; proto = proto->next) ; /* nothing */ if (proto == NULL && $7 != NULL) { if ($9.flags.b1 || $9.flags.b2) yyerror( "flags only apply to tcp"); if ($8.src_os) yyerror( "OS fingerprinting only " "applies to tcp"); YYERROR; } #if 0 if (($9.flags.b1 & parse_flags("S")) == 0 && $8.src_os) { yyerror("OS fingerprinting requires " "the SYN TCP flag (flags S/SA)"); YYERROR; } #endif } r.tos = $9.tos; r.keep_state = $9.keep.action; o = $9.keep.options; /* 'keep state' by default on pass rules. */ if (!r.keep_state && !r.action && !($9.marker & FOM_KEEP)) { r.keep_state = PF_STATE_NORMAL; o = keep_state_defaults; defaults = 1; } while (o) { struct node_state_opt *p = o; switch (o->type) { case PF_STATE_OPT_MAX: if (r.max_states) { yyerror("state option 'max' " "multiple definitions"); YYERROR; } r.max_states = o->data.max_states; break; case PF_STATE_OPT_NOSYNC: if (r.rule_flag & PFRULE_NOSYNC) { yyerror("state option 'sync' " "multiple definitions"); YYERROR; } r.rule_flag |= PFRULE_NOSYNC; break; case PF_STATE_OPT_SRCTRACK: if (srctrack) { yyerror("state option " "'source-track' " "multiple definitions"); YYERROR; } srctrack = o->data.src_track; r.rule_flag |= PFRULE_SRCTRACK; break; case PF_STATE_OPT_MAX_SRC_STATES: if (r.max_src_states) { yyerror("state option " "'max-src-states' " "multiple definitions"); YYERROR; } if (o->data.max_src_states == 0) { yyerror("'max-src-states' must " "be > 0"); YYERROR; } r.max_src_states = o->data.max_src_states; r.rule_flag |= PFRULE_SRCTRACK; break; case PF_STATE_OPT_OVERLOAD: if (r.overload_tblname[0]) { yyerror("multiple 'overload' " "table definitions"); YYERROR; } if (strlcpy(r.overload_tblname, o->data.overload.tblname, PF_TABLE_NAME_SIZE) >= PF_TABLE_NAME_SIZE) { yyerror("state option: " "strlcpy"); YYERROR; } r.flush = o->data.overload.flush; break; case PF_STATE_OPT_MAX_SRC_CONN: if (r.max_src_conn) { yyerror("state option " "'max-src-conn' " "multiple definitions"); YYERROR; } if (o->data.max_src_conn == 0) { yyerror("'max-src-conn' " "must be > 0"); YYERROR; } r.max_src_conn = o->data.max_src_conn; r.rule_flag |= PFRULE_SRCTRACK | PFRULE_RULESRCTRACK; break; case PF_STATE_OPT_MAX_SRC_CONN_RATE: if (r.max_src_conn_rate.limit) { yyerror("state option " "'max-src-conn-rate' " "multiple definitions"); YYERROR; } if (!o->data.max_src_conn_rate.limit || !o->data.max_src_conn_rate.seconds) { yyerror("'max-src-conn-rate' " "values must be > 0"); YYERROR; } if (o->data.max_src_conn_rate.limit > PF_THRESHOLD_MAX) { yyerror("'max-src-conn-rate' " "maximum rate must be < %u", PF_THRESHOLD_MAX); YYERROR; } r.max_src_conn_rate.limit = o->data.max_src_conn_rate.limit; r.max_src_conn_rate.seconds = o->data.max_src_conn_rate.seconds; r.rule_flag |= PFRULE_SRCTRACK | PFRULE_RULESRCTRACK; break; case PF_STATE_OPT_MAX_SRC_NODES: if (r.max_src_nodes) { yyerror("state option " "'max-src-nodes' " "multiple definitions"); YYERROR; } if (o->data.max_src_nodes == 0) { yyerror("'max-src-nodes' must " "be > 0"); YYERROR; } r.max_src_nodes = o->data.max_src_nodes; r.rule_flag |= PFRULE_SRCTRACK | PFRULE_RULESRCTRACK; break; case PF_STATE_OPT_STATELOCK: if (statelock) { yyerror("state locking option: " "multiple definitions"); YYERROR; } statelock = 1; r.rule_flag |= o->data.statelock; break; case PF_STATE_OPT_SLOPPY: if (r.rule_flag & PFRULE_STATESLOPPY) { yyerror("state sloppy option: "
"multiple definitions"); YYERROR; } r.rule_flag |= PFRULE_STATESLOPPY; break; case PF_STATE_OPT_TIMEOUT: if (o->data.timeout.number == PFTM_ADAPTIVE_START || o->data.timeout.number == PFTM_ADAPTIVE_END) adaptive = 1; if (r.timeout[o->data.timeout.number]) { yyerror("state timeout %s " "multiple definitions", pf_timeouts[o->data. timeout.number].name); YYERROR; } r.timeout[o->data.timeout.number] = o->data.timeout.seconds; } o = o->next; if (!defaults) free(p); } /* 'flags S/SA' by default on stateful rules */ if (!r.action && !r.flags && !r.flagset && !$9.fragment && !($9.marker & FOM_FLAGS) && r.keep_state) { r.flags = parse_flags("S"); r.flagset = parse_flags("SA"); } if (!adaptive && r.max_states) { r.timeout[PFTM_ADAPTIVE_START] = (r.max_states / 10) * 6; r.timeout[PFTM_ADAPTIVE_END] = (r.max_states / 10) * 12; } if (r.rule_flag & PFRULE_SRCTRACK) { if (srctrack == PF_SRCTRACK_GLOBAL && r.max_src_nodes) { yyerror("'max-src-nodes' is " "incompatible with " "'source-track global'"); YYERROR; } if (srctrack == PF_SRCTRACK_GLOBAL && r.max_src_conn) { yyerror("'max-src-conn' is " "incompatible with " "'source-track global'"); YYERROR; } if (srctrack == PF_SRCTRACK_GLOBAL && r.max_src_conn_rate.seconds) { yyerror("'max-src-conn-rate' is " "incompatible with " "'source-track global'"); YYERROR; } if (r.timeout[PFTM_SRC_NODE] < r.max_src_conn_rate.seconds) r.timeout[PFTM_SRC_NODE] = r.max_src_conn_rate.seconds; r.rule_flag |= PFRULE_SRCTRACK; if (srctrack == PF_SRCTRACK_RULE) r.rule_flag |= PFRULE_RULESRCTRACK; } if (r.keep_state && !statelock) r.rule_flag |= default_statelock; if ($9.fragment) r.rule_flag |= PFRULE_FRAGMENT; r.allow_opts = $9.allowopts; decide_address_family($8.src.host, &r.af); decide_address_family($8.dst.host, &r.af); if ($5.rt) { if (!r.direction) { yyerror("direction must be explicit " "with rules that specify routing"); YYERROR; } r.rt = $5.rt; r.rpool.opts = $5.pool_opts; if ($5.key != NULL) memcpy(&r.rpool.key, $5.key, sizeof(struct pf_poolhashkey)); } if (r.rt) { decide_address_family($5.host, &r.af); remove_invalid_hosts(&$5.host, &r.af); if ($5.host == NULL) { yyerror("no routing address with " "matching address family found."); YYERROR; } if ((r.rpool.opts & PF_POOL_TYPEMASK) == PF_POOL_NONE && ($5.host->next != NULL || $5.host->addr.type == PF_ADDR_TABLE || DYNIF_MULTIADDR($5.host->addr))) r.rpool.opts |= PF_POOL_ROUNDROBIN; if ((r.rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN && disallow_table($5.host, "tables are only " "supported in round-robin routing pools")) YYERROR; if ((r.rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN && disallow_alias($5.host, "interface (%s) " "is only supported in round-robin " "routing pools")) YYERROR; if ($5.host->next != NULL) { if ((r.rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) { yyerror("r.rpool.opts must " "be PF_POOL_ROUNDROBIN"); YYERROR; } } } if ($9.queues.qname != NULL) { if (strlcpy(r.qname, $9.queues.qname, sizeof(r.qname)) >= sizeof(r.qname)) { yyerror("rule qname too long (max " "%d chars)", sizeof(r.qname)-1); YYERROR; } free($9.queues.qname); } if ($9.queues.pqname != NULL) { if (strlcpy(r.pqname, $9.queues.pqname, sizeof(r.pqname)) >= sizeof(r.pqname)) { yyerror("rule pqname too long (max " "%d chars)", sizeof(r.pqname)-1); YYERROR; } free($9.queues.pqname); } #ifdef __FreeBSD__ r.divert.port = $9.divert.port; #else if ((r.divert.port = $9.divert.port)) { if (r.direction == PF_OUT) { if ($9.divert.addr) { yyerror("address specified " "for outgoing divert"); YYERROR; } bzero(&r.divert.addr, 
sizeof(r.divert.addr)); } else { if (!$9.divert.addr) { yyerror("no address specified " "for incoming divert"); YYERROR; } if ($9.divert.addr->af != r.af) { yyerror("address family " "mismatch for divert"); YYERROR; } r.divert.addr = $9.divert.addr->addr.v.a.addr; } } #endif if ($9.dnpipe || $9.dnrpipe) { r.dnpipe = $9.dnpipe; r.dnrpipe = $9.dnrpipe; if ($9.free_flags & PFRULE_DN_IS_PIPE) r.free_flags |= PFRULE_DN_IS_PIPE; else r.free_flags |= PFRULE_DN_IS_QUEUE; } expand_rule(&r, $4, $5.host, $7, $8.src_os, $8.src.host, $8.src.port, $8.dst.host, $8.dst.port, $9.uid, $9.gid, $9.icmpspec, ""); } ; filter_opts : { bzero(&filter_opts, sizeof filter_opts); filter_opts.rtableid = -1; } filter_opts_l { $$ = filter_opts; } | /* empty */ { bzero(&filter_opts, sizeof filter_opts); filter_opts.rtableid = -1; $$ = filter_opts; } ; filter_opts_l : filter_opts_l filter_opt | filter_opt ; filter_opt : USER uids { if (filter_opts.uid) $2->tail->next = filter_opts.uid; filter_opts.uid = $2; } | GROUP gids { if (filter_opts.gid) $2->tail->next = filter_opts.gid; filter_opts.gid = $2; } | flags { if (filter_opts.marker & FOM_FLAGS) { yyerror("flags cannot be redefined"); YYERROR; } filter_opts.marker |= FOM_FLAGS; filter_opts.flags.b1 |= $1.b1; filter_opts.flags.b2 |= $1.b2; filter_opts.flags.w |= $1.w; filter_opts.flags.w2 |= $1.w2; } | icmpspec { if (filter_opts.marker & FOM_ICMP) { yyerror("icmp-type cannot be redefined"); YYERROR; } filter_opts.marker |= FOM_ICMP; filter_opts.icmpspec = $1; } | PRIO NUMBER { if (filter_opts.marker & FOM_PRIO) { yyerror("prio cannot be redefined"); YYERROR; } if ($2 < 0 || $2 > PF_PRIO_MAX) { yyerror("prio must be 0 - %u", PF_PRIO_MAX); YYERROR; } filter_opts.marker |= FOM_PRIO; filter_opts.prio = $2; } | TOS tos { if (filter_opts.marker & FOM_TOS) { yyerror("tos cannot be redefined"); YYERROR; } filter_opts.marker |= FOM_TOS; filter_opts.tos = $2; } | keep { if (filter_opts.marker & FOM_KEEP) { yyerror("modulate or keep cannot be redefined"); YYERROR; } filter_opts.marker |= FOM_KEEP; filter_opts.keep.action = $1.action; filter_opts.keep.options = $1.options; } | RIDENTIFIER number { filter_opts.ridentifier = $2; } | FRAGMENT { filter_opts.fragment = 1; } | ALLOWOPTS { filter_opts.allowopts = 1; } | label { if (filter_opts.labelcount >= PF_RULE_MAX_LABEL_COUNT) { yyerror("label can only be used %d times", PF_RULE_MAX_LABEL_COUNT); YYERROR; } filter_opts.label[filter_opts.labelcount++] = $1; } | qname { if (filter_opts.queues.qname) { yyerror("queue cannot be redefined"); YYERROR; } filter_opts.queues = $1; } | DNPIPE number { filter_opts.dnpipe = $2; filter_opts.free_flags |= PFRULE_DN_IS_PIPE; } | DNPIPE '(' number ')' { filter_opts.dnpipe = $3; filter_opts.free_flags |= PFRULE_DN_IS_PIPE; } | DNPIPE '(' number comma number ')' { filter_opts.dnrpipe = $5; filter_opts.dnpipe = $3; filter_opts.free_flags |= PFRULE_DN_IS_PIPE; } | DNQUEUE number { filter_opts.dnpipe = $2; filter_opts.free_flags |= PFRULE_DN_IS_QUEUE; } | DNQUEUE '(' number comma number ')' { filter_opts.dnrpipe = $5; filter_opts.dnpipe = $3; filter_opts.free_flags |= PFRULE_DN_IS_QUEUE; } | DNQUEUE '(' number ')' { filter_opts.dnpipe = $3; filter_opts.free_flags |= PFRULE_DN_IS_QUEUE; } | TAG string { filter_opts.tag = $2; } | not TAGGED string { filter_opts.match_tag = $3; filter_opts.match_tag_not = $1; } | PROBABILITY probability { double p; p = floor($2 * UINT_MAX + 0.5); if (p < 0.0 || p > UINT_MAX) { yyerror("invalid probability: %lf", p); YYERROR; } filter_opts.prob = (u_int32_t)p; if 
(filter_opts.prob == 0) filter_opts.prob = 1; } | RTABLE NUMBER { if ($2 < 0 || $2 > rt_tableid_max()) { yyerror("invalid rtable id"); YYERROR; } filter_opts.rtableid = $2; } | DIVERTTO portplain { #ifdef __FreeBSD__ filter_opts.divert.port = $2.a; if (!filter_opts.divert.port) { yyerror("invalid divert port: %u", ntohs($2.a)); YYERROR; } #endif } | DIVERTTO STRING PORT portplain { #ifndef __FreeBSD__ if ((filter_opts.divert.addr = host($2)) == NULL) { yyerror("could not parse divert address: %s", $2); free($2); YYERROR; } #else if ($2) #endif free($2); filter_opts.divert.port = $4.a; if (!filter_opts.divert.port) { yyerror("invalid divert port: %u", ntohs($4.a)); YYERROR; } } | DIVERTREPLY { #ifdef __FreeBSD__ yyerror("divert-reply has no meaning in FreeBSD pf(4)"); YYERROR; #else filter_opts.divert.port = 1; /* some random value */ #endif } + | SCRUB '(' scrub_opts ')' { + filter_opts.nodf = $3.nodf; + filter_opts.minttl = $3.minttl; + if ($3.marker & FOM_SETTOS) { + /* Old-style rules are "scrub set-tos 0x42"; + * new-style rules are "set tos 0x42 scrub (...)". + * What is inside "scrub (...)" is unfortunately the + * original scrub syntax, so it would overwrite a + * "set tos" given on the same pass/match rule. + */ + filter_opts.settos = $3.settos; + } + filter_opts.randomid = $3.randomid; + filter_opts.max_mss = $3.maxmss; + if ($3.reassemble_tcp) + filter_opts.marker |= FOM_SCRUB_TCP; + filter_opts.marker |= $3.marker; + } | filter_sets ; filter_sets : SET '(' filter_sets_l ')' { $$ = filter_opts; } | SET filter_set { $$ = filter_opts; } ; filter_sets_l : filter_sets_l comma filter_set | filter_set ; filter_set : prio { if (filter_opts.marker & FOM_SETPRIO) { yyerror("prio cannot be redefined"); YYERROR; } filter_opts.marker |= FOM_SETPRIO; filter_opts.set_prio[0] = $1.b1; filter_opts.set_prio[1] = $1.b2; } + | TOS tos { + if (filter_opts.marker & FOM_SETTOS) { + yyerror("tos cannot be respecified"); + YYERROR; + } + filter_opts.marker |= FOM_SETTOS; + filter_opts.settos = $2; + } prio : PRIO NUMBER { if ($2 < 0 || $2 > PF_PRIO_MAX) { yyerror("prio must be 0 - %u", PF_PRIO_MAX); YYERROR; } $$.b1 = $$.b2 = $2; } | PRIO '(' NUMBER comma NUMBER ')' { if ($3 < 0 || $3 > PF_PRIO_MAX || $5 < 0 || $5 > PF_PRIO_MAX) { yyerror("prio must be 0 - %u", PF_PRIO_MAX); YYERROR; } $$.b1 = $3; $$.b2 = $5; } ; probability : STRING { char *e; double p = strtod($1, &e); if (*e == '%') { p *= 0.01; e++; } if (*e) { yyerror("invalid probability: %s", $1); free($1); YYERROR; } free($1); $$ = p; } | NUMBER { $$ = (double)$1; } ; action : PASS { $$.b1 = PF_PASS; $$.b2 = failpolicy; $$.w = returnicmpdefault; $$.w2 = returnicmp6default; } | MATCH { $$.b1 = PF_MATCH; $$.b2 = $$.w = 0; } | BLOCK blockspec { $$ = $2; $$.b1 = PF_DROP; } ; blockspec : /* empty */ { $$.b2 = blockpolicy; $$.w = returnicmpdefault; $$.w2 = returnicmp6default; } | DROP { $$.b2 = PFRULE_DROP; $$.w = 0; $$.w2 = 0; } | RETURNRST { $$.b2 = PFRULE_RETURNRST; $$.w = 0; $$.w2 = 0; } | RETURNRST '(' TTL NUMBER ')' { if ($4 < 0 || $4 > 255) { yyerror("illegal ttl value %d", $4); YYERROR; } $$.b2 = PFRULE_RETURNRST; $$.w = $4; $$.w2 = 0; } | RETURNICMP { $$.b2 = PFRULE_RETURNICMP; $$.w = returnicmpdefault; $$.w2 = returnicmp6default; } | RETURNICMP6 { $$.b2 = PFRULE_RETURNICMP; $$.w = returnicmpdefault; $$.w2 = returnicmp6default; } | RETURNICMP '(' reticmpspec ')' { $$.b2 = PFRULE_RETURNICMP; $$.w = $3; $$.w2 = returnicmp6default; } | RETURNICMP6 '(' reticmp6spec ')' { $$.b2 = PFRULE_RETURNICMP; $$.w = returnicmpdefault; $$.w2 = $3; } | RETURNICMP '(' reticmpspec comma
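/*
 * Sketch of the new "set tos" spelling enabled by the filter_set
 * addition above (the "0x2a"/"lowdelay" forms are assumed from the
 * tos rule):
 *
 *	pass out proto tcp set tos lowdelay
 *	pass out proto tcp set ( prio 3, tos 0x2a )
 *
 * Unlike the old "scrub set-tos", this marks FOM_SETTOS on the
 * pass/match rule itself and is applied per-state as PFSTATE_SETTOS.
 */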
reticmp6spec ')' { $$.b2 = PFRULE_RETURNICMP; $$.w = $3; $$.w2 = $5; } | RETURN { $$.b2 = PFRULE_RETURN; $$.w = returnicmpdefault; $$.w2 = returnicmp6default; } ; reticmpspec : STRING { if (!($$ = parseicmpspec($1, AF_INET))) { free($1); YYERROR; } free($1); } | NUMBER { u_int8_t icmptype; if ($1 < 0 || $1 > 255) { yyerror("invalid icmp code %lu", $1); YYERROR; } icmptype = returnicmpdefault >> 8; $$ = (icmptype << 8 | $1); } ; reticmp6spec : STRING { if (!($$ = parseicmpspec($1, AF_INET6))) { free($1); YYERROR; } free($1); } | NUMBER { u_int8_t icmptype; if ($1 < 0 || $1 > 255) { yyerror("invalid icmp code %lu", $1); YYERROR; } icmptype = returnicmp6default >> 8; $$ = (icmptype << 8 | $1); } ; dir : /* empty */ { $$ = PF_INOUT; } | IN { $$ = PF_IN; } | OUT { $$ = PF_OUT; } ; quick : /* empty */ { $$.quick = 0; } | QUICK { $$.quick = 1; } ; logquick : /* empty */ { $$.log = 0; $$.quick = 0; $$.logif = 0; } | log { $$ = $1; $$.quick = 0; } | QUICK { $$.quick = 1; $$.log = 0; $$.logif = 0; } | log QUICK { $$ = $1; $$.quick = 1; } | QUICK log { $$ = $2; $$.quick = 1; } ; log : LOG { $$.log = PF_LOG; $$.logif = 0; } | LOG '(' logopts ')' { $$.log = PF_LOG | $3.log; $$.logif = $3.logif; } ; logopts : logopt { $$ = $1; } | logopts comma logopt { $$.log = $1.log | $3.log; $$.logif = $3.logif; if ($$.logif == 0) $$.logif = $1.logif; } ; logopt : ALL { $$.log = PF_LOG_ALL; $$.logif = 0; } | USER { $$.log = PF_LOG_SOCKET_LOOKUP; $$.logif = 0; } | GROUP { $$.log = PF_LOG_SOCKET_LOOKUP; $$.logif = 0; } | TO string { const char *errstr; u_int i; $$.log = 0; if (strncmp($2, "pflog", 5)) { yyerror("%s: should be a pflog interface", $2); free($2); YYERROR; } i = strtonum($2 + 5, 0, 255, &errstr); if (errstr) { yyerror("%s: %s", $2, errstr); free($2); YYERROR; } free($2); $$.logif = i; } ; interface : /* empty */ { $$ = NULL; } | ON if_item_not { $$ = $2; } | ON '{' optnl if_list '}' { $$ = $4; } ; if_list : if_item_not optnl { $$ = $1; } | if_list comma if_item_not optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; if_item_not : not if_item { $$ = $2; $$->not = $1; } ; if_item : STRING { struct node_host *n; $$ = calloc(1, sizeof(struct node_if)); if ($$ == NULL) err(1, "if_item: calloc"); if (strlcpy($$->ifname, $1, sizeof($$->ifname)) >= sizeof($$->ifname)) { free($1); free($$); yyerror("interface name too long"); YYERROR; } if ((n = ifa_exists($1)) != NULL) $$->ifa_flags = n->ifa_flags; free($1); $$->not = 0; $$->next = NULL; $$->tail = $$; } ; af : /* empty */ { $$ = 0; } | INET { $$ = AF_INET; } | INET6 { $$ = AF_INET6; } ; etherproto : /* empty */ { $$ = NULL; } | PROTO etherproto_item { $$ = $2; } | PROTO '{' optnl etherproto_list '}' { $$ = $4; } ; etherproto_list : etherproto_item optnl { $$ = $1; } | etherproto_list comma etherproto_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; etherproto_item : etherprotoval { u_int16_t pr; pr = (u_int16_t)$1; if (pr == 0) { yyerror("proto 0 cannot be used"); YYERROR; } $$ = calloc(1, sizeof(struct node_proto)); if ($$ == NULL) err(1, "proto_item: calloc"); $$->proto = pr; $$->next = NULL; $$->tail = $$; } ; etherprotoval : NUMBER { if ($1 < 0 || $1 > 65535) { yyerror("protocol outside range"); YYERROR; } } | STRING { if (!strncmp($1, "0x", 2)) { if (sscanf($1, "0x%4x", &$$) != 1) { free($1); yyerror("invalid EtherType hex"); YYERROR; } free($1); } else { free($1); yyerror("Symbolic EtherType not yet supported"); YYERROR; } } ; proto : /* empty */ { $$ = NULL; } | PROTO proto_item { $$ = $2; } | PROTO '{' optnl proto_list '}' { $$ = $4; } ; proto_list : proto_item
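/*
 * Illustrative sketch, not part of the grammar above: reticmpspec and
 * reticmp6spec store the return-icmp answer as one 16-bit word, ICMP
 * type in the high byte and code in the low byte; the bare NUMBER form
 * supplies only a code and inherits the per-family default type.  The
 * ex_ names are hypothetical.
 */
#include <stdint.h>

static uint16_t
ex_reticmp_pack(uint8_t type, uint8_t code)
{
	return ((uint16_t)type << 8 | code);
}

static void
ex_reticmp_unpack(uint16_t w, uint8_t *type, uint8_t *code)
{
	*type = (uint8_t)(w >> 8);
	*code = (uint8_t)(w & 0xff);
}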
optnl { $$ = $1; } | proto_list comma proto_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; proto_item : protoval { u_int8_t pr; pr = (u_int8_t)$1; if (pr == 0) { yyerror("proto 0 cannot be used"); YYERROR; } $$ = calloc(1, sizeof(struct node_proto)); if ($$ == NULL) err(1, "proto_item: calloc"); $$->proto = pr; $$->next = NULL; $$->tail = $$; } ; protoval : STRING { struct protoent *p; p = getprotobyname($1); if (p == NULL) { yyerror("unknown protocol %s", $1); free($1); YYERROR; } $$ = p->p_proto; free($1); } | NUMBER { if ($1 < 0 || $1 > 255) { yyerror("protocol outside range"); YYERROR; } } ; l3fromto : /* empty */ { bzero(&$$, sizeof($$)); } | L3 fromto { if ($2.src.host != NULL && $2.src.host->addr.type != PF_ADDR_ADDRMASK && $2.src.host->addr.type != PF_ADDR_TABLE) { yyerror("from must be an address or table"); YYERROR; } if ($2.dst.host != NULL && $2.dst.host->addr.type != PF_ADDR_ADDRMASK && $2.dst.host->addr.type != PF_ADDR_TABLE) { yyerror("to must be an address or table"); YYERROR; } $$ = $2; } ; etherfromto : ALL { $$.src = NULL; $$.dst = NULL; } | etherfrom etherto { $$.src = $1.mac; $$.dst = $2.mac; } ; etherfrom : /* empty */ { bzero(&$$, sizeof($$)); } | FROM macspec { $$.mac = $2; } ; etherto : /* empty */ { bzero(&$$, sizeof($$)); } | TO macspec { $$.mac = $2; } ; mac : string '/' NUMBER { $$ = node_mac_from_string_masklen($1, $3); free($1); if ($$ == NULL) YYERROR; } | string { if (strchr($1, '&')) { /* mac&mask */ char *mac = strtok($1, "&"); char *mask = strtok(NULL, "&"); $$ = node_mac_from_string_mask(mac, mask); } else { $$ = node_mac_from_string($1); } free($1); if ($$ == NULL) YYERROR; } xmac : not mac { struct node_mac *n; for (n = $2; n != NULL; n = n->next) n->neg = $1; $$ = $2; } ; macspec : xmac { $$ = $1; } | '{' optnl mac_list '}' { $$ = $3; } ; mac_list : xmac optnl { $$ = $1; } | mac_list comma xmac { if ($3 == NULL) $$ = $1; else if ($1 == NULL) $$ = $3; else { $1->tail->next = $3; $1->tail = $3->tail; $$ = $1; } } fromto : ALL { $$.src.host = NULL; $$.src.port = NULL; $$.dst.host = NULL; $$.dst.port = NULL; $$.src_os = NULL; } | from os to { $$.src = $1; $$.src_os = $2; $$.dst = $3; } ; os : /* empty */ { $$ = NULL; } | OS xos { $$ = $2; } | OS '{' optnl os_list '}' { $$ = $4; } ; xos : STRING { $$ = calloc(1, sizeof(struct node_os)); if ($$ == NULL) err(1, "os: calloc"); $$->os = $1; $$->tail = $$; } ; os_list : xos optnl { $$ = $1; } | os_list comma xos optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; from : /* empty */ { $$.host = NULL; $$.port = NULL; } | FROM ipportspec { $$ = $2; } ; to : /* empty */ { $$.host = NULL; $$.port = NULL; } | TO ipportspec { if (disallow_urpf_failed($2.host, "\"urpf-failed\" is " "not permitted in a destination address")) YYERROR; $$ = $2; } ; ipportspec : ipspec { $$.host = $1; $$.port = NULL; } | ipspec PORT portspec { $$.host = $1; $$.port = $3; } | PORT portspec { $$.host = NULL; $$.port = $2; } ; optnl : '\n' optnl | ; ipspec : ANY { $$ = NULL; } | xhost { $$ = $1; } | '{' optnl host_list '}' { $$ = $3; } ; toipspec : TO ipspec { $$ = $2; } | /* empty */ { $$ = NULL; } ; host_list : ipspec optnl { $$ = $1; } | host_list comma ipspec optnl { if ($3 == NULL) $$ = $1; else if ($1 == NULL) $$ = $3; else { $1->tail->next = $3; $1->tail = $3->tail; $$ = $1; } } ; xhost : not host { struct node_host *n; for (n = $2; n != NULL; n = n->next) n->not = $1; $$ = $2; } | not NOROUTE { $$ = calloc(1, sizeof(struct node_host)); if ($$ == NULL) err(1, "xhost: calloc"); $$->addr.type = PF_ADDR_NOROUTE;
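/*
 * Illustrative sketch of the head/tail list convention used by nearly
 * every *_list rule above: the head node's tail pointer always names
 * the last node, so two lists concatenate without walking either one.
 * The ex_ names are hypothetical.
 */
#include <stddef.h>

struct ex_node {
	struct ex_node	*next;
	struct ex_node	*tail;	/* only meaningful on the head node */
};

static struct ex_node *
ex_concat(struct ex_node *a, struct ex_node *b)
{
	if (b == NULL)
		return (a);
	if (a == NULL)
		return (b);
	a->tail->next = b;	/* O(1), as in "$1->tail->next = $3" */
	a->tail = b->tail;	/* as in "$1->tail = $3->tail" */
	return (a);
}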
$$->next = NULL; $$->not = $1; $$->tail = $$; } | not URPFFAILED { $$ = calloc(1, sizeof(struct node_host)); if ($$ == NULL) err(1, "xhost: calloc"); $$->addr.type = PF_ADDR_URPFFAILED; $$->next = NULL; $$->not = $1; $$->tail = $$; } ; host : STRING { if (($$ = host($1)) == NULL) { /* error. "any" is handled elsewhere */ free($1); yyerror("could not parse host specification"); YYERROR; } free($1); } | STRING '-' STRING { struct node_host *b, *e; if ((b = host($1)) == NULL || (e = host($3)) == NULL) { free($1); free($3); yyerror("could not parse host specification"); YYERROR; } if (b->af != e->af || b->addr.type != PF_ADDR_ADDRMASK || e->addr.type != PF_ADDR_ADDRMASK || unmask(&b->addr.v.a.mask, b->af) != (b->af == AF_INET ? 32 : 128) || unmask(&e->addr.v.a.mask, e->af) != (e->af == AF_INET ? 32 : 128) || b->next != NULL || b->not || e->next != NULL || e->not) { free(b); free(e); free($1); free($3); yyerror("invalid address range"); YYERROR; } memcpy(&b->addr.v.a.mask, &e->addr.v.a.addr, sizeof(b->addr.v.a.mask)); b->addr.type = PF_ADDR_RANGE; $$ = b; free(e); free($1); free($3); } | STRING '/' NUMBER { char *buf; if (asprintf(&buf, "%s/%lld", $1, (long long)$3) == -1) err(1, "host: asprintf"); free($1); if (($$ = host(buf)) == NULL) { /* error. "any" is handled elsewhere */ free(buf); yyerror("could not parse host specification"); YYERROR; } free(buf); } | NUMBER '/' NUMBER { char *buf; /* ie. for 10/8 parsing */ #ifdef __FreeBSD__ if (asprintf(&buf, "%lld/%lld", (long long)$1, (long long)$3) == -1) #else if (asprintf(&buf, "%lld/%lld", $1, $3) == -1) #endif err(1, "host: asprintf"); if (($$ = host(buf)) == NULL) { /* error. "any" is handled elsewhere */ free(buf); yyerror("could not parse host specification"); YYERROR; } free(buf); } | dynaddr | dynaddr '/' NUMBER { struct node_host *n; if ($3 < 0 || $3 > 128) { yyerror("bit number too big"); YYERROR; } $$ = $1; for (n = $1; n != NULL; n = n->next) set_ipmask(n, $3); } | '<' STRING '>' { if (strlen($2) >= PF_TABLE_NAME_SIZE) { yyerror("table name '%s' too long", $2); free($2); YYERROR; } $$ = calloc(1, sizeof(struct node_host)); if ($$ == NULL) err(1, "host: calloc"); $$->addr.type = PF_ADDR_TABLE; if (strlcpy($$->addr.v.tblname, $2, sizeof($$->addr.v.tblname)) >= sizeof($$->addr.v.tblname)) errx(1, "host: strlcpy"); free($2); $$->next = NULL; $$->tail = $$; } ; number : NUMBER | STRING { u_long ulval; if (atoul($1, &ulval) == -1) { yyerror("%s is not a number", $1); free($1); YYERROR; } else $$ = ulval; free($1); } ; dynaddr : '(' STRING ')' { int flags = 0; char *p, *op; op = $2; if (!isalpha(op[0])) { yyerror("invalid interface name '%s'", op); free(op); YYERROR; } while ((p = strrchr($2, ':')) != NULL) { if (!strcmp(p+1, "network")) flags |= PFI_AFLAG_NETWORK; else if (!strcmp(p+1, "broadcast")) flags |= PFI_AFLAG_BROADCAST; else if (!strcmp(p+1, "peer")) flags |= PFI_AFLAG_PEER; else if (!strcmp(p+1, "0")) flags |= PFI_AFLAG_NOALIAS; else { yyerror("interface %s has bad modifier", $2); free(op); YYERROR; } *p = '\0'; } if (flags & (flags - 1) & PFI_AFLAG_MODEMASK) { free(op); yyerror("illegal combination of " "interface modifiers"); YYERROR; } $$ = calloc(1, sizeof(struct node_host)); if ($$ == NULL) err(1, "address: calloc"); $$->af = 0; set_ipmask($$, 128); $$->addr.type = PF_ADDR_DYNIFTL; $$->addr.iflags = flags; if (strlcpy($$->addr.v.ifname, $2, sizeof($$->addr.v.ifname)) >= sizeof($$->addr.v.ifname)) { free(op); free($$); yyerror("interface name too long"); YYERROR; } free(op); $$->next = NULL; $$->tail = $$; } ; portspec : 
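/*
 * Illustrative sketch of the modifier check in the dynaddr rule above:
 * x & (x - 1) clears the lowest set bit, so after masking with the
 * mutually exclusive mode bits a nonzero result means more than one of
 * :network, :broadcast or :peer was requested.  EX_MODEMASK stands in
 * for PFI_AFLAG_MODEMASK; its value here is assumed.
 */
#include <stdint.h>

#define EX_MODEMASK	0x07	/* assumed: network|broadcast|peer */

static int
ex_conflicting_modifiers(uint32_t flags)
{
	return ((flags & (flags - 1) & EX_MODEMASK) != 0);
}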
port_item { $$ = $1; } | '{' optnl port_list '}' { $$ = $3; } ; port_list : port_item optnl { $$ = $1; } | port_list comma port_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; port_item : portrange { $$ = calloc(1, sizeof(struct node_port)); if ($$ == NULL) err(1, "port_item: calloc"); $$->port[0] = $1.a; $$->port[1] = $1.b; if ($1.t) $$->op = PF_OP_RRG; else $$->op = PF_OP_EQ; $$->next = NULL; $$->tail = $$; } | unaryop portrange { if ($2.t) { yyerror("':' cannot be used with another " "port operator"); YYERROR; } $$ = calloc(1, sizeof(struct node_port)); if ($$ == NULL) err(1, "port_item: calloc"); $$->port[0] = $2.a; $$->port[1] = $2.b; $$->op = $1; $$->next = NULL; $$->tail = $$; } | portrange PORTBINARY portrange { if ($1.t || $3.t) { yyerror("':' cannot be used with another " "port operator"); YYERROR; } $$ = calloc(1, sizeof(struct node_port)); if ($$ == NULL) err(1, "port_item: calloc"); $$->port[0] = $1.a; $$->port[1] = $3.a; $$->op = $2; $$->next = NULL; $$->tail = $$; } ; portplain : numberstring { if (parseport($1, &$$, 0) == -1) { free($1); YYERROR; } free($1); } ; portrange : numberstring { if (parseport($1, &$$, PPORT_RANGE) == -1) { free($1); YYERROR; } free($1); } ; uids : uid_item { $$ = $1; } | '{' optnl uid_list '}' { $$ = $3; } ; uid_list : uid_item optnl { $$ = $1; } | uid_list comma uid_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; uid_item : uid { $$ = calloc(1, sizeof(struct node_uid)); if ($$ == NULL) err(1, "uid_item: calloc"); $$->uid[0] = $1; $$->uid[1] = $1; $$->op = PF_OP_EQ; $$->next = NULL; $$->tail = $$; } | unaryop uid { if ($2 == UID_MAX && $1 != PF_OP_EQ && $1 != PF_OP_NE) { yyerror("user unknown requires operator = or " "!="); YYERROR; } $$ = calloc(1, sizeof(struct node_uid)); if ($$ == NULL) err(1, "uid_item: calloc"); $$->uid[0] = $2; $$->uid[1] = $2; $$->op = $1; $$->next = NULL; $$->tail = $$; } | uid PORTBINARY uid { if ($1 == UID_MAX || $3 == UID_MAX) { yyerror("user unknown requires operator = or " "!="); YYERROR; } $$ = calloc(1, sizeof(struct node_uid)); if ($$ == NULL) err(1, "uid_item: calloc"); $$->uid[0] = $1; $$->uid[1] = $3; $$->op = $2; $$->next = NULL; $$->tail = $$; } ; uid : STRING { if (!strcmp($1, "unknown")) $$ = UID_MAX; else { struct passwd *pw; if ((pw = getpwnam($1)) == NULL) { yyerror("unknown user %s", $1); free($1); YYERROR; } $$ = pw->pw_uid; } free($1); } | NUMBER { if ($1 < 0 || $1 >= UID_MAX) { yyerror("illegal uid value %lu", $1); YYERROR; } $$ = $1; } ; gids : gid_item { $$ = $1; } | '{' optnl gid_list '}' { $$ = $3; } ; gid_list : gid_item optnl { $$ = $1; } | gid_list comma gid_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; gid_item : gid { $$ = calloc(1, sizeof(struct node_gid)); if ($$ == NULL) err(1, "gid_item: calloc"); $$->gid[0] = $1; $$->gid[1] = $1; $$->op = PF_OP_EQ; $$->next = NULL; $$->tail = $$; } | unaryop gid { if ($2 == GID_MAX && $1 != PF_OP_EQ && $1 != PF_OP_NE) { yyerror("group unknown requires operator = or " "!="); YYERROR; } $$ = calloc(1, sizeof(struct node_gid)); if ($$ == NULL) err(1, "gid_item: calloc"); $$->gid[0] = $2; $$->gid[1] = $2; $$->op = $1; $$->next = NULL; $$->tail = $$; } | gid PORTBINARY gid { if ($1 == GID_MAX || $3 == GID_MAX) { yyerror("group unknown requires operator = or " "!="); YYERROR; } $$ = calloc(1, sizeof(struct node_gid)); if ($$ == NULL) err(1, "gid_item: calloc"); $$->gid[0] = $1; $$->gid[1] = $3; $$->op = $2; $$->next = NULL; $$->tail = $$; } ; gid : STRING { if (!strcmp($1, "unknown")) $$ = GID_MAX; else { struct
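/*
 * Illustrative sketch of the user lookup convention above: the literal
 * "unknown" maps to the UID_MAX sentinel, which is why the grammar only
 * accepts it with = and !=, since a range against "no known owner" is
 * meaningless.  The ex_ names are hypothetical.
 */
#include <sys/types.h>
#include <limits.h>
#include <pwd.h>
#include <string.h>

#ifndef UID_MAX
#define UID_MAX	UINT_MAX	/* assumed, matching the BSD convention */
#endif

static int
ex_name_to_uid(const char *name, uid_t *uid)
{
	struct passwd *pw;

	if (strcmp(name, "unknown") == 0) {
		*uid = UID_MAX;		/* sentinel: socket owner unknown */
		return (0);
	}
	if ((pw = getpwnam(name)) == NULL)
		return (-1);		/* no such user */
	*uid = pw->pw_uid;
	return (0);
}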
group *grp; if ((grp = getgrnam($1)) == NULL) { yyerror("unknown group %s", $1); free($1); YYERROR; } $$ = grp->gr_gid; } free($1); } | NUMBER { if ($1 < 0 || $1 >= GID_MAX) { yyerror("illegal gid value %lu", $1); YYERROR; } $$ = $1; } ; flag : STRING { int f; if ((f = parse_flags($1)) < 0) { yyerror("bad flags %s", $1); free($1); YYERROR; } free($1); $$.b1 = f; } ; flags : FLAGS flag '/' flag { $$.b1 = $2.b1; $$.b2 = $4.b1; } | FLAGS '/' flag { $$.b1 = 0; $$.b2 = $3.b1; } | FLAGS ANY { $$.b1 = 0; $$.b2 = 0; } ; icmpspec : ICMPTYPE icmp_item { $$ = $2; } | ICMPTYPE '{' optnl icmp_list '}' { $$ = $4; } | ICMP6TYPE icmp6_item { $$ = $2; } | ICMP6TYPE '{' optnl icmp6_list '}' { $$ = $4; } ; icmp_list : icmp_item optnl { $$ = $1; } | icmp_list comma icmp_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; icmp6_list : icmp6_item optnl { $$ = $1; } | icmp6_list comma icmp6_item optnl { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; icmp_item : icmptype { $$ = calloc(1, sizeof(struct node_icmp)); if ($$ == NULL) err(1, "icmp_item: calloc"); $$->type = $1; $$->code = 0; $$->proto = IPPROTO_ICMP; $$->next = NULL; $$->tail = $$; } | icmptype CODE STRING { const struct icmpcodeent *p; if ((p = geticmpcodebyname($1-1, $3, AF_INET)) == NULL) { yyerror("unknown icmp-code %s", $3); free($3); YYERROR; } free($3); $$ = calloc(1, sizeof(struct node_icmp)); if ($$ == NULL) err(1, "icmp_item: calloc"); $$->type = $1; $$->code = p->code + 1; $$->proto = IPPROTO_ICMP; $$->next = NULL; $$->tail = $$; } | icmptype CODE NUMBER { if ($3 < 0 || $3 > 255) { yyerror("illegal icmp-code %lu", $3); YYERROR; } $$ = calloc(1, sizeof(struct node_icmp)); if ($$ == NULL) err(1, "icmp_item: calloc"); $$->type = $1; $$->code = $3 + 1; $$->proto = IPPROTO_ICMP; $$->next = NULL; $$->tail = $$; } ; icmp6_item : icmp6type { $$ = calloc(1, sizeof(struct node_icmp)); if ($$ == NULL) err(1, "icmp_item: calloc"); $$->type = $1; $$->code = 0; $$->proto = IPPROTO_ICMPV6; $$->next = NULL; $$->tail = $$; } | icmp6type CODE STRING { const struct icmpcodeent *p; if ((p = geticmpcodebyname($1-1, $3, AF_INET6)) == NULL) { yyerror("unknown icmp6-code %s", $3); free($3); YYERROR; } free($3); $$ = calloc(1, sizeof(struct node_icmp)); if ($$ == NULL) err(1, "icmp_item: calloc"); $$->type = $1; $$->code = p->code + 1; $$->proto = IPPROTO_ICMPV6; $$->next = NULL; $$->tail = $$; } | icmp6type CODE NUMBER { if ($3 < 0 || $3 > 255) { yyerror("illegal icmp6-code %lu", $3); YYERROR; } $$ = calloc(1, sizeof(struct node_icmp)); if ($$ == NULL) err(1, "icmp_item: calloc"); $$->type = $1; $$->code = $3 + 1; $$->proto = IPPROTO_ICMPV6; $$->next = NULL; $$->tail = $$; } ; icmptype : STRING { const struct icmptypeent *p; if ((p = geticmptypebyname($1, AF_INET)) == NULL) { yyerror("unknown icmp-type %s", $1); free($1); YYERROR; } $$ = p->type + 1; free($1); } | NUMBER { if ($1 < 0 || $1 > 255) { yyerror("illegal icmp-type %lu", $1); YYERROR; } $$ = $1 + 1; } ; icmp6type : STRING { const struct icmptypeent *p; if ((p = geticmptypebyname($1, AF_INET6)) == NULL) { yyerror("unknown icmp6-type %s", $1); free($1); YYERROR; } $$ = p->type + 1; free($1); } | NUMBER { if ($1 < 0 || $1 > 255) { yyerror("illegal icmp6-type %lu", $1); YYERROR; } $$ = $1 + 1; } ; tos : STRING { int val; char *end; if (map_tos($1, &val)) $$ = val; else if ($1[0] == '0' && $1[1] == 'x') { errno = 0; $$ = strtoul($1, &end, 16); if (errno || *end != '\0') $$ = 256; } else $$ = 256; /* flag bad argument */ if ($$ < 0 || $$ > 255) { yyerror("illegal tos value %s", $1); free($1);
YYERROR; } free($1); } | NUMBER { $$ = $1; if ($$ < 0 || $$ > 255) { yyerror("illegal tos value %s", $1); YYERROR; } } ; sourcetrack : SOURCETRACK { $$ = PF_SRCTRACK; } | SOURCETRACK GLOBAL { $$ = PF_SRCTRACK_GLOBAL; } | SOURCETRACK RULE { $$ = PF_SRCTRACK_RULE; } ; statelock : IFBOUND { $$ = PFRULE_IFBOUND; } | FLOATING { $$ = 0; } ; keep : NO STATE { $$.action = 0; $$.options = NULL; } | KEEP STATE state_opt_spec { $$.action = PF_STATE_NORMAL; $$.options = $3; } | MODULATE STATE state_opt_spec { $$.action = PF_STATE_MODULATE; $$.options = $3; } | SYNPROXY STATE state_opt_spec { $$.action = PF_STATE_SYNPROXY; $$.options = $3; } ; flush : /* empty */ { $$ = 0; } | FLUSH { $$ = PF_FLUSH; } | FLUSH GLOBAL { $$ = PF_FLUSH | PF_FLUSH_GLOBAL; } ; state_opt_spec : '(' state_opt_list ')' { $$ = $2; } | /* empty */ { $$ = NULL; } ; state_opt_list : state_opt_item { $$ = $1; } | state_opt_list comma state_opt_item { $1->tail->next = $3; $1->tail = $3; $$ = $1; } ; state_opt_item : MAXIMUM NUMBER { if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_MAX; $$->data.max_states = $2; $$->next = NULL; $$->tail = $$; } | NOSYNC { $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_NOSYNC; $$->next = NULL; $$->tail = $$; } | MAXSRCSTATES NUMBER { if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_MAX_SRC_STATES; $$->data.max_src_states = $2; $$->next = NULL; $$->tail = $$; } | MAXSRCCONN NUMBER { if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_MAX_SRC_CONN; $$->data.max_src_conn = $2; $$->next = NULL; $$->tail = $$; } | MAXSRCCONNRATE NUMBER '/' NUMBER { if ($2 < 0 || $2 > UINT_MAX || $4 < 0 || $4 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_MAX_SRC_CONN_RATE; $$->data.max_src_conn_rate.limit = $2; $$->data.max_src_conn_rate.seconds = $4; $$->next = NULL; $$->tail = $$; } | OVERLOAD '<' STRING '>' flush { if (strlen($3) >= PF_TABLE_NAME_SIZE) { yyerror("table name '%s' too long", $3); free($3); YYERROR; } $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); if (strlcpy($$->data.overload.tblname, $3, PF_TABLE_NAME_SIZE) >= PF_TABLE_NAME_SIZE) errx(1, "state_opt_item: strlcpy"); free($3); $$->type = PF_STATE_OPT_OVERLOAD; $$->data.overload.flush = $5; $$->next = NULL; $$->tail = $$; } | MAXSRCNODES NUMBER { if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_MAX_SRC_NODES; $$->data.max_src_nodes = $2; $$->next = NULL; $$->tail = $$; } | sourcetrack { $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_SRCTRACK; $$->data.src_track = $1; $$->next = NULL; $$->tail = $$; } | statelock { $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); 
$$->type = PF_STATE_OPT_STATELOCK; $$->data.statelock = $1; $$->next = NULL; $$->tail = $$; } | SLOPPY { $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_SLOPPY; $$->next = NULL; $$->tail = $$; } | STRING NUMBER { int i; if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } for (i = 0; pf_timeouts[i].name && strcmp(pf_timeouts[i].name, $1); ++i) ; /* nothing */ if (!pf_timeouts[i].name) { yyerror("illegal timeout name %s", $1); free($1); YYERROR; } if (strchr(pf_timeouts[i].name, '.') == NULL) { yyerror("illegal state timeout %s", $1); free($1); YYERROR; } free($1); $$ = calloc(1, sizeof(struct node_state_opt)); if ($$ == NULL) err(1, "state_opt_item: calloc"); $$->type = PF_STATE_OPT_TIMEOUT; $$->data.timeout.number = pf_timeouts[i].timeout; $$->data.timeout.seconds = $2; $$->next = NULL; $$->tail = $$; } ; label : LABEL STRING { $$ = $2; } ; etherqname : QUEUE STRING { $$.qname = $2; } | QUEUE '(' STRING ')' { $$.qname = $3; } ; qname : QUEUE STRING { $$.qname = $2; $$.pqname = NULL; } | QUEUE '(' STRING ')' { $$.qname = $3; $$.pqname = NULL; } | QUEUE '(' STRING comma STRING ')' { $$.qname = $3; $$.pqname = $5; } ; no : /* empty */ { $$ = 0; } | NO { $$ = 1; } ; portstar : numberstring { if (parseport($1, &$$, PPORT_RANGE|PPORT_STAR) == -1) { free($1); YYERROR; } free($1); } ; redirspec : host { $$ = $1; } | '{' optnl redir_host_list '}' { $$ = $3; } ; redir_host_list : host optnl { $$ = $1; } | redir_host_list comma host optnl { $1->tail->next = $3; $1->tail = $3->tail; $$ = $1; } ; redirpool : /* empty */ { $$ = NULL; } | ARROW redirspec { $$ = calloc(1, sizeof(struct redirection)); if ($$ == NULL) err(1, "redirection: calloc"); $$->host = $2; $$->rport.a = $$->rport.b = $$->rport.t = 0; } | ARROW redirspec PORT portstar { $$ = calloc(1, sizeof(struct redirection)); if ($$ == NULL) err(1, "redirection: calloc"); $$->host = $2; $$->rport = $4; } ; hashkey : /* empty */ { $$ = calloc(1, sizeof(struct pf_poolhashkey)); if ($$ == NULL) err(1, "hashkey: calloc"); $$->key32[0] = arc4random(); $$->key32[1] = arc4random(); $$->key32[2] = arc4random(); $$->key32[3] = arc4random(); } | string { if (!strncmp($1, "0x", 2)) { if (strlen($1) != 34) { free($1); yyerror("hex key must be 128 bits " "(32 hex digits) long"); YYERROR; } $$ = calloc(1, sizeof(struct pf_poolhashkey)); if ($$ == NULL) err(1, "hashkey: calloc"); if (sscanf($1, "0x%8x%8x%8x%8x", &$$->key32[0], &$$->key32[1], &$$->key32[2], &$$->key32[3]) != 4) { free($$); free($1); yyerror("invalid hex key"); YYERROR; } } else { MD5_CTX context; $$ = calloc(1, sizeof(struct pf_poolhashkey)); if ($$ == NULL) err(1, "hashkey: calloc"); MD5Init(&context); MD5Update(&context, (unsigned char *)$1, strlen($1)); MD5Final((unsigned char *)$$, &context); HTONL($$->key32[0]); HTONL($$->key32[1]); HTONL($$->key32[2]); HTONL($$->key32[3]); } free($1); } ; pool_opts : { bzero(&pool_opts, sizeof pool_opts); } pool_opts_l { $$ = pool_opts; } | /* empty */ { bzero(&pool_opts, sizeof pool_opts); $$ = pool_opts; } ; pool_opts_l : pool_opts_l pool_opt | pool_opt ; pool_opt : BITMASK { if (pool_opts.type) { yyerror("pool type cannot be redefined"); YYERROR; } pool_opts.type = PF_POOL_BITMASK; } | RANDOM { if (pool_opts.type) { yyerror("pool type cannot be redefined"); YYERROR; } pool_opts.type = PF_POOL_RANDOM; } | SOURCEHASH hashkey { if (pool_opts.type) { yyerror("pool type cannot be redefined"); YYERROR; } pool_opts.type = PF_POOL_SRCHASH; pool_opts.key = 
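/*
 * Illustrative sketch, not part of the change above: the hashkey rule
 * accepts either a 128-bit hex constant or a free-form passphrase; the
 * passphrase is digested with MD5 into the four key words, which are
 * then forced to network byte order.  The ex_ names are hypothetical;
 * MD5Init/MD5Update/MD5Final are the libmd routines this file uses.
 */
#include <sys/types.h>
#include <arpa/inet.h>
#include <md5.h>
#include <string.h>

struct ex_hashkey {
	u_int32_t	key32[4];
};

static void
ex_passphrase_to_key(const char *s, struct ex_hashkey *k)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	MD5Update(&ctx, (const u_char *)s, strlen(s));
	MD5Final((u_char *)k, &ctx);
	for (int i = 0; i < 4; i++)	/* matches the HTONL() calls above */
		k->key32[i] = htonl(k->key32[i]);
}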
$2; } | ROUNDROBIN { if (pool_opts.type) { yyerror("pool type cannot be redefined"); YYERROR; } pool_opts.type = PF_POOL_ROUNDROBIN; } | STATICPORT { if (pool_opts.staticport) { yyerror("static-port cannot be redefined"); YYERROR; } pool_opts.staticport = 1; } | STICKYADDRESS { if (pool_opts.marker & POM_STICKYADDRESS) { yyerror("sticky-address cannot be redefined"); YYERROR; } pool_opts.marker |= POM_STICKYADDRESS; pool_opts.opts |= PF_POOL_STICKYADDR; } | MAPEPORTSET number '/' number '/' number { if (pool_opts.mape.offset) { yyerror("map-e-portset cannot be redefined"); YYERROR; } if (pool_opts.type) { yyerror("map-e-portset cannot be used with " "address pools"); YYERROR; } if ($2 <= 0 || $2 >= 16) { yyerror("MAP-E PSID offset must be 1-15"); YYERROR; } if ($4 < 0 || $4 >= 16 || $2 + $4 > 16) { yyerror("Invalid MAP-E PSID length"); YYERROR; } else if ($4 == 0) { yyerror("PSID Length = 0: this means" " you do not need MAP-E"); YYERROR; } if ($6 < 0 || $6 > 65535) { yyerror("Invalid MAP-E PSID"); YYERROR; } pool_opts.mape.offset = $2; pool_opts.mape.psidlen = $4; pool_opts.mape.psid = $6; } ; redirection : /* empty */ { $$ = NULL; } | ARROW host { $$ = calloc(1, sizeof(struct redirection)); if ($$ == NULL) err(1, "redirection: calloc"); $$->host = $2; $$->rport.a = $$->rport.b = $$->rport.t = 0; } | ARROW host PORT portstar { $$ = calloc(1, sizeof(struct redirection)); if ($$ == NULL) err(1, "redirection: calloc"); $$->host = $2; $$->rport = $4; } ; natpasslog : /* empty */ { $$.b1 = $$.b2 = 0; $$.w2 = 0; } | PASS { $$.b1 = 1; $$.b2 = 0; $$.w2 = 0; } | PASS log { $$.b1 = 1; $$.b2 = $2.log; $$.w2 = $2.logif; } | log { $$.b1 = 0; $$.b2 = $1.log; $$.w2 = $1.logif; } ; nataction : no NAT natpasslog { if ($1 && $3.b1) { yyerror("\"pass\" not valid with \"no\""); YYERROR; } if ($1) $$.b1 = PF_NONAT; else $$.b1 = PF_NAT; $$.b2 = $3.b1; $$.w = $3.b2; $$.w2 = $3.w2; } | no RDR natpasslog { if ($1 && $3.b1) { yyerror("\"pass\" not valid with \"no\""); YYERROR; } if ($1) $$.b1 = PF_NORDR; else $$.b1 = PF_RDR; $$.b2 = $3.b1; $$.w = $3.b2; $$.w2 = $3.w2; } ; natrule : nataction interface af proto fromto tag tagged rtable redirpool pool_opts { struct pfctl_rule r; if (check_rulestate(PFCTL_STATE_NAT)) YYERROR; memset(&r, 0, sizeof(r)); r.action = $1.b1; r.natpass = $1.b2; r.log = $1.w; r.logif = $1.w2; r.af = $3; if (!r.af) { if ($5.src.host && $5.src.host->af && !$5.src.host->ifindex) r.af = $5.src.host->af; else if ($5.dst.host && $5.dst.host->af && !$5.dst.host->ifindex) r.af = $5.dst.host->af; } if ($6 != NULL) if (strlcpy(r.tagname, $6, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } if ($7.name) if (strlcpy(r.match_tagname, $7.name, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } r.match_tag_not = $7.neg; r.rtableid = $8; if (r.action == PF_NONAT || r.action == PF_NORDR) { if ($9 != NULL) { yyerror("translation rule with 'no' " "does not need '->'"); YYERROR; } } else { if ($9 == NULL || $9->host == NULL) { yyerror("translation rule requires '-> " "address'"); YYERROR; } if (!r.af && ! 
$9->host->ifindex) r.af = $9->host->af; remove_invalid_hosts(&$9->host, &r.af); if (invalid_redirect($9->host, r.af)) YYERROR; if (check_netmask($9->host, r.af)) YYERROR; r.rpool.proxy_port[0] = ntohs($9->rport.a); switch (r.action) { case PF_RDR: if (!$9->rport.b && $9->rport.t && $5.dst.port != NULL) { r.rpool.proxy_port[1] = ntohs($9->rport.a) + (ntohs( $5.dst.port->port[1]) - ntohs( $5.dst.port->port[0])); } else r.rpool.proxy_port[1] = ntohs($9->rport.b); break; case PF_NAT: r.rpool.proxy_port[1] = ntohs($9->rport.b); if (!r.rpool.proxy_port[0] && !r.rpool.proxy_port[1]) { r.rpool.proxy_port[0] = PF_NAT_PROXY_PORT_LOW; r.rpool.proxy_port[1] = PF_NAT_PROXY_PORT_HIGH; } else if (!r.rpool.proxy_port[1]) r.rpool.proxy_port[1] = r.rpool.proxy_port[0]; break; default: break; } r.rpool.opts = $10.type; if ((r.rpool.opts & PF_POOL_TYPEMASK) == PF_POOL_NONE && ($9->host->next != NULL || $9->host->addr.type == PF_ADDR_TABLE || DYNIF_MULTIADDR($9->host->addr))) r.rpool.opts = PF_POOL_ROUNDROBIN; if ((r.rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN && disallow_table($9->host, "tables are only " "supported in round-robin redirection " "pools")) YYERROR; if ((r.rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN && disallow_alias($9->host, "interface (%s) " "is only supported in round-robin " "redirection pools")) YYERROR; if ($9->host->next != NULL) { if ((r.rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) { yyerror("only round-robin " "valid for multiple " "redirection addresses"); YYERROR; } } } if ($10.key != NULL) memcpy(&r.rpool.key, $10.key, sizeof(struct pf_poolhashkey)); if ($10.opts) r.rpool.opts |= $10.opts; if ($10.staticport) { if (r.action != PF_NAT) { yyerror("the 'static-port' option is " "only valid with nat rules"); YYERROR; } if (r.rpool.proxy_port[0] != PF_NAT_PROXY_PORT_LOW && r.rpool.proxy_port[1] != PF_NAT_PROXY_PORT_HIGH) { yyerror("the 'static-port' option can't" " be used when specifying a port" " range"); YYERROR; } r.rpool.proxy_port[0] = 0; r.rpool.proxy_port[1] = 0; } if ($10.mape.offset) { if (r.action != PF_NAT) { yyerror("the 'map-e-portset' option is" " only valid with nat rules"); YYERROR; } if ($10.staticport) { yyerror("the 'map-e-portset' option" " can't be used with 'static-port'"); YYERROR; } if (r.rpool.proxy_port[0] != PF_NAT_PROXY_PORT_LOW && r.rpool.proxy_port[1] != PF_NAT_PROXY_PORT_HIGH) { yyerror("the 'map-e-portset' option" " can't be used when specifying" " a port range"); YYERROR; } r.rpool.mape = $10.mape; } expand_rule(&r, $2, $9 == NULL ?
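/*
 * Illustrative sketch of the proxy-port defaulting in the PF_NAT case
 * above: with no "port" at all, nat uses the standard 50001-65535
 * proxy range; with a single port, the range degenerates to exactly
 * that port.  The EX_ constants mirror PF_NAT_PROXY_PORT_LOW/HIGH;
 * the ex_ names are hypothetical.
 */
#include <stdint.h>

#define EX_PROXY_PORT_LOW	50001
#define EX_PROXY_PORT_HIGH	65535

static void
ex_nat_default_proxy_ports(uint16_t *lo, uint16_t *hi)
{
	if (*lo == 0 && *hi == 0) {	/* "-> addr" with no port given */
		*lo = EX_PROXY_PORT_LOW;
		*hi = EX_PROXY_PORT_HIGH;
	} else if (*hi == 0)		/* "-> addr port N": exactly N */
		*hi = *lo;
}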
NULL : $9->host, $4, $5.src_os, $5.src.host, $5.src.port, $5.dst.host, $5.dst.port, 0, 0, 0, ""); free($9); } ; binatrule : no BINAT natpasslog interface af proto FROM ipspec toipspec tag tagged rtable redirection { struct pfctl_rule binat; struct pf_pooladdr *pa; if (check_rulestate(PFCTL_STATE_NAT)) YYERROR; if (disallow_urpf_failed($9, "\"urpf-failed\" is not " "permitted as a binat destination")) YYERROR; memset(&binat, 0, sizeof(binat)); if ($1 && $3.b1) { yyerror("\"pass\" not valid with \"no\""); YYERROR; } if ($1) binat.action = PF_NOBINAT; else binat.action = PF_BINAT; binat.natpass = $3.b1; binat.log = $3.b2; binat.logif = $3.w2; binat.af = $5; if (!binat.af && $8 != NULL && $8->af) binat.af = $8->af; if (!binat.af && $9 != NULL && $9->af) binat.af = $9->af; if (!binat.af && $13 != NULL && $13->host) binat.af = $13->host->af; if (!binat.af) { yyerror("address family (inet/inet6) " "undefined"); YYERROR; } if ($4 != NULL) { memcpy(binat.ifname, $4->ifname, sizeof(binat.ifname)); binat.ifnot = $4->not; free($4); } if ($10 != NULL) if (strlcpy(binat.tagname, $10, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } if ($11.name) if (strlcpy(binat.match_tagname, $11.name, PF_TAG_NAME_SIZE) >= PF_TAG_NAME_SIZE) { yyerror("tag too long, max %u chars", PF_TAG_NAME_SIZE - 1); YYERROR; } binat.match_tag_not = $11.neg; binat.rtableid = $12; if ($6 != NULL) { binat.proto = $6->proto; free($6); } if ($8 != NULL && disallow_table($8, "invalid use of " "table <%s> as the source address of a binat rule")) YYERROR; if ($8 != NULL && disallow_alias($8, "invalid use of " "interface (%s) as the source address of a binat " "rule")) YYERROR; if ($13 != NULL && $13->host != NULL && disallow_table( $13->host, "invalid use of table <%s> as the " "redirect address of a binat rule")) YYERROR; if ($13 != NULL && $13->host != NULL && disallow_alias( $13->host, "invalid use of interface (%s) as the " "redirect address of a binat rule")) YYERROR; if ($8 != NULL) { if ($8->next) { yyerror("multiple binat ip addresses"); YYERROR; } if ($8->addr.type == PF_ADDR_DYNIFTL) $8->af = binat.af; if ($8->af != binat.af) { yyerror("binat ip versions must match"); YYERROR; } if (check_netmask($8, binat.af)) YYERROR; memcpy(&binat.src.addr, &$8->addr, sizeof(binat.src.addr)); free($8); } if ($9 != NULL) { if ($9->next) { yyerror("multiple binat ip addresses"); YYERROR; } if ($9->af != binat.af && $9->af) { yyerror("binat ip versions must match"); YYERROR; } if (check_netmask($9, binat.af)) YYERROR; memcpy(&binat.dst.addr, &$9->addr, sizeof(binat.dst.addr)); binat.dst.neg = $9->not; free($9); } if (binat.action == PF_NOBINAT) { if ($13 != NULL) { yyerror("'no binat' rule does not need" " '->'"); YYERROR; } } else { if ($13 == NULL || $13->host == NULL) { yyerror("'binat' rule requires" " '-> address'"); YYERROR; } remove_invalid_hosts(&$13->host, &binat.af); if (invalid_redirect($13->host, binat.af)) YYERROR; if ($13->host->next != NULL) { yyerror("binat rule must redirect to " "a single address"); YYERROR; } if (check_netmask($13->host, binat.af)) YYERROR; if (!PF_AZERO(&binat.src.addr.v.a.mask, binat.af) && !PF_AEQ(&binat.src.addr.v.a.mask, &$13->host->addr.v.a.mask, binat.af)) { yyerror("'binat' source mask and " "redirect mask must be the same"); YYERROR; } TAILQ_INIT(&binat.rpool.list); pa = calloc(1, sizeof(struct pf_pooladdr)); if (pa == NULL) err(1, "binat: calloc"); pa->addr = $13->host->addr; pa->ifname[0] = 0; TAILQ_INSERT_TAIL(&binat.rpool.list, pa, 
entries); free($13); } pfctl_append_rule(pf, &binat, ""); } ; tag : /* empty */ { $$ = NULL; } | TAG STRING { $$ = $2; } ; tagged : /* empty */ { $$.neg = 0; $$.name = NULL; } | not TAGGED string { $$.neg = $1; $$.name = $3; } ; rtable : /* empty */ { $$ = -1; } | RTABLE NUMBER { if ($2 < 0 || $2 > rt_tableid_max()) { yyerror("invalid rtable id"); YYERROR; } $$ = $2; } ; route_host : STRING { $$ = calloc(1, sizeof(struct node_host)); if ($$ == NULL) err(1, "route_host: calloc"); if (strlen($1) >= IFNAMSIZ) { yyerror("interface name too long"); YYERROR; } $$->ifname = strdup($1); set_ipmask($$, 128); $$->next = NULL; $$->tail = $$; } | '(' STRING host ')' { struct node_host *n; $$ = $3; for (n = $3; n != NULL; n = n->next) { if (strlen($2) >= IFNAMSIZ) { yyerror("interface name too long"); YYERROR; } n->ifname = strdup($2); } } ; route_host_list : route_host optnl { $$ = $1; } | route_host_list comma route_host optnl { if ($1->af == 0) $1->af = $3->af; if ($1->af != $3->af) { yyerror("all pool addresses must be in the " "same address family"); YYERROR; } $1->tail->next = $3; $1->tail = $3->tail; $$ = $1; } ; routespec : route_host { $$ = $1; } | '{' optnl route_host_list '}' { $$ = $3; } ; route : /* empty */ { $$.host = NULL; $$.rt = 0; $$.pool_opts = 0; } | FASTROUTE { /* backwards-compat */ $$.host = NULL; $$.rt = 0; $$.pool_opts = 0; } | ROUTETO routespec pool_opts { $$.host = $2; $$.rt = PF_ROUTETO; $$.pool_opts = $3.type | $3.opts; if ($3.key != NULL) $$.key = $3.key; } | REPLYTO routespec pool_opts { $$.host = $2; $$.rt = PF_REPLYTO; $$.pool_opts = $3.type | $3.opts; if ($3.key != NULL) $$.key = $3.key; } | DUPTO routespec pool_opts { $$.host = $2; $$.rt = PF_DUPTO; $$.pool_opts = $3.type | $3.opts; if ($3.key != NULL) $$.key = $3.key; } ; timeout_spec : STRING NUMBER { if (check_rulestate(PFCTL_STATE_OPTION)) { free($1); YYERROR; } if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } if (pfctl_set_timeout(pf, $1, $2, 0) != 0) { yyerror("unknown timeout %s", $1); free($1); YYERROR; } free($1); } | INTERVAL NUMBER { if (check_rulestate(PFCTL_STATE_OPTION)) YYERROR; if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } if (pfctl_set_timeout(pf, "interval", $2, 0) != 0) YYERROR; } ; timeout_list : timeout_list comma timeout_spec optnl | timeout_spec optnl ; limit_spec : STRING NUMBER { if (check_rulestate(PFCTL_STATE_OPTION)) { free($1); YYERROR; } if ($2 < 0 || $2 > UINT_MAX) { yyerror("only positive values permitted"); YYERROR; } if (pfctl_set_limit(pf, $1, $2) != 0) { yyerror("unable to set limit %s %u", $1, $2); free($1); YYERROR; } free($1); } ; limit_list : limit_list comma limit_spec optnl | limit_spec optnl ; comma : ',' | /* empty */ ; yesno : NO { $$ = 0; } | STRING { if (!strcmp($1, "yes")) $$ = 1; else { yyerror("invalid value '%s', expected 'yes' " "or 'no'", $1); free($1); YYERROR; } free($1); } ; unaryop : '=' { $$ = PF_OP_EQ; } | '!' '=' { $$ = PF_OP_NE; } | '<' '=' { $$ = PF_OP_LE; } | '<' { $$ = PF_OP_LT; } | '>' '=' { $$ = PF_OP_GE; } | '>' { $$ = PF_OP_GT; } ; %% int yyerror(const char *fmt, ...) 
{ va_list ap; file->errors++; va_start(ap, fmt); fprintf(stderr, "%s:%d: ", file->name, yylval.lineno); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); return (0); } int disallow_table(struct node_host *h, const char *fmt) { for (; h != NULL; h = h->next) if (h->addr.type == PF_ADDR_TABLE) { yyerror(fmt, h->addr.v.tblname); return (1); } return (0); } int disallow_urpf_failed(struct node_host *h, const char *fmt) { for (; h != NULL; h = h->next) if (h->addr.type == PF_ADDR_URPFFAILED) { yyerror(fmt); return (1); } return (0); } int disallow_alias(struct node_host *h, const char *fmt) { for (; h != NULL; h = h->next) if (DYNIF_MULTIADDR(h->addr)) { yyerror(fmt, h->addr.v.tblname); return (1); } return (0); } int rule_consistent(struct pfctl_rule *r, int anchor_call) { int problems = 0; switch (r->action) { case PF_PASS: + case PF_MATCH: case PF_DROP: case PF_SCRUB: case PF_NOSCRUB: problems = filter_consistent(r, anchor_call); break; case PF_NAT: case PF_NONAT: problems = nat_consistent(r); break; case PF_RDR: case PF_NORDR: problems = rdr_consistent(r); break; case PF_BINAT: case PF_NOBINAT: default: break; } return (problems); } int filter_consistent(struct pfctl_rule *r, int anchor_call) { int problems = 0; if (r->proto != IPPROTO_TCP && r->proto != IPPROTO_UDP && (r->src.port_op || r->dst.port_op)) { yyerror("port only applies to tcp/udp"); problems++; } if (r->proto != IPPROTO_ICMP && r->proto != IPPROTO_ICMPV6 && (r->type || r->code)) { yyerror("icmp-type/code only applies to icmp"); problems++; } if (!r->af && (r->type || r->code)) { yyerror("must indicate address family with icmp-type/code"); problems++; } if (r->overload_tblname[0] && r->max_src_conn == 0 && r->max_src_conn_rate.seconds == 0) { yyerror("'overload' requires 'max-src-conn' " "or 'max-src-conn-rate'"); problems++; } if ((r->proto == IPPROTO_ICMP && r->af == AF_INET6) || (r->proto == IPPROTO_ICMPV6 && r->af == AF_INET)) { yyerror("proto %s doesn't match address family %s", r->proto == IPPROTO_ICMP ? "icmp" : "icmp6", r->af == AF_INET ? "inet" : "inet6"); problems++; } if (r->allow_opts && r->action != PF_PASS) { yyerror("allow-opts can only be specified for pass rules"); problems++; } if (r->rule_flag & PFRULE_FRAGMENT && (r->src.port_op || r->dst.port_op || r->flagset || r->type || r->code)) { yyerror("fragments can be filtered only on IP header fields"); problems++; } if (r->rule_flag & PFRULE_RETURNRST && r->proto != IPPROTO_TCP) { yyerror("return-rst can only be applied to TCP rules"); problems++; } if (r->max_src_nodes && !(r->rule_flag & PFRULE_RULESRCTRACK)) { yyerror("max-src-nodes requires 'source-track rule'"); problems++; } - if (r->action == PF_DROP && r->keep_state) { - yyerror("keep state on block rules doesn't make sense"); + if (r->action != PF_PASS && r->keep_state) { + yyerror("keep state is great, but only for pass rules"); problems++; } if (r->rule_flag & PFRULE_STATESLOPPY && (r->keep_state == PF_STATE_MODULATE || r->keep_state == PF_STATE_SYNPROXY)) { yyerror("sloppy state matching cannot be used with " "synproxy state or modulate state"); problems++; } + /* match rules */ + if (r->action == PF_MATCH) { + if (r->divert.port) { + yyerror("divert is not supported on match rules"); + problems++; + } + if (r->rt) { + yyerror("route-to, reply-to, dup-to and fastroute " + "must not be used on match rules"); + problems++; + } + } return (-problems); } int nat_consistent(struct pfctl_rule *r) { return (0); /* yeah!
*/ } int rdr_consistent(struct pfctl_rule *r) { int problems = 0; if (r->proto != IPPROTO_TCP && r->proto != IPPROTO_UDP) { if (r->src.port_op) { yyerror("src port only applies to tcp/udp"); problems++; } if (r->dst.port_op) { yyerror("dst port only applies to tcp/udp"); problems++; } if (r->rpool.proxy_port[0]) { yyerror("rpool port only applies to tcp/udp"); problems++; } } if (r->dst.port_op && r->dst.port_op != PF_OP_EQ && r->dst.port_op != PF_OP_RRG) { yyerror("invalid port operator for rdr destination port"); problems++; } return (-problems); } int process_tabledef(char *name, struct table_opts *opts) { struct pfr_buffer ab; struct node_tinit *ti; unsigned long maxcount; size_t s = sizeof(maxcount); bzero(&ab, sizeof(ab)); ab.pfrb_type = PFRB_ADDRS; SIMPLEQ_FOREACH(ti, &opts->init_nodes, entries) { if (ti->file) if (pfr_buf_load(&ab, ti->file, 0, append_addr)) { if (errno) yyerror("cannot load \"%s\": %s", ti->file, strerror(errno)); else yyerror("file \"%s\" contains bad data", ti->file); goto _error; } if (ti->host) if (append_addr_host(&ab, ti->host, 0, 0)) { yyerror("cannot create address buffer: %s", strerror(errno)); goto _error; } } if (pf->opts & PF_OPT_VERBOSE) print_tabledef(name, opts->flags, opts->init_addr, &opts->init_nodes); if (!(pf->opts & PF_OPT_NOACTION) && pfctl_define_table(name, opts->flags, opts->init_addr, pf->anchor->name, &ab, pf->anchor->ruleset.tticket)) { if (sysctlbyname("net.pf.request_maxcount", &maxcount, &s, NULL, 0) == -1) maxcount = 65535; if (ab.pfrb_size > maxcount) yyerror("cannot define table %s: too many elements.\n" "Consider increasing net.pf.request_maxcount.", name); else yyerror("cannot define table %s: %s", name, pfr_strerror(errno)); goto _error; } pf->tdirty = 1; pfr_buf_clear(&ab); return (0); _error: pfr_buf_clear(&ab); return (-1); } struct keywords { const char *k_name; int k_val; }; /* macro gore, but you should've seen the prior indentation nightmare... 
*/ #define FREE_LIST(T,r) \ do { \ T *p, *node = r; \ while (node != NULL) { \ p = node; \ node = node->next; \ free(p); \ } \ } while (0) #define LOOP_THROUGH(T,n,r,C) \ do { \ T *n; \ if (r == NULL) { \ r = calloc(1, sizeof(T)); \ if (r == NULL) \ err(1, "LOOP: calloc"); \ r->next = NULL; \ } \ n = r; \ while (n != NULL) { \ do { \ C; \ } while (0); \ n = n->next; \ } \ } while (0) void expand_label_str(char *label, size_t len, const char *srch, const char *repl) { char *tmp; char *p, *q; if ((tmp = calloc(1, len)) == NULL) err(1, "expand_label_str: calloc"); p = q = label; while ((q = strstr(p, srch)) != NULL) { *q = '\0'; if ((strlcat(tmp, p, len) >= len) || (strlcat(tmp, repl, len) >= len)) errx(1, "expand_label: label too long"); q += strlen(srch); p = q; } if (strlcat(tmp, p, len) >= len) errx(1, "expand_label: label too long"); strlcpy(label, tmp, len); /* always fits */ free(tmp); } void expand_label_if(const char *name, char *label, size_t len, const char *ifname) { if (strstr(label, name) != NULL) { if (!*ifname) expand_label_str(label, len, name, "any"); else expand_label_str(label, len, name, ifname); } } void expand_label_addr(const char *name, char *label, size_t len, sa_family_t af, struct pf_rule_addr *addr) { char tmp[64], tmp_not[66]; if (strstr(label, name) != NULL) { switch (addr->addr.type) { case PF_ADDR_DYNIFTL: snprintf(tmp, sizeof(tmp), "(%s)", addr->addr.v.ifname); break; case PF_ADDR_TABLE: snprintf(tmp, sizeof(tmp), "<%s>", addr->addr.v.tblname); break; case PF_ADDR_NOROUTE: snprintf(tmp, sizeof(tmp), "no-route"); break; case PF_ADDR_URPFFAILED: snprintf(tmp, sizeof(tmp), "urpf-failed"); break; case PF_ADDR_ADDRMASK: if (!af || (PF_AZERO(&addr->addr.v.a.addr, af) && PF_AZERO(&addr->addr.v.a.mask, af))) snprintf(tmp, sizeof(tmp), "any"); else { char a[48]; int bits; if (inet_ntop(af, &addr->addr.v.a.addr, a, sizeof(a)) == NULL) snprintf(tmp, sizeof(tmp), "?"); else { bits = unmask(&addr->addr.v.a.mask, af); if ((af == AF_INET && bits < 32) || (af == AF_INET6 && bits < 128)) snprintf(tmp, sizeof(tmp), "%s/%d", a, bits); else snprintf(tmp, sizeof(tmp), "%s", a); } } break; default: snprintf(tmp, sizeof(tmp), "?"); break; } if (addr->neg) { snprintf(tmp_not, sizeof(tmp_not), "! 
%s", tmp); expand_label_str(label, len, name, tmp_not); } else expand_label_str(label, len, name, tmp); } } void expand_label_port(const char *name, char *label, size_t len, struct pf_rule_addr *addr) { char a1[6], a2[6], op[13] = ""; if (strstr(label, name) != NULL) { snprintf(a1, sizeof(a1), "%u", ntohs(addr->port[0])); snprintf(a2, sizeof(a2), "%u", ntohs(addr->port[1])); if (!addr->port_op) ; else if (addr->port_op == PF_OP_IRG) snprintf(op, sizeof(op), "%s><%s", a1, a2); else if (addr->port_op == PF_OP_XRG) snprintf(op, sizeof(op), "%s<>%s", a1, a2); else if (addr->port_op == PF_OP_EQ) snprintf(op, sizeof(op), "%s", a1); else if (addr->port_op == PF_OP_NE) snprintf(op, sizeof(op), "!=%s", a1); else if (addr->port_op == PF_OP_LT) snprintf(op, sizeof(op), "<%s", a1); else if (addr->port_op == PF_OP_LE) snprintf(op, sizeof(op), "<=%s", a1); else if (addr->port_op == PF_OP_GT) snprintf(op, sizeof(op), ">%s", a1); else if (addr->port_op == PF_OP_GE) snprintf(op, sizeof(op), ">=%s", a1); expand_label_str(label, len, name, op); } } void expand_label_proto(const char *name, char *label, size_t len, u_int8_t proto) { const char *protoname; char n[4]; if (strstr(label, name) != NULL) { protoname = pfctl_proto2name(proto); if (protoname != NULL) expand_label_str(label, len, name, protoname); else { snprintf(n, sizeof(n), "%u", proto); expand_label_str(label, len, name, n); } } } void expand_label_nr(const char *name, char *label, size_t len, struct pfctl_rule *r) { char n[11]; if (strstr(label, name) != NULL) { snprintf(n, sizeof(n), "%u", r->nr); expand_label_str(label, len, name, n); } } void expand_label(char *label, size_t len, struct pfctl_rule *r) { expand_label_if("$if", label, len, r->ifname); expand_label_addr("$srcaddr", label, len, r->af, &r->src); expand_label_addr("$dstaddr", label, len, r->af, &r->dst); expand_label_port("$srcport", label, len, &r->src); expand_label_port("$dstport", label, len, &r->dst); expand_label_proto("$proto", label, len, r->proto); expand_label_nr("$nr", label, len, r); } int expand_altq(struct pf_altq *a, struct node_if *interfaces, struct node_queue *nqueues, struct node_queue_bw bwspec, struct node_queue_opt *opts) { struct pf_altq pa, pb; char qname[PF_QNAME_SIZE]; struct node_queue *n; struct node_queue_bw bw; int errs = 0; if ((pf->loadopt & PFCTL_FLAG_ALTQ) == 0) { FREE_LIST(struct node_if, interfaces); if (nqueues) FREE_LIST(struct node_queue, nqueues); return (0); } LOOP_THROUGH(struct node_if, interface, interfaces, memcpy(&pa, a, sizeof(struct pf_altq)); if (strlcpy(pa.ifname, interface->ifname, sizeof(pa.ifname)) >= sizeof(pa.ifname)) errx(1, "expand_altq: strlcpy"); if (interface->not) { yyerror("altq on ! 
is not supported"); errs++; } else { if (eval_pfaltq(pf, &pa, &bwspec, opts)) errs++; else if (pfctl_add_altq(pf, &pa)) errs++; if (pf->opts & PF_OPT_VERBOSE) { print_altq(&pf->paltq->altq, 0, &bwspec, opts); if (nqueues && nqueues->tail) { printf("queue { "); LOOP_THROUGH(struct node_queue, queue, nqueues, printf("%s ", queue->queue); ); printf("}"); } printf("\n"); } if (pa.scheduler == ALTQT_CBQ || pa.scheduler == ALTQT_HFSC || pa.scheduler == ALTQT_FAIRQ) { /* now create a root queue */ memset(&pb, 0, sizeof(struct pf_altq)); if (strlcpy(qname, "root_", sizeof(qname)) >= sizeof(qname)) errx(1, "expand_altq: strlcpy"); if (strlcat(qname, interface->ifname, sizeof(qname)) >= sizeof(qname)) errx(1, "expand_altq: strlcat"); if (strlcpy(pb.qname, qname, sizeof(pb.qname)) >= sizeof(pb.qname)) errx(1, "expand_altq: strlcpy"); if (strlcpy(pb.ifname, interface->ifname, sizeof(pb.ifname)) >= sizeof(pb.ifname)) errx(1, "expand_altq: strlcpy"); pb.qlimit = pa.qlimit; pb.scheduler = pa.scheduler; bw.bw_absolute = pa.ifbandwidth; bw.bw_percent = 0; if (eval_pfqueue(pf, &pb, &bw, opts)) errs++; else if (pfctl_add_altq(pf, &pb)) errs++; } LOOP_THROUGH(struct node_queue, queue, nqueues, n = calloc(1, sizeof(struct node_queue)); if (n == NULL) err(1, "expand_altq: calloc"); if (pa.scheduler == ALTQT_CBQ || pa.scheduler == ALTQT_HFSC || pa.scheduler == ALTQT_FAIRQ) if (strlcpy(n->parent, qname, sizeof(n->parent)) >= sizeof(n->parent)) errx(1, "expand_altq: strlcpy"); if (strlcpy(n->queue, queue->queue, sizeof(n->queue)) >= sizeof(n->queue)) errx(1, "expand_altq: strlcpy"); if (strlcpy(n->ifname, interface->ifname, sizeof(n->ifname)) >= sizeof(n->ifname)) errx(1, "expand_altq: strlcpy"); n->scheduler = pa.scheduler; n->next = NULL; n->tail = n; if (queues == NULL) queues = n; else { queues->tail->next = n; queues->tail = n; } ); } ); FREE_LIST(struct node_if, interfaces); if (nqueues) FREE_LIST(struct node_queue, nqueues); return (errs); } int expand_queue(struct pf_altq *a, struct node_if *interfaces, struct node_queue *nqueues, struct node_queue_bw bwspec, struct node_queue_opt *opts) { struct node_queue *n, *nq; struct pf_altq pa; u_int8_t found = 0; u_int8_t errs = 0; if ((pf->loadopt & PFCTL_FLAG_ALTQ) == 0) { FREE_LIST(struct node_queue, nqueues); return (0); } if (queues == NULL) { yyerror("queue %s has no parent", a->qname); FREE_LIST(struct node_queue, nqueues); return (1); } LOOP_THROUGH(struct node_if, interface, interfaces, LOOP_THROUGH(struct node_queue, tqueue, queues, if (!strncmp(a->qname, tqueue->queue, PF_QNAME_SIZE) && (interface->ifname[0] == 0 || (!interface->not && !strncmp(interface->ifname, tqueue->ifname, IFNAMSIZ)) || (interface->not && strncmp(interface->ifname, tqueue->ifname, IFNAMSIZ)))) { /* found ourself in queues */ found++; memcpy(&pa, a, sizeof(struct pf_altq)); if (pa.scheduler != ALTQT_NONE && pa.scheduler != tqueue->scheduler) { yyerror("exactly one scheduler type " "per interface allowed"); return (1); } pa.scheduler = tqueue->scheduler; /* scheduler dependent error checking */ switch (pa.scheduler) { case ALTQT_PRIQ: if (nqueues != NULL) { yyerror("priq queues cannot " "have child queues"); return (1); } if (bwspec.bw_absolute > 0 || bwspec.bw_percent < 100) { yyerror("priq doesn't take " "bandwidth"); return (1); } break; default: break; } if (strlcpy(pa.ifname, tqueue->ifname, sizeof(pa.ifname)) >= sizeof(pa.ifname)) errx(1, "expand_queue: strlcpy"); if (strlcpy(pa.parent, tqueue->parent, sizeof(pa.parent)) >= sizeof(pa.parent)) errx(1, "expand_queue: strlcpy"); 
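/*
 * Illustrative sketch, not part of the code above: expand_altq
 * synthesizes a "root_<ifname>" parent queue for cbq/hfsc/fairq, with
 * every copy guarded against truncation.  The ex_ names are
 * hypothetical.
 */
#include <string.h>

static int
ex_root_qname(char *buf, size_t len, const char *ifname)
{
	if (strlcpy(buf, "root_", len) >= len ||
	    strlcat(buf, ifname, len) >= len)
		return (-1);	/* name would not fit: treat as fatal */
	return (0);
}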
if (eval_pfqueue(pf, &pa, &bwspec, opts)) errs++; else if (pfctl_add_altq(pf, &pa)) errs++; for (nq = nqueues; nq != NULL; nq = nq->next) { if (!strcmp(a->qname, nq->queue)) { yyerror("queue cannot have " "itself as child"); errs++; continue; } n = calloc(1, sizeof(struct node_queue)); if (n == NULL) err(1, "expand_queue: calloc"); if (strlcpy(n->parent, a->qname, sizeof(n->parent)) >= sizeof(n->parent)) errx(1, "expand_queue strlcpy"); if (strlcpy(n->queue, nq->queue, sizeof(n->queue)) >= sizeof(n->queue)) errx(1, "expand_queue strlcpy"); if (strlcpy(n->ifname, tqueue->ifname, sizeof(n->ifname)) >= sizeof(n->ifname)) errx(1, "expand_queue strlcpy"); n->scheduler = tqueue->scheduler; n->next = NULL; n->tail = n; if (queues == NULL) queues = n; else { queues->tail->next = n; queues->tail = n; } } if ((pf->opts & PF_OPT_VERBOSE) && ( (found == 1 && interface->ifname[0] == 0) || (found > 0 && interface->ifname[0] != 0))) { print_queue(&pf->paltq->altq, 0, &bwspec, interface->ifname[0] != 0, opts); if (nqueues && nqueues->tail) { printf("{ "); LOOP_THROUGH(struct node_queue, queue, nqueues, printf("%s ", queue->queue); ); printf("}"); } printf("\n"); } } ); ); FREE_LIST(struct node_queue, nqueues); FREE_LIST(struct node_if, interfaces); if (!found) { yyerror("queue %s has no parent", a->qname); errs++; } if (errs) return (1); else return (0); } static int pf_af_to_proto(sa_family_t af) { if (af == AF_INET) return (ETHERTYPE_IP); if (af == AF_INET6) return (ETHERTYPE_IPV6); return (0); } void expand_eth_rule(struct pfctl_eth_rule *r, struct node_if *interfaces, struct node_etherproto *protos, struct node_mac *srcs, struct node_mac *dsts, struct node_host *ipsrcs, struct node_host *ipdsts, const char *bridge_to, const char *anchor_call) { char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; char qname[PF_QNAME_SIZE]; if (strlcpy(tagname, r->tagname, sizeof(tagname)) >= sizeof(tagname)) errx(1, "expand_eth_rule: tagname"); if (strlcpy(match_tagname, r->match_tagname, sizeof(match_tagname)) >= sizeof(match_tagname)) errx(1, "expand_eth_rule: match_tagname"); if (strlcpy(qname, r->qname, sizeof(qname)) >= sizeof(qname)) errx(1, "expand_eth_rule: qname"); LOOP_THROUGH(struct node_if, interface, interfaces, LOOP_THROUGH(struct node_etherproto, proto, protos, LOOP_THROUGH(struct node_mac, src, srcs, LOOP_THROUGH(struct node_mac, dst, dsts, LOOP_THROUGH(struct node_host, ipsrc, ipsrcs, LOOP_THROUGH(struct node_host, ipdst, ipdsts, strlcpy(r->ifname, interface->ifname, sizeof(r->ifname)); r->ifnot = interface->not; r->proto = proto->proto; if (!r->proto && ipsrc->af) r->proto = pf_af_to_proto(ipsrc->af); else if (!r->proto && ipdst->af) r->proto = pf_af_to_proto(ipdst->af); bcopy(src->mac, r->src.addr, ETHER_ADDR_LEN); bcopy(src->mask, r->src.mask, ETHER_ADDR_LEN); r->src.neg = src->neg; r->src.isset = src->isset; r->ipsrc.addr = ipsrc->addr; r->ipsrc.neg = ipsrc->not; r->ipdst.addr = ipdst->addr; r->ipdst.neg = ipdst->not; bcopy(dst->mac, r->dst.addr, ETHER_ADDR_LEN); bcopy(dst->mask, r->dst.mask, ETHER_ADDR_LEN); r->dst.neg = dst->neg; r->dst.isset = dst->isset; r->nr = pf->eastack[pf->asd]->match++; if (strlcpy(r->tagname, tagname, sizeof(r->tagname)) >= sizeof(r->tagname)) errx(1, "expand_eth_rule: r->tagname"); if (strlcpy(r->match_tagname, match_tagname, sizeof(r->match_tagname)) >= sizeof(r->match_tagname)) errx(1, "expand_eth_rule: r->match_tagname"); if (strlcpy(r->qname, qname, sizeof(r->qname)) >= sizeof(r->qname)) errx(1, "expand_eth_rule: r->qname"); if (bridge_to) 
strlcpy(r->bridge_to, bridge_to, sizeof(r->bridge_to)); pfctl_append_eth_rule(pf, r, anchor_call); )))))); FREE_LIST(struct node_if, interfaces); FREE_LIST(struct node_etherproto, protos); FREE_LIST(struct node_mac, srcs); FREE_LIST(struct node_mac, dsts); FREE_LIST(struct node_host, ipsrcs); FREE_LIST(struct node_host, ipdsts); } void expand_rule(struct pfctl_rule *r, struct node_if *interfaces, struct node_host *rpool_hosts, struct node_proto *protos, struct node_os *src_oses, struct node_host *src_hosts, struct node_port *src_ports, struct node_host *dst_hosts, struct node_port *dst_ports, struct node_uid *uids, struct node_gid *gids, struct node_icmp *icmp_types, const char *anchor_call) { sa_family_t af = r->af; int added = 0, error = 0; char ifname[IF_NAMESIZE]; char label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; struct pf_pooladdr *pa; struct node_host *h; u_int8_t flags, flagset, keep_state; memcpy(label, r->label, sizeof(r->label)); assert(sizeof(r->label) == sizeof(label)); if (strlcpy(tagname, r->tagname, sizeof(tagname)) >= sizeof(tagname)) errx(1, "expand_rule: strlcpy"); if (strlcpy(match_tagname, r->match_tagname, sizeof(match_tagname)) >= sizeof(match_tagname)) errx(1, "expand_rule: strlcpy"); flags = r->flags; flagset = r->flagset; keep_state = r->keep_state; LOOP_THROUGH(struct node_if, interface, interfaces, LOOP_THROUGH(struct node_proto, proto, protos, LOOP_THROUGH(struct node_icmp, icmp_type, icmp_types, LOOP_THROUGH(struct node_host, src_host, src_hosts, LOOP_THROUGH(struct node_port, src_port, src_ports, LOOP_THROUGH(struct node_os, src_os, src_oses, LOOP_THROUGH(struct node_host, dst_host, dst_hosts, LOOP_THROUGH(struct node_port, dst_port, dst_ports, LOOP_THROUGH(struct node_uid, uid, uids, LOOP_THROUGH(struct node_gid, gid, gids, r->af = af; /* for link-local IPv6 address, interface must match up */ if ((r->af && src_host->af && r->af != src_host->af) || (r->af && dst_host->af && r->af != dst_host->af) || (src_host->af && dst_host->af && src_host->af != dst_host->af) || (src_host->ifindex && dst_host->ifindex && src_host->ifindex != dst_host->ifindex) || (src_host->ifindex && *interface->ifname && src_host->ifindex != if_nametoindex(interface->ifname)) || (dst_host->ifindex && *interface->ifname && dst_host->ifindex != if_nametoindex(interface->ifname))) continue; if (!r->af && src_host->af) r->af = src_host->af; else if (!r->af && dst_host->af) r->af = dst_host->af; if (*interface->ifname) strlcpy(r->ifname, interface->ifname, sizeof(r->ifname)); else if (if_indextoname(src_host->ifindex, ifname)) strlcpy(r->ifname, ifname, sizeof(r->ifname)); else if (if_indextoname(dst_host->ifindex, ifname)) strlcpy(r->ifname, ifname, sizeof(r->ifname)); else memset(r->ifname, '\0', sizeof(r->ifname)); memcpy(r->label, label, sizeof(r->label)); if (strlcpy(r->tagname, tagname, sizeof(r->tagname)) >= sizeof(r->tagname)) errx(1, "expand_rule: strlcpy"); if (strlcpy(r->match_tagname, match_tagname, sizeof(r->match_tagname)) >= sizeof(r->match_tagname)) errx(1, "expand_rule: strlcpy"); error += check_netmask(src_host, r->af); error += check_netmask(dst_host, r->af); r->ifnot = interface->not; r->proto = proto->proto; r->src.addr = src_host->addr; r->src.neg = src_host->not; r->src.port[0] = src_port->port[0]; r->src.port[1] = src_port->port[1]; r->src.port_op = src_port->op; r->dst.addr = dst_host->addr; r->dst.neg = dst_host->not; r->dst.port[0] = dst_port->port[0]; r->dst.port[1] = dst_port->port[1]; 
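/*
 * Illustrative sketch of what the LOOP_THROUGH nest above amounts to:
 * expand_rule emits one kernel rule per combination of the supplied
 * lists, so the rule count is the product of the list lengths; an
 * empty list is replaced by a single dummy node and contributes a
 * factor of one.  The ex_ names are hypothetical.
 */
#include <stddef.h>

struct ex_list {
	struct ex_list	*next;
};

static size_t
ex_expansion_factor(const struct ex_list *l)
{
	size_t n = 0;

	for (; l != NULL; l = l->next)
		n++;
	return (n > 0 ? n : 1);	/* empty list still yields one pass */
}
/* Total rules emitted is the product of ex_expansion_factor() over
 * every list handed to expand_rule(). */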
r->dst.port_op = dst_port->op; r->uid.op = uid->op; r->uid.uid[0] = uid->uid[0]; r->uid.uid[1] = uid->uid[1]; r->gid.op = gid->op; r->gid.gid[0] = gid->gid[0]; r->gid.gid[1] = gid->gid[1]; r->type = icmp_type->type; r->code = icmp_type->code; if ((keep_state == PF_STATE_MODULATE || keep_state == PF_STATE_SYNPROXY) && r->proto && r->proto != IPPROTO_TCP) r->keep_state = PF_STATE_NORMAL; else r->keep_state = keep_state; if (r->proto && r->proto != IPPROTO_TCP) { r->flags = 0; r->flagset = 0; } else { r->flags = flags; r->flagset = flagset; } if (icmp_type->proto && r->proto != icmp_type->proto) { yyerror("icmp-type mismatch"); error++; } if (src_os && src_os->os) { r->os_fingerprint = pfctl_get_fingerprint(src_os->os); if ((pf->opts & PF_OPT_VERBOSE2) && r->os_fingerprint == PF_OSFP_NOMATCH) fprintf(stderr, "warning: unknown '%s' OS fingerprint\n", src_os->os); } else { r->os_fingerprint = PF_OSFP_ANY; } TAILQ_INIT(&r->rpool.list); for (h = rpool_hosts; h != NULL; h = h->next) { pa = calloc(1, sizeof(struct pf_pooladdr)); if (pa == NULL) err(1, "expand_rule: calloc"); pa->addr = h->addr; if (h->ifname != NULL) { if (strlcpy(pa->ifname, h->ifname, sizeof(pa->ifname)) >= sizeof(pa->ifname)) errx(1, "expand_rule: strlcpy"); } else pa->ifname[0] = 0; TAILQ_INSERT_TAIL(&r->rpool.list, pa, entries); } if (rule_consistent(r, anchor_call[0]) < 0 || error) yyerror("skipping rule due to errors"); else { r->nr = pf->astack[pf->asd]->match++; pfctl_append_rule(pf, r, anchor_call); added++; } )))))))))); FREE_LIST(struct node_if, interfaces); FREE_LIST(struct node_proto, protos); FREE_LIST(struct node_host, src_hosts); FREE_LIST(struct node_port, src_ports); FREE_LIST(struct node_os, src_oses); FREE_LIST(struct node_host, dst_hosts); FREE_LIST(struct node_port, dst_ports); FREE_LIST(struct node_uid, uids); FREE_LIST(struct node_gid, gids); FREE_LIST(struct node_icmp, icmp_types); FREE_LIST(struct node_host, rpool_hosts); if (!added) yyerror("rule expands to no valid combination"); } int expand_skip_interface(struct node_if *interfaces) { int errs = 0; if (!interfaces || (!interfaces->next && !interfaces->not && !strcmp(interfaces->ifname, "none"))) { if (pf->opts & PF_OPT_VERBOSE) printf("set skip on none\n"); errs = pfctl_set_interface_flags(pf, "", PFI_IFLAG_SKIP, 0); return (errs); } if (pf->opts & PF_OPT_VERBOSE) printf("set skip on {"); LOOP_THROUGH(struct node_if, interface, interfaces, if (pf->opts & PF_OPT_VERBOSE) printf(" %s", interface->ifname); if (interface->not) { yyerror("skip on ! 
is not supported"); errs++; } else errs += pfctl_set_interface_flags(pf, interface->ifname, PFI_IFLAG_SKIP, 1); ); if (pf->opts & PF_OPT_VERBOSE) printf(" }\n"); FREE_LIST(struct node_if, interfaces); if (errs) return (1); else return (0); } #undef FREE_LIST #undef LOOP_THROUGH int check_rulestate(int desired_state) { if (require_order && (rulestate > desired_state)) { yyerror("Rules must be in order: options, ethernet, " "normalization, queueing, translation, filtering"); return (1); } rulestate = desired_state; return (0); } int kw_cmp(const void *k, const void *e) { return (strcmp(k, ((const struct keywords *)e)->k_name)); } int lookup(char *s) { /* this has to be sorted always */ static const struct keywords keywords[] = { { "all", ALL}, { "allow-opts", ALLOWOPTS}, { "altq", ALTQ}, { "anchor", ANCHOR}, { "antispoof", ANTISPOOF}, { "any", ANY}, { "bandwidth", BANDWIDTH}, { "binat", BINAT}, { "binat-anchor", BINATANCHOR}, { "bitmask", BITMASK}, { "block", BLOCK}, { "block-policy", BLOCKPOLICY}, { "bridge-to", BRIDGE_TO}, { "buckets", BUCKETS}, { "cbq", CBQ}, { "code", CODE}, { "codelq", CODEL}, { "debug", DEBUG}, { "divert-reply", DIVERTREPLY}, { "divert-to", DIVERTTO}, { "dnpipe", DNPIPE}, { "dnqueue", DNQUEUE}, { "drop", DROP}, { "dup-to", DUPTO}, { "ether", ETHER}, { "fail-policy", FAILPOLICY}, { "fairq", FAIRQ}, { "fastroute", FASTROUTE}, { "file", FILENAME}, { "fingerprints", FINGERPRINTS}, { "flags", FLAGS}, { "floating", FLOATING}, { "flush", FLUSH}, { "for", FOR}, { "fragment", FRAGMENT}, { "from", FROM}, { "global", GLOBAL}, { "group", GROUP}, { "hfsc", HFSC}, { "hogs", HOGS}, { "hostid", HOSTID}, { "icmp-type", ICMPTYPE}, { "icmp6-type", ICMP6TYPE}, { "if-bound", IFBOUND}, { "in", IN}, { "include", INCLUDE}, { "inet", INET}, { "inet6", INET6}, { "interval", INTERVAL}, { "keep", KEEP}, { "keepcounters", KEEPCOUNTERS}, { "l3", L3}, { "label", LABEL}, { "limit", LIMIT}, { "linkshare", LINKSHARE}, { "load", LOAD}, { "log", LOG}, { "loginterface", LOGINTERFACE}, { "map-e-portset", MAPEPORTSET}, { "match", MATCH}, { "max", MAXIMUM}, { "max-mss", MAXMSS}, { "max-src-conn", MAXSRCCONN}, { "max-src-conn-rate", MAXSRCCONNRATE}, { "max-src-nodes", MAXSRCNODES}, { "max-src-states", MAXSRCSTATES}, { "min-ttl", MINTTL}, { "modulate", MODULATE}, { "nat", NAT}, { "nat-anchor", NATANCHOR}, { "no", NO}, { "no-df", NODF}, { "no-route", NOROUTE}, { "no-sync", NOSYNC}, { "on", ON}, { "optimization", OPTIMIZATION}, { "os", OS}, { "out", OUT}, { "overload", OVERLOAD}, { "pass", PASS}, { "port", PORT}, { "prio", PRIO}, { "priority", PRIORITY}, { "priq", PRIQ}, { "probability", PROBABILITY}, { "proto", PROTO}, { "qlimit", QLIMIT}, { "queue", QUEUE}, { "quick", QUICK}, { "random", RANDOM}, { "random-id", RANDOMID}, { "rdr", RDR}, { "rdr-anchor", RDRANCHOR}, { "realtime", REALTIME}, { "reassemble", REASSEMBLE}, { "reply-to", REPLYTO}, { "require-order", REQUIREORDER}, { "return", RETURN}, { "return-icmp", RETURNICMP}, { "return-icmp6", RETURNICMP6}, { "return-rst", RETURNRST}, { "ridentifier", RIDENTIFIER}, { "round-robin", ROUNDROBIN}, { "route", ROUTE}, { "route-to", ROUTETO}, { "rtable", RTABLE}, { "rule", RULE}, { "ruleset-optimization", RULESET_OPTIMIZATION}, { "scrub", SCRUB}, { "set", SET}, { "set-tos", SETTOS}, { "skip", SKIP}, { "sloppy", SLOPPY}, { "source-hash", SOURCEHASH}, { "source-track", SOURCETRACK}, { "state", STATE}, { "state-defaults", STATEDEFAULTS}, { "state-policy", STATEPOLICY}, { "static-port", STATICPORT}, { "sticky-address", STICKYADDRESS}, { "syncookies", SYNCOOKIES}, { 
"synproxy", SYNPROXY}, { "table", TABLE}, { "tag", TAG}, { "tagged", TAGGED}, { "target", TARGET}, { "tbrsize", TBRSIZE}, { "timeout", TIMEOUT}, { "to", TO}, { "tos", TOS}, { "ttl", TTL}, { "upperlimit", UPPERLIMIT}, { "urpf-failed", URPFFAILED}, { "user", USER}, }; const struct keywords *p; p = bsearch(s, keywords, sizeof(keywords)/sizeof(keywords[0]), sizeof(keywords[0]), kw_cmp); if (p) { if (debug > 1) fprintf(stderr, "%s: %d\n", s, p->k_val); return (p->k_val); } else { if (debug > 1) fprintf(stderr, "string: %s\n", s); return (STRING); } } #define MAXPUSHBACK 128 static char *parsebuf; static int parseindex; static char pushback_buffer[MAXPUSHBACK]; static int pushback_index = 0; int lgetc(int quotec) { int c, next; if (parsebuf) { /* Read character from the parsebuffer instead of input. */ if (parseindex >= 0) { c = parsebuf[parseindex++]; if (c != '\0') return (c); parsebuf = NULL; } else parseindex++; } if (pushback_index) return (pushback_buffer[--pushback_index]); if (quotec) { if ((c = getc(file->stream)) == EOF) { yyerror("reached end of file while parsing quoted string"); if (popfile() == EOF) return (EOF); return (quotec); } return (c); } while ((c = getc(file->stream)) == '\\') { next = getc(file->stream); if (next != '\n') { c = next; break; } yylval.lineno = file->lineno; file->lineno++; } while (c == EOF) { if (popfile() == EOF) return (EOF); c = getc(file->stream); } return (c); } int lungetc(int c) { if (c == EOF) return (EOF); if (parsebuf) { parseindex--; if (parseindex >= 0) return (c); } if (pushback_index < MAXPUSHBACK-1) return (pushback_buffer[pushback_index++] = c); else return (EOF); } int findeol(void) { int c; parsebuf = NULL; /* skip to either EOF or the first real EOL */ while (1) { if (pushback_index) c = pushback_buffer[--pushback_index]; else c = lgetc(0); if (c == '\n') { file->lineno++; break; } if (c == EOF) break; } return (ERROR); } int yylex(void) { char buf[8096]; char *p, *val; int quotec, next, c; int token; top: p = buf; while ((c = lgetc(0)) == ' ' || c == '\t') ; /* nothing */ yylval.lineno = file->lineno; if (c == '#') while ((c = lgetc(0)) != '\n' && c != EOF) ; /* nothing */ if (c == '$' && parsebuf == NULL) { while (1) { if ((c = lgetc(0)) == EOF) return (0); if (p + 1 >= buf + sizeof(buf) - 1) { yyerror("string too long"); return (findeol()); } if (isalnum(c) || c == '_') { *p++ = (char)c; continue; } *p = '\0'; lungetc(c); break; } val = symget(buf); if (val == NULL) { yyerror("macro '%s' not defined", buf); return (findeol()); } parsebuf = val; parseindex = 0; goto top; } switch (c) { case '\'': case '"': quotec = c; while (1) { if ((c = lgetc(quotec)) == EOF) return (0); if (c == '\n') { file->lineno++; continue; } else if (c == '\\') { if ((next = lgetc(quotec)) == EOF) return (0); if (next == quotec || c == ' ' || c == '\t') c = next; else if (next == '\n') { file->lineno++; continue; } else lungetc(next); } else if (c == quotec) { *p = '\0'; break; } if (p + 1 >= buf + sizeof(buf) - 1) { yyerror("string too long"); return (findeol()); } *p++ = (char)c; } yylval.v.string = strdup(buf); if (yylval.v.string == NULL) err(1, "yylex: strdup"); return (STRING); case '<': next = lgetc(0); if (next == '>') { yylval.v.i = PF_OP_XRG; return (PORTBINARY); } lungetc(next); break; case '>': next = lgetc(0); if (next == '<') { yylval.v.i = PF_OP_IRG; return (PORTBINARY); } lungetc(next); break; case '-': next = lgetc(0); if (next == '>') return (ARROW); lungetc(next); break; } #define allowed_to_end_number(x) \ (isspace(x) || x == ')' || x ==',' 
|| x == '/' || x == '}' || x == '=') if (c == '-' || isdigit(c)) { do { *p++ = c; if ((unsigned)(p-buf) >= sizeof(buf)) { yyerror("string too long"); return (findeol()); } } while ((c = lgetc(0)) != EOF && isdigit(c)); lungetc(c); if (p == buf + 1 && buf[0] == '-') goto nodigits; if (c == EOF || allowed_to_end_number(c)) { const char *errstr = NULL; *p = '\0'; yylval.v.number = strtonum(buf, LLONG_MIN, LLONG_MAX, &errstr); if (errstr) { yyerror("\"%s\" invalid number: %s", buf, errstr); return (findeol()); } return (NUMBER); } else { nodigits: while (p > buf + 1) lungetc(*--p); c = *--p; if (c == '-') return (c); } } #define allowed_in_string(x) \ (isalnum(x) || (ispunct(x) && x != '(' && x != ')' && \ x != '{' && x != '}' && x != '<' && x != '>' && \ x != '!' && x != '=' && x != '/' && x != '#' && \ x != ',')) if (isalnum(c) || c == ':' || c == '_') { do { *p++ = c; if ((unsigned)(p-buf) >= sizeof(buf)) { yyerror("string too long"); return (findeol()); } } while ((c = lgetc(0)) != EOF && (allowed_in_string(c))); lungetc(c); *p = '\0'; if ((token = lookup(buf)) == STRING) if ((yylval.v.string = strdup(buf)) == NULL) err(1, "yylex: strdup"); return (token); } if (c == '\n') { yylval.lineno = file->lineno; file->lineno++; } if (c == EOF) return (0); return (c); } int check_file_secrecy(int fd, const char *fname) { struct stat st; if (fstat(fd, &st)) { warn("cannot stat %s", fname); return (-1); } if (st.st_uid != 0 && st.st_uid != getuid()) { warnx("%s: owner not root or current user", fname); return (-1); } if (st.st_mode & (S_IRWXG | S_IRWXO)) { warnx("%s: group/world readable/writeable", fname); return (-1); } return (0); } struct file * pushfile(const char *name, int secret) { struct file *nfile; if ((nfile = calloc(1, sizeof(struct file))) == NULL || (nfile->name = strdup(name)) == NULL) { warn("malloc"); return (NULL); } if (TAILQ_FIRST(&files) == NULL && strcmp(nfile->name, "-") == 0) { nfile->stream = stdin; free(nfile->name); if ((nfile->name = strdup("stdin")) == NULL) { warn("strdup"); free(nfile); return (NULL); } } else if ((nfile->stream = fopen(nfile->name, "r")) == NULL) { warn("%s", nfile->name); free(nfile->name); free(nfile); return (NULL); } else if (secret && check_file_secrecy(fileno(nfile->stream), nfile->name)) { fclose(nfile->stream); free(nfile->name); free(nfile); return (NULL); } nfile->lineno = 1; TAILQ_INSERT_TAIL(&files, nfile, entry); return (nfile); } int popfile(void) { struct file *prev; if ((prev = TAILQ_PREV(file, files, entry)) != NULL) { prev->errors += file->errors; TAILQ_REMOVE(&files, file, entry); fclose(file->stream); free(file->name); free(file); file = prev; return (0); } return (EOF); } int parse_config(char *filename, struct pfctl *xpf) { int errors = 0; struct sym *sym; pf = xpf; errors = 0; rulestate = PFCTL_STATE_NONE; returnicmpdefault = (ICMP_UNREACH << 8) | ICMP_UNREACH_PORT; returnicmp6default = (ICMP6_DST_UNREACH << 8) | ICMP6_DST_UNREACH_NOPORT; blockpolicy = PFRULE_DROP; failpolicy = PFRULE_DROP; require_order = 1; if ((file = pushfile(filename, 0)) == NULL) { warn("cannot open the main config file!"); return (-1); } yyparse(); errors = file->errors; popfile(); /* Free macros and check which have not been used. */ while ((sym = TAILQ_FIRST(&symhead))) { if ((pf->opts & PF_OPT_VERBOSE2) && !sym->used) fprintf(stderr, "warning: macro '%s' not " "used\n", sym->nam); free(sym->nam); free(sym->val); TAILQ_REMOVE(&symhead, sym, entry); free(sym); } return (errors ? 
-1 : 0); } int symset(const char *nam, const char *val, int persist) { struct sym *sym; for (sym = TAILQ_FIRST(&symhead); sym && strcmp(nam, sym->nam); sym = TAILQ_NEXT(sym, entry)) ; /* nothing */ if (sym != NULL) { if (sym->persist == 1) return (0); else { free(sym->nam); free(sym->val); TAILQ_REMOVE(&symhead, sym, entry); free(sym); } } if ((sym = calloc(1, sizeof(*sym))) == NULL) return (-1); sym->nam = strdup(nam); if (sym->nam == NULL) { free(sym); return (-1); } sym->val = strdup(val); if (sym->val == NULL) { free(sym->nam); free(sym); return (-1); } sym->used = 0; sym->persist = persist; TAILQ_INSERT_TAIL(&symhead, sym, entry); return (0); } int pfctl_cmdline_symset(char *s) { char *sym, *val; int ret; if ((val = strrchr(s, '=')) == NULL) return (-1); if ((sym = malloc(strlen(s) - strlen(val) + 1)) == NULL) err(1, "pfctl_cmdline_symset: malloc"); strlcpy(sym, s, strlen(s) - strlen(val) + 1); ret = symset(sym, val + 1, 1); free(sym); return (ret); } char * symget(const char *nam) { struct sym *sym; TAILQ_FOREACH(sym, &symhead, entry) if (strcmp(nam, sym->nam) == 0) { sym->used = 1; return (sym->val); } return (NULL); } void mv_rules(struct pfctl_ruleset *src, struct pfctl_ruleset *dst) { int i; struct pfctl_rule *r; for (i = 0; i < PF_RULESET_MAX; ++i) { while ((r = TAILQ_FIRST(src->rules[i].active.ptr)) != NULL) { TAILQ_REMOVE(src->rules[i].active.ptr, r, entries); TAILQ_INSERT_TAIL(dst->rules[i].active.ptr, r, entries); dst->anchor->match++; } src->anchor->match = 0; while ((r = TAILQ_FIRST(src->rules[i].inactive.ptr)) != NULL) { TAILQ_REMOVE(src->rules[i].inactive.ptr, r, entries); TAILQ_INSERT_TAIL(dst->rules[i].inactive.ptr, r, entries); } } } void mv_eth_rules(struct pfctl_eth_ruleset *src, struct pfctl_eth_ruleset *dst) { struct pfctl_eth_rule *r; while ((r = TAILQ_FIRST(&src->rules)) != NULL) { TAILQ_REMOVE(&src->rules, r, entries); TAILQ_INSERT_TAIL(&dst->rules, r, entries); dst->anchor->match++; } src->anchor->match = 0; } void decide_address_family(struct node_host *n, sa_family_t *af) { if (*af != 0 || n == NULL) return; *af = n->af; while ((n = n->next) != NULL) { if (n->af != *af) { *af = 0; return; } } } void remove_invalid_hosts(struct node_host **nh, sa_family_t *af) { struct node_host *n = *nh, *prev = NULL; while (n != NULL) { if (*af && n->af && n->af != *af) { /* unlink and free n */ struct node_host *next = n->next; /* adjust tail pointer */ if (n == (*nh)->tail) (*nh)->tail = prev; /* adjust previous node's next pointer */ if (prev == NULL) *nh = next; else prev->next = next; /* free node */ if (n->ifname != NULL) free(n->ifname); free(n); n = next; } else { if (n->af && !*af) *af = n->af; prev = n; n = n->next; } } } int invalid_redirect(struct node_host *nh, sa_family_t af) { if (!af) { struct node_host *n; /* tables and dyniftl are ok without an address family */ for (n = nh; n != NULL; n = n->next) { if (n->addr.type != PF_ADDR_TABLE && n->addr.type != PF_ADDR_DYNIFTL) { yyerror("address family not given and " "translation address expands to multiple " "address families"); return (1); } } } if (nh == NULL) { yyerror("no translation address with matching address family " "found."); return (1); } return (0); } int atoul(char *s, u_long *ulvalp) { u_long ulval; char *ep; errno = 0; ulval = strtoul(s, &ep, 0); if (s[0] == '\0' || *ep != '\0') return (-1); if (errno == ERANGE && ulval == ULONG_MAX) return (-1); *ulvalp = ulval; return (0); } int getservice(char *n) { struct servent *s; u_long ulval; if (atoul(n, &ulval) == 0) { if (ulval > 65535) { 
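pfctl_cmdline_symset() above implements the -D name=value command-line option: it splits the argument at the last '=', duplicates the name, and registers it through symset() with persist set, so a later macro definition in the config file cannot replace it. A minimal sketch of just the splitting step, using a hypothetical split_macro() helper:

#include <err.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper mirroring the -D parsing in pfctl_cmdline_symset():
 * split "name=value" at the last '=' into an allocated name and a pointer
 * to the value inside the original string.  Returns 0 on success. */
static int
split_macro(char *s, char **name, char **value)
{
	char *eq;

	if ((eq = strrchr(s, '=')) == NULL)
		return (-1);
	if ((*name = malloc(eq - s + 1)) == NULL)
		err(1, "split_macro: malloc");
	strlcpy(*name, s, eq - s + 1);
	*value = eq + 1;
	return (0);
}

Because strrchr() finds the last '=', an argument like "ext_if=em0" splits cleanly, and any earlier '=' characters stay inside the macro name.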
yyerror("illegal port value %lu", ulval); return (-1); } return (htons(ulval)); } else { s = getservbyname(n, "tcp"); if (s == NULL) s = getservbyname(n, "udp"); if (s == NULL) { yyerror("unknown port %s", n); return (-1); } return (s->s_port); } } int rule_label(struct pfctl_rule *r, char *s[PF_RULE_MAX_LABEL_COUNT]) { for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) { if (s[i] == NULL) return (0); if (strlcpy(r->label[i], s[i], sizeof(r->label[0])) >= sizeof(r->label[0])) { yyerror("rule label too long (max %d chars)", sizeof(r->label[0])-1); return (-1); } } return (0); } u_int16_t parseicmpspec(char *w, sa_family_t af) { const struct icmpcodeent *p; u_long ulval; u_int8_t icmptype; if (af == AF_INET) icmptype = returnicmpdefault >> 8; else icmptype = returnicmp6default >> 8; if (atoul(w, &ulval) == -1) { if ((p = geticmpcodebyname(icmptype, w, af)) == NULL) { yyerror("unknown icmp code %s", w); return (0); } ulval = p->code; } if (ulval > 255) { yyerror("invalid icmp code %lu", ulval); return (0); } return (icmptype << 8 | ulval); } int parseport(char *port, struct range *r, int extensions) { char *p = strchr(port, ':'); if (p == NULL) { if ((r->a = getservice(port)) == -1) return (-1); r->b = 0; r->t = PF_OP_NONE; return (0); } if ((extensions & PPORT_STAR) && !strcmp(p+1, "*")) { *p = 0; if ((r->a = getservice(port)) == -1) return (-1); r->b = 0; r->t = PF_OP_IRG; return (0); } if ((extensions & PPORT_RANGE)) { *p++ = 0; if ((r->a = getservice(port)) == -1 || (r->b = getservice(p)) == -1) return (-1); if (r->a == r->b) { r->b = 0; r->t = PF_OP_NONE; } else r->t = PF_OP_RRG; return (0); } return (-1); } int pfctl_load_anchors(int dev, struct pfctl *pf, struct pfr_buffer *trans) { struct loadanchors *la; TAILQ_FOREACH(la, &loadanchorshead, entries) { if (pf->opts & PF_OPT_VERBOSE) fprintf(stderr, "\nLoading anchor %s from %s\n", la->anchorname, la->filename); if (pfctl_rules(dev, la->filename, pf->opts, pf->optimize, la->anchorname, trans) == -1) return (-1); } return (0); } int kw_casecmp(const void *k, const void *e) { return (strcasecmp(k, ((const struct keywords *)e)->k_name)); } int map_tos(char *s, int *val) { /* DiffServ Codepoints and other TOS mappings */ const struct keywords toswords[] = { { "af11", IPTOS_DSCP_AF11 }, { "af12", IPTOS_DSCP_AF12 }, { "af13", IPTOS_DSCP_AF13 }, { "af21", IPTOS_DSCP_AF21 }, { "af22", IPTOS_DSCP_AF22 }, { "af23", IPTOS_DSCP_AF23 }, { "af31", IPTOS_DSCP_AF31 }, { "af32", IPTOS_DSCP_AF32 }, { "af33", IPTOS_DSCP_AF33 }, { "af41", IPTOS_DSCP_AF41 }, { "af42", IPTOS_DSCP_AF42 }, { "af43", IPTOS_DSCP_AF43 }, { "critical", IPTOS_PREC_CRITIC_ECP }, { "cs0", IPTOS_DSCP_CS0 }, { "cs1", IPTOS_DSCP_CS1 }, { "cs2", IPTOS_DSCP_CS2 }, { "cs3", IPTOS_DSCP_CS3 }, { "cs4", IPTOS_DSCP_CS4 }, { "cs5", IPTOS_DSCP_CS5 }, { "cs6", IPTOS_DSCP_CS6 }, { "cs7", IPTOS_DSCP_CS7 }, { "ef", IPTOS_DSCP_EF }, { "inetcontrol", IPTOS_PREC_INTERNETCONTROL }, { "lowdelay", IPTOS_LOWDELAY }, { "netcontrol", IPTOS_PREC_NETCONTROL }, { "reliability", IPTOS_RELIABILITY }, { "throughput", IPTOS_THROUGHPUT }, { "va", IPTOS_DSCP_VA } }; const struct keywords *p; p = bsearch(s, toswords, sizeof(toswords)/sizeof(toswords[0]), sizeof(toswords[0]), kw_casecmp); if (p) { *val = p->k_val; return (1); } return (0); } int rt_tableid_max(void) { #ifdef __FreeBSD__ int fibs; size_t l = sizeof(fibs); if (sysctlbyname("net.fibs", &fibs, &l, NULL, 0) == -1) fibs = 16; /* XXX RT_MAXFIBS, at least limit it some. 
*/ /* * As the OpenBSD code only compares > and not >= we need to adjust * here given we only accept values of 0..n and want to avoid #ifdefs * in the grammar. */ return (fibs - 1); #else return (RT_TABLEID_MAX); #endif } struct node_mac* node_mac_from_string(const char *str) { struct node_mac *m; m = calloc(1, sizeof(struct node_mac)); if (m == NULL) err(1, "mac: calloc"); if (sscanf(str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &m->mac[0], &m->mac[1], &m->mac[2], &m->mac[3], &m->mac[4], &m->mac[5]) != 6) { free(m); yyerror("invalid MAC address"); return (NULL); } memset(m->mask, 0xff, ETHER_ADDR_LEN); m->isset = true; m->next = NULL; m->tail = m; return (m); } struct node_mac* node_mac_from_string_masklen(const char *str, int masklen) { struct node_mac *m; if (masklen < 0 || masklen > (ETHER_ADDR_LEN * 8)) { yyerror("invalid MAC mask length"); return (NULL); } m = node_mac_from_string(str); if (m == NULL) return (NULL); memset(m->mask, 0, ETHER_ADDR_LEN); for (int i = 0; i < masklen; i++) m->mask[i / 8] |= 1 << (i % 8); return (m); } struct node_mac* node_mac_from_string_mask(const char *str, const char *mask) { struct node_mac *m; m = node_mac_from_string(str); if (m == NULL) return (NULL); if (sscanf(mask, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &m->mask[0], &m->mask[1], &m->mask[2], &m->mask[3], &m->mask[4], &m->mask[5]) != 6) { free(m); yyerror("invalid MAC mask"); return (NULL); } return (m); } diff --git a/sbin/pfctl/pf_print_state.c b/sbin/pfctl/pf_print_state.c index b66a296d6080..d23a0154b70d 100644 --- a/sbin/pfctl/pf_print_state.c +++ b/sbin/pfctl/pf_print_state.c @@ -1,380 +1,396 @@ /* $OpenBSD: pf_print_state.c,v 1.52 2008/08/12 16:40:18 david Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define TCPSTATES #include #include #include #include #include #include #include #include "pfctl_parser.h" #include "pfctl.h" void print_name(struct pf_addr *, sa_family_t); void print_addr(struct pf_addr_wrap *addr, sa_family_t af, int verbose) { switch (addr->type) { case PF_ADDR_DYNIFTL: printf("(%s", addr->v.ifname); if (addr->iflags & PFI_AFLAG_NETWORK) printf(":network"); if (addr->iflags & PFI_AFLAG_BROADCAST) printf(":broadcast"); if (addr->iflags & PFI_AFLAG_PEER) printf(":peer"); if (addr->iflags & PFI_AFLAG_NOALIAS) printf(":0"); if (verbose) { if (addr->p.dyncnt <= 0) printf(":*"); else printf(":%d", addr->p.dyncnt); } printf(")"); break; case PF_ADDR_TABLE: if (verbose) if (addr->p.tblcnt == -1) printf("<%s:*>", addr->v.tblname); else printf("<%s:%d>", addr->v.tblname, addr->p.tblcnt); else printf("<%s>", addr->v.tblname); return; case PF_ADDR_RANGE: { char buf[48]; if (inet_ntop(af, &addr->v.a.addr, buf, sizeof(buf)) == NULL) printf("?"); else printf("%s", buf); if (inet_ntop(af, &addr->v.a.mask, buf, sizeof(buf)) == NULL) printf(" - ?"); else printf(" - %s", buf); break; } case PF_ADDR_ADDRMASK: if (PF_AZERO(&addr->v.a.addr, AF_INET6) && PF_AZERO(&addr->v.a.mask, AF_INET6)) printf("any"); else { char buf[48]; if (inet_ntop(af, &addr->v.a.addr, buf, sizeof(buf)) == NULL) printf("?"); else printf("%s", buf); } break; case PF_ADDR_NOROUTE: printf("no-route"); return; case PF_ADDR_URPFFAILED: printf("urpf-failed"); return; default: printf("?"); return; } /* mask if not _both_ address and mask are zero */ if (addr->type != PF_ADDR_RANGE && !(PF_AZERO(&addr->v.a.addr, AF_INET6) && PF_AZERO(&addr->v.a.mask, AF_INET6))) { int bits = unmask(&addr->v.a.mask, af); if (bits != (af == AF_INET ? 
32 : 128)) printf("/%d", bits); } } void print_name(struct pf_addr *addr, sa_family_t af) { char host[NI_MAXHOST]; strlcpy(host, "?", sizeof(host)); switch (af) { case AF_INET: { struct sockaddr_in sin; memset(&sin, 0, sizeof(sin)); sin.sin_len = sizeof(sin); sin.sin_family = AF_INET; sin.sin_addr = addr->v4; getnameinfo((struct sockaddr *)&sin, sin.sin_len, host, sizeof(host), NULL, 0, NI_NOFQDN); break; } case AF_INET6: { struct sockaddr_in6 sin6; memset(&sin6, 0, sizeof(sin6)); sin6.sin6_len = sizeof(sin6); sin6.sin6_family = AF_INET6; sin6.sin6_addr = addr->v6; getnameinfo((struct sockaddr *)&sin6, sin6.sin6_len, host, sizeof(host), NULL, 0, NI_NOFQDN); break; } } printf("%s", host); } void print_host(struct pf_addr *addr, u_int16_t port, sa_family_t af, int opts) { if (opts & PF_OPT_USEDNS) print_name(addr, af); else { struct pf_addr_wrap aw; memset(&aw, 0, sizeof(aw)); aw.v.a.addr = *addr; if (af == AF_INET) aw.v.a.mask.addr32[0] = 0xffffffff; else { memset(&aw.v.a.mask, 0xff, sizeof(aw.v.a.mask)); af = AF_INET6; } print_addr(&aw, af, opts & PF_OPT_VERBOSE2); } if (port) { if (af == AF_INET) printf(":%u", ntohs(port)); else printf("[%u]", ntohs(port)); } } void print_seq(struct pfctl_state_peer *p) { if (p->seqdiff) printf("[%u + %u](+%u)", p->seqlo, p->seqhi - p->seqlo, p->seqdiff); else printf("[%u + %u]", p->seqlo, p->seqhi - p->seqlo); } void print_state(struct pfctl_state *s, int opts) { struct pfctl_state_peer *src, *dst; struct pfctl_state_key *key, *sk, *nk; const char *protoname; int min, sec; sa_family_t af; uint8_t proto; #ifndef __NO_STRICT_ALIGNMENT struct pfctl_state_key aligned_key[2]; bcopy(&s->key, aligned_key, sizeof(aligned_key)); key = aligned_key; #else key = s->key; #endif af = s->key[PF_SK_WIRE].af; proto = s->key[PF_SK_WIRE].proto; if (s->direction == PF_OUT) { src = &s->src; dst = &s->dst; sk = &key[PF_SK_STACK]; nk = &key[PF_SK_WIRE]; if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) sk->port[0] = nk->port[0]; } else { src = &s->dst; dst = &s->src; sk = &key[PF_SK_WIRE]; nk = &key[PF_SK_STACK]; if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) sk->port[1] = nk->port[1]; } printf("%s ", s->ifname); if ((protoname = pfctl_proto2name(proto)) != NULL) printf("%s ", protoname); else printf("%u ", proto); print_host(&nk->addr[1], nk->port[1], af, opts); if (PF_ANEQ(&nk->addr[1], &sk->addr[1], af) || nk->port[1] != sk->port[1]) { printf(" ("); print_host(&sk->addr[1], sk->port[1], af, opts); printf(")"); } if (s->direction == PF_OUT) printf(" -> "); else printf(" <- "); print_host(&nk->addr[0], nk->port[0], af, opts); if (PF_ANEQ(&nk->addr[0], &sk->addr[0], af) || nk->port[0] != sk->port[0]) { printf(" ("); print_host(&sk->addr[0], sk->port[0], af, opts); printf(")"); } printf(" "); if (proto == IPPROTO_TCP) { if (src->state <= TCPS_TIME_WAIT && dst->state <= TCPS_TIME_WAIT) printf(" %s:%s\n", tcpstates[src->state], tcpstates[dst->state]); else if (src->state == PF_TCPS_PROXY_SRC || dst->state == PF_TCPS_PROXY_SRC) printf(" PROXY:SRC\n"); else if (src->state == PF_TCPS_PROXY_DST || dst->state == PF_TCPS_PROXY_DST) printf(" PROXY:DST\n"); else printf(" <BAD STATE LEVELS %u:%u>\n", src->state, dst->state); if (opts & PF_OPT_VERBOSE) { printf(" "); print_seq(src); if (src->wscale && dst->wscale) printf(" wscale %u", src->wscale & PF_WSCALE_MASK); printf(" "); print_seq(dst); if (src->wscale && dst->wscale) printf(" wscale %u", dst->wscale & PF_WSCALE_MASK); printf("\n"); } } else if (proto == IPPROTO_UDP && src->state < PFUDPS_NSTATES && dst->state < PFUDPS_NSTATES) { const char
*states[] = PFUDPS_NAMES; printf(" %s:%s\n", states[src->state], states[dst->state]); #ifndef INET6 } else if (proto != IPPROTO_ICMP && src->state < PFOTHERS_NSTATES && dst->state < PFOTHERS_NSTATES) { #else } else if (proto != IPPROTO_ICMP && proto != IPPROTO_ICMPV6 && src->state < PFOTHERS_NSTATES && dst->state < PFOTHERS_NSTATES) { #endif /* XXX ICMP doesn't really have state levels */ const char *states[] = PFOTHERS_NAMES; printf(" %s:%s\n", states[src->state], states[dst->state]); } else { printf(" %u:%u\n", src->state, dst->state); } if (opts & PF_OPT_VERBOSE) { u_int32_t creation = s->creation; u_int32_t expire = s->expire; sec = creation % 60; creation /= 60; min = creation % 60; creation /= 60; printf(" age %.2u:%.2u:%.2u", creation, min, sec); sec = expire % 60; expire /= 60; min = expire % 60; expire /= 60; printf(", expires in %.2u:%.2u:%.2u", expire, min, sec); printf(", %ju:%ju pkts, %ju:%ju bytes", s->packets[0], s->packets[1], s->bytes[0], s->bytes[1]); if (s->anchor != -1) printf(", anchor %u", s->anchor); if (s->rule != -1) printf(", rule %u", s->rule); + if (s->state_flags & PFSTATE_ALLOWOPTS) + printf(", allow-opts"); if (s->state_flags & PFSTATE_SLOPPY) printf(", sloppy"); + if (s->state_flags & PFSTATE_NOSYNC) + printf(", no-sync"); + if (s->state_flags & PFSTATE_ACK) + printf(", psync-ack"); + if (s->state_flags & PFSTATE_NODF) + printf(", no-df"); + if (s->state_flags & PFSTATE_SETTOS) + printf(", set-tos"); + if (s->state_flags & PFSTATE_RANDOMID) + printf(", random-id"); + if (s->state_flags & PFSTATE_SCRUB_TCP) + printf(", scrub-tcp"); + if (s->state_flags & PFSTATE_SETPRIO) + printf(", set-prio"); if (s->sync_flags & PFSYNC_FLAG_SRCNODE) printf(", source-track"); if (s->sync_flags & PFSYNC_FLAG_NATSRCNODE) printf(", sticky-address"); printf("\n"); } if (opts & PF_OPT_VERBOSE2) { u_int64_t id; bcopy(&s->id, &id, sizeof(u_int64_t)); printf(" id: %016jx creatorid: %08x", id, s->creatorid); printf(" gateway: "); print_host(&s->rt_addr, 0, af, opts); printf("\n"); if (strcmp(s->ifname, s->orig_ifname) != 0) printf(" origif: %s\n", s->orig_ifname); } } int unmask(struct pf_addr *m, sa_family_t af) { int i = 31, j = 0, b = 0; u_int32_t tmp; while (j < 4 && m->addr32[j] == 0xffffffff) { b += 32; j++; } if (j < 4) { tmp = ntohl(m->addr32[j]); for (i = 31; tmp & (1 << i); --i) b++; } return (b); } diff --git a/sbin/pfctl/pfctl.c b/sbin/pfctl/pfctl.c index f60e3f67e082..b5ef07064fb9 100644 --- a/sbin/pfctl/pfctl.c +++ b/sbin/pfctl/pfctl.c @@ -1,3213 +1,3252 @@ /* $OpenBSD: pfctl.c,v 1.278 2008/08/31 20:18:17 jmc Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002,2003 Henning Brauer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #define PFIOC_USE_LATEST #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pfctl_parser.h" #include "pfctl.h" void usage(void); int pfctl_enable(int, int); int pfctl_disable(int, int); int pfctl_clear_stats(int, int); int pfctl_get_skip_ifaces(void); int pfctl_check_skip_ifaces(char *); int pfctl_adjust_skip_ifaces(struct pfctl *); int pfctl_clear_interface_flags(int, int); int pfctl_flush_eth_rules(int, int, char *); int pfctl_flush_rules(int, int, char *); int pfctl_flush_nat(int, int, char *); int pfctl_clear_altq(int, int); int pfctl_clear_src_nodes(int, int); int pfctl_clear_iface_states(int, const char *, int); void pfctl_addrprefix(char *, struct pf_addr *); int pfctl_kill_src_nodes(int, const char *, int); int pfctl_net_kill_states(int, const char *, int); int pfctl_gateway_kill_states(int, const char *, int); int pfctl_label_kill_states(int, const char *, int); int pfctl_id_kill_states(int, const char *, int); void pfctl_init_options(struct pfctl *); int pfctl_load_options(struct pfctl *); int pfctl_load_limit(struct pfctl *, unsigned int, unsigned int); int pfctl_load_timeout(struct pfctl *, unsigned int, unsigned int); int pfctl_load_debug(struct pfctl *, unsigned int); int pfctl_load_logif(struct pfctl *, char *); int pfctl_load_hostid(struct pfctl *, u_int32_t); +int pfctl_load_reassembly(struct pfctl *, u_int32_t); int pfctl_load_syncookies(struct pfctl *, u_int8_t); int pfctl_get_pool(int, struct pfctl_pool *, u_int32_t, u_int32_t, int, char *); void pfctl_print_eth_rule_counters(struct pfctl_eth_rule *, int); void pfctl_print_rule_counters(struct pfctl_rule *, int); int pfctl_show_eth_rules(int, char *, int, enum pfctl_show, char *, int, int); int pfctl_show_rules(int, char *, int, enum pfctl_show, char *, int, int); int pfctl_show_nat(int, char *, int, char *, int); int pfctl_show_src_nodes(int, int); int pfctl_show_states(int, const char *, int); int pfctl_show_status(int, int); int pfctl_show_running(int); int pfctl_show_timeouts(int, int); int pfctl_show_limits(int, int); void pfctl_debug(int, u_int32_t, int); int pfctl_test_altqsupport(int, int); int pfctl_show_anchors(int, int, char *); int pfctl_show_eth_anchors(int, int, char *); int pfctl_ruleset_trans(struct pfctl *, char *, struct pfctl_anchor *, bool); int pfctl_eth_ruleset_trans(struct pfctl *, char *, struct pfctl_eth_anchor *); int pfctl_load_eth_ruleset(struct pfctl *, char *, struct pfctl_eth_ruleset *, int); int pfctl_load_eth_rule(struct pfctl *, char *, struct pfctl_eth_rule *, int); int pfctl_load_ruleset(struct pfctl *, char *, struct pfctl_ruleset *, int, int); int pfctl_load_rule(struct pfctl *, char *, struct pfctl_rule *, int); const char *pfctl_lookup_option(char *, const char * const *); static struct pfctl_anchor_global pf_anchors; struct pfctl_anchor pf_main_anchor; struct 
pfctl_eth_anchor pf_eth_main_anchor; static struct pfr_buffer skip_b; static const char *clearopt; static char *rulesopt; static const char *showopt; static const char *debugopt; static char *anchoropt; static const char *optiopt = NULL; static const char *pf_device = "/dev/pf"; static char *ifaceopt; static char *tableopt; static const char *tblcmdopt; static int src_node_killers; static char *src_node_kill[2]; static int state_killers; static char *state_kill[2]; int loadopt; int altqsupport; int dev = -1; static int first_title = 1; static int labels = 0; #define INDENT(d, o) do { \ if (o) { \ int i; \ for (i=0; i < d; i++) \ printf(" "); \ } \ } while (0); \ static const struct { const char *name; int index; } pf_limits[] = { { "states", PF_LIMIT_STATES }, { "src-nodes", PF_LIMIT_SRC_NODES }, { "frags", PF_LIMIT_FRAGS }, { "table-entries", PF_LIMIT_TABLE_ENTRIES }, { NULL, 0 } }; struct pf_hint { const char *name; int timeout; }; static const struct pf_hint pf_hint_normal[] = { { "tcp.first", 2 * 60 }, { "tcp.opening", 30 }, { "tcp.established", 24 * 60 * 60 }, { "tcp.closing", 15 * 60 }, { "tcp.finwait", 45 }, { "tcp.closed", 90 }, { "tcp.tsdiff", 30 }, { NULL, 0 } }; static const struct pf_hint pf_hint_satellite[] = { { "tcp.first", 3 * 60 }, { "tcp.opening", 30 + 5 }, { "tcp.established", 24 * 60 * 60 }, { "tcp.closing", 15 * 60 + 5 }, { "tcp.finwait", 45 + 5 }, { "tcp.closed", 90 + 5 }, { "tcp.tsdiff", 60 }, { NULL, 0 } }; static const struct pf_hint pf_hint_conservative[] = { { "tcp.first", 60 * 60 }, { "tcp.opening", 15 * 60 }, { "tcp.established", 5 * 24 * 60 * 60 }, { "tcp.closing", 60 * 60 }, { "tcp.finwait", 10 * 60 }, { "tcp.closed", 3 * 60 }, { "tcp.tsdiff", 60 }, { NULL, 0 } }; static const struct pf_hint pf_hint_aggressive[] = { { "tcp.first", 30 }, { "tcp.opening", 5 }, { "tcp.established", 5 * 60 * 60 }, { "tcp.closing", 60 }, { "tcp.finwait", 30 }, { "tcp.closed", 30 }, { "tcp.tsdiff", 10 }, { NULL, 0 } }; static const struct { const char *name; const struct pf_hint *hint; } pf_hints[] = { { "normal", pf_hint_normal }, { "satellite", pf_hint_satellite }, { "high-latency", pf_hint_satellite }, { "conservative", pf_hint_conservative }, { "aggressive", pf_hint_aggressive }, { NULL, NULL } }; static const char * const clearopt_list[] = { "nat", "queue", "rules", "Sources", "states", "info", "Tables", "osfp", "all", "ethernet", NULL }; static const char * const showopt_list[] = { "ether", "nat", "queue", "rules", "Anchors", "Sources", "states", "info", "Interfaces", "labels", "timeouts", "memory", "Tables", "osfp", "Running", "all", NULL }; static const char * const tblcmdopt_list[] = { "kill", "flush", "add", "delete", "load", "replace", "show", "test", "zero", "expire", NULL }; static const char * const debugopt_list[] = { "none", "urgent", "misc", "loud", NULL }; static const char * const optiopt_list[] = { "none", "basic", "profile", NULL }; void usage(void) { extern char *__progname; fprintf(stderr, "usage: %s [-AdeghMmNnOPqRrvz] [-a anchor] [-D macro=value] [-F modifier]\n" "\t[-f file] [-i interface] [-K host | network]\n" "\t[-k host | network | gateway | label | id] [-o level] [-p device]\n" "\t[-s modifier] [-t table -T command [address ...]] [-x level]\n", __progname); exit(1); } /* * Cache protocol number to name translations. * * Translation is performed a lot e.g., when dumping states and * getprotobynumber is incredibly expensive. 
* * Note from the getprotobynumber(3) manpage: * * These functions use a thread-specific data space; if the data is needed * for future use, it should be copied before any subsequent calls overwrite * it. Only the Internet protocols are currently understood. * * * Consequently we only cache the name and strdup it for safety. * * At the time of writing this comment the last entry in /etc/protocols is: * divert 258 DIVERT # Divert pseudo-protocol [non IANA] */ const char * pfctl_proto2name(int proto) { static const char *pfctl_proto_cache[259]; struct protoent *p; if (proto >= nitems(pfctl_proto_cache)) { p = getprotobynumber(proto); if (p == NULL) { return (NULL); } return (p->p_name); } if (pfctl_proto_cache[proto] == NULL) { p = getprotobynumber(proto); if (p == NULL) { return (NULL); } pfctl_proto_cache[proto] = strdup(p->p_name); } return (pfctl_proto_cache[proto]); } int pfctl_enable(int dev, int opts) { if (ioctl(dev, DIOCSTART)) { if (errno == EEXIST) errx(1, "pf already enabled"); else if (errno == ESRCH) errx(1, "pfil registeration failed"); else err(1, "DIOCSTART"); } if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf enabled\n"); if (altqsupport && ioctl(dev, DIOCSTARTALTQ)) if (errno != EEXIST) err(1, "DIOCSTARTALTQ"); return (0); } int pfctl_disable(int dev, int opts) { if (ioctl(dev, DIOCSTOP)) { if (errno == ENOENT) errx(1, "pf not enabled"); else err(1, "DIOCSTOP"); } if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf disabled\n"); if (altqsupport && ioctl(dev, DIOCSTOPALTQ)) if (errno != ENOENT) err(1, "DIOCSTOPALTQ"); return (0); } int pfctl_clear_stats(int dev, int opts) { if (ioctl(dev, DIOCCLRSTATUS)) err(1, "DIOCCLRSTATUS"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf: statistics cleared\n"); return (0); } int pfctl_get_skip_ifaces(void) { bzero(&skip_b, sizeof(skip_b)); skip_b.pfrb_type = PFRB_IFACES; for (;;) { pfr_buf_grow(&skip_b, skip_b.pfrb_size); skip_b.pfrb_size = skip_b.pfrb_msize; if (pfi_get_ifaces(NULL, skip_b.pfrb_caddr, &skip_b.pfrb_size)) err(1, "pfi_get_ifaces"); if (skip_b.pfrb_size <= skip_b.pfrb_msize) break; } return (0); } int pfctl_check_skip_ifaces(char *ifname) { struct pfi_kif *p; struct node_host *h = NULL, *n = NULL; PFRB_FOREACH(p, &skip_b) { if (!strcmp(ifname, p->pfik_name) && (p->pfik_flags & PFI_IFLAG_SKIP)) p->pfik_flags &= ~PFI_IFLAG_SKIP; if (!strcmp(ifname, p->pfik_name) && p->pfik_group != NULL) { if ((h = ifa_grouplookup(p->pfik_name, 0)) == NULL) continue; for (n = h; n != NULL; n = n->next) { if (p->pfik_ifp == NULL) continue; if (strncmp(p->pfik_name, ifname, IFNAMSIZ)) continue; p->pfik_flags &= ~PFI_IFLAG_SKIP; } } } return (0); } int pfctl_adjust_skip_ifaces(struct pfctl *pf) { struct pfi_kif *p, *pp; struct node_host *h = NULL, *n = NULL; PFRB_FOREACH(p, &skip_b) { if (p->pfik_group == NULL || !(p->pfik_flags & PFI_IFLAG_SKIP)) continue; pfctl_set_interface_flags(pf, p->pfik_name, PFI_IFLAG_SKIP, 0); if ((h = ifa_grouplookup(p->pfik_name, 0)) == NULL) continue; for (n = h; n != NULL; n = n->next) PFRB_FOREACH(pp, &skip_b) { if (pp->pfik_ifp == NULL) continue; if (strncmp(pp->pfik_name, n->ifname, IFNAMSIZ)) continue; if (!(pp->pfik_flags & PFI_IFLAG_SKIP)) pfctl_set_interface_flags(pf, pp->pfik_name, PFI_IFLAG_SKIP, 1); if (pp->pfik_flags & PFI_IFLAG_SKIP) pp->pfik_flags &= ~PFI_IFLAG_SKIP; } } PFRB_FOREACH(p, &skip_b) { if (p->pfik_ifp == NULL || ! 
(p->pfik_flags & PFI_IFLAG_SKIP)) continue; pfctl_set_interface_flags(pf, p->pfik_name, PFI_IFLAG_SKIP, 0); } return (0); } int pfctl_clear_interface_flags(int dev, int opts) { struct pfioc_iface pi; if ((opts & PF_OPT_NOACTION) == 0) { bzero(&pi, sizeof(pi)); pi.pfiio_flags = PFI_IFLAG_SKIP; if (ioctl(dev, DIOCCLRIFFLAG, &pi)) err(1, "DIOCCLRIFFLAG"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf: interface flags reset\n"); } return (0); } int pfctl_flush_eth_rules(int dev, int opts, char *anchorname) { int ret; ret = pfctl_clear_eth_rules(dev, anchorname); if (ret != 0) err(1, "pfctl_clear_eth_rules"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "Ethernet rules cleared\n"); return (ret); } int pfctl_flush_rules(int dev, int opts, char *anchorname) { int ret; ret = pfctl_clear_rules(dev, anchorname); if (ret != 0) err(1, "pfctl_clear_rules"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "rules cleared\n"); return (0); } int pfctl_flush_nat(int dev, int opts, char *anchorname) { int ret; ret = pfctl_clear_nat(dev, anchorname); if (ret != 0) err(1, "pfctl_clear_nat"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "nat cleared\n"); return (0); } int pfctl_clear_altq(int dev, int opts) { struct pfr_buffer t; if (!altqsupport) return (-1); memset(&t, 0, sizeof(t)); t.pfrb_type = PFRB_TRANS; if (pfctl_add_trans(&t, PF_RULESET_ALTQ, "") || pfctl_trans(dev, &t, DIOCXBEGIN, 0) || pfctl_trans(dev, &t, DIOCXCOMMIT, 0)) err(1, "pfctl_clear_altq"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "altq cleared\n"); return (0); } int pfctl_clear_src_nodes(int dev, int opts) { if (ioctl(dev, DIOCCLRSRCNODES)) err(1, "DIOCCLRSRCNODES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "source tracking entries cleared\n"); return (0); } int pfctl_clear_iface_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; unsigned int killed; memset(&kill, 0, sizeof(kill)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if (pfctl_clear_states(dev, &kill, &killed)) err(1, "DIOCCLRSTATES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "%d states cleared\n", killed); return (0); } void pfctl_addrprefix(char *addr, struct pf_addr *mask) { char *p; const char *errstr; int prefix, ret_ga, q, r; struct addrinfo hints, *res; if ((p = strchr(addr, '/')) == NULL) return; *p++ = '\0'; prefix = strtonum(p, 0, 128, &errstr); if (errstr) errx(1, "prefix is %s: %s", errstr, p); bzero(&hints, sizeof(hints)); /* prefix only with numeric addresses */ hints.ai_flags |= AI_NUMERICHOST; if ((ret_ga = getaddrinfo(addr, NULL, &hints, &res))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } if (res->ai_family == AF_INET && prefix > 32) errx(1, "prefix too long for AF_INET"); else if (res->ai_family == AF_INET6 && prefix > 128) errx(1, "prefix too long for AF_INET6"); q = prefix >> 3; r = prefix & 7; switch (res->ai_family) { case AF_INET: bzero(&mask->v4, sizeof(mask->v4)); mask->v4.s_addr = htonl((u_int32_t) (0xffffffffffULL << (32 - prefix))); break; case AF_INET6: bzero(&mask->v6, sizeof(mask->v6)); if (q > 0) memset((void *)&mask->v6, 0xff, q); if (r > 0) *((u_char *)&mask->v6 + q) = (0xff00 >> r) & 0xff; break; } freeaddrinfo(res); } int pfctl_kill_src_nodes(int dev, const char *iface, int opts) { struct pfioc_src_node_kill psnk; struct addrinfo *res[2], *resp[2]; struct sockaddr last_src, last_dst; int killed, sources, dests; int ret_ga; 
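pfctl_addrprefix() above strips an optional "/prefix" suffix from a kill target and converts the prefix length into an address mask in place; the AF_INET6 branch builds the mask bytewise from q whole 0xff bytes plus one partial byte carrying the top r bits. A self-contained sketch of that construction, using a hypothetical make_v6_mask() helper:

#include <netinet/in.h>
#include <string.h>

/* Hypothetical helper: derive an IPv6 netmask from a prefix length the
 * same way pfctl_addrprefix() does: q full 0xff bytes, then one partial
 * byte holding the top r bits of the next octet. */
static void
make_v6_mask(struct in6_addr *mask, int prefix)
{
	int q = prefix >> 3;	/* whole 0xff bytes */
	int r = prefix & 7;	/* leftover high bits */

	memset(mask, 0, sizeof(*mask));
	if (q > 0)
		memset(mask, 0xff, q);
	if (r > 0)
		((unsigned char *)mask)[q] = (0xff00 >> r) & 0xff;
}

For prefix 20, q is 2 and r is 4, so the first two bytes are fully set and the third becomes 0xf0, i.e. ffff:f000::.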
killed = sources = dests = 0; memset(&psnk, 0, sizeof(psnk)); memset(&psnk.psnk_src.addr.v.a.mask, 0xff, sizeof(psnk.psnk_src.addr.v.a.mask)); memset(&last_src, 0xff, sizeof(last_src)); memset(&last_dst, 0xff, sizeof(last_dst)); pfctl_addrprefix(src_node_kill[0], &psnk.psnk_src.addr.v.a.mask); if ((ret_ga = getaddrinfo(src_node_kill[0], NULL, NULL, &res[0]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[0] = res[0]; resp[0]; resp[0] = resp[0]->ai_next) { if (resp[0]->ai_addr == NULL) continue; /* We get lots of duplicates. Catch the easy ones */ if (memcmp(&last_src, resp[0]->ai_addr, sizeof(last_src)) == 0) continue; last_src = *(struct sockaddr *)resp[0]->ai_addr; psnk.psnk_af = resp[0]->ai_family; sources++; if (psnk.psnk_af == AF_INET) psnk.psnk_src.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[0]->ai_addr)->sin_addr; else if (psnk.psnk_af == AF_INET6) psnk.psnk_src.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[0]->ai_addr)-> sin6_addr; else errx(1, "Unknown address family %d", psnk.psnk_af); if (src_node_killers > 1) { dests = 0; memset(&psnk.psnk_dst.addr.v.a.mask, 0xff, sizeof(psnk.psnk_dst.addr.v.a.mask)); memset(&last_dst, 0xff, sizeof(last_dst)); pfctl_addrprefix(src_node_kill[1], &psnk.psnk_dst.addr.v.a.mask); if ((ret_ga = getaddrinfo(src_node_kill[1], NULL, NULL, &res[1]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[1] = res[1]; resp[1]; resp[1] = resp[1]->ai_next) { if (resp[1]->ai_addr == NULL) continue; if (psnk.psnk_af != resp[1]->ai_family) continue; if (memcmp(&last_dst, resp[1]->ai_addr, sizeof(last_dst)) == 0) continue; last_dst = *(struct sockaddr *)resp[1]->ai_addr; dests++; if (psnk.psnk_af == AF_INET) psnk.psnk_dst.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[1]-> ai_addr)->sin_addr; else if (psnk.psnk_af == AF_INET6) psnk.psnk_dst.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[1]-> ai_addr)->sin6_addr; else errx(1, "Unknown address family %d", psnk.psnk_af); if (ioctl(dev, DIOCKILLSRCNODES, &psnk)) err(1, "DIOCKILLSRCNODES"); killed += psnk.psnk_killed; } freeaddrinfo(res[1]); } else { if (ioctl(dev, DIOCKILLSRCNODES, &psnk)) err(1, "DIOCKILLSRCNODES"); killed += psnk.psnk_killed; } } freeaddrinfo(res[0]); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d src nodes from %d sources and %d " "destinations\n", killed, sources, dests); return (0); } int pfctl_net_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; struct addrinfo *res[2], *resp[2]; struct sockaddr last_src, last_dst; unsigned int newkilled; int killed, sources, dests; int ret_ga; killed = sources = dests = 0; memset(&kill, 0, sizeof(kill)); memset(&kill.src.addr.v.a.mask, 0xff, sizeof(kill.src.addr.v.a.mask)); memset(&last_src, 0xff, sizeof(last_src)); memset(&last_dst, 0xff, sizeof(last_dst)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); pfctl_addrprefix(state_kill[0], &kill.src.addr.v.a.mask); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if ((ret_ga = getaddrinfo(state_kill[0], NULL, NULL, &res[0]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[0] = res[0]; resp[0]; resp[0] = resp[0]->ai_next) { if (resp[0]->ai_addr == NULL) continue; /* We get lots of duplicates. 
Catch the easy ones */ if (memcmp(&last_src, resp[0]->ai_addr, sizeof(last_src)) == 0) continue; last_src = *(struct sockaddr *)resp[0]->ai_addr; kill.af = resp[0]->ai_family; sources++; if (kill.af == AF_INET) kill.src.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[0]->ai_addr)->sin_addr; else if (kill.af == AF_INET6) kill.src.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[0]->ai_addr)-> sin6_addr; else errx(1, "Unknown address family %d", kill.af); if (state_killers > 1) { dests = 0; memset(&kill.dst.addr.v.a.mask, 0xff, sizeof(kill.dst.addr.v.a.mask)); memset(&last_dst, 0xff, sizeof(last_dst)); pfctl_addrprefix(state_kill[1], &kill.dst.addr.v.a.mask); if ((ret_ga = getaddrinfo(state_kill[1], NULL, NULL, &res[1]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[1] = res[1]; resp[1]; resp[1] = resp[1]->ai_next) { if (resp[1]->ai_addr == NULL) continue; if (kill.af != resp[1]->ai_family) continue; if (memcmp(&last_dst, resp[1]->ai_addr, sizeof(last_dst)) == 0) continue; last_dst = *(struct sockaddr *)resp[1]->ai_addr; dests++; if (kill.af == AF_INET) kill.dst.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[1]-> ai_addr)->sin_addr; else if (kill.af == AF_INET6) kill.dst.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[1]-> ai_addr)->sin6_addr; else errx(1, "Unknown address family %d", kill.af); if (pfctl_kill_states(dev, &kill, &newkilled)) err(1, "DIOCKILLSTATES"); killed += newkilled; } freeaddrinfo(res[1]); } else { if (pfctl_kill_states(dev, &kill, &newkilled)) err(1, "DIOCKILLSTATES"); killed += newkilled; } } freeaddrinfo(res[0]); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states from %d sources and %d " "destinations\n", killed, sources, dests); return (0); } int pfctl_gateway_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; struct addrinfo *res, *resp; struct sockaddr last_src; unsigned int newkilled; int killed = 0; int ret_ga; if (state_killers != 2 || (strlen(state_kill[1]) == 0)) { warnx("no gateway specified"); usage(); } memset(&kill, 0, sizeof(kill)); memset(&kill.rt_addr.addr.v.a.mask, 0xff, sizeof(kill.rt_addr.addr.v.a.mask)); memset(&last_src, 0xff, sizeof(last_src)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; pfctl_addrprefix(state_kill[1], &kill.rt_addr.addr.v.a.mask); if ((ret_ga = getaddrinfo(state_kill[1], NULL, NULL, &res))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp = res; resp; resp = resp->ai_next) { if (resp->ai_addr == NULL) continue; /* We get lots of duplicates. 
Catch the easy ones */ if (memcmp(&last_src, resp->ai_addr, sizeof(last_src)) == 0) continue; last_src = *(struct sockaddr *)resp->ai_addr; kill.af = resp->ai_family; if (kill.af == AF_INET) kill.rt_addr.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp->ai_addr)->sin_addr; else if (kill.af == AF_INET6) kill.rt_addr.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp->ai_addr)-> sin6_addr; else errx(1, "Unknown address family %d", kill.af); if (pfctl_kill_states(dev, &kill, &newkilled)) err(1, "DIOCKILLSTATES"); killed += newkilled; } freeaddrinfo(res); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states\n", killed); return (0); } int pfctl_label_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; unsigned int killed; if (state_killers != 2 || (strlen(state_kill[1]) == 0)) { warnx("no label specified"); usage(); } memset(&kill, 0, sizeof(kill)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if (strlcpy(kill.label, state_kill[1], sizeof(kill.label)) >= sizeof(kill.label)) errx(1, "label too long: %s", state_kill[1]); if (pfctl_kill_states(dev, &kill, &killed)) err(1, "DIOCKILLSTATES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states\n", killed); return (0); } int pfctl_id_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; unsigned int killed; if (state_killers != 2 || (strlen(state_kill[1]) == 0)) { warnx("no id specified"); usage(); } memset(&kill, 0, sizeof(kill)); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if ((sscanf(state_kill[1], "%jx/%x", &kill.cmp.id, &kill.cmp.creatorid)) == 2) { } else if ((sscanf(state_kill[1], "%jx", &kill.cmp.id)) == 1) { kill.cmp.creatorid = 0; } else { warnx("wrong id format specified"); usage(); } if (kill.cmp.id == 0) { warnx("cannot kill id 0"); usage(); } if (pfctl_kill_states(dev, &kill, &killed)) err(1, "DIOCKILLSTATES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states\n", killed); return (0); } int pfctl_get_pool(int dev, struct pfctl_pool *pool, u_int32_t nr, u_int32_t ticket, int r_action, char *anchorname) { struct pfioc_pooladdr pp; struct pf_pooladdr *pa; u_int32_t pnr, mpnr; memset(&pp, 0, sizeof(pp)); memcpy(pp.anchor, anchorname, sizeof(pp.anchor)); pp.r_action = r_action; pp.r_num = nr; pp.ticket = ticket; if (ioctl(dev, DIOCGETADDRS, &pp)) { warn("DIOCGETADDRS"); return (-1); } mpnr = pp.nr; TAILQ_INIT(&pool->list); for (pnr = 0; pnr < mpnr; ++pnr) { pp.nr = pnr; if (ioctl(dev, DIOCGETADDR, &pp)) { warn("DIOCGETADDR"); return (-1); } pa = calloc(1, sizeof(struct pf_pooladdr)); if (pa == NULL) err(1, "calloc"); bcopy(&pp.addr, pa, sizeof(struct pf_pooladdr)); TAILQ_INSERT_TAIL(&pool->list, pa, entries); } return (0); } void pfctl_move_pool(struct pfctl_pool *src, struct pfctl_pool *dst) { struct pf_pooladdr *pa; while ((pa = TAILQ_FIRST(&src->list)) != NULL) { TAILQ_REMOVE(&src->list, pa, entries); TAILQ_INSERT_TAIL(&dst->list, pa, entries); } } void pfctl_clear_pool(struct pfctl_pool *pool) { struct pf_pooladdr *pa; while ((pa = TAILQ_FIRST(&pool->list)) != NULL) { TAILQ_REMOVE(&pool->list, pa, entries); free(pa); } } void pfctl_print_eth_rule_counters(struct pfctl_eth_rule *rule, int opts) { if (opts & PF_OPT_VERBOSE) { printf(" [ Evaluations: %-8llu Packets: %-8llu " "Bytes: %-10llu]\n", (unsigned long long)rule->evaluations, (unsigned long long)(rule->packets[0] + rule->packets[1]), (unsigned long 
long)(rule->bytes[0] + rule->bytes[1])); } if (opts & PF_OPT_VERBOSE2) { char timestr[30]; if (rule->last_active_timestamp != 0) { bcopy(ctime(&rule->last_active_timestamp), timestr, sizeof(timestr)); *strchr(timestr, '\n') = '\0'; } else { snprintf(timestr, sizeof(timestr), "N/A"); } printf(" [ Last Active Time: %s ]\n", timestr); } } void pfctl_print_rule_counters(struct pfctl_rule *rule, int opts) { if (opts & PF_OPT_DEBUG) { const char *t[PF_SKIP_COUNT] = { "i", "d", "f", "p", "sa", "sp", "da", "dp" }; int i; printf(" [ Skip steps: "); for (i = 0; i < PF_SKIP_COUNT; ++i) { if (rule->skip[i].nr == rule->nr + 1) continue; printf("%s=", t[i]); if (rule->skip[i].nr == -1) printf("end "); else printf("%u ", rule->skip[i].nr); } printf("]\n"); printf(" [ queue: qname=%s qid=%u pqname=%s pqid=%u ]\n", rule->qname, rule->qid, rule->pqname, rule->pqid); } if (opts & PF_OPT_VERBOSE) { printf(" [ Evaluations: %-8llu Packets: %-8llu " "Bytes: %-10llu States: %-6ju]\n", (unsigned long long)rule->evaluations, (unsigned long long)(rule->packets[0] + rule->packets[1]), (unsigned long long)(rule->bytes[0] + rule->bytes[1]), (uintmax_t)rule->states_cur); if (!(opts & PF_OPT_DEBUG)) printf(" [ Inserted: uid %u pid %u " "State Creations: %-6ju]\n", (unsigned)rule->cuid, (unsigned)rule->cpid, (uintmax_t)rule->states_tot); } if (opts & PF_OPT_VERBOSE2) { char timestr[30]; if (rule->last_active_timestamp != 0) { bcopy(ctime(&rule->last_active_timestamp), timestr, sizeof(timestr)); *strchr(timestr, '\n') = '\0'; } else { snprintf(timestr, sizeof(timestr), "N/A"); } printf(" [ Last Active Time: %s ]\n", timestr); } } void pfctl_print_title(char *title) { if (!first_title) printf("\n"); first_title = 0; printf("%s\n", title); } int pfctl_show_eth_rules(int dev, char *path, int opts, enum pfctl_show format, char *anchorname, int depth, int wildcard) { char anchor_call[MAXPATHLEN]; struct pfctl_eth_rules_info info; struct pfctl_eth_rule rule; int brace; int dotitle = opts & PF_OPT_SHOWALL; int len = strlen(path); char *npath, *p; /* * Truncate a trailing / and * on an anchorname before searching for * the ruleset, this is syntactic sugar that doesn't actually make it * to the kernel. */ if ((p = strrchr(anchorname, '/')) != NULL && p[1] == '*' && p[2] == '\0') { p[0] = '\0'; } if (anchorname[0] == '/') { if ((npath = calloc(1, MAXPATHLEN)) == NULL) errx(1, "pfctl_rules: calloc"); snprintf(npath, MAXPATHLEN, "%s", anchorname); } else { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", anchorname); else snprintf(&path[len], MAXPATHLEN - len, "%s", anchorname); npath = path; } /* * If this anchor was called with a wildcard path, go through * the rulesets in the anchor rather than the rules. 
*/ if (wildcard && (opts & PF_OPT_RECURSE)) { struct pfctl_eth_rulesets_info ri; u_int32_t mnr, nr; if (pfctl_get_eth_rulesets_info(dev, &ri, npath)) { if (errno == EINVAL) { fprintf(stderr, "Anchor '%s' " "not found.\n", anchorname); } else { warn("DIOCGETETHRULESETS"); return (-1); } } mnr = ri.nr; pfctl_print_eth_rule_counters(&rule, opts); for (nr = 0; nr < mnr; ++nr) { struct pfctl_eth_ruleset_info rs; if (pfctl_get_eth_ruleset(dev, npath, nr, &rs)) err(1, "DIOCGETETHRULESET"); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("anchor \"%s\" all {\n", rs.name); pfctl_show_eth_rules(dev, npath, opts, format, rs.name, depth + 1, 0); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); } if (pfctl_get_eth_rules_info(dev, &info, path)) { warn("DIOCGETETHRULES"); return (-1); } for (int nr = 0; nr < info.nr; nr++) { brace = 0; INDENT(depth, !(opts & PF_OPT_VERBOSE)); if (pfctl_get_eth_rule(dev, nr, info.ticket, path, &rule, opts & PF_OPT_CLRRULECTRS, anchor_call) != 0) { warn("DIOCGETETHRULE"); return (-1); } if (anchor_call[0] && ((((p = strrchr(anchor_call, '_')) != NULL) && (p == anchor_call || *(--p) == '/')) || (opts & PF_OPT_RECURSE))) { brace++; int aclen = strlen(anchor_call); if (anchor_call[aclen - 1] == '*') anchor_call[aclen - 2] = '\0'; } p = &anchor_call[0]; if (dotitle) { pfctl_print_title("ETH RULES:"); dotitle = 0; } print_eth_rule(&rule, anchor_call, opts & (PF_OPT_VERBOSE2 | PF_OPT_DEBUG)); if (brace) printf(" {\n"); else printf("\n"); pfctl_print_eth_rule_counters(&rule, opts); if (brace) { pfctl_show_eth_rules(dev, path, opts, format, p, depth + 1, rule.anchor_wildcard); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } } path[len] = '\0'; return (0); } int pfctl_show_rules(int dev, char *path, int opts, enum pfctl_show format, char *anchorname, int depth, int wildcard) { struct pfctl_rules_info ri; struct pfctl_rule rule; char anchor_call[MAXPATHLEN]; u_int32_t nr, header = 0; int rule_numbers = opts & (PF_OPT_VERBOSE2 | PF_OPT_DEBUG); int numeric = opts & PF_OPT_NUMERIC; int len = strlen(path), ret = 0; char *npath, *p; /* * Truncate a trailing / and * on an anchorname before searching for * the ruleset, this is syntactic sugar that doesn't actually make it * to the kernel. */ if ((p = strrchr(anchorname, '/')) != NULL && p[1] == '*' && p[2] == '\0') { p[0] = '\0'; } if (anchorname[0] == '/') { if ((npath = calloc(1, MAXPATHLEN)) == NULL) errx(1, "pfctl_rules: calloc"); snprintf(npath, MAXPATHLEN, "%s", anchorname); } else { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", anchorname); else snprintf(&path[len], MAXPATHLEN - len, "%s", anchorname); npath = path; } /* * If this anchor was called with a wildcard path, go through * the rulesets in the anchor rather than the rules. 
*/ if (wildcard && (opts & PF_OPT_RECURSE)) { struct pfioc_ruleset prs; u_int32_t mnr, nr; memset(&prs, 0, sizeof(prs)); memcpy(prs.path, npath, sizeof(prs.path)); if (ioctl(dev, DIOCGETRULESETS, &prs)) { if (errno == EINVAL) fprintf(stderr, "Anchor '%s' " "not found.\n", anchorname); else err(1, "DIOCGETRULESETS"); } mnr = prs.nr; pfctl_print_rule_counters(&rule, opts); for (nr = 0; nr < mnr; ++nr) { prs.nr = nr; if (ioctl(dev, DIOCGETRULESET, &prs)) err(1, "DIOCGETRULESET"); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("anchor \"%s\" all {\n", prs.name); pfctl_show_rules(dev, npath, opts, format, prs.name, depth + 1, 0); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); } if (opts & PF_OPT_SHOWALL) { ret = pfctl_get_rules_info(dev, &ri, PF_PASS, path); if (ret != 0) { warn("DIOCGETRULES"); goto error; } header++; } ret = pfctl_get_rules_info(dev, &ri, PF_SCRUB, path); if (ret != 0) { warn("DIOCGETRULES"); goto error; } if (opts & PF_OPT_SHOWALL) { if (format == PFCTL_SHOW_RULES && (ri.nr > 0 || header)) pfctl_print_title("FILTER RULES:"); else if (format == PFCTL_SHOW_LABELS && labels) pfctl_print_title("LABEL COUNTERS:"); } for (nr = 0; nr < ri.nr; ++nr) { if (pfctl_get_clear_rule(dev, nr, ri.ticket, path, PF_SCRUB, &rule, anchor_call, opts & PF_OPT_CLRRULECTRS)) { warn("DIOCGETRULENV"); goto error; } if (pfctl_get_pool(dev, &rule.rpool, nr, ri.ticket, PF_SCRUB, path) != 0) goto error; switch (format) { case PFCTL_SHOW_LABELS: break; case PFCTL_SHOW_RULES: if (rule.label[0][0] && (opts & PF_OPT_SHOWALL)) labels = 1; print_rule(&rule, anchor_call, rule_numbers, numeric); printf("\n"); pfctl_print_rule_counters(&rule, opts); break; case PFCTL_SHOW_NOTHING: break; } pfctl_clear_pool(&rule.rpool); } ret = pfctl_get_rules_info(dev, &ri, PF_PASS, path); if (ret != 0) { warn("DIOCGETRULES"); goto error; } for (nr = 0; nr < ri.nr; ++nr) { if (pfctl_get_clear_rule(dev, nr, ri.ticket, path, PF_PASS, &rule, anchor_call, opts & PF_OPT_CLRRULECTRS)) { warn("DIOCGETRULE"); goto error; } if (pfctl_get_pool(dev, &rule.rpool, nr, ri.ticket, PF_PASS, path) != 0) goto error; switch (format) { case PFCTL_SHOW_LABELS: { bool show = false; int i = 0; while (rule.label[i][0]) { printf("%s ", rule.label[i++]); show = true; } if (show) { printf("%llu %llu %llu %llu" " %llu %llu %llu %ju\n", (unsigned long long)rule.evaluations, (unsigned long long)(rule.packets[0] + rule.packets[1]), (unsigned long long)(rule.bytes[0] + rule.bytes[1]), (unsigned long long)rule.packets[0], (unsigned long long)rule.bytes[0], (unsigned long long)rule.packets[1], (unsigned long long)rule.bytes[1], (uintmax_t)rule.states_tot); } break; } case PFCTL_SHOW_RULES: if (rule.label[0][0] && (opts & PF_OPT_SHOWALL)) labels = 1; INDENT(depth, !(opts & PF_OPT_VERBOSE)); print_rule(&rule, anchor_call, rule_numbers, numeric); /* * If this is a 'unnamed' brace notation * anchor, OR the user has explicitly requested * recursion, print it recursively. */ if (anchor_call[0] && (((p = strrchr(anchor_call, '/')) ? 
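/*
 * XXX Editor's sketch, not part of the patch: the test that resumes
 * after this note decides whether to print an anchor recursively. It
 * recurses when the last path component of anchor_call starts with
 * '_' (an anonymous anchor created by brace notation) or when the
 * user asked for recursion with a wildcard anchor (PF_OPT_RECURSE).
 * The same check as a predicate:
 */
#include <stdbool.h>
#include <string.h>

static bool
is_unnamed_anchor(const char *anchor_call)
{
	const char *p;

	if (anchor_call[0] == '\0')
		return (false);
	p = strrchr(anchor_call, '/');
	return (p != NULL ? p[1] == '_' : anchor_call[0] == '_');
}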
p[1] == '_' : anchor_call[0] == '_') || opts & PF_OPT_RECURSE)) { printf(" {\n"); pfctl_print_rule_counters(&rule, opts); pfctl_show_rules(dev, npath, opts, format, anchor_call, depth + 1, rule.anchor_wildcard); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } else { printf("\n"); pfctl_print_rule_counters(&rule, opts); } break; case PFCTL_SHOW_NOTHING: break; } pfctl_clear_pool(&rule.rpool); } error: path[len] = '\0'; return (ret); } int pfctl_show_nat(int dev, char *path, int opts, char *anchorname, int depth) { struct pfctl_rules_info ri; struct pfctl_rule rule; char anchor_call[MAXPATHLEN]; u_int32_t nr; static int nattype[3] = { PF_NAT, PF_RDR, PF_BINAT }; int i, dotitle = opts & PF_OPT_SHOWALL; int brace, ret; int len = strlen(path); char *p; if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", anchorname); else snprintf(&path[len], MAXPATHLEN - len, "%s", anchorname); for (i = 0; i < 3; i++) { ret = pfctl_get_rules_info(dev, &ri, nattype[i], path); if (ret != 0) { warn("DIOCGETRULES"); return (-1); } for (nr = 0; nr < ri.nr; ++nr) { brace = 0; INDENT(depth, !(opts & PF_OPT_VERBOSE)); if (pfctl_get_rule(dev, nr, ri.ticket, path, nattype[i], &rule, anchor_call)) { warn("DIOCGETRULE"); return (-1); } if (pfctl_get_pool(dev, &rule.rpool, nr, ri.ticket, nattype[i], path) != 0) return (-1); if (anchor_call[0] && ((((p = strrchr(anchor_call, '_')) != NULL) && (p == anchor_call || *(--p) == '/')) || (opts & PF_OPT_RECURSE))) { brace++; if ((p = strrchr(anchor_call, '/')) != NULL) p++; else p = &anchor_call[0]; } else p = &anchor_call[0]; if (dotitle) { pfctl_print_title("TRANSLATION RULES:"); dotitle = 0; } print_rule(&rule, anchor_call, opts & PF_OPT_VERBOSE2, opts & PF_OPT_NUMERIC); if (brace) printf(" {\n"); else printf("\n"); pfctl_print_rule_counters(&rule, opts); pfctl_clear_pool(&rule.rpool); if (brace) { pfctl_show_nat(dev, path, opts, p, depth + 1); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } } } return (0); } int pfctl_show_src_nodes(int dev, int opts) { struct pfioc_src_nodes psn; struct pf_src_node *p; char *inbuf = NULL, *newinbuf = NULL; unsigned int len = 0; int i; memset(&psn, 0, sizeof(psn)); for (;;) { psn.psn_len = len; if (len) { newinbuf = realloc(inbuf, len); if (newinbuf == NULL) err(1, "realloc"); psn.psn_buf = inbuf = newinbuf; } if (ioctl(dev, DIOCGETSRCNODES, &psn) < 0) { warn("DIOCGETSRCNODES"); free(inbuf); return (-1); } if (psn.psn_len + sizeof(struct pfioc_src_nodes) < len) break; if (len == 0 && psn.psn_len == 0) goto done; if (len == 0 && psn.psn_len != 0) len = psn.psn_len; if (psn.psn_len == 0) goto done; /* no src_nodes */ len *= 2; } p = psn.psn_src_nodes; if (psn.psn_len > 0 && (opts & PF_OPT_SHOWALL)) pfctl_print_title("SOURCE TRACKING NODES:"); for (i = 0; i < psn.psn_len; i += sizeof(*p)) { print_src_node(p, opts); p++; } done: free(inbuf); return (0); } int pfctl_show_states(int dev, const char *iface, int opts) { struct pfctl_states states; struct pfctl_state *s; int dotitle = (opts & PF_OPT_SHOWALL); memset(&states, 0, sizeof(states)); if (pfctl_get_states(dev, &states)) return (-1); TAILQ_FOREACH(s, &states.states, entry) { if (iface != NULL && strcmp(s->ifname, iface)) continue; if (dotitle) { pfctl_print_title("STATES:"); dotitle = 0; } print_state(s, opts); } pfctl_free_states(&states); return (0); } int pfctl_show_status(int dev, int opts) { struct pfctl_status *status; struct pfctl_syncookies cookies; if ((status = pfctl_get_status(dev)) == NULL) { warn("DIOCGETSTATUS"); return (-1); } if 
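/*
 * XXX Editor's sketch, not part of the patch: the DIOCGETSRCNODES
 * loop in pfctl_show_src_nodes() above is an instance of a generic
 * grow-and-retry snapshot pattern: probe with an empty buffer to
 * learn the kernel's size, then keep doubling until a snapshot fits
 * (the node table can grow between calls). query() is a hypothetical
 * stand-in for the ioctl and returns the number of bytes needed:
 */
#include <err.h>
#include <stdlib.h>

static void *
fetch_all(size_t (*query)(void *, size_t), size_t *lenp)
{
	void *buf = NULL, *nbuf;
	size_t len = 0, need;

	for (;;) {
		need = query(buf, len);
		if (need <= len)
			break;		/* the snapshot fit */
		len = len ? len * 2 : need;
		if ((nbuf = realloc(buf, len)) == NULL)
			err(1, "realloc");
		buf = nbuf;
	}
	*lenp = need;
	return (buf);
}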
(pfctl_get_syncookies(dev, &cookies)) { pfctl_free_status(status); warn("DIOCGETSYNCOOKIES"); return (-1); } if (opts & PF_OPT_SHOWALL) pfctl_print_title("INFO:"); print_status(status, &cookies, opts); pfctl_free_status(status); return (0); } int pfctl_show_running(int dev) { struct pfctl_status *status; int running; if ((status = pfctl_get_status(dev)) == NULL) { warn("DIOCGETSTATUS"); return (-1); } running = status->running; print_running(status); pfctl_free_status(status); return (!running); } int pfctl_show_timeouts(int dev, int opts) { struct pfioc_tm pt; int i; if (opts & PF_OPT_SHOWALL) pfctl_print_title("TIMEOUTS:"); memset(&pt, 0, sizeof(pt)); for (i = 0; pf_timeouts[i].name; i++) { pt.timeout = pf_timeouts[i].timeout; if (ioctl(dev, DIOCGETTIMEOUT, &pt)) err(1, "DIOCGETTIMEOUT"); printf("%-20s %10d", pf_timeouts[i].name, pt.seconds); if (pf_timeouts[i].timeout >= PFTM_ADAPTIVE_START && pf_timeouts[i].timeout <= PFTM_ADAPTIVE_END) printf(" states"); else printf("s"); printf("\n"); } return (0); } int pfctl_show_limits(int dev, int opts) { struct pfioc_limit pl; int i; if (opts & PF_OPT_SHOWALL) pfctl_print_title("LIMITS:"); memset(&pl, 0, sizeof(pl)); for (i = 0; pf_limits[i].name; i++) { pl.index = pf_limits[i].index; if (ioctl(dev, DIOCGETLIMIT, &pl)) err(1, "DIOCGETLIMIT"); printf("%-13s ", pf_limits[i].name); if (pl.limit == UINT_MAX) printf("unlimited\n"); else printf("hard limit %8u\n", pl.limit); } return (0); } /* callbacks for rule/nat/rdr/addr */ int pfctl_add_pool(struct pfctl *pf, struct pfctl_pool *p, sa_family_t af) { struct pf_pooladdr *pa; if ((pf->opts & PF_OPT_NOACTION) == 0) { if (ioctl(pf->dev, DIOCBEGINADDRS, &pf->paddr)) err(1, "DIOCBEGINADDRS"); } pf->paddr.af = af; TAILQ_FOREACH(pa, &p->list, entries) { memcpy(&pf->paddr.addr, pa, sizeof(struct pf_pooladdr)); if ((pf->opts & PF_OPT_NOACTION) == 0) { if (ioctl(pf->dev, DIOCADDADDR, &pf->paddr)) err(1, "DIOCADDADDR"); } } return (0); } int pfctl_append_rule(struct pfctl *pf, struct pfctl_rule *r, const char *anchor_call) { u_int8_t rs_num; struct pfctl_rule *rule; struct pfctl_ruleset *rs; char *p; rs_num = pf_get_ruleset_number(r->action); if (rs_num == PF_RULESET_MAX) errx(1, "Invalid rule type %d", r->action); rs = &pf->anchor->ruleset; if (anchor_call[0] && r->anchor == NULL) { /* * Don't make non-brace anchors part of the main anchor pool. */ if ((r->anchor = calloc(1, sizeof(*r->anchor))) == NULL) err(1, "pfctl_append_rule: calloc"); pf_init_ruleset(&r->anchor->ruleset); r->anchor->ruleset.anchor = r->anchor; if (strlcpy(r->anchor->path, anchor_call, sizeof(rule->anchor->path)) >= sizeof(rule->anchor->path)) errx(1, "pfctl_append_rule: strlcpy"); if ((p = strrchr(anchor_call, '/')) != NULL) { if (!strlen(p)) err(1, "pfctl_append_rule: bad anchor name %s", anchor_call); } else p = (char *)anchor_call; if (strlcpy(r->anchor->name, p, sizeof(rule->anchor->name)) >= sizeof(rule->anchor->name)) errx(1, "pfctl_append_rule: strlcpy"); } if ((rule = calloc(1, sizeof(*rule))) == NULL) err(1, "calloc"); bcopy(r, rule, sizeof(*rule)); TAILQ_INIT(&rule->rpool.list); pfctl_move_pool(&r->rpool, &rule->rpool); TAILQ_INSERT_TAIL(rs->rules[rs_num].active.ptr, rule, entries); return (0); } int pfctl_append_eth_rule(struct pfctl *pf, struct pfctl_eth_rule *r, const char *anchor_call) { struct pfctl_eth_rule *rule; struct pfctl_eth_ruleset *rs; char *p; rs = &pf->eanchor->ruleset; if (anchor_call[0] && r->anchor == NULL) { /* * Don't make non-brace anchors part of the main anchor pool. 
*/ if ((r->anchor = calloc(1, sizeof(*r->anchor))) == NULL) err(1, "pfctl_append_rule: calloc"); pf_init_eth_ruleset(&r->anchor->ruleset); r->anchor->ruleset.anchor = r->anchor; if (strlcpy(r->anchor->path, anchor_call, sizeof(rule->anchor->path)) >= sizeof(rule->anchor->path)) errx(1, "pfctl_append_rule: strlcpy"); if ((p = strrchr(anchor_call, '/')) != NULL) { if (!strlen(p)) err(1, "pfctl_append_eth_rule: bad anchor name %s", anchor_call); } else p = (char *)anchor_call; if (strlcpy(r->anchor->name, p, sizeof(rule->anchor->name)) >= sizeof(rule->anchor->name)) errx(1, "pfctl_append_eth_rule: strlcpy"); } if ((rule = calloc(1, sizeof(*rule))) == NULL) err(1, "calloc"); bcopy(r, rule, sizeof(*rule)); TAILQ_INSERT_TAIL(&rs->rules, rule, entries); return (0); } int pfctl_eth_ruleset_trans(struct pfctl *pf, char *path, struct pfctl_eth_anchor *a) { int osize = pf->trans->pfrb_size; if ((pf->loadopt & PFCTL_FLAG_ETH) != 0) { if (pfctl_add_trans(pf->trans, PF_RULESET_ETH, path)) return (1); } if (pfctl_trans(pf->dev, pf->trans, DIOCXBEGIN, osize)) return (5); return (0); } int pfctl_ruleset_trans(struct pfctl *pf, char *path, struct pfctl_anchor *a, bool do_eth) { int osize = pf->trans->pfrb_size; if ((pf->loadopt & PFCTL_FLAG_ETH) != 0 && do_eth) { if (pfctl_add_trans(pf->trans, PF_RULESET_ETH, path)) return (1); } if ((pf->loadopt & PFCTL_FLAG_NAT) != 0) { if (pfctl_add_trans(pf->trans, PF_RULESET_NAT, path) || pfctl_add_trans(pf->trans, PF_RULESET_BINAT, path) || pfctl_add_trans(pf->trans, PF_RULESET_RDR, path)) return (1); } if (a == pf->astack[0] && ((altqsupport && (pf->loadopt & PFCTL_FLAG_ALTQ) != 0))) { if (pfctl_add_trans(pf->trans, PF_RULESET_ALTQ, path)) return (2); } if ((pf->loadopt & PFCTL_FLAG_FILTER) != 0) { if (pfctl_add_trans(pf->trans, PF_RULESET_SCRUB, path) || pfctl_add_trans(pf->trans, PF_RULESET_FILTER, path)) return (3); } if (pf->loadopt & PFCTL_FLAG_TABLE) if (pfctl_add_trans(pf->trans, PF_RULESET_TABLE, path)) return (4); if (pfctl_trans(pf->dev, pf->trans, DIOCXBEGIN, osize)) return (5); return (0); } int pfctl_load_eth_ruleset(struct pfctl *pf, char *path, struct pfctl_eth_ruleset *rs, int depth) { struct pfctl_eth_rule *r; int error, len = strlen(path); int brace = 0; pf->eanchor = rs->anchor; if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", pf->eanchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", pf->eanchor->name); if (depth) { if (TAILQ_FIRST(&rs->rules) != NULL) { brace++; if (pf->opts & PF_OPT_VERBOSE) printf(" {\n"); if ((pf->opts & PF_OPT_NOACTION) == 0 && (error = pfctl_eth_ruleset_trans(pf, path, rs->anchor))) { printf("pfctl_load_eth_rulesets: " "pfctl_eth_ruleset_trans %d\n", error); goto error; } } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); } while ((r = TAILQ_FIRST(&rs->rules)) != NULL) { TAILQ_REMOVE(&rs->rules, r, entries); error = pfctl_load_eth_rule(pf, path, r, depth); if (error) return (error); if (r->anchor) { if ((error = pfctl_load_eth_ruleset(pf, path, &r->anchor->ruleset, depth + 1))) return (error); } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); free(r); } if (brace && pf->opts & PF_OPT_VERBOSE) { INDENT(depth - 1, (pf->opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); error: path[len] = '\0'; return (error); } int pfctl_load_eth_rule(struct pfctl *pf, char *path, struct pfctl_eth_rule *r, int depth) { char *name; char anchor[PF_ANCHOR_NAME_SIZE]; int len = strlen(path); if (strlcpy(anchor, path, sizeof(anchor)) >= sizeof(anchor)) errx(1, "pfctl_load_eth_rule: strlcpy"); if 
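/*
 * XXX Editor's sketch, not part of the patch: pfctl_ruleset_trans()
 * above queues one transaction element per ruleset type and then
 * issues a single DIOCXBEGIN, which fills in the per-ruleset tickets;
 * the distinct return codes 1..5 identify which step failed. Reduced
 * to a single ruleset type it looks like this:
 */
static int
begin_filter_only(struct pfctl *pf, char *path)
{
	int osize = pf->trans->pfrb_size;

	if (pfctl_add_trans(pf->trans, PF_RULESET_FILTER, path))
		return (1);
	if (pfctl_trans(pf->dev, pf->trans, DIOCXBEGIN, osize))
		return (5);
	return (0);
}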
(r->anchor) { if (r->anchor->match) { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", r->anchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", r->anchor->name); name = r->anchor->name; } else name = r->anchor->path; } else name = ""; if ((pf->opts & PF_OPT_NOACTION) == 0) if (pfctl_add_eth_rule(pf->dev, r, anchor, name, pf->eth_ticket)) err(1, "DIOCADDETHRULENV"); if (pf->opts & PF_OPT_VERBOSE) { INDENT(depth, !(pf->opts & PF_OPT_VERBOSE2)); print_eth_rule(r, r->anchor ? r->anchor->name : "", pf->opts & (PF_OPT_VERBOSE2 | PF_OPT_DEBUG)); } path[len] = '\0'; return (0); } int pfctl_load_ruleset(struct pfctl *pf, char *path, struct pfctl_ruleset *rs, int rs_num, int depth) { struct pfctl_rule *r; int error, len = strlen(path); int brace = 0; pf->anchor = rs->anchor; if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", pf->anchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", pf->anchor->name); if (depth) { if (TAILQ_FIRST(rs->rules[rs_num].active.ptr) != NULL) { brace++; if (pf->opts & PF_OPT_VERBOSE) printf(" {\n"); if ((pf->opts & PF_OPT_NOACTION) == 0 && (error = pfctl_ruleset_trans(pf, path, rs->anchor, false))) { printf("pfctl_load_rulesets: " "pfctl_ruleset_trans %d\n", error); goto error; } } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); } if (pf->optimize && rs_num == PF_RULESET_FILTER) pfctl_optimize_ruleset(pf, rs); while ((r = TAILQ_FIRST(rs->rules[rs_num].active.ptr)) != NULL) { TAILQ_REMOVE(rs->rules[rs_num].active.ptr, r, entries); for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) expand_label(r->label[i], PF_RULE_LABEL_SIZE, r); expand_label(r->tagname, PF_TAG_NAME_SIZE, r); expand_label(r->match_tagname, PF_TAG_NAME_SIZE, r); if ((error = pfctl_load_rule(pf, path, r, depth))) goto error; if (r->anchor) { if ((error = pfctl_load_ruleset(pf, path, &r->anchor->ruleset, rs_num, depth + 1))) goto error; } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); free(r); } if (brace && pf->opts & PF_OPT_VERBOSE) { INDENT(depth - 1, (pf->opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); error: path[len] = '\0'; return (error); } int pfctl_load_rule(struct pfctl *pf, char *path, struct pfctl_rule *r, int depth) { u_int8_t rs_num = pf_get_ruleset_number(r->action); char *name; u_int32_t ticket; char anchor[PF_ANCHOR_NAME_SIZE]; int len = strlen(path); int error; bool was_present; /* set up anchor before adding to path for anchor_call */ if ((pf->opts & PF_OPT_NOACTION) == 0) ticket = pfctl_get_ticket(pf->trans, rs_num, path); if (strlcpy(anchor, path, sizeof(anchor)) >= sizeof(anchor)) errx(1, "pfctl_load_rule: strlcpy"); if (r->anchor) { if (r->anchor->match) { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", r->anchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", r->anchor->name); name = r->anchor->name; } else name = r->anchor->path; } else name = ""; was_present = false; if ((pf->opts & PF_OPT_NOACTION) == 0) { if (pfctl_add_pool(pf, &r->rpool, r->af)) return (1); error = pfctl_add_rule(pf->dev, r, anchor, name, ticket, pf->paddr.ticket); switch (error) { case 0: /* things worked, do nothing */ break; case EEXIST: /* an identical rule is already present */ was_present = true; break; default: err(1, "DIOCADDRULENV"); } } if (pf->opts & PF_OPT_VERBOSE) { INDENT(depth, !(pf->opts & PF_OPT_VERBOSE2)); print_rule(r, name, pf->opts & PF_OPT_VERBOSE2, pf->opts & PF_OPT_NUMERIC); if (was_present) printf(" -- rule was already present"); } path[len] = '\0'; pfctl_clear_pool(&r->rpool); return (0); } int 
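/*
 * XXX Editor's sketch, not part of the patch: pfctl_load_rule() and
 * friends above share one MAXPATHLEN 'path' buffer and splice anchor
 * names in and out of it. The recurring push/pop idiom, with a
 * hypothetical visit() callback standing in for the recursive call:
 */
#include <sys/param.h>	/* MAXPATHLEN */
#include <stdio.h>
#include <string.h>

static void
with_anchor_path(char *path, const char *name, void (*visit)(char *))
{
	int len = strlen(path);

	/* push: "foo" + "bar" -> "foo/bar" (no leading '/' at depth 0) */
	snprintf(&path[len], MAXPATHLEN - len, "%s%s",
	    path[0] ? "/" : "", name);
	visit(path);
	path[len] = '\0';	/* pop: restore the caller's prefix */
}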
pfctl_add_altq(struct pfctl *pf, struct pf_altq *a) { if (altqsupport && (loadopt & PFCTL_FLAG_ALTQ) != 0) { memcpy(&pf->paltq->altq, a, sizeof(struct pf_altq)); if ((pf->opts & PF_OPT_NOACTION) == 0) { if (ioctl(pf->dev, DIOCADDALTQ, pf->paltq)) { if (errno == ENXIO) errx(1, "qtype not configured"); else if (errno == ENODEV) errx(1, "%s: driver does not support " "altq", a->ifname); else err(1, "DIOCADDALTQ"); } } pfaltq_store(&pf->paltq->altq); } return (0); } int pfctl_rules(int dev, char *filename, int opts, int optimize, char *anchorname, struct pfr_buffer *trans) { #define ERR(x) do { warn(x); goto _error; } while(0) #define ERRX(x) do { warnx(x); goto _error; } while(0) struct pfr_buffer *t, buf; struct pfioc_altq pa; struct pfctl pf; struct pfctl_ruleset *rs; struct pfctl_eth_ruleset *ethrs; struct pfr_table trs; char *path; int osize; RB_INIT(&pf_anchors); memset(&pf_main_anchor, 0, sizeof(pf_main_anchor)); pf_init_ruleset(&pf_main_anchor.ruleset); pf_main_anchor.ruleset.anchor = &pf_main_anchor; memset(&pf_eth_main_anchor, 0, sizeof(pf_eth_main_anchor)); pf_init_eth_ruleset(&pf_eth_main_anchor.ruleset); pf_eth_main_anchor.ruleset.anchor = &pf_eth_main_anchor; if (trans == NULL) { bzero(&buf, sizeof(buf)); buf.pfrb_type = PFRB_TRANS; t = &buf; osize = 0; } else { t = trans; osize = t->pfrb_size; } memset(&pa, 0, sizeof(pa)); pa.version = PFIOC_ALTQ_VERSION; memset(&pf, 0, sizeof(pf)); memset(&trs, 0, sizeof(trs)); if ((path = calloc(1, MAXPATHLEN)) == NULL) ERRX("pfctl_rules: calloc"); if (strlcpy(trs.pfrt_anchor, anchorname, sizeof(trs.pfrt_anchor)) >= sizeof(trs.pfrt_anchor)) ERRX("pfctl_rules: strlcpy"); pf.dev = dev; pf.opts = opts; pf.optimize = optimize; pf.loadopt = loadopt; /* non-brace anchor, create without resolving the path */ if ((pf.anchor = calloc(1, sizeof(*pf.anchor))) == NULL) ERRX("pfctl_rules: calloc"); rs = &pf.anchor->ruleset; pf_init_ruleset(rs); rs->anchor = pf.anchor; if (strlcpy(pf.anchor->path, anchorname, sizeof(pf.anchor->path)) >= sizeof(pf.anchor->path)) errx(1, "pfctl_rules: strlcpy"); if (strlcpy(pf.anchor->name, anchorname, sizeof(pf.anchor->name)) >= sizeof(pf.anchor->name)) errx(1, "pfctl_rules: strlcpy"); pf.astack[0] = pf.anchor; pf.asd = 0; if (anchorname[0]) pf.loadopt &= ~PFCTL_FLAG_ALTQ; pf.paltq = &pa; pf.trans = t; pfctl_init_options(&pf); /* Set up ethernet anchor */ if ((pf.eanchor = calloc(1, sizeof(*pf.eanchor))) == NULL) ERRX("pfctl_rules: calloc"); if (strlcpy(pf.eanchor->path, anchorname, sizeof(pf.eanchor->path)) >= sizeof(pf.eanchor->path)) errx(1, "pfctl_rules: strlcpy"); if (strlcpy(pf.eanchor->name, anchorname, sizeof(pf.eanchor->name)) >= sizeof(pf.eanchor->name)) errx(1, "pfctl_rules: strlcpy"); ethrs = &pf.eanchor->ruleset; pf_init_eth_ruleset(ethrs); ethrs->anchor = pf.eanchor; pf.eastack[0] = pf.eanchor; if ((opts & PF_OPT_NOACTION) == 0) { /* * XXX For the time being we need to open transactions for * the main ruleset before parsing, because tables are still * loaded at parse time. 
*/ if (pfctl_ruleset_trans(&pf, anchorname, pf.anchor, true)) ERRX("pfctl_rules"); if (pf.loadopt & PFCTL_FLAG_ETH) pf.eth_ticket = pfctl_get_ticket(t, PF_RULESET_ETH, anchorname); if (altqsupport && (pf.loadopt & PFCTL_FLAG_ALTQ)) pa.ticket = pfctl_get_ticket(t, PF_RULESET_ALTQ, anchorname); if (pf.loadopt & PFCTL_FLAG_TABLE) pf.astack[0]->ruleset.tticket = pfctl_get_ticket(t, PF_RULESET_TABLE, anchorname); } if (parse_config(filename, &pf) < 0) { if ((opts & PF_OPT_NOACTION) == 0) ERRX("Syntax error in config file: " "pf rules not loaded"); else goto _error; } if (loadopt & PFCTL_FLAG_OPTION) pfctl_adjust_skip_ifaces(&pf); if ((pf.loadopt & PFCTL_FLAG_FILTER && (pfctl_load_ruleset(&pf, path, rs, PF_RULESET_SCRUB, 0))) || (pf.loadopt & PFCTL_FLAG_ETH && (pfctl_load_eth_ruleset(&pf, path, ethrs, 0))) || (pf.loadopt & PFCTL_FLAG_NAT && (pfctl_load_ruleset(&pf, path, rs, PF_RULESET_NAT, 0) || pfctl_load_ruleset(&pf, path, rs, PF_RULESET_RDR, 0) || pfctl_load_ruleset(&pf, path, rs, PF_RULESET_BINAT, 0))) || (pf.loadopt & PFCTL_FLAG_FILTER && pfctl_load_ruleset(&pf, path, rs, PF_RULESET_FILTER, 0))) { if ((opts & PF_OPT_NOACTION) == 0) ERRX("Unable to load rules into kernel"); else goto _error; } if ((altqsupport && (pf.loadopt & PFCTL_FLAG_ALTQ) != 0)) if (check_commit_altq(dev, opts) != 0) ERRX("errors in altq config"); /* process "load anchor" directives */ if (!anchorname[0]) if (pfctl_load_anchors(dev, &pf, t) == -1) ERRX("load anchors"); if (trans == NULL && (opts & PF_OPT_NOACTION) == 0) { if (!anchorname[0]) if (pfctl_load_options(&pf)) goto _error; if (pfctl_trans(dev, t, DIOCXCOMMIT, osize)) ERR("DIOCXCOMMIT"); } free(path); return (0); _error: if (trans == NULL) { /* main ruleset */ if ((opts & PF_OPT_NOACTION) == 0) if (pfctl_trans(dev, t, DIOCXROLLBACK, osize)) err(1, "DIOCXROLLBACK"); exit(1); } else { /* sub ruleset */ free(path); return (-1); } #undef ERR #undef ERRX } FILE * pfctl_fopen(const char *name, const char *mode) { struct stat st; FILE *fp; fp = fopen(name, mode); if (fp == NULL) return (NULL); if (fstat(fileno(fp), &st)) { fclose(fp); return (NULL); } if (S_ISDIR(st.st_mode)) { fclose(fp); errno = EISDIR; return (NULL); } return (fp); } void pfctl_init_options(struct pfctl *pf) { pf->timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; pf->timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; pf->timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; pf->timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; pf->timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; pf->timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; pf->timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; pf->timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; pf->timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; pf->timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; pf->timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; pf->timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; pf->timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; pf->timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; pf->timeout[PFTM_FRAG] = PFTM_FRAG_VAL; pf->timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; pf->timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; pf->timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; pf->timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; pf->timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; pf->limit[PF_LIMIT_STATES] = PFSTATE_HIWAT; pf->limit[PF_LIMIT_FRAGS] = PFFRAG_FRENT_HIWAT; pf->limit[PF_LIMIT_SRC_NODES] = PFSNODE_HIWAT; pf->limit[PF_LIMIT_TABLE_ENTRIES] = PFR_KENTRY_HIWAT; 
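	/*
	 * XXX Editor's note (not part of the patch): the values above
	 * and below mirror the pf.conf(5) "set ..." defaults; the
	 * "pf->reassemble = 0" line added just below makes fragment
	 * reassembly default to off. Worked example for the
	 * adaptive-timeout rule in pfctl_load_options(): with
	 * "set limit states 10000" and no explicit adaptive timeouts,
	 *	adaptive.start = (10000 / 10) * 6  = 6000  (60%)
	 *	adaptive.end   = (10000 / 10) * 12 = 12000 (120%)
	 */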
pf->debug = PF_DEBUG_URGENT; + pf->reassemble = 0; pf->syncookies = false; pf->syncookieswat[0] = PF_SYNCOOKIES_LOWATPCT; pf->syncookieswat[1] = PF_SYNCOOKIES_HIWATPCT; } int pfctl_load_options(struct pfctl *pf) { int i, error = 0; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); /* load limits */ for (i = 0; i < PF_LIMIT_MAX; i++) { if ((pf->opts & PF_OPT_MERGE) && !pf->limit_set[i]) continue; if (pfctl_load_limit(pf, i, pf->limit[i])) error = 1; } /* * If we've set the limit, but haven't explicitly set adaptive * timeouts, do it now with a start of 60% and end of 120%. */ if (pf->limit_set[PF_LIMIT_STATES] && !pf->timeout_set[PFTM_ADAPTIVE_START] && !pf->timeout_set[PFTM_ADAPTIVE_END]) { pf->timeout[PFTM_ADAPTIVE_START] = (pf->limit[PF_LIMIT_STATES] / 10) * 6; pf->timeout_set[PFTM_ADAPTIVE_START] = 1; pf->timeout[PFTM_ADAPTIVE_END] = (pf->limit[PF_LIMIT_STATES] / 10) * 12; pf->timeout_set[PFTM_ADAPTIVE_END] = 1; } /* load timeouts */ for (i = 0; i < PFTM_MAX; i++) { if ((pf->opts & PF_OPT_MERGE) && !pf->timeout_set[i]) continue; if (pfctl_load_timeout(pf, i, pf->timeout[i])) error = 1; } /* load debug */ if (!(pf->opts & PF_OPT_MERGE) || pf->debug_set) if (pfctl_load_debug(pf, pf->debug)) error = 1; /* load logif */ if (!(pf->opts & PF_OPT_MERGE) || pf->ifname_set) if (pfctl_load_logif(pf, pf->ifname)) error = 1; /* load hostid */ if (!(pf->opts & PF_OPT_MERGE) || pf->hostid_set) if (pfctl_load_hostid(pf, pf->hostid)) error = 1; + /* load reassembly settings */ + if (!(pf->opts & PF_OPT_MERGE) || pf->reass_set) + if (pfctl_load_reassembly(pf, pf->reassemble)) + error = 1; + /* load keepcounters */ if (pfctl_set_keepcounters(pf->dev, pf->keep_counters)) error = 1; /* load syncookies settings */ if (pfctl_load_syncookies(pf, pf->syncookies)) error = 1; return (error); } int pfctl_set_limit(struct pfctl *pf, const char *opt, unsigned int limit) { int i; for (i = 0; pf_limits[i].name; i++) { if (strcasecmp(opt, pf_limits[i].name) == 0) { pf->limit[pf_limits[i].index] = limit; pf->limit_set[pf_limits[i].index] = 1; break; } } if (pf_limits[i].name == NULL) { warnx("Bad pool name."); return (1); } if (pf->opts & PF_OPT_VERBOSE) printf("set limit %s %d\n", opt, limit); return (0); } int pfctl_load_limit(struct pfctl *pf, unsigned int index, unsigned int limit) { struct pfioc_limit pl; memset(&pl, 0, sizeof(pl)); pl.index = index; pl.limit = limit; if (ioctl(pf->dev, DIOCSETLIMIT, &pl)) { if (errno == EBUSY) warnx("Current pool size exceeds requested hard limit"); else warnx("DIOCSETLIMIT"); return (1); } return (0); } int pfctl_set_timeout(struct pfctl *pf, const char *opt, int seconds, int quiet) { int i; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); for (i = 0; pf_timeouts[i].name; i++) { if (strcasecmp(opt, pf_timeouts[i].name) == 0) { pf->timeout[pf_timeouts[i].timeout] = seconds; pf->timeout_set[pf_timeouts[i].timeout] = 1; break; } } if (pf_timeouts[i].name == NULL) { warnx("Bad timeout name."); return (1); } if (pf->opts & PF_OPT_VERBOSE && ! 
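/*
 * XXX Editor's sketch, not part of the patch: pfctl_set_limit() and
 * pfctl_set_timeout() above both do the same walk over a
 * NULL-terminated name/value table with a case-insensitive compare.
 * Generic form (table type and helper are hypothetical):
 */
#include <strings.h>

struct name_val {
	const char	*name;
	int		 val;
};

static int
lookup_name(const struct name_val *tab, const char *opt, int *val)
{
	for (; tab->name != NULL; tab++)
		if (strcasecmp(opt, tab->name) == 0) {
			*val = tab->val;
			return (0);
		}
	return (-1);	/* unknown name; caller warns and bails */
}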
quiet)
		printf("set timeout %s %d\n", opt, seconds);
	return (0);
}

int
pfctl_load_timeout(struct pfctl *pf, unsigned int timeout, unsigned int seconds)
{
	struct pfioc_tm pt;

	memset(&pt, 0, sizeof(pt));
	pt.timeout = timeout;
	pt.seconds = seconds;
	if (ioctl(pf->dev, DIOCSETTIMEOUT, &pt)) {
		warnx("DIOCSETTIMEOUT");
		return (1);
	}
	return (0);
}

+int
+pfctl_set_reassembly(struct pfctl *pf, int on, int nodf)
+{
+	if ((loadopt & PFCTL_FLAG_OPTION) == 0)
+		return (0);
+
+	pf->reass_set = 1;
+	if (on) {
+		pf->reassemble = PF_REASS_ENABLED;
+		if (nodf)
+			pf->reassemble |= PF_REASS_NODF;
+	} else {
+		pf->reassemble = 0;
+	}
+
+	if (pf->opts & PF_OPT_VERBOSE)
+		printf("set reassemble %s %s\n", on ? "yes" : "no",
+		    nodf ? "no-df" : "");
+
+	return (0);
+}
+
int
pfctl_set_optimization(struct pfctl *pf, const char *opt)
{
	const struct pf_hint *hint;
	int i, r;

	if ((loadopt & PFCTL_FLAG_OPTION) == 0)
		return (0);

	for (i = 0; pf_hints[i].name; i++)
		if (strcasecmp(opt, pf_hints[i].name) == 0)
			break;
	hint = pf_hints[i].hint;
	if (hint == NULL) {
		warnx("invalid state timeouts optimization");
		return (1);
	}
	for (i = 0; hint[i].name; i++)
		if ((r = pfctl_set_timeout(pf, hint[i].name,
		    hint[i].timeout, 1)))
			return (r);

	if (pf->opts & PF_OPT_VERBOSE)
		printf("set optimization %s\n", opt);

	return (0);
}

int
pfctl_set_logif(struct pfctl *pf, char *ifname)
{
	if ((loadopt & PFCTL_FLAG_OPTION) == 0)
		return (0);

	if (!strcmp(ifname, "none")) {
		free(pf->ifname);
		pf->ifname = NULL;
	} else {
		pf->ifname = strdup(ifname);
		if (!pf->ifname)
			errx(1, "pfctl_set_logif: strdup");
	}
	pf->ifname_set = 1;

	if (pf->opts & PF_OPT_VERBOSE)
		printf("set loginterface %s\n", ifname);

	return (0);
}

int
pfctl_load_logif(struct pfctl *pf, char *ifname)
{
	struct pfioc_if pi;

	memset(&pi, 0, sizeof(pi));
	if (ifname && strlcpy(pi.ifname, ifname,
	    sizeof(pi.ifname)) >= sizeof(pi.ifname)) {
		warnx("pfctl_load_logif: strlcpy");
		return (1);
	}
	if (ioctl(pf->dev, DIOCSETSTATUSIF, &pi)) {
		warnx("DIOCSETSTATUSIF");
		return (1);
	}
	return (0);
}

int
pfctl_set_hostid(struct pfctl *pf, u_int32_t hostid)
{
	if ((loadopt & PFCTL_FLAG_OPTION) == 0)
		return (0);

	HTONL(hostid);

	pf->hostid = hostid;
	pf->hostid_set = 1;

	if (pf->opts & PF_OPT_VERBOSE)
		printf("set hostid 0x%08x\n", ntohl(hostid));

	return (0);
}

int
pfctl_load_hostid(struct pfctl *pf, u_int32_t hostid)
{
	if (ioctl(dev, DIOCSETHOSTID, &hostid)) {
		warnx("DIOCSETHOSTID");
		return (1);
	}
	return (0);
}

+int
+pfctl_load_reassembly(struct pfctl *pf, u_int32_t reassembly)
+{
+	if (ioctl(dev, DIOCSETREASS, &reassembly)) {
+		warnx("DIOCSETREASS");
+		return (1);
+	}
+	return (0);
+}
+
int
pfctl_load_syncookies(struct pfctl *pf, u_int8_t val)
{
	struct pfctl_syncookies cookies;

	bzero(&cookies, sizeof(cookies));
	cookies.mode = val;
	cookies.lowwater = pf->syncookieswat[0];
	cookies.highwater = pf->syncookieswat[1];
	if (pfctl_set_syncookies(dev, &cookies)) {
		warnx("DIOCSETSYNCOOKIES");
		return (1);
	}
	return (0);
}

int
pfctl_cfg_syncookies(struct pfctl *pf, uint8_t val, struct pfctl_watermarks *w)
{
	if (val != PF_SYNCOOKIES_ADAPTIVE && w != NULL) {
		warnx("syncookies start/end only apply to adaptive");
		return (1);
	}
	if (val == PF_SYNCOOKIES_ADAPTIVE && w != NULL) {
		if (!w->hi)
			w->hi = PF_SYNCOOKIES_HIWATPCT;
		if (!w->lo)
			w->lo = w->hi / 2;
		if (w->lo >= w->hi) {
			warnx("start must be higher than end");
			return (1);
		}
		pf->syncookieswat[0] = w->lo;
		pf->syncookieswat[1] = w->hi;
		pf->syncookieswat_set = 1;
	}
	if (pf->opts & PF_OPT_VERBOSE) {
		if (val == PF_SYNCOOKIES_NEVER)
			printf("set syncookies never\n");
		else if (val == PF_SYNCOOKIES_ALWAYS)
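/*
 * XXX Editor's sketch, not part of the patch: end-to-end flow of the
 * new reassembly knob. Parsing "set reassemble yes no-df" is expected
 * to call pfctl_set_reassembly(pf, 1, 1), which records
 * PF_REASS_ENABLED | PF_REASS_NODF in pf->reassemble and marks
 * pf->reass_set; pfctl_load_options() then pushes the value to the
 * kernel via pfctl_load_reassembly() and DIOCSETREASS. Condensed:
 */
static void
reassembly_example(struct pfctl *pf)
{
	/* equivalent of "set reassemble yes no-df" in pf.conf */
	pfctl_set_reassembly(pf, 1, 1);

	/* normally done for us by pfctl_load_options() */
	if (pfctl_load_reassembly(pf, pf->reassemble))
		warnx("could not enable reassembly");
}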
printf("set syncookies always\n"); else if (val == PF_SYNCOOKIES_ADAPTIVE) { if (pf->syncookieswat_set) printf("set syncookies adaptive (start %u%%, " "end %u%%)\n", pf->syncookieswat[1], pf->syncookieswat[0]); else printf("set syncookies adaptive\n"); } else { /* cannot happen */ warnx("king bula ate all syncookies"); return (1); } } pf->syncookies = val; return (0); } int pfctl_set_debug(struct pfctl *pf, char *d) { u_int32_t level; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); if (!strcmp(d, "none")) pf->debug = PF_DEBUG_NONE; else if (!strcmp(d, "urgent")) pf->debug = PF_DEBUG_URGENT; else if (!strcmp(d, "misc")) pf->debug = PF_DEBUG_MISC; else if (!strcmp(d, "loud")) pf->debug = PF_DEBUG_NOISY; else { warnx("unknown debug level \"%s\"", d); return (-1); } pf->debug_set = 1; level = pf->debug; if ((pf->opts & PF_OPT_NOACTION) == 0) if (ioctl(dev, DIOCSETDEBUG, &level)) err(1, "DIOCSETDEBUG"); if (pf->opts & PF_OPT_VERBOSE) printf("set debug %s\n", d); return (0); } int pfctl_load_debug(struct pfctl *pf, unsigned int level) { if (ioctl(pf->dev, DIOCSETDEBUG, &level)) { warnx("DIOCSETDEBUG"); return (1); } return (0); } int pfctl_set_interface_flags(struct pfctl *pf, char *ifname, int flags, int how) { struct pfioc_iface pi; struct node_host *h = NULL, *n = NULL; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); bzero(&pi, sizeof(pi)); pi.pfiio_flags = flags; /* Make sure our cache matches the kernel. If we set or clear the flag * for a group this applies to all members. */ h = ifa_grouplookup(ifname, 0); for (n = h; n != NULL; n = n->next) pfctl_set_interface_flags(pf, n->ifname, flags, how); if (strlcpy(pi.pfiio_name, ifname, sizeof(pi.pfiio_name)) >= sizeof(pi.pfiio_name)) errx(1, "pfctl_set_interface_flags: strlcpy"); if ((pf->opts & PF_OPT_NOACTION) == 0) { if (how == 0) { if (ioctl(pf->dev, DIOCCLRIFFLAG, &pi)) err(1, "DIOCCLRIFFLAG"); } else { if (ioctl(pf->dev, DIOCSETIFFLAG, &pi)) err(1, "DIOCSETIFFLAG"); pfctl_check_skip_ifaces(ifname); } } return (0); } void pfctl_debug(int dev, u_int32_t level, int opts) { if (ioctl(dev, DIOCSETDEBUG, &level)) err(1, "DIOCSETDEBUG"); if ((opts & PF_OPT_QUIET) == 0) { fprintf(stderr, "debug level set to '"); switch (level) { case PF_DEBUG_NONE: fprintf(stderr, "none"); break; case PF_DEBUG_URGENT: fprintf(stderr, "urgent"); break; case PF_DEBUG_MISC: fprintf(stderr, "misc"); break; case PF_DEBUG_NOISY: fprintf(stderr, "loud"); break; default: fprintf(stderr, ""); break; } fprintf(stderr, "'\n"); } } int pfctl_test_altqsupport(int dev, int opts) { struct pfioc_altq pa; pa.version = PFIOC_ALTQ_VERSION; if (ioctl(dev, DIOCGETALTQS, &pa)) { if (errno == ENODEV) { if (opts & PF_OPT_VERBOSE) fprintf(stderr, "No ALTQ support in kernel\n" "ALTQ related functions disabled\n"); return (0); } else err(1, "DIOCGETALTQS"); } return (1); } int pfctl_show_anchors(int dev, int opts, char *anchorname) { struct pfioc_ruleset pr; u_int32_t mnr, nr; memset(&pr, 0, sizeof(pr)); memcpy(pr.path, anchorname, sizeof(pr.path)); if (ioctl(dev, DIOCGETRULESETS, &pr)) { if (errno == EINVAL) fprintf(stderr, "Anchor '%s' not found.\n", anchorname); else err(1, "DIOCGETRULESETS"); return (-1); } mnr = pr.nr; for (nr = 0; nr < mnr; ++nr) { char sub[MAXPATHLEN]; pr.nr = nr; if (ioctl(dev, DIOCGETRULESET, &pr)) err(1, "DIOCGETRULESET"); if (!strcmp(pr.name, PF_RESERVED_ANCHOR)) continue; sub[0] = 0; if (pr.path[0]) { strlcat(sub, pr.path, sizeof(sub)); strlcat(sub, "/", sizeof(sub)); } strlcat(sub, pr.name, sizeof(sub)); if (sub[0] != '_' || (opts & PF_OPT_VERBOSE)) 
printf(" %s\n", sub); if ((opts & PF_OPT_VERBOSE) && pfctl_show_anchors(dev, opts, sub)) return (-1); } return (0); } int pfctl_show_eth_anchors(int dev, int opts, char *anchorname) { struct pfctl_eth_rulesets_info ri; struct pfctl_eth_ruleset_info rs; int ret; if ((ret = pfctl_get_eth_rulesets_info(dev, &ri, anchorname)) != 0) { if (ret == ENOENT) fprintf(stderr, "Anchor '%s' not found.\n", anchorname); else err(1, "DIOCGETETHRULESETS"); return (-1); } for (int nr = 0; nr < ri.nr; nr++) { char sub[MAXPATHLEN]; if (pfctl_get_eth_ruleset(dev, anchorname, nr, &rs) != 0) err(1, "DIOCGETETHRULESET"); if (!strcmp(rs.name, PF_RESERVED_ANCHOR)) continue; sub[0] = 0; if (rs.path[0]) { strlcat(sub, rs.path, sizeof(sub)); strlcat(sub, "/", sizeof(sub)); } strlcat(sub, rs.name, sizeof(sub)); if (sub[0] != '_' || (opts & PF_OPT_VERBOSE)) printf(" %s\n", sub); if ((opts & PF_OPT_VERBOSE) && pfctl_show_eth_anchors(dev, opts, sub)) return (-1); } return (0); } const char * pfctl_lookup_option(char *cmd, const char * const *list) { if (cmd != NULL && *cmd) for (; *list; list++) if (!strncmp(cmd, *list, strlen(cmd))) return (*list); return (NULL); } int main(int argc, char *argv[]) { int error = 0; int ch; int mode = O_RDONLY; int opts = 0; int optimize = PF_OPTIMIZE_BASIC; char anchorname[MAXPATHLEN]; char *path; if (argc < 2) usage(); while ((ch = getopt(argc, argv, "a:AdD:eqf:F:ghi:k:K:mMnNOo:Pp:rRs:t:T:vx:z")) != -1) { switch (ch) { case 'a': anchoropt = optarg; break; case 'd': opts |= PF_OPT_DISABLE; mode = O_RDWR; break; case 'D': if (pfctl_cmdline_symset(optarg) < 0) warnx("could not parse macro definition %s", optarg); break; case 'e': opts |= PF_OPT_ENABLE; mode = O_RDWR; break; case 'q': opts |= PF_OPT_QUIET; break; case 'F': clearopt = pfctl_lookup_option(optarg, clearopt_list); if (clearopt == NULL) { warnx("Unknown flush modifier '%s'", optarg); usage(); } mode = O_RDWR; break; case 'i': ifaceopt = optarg; break; case 'k': if (state_killers >= 2) { warnx("can only specify -k twice"); usage(); /* NOTREACHED */ } state_kill[state_killers++] = optarg; mode = O_RDWR; break; case 'K': if (src_node_killers >= 2) { warnx("can only specify -K twice"); usage(); /* NOTREACHED */ } src_node_kill[src_node_killers++] = optarg; mode = O_RDWR; break; case 'm': opts |= PF_OPT_MERGE; break; case 'M': opts |= PF_OPT_KILLMATCH; break; case 'n': opts |= PF_OPT_NOACTION; break; case 'N': loadopt |= PFCTL_FLAG_NAT; break; case 'r': opts |= PF_OPT_USEDNS; break; case 'f': rulesopt = optarg; mode = O_RDWR; break; case 'g': opts |= PF_OPT_DEBUG; break; case 'A': loadopt |= PFCTL_FLAG_ALTQ; break; case 'R': loadopt |= PFCTL_FLAG_FILTER; break; case 'o': optiopt = pfctl_lookup_option(optarg, optiopt_list); if (optiopt == NULL) { warnx("Unknown optimization '%s'", optarg); usage(); } opts |= PF_OPT_OPTIMIZE; break; case 'O': loadopt |= PFCTL_FLAG_OPTION; break; case 'p': pf_device = optarg; break; case 'P': opts |= PF_OPT_NUMERIC; break; case 's': showopt = pfctl_lookup_option(optarg, showopt_list); if (showopt == NULL) { warnx("Unknown show modifier '%s'", optarg); usage(); } break; case 't': tableopt = optarg; break; case 'T': tblcmdopt = pfctl_lookup_option(optarg, tblcmdopt_list); if (tblcmdopt == NULL) { warnx("Unknown table command '%s'", optarg); usage(); } break; case 'v': if (opts & PF_OPT_VERBOSE) opts |= PF_OPT_VERBOSE2; opts |= PF_OPT_VERBOSE; break; case 'x': debugopt = pfctl_lookup_option(optarg, debugopt_list); if (debugopt == NULL) { warnx("Unknown debug level '%s'", optarg); usage(); } mode = O_RDWR; 
break; case 'z': opts |= PF_OPT_CLRRULECTRS; mode = O_RDWR; break; case 'h': /* FALLTHROUGH */ default: usage(); /* NOTREACHED */ } } if (tblcmdopt != NULL) { argc -= optind; argv += optind; ch = *tblcmdopt; if (ch == 'l') { loadopt |= PFCTL_FLAG_TABLE; tblcmdopt = NULL; } else mode = strchr("acdefkrz", ch) ? O_RDWR : O_RDONLY; } else if (argc != optind) { warnx("unknown command line argument: %s ...", argv[optind]); usage(); /* NOTREACHED */ } if (loadopt == 0) loadopt = ~0; if ((path = calloc(1, MAXPATHLEN)) == NULL) errx(1, "pfctl: calloc"); memset(anchorname, 0, sizeof(anchorname)); if (anchoropt != NULL) { int len = strlen(anchoropt); if (len >= 1 && anchoropt[len - 1] == '*') { if (len >= 2 && anchoropt[len - 2] == '/') anchoropt[len - 2] = '\0'; else anchoropt[len - 1] = '\0'; opts |= PF_OPT_RECURSE; } if (strlcpy(anchorname, anchoropt, sizeof(anchorname)) >= sizeof(anchorname)) errx(1, "anchor name '%s' too long", anchoropt); loadopt &= PFCTL_FLAG_FILTER|PFCTL_FLAG_NAT|PFCTL_FLAG_TABLE|PFCTL_FLAG_ETH; } if ((opts & PF_OPT_NOACTION) == 0) { dev = open(pf_device, mode); if (dev == -1) err(1, "%s", pf_device); altqsupport = pfctl_test_altqsupport(dev, opts); } else { dev = open(pf_device, O_RDONLY); if (dev >= 0) opts |= PF_OPT_DUMMYACTION; /* turn off options */ opts &= ~ (PF_OPT_DISABLE | PF_OPT_ENABLE); clearopt = showopt = debugopt = NULL; #if !defined(ENABLE_ALTQ) altqsupport = 0; #else altqsupport = 1; #endif } if (opts & PF_OPT_DISABLE) if (pfctl_disable(dev, opts)) error = 1; if (showopt != NULL) { switch (*showopt) { case 'A': pfctl_show_anchors(dev, opts, anchorname); pfctl_show_eth_anchors(dev, opts, anchorname); break; case 'r': pfctl_load_fingerprints(dev, opts); pfctl_show_rules(dev, path, opts, PFCTL_SHOW_RULES, anchorname, 0, 0); break; case 'l': pfctl_load_fingerprints(dev, opts); pfctl_show_rules(dev, path, opts, PFCTL_SHOW_LABELS, anchorname, 0, 0); break; case 'n': pfctl_load_fingerprints(dev, opts); pfctl_show_nat(dev, path, opts, anchorname, 0); break; case 'q': pfctl_show_altq(dev, ifaceopt, opts, opts & PF_OPT_VERBOSE2); break; case 's': pfctl_show_states(dev, ifaceopt, opts); break; case 'S': pfctl_show_src_nodes(dev, opts); break; case 'i': pfctl_show_status(dev, opts); break; case 'R': error = pfctl_show_running(dev); break; case 't': pfctl_show_timeouts(dev, opts); break; case 'm': pfctl_show_limits(dev, opts); break; case 'e': pfctl_show_eth_rules(dev, path, opts, 0, anchorname, 0, 0); break; case 'a': opts |= PF_OPT_SHOWALL; pfctl_load_fingerprints(dev, opts); pfctl_show_eth_rules(dev, path, opts, 0, anchorname, 0, 0); pfctl_show_nat(dev, path, opts, anchorname, 0); pfctl_show_rules(dev, path, opts, 0, anchorname, 0, 0); pfctl_show_altq(dev, ifaceopt, opts, 0); pfctl_show_states(dev, ifaceopt, opts); pfctl_show_src_nodes(dev, opts); pfctl_show_status(dev, opts); pfctl_show_rules(dev, path, opts, 1, anchorname, 0, 0); pfctl_show_timeouts(dev, opts); pfctl_show_limits(dev, opts); pfctl_show_tables(anchorname, opts); pfctl_show_fingerprints(opts); break; case 'T': pfctl_show_tables(anchorname, opts); break; case 'o': pfctl_load_fingerprints(dev, opts); pfctl_show_fingerprints(opts); break; case 'I': pfctl_show_ifaces(ifaceopt, opts); break; } } if ((opts & PF_OPT_CLRRULECTRS) && showopt == NULL) { pfctl_show_eth_rules(dev, path, opts, PFCTL_SHOW_NOTHING, anchorname, 0, 0); pfctl_show_rules(dev, path, opts, PFCTL_SHOW_NOTHING, anchorname, 0, 0); } if (clearopt != NULL) { if (anchorname[0] == '_' || strstr(anchorname, "/_") != NULL) errx(1, "anchor names 
beginning with '_' cannot " "be modified from the command line"); switch (*clearopt) { case 'e': pfctl_flush_eth_rules(dev, opts, anchorname); break; case 'r': pfctl_flush_rules(dev, opts, anchorname); break; case 'n': pfctl_flush_nat(dev, opts, anchorname); break; case 'q': pfctl_clear_altq(dev, opts); break; case 's': pfctl_clear_iface_states(dev, ifaceopt, opts); break; case 'S': pfctl_clear_src_nodes(dev, opts); break; case 'i': pfctl_clear_stats(dev, opts); break; case 'a': pfctl_flush_eth_rules(dev, opts, anchorname); pfctl_flush_rules(dev, opts, anchorname); pfctl_flush_nat(dev, opts, anchorname); pfctl_clear_tables(anchorname, opts); if (!*anchorname) { pfctl_clear_altq(dev, opts); pfctl_clear_iface_states(dev, ifaceopt, opts); pfctl_clear_src_nodes(dev, opts); pfctl_clear_stats(dev, opts); pfctl_clear_fingerprints(dev, opts); pfctl_clear_interface_flags(dev, opts); } break; case 'o': pfctl_clear_fingerprints(dev, opts); break; case 'T': pfctl_clear_tables(anchorname, opts); break; } } if (state_killers) { if (!strcmp(state_kill[0], "label")) pfctl_label_kill_states(dev, ifaceopt, opts); else if (!strcmp(state_kill[0], "id")) pfctl_id_kill_states(dev, ifaceopt, opts); else if (!strcmp(state_kill[0], "gateway")) pfctl_gateway_kill_states(dev, ifaceopt, opts); else pfctl_net_kill_states(dev, ifaceopt, opts); } if (src_node_killers) pfctl_kill_src_nodes(dev, ifaceopt, opts); if (tblcmdopt != NULL) { error = pfctl_command_tables(argc, argv, tableopt, tblcmdopt, rulesopt, anchorname, opts); rulesopt = NULL; } if (optiopt != NULL) { switch (*optiopt) { case 'n': optimize = 0; break; case 'b': optimize |= PF_OPTIMIZE_BASIC; break; case 'o': case 'p': optimize |= PF_OPTIMIZE_PROFILE; break; } } if ((rulesopt != NULL) && (loadopt & PFCTL_FLAG_OPTION) && !anchorname[0] && !(opts & PF_OPT_NOACTION)) if (pfctl_get_skip_ifaces()) error = 1; if (rulesopt != NULL && !(opts & (PF_OPT_MERGE|PF_OPT_NOACTION)) && !anchorname[0] && (loadopt & PFCTL_FLAG_OPTION)) if (pfctl_file_fingerprints(dev, opts, PF_OSFP_FILE)) error = 1; if (rulesopt != NULL) { if (anchorname[0] == '_' || strstr(anchorname, "/_") != NULL) errx(1, "anchor names beginning with '_' cannot " "be modified from the command line"); if (pfctl_rules(dev, rulesopt, opts, optimize, anchorname, NULL)) error = 1; else if (!(opts & PF_OPT_NOACTION) && (loadopt & PFCTL_FLAG_TABLE)) warn_namespace_collision(NULL); } if (opts & PF_OPT_ENABLE) if (pfctl_enable(dev, opts)) error = 1; if (debugopt != NULL) { switch (*debugopt) { case 'n': pfctl_debug(dev, PF_DEBUG_NONE, opts); break; case 'u': pfctl_debug(dev, PF_DEBUG_URGENT, opts); break; case 'm': pfctl_debug(dev, PF_DEBUG_MISC, opts); break; case 'l': pfctl_debug(dev, PF_DEBUG_NOISY, opts); break; } } exit(error); } diff --git a/sbin/pfctl/pfctl_parser.c b/sbin/pfctl/pfctl_parser.c index fca1a7aa7b8f..15ed8c29abd0 100644 --- a/sbin/pfctl/pfctl_parser.c +++ b/sbin/pfctl/pfctl_parser.c @@ -1,2006 +1,2034 @@ /* $OpenBSD: pfctl_parser.c,v 1.240 2008/06/10 20:55:02 mcbride Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002,2003 Henning Brauer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pfctl_parser.h" #include "pfctl.h" void print_op (u_int8_t, const char *, const char *); void print_port (u_int8_t, u_int16_t, u_int16_t, const char *, int); void print_ugid (u_int8_t, unsigned, unsigned, const char *, unsigned); void print_flags (u_int8_t); void print_fromto(struct pf_rule_addr *, pf_osfp_t, struct pf_rule_addr *, u_int8_t, u_int8_t, int, int); int ifa_skip_if(const char *filter, struct node_host *p); struct node_host *host_if(const char *, int, int *); struct node_host *host_v4(const char *, int); struct node_host *host_v6(const char *, int); struct node_host *host_dns(const char *, int, int); const char * const tcpflags = "FSRPAUEW"; static const struct icmptypeent icmp_type[] = { { "echoreq", ICMP_ECHO }, { "echorep", ICMP_ECHOREPLY }, { "unreach", ICMP_UNREACH }, { "squench", ICMP_SOURCEQUENCH }, { "redir", ICMP_REDIRECT }, { "althost", ICMP_ALTHOSTADDR }, { "routeradv", ICMP_ROUTERADVERT }, { "routersol", ICMP_ROUTERSOLICIT }, { "timex", ICMP_TIMXCEED }, { "paramprob", ICMP_PARAMPROB }, { "timereq", ICMP_TSTAMP }, { "timerep", ICMP_TSTAMPREPLY }, { "inforeq", ICMP_IREQ }, { "inforep", ICMP_IREQREPLY }, { "maskreq", ICMP_MASKREQ }, { "maskrep", ICMP_MASKREPLY }, { "trace", ICMP_TRACEROUTE }, { "dataconv", ICMP_DATACONVERR }, { "mobredir", ICMP_MOBILE_REDIRECT }, { "ipv6-where", ICMP_IPV6_WHEREAREYOU }, { "ipv6-here", ICMP_IPV6_IAMHERE }, { "mobregreq", ICMP_MOBILE_REGREQUEST }, { "mobregrep", ICMP_MOBILE_REGREPLY }, { "skip", ICMP_SKIP }, { "photuris", ICMP_PHOTURIS } }; static const struct icmptypeent icmp6_type[] = { { "unreach", ICMP6_DST_UNREACH }, { "toobig", ICMP6_PACKET_TOO_BIG }, { "timex", ICMP6_TIME_EXCEEDED }, { "paramprob", ICMP6_PARAM_PROB }, { "echoreq", ICMP6_ECHO_REQUEST }, { "echorep", ICMP6_ECHO_REPLY }, { "groupqry", ICMP6_MEMBERSHIP_QUERY }, { "listqry", MLD_LISTENER_QUERY }, { "grouprep", ICMP6_MEMBERSHIP_REPORT }, { "listenrep", MLD_LISTENER_REPORT }, { "groupterm", ICMP6_MEMBERSHIP_REDUCTION }, { "listendone", MLD_LISTENER_DONE }, { "routersol", ND_ROUTER_SOLICIT }, { "routeradv", ND_ROUTER_ADVERT }, { "neighbrsol", ND_NEIGHBOR_SOLICIT }, { "neighbradv", ND_NEIGHBOR_ADVERT }, { "redir", ND_REDIRECT }, { "routrrenum", ICMP6_ROUTER_RENUMBERING }, { "wrureq", ICMP6_WRUREQUEST }, { "wrurep", ICMP6_WRUREPLY 
}, { "fqdnreq", ICMP6_FQDN_QUERY }, { "fqdnrep", ICMP6_FQDN_REPLY }, { "niqry", ICMP6_NI_QUERY }, { "nirep", ICMP6_NI_REPLY }, { "mtraceresp", MLD_MTRACE_RESP }, { "mtrace", MLD_MTRACE } }; static const struct icmpcodeent icmp_code[] = { { "net-unr", ICMP_UNREACH, ICMP_UNREACH_NET }, { "host-unr", ICMP_UNREACH, ICMP_UNREACH_HOST }, { "proto-unr", ICMP_UNREACH, ICMP_UNREACH_PROTOCOL }, { "port-unr", ICMP_UNREACH, ICMP_UNREACH_PORT }, { "needfrag", ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG }, { "srcfail", ICMP_UNREACH, ICMP_UNREACH_SRCFAIL }, { "net-unk", ICMP_UNREACH, ICMP_UNREACH_NET_UNKNOWN }, { "host-unk", ICMP_UNREACH, ICMP_UNREACH_HOST_UNKNOWN }, { "isolate", ICMP_UNREACH, ICMP_UNREACH_ISOLATED }, { "net-prohib", ICMP_UNREACH, ICMP_UNREACH_NET_PROHIB }, { "host-prohib", ICMP_UNREACH, ICMP_UNREACH_HOST_PROHIB }, { "net-tos", ICMP_UNREACH, ICMP_UNREACH_TOSNET }, { "host-tos", ICMP_UNREACH, ICMP_UNREACH_TOSHOST }, { "filter-prohib", ICMP_UNREACH, ICMP_UNREACH_FILTER_PROHIB }, { "host-preced", ICMP_UNREACH, ICMP_UNREACH_HOST_PRECEDENCE }, { "cutoff-preced", ICMP_UNREACH, ICMP_UNREACH_PRECEDENCE_CUTOFF }, { "redir-net", ICMP_REDIRECT, ICMP_REDIRECT_NET }, { "redir-host", ICMP_REDIRECT, ICMP_REDIRECT_HOST }, { "redir-tos-net", ICMP_REDIRECT, ICMP_REDIRECT_TOSNET }, { "redir-tos-host", ICMP_REDIRECT, ICMP_REDIRECT_TOSHOST }, { "normal-adv", ICMP_ROUTERADVERT, ICMP_ROUTERADVERT_NORMAL }, { "common-adv", ICMP_ROUTERADVERT, ICMP_ROUTERADVERT_NOROUTE_COMMON }, { "transit", ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS }, { "reassemb", ICMP_TIMXCEED, ICMP_TIMXCEED_REASS }, { "badhead", ICMP_PARAMPROB, ICMP_PARAMPROB_ERRATPTR }, { "optmiss", ICMP_PARAMPROB, ICMP_PARAMPROB_OPTABSENT }, { "badlen", ICMP_PARAMPROB, ICMP_PARAMPROB_LENGTH }, { "unknown-ind", ICMP_PHOTURIS, ICMP_PHOTURIS_UNKNOWN_INDEX }, { "auth-fail", ICMP_PHOTURIS, ICMP_PHOTURIS_AUTH_FAILED }, { "decrypt-fail", ICMP_PHOTURIS, ICMP_PHOTURIS_DECRYPT_FAILED } }; static const struct icmpcodeent icmp6_code[] = { { "admin-unr", ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADMIN }, { "noroute-unr", ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOROUTE }, { "notnbr-unr", ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOTNEIGHBOR }, { "beyond-unr", ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_BEYONDSCOPE }, { "addr-unr", ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR }, { "port-unr", ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT }, { "transit", ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_TRANSIT }, { "reassemb", ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_REASSEMBLY }, { "badhead", ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER }, { "nxthdr", ICMP6_PARAM_PROB, ICMP6_PARAMPROB_NEXTHEADER }, { "redironlink", ND_REDIRECT, ND_REDIRECT_ONLINK }, { "redirrouter", ND_REDIRECT, ND_REDIRECT_ROUTER } }; const struct pf_timeout pf_timeouts[] = { { "tcp.first", PFTM_TCP_FIRST_PACKET }, { "tcp.opening", PFTM_TCP_OPENING }, { "tcp.established", PFTM_TCP_ESTABLISHED }, { "tcp.closing", PFTM_TCP_CLOSING }, { "tcp.finwait", PFTM_TCP_FIN_WAIT }, { "tcp.closed", PFTM_TCP_CLOSED }, { "tcp.tsdiff", PFTM_TS_DIFF }, { "udp.first", PFTM_UDP_FIRST_PACKET }, { "udp.single", PFTM_UDP_SINGLE }, { "udp.multiple", PFTM_UDP_MULTIPLE }, { "icmp.first", PFTM_ICMP_FIRST_PACKET }, { "icmp.error", PFTM_ICMP_ERROR_REPLY }, { "other.first", PFTM_OTHER_FIRST_PACKET }, { "other.single", PFTM_OTHER_SINGLE }, { "other.multiple", PFTM_OTHER_MULTIPLE }, { "frag", PFTM_FRAG }, { "interval", PFTM_INTERVAL }, { "adaptive.start", PFTM_ADAPTIVE_START }, { "adaptive.end", PFTM_ADAPTIVE_END }, { "src.track", PFTM_SRC_NODE }, { NULL, 0 } }; static struct hsearch_data 
isgroup_map; static __attribute__((constructor)) void pfctl_parser_init(void) { /* * As hdestroy() will never be called on these tables, it will be * safe to use references into the stored data as keys. */ if (hcreate_r(0, &isgroup_map) == 0) err(1, "Failed to create interface group query response map"); } const struct icmptypeent * geticmptypebynumber(u_int8_t type, sa_family_t af) { unsigned int i; if (af != AF_INET6) { for (i=0; i < nitems(icmp_type); i++) { if (type == icmp_type[i].type) return (&icmp_type[i]); } } else { for (i=0; i < nitems(icmp6_type); i++) { if (type == icmp6_type[i].type) return (&icmp6_type[i]); } } return (NULL); } const struct icmptypeent * geticmptypebyname(char *w, sa_family_t af) { unsigned int i; if (af != AF_INET6) { for (i=0; i < nitems(icmp_type); i++) { if (!strcmp(w, icmp_type[i].name)) return (&icmp_type[i]); } } else { for (i=0; i < nitems(icmp6_type); i++) { if (!strcmp(w, icmp6_type[i].name)) return (&icmp6_type[i]); } } return (NULL); } const struct icmpcodeent * geticmpcodebynumber(u_int8_t type, u_int8_t code, sa_family_t af) { unsigned int i; if (af != AF_INET6) { for (i=0; i < nitems(icmp_code); i++) { if (type == icmp_code[i].type && code == icmp_code[i].code) return (&icmp_code[i]); } } else { for (i=0; i < nitems(icmp6_code); i++) { if (type == icmp6_code[i].type && code == icmp6_code[i].code) return (&icmp6_code[i]); } } return (NULL); } const struct icmpcodeent * geticmpcodebyname(u_long type, char *w, sa_family_t af) { unsigned int i; if (af != AF_INET6) { for (i=0; i < nitems(icmp_code); i++) { if (type == icmp_code[i].type && !strcmp(w, icmp_code[i].name)) return (&icmp_code[i]); } } else { for (i=0; i < nitems(icmp6_code); i++) { if (type == icmp6_code[i].type && !strcmp(w, icmp6_code[i].name)) return (&icmp6_code[i]); } } return (NULL); } void print_op(u_int8_t op, const char *a1, const char *a2) { if (op == PF_OP_IRG) printf(" %s >< %s", a1, a2); else if (op == PF_OP_XRG) printf(" %s <> %s", a1, a2); else if (op == PF_OP_EQ) printf(" = %s", a1); else if (op == PF_OP_NE) printf(" != %s", a1); else if (op == PF_OP_LT) printf(" < %s", a1); else if (op == PF_OP_LE) printf(" <= %s", a1); else if (op == PF_OP_GT) printf(" > %s", a1); else if (op == PF_OP_GE) printf(" >= %s", a1); else if (op == PF_OP_RRG) printf(" %s:%s", a1, a2); } void print_port(u_int8_t op, u_int16_t p1, u_int16_t p2, const char *proto, int numeric) { char a1[6], a2[6]; struct servent *s; if (!numeric) s = getservbyport(p1, proto); else s = NULL; p1 = ntohs(p1); p2 = ntohs(p2); snprintf(a1, sizeof(a1), "%u", p1); snprintf(a2, sizeof(a2), "%u", p2); printf(" port"); if (s != NULL && (op == PF_OP_EQ || op == PF_OP_NE)) print_op(op, s->s_name, a2); else print_op(op, a1, a2); } void print_ugid(u_int8_t op, unsigned u1, unsigned u2, const char *t, unsigned umax) { char a1[11], a2[11]; snprintf(a1, sizeof(a1), "%u", u1); snprintf(a2, sizeof(a2), "%u", u2); printf(" %s", t); if (u1 == umax && (op == PF_OP_EQ || op == PF_OP_NE)) print_op(op, "unknown", a2); else print_op(op, a1, a2); } void print_flags(u_int8_t f) { int i; for (i = 0; tcpflags[i]; ++i) if (f & (1 << i)) printf("%c", tcpflags[i]); } void print_fromto(struct pf_rule_addr *src, pf_osfp_t osfp, struct pf_rule_addr *dst, sa_family_t af, u_int8_t proto, int verbose, int numeric) { char buf[PF_OSFP_LEN*3]; if (src->addr.type == PF_ADDR_ADDRMASK && dst->addr.type == PF_ADDR_ADDRMASK && PF_AZERO(&src->addr.v.a.addr, AF_INET6) && PF_AZERO(&src->addr.v.a.mask, AF_INET6) && PF_AZERO(&dst->addr.v.a.addr, AF_INET6) && 
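/*
 * XXX Editor's note (not part of the patch): example lookups against
 * the tables and helpers above:
 *	geticmptypebyname("echoreq", AF_INET)   -> entry for ICMP_ECHO
 *	geticmptypebyname("echoreq", AF_INET6)  -> entry for ICMP6_ECHO_REQUEST
 *	geticmpcodebyname(ICMP_UNREACH, "port-unr", AF_INET)
 *						-> entry for ICMP_UNREACH_PORT
 * print_flags() walks tcpflags ("FSRPAUEW") bit by bit, so a TCP
 * flags byte of 0x12 (SYN|ACK) prints as "SA".
 */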
PF_AZERO(&dst->addr.v.a.mask, AF_INET6) && !src->neg && !dst->neg && !src->port_op && !dst->port_op && osfp == PF_OSFP_ANY) printf(" all"); else { printf(" from "); if (src->neg) printf("! "); print_addr(&src->addr, af, verbose); if (src->port_op) print_port(src->port_op, src->port[0], src->port[1], proto == IPPROTO_TCP ? "tcp" : "udp", numeric); if (osfp != PF_OSFP_ANY) printf(" os \"%s\"", pfctl_lookup_fingerprint(osfp, buf, sizeof(buf))); printf(" to "); if (dst->neg) printf("! "); print_addr(&dst->addr, af, verbose); if (dst->port_op) print_port(dst->port_op, dst->port[0], dst->port[1], proto == IPPROTO_TCP ? "tcp" : "udp", numeric); } } void print_pool(struct pfctl_pool *pool, u_int16_t p1, u_int16_t p2, sa_family_t af, int id) { struct pf_pooladdr *pooladdr; if ((TAILQ_FIRST(&pool->list) != NULL) && TAILQ_NEXT(TAILQ_FIRST(&pool->list), entries) != NULL) printf("{ "); TAILQ_FOREACH(pooladdr, &pool->list, entries){ switch (id) { case PF_NAT: case PF_RDR: case PF_BINAT: print_addr(&pooladdr->addr, af, 0); break; case PF_PASS: + case PF_MATCH: if (PF_AZERO(&pooladdr->addr.v.a.addr, af)) printf("%s", pooladdr->ifname); else { printf("(%s ", pooladdr->ifname); print_addr(&pooladdr->addr, af, 0); printf(")"); } break; default: break; } if (TAILQ_NEXT(pooladdr, entries) != NULL) printf(", "); else if (TAILQ_NEXT(TAILQ_FIRST(&pool->list), entries) != NULL) printf(" }"); } switch (id) { case PF_NAT: if ((p1 != PF_NAT_PROXY_PORT_LOW || p2 != PF_NAT_PROXY_PORT_HIGH) && (p1 != 0 || p2 != 0)) { if (p1 == p2) printf(" port %u", p1); else printf(" port %u:%u", p1, p2); } break; case PF_RDR: if (p1) { printf(" port %u", p1); if (p2 && (p2 != p1)) printf(":%u", p2); } break; default: break; } switch (pool->opts & PF_POOL_TYPEMASK) { case PF_POOL_NONE: break; case PF_POOL_BITMASK: printf(" bitmask"); break; case PF_POOL_RANDOM: printf(" random"); break; case PF_POOL_SRCHASH: printf(" source-hash 0x%08x%08x%08x%08x", pool->key.key32[0], pool->key.key32[1], pool->key.key32[2], pool->key.key32[3]); break; case PF_POOL_ROUNDROBIN: printf(" round-robin"); break; } if (pool->opts & PF_POOL_STICKYADDR) printf(" sticky-address"); if (id == PF_NAT && p1 == 0 && p2 == 0) printf(" static-port"); if (pool->mape.offset > 0) printf(" map-e-portset %u/%u/%u", pool->mape.offset, pool->mape.psidlen, pool->mape.psid); } const char * const pf_reasons[PFRES_MAX+1] = PFRES_NAMES; const char * const pf_lcounters[LCNT_MAX+1] = LCNT_NAMES; const char * const pf_fcounters[FCNT_MAX+1] = FCNT_NAMES; const char * const pf_scounters[FCNT_MAX+1] = FCNT_NAMES; void print_status(struct pfctl_status *s, struct pfctl_syncookies *cookies, int opts) { struct pfctl_status_counter *c; char statline[80], *running; time_t runtime; int i; char buf[PF_MD5_DIGEST_LENGTH * 2 + 1]; static const char hex[] = "0123456789abcdef"; runtime = time(NULL) - s->since; running = s->running ? 
"Enabled" : "Disabled"; if (s->since) { unsigned int sec, min, hrs, day = runtime; sec = day % 60; day /= 60; min = day % 60; day /= 60; hrs = day % 24; day /= 24; snprintf(statline, sizeof(statline), "Status: %s for %u days %.2u:%.2u:%.2u", running, day, hrs, min, sec); } else snprintf(statline, sizeof(statline), "Status: %s", running); printf("%-44s", statline); switch (s->debug) { case PF_DEBUG_NONE: printf("%15s\n\n", "Debug: None"); break; case PF_DEBUG_URGENT: printf("%15s\n\n", "Debug: Urgent"); break; case PF_DEBUG_MISC: printf("%15s\n\n", "Debug: Misc"); break; case PF_DEBUG_NOISY: printf("%15s\n\n", "Debug: Loud"); break; } if (opts & PF_OPT_VERBOSE) { printf("Hostid: 0x%08x\n", s->hostid); for (i = 0; i < PF_MD5_DIGEST_LENGTH; i++) { buf[i + i] = hex[s->pf_chksum[i] >> 4]; buf[i + i + 1] = hex[s->pf_chksum[i] & 0x0f]; } buf[i + i] = '\0'; printf("Checksum: 0x%s\n\n", buf); } if (s->ifname[0] != 0) { printf("Interface Stats for %-16s %5s %16s\n", s->ifname, "IPv4", "IPv6"); printf(" %-25s %14llu %16llu\n", "Bytes In", (unsigned long long)s->bcounters[0][0], (unsigned long long)s->bcounters[1][0]); printf(" %-25s %14llu %16llu\n", "Bytes Out", (unsigned long long)s->bcounters[0][1], (unsigned long long)s->bcounters[1][1]); printf(" Packets In\n"); printf(" %-23s %14llu %16llu\n", "Passed", (unsigned long long)s->pcounters[0][0][PF_PASS], (unsigned long long)s->pcounters[1][0][PF_PASS]); printf(" %-23s %14llu %16llu\n", "Blocked", (unsigned long long)s->pcounters[0][0][PF_DROP], (unsigned long long)s->pcounters[1][0][PF_DROP]); printf(" Packets Out\n"); printf(" %-23s %14llu %16llu\n", "Passed", (unsigned long long)s->pcounters[0][1][PF_PASS], (unsigned long long)s->pcounters[1][1][PF_PASS]); printf(" %-23s %14llu %16llu\n\n", "Blocked", (unsigned long long)s->pcounters[0][1][PF_DROP], (unsigned long long)s->pcounters[1][1][PF_DROP]); } printf("%-27s %14s %16s\n", "State Table", "Total", "Rate"); printf(" %-25s %14ju %14s\n", "current entries", s->states, ""); TAILQ_FOREACH(c, &s->fcounters, entry) { printf(" %-25s %14ju ", c->name, c->counter); if (runtime > 0) printf("%14.1f/s\n", (double)c->counter / (double)runtime); else printf("%14s\n", ""); } if (opts & PF_OPT_VERBOSE) { printf("Source Tracking Table\n"); printf(" %-25s %14ju %14s\n", "current entries", s->src_nodes, ""); TAILQ_FOREACH(c, &s->scounters, entry) { printf(" %-25s %14ju ", c->name, c->counter); if (runtime > 0) printf("%14.1f/s\n", (double)c->counter / (double)runtime); else printf("%14s\n", ""); } } printf("Counters\n"); TAILQ_FOREACH(c, &s->counters, entry) { printf(" %-25s %14ju ", c->name, c->counter); if (runtime > 0) printf("%14.1f/s\n", (double)c->counter / (double)runtime); else printf("%14s\n", ""); } if (opts & PF_OPT_VERBOSE) { printf("Limit Counters\n"); TAILQ_FOREACH(c, &s->lcounters, entry) { printf(" %-25s %14ju ", c->name, c->counter); if (runtime > 0) printf("%14.1f/s\n", (double)c->counter / (double)runtime); else printf("%14s\n", ""); } printf("Syncookies\n"); assert(cookies->mode <= PFCTL_SYNCOOKIES_ADAPTIVE); printf(" %-25s %s\n", "mode", PFCTL_SYNCOOKIES_MODE_NAMES[cookies->mode]); printf(" %-25s %s\n", "active", s->syncookies_active ? "active" : "inactive"); + printf("Reassemble %24s %s\n", + s->reass & PF_REASS_ENABLED ? "yes" : "no", + s->reass & PF_REASS_NODF ? "no-df" : "" + ); } } void print_running(struct pfctl_status *status) { printf("%s\n", status->running ? 
"Enabled" : "Disabled"); } void print_src_node(struct pf_src_node *sn, int opts) { struct pf_addr_wrap aw; int min, sec; memset(&aw, 0, sizeof(aw)); if (sn->af == AF_INET) aw.v.a.mask.addr32[0] = 0xffffffff; else memset(&aw.v.a.mask, 0xff, sizeof(aw.v.a.mask)); aw.v.a.addr = sn->addr; print_addr(&aw, sn->af, opts & PF_OPT_VERBOSE2); printf(" -> "); aw.v.a.addr = sn->raddr; print_addr(&aw, sn->af, opts & PF_OPT_VERBOSE2); printf(" ( states %u, connections %u, rate %u.%u/%us )\n", sn->states, sn->conn, sn->conn_rate.count / 1000, (sn->conn_rate.count % 1000) / 100, sn->conn_rate.seconds); if (opts & PF_OPT_VERBOSE) { sec = sn->creation % 60; sn->creation /= 60; min = sn->creation % 60; sn->creation /= 60; printf(" age %.2u:%.2u:%.2u", sn->creation, min, sec); if (sn->states == 0) { sec = sn->expire % 60; sn->expire /= 60; min = sn->expire % 60; sn->expire /= 60; printf(", expires in %.2u:%.2u:%.2u", sn->expire, min, sec); } printf(", %llu pkts, %llu bytes", #ifdef __FreeBSD__ (unsigned long long)(sn->packets[0] + sn->packets[1]), (unsigned long long)(sn->bytes[0] + sn->bytes[1])); #else sn->packets[0] + sn->packets[1], sn->bytes[0] + sn->bytes[1]); #endif switch (sn->ruletype) { case PF_NAT: if (sn->rule.nr != -1) printf(", nat rule %u", sn->rule.nr); break; case PF_RDR: if (sn->rule.nr != -1) printf(", rdr rule %u", sn->rule.nr); break; case PF_PASS: + case PF_MATCH: if (sn->rule.nr != -1) printf(", filter rule %u", sn->rule.nr); break; } printf("\n"); } } static void print_eth_addr(const struct pfctl_eth_addr *a) { int i, masklen = ETHER_ADDR_LEN * 8; bool seen_unset = false; for (i = 0; i < ETHER_ADDR_LEN; i++) { if (a->addr[i] != 0) break; } /* Unset, so don't print anything. */ if (i == ETHER_ADDR_LEN) return; printf("%s%02x:%02x:%02x:%02x:%02x:%02x", a->neg ? "! " : "", a->addr[0], a->addr[1], a->addr[2], a->addr[3], a->addr[4], a->addr[5]); for (i = 0; i < (ETHER_ADDR_LEN * 8); i++) { bool isset = a->mask[i / 8] & (1 << i % 8); if (! seen_unset) { if (isset) continue; seen_unset = true; masklen = i; } else { /* Not actually a continuous mask, so print the whole * thing. */ if (isset) break; continue; } } if (masklen == (ETHER_ADDR_LEN * 8)) return; if (i == (ETHER_ADDR_LEN * 8)) { printf("/%d", masklen); return; } printf("&%02x:%02x:%02x:%02x:%02x:%02x", a->mask[0], a->mask[1], a->mask[2], a->mask[3], a->mask[4], a->mask[5]); } void print_eth_rule(struct pfctl_eth_rule *r, const char *anchor_call, int rule_numbers) { static const char *actiontypes[] = { "pass", "block", "", "", "", "", "", "", "", "", "", "", "match" }; if (rule_numbers) printf("@%u ", r->nr); printf("ether "); if (anchor_call[0]) { if (anchor_call[0] == '_') { printf("anchor"); } else printf("anchor \"%s\"", anchor_call); } else { printf("%s", actiontypes[r->action]); } if (r->direction == PF_IN) printf(" in"); else if (r->direction == PF_OUT) printf(" out"); if (r->quick) printf(" quick"); if (r->ifname[0]) { if (r->ifnot) printf(" on ! %s", r->ifname); else printf(" on %s", r->ifname); } if (r->bridge_to[0]) printf(" bridge-to %s", r->bridge_to); if (r->proto) printf(" proto 0x%04x", r->proto); if (r->src.isset) { printf(" from "); print_eth_addr(&r->src); } if (r->dst.isset) { printf(" to "); print_eth_addr(&r->dst); } printf(" l3"); print_fromto(&r->ipsrc, PF_OSFP_ANY, &r->ipdst, r->proto == ETHERTYPE_IP ? 
AF_INET : AF_INET6, 0, 0, 0); if (r->qname[0]) printf(" queue %s", r->qname); if (r->tagname[0]) printf(" tag %s", r->tagname); if (r->match_tagname[0]) { if (r->match_tag_not) printf(" !"); printf(" tagged %s", r->match_tagname); } if (r->dnpipe) printf(" %s %d", r->dnflags & PFRULE_DN_IS_PIPE ? "dnpipe" : "dnqueue", r->dnpipe); } void print_rule(struct pfctl_rule *r, const char *anchor_call, int verbose, int numeric) { static const char *actiontypes[] = { "pass", "block", "scrub", - "no scrub", "nat", "no nat", "binat", "no binat", "rdr", "no rdr" }; + "no scrub", "nat", "no nat", "binat", "no binat", "rdr", "no rdr", + "", "", "match"}; static const char *anchortypes[] = { "anchor", "anchor", "anchor", "anchor", "nat-anchor", "nat-anchor", "binat-anchor", "binat-anchor", "rdr-anchor", "rdr-anchor" }; int i, opts; char *p; if (verbose) printf("@%d ", r->nr); if (r->action == PF_MATCH) printf("match"); else if (r->action > PF_NORDR) printf("action(%d)", r->action); else if (anchor_call[0]) { p = strrchr(anchor_call, '/'); if (p ? p[1] == '_' : anchor_call[0] == '_') printf("%s", anchortypes[r->action]); else printf("%s \"%s\"", anchortypes[r->action], anchor_call); } else { printf("%s", actiontypes[r->action]); if (r->natpass) printf(" pass"); } if (r->action == PF_DROP) { if (r->rule_flag & PFRULE_RETURN) printf(" return"); else if (r->rule_flag & PFRULE_RETURNRST) { if (!r->return_ttl) printf(" return-rst"); else printf(" return-rst(ttl %d)", r->return_ttl); } else if (r->rule_flag & PFRULE_RETURNICMP) { const struct icmpcodeent *ic, *ic6; ic = geticmpcodebynumber(r->return_icmp >> 8, r->return_icmp & 255, AF_INET); ic6 = geticmpcodebynumber(r->return_icmp6 >> 8, r->return_icmp6 & 255, AF_INET6); switch (r->af) { case AF_INET: printf(" return-icmp"); if (ic == NULL) printf("(%u)", r->return_icmp & 255); else printf("(%s)", ic->name); break; case AF_INET6: printf(" return-icmp6"); if (ic6 == NULL) printf("(%u)", r->return_icmp6 & 255); else printf("(%s)", ic6->name); break; default: printf(" return-icmp"); if (ic == NULL) printf("(%u, ", r->return_icmp & 255); else printf("(%s, ", ic->name); if (ic6 == NULL) printf("%u)", r->return_icmp6 & 255); else printf("%s)", ic6->name); break; } } else printf(" drop"); } if (r->direction == PF_IN) printf(" in"); else if (r->direction == PF_OUT) printf(" out"); if (r->log) { printf(" log"); if (r->log & ~PF_LOG || r->logif) { int count = 0; printf(" ("); if (r->log & PF_LOG_ALL) printf("%sall", count++ ? ", " : ""); if (r->log & PF_LOG_SOCKET_LOOKUP) printf("%suser", count++ ? ", " : ""); if (r->logif) printf("%sto pflog%u", count++ ? ", " : "", r->logif); printf(")"); } } if (r->quick) printf(" quick"); if (r->ifname[0]) { if (r->ifnot) printf(" on ! 
%s", r->ifname); else printf(" on %s", r->ifname); } if (r->rt) { if (r->rt == PF_ROUTETO) printf(" route-to"); else if (r->rt == PF_REPLYTO) printf(" reply-to"); else if (r->rt == PF_DUPTO) printf(" dup-to"); printf(" "); print_pool(&r->rpool, 0, 0, r->af, PF_PASS); } if (r->af) { if (r->af == AF_INET) printf(" inet"); else printf(" inet6"); } if (r->proto) { const char *protoname; if ((protoname = pfctl_proto2name(r->proto)) != NULL) printf(" proto %s", protoname); else printf(" proto %u", r->proto); } print_fromto(&r->src, r->os_fingerprint, &r->dst, r->af, r->proto, verbose, numeric); if (r->uid.op) print_ugid(r->uid.op, r->uid.uid[0], r->uid.uid[1], "user", UID_MAX); if (r->gid.op) print_ugid(r->gid.op, r->gid.gid[0], r->gid.gid[1], "group", GID_MAX); if (r->flags || r->flagset) { printf(" flags "); print_flags(r->flags); printf("/"); print_flags(r->flagset); - } else if (r->action == PF_PASS && + } else if ((r->action == PF_PASS || r->action == PF_MATCH) && (!r->proto || r->proto == IPPROTO_TCP) && !(r->rule_flag & PFRULE_FRAGMENT) && !anchor_call[0] && r->keep_state) printf(" flags any"); if (r->type) { const struct icmptypeent *it; it = geticmptypebynumber(r->type-1, r->af); if (r->af != AF_INET6) printf(" icmp-type"); else printf(" icmp6-type"); if (it != NULL) printf(" %s", it->name); else printf(" %u", r->type-1); if (r->code) { const struct icmpcodeent *ic; ic = geticmpcodebynumber(r->type-1, r->code-1, r->af); if (ic != NULL) printf(" code %s", ic->name); else printf(" code %u", r->code-1); } } if (r->tos) printf(" tos 0x%2.2x", r->tos); if (r->prio) printf(" prio %u", r->prio == PF_PRIO_ZERO ? 0 : r->prio); if (r->scrub_flags & PFSTATE_SETMASK) { char *comma = ""; printf(" set ("); if (r->scrub_flags & PFSTATE_SETPRIO) { if (r->set_prio[0] == r->set_prio[1]) printf("%s prio %u", comma, r->set_prio[0]); else printf("%s prio(%u, %u)", comma, r->set_prio[0], r->set_prio[1]); comma = ","; } + if (r->scrub_flags & PFSTATE_SETTOS) { + printf("%s tos 0x%2.2x", comma, r->set_tos); + comma = ","; + } printf(" )"); } if (!r->keep_state && r->action == PF_PASS && !anchor_call[0]) printf(" no state"); else if (r->keep_state == PF_STATE_NORMAL) printf(" keep state"); else if (r->keep_state == PF_STATE_MODULATE) printf(" modulate state"); else if (r->keep_state == PF_STATE_SYNPROXY) printf(" synproxy state"); if (r->prob) { char buf[20]; snprintf(buf, sizeof(buf), "%f", r->prob*100.0/(UINT_MAX+1.0)); for (i = strlen(buf)-1; i > 0; i--) { if (buf[i] == '0') buf[i] = '\0'; else { if (buf[i] == '.') buf[i] = '\0'; break; } } printf(" probability %s%%", buf); } opts = 0; if (r->max_states || r->max_src_nodes || r->max_src_states) opts = 1; if (r->rule_flag & PFRULE_NOSYNC) opts = 1; if (r->rule_flag & PFRULE_SRCTRACK) opts = 1; if (r->rule_flag & PFRULE_IFBOUND) opts = 1; if (r->rule_flag & PFRULE_STATESLOPPY) opts = 1; for (i = 0; !opts && i < PFTM_MAX; ++i) if (r->timeout[i]) opts = 1; if (opts) { printf(" ("); if (r->max_states) { printf("max %u", r->max_states); opts = 0; } if (r->rule_flag & PFRULE_NOSYNC) { if (!opts) printf(", "); printf("no-sync"); opts = 0; } if (r->rule_flag & PFRULE_SRCTRACK) { if (!opts) printf(", "); printf("source-track"); if (r->rule_flag & PFRULE_RULESRCTRACK) printf(" rule"); else printf(" global"); opts = 0; } if (r->max_src_states) { if (!opts) printf(", "); printf("max-src-states %u", r->max_src_states); opts = 0; } if (r->max_src_conn) { if (!opts) printf(", "); printf("max-src-conn %u", r->max_src_conn); opts = 0; } if (r->max_src_conn_rate.limit) { if 
(!opts) printf(", "); printf("max-src-conn-rate %u/%u", r->max_src_conn_rate.limit, r->max_src_conn_rate.seconds); opts = 0; } if (r->max_src_nodes) { if (!opts) printf(", "); printf("max-src-nodes %u", r->max_src_nodes); opts = 0; } if (r->overload_tblname[0]) { if (!opts) printf(", "); printf("overload <%s>", r->overload_tblname); if (r->flush) printf(" flush"); if (r->flush & PF_FLUSH_GLOBAL) printf(" global"); } if (r->rule_flag & PFRULE_IFBOUND) { if (!opts) printf(", "); printf("if-bound"); opts = 0; } if (r->rule_flag & PFRULE_STATESLOPPY) { if (!opts) printf(", "); printf("sloppy"); opts = 0; } for (i = 0; i < PFTM_MAX; ++i) if (r->timeout[i]) { int j; if (!opts) printf(", "); opts = 0; for (j = 0; pf_timeouts[j].name != NULL; ++j) if (pf_timeouts[j].timeout == i) break; printf("%s %u", pf_timeouts[j].name == NULL ? "inv.timeout" : pf_timeouts[j].name, r->timeout[i]); } printf(")"); } - if (r->rule_flag & PFRULE_FRAGMENT) - printf(" fragment"); - if (r->rule_flag & PFRULE_NODF) - printf(" no-df"); - if (r->rule_flag & PFRULE_RANDOMID) - printf(" random-id"); - if (r->min_ttl) - printf(" min-ttl %d", r->min_ttl); - if (r->max_mss) - printf(" max-mss %d", r->max_mss); - if (r->rule_flag & PFRULE_SET_TOS) - printf(" set-tos 0x%2.2x", r->set_tos); if (r->allow_opts) printf(" allow-opts"); + if (r->rule_flag & PFRULE_FRAGMENT) + printf(" fragment"); if (r->action == PF_SCRUB) { + /* Scrub flags for old-style scrub. */ + if (r->rule_flag & PFRULE_NODF) + printf(" no-df"); + if (r->rule_flag & PFRULE_RANDOMID) + printf(" random-id"); + if (r->min_ttl) + printf(" min-ttl %d", r->min_ttl); + if (r->max_mss) + printf(" max-mss %d", r->max_mss); + if (r->rule_flag & PFRULE_SET_TOS) + printf(" set-tos 0x%2.2x", r->set_tos); if (r->rule_flag & PFRULE_REASSEMBLE_TCP) printf(" reassemble tcp"); - + /* The PFRULE_FRAGMENT_NOREASS is set on all rules by default! */ printf(" fragment %sreassemble", r->rule_flag & PFRULE_FRAGMENT_NOREASS ? "no " : ""); + } else if (r->scrub_flags & PFSTATE_SCRUBMASK || r->min_ttl || r->max_mss) { + /* Scrub actions on normal rules. */ + printf(" scrub("); + if (r->scrub_flags & PFSTATE_NODF) + printf(" no-df"); + if (r->scrub_flags & PFSTATE_RANDOMID) + printf(" random-id"); + if (r->min_ttl) + printf(" min-ttl %d", r->min_ttl); + if (r->scrub_flags & PFSTATE_SETTOS) + printf(" set-tos 0x%2.2x", r->set_tos); + if (r->scrub_flags & PFSTATE_SCRUB_TCP) + printf(" reassemble tcp"); + if (r->max_mss) + printf(" max-mss %d", r->max_mss); + printf(")"); } i = 0; while (r->label[i][0]) printf(" label \"%s\"", r->label[i++]); if (r->ridentifier) printf(" ridentifier %u", r->ridentifier); /* Only dnrpipe as we might do (0, 42) to only queue return traffic. */ if (r->dnrpipe) printf(" %s(%d, %d)", r->free_flags & PFRULE_DN_IS_PIPE ? "dnpipe" : "dnqueue", r->dnpipe, r->dnrpipe); else if (r->dnpipe) printf(" %s %d", r->free_flags & PFRULE_DN_IS_PIPE ? 
"dnpipe" : "dnqueue", r->dnpipe); if (r->qname[0] && r->pqname[0]) printf(" queue(%s, %s)", r->qname, r->pqname); else if (r->qname[0]) printf(" queue %s", r->qname); if (r->tagname[0]) printf(" tag %s", r->tagname); if (r->match_tagname[0]) { if (r->match_tag_not) printf(" !"); printf(" tagged %s", r->match_tagname); } if (r->rtableid != -1) printf(" rtable %u", r->rtableid); if (r->divert.port) { #ifdef __FreeBSD__ printf(" divert-to %u", ntohs(r->divert.port)); #else if (PF_AZERO(&r->divert.addr, r->af)) { printf(" divert-reply"); } else { /* XXX cut&paste from print_addr */ char buf[48]; printf(" divert-to "); if (inet_ntop(r->af, &r->divert.addr, buf, sizeof(buf)) == NULL) printf("?"); else printf("%s", buf); printf(" port %u", ntohs(r->divert.port)); } #endif } if (!anchor_call[0] && (r->action == PF_NAT || r->action == PF_BINAT || r->action == PF_RDR)) { printf(" -> "); print_pool(&r->rpool, r->rpool.proxy_port[0], r->rpool.proxy_port[1], r->af, r->action); } } void print_tabledef(const char *name, int flags, int addrs, struct node_tinithead *nodes) { struct node_tinit *ti, *nti; struct node_host *h; printf("table <%s>", name); if (flags & PFR_TFLAG_CONST) printf(" const"); if (flags & PFR_TFLAG_PERSIST) printf(" persist"); if (flags & PFR_TFLAG_COUNTERS) printf(" counters"); SIMPLEQ_FOREACH(ti, nodes, entries) { if (ti->file) { printf(" file \"%s\"", ti->file); continue; } printf(" {"); for (;;) { for (h = ti->host; h != NULL; h = h->next) { printf(h->not ? " !" : " "); print_addr(&h->addr, h->af, 0); } nti = SIMPLEQ_NEXT(ti, entries); if (nti != NULL && nti->file == NULL) ti = nti; /* merge lists */ else break; } printf(" }"); } if (addrs && SIMPLEQ_EMPTY(nodes)) printf(" { }"); printf("\n"); } int parse_flags(char *s) { char *p, *q; u_int8_t f = 0; for (p = s; *p; p++) { if ((q = strchr(tcpflags, *p)) == NULL) return -1; else f |= 1 << (q - tcpflags); } return (f ? f : PF_TH_ALL); } void set_ipmask(struct node_host *h, u_int8_t b) { struct pf_addr *m, *n; int i, j = 0; m = &h->addr.v.a.mask; memset(m, 0, sizeof(*m)); while (b >= 32) { m->addr32[j++] = 0xffffffff; b -= 32; } for (i = 31; i > 31-b; --i) m->addr32[j] |= (1 << i); if (b) m->addr32[j] = htonl(m->addr32[j]); /* Mask off bits of the address that will never be used. */ n = &h->addr.v.a.addr; if (h->addr.type == PF_ADDR_ADDRMASK) for (i = 0; i < 4; i++) n->addr32[i] = n->addr32[i] & m->addr32[i]; } int check_netmask(struct node_host *h, sa_family_t af) { struct node_host *n = NULL; struct pf_addr *m; for (n = h; n != NULL; n = n->next) { if (h->addr.type == PF_ADDR_TABLE) continue; m = &h->addr.v.a.mask; /* fix up netmask for dynaddr */ if (af == AF_INET && h->addr.type == PF_ADDR_DYNIFTL && unmask(m, AF_INET6) > 32) set_ipmask(n, 32); /* netmasks > 32 bit are invalid on v4 */ if (af == AF_INET && (m->addr32[1] || m->addr32[2] || m->addr32[3])) { fprintf(stderr, "netmask %u invalid for IPv4 address\n", unmask(m, AF_INET6)); return (1); } } return (0); } /* interface lookup routines */ static struct node_host *iftab; /* * Retrieve the list of groups this interface is a member of and make sure * each group is in the group map. 
*/ static void ifa_add_groups_to_map(char *ifa_name) { int s, len; struct ifgroupreq ifgr; struct ifg_req *ifg; s = get_query_socket(); /* Get size of group list for this interface */ memset(&ifgr, 0, sizeof(ifgr)); strlcpy(ifgr.ifgr_name, ifa_name, IFNAMSIZ); if (ioctl(s, SIOCGIFGROUP, (caddr_t)&ifgr) == -1) err(1, "SIOCGIFGROUP"); /* Retrieve group list for this interface */ len = ifgr.ifgr_len; ifgr.ifgr_groups = (struct ifg_req *)calloc(len / sizeof(struct ifg_req), sizeof(struct ifg_req)); if (ifgr.ifgr_groups == NULL) err(1, "calloc"); if (ioctl(s, SIOCGIFGROUP, (caddr_t)&ifgr) == -1) err(1, "SIOCGIFGROUP"); ifg = ifgr.ifgr_groups; for (; ifg && len >= sizeof(struct ifg_req); ifg++) { len -= sizeof(struct ifg_req); if (strcmp(ifg->ifgrq_group, "all")) { ENTRY item; ENTRY *ret_item; int *answer; item.key = ifg->ifgrq_group; if (hsearch_r(item, FIND, &ret_item, &isgroup_map) == 0) { struct ifgroupreq ifgr2; /* Don't know the answer yet */ if ((answer = malloc(sizeof(int))) == NULL) err(1, "malloc"); bzero(&ifgr2, sizeof(ifgr2)); strlcpy(ifgr2.ifgr_name, ifg->ifgrq_group, sizeof(ifgr2.ifgr_name)); if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr2) == 0) *answer = ifgr2.ifgr_len; else *answer = 0; item.key = strdup(ifg->ifgrq_group); item.data = answer; if (hsearch_r(item, ENTER, &ret_item, &isgroup_map) == 0) err(1, "interface group query response" " map insert"); } } } free(ifgr.ifgr_groups); } void ifa_load(void) { struct ifaddrs *ifap, *ifa; struct node_host *n = NULL, *h = NULL; if (getifaddrs(&ifap) < 0) err(1, "getifaddrs"); for (ifa = ifap; ifa; ifa = ifa->ifa_next) { if (!(ifa->ifa_addr->sa_family == AF_INET || ifa->ifa_addr->sa_family == AF_INET6 || ifa->ifa_addr->sa_family == AF_LINK)) continue; n = calloc(1, sizeof(struct node_host)); if (n == NULL) err(1, "address: calloc"); n->af = ifa->ifa_addr->sa_family; n->ifa_flags = ifa->ifa_flags; #ifdef __KAME__ if (n->af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&((struct sockaddr_in6 *) ifa->ifa_addr)->sin6_addr) && ((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_scope_id == 0) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; sin6->sin6_scope_id = sin6->sin6_addr.s6_addr[2] << 8 | sin6->sin6_addr.s6_addr[3]; sin6->sin6_addr.s6_addr[2] = 0; sin6->sin6_addr.s6_addr[3] = 0; } #endif n->ifindex = 0; if (n->af == AF_INET) { memcpy(&n->addr.v.a.addr, &((struct sockaddr_in *) ifa->ifa_addr)->sin_addr.s_addr, sizeof(struct in_addr)); memcpy(&n->addr.v.a.mask, &((struct sockaddr_in *) ifa->ifa_netmask)->sin_addr.s_addr, sizeof(struct in_addr)); if (ifa->ifa_broadaddr != NULL) memcpy(&n->bcast, &((struct sockaddr_in *) ifa->ifa_broadaddr)->sin_addr.s_addr, sizeof(struct in_addr)); if (ifa->ifa_dstaddr != NULL) memcpy(&n->peer, &((struct sockaddr_in *) ifa->ifa_dstaddr)->sin_addr.s_addr, sizeof(struct in_addr)); } else if (n->af == AF_INET6) { memcpy(&n->addr.v.a.addr, &((struct sockaddr_in6 *) ifa->ifa_addr)->sin6_addr.s6_addr, sizeof(struct in6_addr)); memcpy(&n->addr.v.a.mask, &((struct sockaddr_in6 *) ifa->ifa_netmask)->sin6_addr.s6_addr, sizeof(struct in6_addr)); if (ifa->ifa_broadaddr != NULL) memcpy(&n->bcast, &((struct sockaddr_in6 *) ifa->ifa_broadaddr)->sin6_addr.s6_addr, sizeof(struct in6_addr)); if (ifa->ifa_dstaddr != NULL) memcpy(&n->peer, &((struct sockaddr_in6 *) ifa->ifa_dstaddr)->sin6_addr.s6_addr, sizeof(struct in6_addr)); n->ifindex = ((struct sockaddr_in6 *) ifa->ifa_addr)->sin6_scope_id; } else if (n->af == AF_LINK) { ifa_add_groups_to_map(ifa->ifa_name); } if ((n->ifname = strdup(ifa->ifa_name)) == NULL) 
err(1, "ifa_load: strdup"); n->next = NULL; n->tail = n; if (h == NULL) h = n; else { h->tail->next = n; h->tail = n; } } iftab = h; freeifaddrs(ifap); } static int get_socket_domain(void) { int sdom; sdom = AF_UNSPEC; #ifdef WITH_INET6 if (sdom == AF_UNSPEC && feature_present("inet6")) sdom = AF_INET6; #endif #ifdef WITH_INET if (sdom == AF_UNSPEC && feature_present("inet")) sdom = AF_INET; #endif if (sdom == AF_UNSPEC) sdom = AF_LINK; return (sdom); } int get_query_socket(void) { static int s = -1; if (s == -1) { if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) == -1) err(1, "socket"); } return (s); } /* * Returns the response len if the name is a group, otherwise returns 0. */ static int is_a_group(char *name) { ENTRY item; ENTRY *ret_item; item.key = name; if (hsearch_r(item, FIND, &ret_item, &isgroup_map) == 0) return (0); return (*(int *)ret_item->data); } struct node_host * ifa_exists(char *ifa_name) { struct node_host *n; if (iftab == NULL) ifa_load(); /* check whether this is a group */ if (is_a_group(ifa_name)) { /* fake a node_host */ if ((n = calloc(1, sizeof(*n))) == NULL) err(1, "calloc"); if ((n->ifname = strdup(ifa_name)) == NULL) err(1, "strdup"); return (n); } for (n = iftab; n; n = n->next) { if (n->af == AF_LINK && !strncmp(n->ifname, ifa_name, IFNAMSIZ)) return (n); } return (NULL); } struct node_host * ifa_grouplookup(char *ifa_name, int flags) { struct ifg_req *ifg; struct ifgroupreq ifgr; int s, len; struct node_host *n, *h = NULL; s = get_query_socket(); len = is_a_group(ifa_name); if (len == 0) return (NULL); bzero(&ifgr, sizeof(ifgr)); strlcpy(ifgr.ifgr_name, ifa_name, sizeof(ifgr.ifgr_name)); ifgr.ifgr_len = len; if ((ifgr.ifgr_groups = calloc(1, len)) == NULL) err(1, "calloc"); if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr) == -1) err(1, "SIOCGIFGMEMB"); for (ifg = ifgr.ifgr_groups; ifg && len >= sizeof(struct ifg_req); ifg++) { len -= sizeof(struct ifg_req); if ((n = ifa_lookup(ifg->ifgrq_member, flags)) == NULL) continue; if (h == NULL) h = n; else { h->tail->next = n; h->tail = n->tail; } } free(ifgr.ifgr_groups); return (h); } struct node_host * ifa_lookup(char *ifa_name, int flags) { struct node_host *p = NULL, *h = NULL, *n = NULL; int got4 = 0, got6 = 0; const char *last_if = NULL; /* first load iftab and isgroup_map */ if (iftab == NULL) ifa_load(); if ((h = ifa_grouplookup(ifa_name, flags)) != NULL) return (h); if (!strncmp(ifa_name, "self", IFNAMSIZ)) ifa_name = NULL; for (p = iftab; p; p = p->next) { if (ifa_skip_if(ifa_name, p)) continue; if ((flags & PFI_AFLAG_BROADCAST) && p->af != AF_INET) continue; if ((flags & PFI_AFLAG_BROADCAST) && !(p->ifa_flags & IFF_BROADCAST)) continue; if ((flags & PFI_AFLAG_PEER) && !(p->ifa_flags & IFF_POINTOPOINT)) continue; if ((flags & PFI_AFLAG_NETWORK) && p->ifindex > 0) continue; if (last_if == NULL || strcmp(last_if, p->ifname)) got4 = got6 = 0; last_if = p->ifname; if ((flags & PFI_AFLAG_NOALIAS) && p->af == AF_INET && got4) continue; if ((flags & PFI_AFLAG_NOALIAS) && p->af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&p->addr.v.a.addr.v6)) continue; if ((flags & PFI_AFLAG_NOALIAS) && p->af == AF_INET6 && got6) continue; if (p->af == AF_INET) got4 = 1; else got6 = 1; n = calloc(1, sizeof(struct node_host)); if (n == NULL) err(1, "address: calloc"); n->af = p->af; if (flags & PFI_AFLAG_BROADCAST) memcpy(&n->addr.v.a.addr, &p->bcast, sizeof(struct pf_addr)); else if (flags & PFI_AFLAG_PEER) memcpy(&n->addr.v.a.addr, &p->peer, sizeof(struct pf_addr)); else memcpy(&n->addr.v.a.addr, &p->addr.v.a.addr, sizeof(struct 
pf_addr)); if (flags & PFI_AFLAG_NETWORK) set_ipmask(n, unmask(&p->addr.v.a.mask, n->af)); else { if (n->af == AF_INET) { if (p->ifa_flags & IFF_LOOPBACK && p->ifa_flags & IFF_LINK1) memcpy(&n->addr.v.a.mask, &p->addr.v.a.mask, sizeof(struct pf_addr)); else set_ipmask(n, 32); } else set_ipmask(n, 128); } n->ifindex = p->ifindex; n->ifname = strdup(p->ifname); n->next = NULL; n->tail = n; if (h == NULL) h = n; else { h->tail->next = n; h->tail = n; } } return (h); } int ifa_skip_if(const char *filter, struct node_host *p) { int n; if (p->af != AF_INET && p->af != AF_INET6) return (1); if (filter == NULL || !*filter) return (0); if (!strcmp(p->ifname, filter)) return (0); /* exact match */ n = strlen(filter); if (n < 1 || n >= IFNAMSIZ) return (1); /* sanity check */ if (filter[n-1] >= '0' && filter[n-1] <= '9') return (1); /* only do exact match in that case */ if (strncmp(p->ifname, filter, n)) return (1); /* prefix doesn't match */ return (p->ifname[n] < '0' || p->ifname[n] > '9'); } struct node_host * host(const char *s) { struct node_host *h = NULL; int mask, v4mask, v6mask, cont = 1; char *p, *q, *ps; if ((p = strrchr(s, '/')) != NULL) { mask = strtol(p+1, &q, 0); if (!q || *q || mask > 128 || q == (p+1)) { fprintf(stderr, "invalid netmask '%s'\n", p); return (NULL); } if ((ps = malloc(strlen(s) - strlen(p) + 1)) == NULL) err(1, "host: malloc"); strlcpy(ps, s, strlen(s) - strlen(p) + 1); v4mask = v6mask = mask; } else { if ((ps = strdup(s)) == NULL) err(1, "host: strdup"); v4mask = 32; v6mask = 128; mask = -1; } /* IPv4 address? */ if (cont && (h = host_v4(s, mask)) != NULL) cont = 0; /* IPv6 address? */ if (cont && (h = host_v6(ps, v6mask)) != NULL) cont = 0; /* interface with this name exists? */ /* expensive with thousands of interfaces - prioritize IPv4/6 check */ if (cont && (h = host_if(ps, mask, &cont)) != NULL) cont = 0; /* dns lookup */ if (cont && (h = host_dns(ps, v4mask, v6mask)) != NULL) cont = 0; free(ps); if (h == NULL || cont == 1) { fprintf(stderr, "no IP address found for %s\n", s); return (NULL); } return (h); } struct node_host * host_if(const char *s, int mask, int *cont) { struct node_host *n, *h = NULL; char *p, *ps; int flags = 0; if ((ps = strdup(s)) == NULL) err(1, "host_if: strdup"); while ((p = strrchr(ps, ':')) != NULL) { if (!strcmp(p+1, "network")) flags |= PFI_AFLAG_NETWORK; else if (!strcmp(p+1, "broadcast")) flags |= PFI_AFLAG_BROADCAST; else if (!strcmp(p+1, "peer")) flags |= PFI_AFLAG_PEER; else if (!strcmp(p+1, "0")) flags |= PFI_AFLAG_NOALIAS; else { free(ps); return (NULL); } *p = '\0'; *cont = 0; } if (flags & (flags - 1) & PFI_AFLAG_MODEMASK) { /* Yep!
*/ fprintf(stderr, "illegal combination of interface modifiers\n"); free(ps); return (NULL); } if ((flags & (PFI_AFLAG_NETWORK|PFI_AFLAG_BROADCAST)) && mask > -1) { fprintf(stderr, "network or broadcast lookup, but " "extra netmask given\n"); free(ps); return (NULL); } if (ifa_exists(ps) || !strncmp(ps, "self", IFNAMSIZ)) { /* interface with this name exists */ h = ifa_lookup(ps, flags); for (n = h; n != NULL && mask > -1; n = n->next) set_ipmask(n, mask); } free(ps); return (h); } struct node_host * host_v4(const char *s, int mask) { struct node_host *h = NULL; struct in_addr ina; int bits = 32; memset(&ina, 0, sizeof(struct in_addr)); if (strrchr(s, '/') != NULL) { if ((bits = inet_net_pton(AF_INET, s, &ina, sizeof(ina))) == -1) return (NULL); } else { if (inet_pton(AF_INET, s, &ina) != 1) return (NULL); } h = calloc(1, sizeof(struct node_host)); if (h == NULL) err(1, "address: calloc"); h->ifname = NULL; h->af = AF_INET; h->addr.v.a.addr.addr32[0] = ina.s_addr; set_ipmask(h, bits); h->next = NULL; h->tail = h; return (h); } struct node_host * host_v6(const char *s, int mask) { struct addrinfo hints, *res; struct node_host *h = NULL; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET6; hints.ai_socktype = SOCK_DGRAM; /*dummy*/ hints.ai_flags = AI_NUMERICHOST; if (getaddrinfo(s, "0", &hints, &res) == 0) { h = calloc(1, sizeof(struct node_host)); if (h == NULL) err(1, "address: calloc"); h->ifname = NULL; h->af = AF_INET6; memcpy(&h->addr.v.a.addr, &((struct sockaddr_in6 *)res->ai_addr)->sin6_addr, sizeof(h->addr.v.a.addr)); h->ifindex = ((struct sockaddr_in6 *)res->ai_addr)->sin6_scope_id; set_ipmask(h, mask); freeaddrinfo(res); h->next = NULL; h->tail = h; } return (h); } struct node_host * host_dns(const char *s, int v4mask, int v6mask) { struct addrinfo hints, *res0, *res; struct node_host *n, *h = NULL; int error, noalias = 0; int got4 = 0, got6 = 0; char *p, *ps; if ((ps = strdup(s)) == NULL) err(1, "host_dns: strdup"); if ((p = strrchr(ps, ':')) != NULL && !strcmp(p, ":0")) { noalias = 1; *p = '\0'; } memset(&hints, 0, sizeof(hints)); hints.ai_family = PF_UNSPEC; hints.ai_socktype = SOCK_STREAM; /* DUMMY */ error = getaddrinfo(ps, NULL, &hints, &res0); if (error) { free(ps); return (h); } for (res = res0; res; res = res->ai_next) { if (res->ai_family != AF_INET && res->ai_family != AF_INET6) continue; if (noalias) { if (res->ai_family == AF_INET) { if (got4) continue; got4 = 1; } else { if (got6) continue; got6 = 1; } } n = calloc(1, sizeof(struct node_host)); if (n == NULL) err(1, "host_dns: calloc"); n->ifname = NULL; n->af = res->ai_family; if (res->ai_family == AF_INET) { memcpy(&n->addr.v.a.addr, &((struct sockaddr_in *) res->ai_addr)->sin_addr.s_addr, sizeof(struct in_addr)); set_ipmask(n, v4mask); } else { memcpy(&n->addr.v.a.addr, &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr.s6_addr, sizeof(struct in6_addr)); n->ifindex = ((struct sockaddr_in6 *) res->ai_addr)->sin6_scope_id; set_ipmask(n, v6mask); } n->next = NULL; n->tail = n; if (h == NULL) h = n; else { h->tail->next = n; h->tail = n; } } freeaddrinfo(res0); free(ps); return (h); } /* * convert a hostname to a list of addresses and put them in the given buffer. * test: * if set to 1, only simple addresses are accepted (no netblock, no "!"). 
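* Returns 0 on success and -1 on failure; a failed name lookup returns -1
* with errno cleared to 0 so callers can tell it apart from a system error.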
*/ int append_addr(struct pfr_buffer *b, char *s, int test) { char *r; struct node_host *h, *n; int rv, not = 0; for (r = s; *r == '!'; r++) not = !not; if ((n = host(r)) == NULL) { errno = 0; return (-1); } rv = append_addr_host(b, n, test, not); do { h = n; n = n->next; free(h); } while (n != NULL); return (rv); } /* * same as previous function, but with a pre-parsed input and the ability * to "negate" the result. Does not free the node_host list. * not: * setting it to 1 is equivalent to adding "!" in front of parameter s. */ int append_addr_host(struct pfr_buffer *b, struct node_host *n, int test, int not) { int bits; struct pfr_addr addr; do { bzero(&addr, sizeof(addr)); addr.pfra_not = n->not ^ not; addr.pfra_af = n->af; addr.pfra_net = unmask(&n->addr.v.a.mask, n->af); switch (n->af) { case AF_INET: addr.pfra_ip4addr.s_addr = n->addr.v.a.addr.addr32[0]; bits = 32; break; case AF_INET6: memcpy(&addr.pfra_ip6addr, &n->addr.v.a.addr.v6, sizeof(struct in6_addr)); bits = 128; break; default: errno = EINVAL; return (-1); } if ((test && (not || addr.pfra_net != bits)) || addr.pfra_net > bits) { errno = EINVAL; return (-1); } if (pfr_buf_add(b, &addr)) return (-1); } while ((n = n->next) != NULL); return (0); } int pfctl_add_trans(struct pfr_buffer *buf, int rs_num, const char *anchor) { struct pfioc_trans_e trans; bzero(&trans, sizeof(trans)); trans.rs_num = rs_num; if (strlcpy(trans.anchor, anchor, sizeof(trans.anchor)) >= sizeof(trans.anchor)) errx(1, "pfctl_add_trans: strlcpy"); return pfr_buf_add(buf, &trans); } u_int32_t pfctl_get_ticket(struct pfr_buffer *buf, int rs_num, const char *anchor) { struct pfioc_trans_e *p; PFRB_FOREACH(p, buf) if (rs_num == p->rs_num && !strcmp(anchor, p->anchor)) return (p->ticket); errx(1, "pfctl_get_ticket: assertion failed"); } int pfctl_trans(int dev, struct pfr_buffer *buf, u_long cmd, int from) { struct pfioc_trans trans; bzero(&trans, sizeof(trans)); trans.size = buf->pfrb_size - from; trans.esize = sizeof(struct pfioc_trans_e); trans.array = ((struct pfioc_trans_e *)buf->pfrb_caddr) + from; return ioctl(dev, cmd, &trans); } diff --git a/sbin/pfctl/pfctl_parser.h b/sbin/pfctl/pfctl_parser.h index 13151cc33829..c5da3408fb93 100644 --- a/sbin/pfctl/pfctl_parser.h +++ b/sbin/pfctl/pfctl_parser.h @@ -1,373 +1,376 @@ /* $OpenBSD: pfctl_parser.h,v 1.86 2006/10/31 23:46:25 mcbride Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _PFCTL_PARSER_H_ #define _PFCTL_PARSER_H_ #include #define PF_OSFP_FILE "/etc/pf.os" #define PF_OPT_DISABLE 0x0001 #define PF_OPT_ENABLE 0x0002 #define PF_OPT_VERBOSE 0x0004 #define PF_OPT_NOACTION 0x0008 #define PF_OPT_QUIET 0x0010 #define PF_OPT_CLRRULECTRS 0x0020 #define PF_OPT_USEDNS 0x0040 #define PF_OPT_VERBOSE2 0x0080 #define PF_OPT_DUMMYACTION 0x0100 #define PF_OPT_DEBUG 0x0200 #define PF_OPT_SHOWALL 0x0400 #define PF_OPT_OPTIMIZE 0x0800 #define PF_OPT_NUMERIC 0x1000 #define PF_OPT_MERGE 0x2000 #define PF_OPT_RECURSE 0x4000 #define PF_OPT_KILLMATCH 0x8000 #define PF_TH_ALL 0xFF #define PF_NAT_PROXY_PORT_LOW 50001 #define PF_NAT_PROXY_PORT_HIGH 65535 #define PF_OPTIMIZE_BASIC 0x0001 #define PF_OPTIMIZE_PROFILE 0x0002 #define FCNT_NAMES { \ "searches", \ "inserts", \ "removals", \ NULL \ } struct pfr_buffer; /* forward definition */ struct pfctl { int dev; int opts; int optimize; int loadopt; int asd; /* anchor stack depth */ int bn; /* brace number */ int tdirty; /* kernel dirty */ #define PFCTL_ANCHOR_STACK_DEPTH 64 struct pfctl_anchor *astack[PFCTL_ANCHOR_STACK_DEPTH]; struct pfioc_pooladdr paddr; struct pfioc_altq *paltq; struct pfioc_queue *pqueue; struct pfr_buffer *trans; struct pfctl_anchor *anchor, *alast; int eth_nr; struct pfctl_eth_anchor *eanchor, *ealast; struct pfctl_eth_anchor *eastack[PFCTL_ANCHOR_STACK_DEPTH]; u_int32_t eth_ticket; const char *ruleset; /* 'set foo' options */ u_int32_t timeout[PFTM_MAX]; u_int32_t limit[PF_LIMIT_MAX]; u_int32_t debug; u_int32_t hostid; + u_int32_t reassemble; char *ifname; bool keep_counters; u_int8_t syncookies; u_int8_t syncookieswat[2]; /* lowat, highwat, in % */ u_int8_t syncookieswat_set; u_int8_t timeout_set[PFTM_MAX]; u_int8_t limit_set[PF_LIMIT_MAX]; u_int8_t debug_set; u_int8_t hostid_set; u_int8_t ifname_set; + u_int8_t reass_set; }; struct node_if { char ifname[IFNAMSIZ]; u_int8_t not; u_int8_t dynamic; /* antispoof */ u_int ifa_flags; struct node_if *next; struct node_if *tail; }; struct node_host { struct pf_addr_wrap addr; struct pf_addr bcast; struct pf_addr peer; sa_family_t af; u_int8_t not; u_int32_t ifindex; /* link-local IPv6 addrs */ char *ifname; u_int ifa_flags; struct node_host *next; struct node_host *tail; }; struct node_mac { u_int8_t mac[ETHER_ADDR_LEN]; u_int8_t mask[ETHER_ADDR_LEN]; bool neg; bool isset; struct node_mac *next; struct node_mac *tail; }; struct node_os { char *os; pf_osfp_t fingerprint; struct node_os *next; struct node_os *tail; }; struct node_queue_bw { u_int64_t bw_absolute; u_int16_t bw_percent; }; struct node_hfsc_sc { struct node_queue_bw m1; /* slope of 1st segment; bps */ u_int d; /* x-projection of m1; msec */ struct node_queue_bw m2; /* slope of 2nd segment; bps */ u_int8_t used; }; struct node_hfsc_opts { struct node_hfsc_sc realtime; struct node_hfsc_sc linkshare; struct node_hfsc_sc upperlimit; int flags; }; struct node_fairq_sc { struct node_queue_bw m1; /* slope of 1st segment; bps */ u_int d; /* x-projection of m1; msec */ 
struct node_queue_bw m2; /* slope of 2nd segment; bps */ u_int8_t used; }; struct node_fairq_opts { struct node_fairq_sc linkshare; struct node_queue_bw hogs_bw; u_int nbuckets; int flags; }; struct node_queue_opt { int qtype; union { struct cbq_opts cbq_opts; struct codel_opts codel_opts; struct priq_opts priq_opts; struct node_hfsc_opts hfsc_opts; struct node_fairq_opts fairq_opts; } data; }; #define QPRI_BITSET_SIZE 256 __BITSET_DEFINE(qpri_bitset, QPRI_BITSET_SIZE); LIST_HEAD(gen_sc, segment); struct pfctl_altq { struct pf_altq pa; struct { STAILQ_ENTRY(pfctl_altq) link; u_int64_t bwsum; struct qpri_bitset qpris; int children; int root_classes; int default_classes; struct gen_sc lssc; struct gen_sc rtsc; } meta; }; struct pfctl_watermarks { uint32_t hi; uint32_t lo; }; #ifdef __FreeBSD__ /* * XXX * This is absolutely not the correct location to define this. * Should we use a separate header file instead? */ #define SIMPLEQ_HEAD STAILQ_HEAD #define SIMPLEQ_HEAD_INITIALIZER STAILQ_HEAD_INITIALIZER #define SIMPLEQ_ENTRY STAILQ_ENTRY #define SIMPLEQ_FIRST STAILQ_FIRST #define SIMPLEQ_END(head) NULL #define SIMPLEQ_EMPTY STAILQ_EMPTY #define SIMPLEQ_NEXT STAILQ_NEXT /*#define SIMPLEQ_FOREACH STAILQ_FOREACH*/ #define SIMPLEQ_FOREACH(var, head, field) \ for((var) = SIMPLEQ_FIRST(head); \ (var) != SIMPLEQ_END(head); \ (var) = SIMPLEQ_NEXT(var, field)) #define SIMPLEQ_INIT STAILQ_INIT #define SIMPLEQ_INSERT_HEAD STAILQ_INSERT_HEAD #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL #define SIMPLEQ_INSERT_AFTER STAILQ_INSERT_AFTER #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD #endif SIMPLEQ_HEAD(node_tinithead, node_tinit); struct node_tinit { /* table initializer */ SIMPLEQ_ENTRY(node_tinit) entries; struct node_host *host; char *file; }; /* optimizer created tables */ struct pf_opt_tbl { char pt_name[PF_TABLE_NAME_SIZE]; int pt_rulecount; int pt_generated; struct node_tinithead pt_nodes; struct pfr_buffer *pt_buf; }; #define PF_OPT_TABLE_PREFIX "__automatic_" /* optimizer pf_rule container */ struct pf_opt_rule { struct pfctl_rule por_rule; struct pf_opt_tbl *por_src_tbl; struct pf_opt_tbl *por_dst_tbl; u_int64_t por_profile_count; TAILQ_ENTRY(pf_opt_rule) por_entry; TAILQ_ENTRY(pf_opt_rule) por_skip_entry[PF_SKIP_COUNT]; }; TAILQ_HEAD(pf_opt_queue, pf_opt_rule); int pfctl_rules(int, char *, int, int, char *, struct pfr_buffer *); int pfctl_optimize_ruleset(struct pfctl *, struct pfctl_ruleset *); int pfctl_append_rule(struct pfctl *, struct pfctl_rule *, const char *); int pfctl_append_eth_rule(struct pfctl *, struct pfctl_eth_rule *, const char *); int pfctl_add_altq(struct pfctl *, struct pf_altq *); int pfctl_add_pool(struct pfctl *, struct pfctl_pool *, sa_family_t); void pfctl_move_pool(struct pfctl_pool *, struct pfctl_pool *); void pfctl_clear_pool(struct pfctl_pool *); int pfctl_set_timeout(struct pfctl *, const char *, int, int); +int pfctl_set_reassembly(struct pfctl *, int, int); int pfctl_set_optimization(struct pfctl *, const char *); int pfctl_set_limit(struct pfctl *, const char *, unsigned int); int pfctl_set_logif(struct pfctl *, char *); int pfctl_set_hostid(struct pfctl *, u_int32_t); int pfctl_set_debug(struct pfctl *, char *); int pfctl_set_interface_flags(struct pfctl *, char *, int, int); int pfctl_cfg_syncookies(struct pfctl *, uint8_t, struct pfctl_watermarks *); int parse_config(char *, struct pfctl *); int parse_flags(char *); int pfctl_load_anchors(int, struct pfctl *, struct pfr_buffer *); void print_pool(struct pfctl_pool *, u_int16_t, u_int16_t, sa_family_t, int); void
print_src_node(struct pf_src_node *, int); void print_eth_rule(struct pfctl_eth_rule *, const char *, int); void print_rule(struct pfctl_rule *, const char *, int, int); void print_tabledef(const char *, int, int, struct node_tinithead *); void print_status(struct pfctl_status *, struct pfctl_syncookies *, int); void print_running(struct pfctl_status *); int eval_pfaltq(struct pfctl *, struct pf_altq *, struct node_queue_bw *, struct node_queue_opt *); int eval_pfqueue(struct pfctl *, struct pf_altq *, struct node_queue_bw *, struct node_queue_opt *); void print_altq(const struct pf_altq *, unsigned, struct node_queue_bw *, struct node_queue_opt *); void print_queue(const struct pf_altq *, unsigned, struct node_queue_bw *, int, struct node_queue_opt *); int pfctl_define_table(char *, int, int, const char *, struct pfr_buffer *, u_int32_t); void pfctl_clear_fingerprints(int, int); int pfctl_file_fingerprints(int, int, const char *); pf_osfp_t pfctl_get_fingerprint(const char *); int pfctl_load_fingerprints(int, int); char *pfctl_lookup_fingerprint(pf_osfp_t, char *, size_t); void pfctl_show_fingerprints(int); struct icmptypeent { const char *name; u_int8_t type; }; struct icmpcodeent { const char *name; u_int8_t type; u_int8_t code; }; const struct icmptypeent *geticmptypebynumber(u_int8_t, u_int8_t); const struct icmptypeent *geticmptypebyname(char *, u_int8_t); const struct icmpcodeent *geticmpcodebynumber(u_int8_t, u_int8_t, u_int8_t); const struct icmpcodeent *geticmpcodebyname(u_long, char *, u_int8_t); struct pf_timeout { const char *name; int timeout; }; #define PFCTL_FLAG_FILTER 0x02 #define PFCTL_FLAG_NAT 0x04 #define PFCTL_FLAG_OPTION 0x08 #define PFCTL_FLAG_ALTQ 0x10 #define PFCTL_FLAG_TABLE 0x20 #define PFCTL_FLAG_ETH 0x40 extern const struct pf_timeout pf_timeouts[]; void set_ipmask(struct node_host *, u_int8_t); int check_netmask(struct node_host *, sa_family_t); int unmask(struct pf_addr *, sa_family_t); void ifa_load(void); int get_query_socket(void); struct node_host *ifa_exists(char *); struct node_host *ifa_grouplookup(char *ifa_name, int flags); struct node_host *ifa_lookup(char *, int); struct node_host *host(const char *); int append_addr(struct pfr_buffer *, char *, int); int append_addr_host(struct pfr_buffer *, struct node_host *, int, int); #endif /* _PFCTL_PARSER_H_ */ diff --git a/share/man/man5/pf.conf.5 b/share/man/man5/pf.conf.5 index ca7c64d1cf83..8e0bb42f257a 100644 --- a/share/man/man5/pf.conf.5 +++ b/share/man/man5/pf.conf.5 @@ -1,3323 +1,3332 @@ .\" $FreeBSD$ .\" $OpenBSD: pf.conf.5,v 1.406 2009/01/31 19:37:12 sobrado Exp $ .\" .\" Copyright (c) 2002, Daniel Hartmeier .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" .\" - Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" - Redistributions in binary form must reproduce the above .\" copyright notice, this list of conditions and the following .\" disclaimer in the documentation and/or other materials provided .\" with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS .\" FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE .\" COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, .\" BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; .\" LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER .\" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN .\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .Dd November 24, 2022 .Dt PF.CONF 5 .Os .Sh NAME .Nm pf.conf .Nd packet filter configuration file .Sh DESCRIPTION The .Xr pf 4 packet filter modifies, drops or passes packets according to rules or definitions specified in .Nm pf.conf . .Sh STATEMENT ORDER There are eight types of statements in .Nm pf.conf : .Bl -tag -width xxxx .It Cm Macros User-defined variables may be defined and used later, simplifying the configuration file. Macros must be defined before they are referenced in .Nm pf.conf . .It Cm Tables Tables provide a mechanism for increasing the performance and flexibility of rules with large numbers of source or destination addresses. .It Cm Options Options tune the behaviour of the packet filtering engine. .It Cm Ethernet Filtering Ethernet filtering provides rule-based blocking or passing of Ethernet packets. .It Cm Traffic Normalization Li (e.g. Em scrub ) Traffic normalization protects internal machines against inconsistencies in Internet protocols and implementations. .It Cm Queueing Queueing provides rule-based bandwidth control. .It Cm Translation Li (Various forms of NAT) Translation rules specify how addresses are to be mapped or redirected to other addresses. .It Cm Packet Filtering Packet filtering provides rule-based blocking or passing of packets. .El .Pp With the exception of .Cm macros and .Cm tables , the types of statements should be grouped and appear in .Nm pf.conf in the order shown above, as this matches the operation of the underlying packet filtering engine. By default .Xr pfctl 8 enforces this order (see .Ar set require-order below). .Pp Comments can be put anywhere in the file using a hash mark .Pq Sq # , and extend to the end of the current line. .Pp Additional configuration files can be included with the .Ic include keyword, for example: .Bd -literal -offset indent include "/etc/pf/sub.filter.conf" .Ed .Sh MACROS Macros can be defined that will later be expanded in context. Macro names must start with a letter, and may contain letters, digits and underscores. Macro names may not be reserved words (for example .Ar pass , .Ar in , .Ar out ) . Macros are not expanded inside quotes. .Pp For example, .Bd -literal -offset indent ext_if = \&"kue0\&" all_ifs = \&"{\&" $ext_if lo0 \&"}\&" pass out on $ext_if from any to any pass in on $ext_if proto tcp from any to any port 25 .Ed .Sh TABLES Tables are named structures which can hold a collection of addresses and networks. Lookups against tables in .Xr pf 4 are relatively fast, making a single rule with tables much more efficient, in terms of processor usage and memory consumption, than a large number of rules which differ only in IP address (either created explicitly or automatically by rule expansion). .Pp Tables can be used as the source or destination of filter rules, .Ar scrub rules or translation rules such as .Ar nat or .Ar rdr (see below for details on the various rule types). 
Tables can also be used for the redirect address of .Ar nat and .Ar rdr rules and in the routing options of filter rules, but only for .Ar round-robin pools. .Pp Tables can be defined with any of the following .Xr pfctl 8 mechanisms. As with macros, reserved words may not be used as table names. .Bl -tag -width "manually" .It Ar manually Persistent tables can be manually created with the .Ar add or .Ar replace option of .Xr pfctl 8 , before or after the ruleset has been loaded. .It Pa pf.conf Table definitions can be placed directly in this file, and loaded at the same time as other rules are loaded, atomically. Table definitions inside .Nm pf.conf use the .Ar table statement, and are especially useful to define non-persistent tables. The contents of a pre-existing table defined without a list of addresses to initialize it are not altered when .Nm pf.conf is loaded. A table initialized with the empty list, .Li { } , will be cleared on load. .El .Pp Tables may be defined with the following attributes: .Bl -tag -width persist .It Ar persist The .Ar persist flag forces the kernel to keep the table even when no rules refer to it. If the flag is not set, the kernel will automatically remove the table when the last rule referring to it is flushed. .It Ar const The .Ar const flag prevents the user from altering the contents of the table once it has been created. Without that flag, .Xr pfctl 8 can be used to add or remove addresses from the table at any time, even when running with .Xr securelevel 7 = 2. .It Ar counters The .Ar counters flag enables per-address packet and byte counters which can be displayed with .Xr pfctl 8 . Note that this feature carries significant memory overhead for large tables. .El .Pp For example, .Bd -literal -offset indent table \*(Ltprivate\*(Gt const { 10/8, 172.16/12, 192.168/16 } table \*(Ltbadhosts\*(Gt persist block on fxp0 from { \*(Ltprivate\*(Gt, \*(Ltbadhosts\*(Gt } to any .Ed .Pp creates a table called private, to hold RFC 1918 private network blocks, and a table called badhosts, which is initially empty. A filter rule is set up to block all traffic coming from addresses listed in either table. The private table cannot have its contents changed and the badhosts table will exist even when no active filter rules reference it. Addresses may later be added to the badhosts table, so that traffic from these hosts can be blocked by using .Bd -literal -offset indent # pfctl -t badhosts -Tadd 204.92.77.111 .Ed .Pp A table can also be initialized with an address list specified in one or more external files, using the following syntax: .Bd -literal -offset indent table \*(Ltspam\*(Gt persist file \&"/etc/spammers\&" file \&"/etc/openrelays\&" block on fxp0 from \*(Ltspam\*(Gt to any .Ed .Pp The files .Pa /etc/spammers and .Pa /etc/openrelays list IP addresses, one per line. Any lines beginning with a # are treated as comments and ignored. In addition to being specified by IP address, hosts may also be specified by their hostname. When the resolver is called to add a hostname to a table, .Em all resulting IPv4 and IPv6 addresses are placed into the table. IP addresses can also be entered in a table by specifying a valid interface name, a valid interface group or the .Em self keyword, in which case all addresses assigned to the interface(s) will be added to the table. .Sh OPTIONS .Xr pf 4 may be tuned for various situations using the .Ar set command.
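.Pp
For example, a ruleset might begin with a few common tuning statements; the values below are purely illustrative, and each option is described in detail in the following list:
.Bd -literal -offset indent
set limit states 20000
set optimization normal
set block-policy return
.Ed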
.Bl -tag -width xxxx .It Ar set timeout .Pp .Bl -tag -width "src.track" -compact .It Ar interval Interval between purging expired states and fragments. .It Ar frag Seconds before an unassembled fragment is expired. .It Ar src.track Length of time to retain a source tracking entry after the last state expires. .El .Pp When a packet matches a stateful connection, the seconds to live for the connection will be updated to that of the .Ar proto.modifier which corresponds to the connection state. Each packet which matches this state will reset the TTL. Tuning these values may improve the performance of the firewall at the risk of dropping valid idle connections. .Pp .Bl -tag -width xxxx -compact .It Ar tcp.first The state after the first packet. .It Ar tcp.opening The state before the destination host ever sends a packet. .It Ar tcp.established The fully established state. .It Ar tcp.closing The state after the first FIN has been sent. .It Ar tcp.finwait The state after both FINs have been exchanged and the connection is closed. Some hosts (notably web servers on Solaris) send TCP packets even after closing the connection. Increasing .Ar tcp.finwait (and possibly .Ar tcp.closing ) can prevent blocking of such packets. .It Ar tcp.closed The state after one endpoint sends an RST. .El .Pp ICMP and UDP are handled in a fashion similar to TCP, but with a much more limited set of states: .Pp .Bl -tag -width xxxx -compact .It Ar udp.first The state after the first packet. .It Ar udp.single The state if the source host sends more than one packet but the destination host has never sent one back. .It Ar udp.multiple The state if both hosts have sent packets. .It Ar icmp.first The state after the first packet. .It Ar icmp.error The state after an ICMP error came back in response to an ICMP packet. .El .Pp Other protocols are handled similarly to UDP: .Pp .Bl -tag -width xxxx -compact .It Ar other.first .It Ar other.single .It Ar other.multiple .El .Pp Timeout values can be reduced adaptively as the number of state table entries grows. .Pp .Bl -tag -width xxxx -compact .It Ar adaptive.start When the number of state entries exceeds this value, adaptive scaling begins. All timeout values are scaled linearly with the factor (adaptive.end - number of states) / (adaptive.end - adaptive.start). .It Ar adaptive.end When this number of state entries is reached, all timeout values become zero, effectively purging all state entries immediately. This value is used to define the scale factor; it should not actually be reached (set a lower state limit, see below). .El .Pp Adaptive timeouts are enabled by default, with an adaptive.start value equal to 60% of the state limit, and an adaptive.end value equal to 120% of the state limit. They can be disabled by setting both adaptive.start and adaptive.end to 0. .Pp The adaptive timeout values can be defined both globally and for each rule. When used on a per-rule basis, the values relate to the number of states created by the rule, otherwise to the total number of states. .Pp For example: .Bd -literal -offset indent set timeout tcp.first 120 set timeout tcp.established 86400 set timeout { adaptive.start 6000, adaptive.end 12000 } set limit states 10000 .Ed .Pp With 9000 state table entries, the timeout values are scaled to 50% (tcp.first 60, tcp.established 43200). .It Ar set loginterface Enable collection of packet and byte count statistics for the given interface or interface group.
These statistics can be viewed using .Bd -literal -offset indent # pfctl -s info .Ed .Pp In this example .Xr pf 4 collects statistics on the interface named dc0: .Bd -literal -offset indent set loginterface dc0 .Ed .Pp One can disable the loginterface using: .Bd -literal -offset indent set loginterface none .Ed .It Ar set limit Sets hard limits on the memory pools used by the packet filter. See .Xr zone 9 for an explanation of memory pools. .Pp For example, .Bd -literal -offset indent set limit states 20000 .Ed .Pp sets the maximum number of entries in the memory pool used by state table entries (generated by .Ar pass rules which do not specify .Ar no state ) to 20000. Using .Bd -literal -offset indent set limit frags 20000 .Ed .Pp sets the maximum number of entries in the memory pool used for fragment reassembly (generated by .Ar scrub rules) to 20000. Using .Bd -literal -offset indent set limit src-nodes 2000 .Ed .Pp sets the maximum number of entries in the memory pool used for tracking source IP addresses (generated by the .Ar sticky-address and .Ar src.track options) to 2000. Using .Bd -literal -offset indent set limit tables 1000 set limit table-entries 100000 .Ed .Pp sets limits on the memory pools used by tables. The first limits the number of tables that can exist to 1000. The second limits the overall number of addresses that can be stored in tables to 100000. .Pp Various limits can be combined on a single line: .Bd -literal -offset indent set limit { states 20000, frags 20000, src-nodes 2000 } .Ed .It Ar set ruleset-optimization .Bl -tag -width xxxxxxxx -compact .It Ar none Disable the ruleset optimizer. .It Ar basic Enable basic ruleset optimization. This is the default behaviour. Basic ruleset optimization does four things to improve the performance of ruleset evaluations: .Pp .Bl -enum -compact .It remove duplicate rules .It remove rules that are a subset of another rule .It combine multiple rules into a table when advantageous .It re-order the rules to improve evaluation performance .El .Pp .It Ar profile Uses the currently loaded ruleset as a feedback profile to tailor the ordering of quick rules to actual network traffic. .El .Pp It is important to note that the ruleset optimizer will modify the ruleset to improve performance. A side effect of the ruleset modification is that per-rule accounting statistics will have different meanings than before. If per-rule accounting is important for billing purposes or whatnot, either the ruleset optimizer should not be used or a label field should be added to all of the accounting rules to act as optimization barriers. .Pp Optimization can also be set as a command-line argument to .Xr pfctl 8 , overriding the settings in .Nm . .It Ar set optimization Optimize state timeouts for one of the following network environments: .Pp .Bl -tag -width xxxx -compact .It Ar normal A normal network environment. Suitable for almost all networks. .It Ar high-latency A high-latency environment (such as a satellite connection). .It Ar satellite Alias for .Ar high-latency . .It Ar aggressive Aggressively expire connections. This can greatly reduce the memory usage of the firewall at the cost of dropping idle connections early. .It Ar conservative Extremely conservative settings. Avoid dropping legitimate connections at the expense of greater memory utilization (possibly much greater on a busy network) and slightly increased processor utilization. 
.El .Pp For example: .Bd -literal -offset indent set optimization aggressive .Ed .It Ar set block-policy The .Ar block-policy option sets the default behaviour for the packet .Ar block action: .Pp .Bl -tag -width xxxxxxxx -compact .It Ar drop Packet is silently dropped. .It Ar return A TCP RST is returned for blocked TCP packets, an ICMP UNREACHABLE is returned for blocked UDP packets, and all other packets are silently dropped. .El .Pp For example: .Bd -literal -offset indent set block-policy return .Ed .It Ar set fail-policy The .Ar fail-policy option sets the behaviour of rules which should pass a packet but were unable to do so. This might happen when a nat or route-to rule uses an empty table as the list of targets or if a rule fails to create state or source node. The following .Ar block actions are possible: .Pp .Bl -tag -width xxxxxxxx -compact .It Ar drop Incoming packet is silently dropped. .It Ar return Incoming packet is dropped and a TCP RST is returned for TCP packets, an ICMP UNREACHABLE is returned for UDP packets, and no response is sent for other packets. .El .Pp For example: .Bd -literal -offset indent set fail-policy return .Ed .It Ar set state-policy The .Ar state-policy option sets the default behaviour for states: .Pp .Bl -tag -width group-bound -compact .It Ar if-bound States are bound to interface. .It Ar floating States can match packets on any interface (the default). .El .Pp For example: .Bd -literal -offset indent set state-policy if-bound .Ed .It Ar set syncookies never | always | adaptive When .Cm syncookies are active, pf will answer each incoming TCP SYN with a syncookie SYNACK, without allocating any resources. Upon reception of the client's ACK in response to the syncookie SYNACK, pf will evaluate the ruleset and create state if the ruleset permits it, complete the three-way handshake with the target host and continue the connection with synproxy in place. This allows pf to be resilient against large synflood attacks which would otherwise push the state table to its limits. Due to the blind answers to every incoming SYN, syncookies share the caveats of synproxy, namely seemingly accepting connections that will be dropped later on. .Pp .Bl -tag -width adaptive -compact .It Cm never pf will never send syncookie SYNACKs (the default). .It Cm always pf will always send syncookie SYNACKs. .It Cm adaptive pf will enable syncookie mode when a given percentage of the state table is used up by half-open TCP connections, that is, those that saw the initial SYN but did not finish the three-way handshake. The thresholds for entering and leaving syncookie mode can be specified using .Bd -literal -offset indent set syncookies adaptive (start 25%, end 12%) .Ed .El .It Ar set state-defaults The .Ar state-defaults option sets the state options for states created from rules without an explicit .Ar keep state . For example: .Bd -literal -offset indent set state-defaults no-sync .Ed .It Ar set hostid The 32-bit .Ar hostid identifies this firewall's state table entries to other firewalls in a .Xr pfsync 4 failover cluster. By default the hostid is set to a pseudo-random value; however, it may be desirable to manually configure it, for example to more easily identify the source of state table entries. .Bd -literal -offset indent set hostid 1 .Ed .Pp The hostid may be specified in either decimal or hexadecimal.
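.Pp
Since the hostid may also be given in hexadecimal, the setting above could equally be written as:
.Bd -literal -offset indent
set hostid 0x00000001
.Ed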
.It Ar set require-order By default .Xr pfctl 8 enforces an ordering of the statement types in the ruleset to: .Em options , .Em normalization , .Em queueing , .Em translation , .Em filtering . Setting this option to .Ar no disables this enforcement. There may be non-trivial and non-obvious implications to an out-of-order ruleset. Consider carefully before disabling the order enforcement. .It Ar set fingerprints Load fingerprints of known operating systems from the given filename. By default fingerprints of known operating systems are automatically loaded from .Xr pf.os 5 in .Pa /etc but can be overridden via this option. Setting this option may leave a small period of time where the fingerprints referenced by the currently active ruleset are inconsistent until the new ruleset finishes loading. .Pp For example: .Pp .Dl set fingerprints \&"/etc/pf.os.devel\&" .It Ar set skip on Aq Ar ifspec List interfaces for which packets should not be filtered. Packets passing in or out on such interfaces are passed as if pf were disabled, i.e. pf does not process them in any way. This can be useful on loopback and other virtual interfaces, when packet filtering is not desired and can have unexpected effects. For example: .Pp .Dl set skip on lo0 .It Ar set debug Set the debug .Ar level to one of the following: .Pp .Bl -tag -width xxxxxxxxxxxx -compact .It Ar none Don't generate debug messages. .It Ar urgent Generate debug messages only for serious errors. .It Ar misc Generate debug messages for various errors. .It Ar loud Generate debug messages for common conditions. .El .It Ar set keepcounters Preserve rule counters across rule updates. Usually rule counters are reset to zero on every update of the ruleset. With .Ar keepcounters set, pf will attempt to find matching rules between old and new rulesets and preserve the rule counters. .El .Sh ETHERNET FILTERING .Xr pf 4 has the ability to .Ar block and .Ar pass packets based on attributes of their Ethernet (layer 2) header. .Pp For each packet processed by the packet filter, the filter rules are evaluated in sequential order, from first to last. The last matching rule decides what action is taken. If no rule matches the packet, the default action is to pass the packet. .Pp The following actions can be used in the filter: .Bl -tag -width xxxx .It Ar block The packet is blocked. Unlike for layer 3 traffic, the packet is always silently dropped. .It Ar pass The packet is passed; no state is created for layer 2 traffic. .El .Sh PARAMETERS The rule parameters specify the packets to which a rule applies. A packet always comes in on, or goes out through, one interface. Most parameters are optional. If a parameter is specified, the rule only applies to packets with matching attributes. Certain parameters can be expressed as lists, in which case .Xr pfctl 8 generates all needed rule combinations. .Bl -tag -width xxxx .It Ar in No or Ar out This rule applies to incoming or outgoing packets. If neither .Ar in nor .Ar out are specified, the rule will match packets in both directions. .It Ar quick If a packet matches a rule which has the .Ar quick option set, this rule is considered the last matching rule, and evaluation of subsequent rules is skipped. .It Ar on Aq Ar interface This rule applies only to packets coming in on, or going out through, this particular interface or interface group. For more information on interface groups, see the .Ic group keyword in .Xr ifconfig 8 .
.It Ar bridge-to Aq interface Packets matching this rule will be sent out of the specified interface without further processing. .It Ar proto Aq Ar protocol This rule applies only to packets of this protocol. Note that Ethernet protocol numbers are different from those used in .Xr ip 4 and .Xr ip6 4 . .It Xo .Ar from Aq Ar source .Ar to Aq Ar dest .Xc This rule applies only to packets with the specified source and destination MAC addresses. .It Xo Ar queue Aq Ar queue .Xc Packets matching this rule will be assigned to the specified queue. See .Sx QUEUEING for setup details. .It Ar tag Aq Ar string Packets matching this rule will be tagged with the specified string. The tag acts as an internal marker that can be used to identify these packets later on. This can be used, for example, to provide trust between interfaces and to determine if packets have been processed by translation rules. Tags are .Qq sticky , meaning that the packet will be tagged even if the rule is not the last matching rule. Further matching rules can replace the tag with a new one but will not remove a previously applied tag. A packet is only ever assigned one tag at a time. .It Ar tagged Aq Ar string Used to specify that packets must already be tagged with the given tag in order to match the rule. Inverse tag matching can also be done by specifying the ! operator before the tagged keyword. .El .Sh TRAFFIC NORMALIZATION Traffic normalization is used to sanitize packet content in such a way that there are no ambiguities in packet interpretation on the receiving side. The normalizer does IP fragment reassembly to prevent attacks that confuse intrusion detection systems by sending overlapping IP fragments. Packet normalization is invoked with the .Ar scrub directive. .Pp .Ar scrub has the following options: .Bl -tag -width xxxx .It Ar no-df Clears the .Ar dont-fragment bit from a matching IP packet. Some operating systems are known to generate fragmented packets with the .Ar dont-fragment bit set. This is particularly true with NFS. .Ar Scrub will drop such fragmented .Ar dont-fragment packets unless .Ar no-df is specified. .Pp Unfortunately some operating systems also generate their .Ar dont-fragment packets with a zero IP identification field. Clearing the .Ar dont-fragment bit on packets with a zero IP ID may cause deleterious results if an upstream router later fragments the packet. Using the .Ar random-id modifier (see below) is recommended in combination with the .Ar no-df modifier to ensure unique IP identifiers. .It Ar min-ttl Aq Ar number Enforces a minimum TTL for matching IP packets. .It Ar max-mss Aq Ar number Enforces a maximum MSS for matching TCP packets. .It Xo Ar set-tos Aq Ar string .No \*(Ba Aq Ar number .Xc Enforces a .Em TOS for matching IP packets. .Em TOS may be given as one of .Ar critical , .Ar inetcontrol , .Ar lowdelay , .Ar netcontrol , .Ar throughput , .Ar reliability , or one of the DiffServ Code Points: .Ar ef , .Ar va , .Ar af11 No ... Ar af43 , .Ar cs0 No ... Ar cs7 ; or as either hex or decimal. .It Ar random-id Replaces the IP identification field with random values to compensate for predictable values generated by many hosts. This option only applies to packets that are not fragmented after the optional fragment reassembly. .It Ar fragment reassemble Using .Ar scrub rules, fragments can be reassembled by normalization. In this case, fragments are buffered until they form a complete packet, and only the completed packet is passed on to the filter.
The advantage is that filter rules have to deal only with complete packets, and can ignore fragments. The drawback of caching fragments is the additional memory cost. This is the default behaviour unless no fragment reassemble is specified. .It Ar no fragment reassemble Do not reassemble fragments. .It Ar reassemble tcp Statefully normalizes TCP connections. .Ar scrub reassemble tcp rules may not have the direction (in/out) specified. .Ar reassemble tcp performs the following normalizations: .Pp .Bl -tag -width timeout -compact .It ttl Neither side of the connection is allowed to reduce its IP TTL. An attacker may send a packet such that it reaches the firewall, affects the firewall state, and expires before reaching the destination host. .Ar reassemble tcp will raise the TTL of all packets back up to the highest value seen on the connection. .It timestamp modulation Modern TCP stacks will send a timestamp on every TCP packet and echo the other endpoint's timestamp back to them. Many operating systems will merely start the timestamp at zero when first booted, and increment it several times a second. The uptime of the host can be deduced by reading the timestamp and multiplying by a constant. Observing several different timestamps can also be used to count hosts behind a NAT device. Spoofing TCP packets into a connection likewise requires knowing or guessing valid timestamps. Timestamps merely need to be monotonically increasing and not derived from a guessable base time. .Ar reassemble tcp will cause .Ar scrub to modulate the TCP timestamps with a random number. .It extended PAWS checks There is a problem with TCP on long fat pipes, in that a packet might get delayed for longer than it takes the connection to wrap its 32-bit sequence space. In such an occurrence, the old packet would be indistinguishable from a new packet and would be accepted as such. The solution to this is called PAWS: Protection Against Wrapped Sequence numbers. It protects against this by making sure the timestamp on each packet does not go backwards. .Ar reassemble tcp also makes sure the timestamp on the packet does not go forward more than the RFC allows. By doing this, .Xr pf 4 artificially extends the security of TCP sequence numbers by 10 to 18 bits when the host uses appropriately randomized timestamps, since a blind attacker would have to guess the timestamp as well. .El .El .Pp For example, .Bd -literal -offset indent scrub in on $ext_if all fragment reassemble .Ed .Pp The .Ar no option prefixed to a scrub rule causes matching packets to remain unscrubbed, much in the same way as .Ar drop quick works in the packet filter (see below). This mechanism should be used when it is necessary to exclude specific packets from broader scrub rules. .Sh QUEUEING with ALTQ The ALTQ system is currently not available in the GENERIC kernel, nor as a loadable module. In order to use the queueing options described below, a custom-built kernel is required. Please refer to .Xr altq 4 to learn about the related kernel options. .Pp Packets can be assigned to queues for the purpose of bandwidth control. At least two declarations are required to configure queues, and later any packet filtering rule can reference the defined queues by name. During the filtering component of .Nm pf.conf , the last referenced .Ar queue name is where any packets from .Ar pass rules will be queued, while for .Ar block rules it specifies where any resulting ICMP or TCP RST packets should be queued.
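.Pp
For instance, a filter rule such as the following (the mail queue is declared in the examples later in this section) would queue outgoing SMTP packets in that queue:
.Bd -literal -offset indent
pass out on dc0 inet proto tcp from any to any port 25 queue mail
.Ed
.Pp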
The .Ar scheduler defines the algorithm used to decide which packets get delayed, dropped, or sent out immediately. There are three .Ar schedulers currently supported. .Bl -tag -width xxxx .It Ar cbq Class Based Queueing. .Ar Queues attached to an interface build a tree, thus each .Ar queue can have further child .Ar queues . Each queue can have a .Ar priority and a .Ar bandwidth assigned. .Ar Priority mainly controls the time packets take to get sent out, while .Ar bandwidth primarily affects throughput. .Ar cbq achieves both partitioning and sharing of link bandwidth by hierarchically structured classes. Each class has its own .Ar queue and is assigned its share of .Ar bandwidth . A child class can borrow bandwidth from its parent class as long as excess bandwidth is available (see the option .Ar borrow , below). .It Ar priq Priority Queueing. .Ar Queues are attached flat to the interface; thus, .Ar queues cannot have further child .Ar queues . Each .Ar queue has a unique .Ar priority assigned, ranging from 0 to 15. Packets in the .Ar queue with the highest .Ar priority are processed first. .It Ar hfsc Hierarchical Fair Service Curve. .Ar Queues attached to an interface build a tree, thus each .Ar queue can have further child .Ar queues . Each queue can have a .Ar priority and a .Ar bandwidth assigned. .Ar Priority mainly controls the time packets take to get sent out, while .Ar bandwidth primarily affects throughput. .Ar hfsc supports both link-sharing and guaranteed real-time services. It employs a service curve based QoS model, and its unique feature is an ability to decouple .Ar delay and .Ar bandwidth allocation. .El .Pp The interfaces on which queueing should be activated are declared using the .Ar altq on declaration. .Ar altq on has the following keywords: .Bl -tag -width xxxx .It Aq Ar interface Queueing is enabled on the named interface. .It Aq Ar scheduler Specifies which queueing scheduler to use. Currently supported values are .Ar cbq for Class Based Queueing, .Ar priq for Priority Queueing and .Ar hfsc for the Hierarchical Fair Service Curve scheduler. .It Ar bandwidth Aq Ar bw The maximum bitrate for all queues on an interface may be specified using the .Ar bandwidth keyword. The value can be specified as an absolute value or as a percentage of the interface bandwidth. When using an absolute value, the suffixes .Ar b , .Ar Kb , .Ar Mb , and .Ar Gb are used to represent bits, kilobits, megabits, and gigabits per second, respectively. The value must not exceed the interface bandwidth. If .Ar bandwidth is not specified, the interface bandwidth is used (but take note that some interfaces do not know their bandwidth, or can adapt their bandwidth rates). .It Ar qlimit Aq Ar limit The maximum number of packets held in the queue. The default is 50. .It Ar tbrsize Aq Ar size Adjusts the size, in bytes, of the token bucket regulator. If not specified, heuristics based on the interface bandwidth are used to determine the size. .It Ar queue Aq Ar list Defines a list of subqueues to create on an interface. .El .Pp In the following example, the interface dc0 should queue up to 5Mbps in four second-level queues using Class Based Queueing. Those four queues will be shown in a later example. .Bd -literal -offset indent altq on dc0 cbq bandwidth 5Mb queue { std, http, mail, ssh } .Ed .Pp Once interfaces are activated for queueing using the .Ar altq directive, a sequence of .Ar queue directives may be defined.
The name associated with a .Ar queue must match a queue defined in the .Ar altq directive (e.g. mail), or, except for the .Ar priq .Ar scheduler , in a parent .Ar queue declaration. The following keywords can be used: .Bl -tag -width xxxx .It Ar on Aq Ar interface Specifies the interface the queue operates on. If not given, it operates on all matching interfaces. .It Ar bandwidth Aq Ar bw Specifies the maximum bitrate to be processed by the queue. This value must not exceed the value of the parent .Ar queue and can be specified as an absolute value or a percentage of the parent queue's bandwidth. If not specified, defaults to 100% of the parent queue's bandwidth. The .Ar priq scheduler does not support bandwidth specification. .It Ar priority Aq Ar level A priority level can be set for each queue. For .Ar cbq and .Ar hfsc , the range is 0 to 7 and for .Ar priq , the range is 0 to 15. The default for all is 1. .Ar Priq queues with a higher priority are always served first. .Ar Cbq and .Ar Hfsc queues with a higher priority are preferred in the case of overload. .It Ar qlimit Aq Ar limit The maximum number of packets held in the queue. The default is 50. .El .Pp The .Ar scheduler can take additional parameters with .Xo Aq Ar scheduler .Pf ( Aq Ar parameters ) . .Xc Parameters are as follows: .Bl -tag -width Fl .It Ar default Packets not matched by another queue are assigned to this one. Exactly one default queue is required. .It Ar red Enable RED (Random Early Detection) on this queue. RED drops packets with a probability proportional to the average queue length. .It Ar rio Enables RIO on this queue. RIO is RED with IN/OUT; it maintains multiple sets of RED parameters, one for in-profile and one for out-of-profile packets. RIO is currently not supported in the GENERIC kernel. .It Ar ecn Enables ECN (Explicit Congestion Notification) on this queue. ECN implies RED. .El .Pp The .Ar cbq .Ar scheduler supports an additional option: .Bl -tag -width Fl .It Ar borrow The queue can borrow bandwidth from the parent. .El .Pp The .Ar hfsc .Ar scheduler supports some additional options: .Bl -tag -width Fl .It Ar realtime Aq Ar sc The minimum required bandwidth for the queue. .It Ar upperlimit Aq Ar sc The maximum allowed bandwidth for the queue. .It Ar linkshare Aq Ar sc The bandwidth share of a backlogged queue. .El .Pp .Aq Ar sc is short for .Ar service curve . .Pp The format for service curve specifications is .Ar ( m1 , d , m2 ) . .Ar m2 controls the bandwidth assigned to the queue. .Ar m1 and .Ar d are optional and can be used to control the initial bandwidth assignment. For the first .Ar d milliseconds the queue gets the bandwidth given as .Ar m1 , afterwards the value given in .Ar m2 . .Pp Furthermore, with .Ar cbq and .Ar hfsc , child queues can be specified as in an .Ar altq declaration, thus building a tree of queues using a part of their parent's bandwidth. .Pp Packets can be assigned to queues based on filter rules by using the .Ar queue keyword. Normally only one .Ar queue is specified; when a second one is specified it will instead be used for packets which have a .Em TOS of .Em lowdelay and for TCP ACKs with no data payload. .Pp To continue the previous example, the examples below would specify the four referenced queues, plus a few child queues. Interactive .Xr ssh 1 sessions get priority over bulk transfers like .Xr scp 1 and .Xr sftp 1 . The queues may then be referenced by filtering rules (see .Sx PACKET FILTERING below).
.Bd -literal queue std bandwidth 10% cbq(default) queue http bandwidth 60% priority 2 cbq(borrow red) \e { employees, developers } queue developers bandwidth 75% cbq(borrow) queue employees bandwidth 15% queue mail bandwidth 10% priority 0 cbq(borrow ecn) queue ssh bandwidth 20% cbq(borrow) { ssh_interactive, ssh_bulk } queue ssh_interactive bandwidth 50% priority 7 cbq(borrow) queue ssh_bulk bandwidth 50% priority 0 cbq(borrow) block return out on dc0 inet all queue std pass out on dc0 inet proto tcp from $developerhosts to any port 80 \e queue developers pass out on dc0 inet proto tcp from $employeehosts to any port 80 \e queue employees pass out on dc0 inet proto tcp from any to any port 22 \e queue(ssh_bulk, ssh_interactive) pass out on dc0 inet proto tcp from any to any port 25 \e queue mail .Ed .Sh QUEUEING with dummynet Queueing can also be done with .Xr dummynet 4 . Queues and pipes can be created with .Xr dnctl 8 . .Pp Packets can be assigned to queues and pipes using .Ar dnqueue and .Ar dnpipe respectively. .Pp Both .Ar dnqueue and .Ar dnpipe take either a single pipe or queue number or two numbers as arguments. The first pipe or queue number will be used to shape the traffic in the rule direction, the second will be used to shape the traffic in the reverse direction. If the rule does not specify a direction, the first packet to create state will be shaped according to the first number, and the response traffic according to the second. .Pp If the .Xr dummynet 4 module is not loaded, any traffic sent into a queue or pipe will be dropped. .Sh TRANSLATION Translation rules modify either the source or destination address of the packets associated with a stateful connection. A stateful connection is automatically created to track packets matching such a rule as long as they are not blocked by the filtering section of .Nm pf.conf . The translation engine modifies the specified address and/or port in the packet, recalculates IP, TCP and UDP checksums as necessary, and passes it to the packet filter for evaluation. .Pp Since translation occurs before filtering, the filter engine will see packets as they look after any addresses and ports have been translated. Filter rules will therefore have to filter based on the translated address and port number. Packets that match a translation rule are only automatically passed if the .Ar pass modifier is given, otherwise they are still subject to .Ar block and .Ar pass rules. .Pp The state entry created permits .Xr pf 4 to keep track of the original address for traffic associated with that state and correctly direct return traffic for that connection. .Pp Various types of translation are possible with pf: .Bl -tag -width xxxx .It Ar binat A .Ar binat rule specifies a bidirectional mapping between an external IP netblock and an internal IP netblock. .It Ar nat A .Ar nat rule specifies that IP addresses are to be changed as the packet traverses the given interface. This technique allows one or more IP addresses on the translating host to support network traffic for a larger range of machines on an "inside" network. Although in theory any IP address can be used on the inside, it is strongly recommended that one of the address ranges defined by RFC 1918 be used. These netblocks are: .Bd -literal 10.0.0.0 - 10.255.255.255 (all of net 10, i.e., 10/8) 172.16.0.0 - 172.31.255.255 (i.e., 172.16/12) 192.168.0.0 - 192.168.255.255 (i.e., 192.168/16) .Ed .It Ar rdr The packet is redirected to another destination and possibly a different port.
.Ar rdr rules can optionally specify port ranges instead of single ports. rdr ... port 2000:2999 -\*(Gt ... port 4000 redirects ports 2000 to 2999 (inclusive) to port 4000. rdr ... port 2000:2999 -\*(Gt ... port 4000:* redirects port 2000 to 4000, 2001 to 4001, ..., 2999 to 4999. .El .Pp In addition to modifying the address, some translation rules may modify source or destination ports for .Xr tcp 4 or .Xr udp 4 connections; implicitly in the case of .Ar nat rules and explicitly in the case of .Ar rdr rules. Port numbers are never translated with a .Ar binat rule. .Pp The evaluation order of the translation rules depends on the type of the translation rule and the direction of the packet. .Ar binat rules are always evaluated first. Then either the .Ar rdr rules are evaluated on an inbound packet or the .Ar nat rules on an outbound packet. Rules of the same type are evaluated in the same order in which they appear in the ruleset. The first matching rule decides what action is taken. .Pp The .Ar no option prefixed to a translation rule causes packets to remain untranslated, much in the same way as .Ar drop quick works in the packet filter (see below). If no rule matches the packet it is passed to the filter engine unmodified. .Pp Translation rules apply only to packets that pass through the specified interface, and if no interface is specified, translation is applied to packets on all interfaces. For instance, redirecting port 80 on an external interface to an internal web server will only work for connections originating from the outside. Connections to the address of the external interface from local hosts will not be redirected, since such packets do not actually pass through the external interface. Redirections cannot reflect packets back through the interface they arrive on; they can only be redirected to hosts connected to different interfaces or to the firewall itself. .Pp Note that redirecting external incoming connections to the loopback address, as in .Bd -literal -offset indent rdr on ne3 inet proto tcp to port smtp -\*(Gt 127.0.0.1 port spamd .Ed .Pp will effectively allow an external host to connect to daemons bound solely to the loopback address, circumventing the traditional blocking of such connections on a real interface. Unless this effect is desired, any of the local non-loopback addresses should be used as redirection target instead, which allows external connections only to daemons bound to this address or not bound to any address. .Pp See .Sx TRANSLATION EXAMPLES below. .Sh PACKET FILTERING .Xr pf 4 has the ability to .Ar block , .Ar pass and .Ar match packets based on attributes of their layer 3 (see .Xr ip 4 and .Xr ip6 4 ) and layer 4 (see .Xr icmp 4 , .Xr icmp6 4 , .Xr tcp 4 , .Xr udp 4 ) headers. In addition, packets may also be assigned to queues for the purpose of bandwidth control. .Pp For each packet processed by the packet filter, the filter rules are evaluated in sequential order, from first to last. For .Ar block and .Ar pass , the last matching rule decides what action is taken. For .Ar match , rules are evaluated every time they match; the pass/block state of a packet remains unchanged. If no rule matches the packet, the default action is to pass the packet. .Pp The following actions can be used in the filter: .Bl -tag -width xxxx .It Ar block The packet is blocked. There are a number of ways in which a .Ar block rule can behave when blocking a packet.
The default behaviour is to .Ar drop packets silently; however, this can be overridden or made explicit either globally, by setting the .Ar block-policy option, or on a per-rule basis with one of the following options: .Pp .Bl -tag -width xxxx -compact .It Ar drop The packet is silently dropped. .It Ar return-rst This applies only to .Xr tcp 4 packets, and issues a TCP RST which closes the connection. .It Ar return-icmp .It Ar return-icmp6 This causes ICMP messages to be returned for packets which match the rule. By default this is an ICMP UNREACHABLE message; however, this can be overridden by specifying a message as a code or number. .It Ar return This causes a TCP RST to be returned for .Xr tcp 4 packets and an ICMP UNREACHABLE for UDP and other packets. .El .Pp Options returning ICMP packets currently have no effect if .Xr pf 4 operates on a .Xr if_bridge 4 , as the code to support this feature has not yet been implemented. .Pp The simplest mechanism to block everything by default and only pass packets that match explicit rules is to specify a first filter rule of: .Bd -literal -offset indent block all .Ed .It Ar match The packet is matched. This mechanism is used to provide fine grained filtering without altering the block/pass state of a packet. .Ar match rules differ from .Ar block and .Ar pass rules in that parameters are set every time a packet matches the rule, not only on the last matching rule. For the following parameters, this means that the parameter effectively becomes "sticky" until explicitly overridden: .Ar queue , .Ar dnpipe , .Ar dnqueue . .It Ar pass The packet is passed; state is created unless the .Ar no state option is specified. +.It Ar match +Action is unaltered; the previously matched rule's action still applies. +Match rules apply queue and rtable assignments for every matched packet, +and subsequent matching pass or match rules can overwrite the assignment; +if they don't specify a queue or an rtable, respectively, the previously +set value remains. +Additionally, match rules can contain log statements; the logging is done +for each and every matching match rule, so it is possible to log a single +packet multiple times. .El .Pp By default .Xr pf 4 filters packets statefully; the first time a packet matches a .Ar pass rule, a state entry is created; for subsequent packets the filter checks whether the packet matches any state. If it does, the packet is passed without evaluation of any rules. After the connection is closed or times out, the state entry is automatically removed. .Pp This has several advantages. For TCP connections, comparing a packet to a state involves checking its sequence numbers, as well as TCP timestamps if a .Ar scrub reassemble tcp rule applies to the connection. If these values are outside the narrow windows of expected values, the packet is dropped. This prevents spoofing attacks, such as when an attacker sends packets with a fake source address/port but does not know the connection's sequence numbers. Similarly, .Xr pf 4 knows how to match ICMP replies to states. For example, .Bd -literal -offset indent pass out inet proto icmp all icmp-type echoreq .Ed .Pp allows echo requests (such as those created by .Xr ping 8 ) out statefully, and matches incoming echo replies correctly to states. .Pp Also, looking up states is usually faster than evaluating rules. If there are 50 rules, all of them are evaluated sequentially in O(n).
Even with 50000 states, only 16 comparisons are needed to match a state, since states are stored in a binary search tree that allows searches in O(log2 n). .Pp Furthermore, correct handling of ICMP error messages is critical to many protocols, particularly TCP. .Xr pf 4 matches ICMP error messages to the correct connection, checks them against connection parameters, and passes them if appropriate. For example, if an ICMP source quench message referring to a stateful TCP connection arrives, it will be matched to the state and get passed. .Pp Finally, state tracking is required for .Ar nat , binat No and Ar rdr rules, in order to track address and port translations and reverse the translation on returning packets. .Pp .Xr pf 4 will also create state for other protocols which are effectively stateless by nature. UDP packets are matched to states using only host addresses and ports, and other protocols are matched to states using only the host addresses. .Pp If stateless filtering of individual packets is desired, the .Ar no state keyword can be used to specify that state will not be created if this is the last matching rule. A number of parameters can also be set to affect how .Xr pf 4 handles state tracking. See .Sx STATEFUL TRACKING OPTIONS below for further details. .Sh PARAMETERS The rule parameters specify the packets to which a rule applies. A packet always comes in on, or goes out through, one interface. Most parameters are optional. If a parameter is specified, the rule only applies to packets with matching attributes. Certain parameters can be expressed as lists, in which case .Xr pfctl 8 generates all needed rule combinations. .Bl -tag -width xxxx .It Ar in No or Ar out This rule applies to incoming or outgoing packets. If neither .Ar in nor .Ar out are specified, the rule will match packets in both directions. .It Ar log In addition to the action specified, a log message is generated. Only the packet that establishes the state is logged, unless the .Ar no state option is specified. The logged packets are sent to a .Xr pflog 4 interface, by default .Ar pflog0 . This interface is monitored by the .Xr pflogd 8 logging daemon, which dumps the logged packets to the file .Pa /var/log/pflog in .Xr pcap 3 binary format. .It Ar log (all) Used to force logging of all packets for a connection. This is not necessary when .Ar no state is explicitly specified. As with .Ar log , packets are logged to .Xr pflog 4 . .It Ar log (user) Logs the .Ux user ID of the user that owns the socket and the PID of the process that has the socket open where the packet is sourced from or destined to (depending on which socket is local). This is in addition to the normal information logged. .Pp Only the first packet logged via .Ar log (all, user) will have the user credentials logged when using stateful matching. .It Ar log (to Aq Ar interface ) Send logs to the specified .Xr pflog 4 interface instead of .Ar pflog0 . .It Ar quick If a packet matches a rule which has the .Ar quick option set, this rule is considered the last matching rule, and evaluation of subsequent rules is skipped. .It Ar on Aq Ar interface This rule applies only to packets coming in on, or going out through, this particular interface or interface group. For more information on interface groups, see the .Ic group keyword in .Xr ifconfig 8 . .It Aq Ar af This rule applies only to packets of this address family. Supported values are .Ar inet and .Ar inet6 . .It Ar proto Aq Ar protocol This rule applies only to packets of this protocol.
Common protocols are .Xr icmp 4 , .Xr icmp6 4 , .Xr tcp 4 , and .Xr udp 4 . For a list of all the protocol name to number mappings used by .Xr pfctl 8 , see the file .Pa /etc/protocols . .It Xo .Ar from Aq Ar source .Ar port Aq Ar source .Ar os Aq Ar source .Ar to Aq Ar dest .Ar port Aq Ar dest .Xc This rule applies only to packets with the specified source and destination addresses and ports. .Pp Addresses can be specified in CIDR notation (matching netblocks), as symbolic host names, interface names or interface group names, or as any of the following keywords: .Pp .Bl -tag -width xxxxxxxxxxxxxx -compact .It Ar any Any address. .It Ar no-route Any address which is not currently routable. .It Ar urpf-failed Any source address that fails a unicast reverse path forwarding (URPF) check, i.e. packets coming in on an interface other than that which holds the route back to the packet's source address. .It Aq Ar table Any address that matches the given table. .El .Pp Ranges of addresses are specified by using the .Sq - operator. For instance: .Dq 10.1.1.10 - 10.1.1.12 means all addresses from 10.1.1.10 to 10.1.1.12, hence addresses 10.1.1.10, 10.1.1.11, and 10.1.1.12. .Pp Interface names and interface group names can have modifiers appended: .Pp .Bl -tag -width xxxxxxxxxxxx -compact .It Ar :network Translates to the network(s) attached to the interface. .It Ar :broadcast Translates to the interface's broadcast address(es). .It Ar :peer Translates to the point-to-point interface's peer address(es). .It Ar :0 Do not include interface aliases. .El .Pp Host names may also have the .Ar :0 option appended to restrict the name resolution to the first of each v4 and non-link-local v6 address found. .Pp Host name resolution and interface to address translation are done at ruleset load-time. When the address of an interface (or host name) changes (under DHCP or PPP, for instance), the ruleset must be reloaded for the change to be reflected in the kernel. Surrounding the interface name (and optional modifiers) in parentheses changes this behaviour. When the interface name is surrounded by parentheses, the rule is automatically updated whenever the interface changes its address. The ruleset does not need to be reloaded. This is especially useful with .Ar nat . .Pp Ports can be specified either by number or by name. For example, port 80 can be specified as .Em www . For a list of all port name to number mappings used by .Xr pfctl 8 , see the file .Pa /etc/services . .Pp Ports and ranges of ports are specified by using these operators: .Bd -literal -offset indent = (equal) != (unequal) \*(Lt (less than) \*(Le (less than or equal) \*(Gt (greater than) \*(Ge (greater than or equal) : (range including boundaries) \*(Gt\*(Lt (range excluding boundaries) \*(Lt\*(Gt (except range) .Ed .Pp .Sq \*(Gt\*(Lt , .Sq \*(Lt\*(Gt and .Sq \&: are binary operators (they take two arguments). For instance: .Bl -tag -width Fl .It Ar port 2000:2004 means .Sq all ports \*(Ge 2000 and \*(Le 2004 , hence ports 2000, 2001, 2002, 2003 and 2004. .It Ar port 2000 \*(Gt\*(Lt 2004 means .Sq all ports \*(Gt 2000 and \*(Lt 2004 , hence ports 2001, 2002 and 2003. .It Ar port 2000 \*(Lt\*(Gt 2004 means .Sq all ports \*(Lt 2000 or \*(Gt 2004 , hence ports 1-1999 and 2005-65535. .El .Pp The operating system of the source host can be specified in the case of TCP rules with the .Ar OS modifier. See the .Sx OPERATING SYSTEM FINGERPRINTING section for more information. 
.Pp The host, port and OS specifications are optional, as in the following examples: .Bd -literal -offset indent pass in all pass in from any to any pass in proto tcp from any port \*(Le 1024 to any pass in proto tcp from any to any port 25 pass in proto tcp from 10.0.0.0/8 port \*(Gt 1024 \e to ! 10.1.2.3 port != ssh pass in proto tcp from any os "OpenBSD" .Ed .It Ar all This is equivalent to "from any to any". .It Ar group Aq Ar group Similar to .Ar user , this rule only applies to packets of sockets owned by the specified group. .It Ar user Aq Ar user This rule only applies to packets of sockets owned by the specified user. For outgoing connections initiated from the firewall, this is the user that opened the connection. For incoming connections to the firewall itself, this is the user that listens on the destination port. For forwarded connections, where the firewall is not a connection endpoint, the user and group are .Em unknown . .Pp All packets, both outgoing and incoming, of one connection are associated with the same user and group. Only TCP and UDP packets can be associated with users; for other protocols these parameters are ignored. .Pp User and group refer to the effective (as opposed to the real) IDs, in case the socket is created by a setuid/setgid process. User and group IDs are stored when a socket is created; when a process creates a listening socket as root (for instance, by binding to a privileged port) and subsequently changes to another user ID (to drop privileges), the credentials will remain root. .Pp User and group IDs can be specified as either numbers or names. The syntax is similar to the one for ports. The value .Em unknown matches packets of forwarded connections. .Em unknown can only be used with the operators .Cm = and .Cm != . Other constructs like .Cm user \*(Ge unknown are invalid. Forwarded packets with unknown user and group ID match only rules that explicitly compare against .Em unknown with the operators .Cm = or .Cm != . For instance .Cm user \*(Ge 0 does not match forwarded packets. The following example allows only selected users to open outgoing connections: .Bd -literal -offset indent block out proto { tcp, udp } all pass out proto { tcp, udp } all user { \*(Lt 1000, dhartmei } .Ed .It Xo Ar flags Aq Ar a .Pf / Ns Aq Ar b .No \*(Ba / Ns Aq Ar b .No \*(Ba any .Xc This rule only applies to TCP packets that have the flags .Aq Ar a set out of set .Aq Ar b . Flags not specified in .Aq Ar b are ignored. For stateful connections, the default is .Ar flags S/SA . To indicate that flags should not be checked at all, specify .Ar flags any . The flags are: (F)IN, (S)YN, (R)ST, (P)USH, (A)CK, (U)RG, (E)CE, and C(W)R. .Bl -tag -width Fl .It Ar flags S/S Flag SYN is set. The other flags are ignored. .It Ar flags S/SA This is the default setting for stateful connections. Out of SYN and ACK, exactly SYN may be set. SYN, SYN+PSH and SYN+RST match, but SYN+ACK, ACK and ACK+RST do not. This is more restrictive than the previous example. .It Ar flags /SFRA If the first set is not specified, it defaults to none. All of SYN, FIN, RST and ACK must be unset. .El .Pp Because .Ar flags S/SA is applied by default (unless .Ar no state is specified), only the initial SYN packet of a TCP handshake will create a state for a TCP connection. It is possible to be less restrictive, and allow state creation from intermediate .Pq non-SYN packets, by specifying .Ar flags any . 
This will cause .Xr pf 4 to synchronize to existing connections, for instance if one flushes the state table. However, states created from such intermediate packets may be missing connection details such as the TCP window scaling factor. States which modify the packet flow, such as those affected by .Ar nat , binat No or Ar rdr rules, .Ar modulate No or Ar synproxy state options, or scrubbed with .Ar reassemble tcp will also not be recoverable from intermediate packets. Such connections will stall and time out. .It Xo Ar icmp-type Aq Ar type .Ar code Aq Ar code .Xc .It Xo Ar icmp6-type Aq Ar type .Ar code Aq Ar code .Xc This rule only applies to ICMP or ICMPv6 packets with the specified type and code. Text names for ICMP types and codes are listed in .Xr icmp 4 and .Xr icmp6 4 . This parameter is only valid for rules that cover protocols ICMP or ICMP6. The protocol and the ICMP type indicator .Po .Ar icmp-type or .Ar icmp6-type .Pc must match. .It Xo Ar tos Aq Ar string .No \*(Ba Aq Ar number .Xc This rule applies to packets with the specified .Em TOS bits set. .Em TOS may be given as one of .Ar critical , .Ar inetcontrol , .Ar lowdelay , .Ar netcontrol , .Ar throughput , .Ar reliability , or one of the DiffServ Code Points: .Ar ef , .Ar va , .Ar af11 No ... Ar af43 , .Ar cs0 No ... Ar cs7 ; or as either hex or decimal. .Pp For example, the following rules are identical: .Bd -literal -offset indent pass all tos lowdelay pass all tos 0x10 pass all tos 16 .Ed .It Ar allow-opts By default, IPv4 packets with IP options or IPv6 packets with routing extension headers are blocked. When .Ar allow-opts is specified for a .Ar pass rule, packets that pass the filter based on that rule (last matching) do so even if they contain IP options or routing extension headers. For packets that match state, the rule that initially created the state is used. The implicit .Ar pass rule that is used when a packet does not match any rules does not allow IP options. .It Ar label Aq Ar string Adds a label (name) to the rule, which can be used to identify the rule. For instance, pfctl -s labels shows per-rule statistics for rules that have labels. .Pp The following macros can be used in labels: .Pp .Bl -tag -width $srcaddr -compact -offset indent .It Ar $if The interface. .It Ar $srcaddr The source IP address. .It Ar $dstaddr The destination IP address. .It Ar $srcport The source port specification. .It Ar $dstport The destination port specification. .It Ar $proto The protocol name. .It Ar $nr The rule number. .El .Pp For example: .Bd -literal -offset indent ips = \&"{ 1.2.3.4, 1.2.3.5 }\&" pass in proto tcp from any to $ips \e port \*(Gt 1023 label \&"$dstaddr:$dstport\&" .Ed .Pp expands to .Bd -literal -offset indent pass in inet proto tcp from any to 1.2.3.4 \e port \*(Gt 1023 label \&"1.2.3.4:\*(Gt1023\&" pass in inet proto tcp from any to 1.2.3.5 \e port \*(Gt 1023 label \&"1.2.3.5:\*(Gt1023\&" .Ed .Pp The macro expansion for the .Ar label directive occurs only at configuration file parse time, not during runtime. .It Ar ridentifier Aq Ar number Add an identifier (number) to the rule, which can be used to correlate the rule to pflog entries, even after ruleset updates. .It Xo Ar queue Aq Ar queue .No \*(Ba ( Aq Ar queue , .Aq Ar queue ) .Xc Packets matching this rule will be assigned to the specified queue. If two queues are given, packets which have a .Em TOS of .Em lowdelay and TCP ACKs with no data payload will be assigned to the second one. See .Sx QUEUEING for setup details. 
.Pp For example: .Bd -literal -offset indent pass in proto tcp to port 25 queue mail pass in proto tcp to port 22 queue(ssh_bulk, ssh_prio) .Ed .It Cm set prio Ar priority | Pq Ar priority , priority Packets matching this rule will be assigned a specific queueing priority. Priorities are assigned as integers 0 through 7. If the packet is transmitted on a .Xr vlan 4 interface, the queueing priority will be written as the priority code point in the 802.1Q VLAN header. If two priorities are given, packets which have a TOS of .Cm lowdelay and TCP ACKs with no data payload will be assigned to the second one. .Pp For example: .Bd -literal -offset indent pass in proto tcp to port 25 set prio 2 pass in proto tcp to port 22 set prio (2, 5) .Ed .It Ar tag Aq Ar string Packets matching this rule will be tagged with the specified string. The tag acts as an internal marker that can be used to identify these packets later on. This can be used, for example, to provide trust between interfaces and to determine if packets have been processed by translation rules. Tags are .Qq sticky , meaning that the packet will be tagged even if the rule is not the last matching rule. Further matching rules can replace the tag with a new one but will not remove a previously applied tag. A packet is only ever assigned one tag at a time. Packet tagging can be done during .Ar nat , .Ar rdr , .Ar binat or .Ar ether rules in addition to filter rules. Tags take the same macros as labels (see above). .It Ar tagged Aq Ar string Used with filter, translation or scrub rules to specify that packets must already be tagged with the given tag in order to match the rule. Inverse tag matching can also be done by specifying the .Cm !\& operator before the .Ar tagged keyword. .It Ar rtable Aq Ar number Used to select an alternate routing table for the routing lookup. Only effective before the route lookup happens, i.e. when filtering inbound. .It Xo Ar divert-to Aq Ar host .Ar port Aq Ar port .Xc Used to redirect packets to a local socket bound to .Ar host and .Ar port . The packets will not be modified, so .Xr getsockname 2 on the socket will return the original destination address of the packet. .It Ar divert-reply Used to receive replies for sockets that are bound to addresses which are not local to the machine. See .Xr setsockopt 2 for information on how to bind these sockets. .It Ar probability Aq Ar number A probability attribute can be attached to a rule, with a value set between 0 and 1, bounds not included. In that case, the rule will be honoured using the given probability value only. For example, the following rule will drop 20% of incoming ICMP packets: .Bd -literal -offset indent block in proto icmp probability 20% .Ed .It Ar prio Aq Ar number Only match packets which have the given queueing priority assigned. .El .Sh ROUTING If a packet matches a rule with a route option set, the packet filter will route the packet according to the type of route option. When such a rule creates state, the route option is also applied to all packets matching the same connection. .Bl -tag -width xxxx .It Ar route-to The .Ar route-to option routes the packet to the specified interface with an optional address for the next hop. When a .Ar route-to rule creates state, only packets that pass in the same direction as the filter rule specifies will be routed in this way. Packets passing in the opposite direction (replies) are not affected and are routed normally.
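.Pp
For example, a rule such as the following (the interface em1, the next-hop address 192.0.2.1 and the macros are illustrative) would forward matching packets through em1 via that gateway:
.Bd -literal -offset indent
pass in on $int_if route-to (em1 192.0.2.1) from $lan_net to any
.Ed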
.It Ar reply-to The .Ar reply-to option is similar to .Ar route-to , but routes packets that pass in the opposite direction (replies) to the specified interface. Opposite direction is only defined in the context of a state entry, and .Ar reply-to is useful only in rules that create state. It can be used on systems with multiple external connections to route all outgoing packets of a connection through the interface the incoming connection arrived through (symmetric routing enforcement). .It Ar dup-to The .Ar dup-to option creates a duplicate of the packet and routes it like .Ar route-to . The original packet gets routed as it normally would. .El .Sh POOL OPTIONS For .Ar nat and .Ar rdr rules (as well as for the .Ar route-to , .Ar reply-to and .Ar dup-to rule options) for which there is a single redirection address which has a subnet mask smaller than 32 for IPv4 or 128 for IPv6 (more than one IP address), a variety of different methods for assigning this address can be used: .Bl -tag -width xxxx .It Ar bitmask The .Ar bitmask option applies the network portion of the redirection address to the address to be modified (source with .Ar nat , destination with .Ar rdr ) . .It Ar random The .Ar random option selects an address at random within the defined block of addresses. .It Ar source-hash The .Ar source-hash option uses a hash of the source address to determine the redirection address, ensuring that the redirection address is always the same for a given source. An optional key can be specified after this keyword either in hex or as a string; by default .Xr pfctl 8 randomly generates a key for source-hash every time the ruleset is reloaded. .It Ar round-robin The .Ar round-robin option loops through the redirection address(es). .Pp When more than one redirection address is specified, .Ar round-robin is the only permitted pool type. .It Ar static-port With .Ar nat rules, the .Ar static-port option prevents .Xr pf 4 from modifying the source port on TCP and UDP packets. .It Xo Ar map-e-portset Aq Ar psid-offset .No / Aq Ar psid-len .No / Aq Ar psid .Xc With .Ar nat rules, the .Ar map-e-portset option enables the source port translation of MAP-E (RFC 7597) Customer Edge. In order to make the host act as a MAP-E Customer Edge, a tunneling interface must be set up, and pass rules for the encapsulated packets are required in addition to the map-e-portset nat rule. .Pp For example: .Bd -literal -offset indent nat on $gif_mape_if from $int_if:network to any \e -> $ipv4_mape_src map-e-portset 6/8/0x34 .Ed .Pp sets PSID offset 6, PSID length 8, PSID 0x34. .El .Pp Additionally, the .Ar sticky-address option can be specified to help ensure that multiple connections from the same source are mapped to the same redirection address. This option can be used with the .Ar random and .Ar round-robin pool options. Note that by default these associations are destroyed as soon as there are no longer states which refer to them; in order to make the mappings last beyond the lifetime of the states, increase the global .Ar src.track timeout with .Ar set timeout src.track . See .Sx STATEFUL TRACKING OPTIONS for more ways to control the source tracking. .Sh STATE MODULATION Much of the security derived from TCP is attributable to how well the initial sequence numbers (ISNs) are chosen. Some popular stack implementations choose .Em very poor ISNs and thus are normally susceptible to ISN prediction exploits.
By applying a .Ar modulate state rule to a TCP connection, .Xr pf 4 will create a high quality random sequence number for each connection endpoint. .Pp The .Ar modulate state directive implicitly keeps state on the rule and is only applicable to TCP connections. .Pp For instance: .Bd -literal -offset indent block all pass out proto tcp from any to any modulate state pass in proto tcp from any to any port 25 flags S/SFRA modulate state .Ed .Pp Note that modulated connections will not recover when the state table is lost (firewall reboot, flushing the state table, etc.). .Xr pf 4 will not be able to infer a connection again after the state table flushes the connection's modulator. When the state is lost, the connection may be left dangling until the respective endpoints time out the connection. It is possible on a fast local network for the endpoints to start an ACK storm while trying to resynchronize after the loss of the modulator. The default .Ar flags settings (or a more strict equivalent) should be used on .Ar modulate state rules to prevent ACK storms. .Pp Note that alternative methods are available to prevent loss of the state table and allow for firewall failover. See .Xr carp 4 and .Xr pfsync 4 for further information. .Sh SYN PROXY By default, .Xr pf 4 passes packets that are part of a .Xr tcp 4 handshake between the endpoints. The .Ar synproxy state option can be used to cause .Xr pf 4 itself to complete the handshake with the active endpoint, perform a handshake with the passive endpoint, and then forward packets between the endpoints. .Pp No packets are sent to the passive endpoint before the active endpoint has completed the handshake, hence so-called SYN floods with spoofed source addresses will not reach the passive endpoint, as the sender can't complete the handshake. .Pp The proxy is transparent to both endpoints; they each see a single connection from/to the other endpoint. .Xr pf 4 chooses random initial sequence numbers for both handshakes. Once the handshakes are completed, the sequence number modulators (see previous section) are used to translate further packets of the connection. .Ar synproxy state includes .Ar modulate state . .Pp Rules with .Ar synproxy will not work if .Xr pf 4 operates on a .Xr bridge 4 . .Pp Example: .Bd -literal -offset indent pass in proto tcp from any to any port www synproxy state .Ed .Sh STATEFUL TRACKING OPTIONS A number of options related to stateful tracking can be applied on a per-rule basis. .Ar keep state , .Ar modulate state and .Ar synproxy state support these options, and .Ar keep state must be specified explicitly to apply options to a rule. .Pp .Bl -tag -width xxxx -compact .It Ar max Aq Ar number Limits the number of concurrent states the rule may create. When this limit is reached, further packets that would create state will not match this rule until existing states time out. .It Ar no-sync Prevent state changes for states created by this rule from appearing on the .Xr pfsync 4 interface. .It Xo Aq Ar timeout .Aq Ar seconds .Xc Changes the timeout values used for states created by this rule. For a list of all valid timeout names, see .Sx OPTIONS above. .It Ar sloppy Uses a sloppy TCP connection tracker that does not check sequence numbers at all, which makes insertion and ICMP teardown attacks significantly easier. This is intended to be used in situations where one does not see all packets of a connection, e.g. in asymmetric routing situations. Cannot be used with modulate or synproxy state.
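.Pp
For example, on a firewall which sees only one direction of a connection, a rule such as the following (the $webserver macro is illustrative) might be used:
.Bd -literal -offset indent
pass in proto tcp from any to $webserver port www keep state (sloppy)
.Ed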
.El .Pp Multiple options can be specified, separated by commas: .Bd -literal -offset indent pass in proto tcp from any to any \e port www keep state \e (max 100, source-track rule, max-src-nodes 75, \e max-src-states 3, tcp.established 60, tcp.closing 5) .Ed .Pp When the .Ar source-track keyword is specified, the number of states per source IP is tracked. .Pp .Bl -tag -width xxxx -compact .It Ar source-track rule The maximum number of states created by this rule is limited by the rule's .Ar max-src-nodes and .Ar max-src-states options. Only state entries created by this particular rule count toward the rule's limits. .It Ar source-track global The number of states created by all rules that use this option is limited. Each rule can specify different .Ar max-src-nodes and .Ar max-src-states options; however, state entries created by any participating rule count towards each individual rule's limits. .El .Pp The following limits can be set: .Pp .Bl -tag -width xxxx -compact .It Ar max-src-nodes Aq Ar number Limits the maximum number of source addresses which can simultaneously have state table entries. .It Ar max-src-states Aq Ar number Limits the maximum number of simultaneous state entries that a single source address can create with this rule. .El .Pp For stateful TCP connections, limits on established connections (connections which have completed the TCP 3-way handshake) can also be enforced per source IP. .Pp .Bl -tag -width xxxx -compact .It Ar max-src-conn Aq Ar number Limits the maximum number of simultaneous TCP connections which have completed the 3-way handshake that a single host can make. .It Xo Ar max-src-conn-rate Aq Ar number .No / Aq Ar seconds .Xc Limits the rate of new connections over a time interval. The connection rate is an approximation calculated as a moving average. .El .Pp Because the 3-way handshake ensures that the source address is not being spoofed, more aggressive action can be taken based on these limits. With the .Ar overload Aq Ar table state option, source IP addresses which hit either of the limits on established connections will be added to the named table. This table can be used in the ruleset to block further activity from the offending host, redirect it to a tarpit process, or restrict its bandwidth. .Pp The optional .Ar flush keyword kills all states created by the matching rule which originate from the host which exceeds these limits. The .Ar global modifier to the flush command kills all states originating from the offending host, regardless of which rule created the state. .Pp For example, the following rules will protect the webserver against hosts making more than 100 connections in 10 seconds. Any host which connects faster than this rate will have its address added to the .Aq bad_hosts table and have all states originating from it flushed. Any new packets arriving from this host will be dropped unconditionally by the block rule. .Bd -literal -offset indent block quick from \*(Ltbad_hosts\*(Gt pass in on $ext_if proto tcp to $webserver port www keep state \e (max-src-conn-rate 100/10, overload \*(Ltbad_hosts\*(Gt flush global) .Ed .Sh OPERATING SYSTEM FINGERPRINTING Passive OS Fingerprinting is a mechanism to inspect nuances of a TCP connection's initial SYN packet and guess at the host's operating system. Unfortunately these nuances are easily spoofed by an attacker, so the fingerprint is not useful in making security decisions. But the fingerprint is typically accurate enough to base policy decisions on.
.Pp The fingerprints may be specified by operating system class, by version, or by subtype/patchlevel. The class of an operating system is typically the vendor or genre and would be .Ox for the .Xr pf 4 firewall itself. The version of the oldest available .Ox release on the main FTP site would be 2.6 and the fingerprint would be written .Pp .Dl \&"OpenBSD 2.6\&" .Pp The subtype of an operating system is typically used to describe the patchlevel if that patch led to changes in the TCP stack behavior. In the case of .Ox , the only subtype is for a fingerprint that was normalized by the .Ar no-df scrub option and would be specified as .Pp .Dl \&"OpenBSD 3.3 no-df\&" .Pp Fingerprints for most popular operating systems are provided by .Xr pf.os 5 . Once .Xr pf 4 is running, a complete list of known operating system fingerprints may be listed by running: .Pp .Dl # pfctl -so .Pp Filter rules can enforce policy at any level of operating system specification assuming a fingerprint is present. Policy could limit traffic to approved operating systems or even ban traffic from hosts that aren't at the latest service pack. .Pp The .Ar unknown class can also be used as the fingerprint which will match packets for which no operating system fingerprint is known. .Pp Examples: .Bd -literal -offset indent pass out proto tcp from any os OpenBSD block out proto tcp from any os Doors block out proto tcp from any os "Doors PT" block out proto tcp from any os "Doors PT SP3" block out from any os "unknown" pass on lo0 proto tcp from any os "OpenBSD 3.3 lo0" .Ed .Pp Operating system fingerprinting is limited only to the TCP SYN packet. This means that it will not work on other protocols and will not match a currently established connection. .Pp Caveat: operating system fingerprints are occasionally wrong. There are three problems: an attacker can trivially craft his packets to appear as any operating system he chooses; an operating system patch could change the stack behavior and no fingerprints will match it until the database is updated; and multiple operating systems may have the same fingerprint. .Sh BLOCKING SPOOFED TRAFFIC "Spoofing" is the faking of IP addresses, typically for malicious purposes. The .Ar antispoof directive expands to a set of filter rules which will block all traffic with a source IP from the network(s) directly connected to the specified interface(s) from entering the system through any other interface. .Pp For example, the line .Bd -literal -offset indent antispoof for lo0 .Ed .Pp expands to .Bd -literal -offset indent block drop in on ! lo0 inet from 127.0.0.1/8 to any block drop in on ! lo0 inet6 from ::1 to any .Ed .Pp For non-loopback interfaces, there are additional rules to block incoming packets with a source IP address identical to the interface's IP(s). For example, assuming the interface wi0 had an IP address of 10.0.0.1 and a netmask of 255.255.255.0, the line .Bd -literal -offset indent antispoof for wi0 inet .Ed .Pp expands to .Bd -literal -offset indent block drop in on ! wi0 inet from 10.0.0.0/24 to any block drop in inet from 10.0.0.1 to any .Ed .Pp Caveat: Rules created by the .Ar antispoof directive interfere with packets sent over loopback interfaces to local addresses. One should pass these explicitly. .Sh FRAGMENT HANDLING The size of IP datagrams (packets) can be significantly larger than the maximum transmission unit (MTU) of the network. 
In cases when it is necessary or more efficient to send such large packets, the large packet will be fragmented into many smaller packets that will each fit onto the wire. Unfortunately for a firewalling device, only the first logical fragment will contain the necessary header information for the subprotocol that allows .Xr pf 4 to filter on things such as TCP ports or to perform NAT. .Pp Besides the use of .Ar scrub rules as described in .Sx TRAFFIC NORMALIZATION above, there are three options for handling fragments in the packet filter. .Pp One alternative is to filter individual fragments with filter rules. If no .Ar scrub rule applies to a fragment, it is passed to the filter. Filter rules with matching IP header parameters decide whether the fragment is passed or blocked, in the same way as complete packets are filtered. Without reassembly, fragments can only be filtered based on IP header fields (source/destination address, protocol), since subprotocol header fields are not available (TCP/UDP port numbers, ICMP code/type). The .Ar fragment option can be used to restrict filter rules to apply only to fragments, but not complete packets. Filter rules without the .Ar fragment option still apply to fragments, if they only specify IP header fields. For instance, the rule .Bd -literal -offset indent pass in proto tcp from any to any port 80 .Ed .Pp never applies to a fragment, even if the fragment is part of a TCP packet with destination port 80, because without reassembly this information is not available for each fragment. This also means that fragments cannot create new or match existing state table entries, which makes stateful filtering and address translation (NAT, redirection) for fragments impossible. .Pp It's also possible to reassemble only certain fragments by specifying source or destination addresses or protocols as parameters in .Ar scrub rules. .Pp In most cases, the benefits of reassembly outweigh the additional memory cost, and it's recommended to use .Ar scrub rules to reassemble all fragments via the .Ar fragment reassemble modifier. .Pp The memory allocated for fragment caching can be limited using .Xr pfctl 8 . Once this limit is reached, fragments that would have to be cached are dropped until other entries time out. The timeout value can also be adjusted. .Pp When forwarding reassembled IPv6 packets, pf refragments them with the original maximum fragment size. This allows the sender to determine the optimal fragment size by path MTU discovery. .Sh ANCHORS Besides the main ruleset, .Xr pfctl 8 can load rulesets into .Ar anchor attachment points. An .Ar anchor is a container that can hold rules, address tables, and other anchors. .Pp An .Ar anchor has a name which specifies the path where .Xr pfctl 8 can be used to access the anchor to perform operations on it, such as attaching child anchors to it or loading rules into it. Anchors may be nested, with components separated by .Sq / characters, similar to how file system hierarchies are laid out. The main ruleset is actually the default anchor, so filter and translation rules, for example, may also be contained in any anchor. .Pp An anchor can reference another .Ar anchor attachment point using the following kinds of rules: .Bl -tag -width xxxx .It Ar nat-anchor Aq Ar name Evaluates the .Ar nat rules in the specified .Ar anchor . .It Ar rdr-anchor Aq Ar name Evaluates the .Ar rdr rules in the specified .Ar anchor . .It Ar binat-anchor Aq Ar name Evaluates the .Ar binat rules in the specified .Ar anchor . 
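.Pp For example, the following rule (illustrative; the anchor name vpn is hypothetical) evaluates the .Ar binat rules held in that anchor for packets on the external interface: .Bd -literal -offset indent binat-anchor "vpn" on $ext_if .Ed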
.It Ar anchor Aq Ar name Evaluates the filter rules in the specified .Ar anchor . .It Xo Ar load anchor .Aq Ar name .Ar from Aq Ar file .Xc Loads the rules from the specified file into the anchor .Ar name . .El .Pp When evaluation of the main ruleset reaches an .Ar anchor rule, .Xr pf 4 will proceed to evaluate all rules specified in that anchor. .Pp Matching filter and translation rules marked with the .Ar quick option are final and abort the evaluation of the rules in other anchors and the main ruleset. If the .Ar anchor itself is marked with the .Ar quick option, ruleset evaluation will terminate when the anchor is exited if the packet is matched by any rule within the anchor. .Pp .Ar anchor rules are evaluated relative to the anchor in which they are contained. For example, all .Ar anchor rules specified in the main ruleset will reference anchor attachment points underneath the main ruleset, and .Ar anchor rules specified in a file loaded from a .Ar load anchor rule will be attached under that anchor point. .Pp Rules may be contained in .Ar anchor attachment points which do not contain any rules when the main ruleset is loaded, and later such anchors can be manipulated through .Xr pfctl 8 without reloading the main ruleset or other anchors. For example, .Bd -literal -offset indent ext_if = \&"kue0\&" block on $ext_if all anchor spam pass out on $ext_if all pass in on $ext_if proto tcp from any \e to $ext_if port smtp .Ed .Pp blocks all packets on the external interface by default, then evaluates all rules in the .Ar anchor named "spam", and finally passes all outgoing connections and incoming connections to port 25. .Bd -literal -offset indent # echo \&"block in quick from 1.2.3.4 to any\&" \&| \e pfctl -a spam -f - .Ed .Pp This loads a single rule into the .Ar anchor , which blocks all packets from a specific address. .Pp The anchor can also be populated by adding a .Ar load anchor rule after the .Ar anchor rule: .Bd -literal -offset indent anchor spam load anchor spam from "/etc/pf-spam.conf" .Ed .Pp When .Xr pfctl 8 loads .Nm pf.conf , it will also load all the rules from the file .Pa /etc/pf-spam.conf into the anchor. .Pp Optionally, .Ar anchor rules can specify packet filtering parameters using the same syntax as filter rules. When parameters are used, the .Ar anchor rule is only evaluated for matching packets. This allows conditional evaluation of anchors, like: .Bd -literal -offset indent block on $ext_if all anchor spam proto tcp from any to any port smtp pass out on $ext_if all pass in on $ext_if proto tcp from any to $ext_if port smtp .Ed .Pp The rules inside .Ar anchor spam are only evaluated for .Ar tcp packets with destination port 25. Hence, .Bd -literal -offset indent # echo \&"block in quick from 1.2.3.4 to any" \&| \e pfctl -a spam -f - .Ed .Pp will only block connections from 1.2.3.4 to port 25. .Pp Anchors may end with the asterisk .Pq Sq * character, which signifies that all anchors attached at that point should be evaluated in the alphabetical ordering of their anchor name. For example, .Bd -literal -offset indent anchor "spam/*" .Ed .Pp will evaluate each rule in each anchor attached to the .Li spam anchor. Note that it will only evaluate anchors that are directly attached to the .Li spam anchor, and will not descend to evaluate anchors recursively. .Pp Since anchors are evaluated relative to the anchor in which they are contained, there is a mechanism for accessing the parent and ancestor anchors of a given anchor. 
Similar to file system path name resolution, if the sequence .Dq .. appears as an anchor path component, the parent anchor of the current anchor in the path evaluation at that point will become the new current anchor. As an example, consider the following: .Bd -literal -offset indent # echo ' anchor "spam/allowed" ' | pfctl -f - # echo -e ' anchor "../banned" \en pass' | \e pfctl -a spam/allowed -f - .Ed .Pp Evaluation of the main ruleset will lead into the .Li spam/allowed anchor, which will evaluate the rules in the .Li spam/banned anchor, if any, before finally evaluating the .Ar pass rule. .Pp Filter rule .Ar anchors can also be loaded inline in the ruleset within a brace ('{' '}') delimited block. Brace delimited blocks may contain rules or other brace-delimited blocks. When anchors are loaded this way the anchor name becomes optional. .Bd -literal -offset indent anchor "external" on $ext_if { block anchor out { pass proto tcp from any to port { 25, 80, 443 } } pass in proto tcp to any port 22 } .Ed .Pp Since the parser specification for anchor names is a string, any reference to an anchor name containing .Sq / characters will require double quote .Pq Sq \&" characters around the anchor name. .Sh TRANSLATION EXAMPLES This example maps incoming requests on port 80 to port 8080, on which a daemon is running (because, for example, it is not run as root, and therefore lacks permission to bind to port 80). .Bd -literal # use a macro for the interface name, so it can be changed easily ext_if = \&"ne3\&" # map daemon on 8080 to appear to be on 80 rdr on $ext_if proto tcp from any to any port 80 -\*(Gt 127.0.0.1 port 8080 .Ed .Pp If the .Ar pass modifier is given, packets matching the translation rule are passed without inspecting the filter rules: .Bd -literal rdr pass on $ext_if proto tcp from any to any port 80 -\*(Gt 127.0.0.1 \e port 8080 .Ed .Pp In the example below, vlan12 is configured as 192.168.168.1; the machine translates all packets coming from 192.168.168.0/24 to 204.92.77.111 when they are going out any interface except vlan12. This has the net effect of making traffic from the 192.168.168.0/24 network appear as though it is the Internet routable address 204.92.77.111 to nodes behind any interface on the router except for the nodes on vlan12. (Thus, 192.168.168.1 can talk to the 192.168.168.0/24 nodes.) .Bd -literal nat on ! vlan12 from 192.168.168.0/24 to any -\*(Gt 204.92.77.111 .Ed .Pp In the example below, the machine sits between a fake internal 144.19.74.* network, and a routable external IP of 204.92.77.100. The .Ar no nat rule excludes protocol AH from being translated. .Bd -literal # NO NAT no nat on $ext_if proto ah from 144.19.74.0/24 to any nat on $ext_if from 144.19.74.0/24 to any -\*(Gt 204.92.77.100 .Ed .Pp In the example below, packets bound for one specific server, as well as those generated by the sysadmins are not proxied; all other connections are. .Bd -literal # NO RDR no rdr on $int_if proto { tcp, udp } from any to $server port 80 no rdr on $int_if proto { tcp, udp } from $sysadmins to any port 80 rdr on $int_if proto { tcp, udp } from any to any port 80 -\*(Gt 127.0.0.1 \e port 80 .Ed .Pp This longer example uses both a NAT and a redirection. The external interface has the address 157.161.48.183. On localhost, we are running .Xr ftp-proxy 8 , waiting for FTP sessions to be redirected to it. The three mandatory anchors for .Xr ftp-proxy 8 are omitted from this example; see the .Xr ftp-proxy 8 manpage. 
.Bd -literal # NAT # Translate outgoing packets' source addresses (any protocol). # In this case, any address but the gateway's external address is mapped. nat on $ext_if inet from ! ($ext_if) to any -\*(Gt ($ext_if) # NAT PROXYING # Map outgoing packets' source port to an assigned proxy port instead of # an arbitrary port. # In this case, proxy outgoing isakmp with port 500 on the gateway. nat on $ext_if inet proto udp from any port = isakmp to any -\*(Gt ($ext_if) \e port 500 # BINAT # Translate outgoing packets' source address (any protocol). # Translate incoming packets' destination address to an internal machine # (bidirectional). binat on $ext_if from 10.1.2.150 to any -\*(Gt $ext_if # Translate packets arriving on $peer_if addressed to 172.22.16.0/20 # to the corresponding address in 172.21.16.0/20 (bidirectional). binat on $peer_if from 172.21.16.0/20 to any -> 172.22.16.0/20 # RDR # Translate incoming packets' destination addresses. # As an example, redirect a TCP and UDP port to an internal machine. rdr on $ext_if inet proto tcp from any to ($ext_if) port 8080 \e -\*(Gt 10.1.2.151 port 22 rdr on $ext_if inet proto udp from any to ($ext_if) port 8080 \e -\*(Gt 10.1.2.151 port 53 # RDR # Translate outgoing ftp control connections to send them to localhost # for proxying with ftp-proxy(8) running on port 8021. rdr on $int_if proto tcp from any to any port 21 -\*(Gt 127.0.0.1 port 8021 .Ed .Pp In this example, a NAT gateway is set up to translate internal addresses using a pool of public addresses (192.0.2.16/28) and to redirect incoming web server connections to a group of web servers on the internal network. .Bd -literal # NAT LOAD BALANCE # Translate outgoing packets' source addresses using an address pool. # A given source address is always translated to the same pool address by # using the source-hash keyword. nat on $ext_if inet from any to any -\*(Gt 192.0.2.16/28 source-hash # RDR ROUND ROBIN # Translate incoming web server connections to a group of web servers on # the internal network. rdr on $ext_if proto tcp from any to any port 80 \e -\*(Gt { 10.1.2.155, 10.1.2.160, 10.1.2.161 } round-robin .Ed .Sh FILTER EXAMPLES .Bd -literal # The external interface is kue0 # (157.161.48.183, the only routable address) # and the private network is 10.0.0.0/8, for which we are doing NAT. # use a macro for the interface name, so it can be changed easily ext_if = \&"kue0\&" # normalize all incoming traffic scrub in on $ext_if all fragment reassemble # block and log everything by default block return log on $ext_if all # block anything coming from source we have no back routes for block in from no-route to any # block packets whose ingress interface does not match the one in # the route back to their source address block in from urpf-failed to any # block and log outgoing packets that do not have our address as source, # they are either spoofed or something is misconfigured (NAT disabled, # for instance), we want to be nice and do not send out garbage. block out log quick on $ext_if from ! 157.161.48.183 to any # silently drop broadcasts (cable modem noise) block in quick on $ext_if from any to 255.255.255.255 # block and log incoming packets from reserved address space and invalid # addresses, they are either spoofed or misconfigured, we cannot reply to # them anyway (hence, no return-rst). 
block in log quick on $ext_if from { 10.0.0.0/8, 172.16.0.0/12, \e 192.168.0.0/16, 255.255.255.255/32 } to any # ICMP # pass out/in certain ICMP queries and keep state (ping) # state matching is done on host addresses and ICMP id (not type/code), # so replies (like 0/0 for 8/0) will match queries # ICMP error messages (which always refer to a TCP/UDP packet) are # handled by the TCP/UDP states pass on $ext_if inet proto icmp all icmp-type 8 code 0 # UDP # pass out all UDP connections and keep state pass out on $ext_if proto udp all # pass in certain UDP connections and keep state (DNS) pass in on $ext_if proto udp from any to any port domain # TCP # pass out all TCP connections and modulate state pass out on $ext_if proto tcp all modulate state # pass in certain TCP connections and keep state (SSH, SMTP, DNS, IDENT) pass in on $ext_if proto tcp from any to any port { ssh, smtp, domain, \e auth } # Do not allow Windows 9x SMTP connections since they are typically # a viral worm. Alternately we could limit these OSes to 1 connection each. block in on $ext_if proto tcp from any os {"Windows 95", "Windows 98"} \e to any port smtp # IPv6 # pass in/out all IPv6 traffic: note that we have to enable this in two # different ways, on both our physical interface and our tunnel pass quick on gif0 inet6 pass quick on $ext_if proto ipv6 # Packet Tagging # three interfaces: $int_if, $ext_if, and $wifi_if (wireless). NAT is # being done on $ext_if for all outgoing packets. tag packets in on # $int_if and pass those tagged packets out on $ext_if. all other # outgoing packets (i.e., packets from the wireless network) are only # permitted to access port 80. pass in on $int_if from any to any tag INTNET pass in on $wifi_if from any to any block out on $ext_if from any to any pass out quick on $ext_if tagged INTNET pass out on $ext_if proto tcp from any to any port 80 # tag incoming packets as they are redirected to spamd(8). use the tag # to pass those packets through the packet filter. 
rdr on $ext_if inet proto tcp from \*(Ltspammers\*(Gt to port smtp \e tag SPAMD -\*(Gt 127.0.0.1 port spamd block in on $ext_if pass in on $ext_if inet proto tcp tagged SPAMD .Ed .Sh GRAMMAR Syntax for .Nm in BNF: .Bd -literal line = ( option | ether-rule | pf-rule | nat-rule | binat-rule | rdr-rule | antispoof-rule | altq-rule | queue-rule | trans-anchors | anchor-rule | anchor-close | load-anchor | table-rule | include ) option = "set" ( [ "timeout" ( timeout | "{" timeout-list "}" ) ] | [ "ruleset-optimization" [ "none" | "basic" | "profile" ]] | [ "optimization" [ "default" | "normal" | "high-latency" | "satellite" | "aggressive" | "conservative" ] ] [ "limit" ( limit-item | "{" limit-list "}" ) ] | [ "loginterface" ( interface-name | "none" ) ] | [ "block-policy" ( "drop" | "return" ) ] | [ "state-policy" ( "if-bound" | "floating" ) ] [ "state-defaults" state-opts ] [ "require-order" ( "yes" | "no" ) ] [ "fingerprints" filename ] | [ "skip on" ifspec ] | [ "debug" ( "none" | "urgent" | "misc" | "loud" ) ] [ "keepcounters" ] ) ether-rule = "ether" etheraction [ ( "in" | "out" ) ] [ "quick" ] [ "on" ifspec ] [ "bridge-to" interface-name ] [ etherprotospec ] etherhosts [ "l3" hosts ] [ etherfilteropt-list ] pf-rule = action [ ( "in" | "out" ) ] [ "log" [ "(" logopts ")"] ] [ "quick" ] [ "on" ifspec ] [ route ] [ af ] [ protospec ] hosts [ filteropt-list ] logopts = logopt [ "," logopts ] logopt = "all" | "user" | "to" interface-name etherfilteropt-list = etherfilteropt-list etherfilteropt | etherfilteropt etherfilteropt = "tag" string | "tagged" string | "queue" ( string ) filteropt-list = filteropt-list filteropt | filteropt filteropt = user | group | flags | icmp-type | icmp6-type | "tos" tos | ( "no" | "keep" | "modulate" | "synproxy" ) "state" [ "(" state-opts ")" ] | "fragment" | "no-df" | "min-ttl" number | "set-tos" tos | "max-mss" number | "random-id" | "reassemble tcp" | fragmentation | "allow-opts" | "label" string | "tag" string | [ ! ] "tagged" string | "set prio" ( number | "(" number [ [ "," ] number ] ")" ) | "queue" ( string | "(" string [ [ "," ] string ] ")" ) | "rtable" number | "probability" number"%" | "prio" number | "dnpipe" ( number | "(" number "," number ")" ) | "dnqueue" ( number | "(" number "," number ")" ) | "ridentifier" number nat-rule = [ "no" ] "nat" [ "pass" [ "log" [ "(" logopts ")" ] ] ] [ "on" ifspec ] [ af ] [ protospec ] hosts [ "tag" string ] [ "tagged" string ] [ "-\*(Gt" ( redirhost | "{" redirhost-list "}" ) [ portspec ] [ pooltype ] [ "static-port" ] [ "map-e-portset" number "/" number "/" number ] ] binat-rule = [ "no" ] "binat" [ "pass" [ "log" [ "(" logopts ")" ] ] ] [ "on" interface-name ] [ af ] [ "proto" ( proto-name | proto-number ) ] "from" address [ "/" mask-bits ] "to" ipspec [ "tag" string ] [ "tagged" string ] [ "-\*(Gt" address [ "/" mask-bits ] ] rdr-rule = [ "no" ] "rdr" [ "pass" [ "log" [ "(" logopts ")" ] ] ] [ "on" ifspec ] [ af ] [ protospec ] hosts [ "tag" string ] [ "tagged" string ] [ "-\*(Gt" ( redirhost | "{" redirhost-list "}" ) [ portspec ] [ pooltype ] ] antispoof-rule = "antispoof" [ "log" ] [ "quick" ] "for" ifspec [ af ] [ "label" string ] [ "ridentifier" number ] table-rule = "table" "\*(Lt" string "\*(Gt" [ tableopts-list ] tableopts-list = tableopts-list tableopts | tableopts tableopts = "persist" | "const" | "counters" | "file" string | "{" [ tableaddr-list ] "}" tableaddr-list = tableaddr-list [ "," ] tableaddr-spec | tableaddr-spec tableaddr-spec = [ "!" 
] tableaddr [ "/" mask-bits ] tableaddr = hostname | ifspec | "self" | ipv4-dotted-quad | ipv6-coloned-hex altq-rule = "altq on" interface-name queueopts-list "queue" subqueue queue-rule = "queue" string [ "on" interface-name ] queueopts-list subqueue anchor-rule = "anchor" [ string ] [ ( "in" | "out" ) ] [ "on" ifspec ] [ af ] [ protospec ] [ hosts ] [ filteropt-list ] [ "{" ] anchor-close = "}" trans-anchors = ( "nat-anchor" | "rdr-anchor" | "binat-anchor" ) string [ "on" ifspec ] [ af ] [ "proto" ] [ protospec ] [ hosts ] load-anchor = "load anchor" string "from" filename queueopts-list = queueopts-list queueopts | queueopts queueopts = [ "bandwidth" bandwidth-spec ] | [ "qlimit" number ] | [ "tbrsize" number ] | [ "priority" number ] | [ schedulers ] schedulers = ( cbq-def | priq-def | hfsc-def ) bandwidth-spec = "number" ( "b" | "Kb" | "Mb" | "Gb" | "%" ) etheraction = "pass" | "block" -action = "pass" | "block" [ return ] | [ "no" ] "scrub" +action = "pass" | "match" | "block" [ return ] | [ "no" ] "scrub" return = "drop" | "return" | "return-rst" [ "( ttl" number ")" ] | "return-icmp" [ "(" icmpcode [ [ "," ] icmp6code ] ")" ] | "return-icmp6" [ "(" icmp6code ")" ] icmpcode = ( icmp-code-name | icmp-code-number ) icmp6code = ( icmp6-code-name | icmp6-code-number ) ifspec = ( [ "!" ] ( interface-name | interface-group ) ) | "{" interface-list "}" interface-list = [ "!" ] ( interface-name | interface-group ) [ [ "," ] interface-list ] route = ( "route-to" | "reply-to" | "dup-to" ) ( routehost | "{" routehost-list "}" ) [ pooltype ] af = "inet" | "inet6" etherprotospec = "proto" ( proto-number | "{" proto-list "}" ) protospec = "proto" ( proto-name | proto-number | "{" proto-list "}" ) proto-list = ( proto-name | proto-number ) [ [ "," ] proto-list ] etherhosts = "from" macaddress "to" macaddress macaddress = mac | mac "/" masklen | mac "&" mask hosts = "all" | "from" ( "any" | "no-route" | "urpf-failed" | "self" | host | "{" host-list "}" ) [ port ] [ os ] "to" ( "any" | "no-route" | "self" | host | "{" host-list "}" ) [ port ] ipspec = "any" | host | "{" host-list "}" host = [ "!" 
] ( address [ "/" mask-bits ] | "\*(Lt" string "\*(Gt" ) redirhost = address [ "/" mask-bits ] routehost = "(" interface-name [ address [ "/" mask-bits ] ] ")" address = ( interface-name | interface-group | "(" ( interface-name | interface-group ) ")" | hostname | ipv4-dotted-quad | ipv6-coloned-hex ) host-list = host [ [ "," ] host-list ] redirhost-list = redirhost [ [ "," ] redirhost-list ] routehost-list = routehost [ [ "," ] routehost-list ] port = "port" ( unary-op | binary-op | "{" op-list "}" ) portspec = "port" ( number | name ) [ ":" ( "*" | number | name ) ] os = "os" ( os-name | "{" os-list "}" ) user = "user" ( unary-op | binary-op | "{" op-list "}" ) group = "group" ( unary-op | binary-op | "{" op-list "}" ) unary-op = [ "=" | "!=" | "\*(Lt" | "\*(Le" | "\*(Gt" | "\*(Ge" ] ( name | number ) binary-op = number ( "\*(Lt\*(Gt" | "\*(Gt\*(Lt" | ":" ) number op-list = ( unary-op | binary-op ) [ [ "," ] op-list ] os-name = operating-system-name os-list = os-name [ [ "," ] os-list ] flags = "flags" ( [ flag-set ] "/" flag-set | "any" ) flag-set = [ "F" ] [ "S" ] [ "R" ] [ "P" ] [ "A" ] [ "U" ] [ "E" ] [ "W" ] icmp-type = "icmp-type" ( icmp-type-code | "{" icmp-list "}" ) icmp6-type = "icmp6-type" ( icmp-type-code | "{" icmp-list "}" ) icmp-type-code = ( icmp-type-name | icmp-type-number ) [ "code" ( icmp-code-name | icmp-code-number ) ] icmp-list = icmp-type-code [ [ "," ] icmp-list ] tos = ( "lowdelay" | "throughput" | "reliability" | [ "0x" ] number ) state-opts = state-opt [ [ "," ] state-opts ] state-opt = ( "max" number | "no-sync" | timeout | "sloppy" | "source-track" [ ( "rule" | "global" ) ] | "max-src-nodes" number | "max-src-states" number | "max-src-conn" number | "max-src-conn-rate" number "/" number | "overload" "\*(Lt" string "\*(Gt" [ "flush" ] | "if-bound" | "floating" ) fragmentation = [ "fragment reassemble" ] timeout-list = timeout [ [ "," ] timeout-list ] timeout = ( "tcp.first" | "tcp.opening" | "tcp.established" | "tcp.closing" | "tcp.finwait" | "tcp.closed" | "udp.first" | "udp.single" | "udp.multiple" | "icmp.first" | "icmp.error" | "other.first" | "other.single" | "other.multiple" | "frag" | "interval" | "src.track" | "adaptive.start" | "adaptive.end" ) number limit-list = limit-item [ [ "," ] limit-list ] limit-item = ( "states" | "frags" | "src-nodes" ) number pooltype = ( "bitmask" | "random" | "source-hash" [ ( hex-key | string-key ) ] | "round-robin" ) [ sticky-address ] subqueue = string | "{" queue-list "}" queue-list = string [ [ "," ] string ] cbq-def = "cbq" [ "(" cbq-opt [ [ "," ] cbq-opt ] ")" ] priq-def = "priq" [ "(" priq-opt [ [ "," ] priq-opt ] ")" ] hfsc-def = "hfsc" [ "(" hfsc-opt [ [ "," ] hfsc-opt ] ")" ] cbq-opt = ( "default" | "borrow" | "red" | "ecn" | "rio" ) priq-opt = ( "default" | "red" | "ecn" | "rio" ) hfsc-opt = ( "default" | "red" | "ecn" | "rio" | linkshare-sc | realtime-sc | upperlimit-sc ) linkshare-sc = "linkshare" sc-spec realtime-sc = "realtime" sc-spec upperlimit-sc = "upperlimit" sc-spec sc-spec = ( bandwidth-spec | "(" bandwidth-spec number bandwidth-spec ")" ) include = "include" filename .Ed .Sh FILES .Bl -tag -width "/etc/protocols" -compact .It Pa /etc/hosts Host name database. .It Pa /etc/pf.conf Default location of the ruleset file. The file has to be created manually as it is not installed with a standard installation. .It Pa /etc/pf.os Default location of OS fingerprints. .It Pa /etc/protocols Protocol name database. .It Pa /etc/services Service name database. 
.El .Sh SEE ALSO .Xr altq 4 , .Xr carp 4 , .Xr icmp 4 , .Xr icmp6 4 , .Xr ip 4 , .Xr ip6 4 , .Xr pf 4 , .Xr pfsync 4 , .Xr tcp 4 , .Xr udp 4 , .Xr hosts 5 , .Xr pf.os 5 , .Xr protocols 5 , .Xr services 5 , .Xr ftp-proxy 8 , .Xr pfctl 8 , .Xr pflogd 8 .Sh HISTORY The .Nm file format first appeared in .Ox 3.0 . diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h index b97524f34bc3..e2cddb49728c 100644 --- a/sys/net/pfvar.h +++ b/sys/net/pfvar.h @@ -1,2376 +1,2398 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $OpenBSD: pfvar.h,v 1.282 2009/01/29 15:12:28 pyr Exp $ * $FreeBSD$ */ #ifndef _NET_PFVAR_H_ #define _NET_PFVAR_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #include #endif #include #include #include #ifdef _KERNEL #if defined(__arm__) #define PF_WANT_32_TO_64_COUNTER #endif /* * A hybrid of 32-bit and 64-bit counters which can be used on platforms where * counter(9) is very expensive. * * As 32-bit counters are expected to overflow, a periodic job sums them up to * a saved 64-bit state. Fetching the value still walks all CPUs to get the most * current snapshot. 
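* * Concurrency (see the functions below): the periodic fold into pfcu64_value is bracketed by seqc_write_begin()/seqc_write_end(), and pf_counter_u64_fetch() retries via seqc_read()/seqc_consistent() until it observes a consistent snapshot, so readers never block the packet path. A typical (illustrative) pattern is pf_counter_u64_add(&rule->evaluations, 1) on the hot path and pf_counter_u64_fetch(&rule->evaluations) when exporting statistics.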
*/ #ifdef PF_WANT_32_TO_64_COUNTER struct pf_counter_u64_pcpu { u_int32_t current; u_int32_t snapshot; }; struct pf_counter_u64 { struct pf_counter_u64_pcpu *pfcu64_pcpu; u_int64_t pfcu64_value; seqc_t pfcu64_seqc; }; static inline int pf_counter_u64_init(struct pf_counter_u64 *pfcu64, int flags) { pfcu64->pfcu64_value = 0; pfcu64->pfcu64_seqc = 0; pfcu64->pfcu64_pcpu = uma_zalloc_pcpu(pcpu_zone_8, flags | M_ZERO); if (__predict_false(pfcu64->pfcu64_pcpu == NULL)) return (ENOMEM); return (0); } static inline void pf_counter_u64_deinit(struct pf_counter_u64 *pfcu64) { uma_zfree_pcpu(pcpu_zone_8, pfcu64->pfcu64_pcpu); } static inline void pf_counter_u64_critical_enter(void) { critical_enter(); } static inline void pf_counter_u64_critical_exit(void) { critical_exit(); } static inline void pf_counter_u64_add_protected(struct pf_counter_u64 *pfcu64, uint32_t n) { struct pf_counter_u64_pcpu *pcpu; u_int32_t val; MPASS(curthread->td_critnest > 0); pcpu = zpcpu_get(pfcu64->pfcu64_pcpu); val = atomic_load_int(&pcpu->current); atomic_store_int(&pcpu->current, val + n); } static inline void pf_counter_u64_add(struct pf_counter_u64 *pfcu64, uint32_t n) { critical_enter(); pf_counter_u64_add_protected(pfcu64, n); critical_exit(); } static inline u_int64_t pf_counter_u64_periodic(struct pf_counter_u64 *pfcu64) { struct pf_counter_u64_pcpu *pcpu; u_int64_t sum; u_int32_t val; int cpu; MPASS(curthread->td_critnest > 0); seqc_write_begin(&pfcu64->pfcu64_seqc); sum = pfcu64->pfcu64_value; CPU_FOREACH(cpu) { pcpu = zpcpu_get_cpu(pfcu64->pfcu64_pcpu, cpu); val = atomic_load_int(&pcpu->current); sum += (uint32_t)(val - pcpu->snapshot); pcpu->snapshot = val; } pfcu64->pfcu64_value = sum; seqc_write_end(&pfcu64->pfcu64_seqc); return (sum); } static inline u_int64_t pf_counter_u64_fetch(const struct pf_counter_u64 *pfcu64) { struct pf_counter_u64_pcpu *pcpu; u_int64_t sum; seqc_t seqc; int cpu; for (;;) { seqc = seqc_read(&pfcu64->pfcu64_seqc); sum = 0; CPU_FOREACH(cpu) { pcpu = zpcpu_get_cpu(pfcu64->pfcu64_pcpu, cpu); sum += (uint32_t)(atomic_load_int(&pcpu->current) -pcpu->snapshot); } sum += pfcu64->pfcu64_value; if (seqc_consistent(&pfcu64->pfcu64_seqc, seqc)) break; } return (sum); } static inline void pf_counter_u64_zero_protected(struct pf_counter_u64 *pfcu64) { struct pf_counter_u64_pcpu *pcpu; int cpu; MPASS(curthread->td_critnest > 0); seqc_write_begin(&pfcu64->pfcu64_seqc); CPU_FOREACH(cpu) { pcpu = zpcpu_get_cpu(pfcu64->pfcu64_pcpu, cpu); pcpu->snapshot = atomic_load_int(&pcpu->current); } pfcu64->pfcu64_value = 0; seqc_write_end(&pfcu64->pfcu64_seqc); } static inline void pf_counter_u64_zero(struct pf_counter_u64 *pfcu64) { critical_enter(); pf_counter_u64_zero_protected(pfcu64); critical_exit(); } #else struct pf_counter_u64 { counter_u64_t counter; }; static inline int pf_counter_u64_init(struct pf_counter_u64 *pfcu64, int flags) { pfcu64->counter = counter_u64_alloc(flags); if (__predict_false(pfcu64->counter == NULL)) return (ENOMEM); return (0); } static inline void pf_counter_u64_deinit(struct pf_counter_u64 *pfcu64) { counter_u64_free(pfcu64->counter); } static inline void pf_counter_u64_critical_enter(void) { } static inline void pf_counter_u64_critical_exit(void) { } static inline void pf_counter_u64_add_protected(struct pf_counter_u64 *pfcu64, uint32_t n) { counter_u64_add(pfcu64->counter, n); } static inline void pf_counter_u64_add(struct pf_counter_u64 *pfcu64, uint32_t n) { pf_counter_u64_add_protected(pfcu64, n); } static inline u_int64_t pf_counter_u64_fetch(const struct pf_counter_u64 
*pfcu64) { return (counter_u64_fetch(pfcu64->counter)); } static inline void pf_counter_u64_zero_protected(struct pf_counter_u64 *pfcu64) { counter_u64_zero(pfcu64->counter); } static inline void pf_counter_u64_zero(struct pf_counter_u64 *pfcu64) { pf_counter_u64_zero_protected(pfcu64); } #endif #define pf_get_timestamp(prule)({ \ uint32_t _ts = 0; \ uint32_t __ts; \ int cpu; \ CPU_FOREACH(cpu) { \ __ts = *zpcpu_get_cpu(prule->timestamp, cpu); \ if (__ts > _ts) \ _ts = __ts; \ } \ _ts; \ }) #define pf_update_timestamp(prule) \ do { \ critical_enter(); \ *zpcpu_get((prule)->timestamp) = time_second; \ critical_exit(); \ } while (0) #define pf_timestamp_pcpu_zone (sizeof(time_t) == 4 ? pcpu_zone_4 : pcpu_zone_8) _Static_assert(sizeof(time_t) == 4 || sizeof(time_t) == 8, "unexpected time_t size"); SYSCTL_DECL(_net_pf); MALLOC_DECLARE(M_PFHASH); +MALLOC_DECLARE(M_PF_RULE_ITEM); SDT_PROVIDER_DECLARE(pf); struct pfi_dynaddr { TAILQ_ENTRY(pfi_dynaddr) entry; struct pf_addr pfid_addr4; struct pf_addr pfid_mask4; struct pf_addr pfid_addr6; struct pf_addr pfid_mask6; struct pfr_ktable *pfid_kt; struct pfi_kkif *pfid_kif; int pfid_net; /* mask or 128 */ int pfid_acnt4; /* address count IPv4 */ int pfid_acnt6; /* address count IPv6 */ sa_family_t pfid_af; /* rule af */ u_int8_t pfid_iflags; /* PFI_AFLAG_* */ }; /* * Address manipulation macros */ #define HTONL(x) (x) = htonl((__uint32_t)(x)) #define HTONS(x) (x) = htons((__uint16_t)(x)) #define NTOHL(x) (x) = ntohl((__uint32_t)(x)) #define NTOHS(x) (x) = ntohs((__uint16_t)(x)) #define PF_NAME "pf" #define PF_HASHROW_ASSERT(h) mtx_assert(&(h)->lock, MA_OWNED) #define PF_HASHROW_LOCK(h) mtx_lock(&(h)->lock) #define PF_HASHROW_UNLOCK(h) mtx_unlock(&(h)->lock) #ifdef INVARIANTS #define PF_STATE_LOCK(s) \ do { \ struct pf_kstate *_s = (s); \ struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(_s)]; \ MPASS(_s->lock == &_ih->lock); \ mtx_lock(_s->lock); \ } while (0) #define PF_STATE_UNLOCK(s) \ do { \ struct pf_kstate *_s = (s); \ struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(_s)]; \ MPASS(_s->lock == &_ih->lock); \ mtx_unlock(_s->lock); \ } while (0) #else #define PF_STATE_LOCK(s) mtx_lock(s->lock) #define PF_STATE_UNLOCK(s) mtx_unlock(s->lock) #endif #ifdef INVARIANTS #define PF_STATE_LOCK_ASSERT(s) \ do { \ struct pf_kstate *_s = (s); \ struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(_s)]; \ MPASS(_s->lock == &_ih->lock); \ PF_HASHROW_ASSERT(_ih); \ } while (0) #else /* !INVARIANTS */ #define PF_STATE_LOCK_ASSERT(s) do {} while (0) #endif /* INVARIANTS */ extern struct mtx_padalign pf_unlnkdrules_mtx; #define PF_UNLNKDRULES_LOCK() mtx_lock(&pf_unlnkdrules_mtx) #define PF_UNLNKDRULES_UNLOCK() mtx_unlock(&pf_unlnkdrules_mtx) #define PF_UNLNKDRULES_ASSERT() mtx_assert(&pf_unlnkdrules_mtx, MA_OWNED) extern struct sx pf_config_lock; #define PF_CONFIG_LOCK() sx_xlock(&pf_config_lock) #define PF_CONFIG_UNLOCK() sx_xunlock(&pf_config_lock) #define PF_CONFIG_ASSERT() sx_assert(&pf_config_lock, SA_XLOCKED) extern struct rmlock pf_rules_lock; #define PF_RULES_RLOCK_TRACKER struct rm_priotracker _pf_rules_tracker #define PF_RULES_RLOCK() rm_rlock(&pf_rules_lock, &_pf_rules_tracker) #define PF_RULES_RUNLOCK() rm_runlock(&pf_rules_lock, &_pf_rules_tracker) #define PF_RULES_WLOCK() rm_wlock(&pf_rules_lock) #define PF_RULES_WUNLOCK() rm_wunlock(&pf_rules_lock) #define PF_RULES_WOWNED() rm_wowned(&pf_rules_lock) #define PF_RULES_ASSERT() rm_assert(&pf_rules_lock, RA_LOCKED) #define PF_RULES_RASSERT() rm_assert(&pf_rules_lock, RA_RLOCKED) #define PF_RULES_WASSERT() 
rm_assert(&pf_rules_lock, RA_WLOCKED) extern struct mtx_padalign pf_table_stats_lock; #define PF_TABLE_STATS_LOCK() mtx_lock(&pf_table_stats_lock) #define PF_TABLE_STATS_UNLOCK() mtx_unlock(&pf_table_stats_lock) #define PF_TABLE_STATS_OWNED() mtx_owned(&pf_table_stats_lock) #define PF_TABLE_STATS_ASSERT() mtx_assert(&pf_rules_lock, MA_OWNED) extern struct sx pf_end_lock; #define PF_MODVER 1 #define PFLOG_MODVER 1 #define PFSYNC_MODVER 1 #define PFLOG_MINVER 1 #define PFLOG_PREFVER PFLOG_MODVER #define PFLOG_MAXVER 1 #define PFSYNC_MINVER 1 #define PFSYNC_PREFVER PFSYNC_MODVER #define PFSYNC_MAXVER 1 #ifdef INET #ifndef INET6 #define PF_INET_ONLY #endif /* ! INET6 */ #endif /* INET */ #ifdef INET6 #ifndef INET #define PF_INET6_ONLY #endif /* ! INET */ #endif /* INET6 */ #ifdef INET #ifdef INET6 #define PF_INET_INET6 #endif /* INET6 */ #endif /* INET */ #else #define PF_INET_INET6 #endif /* _KERNEL */ /* Both IPv4 and IPv6 */ #ifdef PF_INET_INET6 #define PF_AEQ(a, b, c) \ ((c == AF_INET && (a)->addr32[0] == (b)->addr32[0]) || \ (c == AF_INET6 && (a)->addr32[3] == (b)->addr32[3] && \ (a)->addr32[2] == (b)->addr32[2] && \ (a)->addr32[1] == (b)->addr32[1] && \ (a)->addr32[0] == (b)->addr32[0])) \ #define PF_ANEQ(a, b, c) \ ((c == AF_INET && (a)->addr32[0] != (b)->addr32[0]) || \ (c == AF_INET6 && ((a)->addr32[0] != (b)->addr32[0] || \ (a)->addr32[1] != (b)->addr32[1] || \ (a)->addr32[2] != (b)->addr32[2] || \ (a)->addr32[3] != (b)->addr32[3]))) \ #define PF_AZERO(a, c) \ ((c == AF_INET && !(a)->addr32[0]) || \ (c == AF_INET6 && !(a)->addr32[0] && !(a)->addr32[1] && \ !(a)->addr32[2] && !(a)->addr32[3] )) \ #define PF_MATCHA(n, a, m, b, f) \ pf_match_addr(n, a, m, b, f) #define PF_ACPY(a, b, f) \ pf_addrcpy(a, b, f) #define PF_AINC(a, f) \ pf_addr_inc(a, f) #define PF_POOLMASK(a, b, c, d, f) \ pf_poolmask(a, b, c, d, f) #else /* Just IPv6 */ #ifdef PF_INET6_ONLY #define PF_AEQ(a, b, c) \ ((a)->addr32[3] == (b)->addr32[3] && \ (a)->addr32[2] == (b)->addr32[2] && \ (a)->addr32[1] == (b)->addr32[1] && \ (a)->addr32[0] == (b)->addr32[0]) \ #define PF_ANEQ(a, b, c) \ ((a)->addr32[3] != (b)->addr32[3] || \ (a)->addr32[2] != (b)->addr32[2] || \ (a)->addr32[1] != (b)->addr32[1] || \ (a)->addr32[0] != (b)->addr32[0]) \ #define PF_AZERO(a, c) \ (!(a)->addr32[0] && \ !(a)->addr32[1] && \ !(a)->addr32[2] && \ !(a)->addr32[3] ) \ #define PF_MATCHA(n, a, m, b, f) \ pf_match_addr(n, a, m, b, f) #define PF_ACPY(a, b, f) \ pf_addrcpy(a, b, f) #define PF_AINC(a, f) \ pf_addr_inc(a, f) #define PF_POOLMASK(a, b, c, d, f) \ pf_poolmask(a, b, c, d, f) #else /* Just IPv4 */ #ifdef PF_INET_ONLY #define PF_AEQ(a, b, c) \ ((a)->addr32[0] == (b)->addr32[0]) #define PF_ANEQ(a, b, c) \ ((a)->addr32[0] != (b)->addr32[0]) #define PF_AZERO(a, c) \ (!(a)->addr32[0]) #define PF_MATCHA(n, a, m, b, f) \ pf_match_addr(n, a, m, b, f) #define PF_ACPY(a, b, f) \ (a)->v4.s_addr = (b)->v4.s_addr #define PF_AINC(a, f) \ do { \ (a)->addr32[0] = htonl(ntohl((a)->addr32[0]) + 1); \ } while (0) #define PF_POOLMASK(a, b, c, d, f) \ do { \ (a)->addr32[0] = ((b)->addr32[0] & (c)->addr32[0]) | \ (((c)->addr32[0] ^ 0xffffffff ) & (d)->addr32[0]); \ } while (0) #endif /* PF_INET_ONLY */ #endif /* PF_INET6_ONLY */ #endif /* PF_INET_INET6 */ /* * XXX callers not FIB-aware in our version of pf yet. * OpenBSD fixed it later it seems, 2010/05/07 13:33:16 claudio. 
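* * PF_MISMATCHAW(aw, x, af, neg, ifp, rtid) below evaluates to true when address x fails to match the address wrapper aw (no-route/URPF reachability checks, table, dynamic interface list, address range, or address/mask), with the final result inverted by the rule's negation flag neg.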
*/ #define PF_MISMATCHAW(aw, x, af, neg, ifp, rtid) \ ( \ (((aw)->type == PF_ADDR_NOROUTE && \ pf_routable((x), (af), NULL, (rtid))) || \ (((aw)->type == PF_ADDR_URPFFAILED && (ifp) != NULL && \ pf_routable((x), (af), (ifp), (rtid))) || \ ((aw)->type == PF_ADDR_TABLE && \ !pfr_match_addr((aw)->p.tbl, (x), (af))) || \ ((aw)->type == PF_ADDR_DYNIFTL && \ !pfi_match_addr((aw)->p.dyn, (x), (af))) || \ ((aw)->type == PF_ADDR_RANGE && \ !pf_match_addr_range(&(aw)->v.a.addr, \ &(aw)->v.a.mask, (x), (af))) || \ ((aw)->type == PF_ADDR_ADDRMASK && \ !PF_AZERO(&(aw)->v.a.mask, (af)) && \ !PF_MATCHA(0, &(aw)->v.a.addr, \ &(aw)->v.a.mask, (x), (af))))) != \ (neg) \ ) #define PF_ALGNMNT(off) (((off) % 2) == 0) #ifdef _KERNEL struct pf_kpooladdr { struct pf_addr_wrap addr; TAILQ_ENTRY(pf_kpooladdr) entries; char ifname[IFNAMSIZ]; struct pfi_kkif *kif; }; TAILQ_HEAD(pf_kpalist, pf_kpooladdr); struct pf_kpool { struct mtx mtx; struct pf_kpalist list; struct pf_kpooladdr *cur; struct pf_poolhashkey key; struct pf_addr counter; struct pf_mape_portset mape; int tblidx; u_int16_t proxy_port[2]; u_int8_t opts; }; struct pf_rule_actions { + int rtableid; uint16_t qid; uint16_t pqid; + uint16_t max_mss; + uint8_t log; + uint8_t set_tos; + uint8_t min_ttl; uint16_t dnpipe; uint16_t dnrpipe; /* Reverse direction pipe */ uint32_t flags; }; union pf_keth_rule_ptr { struct pf_keth_rule *ptr; uint32_t nr; }; struct pf_keth_rule_addr { uint8_t addr[ETHER_ADDR_LEN]; uint8_t mask[ETHER_ADDR_LEN]; bool neg; uint8_t isset; }; struct pf_keth_anchor; TAILQ_HEAD(pf_keth_ruleq, pf_keth_rule); struct pf_keth_ruleset { struct pf_keth_ruleq rules[2]; struct pf_keth_rules { struct pf_keth_ruleq *rules; int open; uint32_t ticket; } active, inactive; struct epoch_context epoch_ctx; struct vnet *vnet; struct pf_keth_anchor *anchor; }; RB_HEAD(pf_keth_anchor_global, pf_keth_anchor); RB_HEAD(pf_keth_anchor_node, pf_keth_anchor); struct pf_keth_anchor { RB_ENTRY(pf_keth_anchor) entry_node; RB_ENTRY(pf_keth_anchor) entry_global; struct pf_keth_anchor *parent; struct pf_keth_anchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pf_keth_ruleset ruleset; int refcnt; /* anchor rules */ uint8_t anchor_relative; uint8_t anchor_wildcard; }; RB_PROTOTYPE(pf_keth_anchor_node, pf_keth_anchor, entry_node, pf_keth_anchor_compare); RB_PROTOTYPE(pf_keth_anchor_global, pf_keth_anchor, entry_global, pf_keth_anchor_compare); struct pf_keth_rule { #define PFE_SKIP_IFP 0 #define PFE_SKIP_DIR 1 #define PFE_SKIP_PROTO 2 #define PFE_SKIP_SRC_ADDR 3 #define PFE_SKIP_DST_ADDR 4 #define PFE_SKIP_COUNT 5 union pf_keth_rule_ptr skip[PFE_SKIP_COUNT]; TAILQ_ENTRY(pf_keth_rule) entries; struct pf_keth_anchor *anchor; u_int8_t anchor_relative; u_int8_t anchor_wildcard; uint32_t nr; bool quick; /* Filter */ char ifname[IFNAMSIZ]; struct pfi_kkif *kif; bool ifnot; uint8_t direction; uint16_t proto; struct pf_keth_rule_addr src, dst; struct pf_rule_addr ipsrc, ipdst; char match_tagname[PF_TAG_NAME_SIZE]; uint16_t match_tag; bool match_tag_not; /* Stats */ counter_u64_t evaluations; counter_u64_t packets[2]; counter_u64_t bytes[2]; time_t *timestamp; /* Action */ char qname[PF_QNAME_SIZE]; int qid; char tagname[PF_TAG_NAME_SIZE]; uint16_t tag; char bridge_to_name[IFNAMSIZ]; struct pfi_kkif *bridge_to; uint8_t action; uint16_t dnpipe; uint32_t dnflags; }; union pf_krule_ptr { struct pf_krule *ptr; u_int32_t nr; }; RB_HEAD(pf_krule_global, pf_krule); RB_PROTOTYPE(pf_krule_global, pf_krule, entry_global, pf_krule_compare); struct pf_krule { 
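/* Kernel-side representation of one rule: match criteria (addresses, interface, protocol), skip-step pointers, per-rule counters, and the actions applied to matching traffic. */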
struct pf_rule_addr src; struct pf_rule_addr dst; union pf_krule_ptr skip[PF_SKIP_COUNT]; char label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; uint32_t ridentifier; char ifname[IFNAMSIZ]; char qname[PF_QNAME_SIZE]; char pqname[PF_QNAME_SIZE]; char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; char overload_tblname[PF_TABLE_NAME_SIZE]; TAILQ_ENTRY(pf_krule) entries; struct pf_kpool rpool; struct pf_counter_u64 evaluations; struct pf_counter_u64 packets[2]; struct pf_counter_u64 bytes[2]; time_t *timestamp; struct pfi_kkif *kif; struct pf_kanchor *anchor; struct pfr_ktable *overload_tbl; pf_osfp_t os_fingerprint; int rtableid; u_int32_t timeout[PFTM_MAX]; u_int32_t max_states; u_int32_t max_src_nodes; u_int32_t max_src_states; u_int32_t max_src_conn; struct { u_int32_t limit; u_int32_t seconds; } max_src_conn_rate; u_int16_t qid; u_int16_t pqid; u_int16_t dnpipe; u_int16_t dnrpipe; u_int32_t free_flags; u_int32_t nr; u_int32_t prob; uid_t cuid; pid_t cpid; counter_u64_t states_cur; counter_u64_t states_tot; counter_u64_t src_nodes; u_int16_t return_icmp; u_int16_t return_icmp6; u_int16_t max_mss; u_int16_t tag; u_int16_t match_tag; u_int16_t scrub_flags; struct pf_rule_uid uid; struct pf_rule_gid gid; u_int32_t rule_flag; uint32_t rule_ref; u_int8_t action; u_int8_t direction; u_int8_t log; u_int8_t logif; u_int8_t quick; u_int8_t ifnot; u_int8_t match_tag_not; u_int8_t natpass; u_int8_t keep_state; sa_family_t af; u_int8_t proto; u_int8_t type; u_int8_t code; u_int8_t flags; u_int8_t flagset; u_int8_t min_ttl; u_int8_t allow_opts; u_int8_t rt; u_int8_t return_ttl; u_int8_t tos; u_int8_t set_tos; u_int8_t anchor_relative; u_int8_t anchor_wildcard; u_int8_t flush; u_int8_t prio; u_int8_t set_prio[2]; struct { struct pf_addr addr; u_int16_t port; } divert; u_int8_t md5sum[PF_MD5_DIGEST_LENGTH]; RB_ENTRY(pf_krule) entry_global; #ifdef PF_WANT_32_TO_64_COUNTER LIST_ENTRY(pf_krule) allrulelist; bool allrulelinked; #endif }; +struct pf_krule_item { + SLIST_ENTRY(pf_krule_item) entry; + struct pf_krule *r; +}; + +SLIST_HEAD(pf_krule_slist, pf_krule_item); + struct pf_ksrc_node { LIST_ENTRY(pf_ksrc_node) entry; struct pf_addr addr; struct pf_addr raddr; + struct pf_krule_slist match_rules; union pf_krule_ptr rule; struct pfi_kkif *kif; counter_u64_t bytes[2]; counter_u64_t packets[2]; u_int32_t states; u_int32_t conn; struct pf_threshold conn_rate; u_int32_t creation; u_int32_t expire; sa_family_t af; u_int8_t ruletype; }; #endif struct pf_state_scrub { struct timeval pfss_last; /* time received last packet */ u_int32_t pfss_tsecr; /* last echoed timestamp */ u_int32_t pfss_tsval; /* largest timestamp */ u_int32_t pfss_tsval0; /* original timestamp */ u_int16_t pfss_flags; #define PFSS_TIMESTAMP 0x0001 /* modulate timestamp */ #define PFSS_PAWS 0x0010 /* stricter PAWS checks */ #define PFSS_PAWS_IDLED 0x0020 /* was idle too long. 
no PAWS */ #define PFSS_DATA_TS 0x0040 /* timestamp on data packets */ #define PFSS_DATA_NOTS 0x0080 /* no timestamp on data packets */ u_int8_t pfss_ttl; /* stashed TTL */ u_int8_t pad; u_int32_t pfss_ts_mod; /* timestamp modulation */ }; struct pf_state_host { struct pf_addr addr; u_int16_t port; u_int16_t pad; }; struct pf_state_peer { struct pf_state_scrub *scrub; /* state is scrubbed */ u_int32_t seqlo; /* Max sequence number sent */ u_int32_t seqhi; /* Max the other end ACKd + win */ u_int32_t seqdiff; /* Sequence number modulator */ u_int16_t max_win; /* largest window (pre scaling) */ u_int16_t mss; /* Maximum segment size option */ u_int8_t state; /* active state level */ u_int8_t wscale; /* window scaling factor */ u_int8_t tcp_est; /* Did we reach TCPS_ESTABLISHED */ u_int8_t pad[1]; }; /* Keep synced with struct pf_state_key. */ struct pf_state_key_cmp { struct pf_addr addr[2]; u_int16_t port[2]; sa_family_t af; u_int8_t proto; u_int8_t pad[2]; }; struct pf_state_key { struct pf_addr addr[2]; u_int16_t port[2]; sa_family_t af; u_int8_t proto; u_int8_t pad[2]; LIST_ENTRY(pf_state_key) entry; TAILQ_HEAD(, pf_kstate) states[2]; }; /* Keep synced with struct pf_kstate. */ struct pf_state_cmp { u_int64_t id; u_int32_t creatorid; u_int8_t direction; u_int8_t pad[3]; }; -#define PFSTATE_ALLOWOPTS 0x01 -#define PFSTATE_SLOPPY 0x02 -/* was PFSTATE_PFLOW 0x04 */ -#define PFSTATE_NOSYNC 0x08 -#define PFSTATE_ACK 0x10 -#define PFRULE_DN_IS_PIPE 0x40 -#define PFRULE_DN_IS_QUEUE 0x80 -#define PFSTATE_SETPRIO 0x0200 -#define PFSTATE_SETMASK (PFSTATE_SETPRIO) - struct pf_state_scrub_export { uint16_t pfss_flags; uint8_t pfss_ttl; /* stashed TTL */ #define PF_SCRUB_FLAG_VALID 0x01 uint8_t scrub_flag; uint32_t pfss_ts_mod; /* timestamp modulation */ }; struct pf_state_key_export { struct pf_addr addr[2]; uint16_t port[2]; }; struct pf_state_peer_export { struct pf_state_scrub_export scrub; /* state is scrubbed */ uint32_t seqlo; /* Max sequence number sent */ uint32_t seqhi; /* Max the other end ACKd + win */ uint32_t seqdiff; /* Sequence number modulator */ uint16_t max_win; /* largest window (pre scaling) */ uint16_t mss; /* Maximum segment size option */ uint8_t state; /* active state level */ uint8_t wscale; /* window scaling factor */ uint8_t dummy[6]; }; _Static_assert(sizeof(struct pf_state_peer_export) == 32, "size incorrect"); struct pf_state_export { uint64_t version; #define PF_STATE_VERSION 20210706 uint64_t id; char ifname[IFNAMSIZ]; char orig_ifname[IFNAMSIZ]; struct pf_state_key_export key[2]; struct pf_state_peer_export src; struct pf_state_peer_export dst; struct pf_addr rt_addr; uint32_t rule; uint32_t anchor; uint32_t nat_rule; uint32_t creation; uint32_t expire; uint32_t spare0; uint64_t packets[2]; uint64_t bytes[2]; uint32_t creatorid; uint32_t spare1; sa_family_t af; uint8_t proto; uint8_t direction; uint8_t log; - uint8_t state_flags; + uint8_t state_flags_compat; uint8_t timeout; uint8_t sync_flags; uint8_t updates; + uint16_t state_flags; - uint8_t spare[112]; + uint8_t spare[110]; }; _Static_assert(sizeof(struct pf_state_export) == 384, "size incorrect"); #ifdef _KERNEL struct pf_kstate { /* * Area shared with pf_state_cmp */ u_int64_t id; u_int32_t creatorid; u_int8_t direction; u_int8_t pad[3]; /* * end of the area */ - u_int8_t state_flags; + u_int16_t state_flags; u_int8_t timeout; u_int8_t sync_state; /* PFSYNC_S_x */ u_int8_t sync_updates; /* XXX */ u_int refs; struct mtx *lock; TAILQ_ENTRY(pf_kstate) sync_list; TAILQ_ENTRY(pf_kstate) key_list[2]; 
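/* entry links the state into its V_pf_idhash row; key_list[] links it on the wire- and stack-side state keys (key[2] below). */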
LIST_ENTRY(pf_kstate) entry; struct pf_state_peer src; struct pf_state_peer dst; + struct pf_krule_slist match_rules; union pf_krule_ptr rule; union pf_krule_ptr anchor; union pf_krule_ptr nat_rule; struct pf_addr rt_addr; struct pf_state_key *key[2]; /* addresses stack and wire */ struct pfi_kkif *kif; struct pfi_kkif *orig_kif; /* The real kif, even if we're a floating state (i.e. if == V_pfi_all). */ struct pfi_kkif *rt_kif; struct pf_ksrc_node *src_node; struct pf_ksrc_node *nat_src_node; u_int64_t packets[2]; u_int64_t bytes[2]; u_int32_t creation; u_int32_t expire; u_int32_t pfsync_time; - u_int16_t qid; - u_int16_t pqid; + u_int16_t qid; + u_int16_t pqid; u_int16_t dnpipe; u_int16_t dnrpipe; u_int16_t tag; u_int8_t log; + int rtableid; + u_int8_t min_ttl; + u_int8_t set_tos; + u_int16_t max_mss; }; /* - * Size <= fits 13 objects per page on LP64. Try to not grow the struct beyond that. + * Size <= fits 12 objects per page on LP64. Try to not grow the struct beyond that. */ -_Static_assert(sizeof(struct pf_kstate) <= 312, "pf_kstate size crosses 312 bytes"); +_Static_assert(sizeof(struct pf_kstate) <= 336, "pf_kstate size crosses 336 bytes"); #endif /* * Unified state structures for pulling states out of the kernel * used by pfsync(4) and the pf(4) ioctl. */ struct pfsync_state_scrub { u_int16_t pfss_flags; u_int8_t pfss_ttl; /* stashed TTL */ #define PFSYNC_SCRUB_FLAG_VALID 0x01 u_int8_t scrub_flag; u_int32_t pfss_ts_mod; /* timestamp modulation */ } __packed; struct pfsync_state_peer { struct pfsync_state_scrub scrub; /* state is scrubbed */ u_int32_t seqlo; /* Max sequence number sent */ u_int32_t seqhi; /* Max the other end ACKd + win */ u_int32_t seqdiff; /* Sequence number modulator */ u_int16_t max_win; /* largest window (pre scaling) */ u_int16_t mss; /* Maximum segment size option */ u_int8_t state; /* active state level */ u_int8_t wscale; /* window scaling factor */ u_int8_t pad[6]; } __packed; struct pfsync_state_key { struct pf_addr addr[2]; u_int16_t port[2]; }; struct pfsync_state { u_int64_t id; char ifname[IFNAMSIZ]; struct pfsync_state_key key[2]; struct pfsync_state_peer src; struct pfsync_state_peer dst; struct pf_addr rt_addr; u_int32_t rule; u_int32_t anchor; u_int32_t nat_rule; u_int32_t creation; u_int32_t expire; u_int32_t packets[2][2]; u_int32_t bytes[2][2]; u_int32_t creatorid; sa_family_t af; u_int8_t proto; u_int8_t direction; - u_int8_t __spare[2]; + u_int16_t state_flags; u_int8_t log; - u_int8_t state_flags; + u_int8_t state_flags_compat; u_int8_t timeout; u_int8_t sync_flags; u_int8_t updates; } __packed; #ifdef _KERNEL /* pfsync */ typedef int pfsync_state_import_t(struct pfsync_state *, int); typedef void pfsync_insert_state_t(struct pf_kstate *); typedef void pfsync_update_state_t(struct pf_kstate *); typedef void pfsync_delete_state_t(struct pf_kstate *); typedef void pfsync_clear_states_t(u_int32_t, const char *); typedef int pfsync_defer_t(struct pf_kstate *, struct mbuf *); typedef void pfsync_detach_ifnet_t(struct ifnet *); VNET_DECLARE(pfsync_state_import_t *, pfsync_state_import_ptr); #define V_pfsync_state_import_ptr VNET(pfsync_state_import_ptr) VNET_DECLARE(pfsync_insert_state_t *, pfsync_insert_state_ptr); #define V_pfsync_insert_state_ptr VNET(pfsync_insert_state_ptr) VNET_DECLARE(pfsync_update_state_t *, pfsync_update_state_ptr); #define V_pfsync_update_state_ptr VNET(pfsync_update_state_ptr) VNET_DECLARE(pfsync_delete_state_t *, pfsync_delete_state_ptr); #define V_pfsync_delete_state_ptr VNET(pfsync_delete_state_ptr) 
VNET_DECLARE(pfsync_clear_states_t *, pfsync_clear_states_ptr); #define V_pfsync_clear_states_ptr VNET(pfsync_clear_states_ptr) VNET_DECLARE(pfsync_defer_t *, pfsync_defer_ptr); #define V_pfsync_defer_ptr VNET(pfsync_defer_ptr) extern pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr; void pfsync_state_export(struct pfsync_state *, struct pf_kstate *); void pf_state_export(struct pf_state_export *, struct pf_kstate *); /* pflog */ struct pf_kruleset; struct pf_pdesc; typedef int pflog_packet_t(struct pfi_kkif *, struct mbuf *, sa_family_t, u_int8_t, u_int8_t, struct pf_krule *, struct pf_krule *, struct pf_kruleset *, struct pf_pdesc *, int); extern pflog_packet_t *pflog_packet_ptr; #endif /* _KERNEL */ #define PFSYNC_FLAG_SRCNODE 0x04 #define PFSYNC_FLAG_NATSRCNODE 0x08 /* for copies to/from network byte order */ /* ioctl interface also uses network byte order */ #define pf_state_peer_hton(s,d) do { \ (d)->seqlo = htonl((s)->seqlo); \ (d)->seqhi = htonl((s)->seqhi); \ (d)->seqdiff = htonl((s)->seqdiff); \ (d)->max_win = htons((s)->max_win); \ (d)->mss = htons((s)->mss); \ (d)->state = (s)->state; \ (d)->wscale = (s)->wscale; \ if ((s)->scrub) { \ (d)->scrub.pfss_flags = \ htons((s)->scrub->pfss_flags & PFSS_TIMESTAMP); \ (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \ (d)->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);\ (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \ } \ } while (0) #define pf_state_peer_ntoh(s,d) do { \ (d)->seqlo = ntohl((s)->seqlo); \ (d)->seqhi = ntohl((s)->seqhi); \ (d)->seqdiff = ntohl((s)->seqdiff); \ (d)->max_win = ntohs((s)->max_win); \ (d)->mss = ntohs((s)->mss); \ (d)->state = (s)->state; \ (d)->wscale = (s)->wscale; \ if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \ (d)->scrub != NULL) { \ (d)->scrub->pfss_flags = \ ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \ (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \ (d)->scrub->pfss_ts_mod = ntohl((s)->scrub.pfss_ts_mod);\ } \ } while (0) #define pf_state_counter_hton(s,d) do { \ d[0] = htonl((s>>32)&0xffffffff); \ d[1] = htonl(s&0xffffffff); \ } while (0) #define pf_state_counter_from_pfsync(s) \ (((u_int64_t)(s[0])<<32) | (u_int64_t)(s[1])) #define pf_state_counter_ntoh(s,d) do { \ d = ntohl(s[0]); \ d = d<<32; \ d += ntohl(s[1]); \ } while (0) TAILQ_HEAD(pf_krulequeue, pf_krule); struct pf_kanchor; struct pf_kruleset { struct { struct pf_krulequeue queues[2]; struct { struct pf_krulequeue *ptr; struct pf_krule **ptr_array; u_int32_t rcount; u_int32_t ticket; int open; struct pf_krule_global *tree; } active, inactive; } rules[PF_RULESET_MAX]; struct pf_kanchor *anchor; u_int32_t tticket; int tables; int topen; }; RB_HEAD(pf_kanchor_global, pf_kanchor); RB_HEAD(pf_kanchor_node, pf_kanchor); struct pf_kanchor { RB_ENTRY(pf_kanchor) entry_global; RB_ENTRY(pf_kanchor) entry_node; struct pf_kanchor *parent; struct pf_kanchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pf_kruleset ruleset; int refcnt; /* anchor rules */ }; RB_PROTOTYPE(pf_kanchor_global, pf_kanchor, entry_global, pf_anchor_compare); RB_PROTOTYPE(pf_kanchor_node, pf_kanchor, entry_node, pf_kanchor_compare); #define PF_RESERVED_ANCHOR "_pf" #define PFR_TFLAG_PERSIST 0x00000001 #define PFR_TFLAG_CONST 0x00000002 #define PFR_TFLAG_ACTIVE 0x00000004 #define PFR_TFLAG_INACTIVE 0x00000008 #define PFR_TFLAG_REFERENCED 0x00000010 #define PFR_TFLAG_REFDANCHOR 0x00000020 #define PFR_TFLAG_COUNTERS 0x00000040 /* Adjust masks below when adding flags. 
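 * For example, a new user-settable table flag must be or'ed into both PFR_TFLAG_USRMASK and PFR_TFLAG_ALLMASK, while flags that only the kernel may set belong in PFR_TFLAG_SETMASK (and PFR_TFLAG_ALLMASK) instead.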
*/ #define PFR_TFLAG_USRMASK (PFR_TFLAG_PERSIST | \ PFR_TFLAG_CONST | \ PFR_TFLAG_COUNTERS) #define PFR_TFLAG_SETMASK (PFR_TFLAG_ACTIVE | \ PFR_TFLAG_INACTIVE | \ PFR_TFLAG_REFERENCED | \ PFR_TFLAG_REFDANCHOR) #define PFR_TFLAG_ALLMASK (PFR_TFLAG_PERSIST | \ PFR_TFLAG_CONST | \ PFR_TFLAG_ACTIVE | \ PFR_TFLAG_INACTIVE | \ PFR_TFLAG_REFERENCED | \ PFR_TFLAG_REFDANCHOR | \ PFR_TFLAG_COUNTERS) struct pf_kanchor_stackframe; struct pf_keth_anchor_stackframe; struct pfr_table { char pfrt_anchor[MAXPATHLEN]; char pfrt_name[PF_TABLE_NAME_SIZE]; u_int32_t pfrt_flags; u_int8_t pfrt_fback; }; enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED, PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_DUPLICATE, PFR_FB_NOTMATCH, PFR_FB_CONFLICT, PFR_FB_NOCOUNT, PFR_FB_MAX }; struct pfr_addr { union { struct in_addr _pfra_ip4addr; struct in6_addr _pfra_ip6addr; } pfra_u; u_int8_t pfra_af; u_int8_t pfra_net; u_int8_t pfra_not; u_int8_t pfra_fback; }; #define pfra_ip4addr pfra_u._pfra_ip4addr #define pfra_ip6addr pfra_u._pfra_ip6addr enum { PFR_DIR_IN, PFR_DIR_OUT, PFR_DIR_MAX }; enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX }; enum { PFR_TYPE_PACKETS, PFR_TYPE_BYTES, PFR_TYPE_MAX }; #define PFR_NUM_COUNTERS (PFR_DIR_MAX * PFR_OP_ADDR_MAX * PFR_TYPE_MAX) #define PFR_OP_XPASS PFR_OP_ADDR_MAX struct pfr_astats { struct pfr_addr pfras_a; u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; long pfras_tzero; }; enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX }; struct pfr_tstats { struct pfr_table pfrts_t; u_int64_t pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; u_int64_t pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; u_int64_t pfrts_match; u_int64_t pfrts_nomatch; long pfrts_tzero; int pfrts_cnt; int pfrts_refcnt[PFR_REFCNT_MAX]; }; #ifdef _KERNEL struct pfr_kstate_counter { counter_u64_t pkc_pcpu; u_int64_t pkc_zero; }; static inline int pfr_kstate_counter_init(struct pfr_kstate_counter *pfrc, int flags) { pfrc->pkc_zero = 0; pfrc->pkc_pcpu = counter_u64_alloc(flags); if (pfrc->pkc_pcpu == NULL) return (ENOMEM); return (0); } static inline void pfr_kstate_counter_deinit(struct pfr_kstate_counter *pfrc) { counter_u64_free(pfrc->pkc_pcpu); } static inline u_int64_t pfr_kstate_counter_fetch(struct pfr_kstate_counter *pfrc) { u_int64_t c; c = counter_u64_fetch(pfrc->pkc_pcpu); c -= pfrc->pkc_zero; return (c); } static inline void pfr_kstate_counter_zero(struct pfr_kstate_counter *pfrc) { u_int64_t c; c = counter_u64_fetch(pfrc->pkc_pcpu); pfrc->pkc_zero = c; } static inline void pfr_kstate_counter_add(struct pfr_kstate_counter *pfrc, int64_t n) { counter_u64_add(pfrc->pkc_pcpu, n); } struct pfr_ktstats { struct pfr_table pfrts_t; struct pfr_kstate_counter pfrkts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; struct pfr_kstate_counter pfrkts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; struct pfr_kstate_counter pfrkts_match; struct pfr_kstate_counter pfrkts_nomatch; long pfrkts_tzero; int pfrkts_cnt; int pfrkts_refcnt[PFR_REFCNT_MAX]; }; #endif /* _KERNEL */ #define pfrts_name pfrts_t.pfrt_name #define pfrts_flags pfrts_t.pfrt_flags #ifndef _SOCKADDR_UNION_DEFINED #define _SOCKADDR_UNION_DEFINED union sockaddr_union { struct sockaddr sa; struct sockaddr_in sin; struct sockaddr_in6 sin6; }; #endif /* _SOCKADDR_UNION_DEFINED */ struct pfr_kcounters { counter_u64_t pfrkc_counters; long pfrkc_tzero; }; #define pfr_kentry_counter(kc, dir, op, t) \ ((kc)->pfrkc_counters + \ (dir) * PFR_OP_ADDR_MAX * PFR_TYPE_MAX + (op) * PFR_TYPE_MAX + (t)) #ifdef _KERNEL 
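/* * Hypothetical use of pfr_kentry_counter() above, for illustration only: the single pfrkc_counters allocation is a flat array of PFR_NUM_COUNTERS (2 * 3 * 2 = 12) per-CPU counters indexed by [direction][operation][type], so accounting the bytes of a passed outbound packet against a table entry 'ke' (struct pfr_kentry below) would look like * * counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters, * PFR_DIR_OUT, PFR_OP_PASS, PFR_TYPE_BYTES), m->m_pkthdr.len); * * which selects entry 1 * 6 + 1 * 2 + 1 = 9 of the array. */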
SLIST_HEAD(pfr_kentryworkq, pfr_kentry); struct pfr_kentry { struct radix_node pfrke_node[2]; union sockaddr_union pfrke_sa; SLIST_ENTRY(pfr_kentry) pfrke_workq; struct pfr_kcounters pfrke_counters; u_int8_t pfrke_af; u_int8_t pfrke_net; u_int8_t pfrke_not; u_int8_t pfrke_mark; }; SLIST_HEAD(pfr_ktableworkq, pfr_ktable); RB_HEAD(pfr_ktablehead, pfr_ktable); struct pfr_ktable { struct pfr_ktstats pfrkt_kts; RB_ENTRY(pfr_ktable) pfrkt_tree; SLIST_ENTRY(pfr_ktable) pfrkt_workq; struct radix_node_head *pfrkt_ip4; struct radix_node_head *pfrkt_ip6; struct pfr_ktable *pfrkt_shadow; struct pfr_ktable *pfrkt_root; struct pf_kruleset *pfrkt_rs; long pfrkt_larg; int pfrkt_nflags; }; #define pfrkt_t pfrkt_kts.pfrts_t #define pfrkt_name pfrkt_t.pfrt_name #define pfrkt_anchor pfrkt_t.pfrt_anchor #define pfrkt_ruleset pfrkt_t.pfrt_ruleset #define pfrkt_flags pfrkt_t.pfrt_flags #define pfrkt_cnt pfrkt_kts.pfrkts_cnt #define pfrkt_refcnt pfrkt_kts.pfrkts_refcnt #define pfrkt_packets pfrkt_kts.pfrkts_packets #define pfrkt_bytes pfrkt_kts.pfrkts_bytes #define pfrkt_match pfrkt_kts.pfrkts_match #define pfrkt_nomatch pfrkt_kts.pfrkts_nomatch #define pfrkt_tzero pfrkt_kts.pfrkts_tzero #endif #ifdef _KERNEL struct pfi_kkif { char pfik_name[IFNAMSIZ]; union { RB_ENTRY(pfi_kkif) _pfik_tree; LIST_ENTRY(pfi_kkif) _pfik_list; } _pfik_glue; #define pfik_tree _pfik_glue._pfik_tree #define pfik_list _pfik_glue._pfik_list struct pf_counter_u64 pfik_packets[2][2][2]; struct pf_counter_u64 pfik_bytes[2][2][2]; u_int32_t pfik_tzero; u_int pfik_flags; struct ifnet *pfik_ifp; struct ifg_group *pfik_group; u_int pfik_rulerefs; TAILQ_HEAD(, pfi_dynaddr) pfik_dynaddrs; #ifdef PF_WANT_32_TO_64_COUNTER LIST_ENTRY(pfi_kkif) pfik_allkiflist; #endif }; #endif #define PFI_IFLAG_REFS 0x0001 /* has state references */ #define PFI_IFLAG_SKIP 0x0100 /* skip filtering on interface */ #ifdef _KERNEL struct pf_pdesc { struct { int done; uid_t uid; gid_t gid; } lookup; u_int64_t tot_len; /* Make Mickey money */ union pf_headers { struct tcphdr tcp; struct udphdr udp; struct icmp icmp; #ifdef INET6 struct icmp6_hdr icmp6; #endif /* INET6 */ char any[0]; } hdr; struct pf_krule *nat_rule; /* nat/rdr rule applied to packet */ struct pf_addr *src; /* src address */ struct pf_addr *dst; /* dst address */ u_int16_t *sport; u_int16_t *dport; struct pf_mtag *pf_mtag; struct pf_rule_actions act; u_int32_t p_len; /* total length of payload */ u_int16_t *ip_sum; u_int16_t *proto_sum; u_int16_t flags; /* Let SCRUB trigger behavior in * state code. 
Easier than tags */ #define PFDESC_TCP_NORM 0x0001 /* TCP shall be statefully scrubbed */ #define PFDESC_IP_REAS 0x0002 /* IP frags would've been reassembled */ sa_family_t af; u_int8_t proto; u_int8_t tos; u_int8_t dir; /* direction */ u_int8_t sidx; /* key index for source */ u_int8_t didx; /* key index for destination */ }; #endif /* flags for RDR options */ #define PF_DPORT_RANGE 0x01 /* Dest port uses range */ #define PF_RPORT_RANGE 0x02 /* RDR'ed port uses range */ /* UDP state enumeration */ #define PFUDPS_NO_TRAFFIC 0 #define PFUDPS_SINGLE 1 #define PFUDPS_MULTIPLE 2 #define PFUDPS_NSTATES 3 /* number of state levels */ #define PFUDPS_NAMES { \ "NO_TRAFFIC", \ "SINGLE", \ "MULTIPLE", \ NULL \ } /* Other protocol state enumeration */ #define PFOTHERS_NO_TRAFFIC 0 #define PFOTHERS_SINGLE 1 #define PFOTHERS_MULTIPLE 2 #define PFOTHERS_NSTATES 3 /* number of state levels */ #define PFOTHERS_NAMES { \ "NO_TRAFFIC", \ "SINGLE", \ "MULTIPLE", \ NULL \ } #define ACTION_SET(a, x) \ do { \ if ((a) != NULL) \ *(a) = (x); \ } while (0) #define REASON_SET(a, x) \ do { \ if ((a) != NULL) \ *(a) = (x); \ if (x < PFRES_MAX) \ counter_u64_add(V_pf_status.counters[x], 1); \ } while (0) enum pf_syncookies_mode { PF_SYNCOOKIES_NEVER = 0, PF_SYNCOOKIES_ALWAYS = 1, PF_SYNCOOKIES_ADAPTIVE = 2, PF_SYNCOOKIES_MODE_MAX = PF_SYNCOOKIES_ADAPTIVE }; #define PF_SYNCOOKIES_HIWATPCT 25 #define PF_SYNCOOKIES_LOWATPCT (PF_SYNCOOKIES_HIWATPCT / 2) #ifdef _KERNEL struct pf_kstatus { counter_u64_t counters[PFRES_MAX]; /* reason for passing/dropping */ counter_u64_t lcounters[KLCNT_MAX]; /* limit counters */ struct pf_counter_u64 fcounters[FCNT_MAX]; /* state operation counters */ counter_u64_t scounters[SCNT_MAX]; /* src_node operation counters */ uint32_t states; uint32_t src_nodes; uint32_t running; uint32_t since; uint32_t debug; uint32_t hostid; char ifname[IFNAMSIZ]; uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; bool keep_counters; enum pf_syncookies_mode syncookies_mode; bool syncookies_active; uint64_t syncookies_inflight[2]; uint32_t states_halfopen; + uint32_t reass; }; #endif struct pf_divert { union { struct in_addr ipv4; struct in6_addr ipv6; } addr; u_int16_t port; }; #define PFFRAG_FRENT_HIWAT 5000 /* Number of fragment entries */ #define PFR_KENTRY_HIWAT 200000 /* Number of table entries */ /* * Limit the length of the fragment queue traversal. Remember * search entry points based on the fragment offset. */ #define PF_FRAG_ENTRY_POINTS 16 /* * The number of entries in the fragment queue must be limited * to avoid DoS by linear searching. Instead of a global limit, * use a limit per entry point. For large packets these sum up. 
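 * With PF_FRAG_ENTRY_POINTS (16) entry points and PF_FRAG_ENTRY_LIMIT (64) entries per point, a single reassembly queue is bounded to 16 * 64 = 1024 fragment entries, while each search still only walks the entries behind one point.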
*/ #define PF_FRAG_ENTRY_LIMIT 64 /* * ioctl parameter structures */ struct pfioc_pooladdr { u_int32_t action; u_int32_t ticket; u_int32_t nr; u_int32_t r_num; u_int8_t r_action; u_int8_t r_last; u_int8_t af; char anchor[MAXPATHLEN]; struct pf_pooladdr addr; }; struct pfioc_rule { u_int32_t action; u_int32_t ticket; u_int32_t pool_ticket; u_int32_t nr; char anchor[MAXPATHLEN]; char anchor_call[MAXPATHLEN]; struct pf_rule rule; }; struct pfioc_natlook { struct pf_addr saddr; struct pf_addr daddr; struct pf_addr rsaddr; struct pf_addr rdaddr; u_int16_t sport; u_int16_t dport; u_int16_t rsport; u_int16_t rdport; sa_family_t af; u_int8_t proto; u_int8_t direction; }; struct pfioc_state { struct pfsync_state state; }; struct pfioc_src_node_kill { sa_family_t psnk_af; struct pf_rule_addr psnk_src; struct pf_rule_addr psnk_dst; u_int psnk_killed; }; #ifdef _KERNEL struct pf_kstate_kill { struct pf_state_cmp psk_pfcmp; sa_family_t psk_af; int psk_proto; struct pf_rule_addr psk_src; struct pf_rule_addr psk_dst; struct pf_rule_addr psk_rt_addr; char psk_ifname[IFNAMSIZ]; char psk_label[PF_RULE_LABEL_SIZE]; u_int psk_killed; bool psk_kill_match; }; #endif struct pfioc_state_kill { struct pf_state_cmp psk_pfcmp; sa_family_t psk_af; int psk_proto; struct pf_rule_addr psk_src; struct pf_rule_addr psk_dst; char psk_ifname[IFNAMSIZ]; char psk_label[PF_RULE_LABEL_SIZE]; u_int psk_killed; }; struct pfioc_states { int ps_len; union { caddr_t psu_buf; struct pfsync_state *psu_states; } ps_u; #define ps_buf ps_u.psu_buf #define ps_states ps_u.psu_states }; struct pfioc_states_v2 { int ps_len; uint64_t ps_req_version; union { caddr_t psu_buf; struct pf_state_export *psu_states; } ps_u; #define ps_buf ps_u.psu_buf #define ps_states ps_u.psu_states }; struct pfioc_src_nodes { int psn_len; union { caddr_t psu_buf; struct pf_src_node *psu_src_nodes; } psn_u; #define psn_buf psn_u.psu_buf #define psn_src_nodes psn_u.psu_src_nodes }; struct pfioc_if { char ifname[IFNAMSIZ]; }; struct pfioc_tm { int timeout; int seconds; }; struct pfioc_limit { int index; unsigned limit; }; struct pfioc_altq_v0 { u_int32_t action; u_int32_t ticket; u_int32_t nr; struct pf_altq_v0 altq; }; struct pfioc_altq_v1 { u_int32_t action; u_int32_t ticket; u_int32_t nr; /* * Placed here so code that only uses the above parameters can be * written entirely in terms of the v0 or v1 type. */ u_int32_t version; struct pf_altq_v1 altq; }; /* * Latest version of struct pfioc_altq_vX. This must move in lock-step with * the latest version of struct pf_altq_vX as it has that struct as a * member. */ #define PFIOC_ALTQ_VERSION PF_ALTQ_VERSION struct pfioc_qstats_v0 { u_int32_t ticket; u_int32_t nr; void *buf; int nbytes; u_int8_t scheduler; }; struct pfioc_qstats_v1 { u_int32_t ticket; u_int32_t nr; void *buf; int nbytes; u_int8_t scheduler; /* * Placed here so code that only uses the above parameters can be * written entirely in terms of the v0 or v1 type. 
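 * In other words, the v0 layout is a common prefix of the v1 layout; only consumers that request the versioned stats format need the version member that follows.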
*/ u_int32_t version; /* Requested version of stats struct */ }; /* Latest version of struct pfioc_qstats_vX */ #define PFIOC_QSTATS_VERSION 1 struct pfioc_ruleset { u_int32_t nr; char path[MAXPATHLEN]; char name[PF_ANCHOR_NAME_SIZE]; }; #define PF_RULESET_ALTQ (PF_RULESET_MAX) #define PF_RULESET_TABLE (PF_RULESET_MAX+1) #define PF_RULESET_ETH (PF_RULESET_MAX+2) struct pfioc_trans { int size; /* number of elements */ int esize; /* size of each element in bytes */ struct pfioc_trans_e { int rs_num; char anchor[MAXPATHLEN]; u_int32_t ticket; } *array; }; #define PFR_FLAG_ATOMIC 0x00000001 /* unused */ #define PFR_FLAG_DUMMY 0x00000002 #define PFR_FLAG_FEEDBACK 0x00000004 #define PFR_FLAG_CLSTATS 0x00000008 #define PFR_FLAG_ADDRSTOO 0x00000010 #define PFR_FLAG_REPLACE 0x00000020 #define PFR_FLAG_ALLRSETS 0x00000040 #define PFR_FLAG_ALLMASK 0x0000007F #ifdef _KERNEL #define PFR_FLAG_USERIOCTL 0x10000000 #endif struct pfioc_table { struct pfr_table pfrio_table; void *pfrio_buffer; int pfrio_esize; int pfrio_size; int pfrio_size2; int pfrio_nadd; int pfrio_ndel; int pfrio_nchange; int pfrio_flags; u_int32_t pfrio_ticket; }; #define pfrio_exists pfrio_nadd #define pfrio_nzero pfrio_nadd #define pfrio_nmatch pfrio_nadd #define pfrio_naddr pfrio_size2 #define pfrio_setflag pfrio_size2 #define pfrio_clrflag pfrio_nadd struct pfioc_iface { char pfiio_name[IFNAMSIZ]; void *pfiio_buffer; int pfiio_esize; int pfiio_size; int pfiio_nzero; int pfiio_flags; }; /* * ioctl operations */ #define DIOCSTART _IO ('D', 1) #define DIOCSTOP _IO ('D', 2) #define DIOCADDRULE _IOWR('D', 4, struct pfioc_rule) #define DIOCADDRULENV _IOWR('D', 4, struct pfioc_nv) #define DIOCGETRULES _IOWR('D', 6, struct pfioc_rule) #define DIOCGETRULE _IOWR('D', 7, struct pfioc_rule) #define DIOCGETRULENV _IOWR('D', 7, struct pfioc_nv) /* XXX cut 8 - 17 */ #define DIOCCLRSTATES _IOWR('D', 18, struct pfioc_state_kill) #define DIOCCLRSTATESNV _IOWR('D', 18, struct pfioc_nv) #define DIOCGETSTATE _IOWR('D', 19, struct pfioc_state) #define DIOCGETSTATENV _IOWR('D', 19, struct pfioc_nv) #define DIOCSETSTATUSIF _IOWR('D', 20, struct pfioc_if) #define DIOCGETSTATUS _IOWR('D', 21, struct pf_status) #define DIOCGETSTATUSNV _IOWR('D', 21, struct pfioc_nv) #define DIOCCLRSTATUS _IO ('D', 22) #define DIOCNATLOOK _IOWR('D', 23, struct pfioc_natlook) #define DIOCSETDEBUG _IOWR('D', 24, u_int32_t) #define DIOCGETSTATES _IOWR('D', 25, struct pfioc_states) #define DIOCCHANGERULE _IOWR('D', 26, struct pfioc_rule) /* XXX cut 26 - 28 */ #define DIOCSETTIMEOUT _IOWR('D', 29, struct pfioc_tm) #define DIOCGETTIMEOUT _IOWR('D', 30, struct pfioc_tm) #define DIOCADDSTATE _IOWR('D', 37, struct pfioc_state) #define DIOCCLRRULECTRS _IO ('D', 38) #define DIOCGETLIMIT _IOWR('D', 39, struct pfioc_limit) #define DIOCSETLIMIT _IOWR('D', 40, struct pfioc_limit) #define DIOCKILLSTATES _IOWR('D', 41, struct pfioc_state_kill) #define DIOCKILLSTATESNV _IOWR('D', 41, struct pfioc_nv) #define DIOCSTARTALTQ _IO ('D', 42) #define DIOCSTOPALTQ _IO ('D', 43) #define DIOCADDALTQV0 _IOWR('D', 45, struct pfioc_altq_v0) #define DIOCADDALTQV1 _IOWR('D', 45, struct pfioc_altq_v1) #define DIOCGETALTQSV0 _IOWR('D', 47, struct pfioc_altq_v0) #define DIOCGETALTQSV1 _IOWR('D', 47, struct pfioc_altq_v1) #define DIOCGETALTQV0 _IOWR('D', 48, struct pfioc_altq_v0) #define DIOCGETALTQV1 _IOWR('D', 48, struct pfioc_altq_v1) #define DIOCCHANGEALTQV0 _IOWR('D', 49, struct pfioc_altq_v0) #define DIOCCHANGEALTQV1 _IOWR('D', 49, struct pfioc_altq_v1) #define DIOCGETQSTATSV0 _IOWR('D', 50, struct 
pfioc_qstats_v0) #define DIOCGETQSTATSV1 _IOWR('D', 50, struct pfioc_qstats_v1) #define DIOCBEGINADDRS _IOWR('D', 51, struct pfioc_pooladdr) #define DIOCADDADDR _IOWR('D', 52, struct pfioc_pooladdr) #define DIOCGETADDRS _IOWR('D', 53, struct pfioc_pooladdr) #define DIOCGETADDR _IOWR('D', 54, struct pfioc_pooladdr) #define DIOCCHANGEADDR _IOWR('D', 55, struct pfioc_pooladdr) /* XXX cut 55 - 57 */ #define DIOCGETRULESETS _IOWR('D', 58, struct pfioc_ruleset) #define DIOCGETRULESET _IOWR('D', 59, struct pfioc_ruleset) #define DIOCRCLRTABLES _IOWR('D', 60, struct pfioc_table) #define DIOCRADDTABLES _IOWR('D', 61, struct pfioc_table) #define DIOCRDELTABLES _IOWR('D', 62, struct pfioc_table) #define DIOCRGETTABLES _IOWR('D', 63, struct pfioc_table) #define DIOCRGETTSTATS _IOWR('D', 64, struct pfioc_table) #define DIOCRCLRTSTATS _IOWR('D', 65, struct pfioc_table) #define DIOCRCLRADDRS _IOWR('D', 66, struct pfioc_table) #define DIOCRADDADDRS _IOWR('D', 67, struct pfioc_table) #define DIOCRDELADDRS _IOWR('D', 68, struct pfioc_table) #define DIOCRSETADDRS _IOWR('D', 69, struct pfioc_table) #define DIOCRGETADDRS _IOWR('D', 70, struct pfioc_table) #define DIOCRGETASTATS _IOWR('D', 71, struct pfioc_table) #define DIOCRCLRASTATS _IOWR('D', 72, struct pfioc_table) #define DIOCRTSTADDRS _IOWR('D', 73, struct pfioc_table) #define DIOCRSETTFLAGS _IOWR('D', 74, struct pfioc_table) #define DIOCRINADEFINE _IOWR('D', 77, struct pfioc_table) #define DIOCOSFPFLUSH _IO('D', 78) #define DIOCOSFPADD _IOWR('D', 79, struct pf_osfp_ioctl) #define DIOCOSFPGET _IOWR('D', 80, struct pf_osfp_ioctl) #define DIOCXBEGIN _IOWR('D', 81, struct pfioc_trans) #define DIOCXCOMMIT _IOWR('D', 82, struct pfioc_trans) #define DIOCXROLLBACK _IOWR('D', 83, struct pfioc_trans) #define DIOCGETSRCNODES _IOWR('D', 84, struct pfioc_src_nodes) #define DIOCCLRSRCNODES _IO('D', 85) #define DIOCSETHOSTID _IOWR('D', 86, u_int32_t) #define DIOCIGETIFACES _IOWR('D', 87, struct pfioc_iface) #define DIOCSETIFFLAG _IOWR('D', 89, struct pfioc_iface) #define DIOCCLRIFFLAG _IOWR('D', 90, struct pfioc_iface) #define DIOCKILLSRCNODES _IOWR('D', 91, struct pfioc_src_node_kill) #define DIOCGIFSPEEDV0 _IOWR('D', 92, struct pf_ifspeed_v0) #define DIOCGIFSPEEDV1 _IOWR('D', 92, struct pf_ifspeed_v1) #define DIOCGETSTATESV2 _IOWR('D', 93, struct pfioc_states_v2) #define DIOCGETSYNCOOKIES _IOWR('D', 94, struct pfioc_nv) #define DIOCSETSYNCOOKIES _IOWR('D', 95, struct pfioc_nv) #define DIOCKEEPCOUNTERS _IOWR('D', 96, struct pfioc_nv) #define DIOCKEEPCOUNTERS_FREEBSD13 _IOWR('D', 92, struct pfioc_nv) #define DIOCADDETHRULE _IOWR('D', 97, struct pfioc_nv) #define DIOCGETETHRULE _IOWR('D', 98, struct pfioc_nv) #define DIOCGETETHRULES _IOWR('D', 99, struct pfioc_nv) #define DIOCGETETHRULESETS _IOWR('D', 100, struct pfioc_nv) #define DIOCGETETHRULESET _IOWR('D', 101, struct pfioc_nv) +#define DIOCSETREASS _IOWR('D', 102, u_int32_t) struct pf_ifspeed_v0 { char ifname[IFNAMSIZ]; u_int32_t baudrate; }; struct pf_ifspeed_v1 { char ifname[IFNAMSIZ]; u_int32_t baudrate32; /* layout identical to struct pf_ifspeed_v0 up to this point */ u_int64_t baudrate; }; /* Latest version of struct pf_ifspeed_vX */ #define PF_IFSPEED_VERSION 1 /* * Compatibility and convenience macros */ #ifndef _KERNEL #ifdef PFIOC_USE_LATEST /* * Maintaining in-tree consumers of the ioctl interface is easier when that * code can be written in terms of old names that refer to the latest interface * version, as that reduces the required changes in the consumers to those * that are functionally necessary to
accommodate a new interface version. */ #define pfioc_altq __CONCAT(pfioc_altq_v, PFIOC_ALTQ_VERSION) #define pfioc_qstats __CONCAT(pfioc_qstats_v, PFIOC_QSTATS_VERSION) #define pf_ifspeed __CONCAT(pf_ifspeed_v, PF_IFSPEED_VERSION) #define DIOCADDALTQ __CONCAT(DIOCADDALTQV, PFIOC_ALTQ_VERSION) #define DIOCGETALTQS __CONCAT(DIOCGETALTQSV, PFIOC_ALTQ_VERSION) #define DIOCGETALTQ __CONCAT(DIOCGETALTQV, PFIOC_ALTQ_VERSION) #define DIOCCHANGEALTQ __CONCAT(DIOCCHANGEALTQV, PFIOC_ALTQ_VERSION) #define DIOCGETQSTATS __CONCAT(DIOCGETQSTATSV, PFIOC_QSTATS_VERSION) #define DIOCGIFSPEED __CONCAT(DIOCGIFSPEEDV, PF_IFSPEED_VERSION) #else /* * When building out-of-tree code that is written for the old interface, * such as may exist in ports for example, resolve the old struct tags and * ioctl command names to the v0 versions. */ #define pfioc_altq __CONCAT(pfioc_altq_v, 0) #define pfioc_qstats __CONCAT(pfioc_qstats_v, 0) #define pf_ifspeed __CONCAT(pf_ifspeed_v, 0) #define DIOCADDALTQ __CONCAT(DIOCADDALTQV, 0) #define DIOCGETALTQS __CONCAT(DIOCGETALTQSV, 0) #define DIOCGETALTQ __CONCAT(DIOCGETALTQV, 0) #define DIOCCHANGEALTQ __CONCAT(DIOCCHANGEALTQV, 0) #define DIOCGETQSTATS __CONCAT(DIOCGETQSTATSV, 0) #define DIOCGIFSPEED __CONCAT(DIOCGIFSPEEDV, 0) #endif /* PFIOC_USE_LATEST */ #endif /* _KERNEL */ #ifdef _KERNEL LIST_HEAD(pf_ksrc_node_list, pf_ksrc_node); struct pf_srchash { struct pf_ksrc_node_list nodes; struct mtx lock; }; struct pf_keyhash { LIST_HEAD(, pf_state_key) keys; struct mtx lock; }; struct pf_idhash { LIST_HEAD(, pf_kstate) states; struct mtx lock; }; extern u_long pf_ioctl_maxcount; extern u_long pf_hashmask; extern u_long pf_srchashmask; #define PF_HASHSIZ (131072) #define PF_SRCHASHSIZ (PF_HASHSIZ/4) VNET_DECLARE(struct pf_keyhash *, pf_keyhash); VNET_DECLARE(struct pf_idhash *, pf_idhash); #define V_pf_keyhash VNET(pf_keyhash) #define V_pf_idhash VNET(pf_idhash) VNET_DECLARE(struct pf_srchash *, pf_srchash); #define V_pf_srchash VNET(pf_srchash) #define PF_IDHASH(s) (be64toh((s)->id) % (pf_hashmask + 1)) VNET_DECLARE(void *, pf_swi_cookie); #define V_pf_swi_cookie VNET(pf_swi_cookie) VNET_DECLARE(struct intr_event *, pf_swi_ie); #define V_pf_swi_ie VNET(pf_swi_ie) VNET_DECLARE(struct unrhdr64, pf_stateid); #define V_pf_stateid VNET(pf_stateid) TAILQ_HEAD(pf_altqqueue, pf_altq); VNET_DECLARE(struct pf_altqqueue, pf_altqs[4]); #define V_pf_altqs VNET(pf_altqs) VNET_DECLARE(struct pf_kpalist, pf_pabuf); #define V_pf_pabuf VNET(pf_pabuf) VNET_DECLARE(u_int32_t, ticket_altqs_active); #define V_ticket_altqs_active VNET(ticket_altqs_active) VNET_DECLARE(u_int32_t, ticket_altqs_inactive); #define V_ticket_altqs_inactive VNET(ticket_altqs_inactive) VNET_DECLARE(int, altqs_inactive_open); #define V_altqs_inactive_open VNET(altqs_inactive_open) VNET_DECLARE(u_int32_t, ticket_pabuf); #define V_ticket_pabuf VNET(ticket_pabuf) VNET_DECLARE(struct pf_altqqueue *, pf_altqs_active); #define V_pf_altqs_active VNET(pf_altqs_active) VNET_DECLARE(struct pf_altqqueue *, pf_altq_ifs_active); #define V_pf_altq_ifs_active VNET(pf_altq_ifs_active) VNET_DECLARE(struct pf_altqqueue *, pf_altqs_inactive); #define V_pf_altqs_inactive VNET(pf_altqs_inactive) VNET_DECLARE(struct pf_altqqueue *, pf_altq_ifs_inactive); #define V_pf_altq_ifs_inactive VNET(pf_altq_ifs_inactive) VNET_DECLARE(struct pf_krulequeue, pf_unlinked_rules); #define V_pf_unlinked_rules VNET(pf_unlinked_rules) #ifdef PF_WANT_32_TO_64_COUNTER LIST_HEAD(allkiflist_head, pfi_kkif); VNET_DECLARE(struct allkiflist_head, pf_allkiflist); #define 
V_pf_allkiflist VNET(pf_allkiflist) VNET_DECLARE(size_t, pf_allkifcount); #define V_pf_allkifcount VNET(pf_allkifcount) VNET_DECLARE(struct pfi_kkif *, pf_kifmarker); #define V_pf_kifmarker VNET(pf_kifmarker) LIST_HEAD(allrulelist_head, pf_krule); VNET_DECLARE(struct allrulelist_head, pf_allrulelist); #define V_pf_allrulelist VNET(pf_allrulelist) VNET_DECLARE(size_t, pf_allrulecount); #define V_pf_allrulecount VNET(pf_allrulecount) VNET_DECLARE(struct pf_krule *, pf_rulemarker); #define V_pf_rulemarker VNET(pf_rulemarker) #endif void pf_initialize(void); void pf_mtag_initialize(void); void pf_mtag_cleanup(void); void pf_cleanup(void); struct pf_mtag *pf_get_mtag(struct mbuf *); extern void pf_calc_skip_steps(struct pf_krulequeue *); #ifdef ALTQ extern void pf_altq_ifnet_event(struct ifnet *, int); #endif VNET_DECLARE(uma_zone_t, pf_state_z); #define V_pf_state_z VNET(pf_state_z) VNET_DECLARE(uma_zone_t, pf_state_key_z); #define V_pf_state_key_z VNET(pf_state_key_z) VNET_DECLARE(uma_zone_t, pf_state_scrub_z); #define V_pf_state_scrub_z VNET(pf_state_scrub_z) extern void pf_purge_thread(void *); extern void pf_unload_vnet_purge(void); extern void pf_intr(void *); extern void pf_purge_expired_src_nodes(void); extern int pf_unlink_state(struct pf_kstate *); extern int pf_state_insert(struct pfi_kkif *, struct pfi_kkif *, struct pf_state_key *, struct pf_state_key *, struct pf_kstate *); extern struct pf_kstate *pf_alloc_state(int); extern void pf_free_state(struct pf_kstate *); static __inline void pf_ref_state(struct pf_kstate *s) { refcount_acquire(&s->refs); } static __inline int pf_release_state(struct pf_kstate *s) { if (refcount_release(&s->refs)) { pf_free_state(s); return (1); } else return (0); } static __inline int pf_release_staten(struct pf_kstate *s, u_int n) { if (refcount_releasen(&s->refs, n)) { pf_free_state(s); return (1); } else return (0); } extern struct pf_kstate *pf_find_state_byid(uint64_t, uint32_t); extern struct pf_kstate *pf_find_state_all(struct pf_state_key_cmp *, u_int, int *); extern bool pf_find_state_all_exists(struct pf_state_key_cmp *, u_int); extern struct pf_ksrc_node *pf_find_src_node(struct pf_addr *, struct pf_krule *, sa_family_t, int); extern void pf_unlink_src_node(struct pf_ksrc_node *); extern u_int pf_free_src_nodes(struct pf_ksrc_node_list *); extern void pf_print_state(struct pf_kstate *); extern void pf_print_flags(u_int8_t); extern u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t, u_int8_t); extern u_int16_t pf_proto_cksum_fixup(struct mbuf *, u_int16_t, u_int16_t, u_int16_t, u_int8_t); VNET_DECLARE(struct ifnet *, sync_ifp); #define V_sync_ifp VNET(sync_ifp); VNET_DECLARE(struct pf_krule, pf_default_rule); #define V_pf_default_rule VNET(pf_default_rule) extern void pf_addrcpy(struct pf_addr *, struct pf_addr *, u_int8_t); void pf_free_rule(struct pf_krule *); int pf_test_eth(int, int, struct ifnet *, struct mbuf **, struct inpcb *); #ifdef INET int pf_test(int, int, struct ifnet *, struct mbuf **, struct inpcb *); int pf_normalize_ip(struct mbuf **, int, struct pfi_kkif *, u_short *, struct pf_pdesc *); #endif /* INET */ #ifdef INET6 int pf_test6(int, int, struct ifnet *, struct mbuf **, struct inpcb *); int pf_normalize_ip6(struct mbuf **, int, struct pfi_kkif *, u_short *, struct pf_pdesc *); void pf_poolmask(struct pf_addr *, struct pf_addr*, struct pf_addr *, struct pf_addr *, u_int8_t); void pf_addr_inc(struct pf_addr *, sa_family_t); int pf_refragment6(struct ifnet *, struct mbuf **, struct m_tag *, bool); #endif /* INET6 */ 
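/* * Minimal usage sketch, not part of the original header: pf_ref_state() and pf_release_state() above pair up so that a caller may keep a state pointer after dropping its hash row lock; pf_free_state() runs once the final reference is released: * * pf_ref_state(st); * PF_STATE_UNLOCK(st); * ... * pf_release_state(st); */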
u_int32_t pf_new_isn(struct pf_kstate *); void *pf_pull_hdr(struct mbuf *, int, void *, int, u_short *, u_short *, sa_family_t); void pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t); void pf_change_proto_a(struct mbuf *, void *, u_int16_t *, u_int32_t, u_int8_t); void pf_change_tcp_a(struct mbuf *, void *, u_int16_t *, u_int32_t); void pf_patch_16_unaligned(struct mbuf *, u_int16_t *, void *, u_int16_t, bool, u_int8_t); void pf_patch_32_unaligned(struct mbuf *, u_int16_t *, void *, u_int32_t, bool, u_int8_t); void pf_send_deferred_syn(struct pf_kstate *); int pf_match_addr(u_int8_t, struct pf_addr *, struct pf_addr *, struct pf_addr *, sa_family_t); int pf_match_addr_range(struct pf_addr *, struct pf_addr *, struct pf_addr *, sa_family_t); int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t); void pf_normalize_init(void); void pf_normalize_cleanup(void); int pf_normalize_tcp(int, struct pfi_kkif *, struct mbuf *, int, int, void *, struct pf_pdesc *); void pf_normalize_tcp_cleanup(struct pf_kstate *); int pf_normalize_tcp_init(struct mbuf *, int, struct pf_pdesc *, struct tcphdr *, struct pf_state_peer *, struct pf_state_peer *); int pf_normalize_tcp_stateful(struct mbuf *, int, struct pf_pdesc *, u_short *, struct tcphdr *, struct pf_kstate *, struct pf_state_peer *, struct pf_state_peer *, int *); u_int32_t pf_state_expires(const struct pf_kstate *); void pf_purge_expired_fragments(void); void pf_purge_fragments(uint32_t); int pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *, int); int pf_socket_lookup(int, struct pf_pdesc *, struct mbuf *); struct pf_state_key *pf_alloc_state_key(int); void pfr_initialize(void); void pfr_cleanup(void); int pfr_match_addr(struct pfr_ktable *, struct pf_addr *, sa_family_t); void pfr_update_stats(struct pfr_ktable *, struct pf_addr *, sa_family_t, u_int64_t, int, int, int); int pfr_pool_get(struct pfr_ktable *, int *, struct pf_addr *, sa_family_t); void pfr_dynaddr_update(struct pfr_ktable *, struct pfi_dynaddr *); struct pfr_ktable * pfr_attach_table(struct pf_kruleset *, char *); struct pfr_ktable * pfr_eth_attach_table(struct pf_keth_ruleset *, char *); void pfr_detach_table(struct pfr_ktable *); int pfr_clr_tables(struct pfr_table *, int *, int); int pfr_add_tables(struct pfr_table *, int, int *, int); int pfr_del_tables(struct pfr_table *, int, int *, int); int pfr_table_count(struct pfr_table *, int); int pfr_get_tables(struct pfr_table *, struct pfr_table *, int *, int); int pfr_get_tstats(struct pfr_table *, struct pfr_tstats *, int *, int); int pfr_clr_tstats(struct pfr_table *, int, int *, int); int pfr_set_tflags(struct pfr_table *, int, int, int, int *, int *, int); int pfr_clr_addrs(struct pfr_table *, int *, int); int pfr_insert_kentry(struct pfr_ktable *, struct pfr_addr *, long); int pfr_add_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_del_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_set_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int *, int *, int *, int, u_int32_t); int pfr_get_addrs(struct pfr_table *, struct pfr_addr *, int *, int); int pfr_get_astats(struct pfr_table *, struct pfr_astats *, int *, int); int pfr_clr_astats(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_tst_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_ina_begin(struct pfr_table *, u_int32_t *, int *, int); int pfr_ina_rollback(struct pfr_table *, u_int32_t, int *, int); int pfr_ina_commit(struct pfr_table *, u_int32_t, int *, int 
*, int); int pfr_ina_define(struct pfr_table *, struct pfr_addr *, int, int *, int *, u_int32_t, int); MALLOC_DECLARE(PFI_MTYPE); VNET_DECLARE(struct pfi_kkif *, pfi_all); #define V_pfi_all VNET(pfi_all) void pfi_initialize(void); void pfi_initialize_vnet(void); void pfi_cleanup(void); void pfi_cleanup_vnet(void); void pfi_kkif_ref(struct pfi_kkif *); void pfi_kkif_unref(struct pfi_kkif *); struct pfi_kkif *pfi_kkif_find(const char *); struct pfi_kkif *pfi_kkif_attach(struct pfi_kkif *, const char *); int pfi_kkif_match(struct pfi_kkif *, struct pfi_kkif *); void pfi_kkif_purge(void); int pfi_match_addr(struct pfi_dynaddr *, struct pf_addr *, sa_family_t); int pfi_dynaddr_setup(struct pf_addr_wrap *, sa_family_t); void pfi_dynaddr_remove(struct pfi_dynaddr *); void pfi_dynaddr_copyout(struct pf_addr_wrap *); void pfi_update_status(const char *, struct pf_status *); void pfi_get_ifaces(const char *, struct pfi_kif *, int *); int pfi_set_flags(const char *, int); int pfi_clear_flags(const char *, int); int pf_match_tag(struct mbuf *, struct pf_krule *, int *, int); int pf_tag_packet(struct mbuf *, struct pf_pdesc *, int); int pf_addr_cmp(struct pf_addr *, struct pf_addr *, sa_family_t); u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t, sa_family_t); u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t, sa_family_t); struct mbuf *pf_build_tcp(const struct pf_krule *, sa_family_t, const struct pf_addr *, const struct pf_addr *, u_int16_t, u_int16_t, u_int32_t, u_int32_t, u_int8_t, u_int16_t, u_int16_t, u_int8_t, int, - u_int16_t); + u_int16_t, int); void pf_send_tcp(const struct pf_krule *, sa_family_t, const struct pf_addr *, const struct pf_addr *, u_int16_t, u_int16_t, u_int32_t, u_int32_t, u_int8_t, u_int16_t, u_int16_t, u_int8_t, int, - u_int16_t); + u_int16_t, int); void pf_syncookies_init(void); void pf_syncookies_cleanup(void); int pf_get_syncookies(struct pfioc_nv *); int pf_set_syncookies(struct pfioc_nv *); int pf_synflood_check(struct pf_pdesc *); void pf_syncookie_send(struct mbuf *m, int off, struct pf_pdesc *); bool pf_syncookie_check(struct pf_pdesc *); u_int8_t pf_syncookie_validate(struct pf_pdesc *); struct mbuf * pf_syncookie_recreate_syn(uint8_t, int, struct pf_pdesc *); VNET_DECLARE(struct pf_kstatus, pf_status); #define V_pf_status VNET(pf_status) struct pf_limit { uma_zone_t zone; u_int limit; }; VNET_DECLARE(struct pf_limit, pf_limits[PF_LIMIT_MAX]); #define V_pf_limits VNET(pf_limits) #endif /* _KERNEL */ #ifdef _KERNEL VNET_DECLARE(struct pf_kanchor_global, pf_anchors); #define V_pf_anchors VNET(pf_anchors) VNET_DECLARE(struct pf_kanchor, pf_main_anchor); #define V_pf_main_anchor VNET(pf_main_anchor) VNET_DECLARE(struct pf_keth_anchor_global, pf_keth_anchors); #define V_pf_keth_anchors VNET(pf_keth_anchors) #define pf_main_ruleset V_pf_main_anchor.ruleset VNET_DECLARE(struct pf_keth_anchor, pf_main_keth_anchor); #define V_pf_main_keth_anchor VNET(pf_main_keth_anchor) VNET_DECLARE(struct pf_keth_ruleset*, pf_keth); #define V_pf_keth VNET(pf_keth) void pf_init_kruleset(struct pf_kruleset *); void pf_init_keth(struct pf_keth_ruleset *); int pf_kanchor_setup(struct pf_krule *, const struct pf_kruleset *, const char *); int pf_kanchor_nvcopyout(const struct pf_kruleset *, const struct pf_krule *, nvlist_t *); int pf_kanchor_copyout(const struct pf_kruleset *, const struct pf_krule *, struct pfioc_rule *); void pf_kanchor_remove(struct pf_krule *); void pf_remove_if_empty_kruleset(struct pf_kruleset *); struct pf_kruleset *pf_find_kruleset(const char *); struct 
pf_kruleset *pf_find_or_create_kruleset(const char *); void pf_rs_initialize(void); struct pf_krule *pf_krule_alloc(void); void pf_remove_if_empty_keth_ruleset( struct pf_keth_ruleset *); struct pf_keth_ruleset *pf_find_keth_ruleset(const char *); struct pf_keth_anchor *pf_find_keth_anchor(const char *); int pf_keth_anchor_setup(struct pf_keth_rule *, const struct pf_keth_ruleset *, const char *); int pf_keth_anchor_nvcopyout( const struct pf_keth_ruleset *, const struct pf_keth_rule *, nvlist_t *); struct pf_keth_ruleset *pf_find_or_create_keth_ruleset(const char *); void pf_keth_anchor_remove(struct pf_keth_rule *); void pf_krule_free(struct pf_krule *); #endif /* The fingerprint functions can be linked into userland programs (tcpdump) */ int pf_osfp_add(struct pf_osfp_ioctl *); #ifdef _KERNEL struct pf_osfp_enlist * pf_osfp_fingerprint(struct pf_pdesc *, struct mbuf *, int, const struct tcphdr *); #endif /* _KERNEL */ void pf_osfp_flush(void); int pf_osfp_get(struct pf_osfp_ioctl *); int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t); #ifdef _KERNEL void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t); void pf_step_into_anchor(struct pf_kanchor_stackframe *, int *, struct pf_kruleset **, int, struct pf_krule **, struct pf_krule **, int *); int pf_step_out_of_anchor(struct pf_kanchor_stackframe *, int *, struct pf_kruleset **, int, struct pf_krule **, struct pf_krule **, int *); void pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *, int *, struct pf_keth_ruleset **, struct pf_keth_rule **, struct pf_keth_rule **, int *); int pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *, int *, struct pf_keth_ruleset **, struct pf_keth_rule **, struct pf_keth_rule **, int *); int pf_map_addr(u_int8_t, struct pf_krule *, struct pf_addr *, struct pf_addr *, struct pf_addr *, struct pf_ksrc_node **); struct pf_krule *pf_get_translation(struct pf_pdesc *, struct mbuf *, int, int, struct pfi_kkif *, struct pf_ksrc_node **, struct pf_state_key **, struct pf_state_key **, struct pf_addr *, struct pf_addr *, uint16_t, uint16_t, struct pf_kanchor_stackframe *); struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t); struct pf_state_key *pf_state_key_clone(struct pf_state_key *); +int pf_normalize_mss(struct mbuf *m, int off, + struct pf_pdesc *pd, u_int16_t maxmss); +u_int16_t pf_rule_to_scrub_flags(u_int32_t); +#ifdef INET +void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t); +#endif /* INET */ +#ifdef INET6 +void pf_scrub_ip6(struct mbuf **, uint32_t, uint8_t, uint8_t); +#endif /* INET6 */ + struct pfi_kkif *pf_kkif_create(int); void pf_kkif_free(struct pfi_kkif *); void pf_kkif_zero(struct pfi_kkif *); #endif /* _KERNEL */ #endif /* _NET_PFVAR_H_ */ diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c index c9d214d73470..2bf1146ceda8 100644 --- a/sys/netpfil/pf/if_pfsync.c +++ b/sys/netpfil/pf/if_pfsync.c @@ -1,2740 +1,2763 @@ /*- * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND ISC) * * Copyright (c) 2002 Michael Shalayeff * Copyright (c) 2012 Gleb Smirnoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2009 David Gwynne * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ * * Revisions picked from OpenBSD after revision 1.110 import: * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input() * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates * 1.120, 1.175 - use monotonic time_uptime * 1.122 - reduce number of updates for non-TCP sessions * 1.125, 1.127 - rewrite merge or stale processing * 1.128 - cleanups * 1.146 - bzero() mbuf before sparsely filling it with data * 1.170 - SIOCSIFMTU checks * 1.126, 1.142 - deferred packets processing * 1.173 - correct expire time processing */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_pf.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct pfsync_bucket; struct pfsync_softc; union inet_template { struct ip ipv4; }; #define PFSYNC_MINPKT ( \ sizeof(union inet_template) + \ sizeof(struct pfsync_header) + \ sizeof(struct pfsync_subheader) ) static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *, struct pfsync_state_peer *); static int pfsync_in_clr(struct mbuf *, int, int, int); static int pfsync_in_ins(struct mbuf *, int, int, int); static int pfsync_in_iack(struct mbuf *, int, int, int); static int pfsync_in_upd(struct mbuf *, int, int, int); static int pfsync_in_upd_c(struct mbuf *, int, int, int); static int pfsync_in_ureq(struct mbuf *, int, int, int); static int pfsync_in_del(struct mbuf *, int, int, int); static int pfsync_in_del_c(struct mbuf *, int, int, int); static int 
pfsync_in_bus(struct mbuf *, int, int, int); static int pfsync_in_tdb(struct mbuf *, int, int, int); static int pfsync_in_eof(struct mbuf *, int, int, int); static int pfsync_in_error(struct mbuf *, int, int, int); static int (*pfsync_acts[])(struct mbuf *, int, int, int) = { pfsync_in_clr, /* PFSYNC_ACT_CLR */ pfsync_in_ins, /* PFSYNC_ACT_INS */ pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ pfsync_in_upd, /* PFSYNC_ACT_UPD */ pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ pfsync_in_del, /* PFSYNC_ACT_DEL */ pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ pfsync_in_error, /* PFSYNC_ACT_INS_F */ pfsync_in_error, /* PFSYNC_ACT_DEL_F */ pfsync_in_bus, /* PFSYNC_ACT_BUS */ pfsync_in_tdb, /* PFSYNC_ACT_TDB */ pfsync_in_eof /* PFSYNC_ACT_EOF */ }; struct pfsync_q { void (*write)(struct pf_kstate *, void *); size_t len; u_int8_t action; }; /* we have one of these for every PFSYNC_S_ */ static void pfsync_out_state(struct pf_kstate *, void *); static void pfsync_out_iack(struct pf_kstate *, void *); static void pfsync_out_upd_c(struct pf_kstate *, void *); static void pfsync_out_del(struct pf_kstate *, void *); static struct pfsync_q pfsync_qs[] = { { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS }, { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD }, { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } }; static void pfsync_q_ins(struct pf_kstate *, int, bool); static void pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *); static void pfsync_update_state(struct pf_kstate *); static void pfsync_tx(struct pfsync_softc *, struct mbuf *); struct pfsync_upd_req_item { TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; struct pfsync_upd_req ur_msg; }; struct pfsync_deferral { struct pfsync_softc *pd_sc; TAILQ_ENTRY(pfsync_deferral) pd_entry; struct callout pd_tmo; struct pf_kstate *pd_st; struct mbuf *pd_m; }; struct pfsync_bucket { int b_id; struct pfsync_softc *b_sc; struct mtx b_mtx; struct callout b_tmo; int b_flags; #define PFSYNCF_BUCKET_PUSH 0x00000001 size_t b_len; TAILQ_HEAD(, pf_kstate) b_qs[PFSYNC_S_COUNT]; TAILQ_HEAD(, pfsync_upd_req_item) b_upd_req_list; TAILQ_HEAD(, pfsync_deferral) b_deferrals; u_int b_deferred; void *b_plus; size_t b_pluslen; struct ifaltq b_snd; }; struct pfsync_softc { /* Configuration */ struct ifnet *sc_ifp; struct ifnet *sc_sync_if; struct ip_moptions sc_imo; struct sockaddr_storage sc_sync_peer; uint32_t sc_flags; uint8_t sc_maxupdates; union inet_template sc_template; struct mtx sc_mtx; /* Queued data */ struct pfsync_bucket *sc_buckets; /* Bulk update info */ struct mtx sc_bulk_mtx; uint32_t sc_ureq_sent; int sc_bulk_tries; uint32_t sc_ureq_received; int sc_bulk_hashid; uint64_t sc_bulk_stateid; uint32_t sc_bulk_creatorid; struct callout sc_bulk_tmo; struct callout sc_bulkfail_tmo; }; #define PFSYNC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define PFSYNC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define PFSYNC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define PFSYNC_BUCKET_LOCK(b) mtx_lock(&(b)->b_mtx) #define PFSYNC_BUCKET_UNLOCK(b) mtx_unlock(&(b)->b_mtx) #define PFSYNC_BUCKET_LOCK_ASSERT(b) mtx_assert(&(b)->b_mtx, MA_OWNED) #define PFSYNC_BLOCK(sc) mtx_lock(&(sc)->sc_bulk_mtx) #define PFSYNC_BUNLOCK(sc) mtx_unlock(&(sc)->sc_bulk_mtx) #define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED) static const char pfsyncname[] = "pfsync"; static 
MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data"); VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL; #define V_pfsyncif VNET(pfsyncif) VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL; #define V_pfsync_swi_cookie VNET(pfsync_swi_cookie) VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie); #define V_pfsync_swi_ie VNET(pfsync_swi_ie) VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats); #define V_pfsyncstats VNET(pfsyncstats) VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW; #define V_pfsync_carp_adj VNET(pfsync_carp_adj) static void pfsync_timeout(void *); static void pfsync_push(struct pfsync_bucket *); static void pfsync_push_all(struct pfsync_softc *); static void pfsyncintr(void *); static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *, struct in_mfilter *imf); static void pfsync_multicast_cleanup(struct pfsync_softc *); static void pfsync_pointers_init(void); static void pfsync_pointers_uninit(void); static int pfsync_init(void); static void pfsync_uninit(void); static unsigned long pfsync_buckets; SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "PFSYNC"); SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(pfsyncstats), pfsyncstats, "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment"); SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN, &pfsync_buckets, 0, "Number of pfsync hash buckets"); static int pfsync_clone_create(struct if_clone *, int, caddr_t); static void pfsync_clone_destroy(struct ifnet *); static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, struct pf_state_peer *); static int pfsyncoutput(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); static int pfsyncioctl(struct ifnet *, u_long, caddr_t); static int pfsync_defer(struct pf_kstate *, struct mbuf *); static void pfsync_undefer(struct pfsync_deferral *, int); static void pfsync_undefer_state_locked(struct pf_kstate *, int); static void pfsync_undefer_state(struct pf_kstate *, int); static void pfsync_defer_tmo(void *); static void pfsync_request_update(u_int32_t, u_int64_t); static bool pfsync_update_state_req(struct pf_kstate *); static void pfsync_drop(struct pfsync_softc *); static void pfsync_sendout(int, int); static void pfsync_send_plus(void *, size_t); static void pfsync_bulk_start(void); static void pfsync_bulk_status(u_int8_t); static void pfsync_bulk_update(void *); static void pfsync_bulk_fail(void *); static void pfsync_detach_ifnet(struct ifnet *); static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *, struct pfsync_kstatus *); static int pfsync_kstatus_to_softc(struct pfsync_kstatus *, struct pfsync_softc *); #ifdef IPSEC static void pfsync_update_net_tdb(struct pfsync_tdb *); #endif static struct pfsync_bucket *pfsync_get_bucket(struct pfsync_softc *, struct pf_kstate *); #define PFSYNC_MAX_BULKTRIES 12 #define PFSYNC_DEFER_TIMEOUT ((20 * hz) / 1000) VNET_DEFINE(struct if_clone *, pfsync_cloner); #define V_pfsync_cloner VNET(pfsync_cloner) static int pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) { struct pfsync_softc *sc; struct ifnet *ifp; struct pfsync_bucket *b; int c, q; if (unit != 0) return (EINVAL); if (! 
pfsync_buckets) pfsync_buckets = mp_ncpus * 2; sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO); sc->sc_flags |= PFSYNCF_OK; sc->sc_maxupdates = 128; ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); if (ifp == NULL) { free(sc, M_PFSYNC); return (ENOSPC); } if_initname(ifp, pfsyncname, unit); ifp->if_softc = sc; ifp->if_ioctl = pfsyncioctl; ifp->if_output = pfsyncoutput; ifp->if_type = IFT_PFSYNC; ifp->if_hdrlen = sizeof(struct pfsync_header); ifp->if_mtu = ETHERMTU; mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF); mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF); callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0); callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0); if_attach(ifp); bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets), M_PFSYNC, M_ZERO | M_WAITOK); for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF); b->b_id = c; b->b_sc = sc; b->b_len = PFSYNC_MINPKT; for (q = 0; q < PFSYNC_S_COUNT; q++) TAILQ_INIT(&b->b_qs[q]); TAILQ_INIT(&b->b_upd_req_list); TAILQ_INIT(&b->b_deferrals); callout_init(&b->b_tmo, 1); b->b_snd.ifq_maxlen = ifqmaxlen; } V_pfsyncif = sc; return (0); } static void pfsync_clone_destroy(struct ifnet *ifp) { struct pfsync_softc *sc = ifp->if_softc; struct pfsync_bucket *b; int c, ret; for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; /* * At this stage, everything should have already been * cleared by pfsync_uninit(), and we have only to * drain callouts. */ PFSYNC_BUCKET_LOCK(b); while (b->b_deferred > 0) { struct pfsync_deferral *pd = TAILQ_FIRST(&b->b_deferrals); ret = callout_stop(&pd->pd_tmo); PFSYNC_BUCKET_UNLOCK(b); if (ret > 0) { pfsync_undefer(pd, 1); } else { callout_drain(&pd->pd_tmo); } PFSYNC_BUCKET_LOCK(b); } MPASS(b->b_deferred == 0); MPASS(TAILQ_EMPTY(&b->b_deferrals)); PFSYNC_BUCKET_UNLOCK(b); callout_drain(&b->b_tmo); } callout_drain(&sc->sc_bulkfail_tmo); callout_drain(&sc->sc_bulk_tmo); if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); bpfdetach(ifp); if_detach(ifp); pfsync_drop(sc); if_free(ifp); pfsync_multicast_cleanup(sc); mtx_destroy(&sc->sc_mtx); mtx_destroy(&sc->sc_bulk_mtx); free(sc->sc_buckets, M_PFSYNC); free(sc, M_PFSYNC); V_pfsyncif = NULL; } static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, struct pf_state_peer *d) { if (s->scrub.scrub_flag && d->scrub == NULL) { d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO); if (d->scrub == NULL) return (ENOMEM); } return (0); } static int pfsync_state_import(struct pfsync_state *sp, int flags) { struct pfsync_softc *sc = V_pfsyncif; #ifndef __NO_STRICT_ALIGNMENT struct pfsync_state_key key[2]; #endif struct pfsync_state_key *kw, *ks; struct pf_kstate *st = NULL; struct pf_state_key *skw = NULL, *sks = NULL; struct pf_krule *r = NULL; struct pfi_kkif *kif; int error; PF_RULES_RASSERT(); if (sp->creatorid == 0) { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: invalid creator id: %08x\n", __func__, ntohl(sp->creatorid)); return (EINVAL); } if ((kif = pfi_kkif_find(sp->ifname)) == NULL) { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: unknown interface: %s\n", __func__, sp->ifname); if (flags & PFSYNC_SI_IOCTL) return (EINVAL); return (0); /* skip this state */ } /* * If the ruleset checksums match or the state is coming from the ioctl, * it's safe to associate the state with the rule of that number. 
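 * Otherwise the imported state is bound to the default rule.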
*/ if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) r = pf_main_ruleset.rules[ PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; else r = &V_pf_default_rule; if ((r->max_states && counter_u64_fetch(r->states_cur) >= r->max_states)) goto cleanup; /* * XXXGL: consider M_WAITOK in ioctl path after. */ st = pf_alloc_state(M_NOWAIT); if (__predict_false(st == NULL)) goto cleanup; if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL) goto cleanup; #ifndef __NO_STRICT_ALIGNMENT bcopy(&sp->key, key, sizeof(struct pfsync_state_key) * 2); kw = &key[PF_SK_WIRE]; ks = &key[PF_SK_STACK]; #else kw = &sp->key[PF_SK_WIRE]; ks = &sp->key[PF_SK_STACK]; #endif if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->af) || PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->af) || kw->port[0] != ks->port[0] || kw->port[1] != ks->port[1]) { sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT); if (sks == NULL) goto cleanup; } else sks = skw; /* allocate memory for scrub info */ if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) goto cleanup; /* Copy to state key(s). */ skw->addr[0] = kw->addr[0]; skw->addr[1] = kw->addr[1]; skw->port[0] = kw->port[0]; skw->port[1] = kw->port[1]; skw->proto = sp->proto; skw->af = sp->af; if (sks != skw) { sks->addr[0] = ks->addr[0]; sks->addr[1] = ks->addr[1]; sks->port[0] = ks->port[0]; sks->port[1] = ks->port[1]; sks->proto = sp->proto; sks->af = sp->af; } /* copy to state */ bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); st->creation = time_uptime - ntohl(sp->creation); st->expire = time_uptime; if (sp->expire) { uint32_t timeout; timeout = r->timeout[sp->timeout]; if (!timeout) timeout = V_pf_default_rule.timeout[sp->timeout]; /* sp->expire may have been adaptively scaled by export. */ st->expire -= timeout - ntohl(sp->expire); } st->direction = sp->direction; st->log = sp->log; st->timeout = sp->timeout; - st->state_flags = sp->state_flags; + /* 8 bits from old peers, 16 bits from new peers */ + st->state_flags = sp->state_flags_compat | ntohs(sp->state_flags); + + if (r == &V_pf_default_rule) { + /* ToS and Prio are not sent over struct pfsync_state */ + st->state_flags &= ~PFSTATE_SETMASK; + } else { + /* Most actions are applied from state, not from rule. Until + * pfsync can forward all those actions and their parameters we + * must rely on restoring them from the found rule.
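+ * (For instance, st->max_mss below is recovered from the local + * r->max_mss rather than carried in struct pfsync_state.)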
+ * It's a copy of pf_rule_to_actions() */ + st->qid = r->qid; + st->pqid = r->pqid; + st->rtableid = r->rtableid; + if (r->scrub_flags & PFSTATE_SETTOS) + st->set_tos = r->set_tos; + st->min_ttl = r->min_ttl; + st->max_mss = r->max_mss; + st->state_flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID| + PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO)); + st->dnpipe = r->dnpipe; + st->dnrpipe = r->dnrpipe; + /* FIXME: dnflags are not part of state, can't update them */ + } st->id = sp->id; st->creatorid = sp->creatorid; pf_state_peer_ntoh(&sp->src, &st->src); pf_state_peer_ntoh(&sp->dst, &st->dst); st->rule.ptr = r; st->nat_rule.ptr = NULL; st->anchor.ptr = NULL; st->rt_kif = NULL; st->pfsync_time = time_uptime; st->sync_state = PFSYNC_S_NONE; if (!(flags & PFSYNC_SI_IOCTL)) st->state_flags |= PFSTATE_NOSYNC; if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0) goto cleanup_state; /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ counter_u64_add(r->states_cur, 1); counter_u64_add(r->states_tot, 1); if (!(flags & PFSYNC_SI_IOCTL)) { st->state_flags &= ~PFSTATE_NOSYNC; if (st->state_flags & PFSTATE_ACK) { struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK(b); pfsync_q_ins(st, PFSYNC_S_IACK, true); PFSYNC_BUCKET_UNLOCK(b); pfsync_push_all(sc); } } st->state_flags &= ~PFSTATE_ACK; PF_STATE_UNLOCK(st); return (0); cleanup: error = ENOMEM; if (skw == sks) sks = NULL; if (skw != NULL) uma_zfree(V_pf_state_key_z, skw); if (sks != NULL) uma_zfree(V_pf_state_key_z, sks); cleanup_state: /* pf_state_insert() frees the state keys. */ if (st) { st->timeout = PFTM_UNLINKED; /* appease an assert */ pf_free_state(st); } return (error); } #ifdef INET static int pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused) { struct pfsync_softc *sc = V_pfsyncif; struct mbuf *m = *mp; struct ip *ip = mtod(m, struct ip *); struct pfsync_header *ph; struct pfsync_subheader subh; int offset, len, flags = 0; int rv; uint16_t count; PF_RULES_RLOCK_TRACKER; *mp = NULL; V_pfsyncstats.pfsyncs_ipackets++; /* Verify that we have a sync interface configured. */ if (!sc || !sc->sc_sync_if || !V_pf_status.running || (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; /* verify that the packet came in on the right interface */ if (sc->sc_sync_if != m->m_pkthdr.rcvif) { V_pfsyncstats.pfsyncs_badif++; goto done; } if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); /* verify that the IP TTL is 255. */ if (ip->ip_ttl != PFSYNC_DFLTTL) { V_pfsyncstats.pfsyncs_badttl++; goto done; } offset = ip->ip_hl << 2; if (m->m_pkthdr.len < offset + sizeof(*ph)) { V_pfsyncstats.pfsyncs_hdrops++; goto done; } if (offset + sizeof(*ph) > m->m_len) { if (m_pullup(m, offset + sizeof(*ph)) == NULL) { V_pfsyncstats.pfsyncs_hdrops++; return (IPPROTO_DONE); } ip = mtod(m, struct ip *); } ph = (struct pfsync_header *)((char *)ip + offset); /* verify the version */ if (ph->version != PFSYNC_VERSION) { V_pfsyncstats.pfsyncs_badver++; goto done; } len = ntohs(ph->len) + offset; if (m->m_pkthdr.len < len) { V_pfsyncstats.pfsyncs_badlen++; goto done; } /* * Trusting pf_chksum during packet processing, as well as searching * the interface name tree, requires holding PF_RULES_RLOCK().
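 * A matching checksum sets PFSYNC_SI_CKSUM below, which lets * pfsync_state_import() trust the rule numbers carried on the wire.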
*/ PF_RULES_RLOCK(); if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) flags = PFSYNC_SI_CKSUM; offset += sizeof(*ph); while (offset <= len - sizeof(subh)) { m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); offset += sizeof(subh); if (subh.action >= PFSYNC_ACT_MAX) { V_pfsyncstats.pfsyncs_badact++; PF_RULES_RUNLOCK(); goto done; } count = ntohs(subh.count); V_pfsyncstats.pfsyncs_iacts[subh.action] += count; rv = (*pfsync_acts[subh.action])(m, offset, count, flags); if (rv == -1) { PF_RULES_RUNLOCK(); return (IPPROTO_DONE); } offset += rv; } PF_RULES_RUNLOCK(); done: m_freem(m); return (IPPROTO_DONE); } #endif static int pfsync_in_clr(struct mbuf *m, int offset, int count, int flags) { struct pfsync_clr *clr; struct mbuf *mp; int len = sizeof(*clr) * count; int i, offp; u_int32_t creatorid; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } clr = (struct pfsync_clr *)(mp->m_data + offp); for (i = 0; i < count; i++) { creatorid = clr[i].creatorid; if (clr[i].ifname[0] != '\0' && pfi_kkif_find(clr[i].ifname) == NULL) continue; for (int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_kstate *s; relock: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (s->creatorid == creatorid) { s->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(s); goto relock; } } PF_HASHROW_UNLOCK(ih); } } return (len); } static int pfsync_in_ins(struct mbuf *m, int offset, int count, int flags) { struct mbuf *mp; struct pfsync_state *sa, *sp; int len = sizeof(*sp) * count; int i, offp; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (struct pfsync_state *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = &sa[i]; /* Check for invalid values. */ if (sp->timeout >= PFTM_MAX || sp->src.state > PF_TCPS_PROXY_DST || sp->dst.state > PF_TCPS_PROXY_DST || sp->direction > PF_OUT || (sp->af != AF_INET && sp->af != AF_INET6)) { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: invalid value\n", __func__); V_pfsyncstats.pfsyncs_badval++; continue; } if (pfsync_state_import(sp, flags) == ENOMEM) /* Drop out, but process the rest of the actions. */ break; } return (len); } static int pfsync_in_iack(struct mbuf *m, int offset, int count, int flags) { struct pfsync_ins_ack *ia, *iaa; struct pf_kstate *st; struct mbuf *mp; int len = count * sizeof(*ia); int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); for (i = 0; i < count; i++) { ia = &iaa[i]; st = pf_find_state_byid(ia->id, ia->creatorid); if (st == NULL) continue; if (st->state_flags & PFSTATE_ACK) { pfsync_undefer_state(st, 0); } PF_STATE_UNLOCK(st); } /* * XXX this is not yet implemented, but we know the size of the * message so we can skip it. */ return (count * sizeof(struct pfsync_ins_ack)); } static int pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src, struct pfsync_state_peer *dst) { int sync = 0; PF_STATE_LOCK_ASSERT(st); /* * The state should never go backwards except * for syn-proxy states. Neither should the * sequence window slide backwards. 
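/*
 * [Editor's illustration] SEQ_GT() used below is 32-bit serial-number
 * arithmetic, so "the sequence window slid backwards" is detected even
 * across wraparound.  Self-contained sketch of the staleness test; the
 * macro matches the definition in netinet/tcp_seq.h:
 */
#include <stdint.h>

#define SKETCH_SEQ_GT(a, b)     ((int32_t)((a) - (b)) > 0)

static int
update_is_stale(uint32_t cur_seqlo, uint32_t upd_seqlo)
{
        /* True when applying the update would slide the window back. */
        return (SKETCH_SEQ_GT(cur_seqlo, upd_seqlo));
}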
*/ if ((st->src.state > src->state && (st->src.state < PF_TCPS_PROXY_SRC || src->state >= PF_TCPS_PROXY_SRC)) || (st->src.state == src->state && SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))) sync++; else pf_state_peer_ntoh(src, &st->src); if ((st->dst.state > dst->state) || (st->dst.state >= TCPS_SYN_SENT && SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))) sync++; else pf_state_peer_ntoh(dst, &st->dst); return (sync); } static int pfsync_in_upd(struct mbuf *m, int offset, int count, int flags) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_state *sa, *sp; struct pf_kstate *st; int sync; struct mbuf *mp; int len = count * sizeof(*sp); int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (struct pfsync_state *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = &sa[i]; /* check for invalid values */ if (sp->timeout >= PFTM_MAX || sp->src.state > PF_TCPS_PROXY_DST || sp->dst.state > PF_TCPS_PROXY_DST) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pfsync_input: PFSYNC_ACT_UPD: " "invalid value\n"); } V_pfsyncstats.pfsyncs_badval++; continue; } st = pf_find_state_byid(sp->id, sp->creatorid); if (st == NULL) { /* insert the update */ if (pfsync_state_import(sp, flags)) V_pfsyncstats.pfsyncs_badstate++; continue; } if (st->state_flags & PFSTATE_ACK) { pfsync_undefer_state(st, 1); } if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) sync = pfsync_upd_tcp(st, &sp->src, &sp->dst); else { sync = 0; /* * Non-TCP protocol state machines always go * forward */ if (st->src.state > sp->src.state) sync++; else pf_state_peer_ntoh(&sp->src, &st->src); if (st->dst.state > sp->dst.state) sync++; else pf_state_peer_ntoh(&sp->dst, &st->dst); } if (sync < 2) { pfsync_alloc_scrub_memory(&sp->dst, &st->dst); pf_state_peer_ntoh(&sp->dst, &st->dst); st->expire = time_uptime; st->timeout = sp->timeout; } st->pfsync_time = time_uptime; if (sync) { V_pfsyncstats.pfsyncs_stale++; pfsync_update_state(st); PF_STATE_UNLOCK(st); pfsync_push_all(sc); continue; } PF_STATE_UNLOCK(st); } return (len); } static int pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_upd_c *ua, *up; struct pf_kstate *st; int len = count * sizeof(*up); int sync; struct mbuf *mp; int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } ua = (struct pfsync_upd_c *)(mp->m_data + offp); for (i = 0; i < count; i++) { up = &ua[i]; /* check for invalid values */ if (up->timeout >= PFTM_MAX || up->src.state > PF_TCPS_PROXY_DST || up->dst.state > PF_TCPS_PROXY_DST) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pfsync_input: " "PFSYNC_ACT_UPD_C: " "invalid value\n"); } V_pfsyncstats.pfsyncs_badval++; continue; } st = pf_find_state_byid(up->id, up->creatorid); if (st == NULL) { /* We don't have this state. Ask for it.
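/*
 * [Editor's illustration] For non-TCP states the merge logic above
 * reduces to "a peer's state counter may only move forward": an update
 * that would lower it is counted as stale and answered with our newer
 * view instead of being applied.  Self-contained sketch (uint8_t stands
 * in for the pfsync_state_peer state field):
 */
#include <stdint.h>

static int
merge_peer_state(uint8_t *cur, uint8_t upd)
{
        if (*cur > upd)
                return (1);     /* stale: caller bumps its sync counter */
        *cur = upd;             /* equal or newer: accept the update */
        return (0);
}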
*/ PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); pfsync_request_update(up->creatorid, up->id); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); continue; } if (st->state_flags & PFSTATE_ACK) { pfsync_undefer_state(st, 1); } if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) sync = pfsync_upd_tcp(st, &up->src, &up->dst); else { sync = 0; /* * Non-TCP protocol state machines always go * forward */ if (st->src.state > up->src.state) sync++; else pf_state_peer_ntoh(&up->src, &st->src); if (st->dst.state > up->dst.state) sync++; else pf_state_peer_ntoh(&up->dst, &st->dst); } if (sync < 2) { pfsync_alloc_scrub_memory(&up->dst, &st->dst); pf_state_peer_ntoh(&up->dst, &st->dst); st->expire = time_uptime; st->timeout = up->timeout; } st->pfsync_time = time_uptime; if (sync) { V_pfsyncstats.pfsyncs_stale++; pfsync_update_state(st); PF_STATE_UNLOCK(st); pfsync_push_all(sc); continue; } PF_STATE_UNLOCK(st); } return (len); } static int pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags) { struct pfsync_upd_req *ur, *ura; struct mbuf *mp; int len = count * sizeof(*ur); int i, offp; struct pf_kstate *st; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } ura = (struct pfsync_upd_req *)(mp->m_data + offp); for (i = 0; i < count; i++) { ur = &ura[i]; if (ur->id == 0 && ur->creatorid == 0) pfsync_bulk_start(); else { st = pf_find_state_byid(ur->id, ur->creatorid); if (st == NULL) { V_pfsyncstats.pfsyncs_badstate++; continue; } if (st->state_flags & PFSTATE_NOSYNC) { PF_STATE_UNLOCK(st); continue; } pfsync_update_state_req(st); PF_STATE_UNLOCK(st); } } return (len); } static int pfsync_in_del(struct mbuf *m, int offset, int count, int flags) { struct mbuf *mp; struct pfsync_state *sa, *sp; struct pf_kstate *st; int len = count * sizeof(*sp); int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (struct pfsync_state *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = &sa[i]; st = pf_find_state_byid(sp->id, sp->creatorid); if (st == NULL) { V_pfsyncstats.pfsyncs_badstate++; continue; } st->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(st); } return (len); } static int pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags) { struct mbuf *mp; struct pfsync_del_c *sa, *sp; struct pf_kstate *st; int len = count * sizeof(*sp); int offp, i; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } sa = (struct pfsync_del_c *)(mp->m_data + offp); for (i = 0; i < count; i++) { sp = &sa[i]; st = pf_find_state_byid(sp->id, sp->creatorid); if (st == NULL) { V_pfsyncstats.pfsyncs_badstate++; continue; } st->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(st); } return (len); } static int pfsync_in_bus(struct mbuf *m, int offset, int count, int flags) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bus *bus; struct mbuf *mp; int len = count * sizeof(*bus); int offp; PFSYNC_BLOCK(sc); /* If we're not waiting for a bulk update, who cares.
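/*
 * [Editor's illustration] Both delete handlers above set PFSTATE_NOSYNC
 * before pf_unlink_state() so the local teardown is not announced back
 * to the peer that requested it, which would bounce delete messages
 * between the nodes.  The idiom reduced to its flag logic; the flag
 * value is a stand-in:
 */
#include <stdint.h>

#define SK_NOSYNC       0x08    /* stand-in for PFSTATE_NOSYNC */

static void
remote_delete(uint16_t *state_flags, void (*unlink_state)(void))
{
        *state_flags |= SK_NOSYNC;      /* suppress our own del message */
        unlink_state();                 /* then tear the state down */
}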
*/ if (sc->sc_ureq_sent == 0) { PFSYNC_BUNLOCK(sc); return (len); } mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { PFSYNC_BUNLOCK(sc); V_pfsyncstats.pfsyncs_badlen++; return (-1); } bus = (struct pfsync_bus *)(mp->m_data + offp); switch (bus->status) { case PFSYNC_BUS_START: callout_reset(&sc->sc_bulkfail_tmo, 4 * hz + V_pf_limits[PF_LIMIT_STATES].limit / ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) / sizeof(struct pfsync_state)), pfsync_bulk_fail, sc); if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received bulk update start\n"); break; case PFSYNC_BUS_END: if (time_uptime - ntohl(bus->endtime) >= sc->sc_ureq_sent) { /* that's it, we're happy */ sc->sc_ureq_sent = 0; sc->sc_bulk_tries = 0; callout_stop(&sc->sc_bulkfail_tmo); if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync bulk done"); sc->sc_flags |= PFSYNCF_OK; if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received valid " "bulk update end\n"); } else { if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received invalid " "bulk update end: bad timestamp\n"); } break; } PFSYNC_BUNLOCK(sc); return (len); } static int pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags) { int len = count * sizeof(struct pfsync_tdb); #if defined(IPSEC) struct pfsync_tdb *tp; struct mbuf *mp; int offp; int i; int s; mp = m_pulldown(m, offset, len, &offp); if (mp == NULL) { V_pfsyncstats.pfsyncs_badlen++; return (-1); } tp = (struct pfsync_tdb *)(mp->m_data + offp); for (i = 0; i < count; i++) pfsync_update_net_tdb(&tp[i]); #endif return (len); } #if defined(IPSEC) /* Update an in-kernel tdb. Silently fail if no tdb is found. */ static void pfsync_update_net_tdb(struct pfsync_tdb *pt) { struct tdb *tdb; int s; /* check for invalid values */ if (ntohl(pt->spi) <= SPI_RESERVED_MAX || (pt->dst.sa.sa_family != AF_INET && pt->dst.sa.sa_family != AF_INET6)) goto bad; tdb = gettdb(pt->spi, &pt->dst, pt->sproto); if (tdb) { pt->rpl = ntohl(pt->rpl); pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes); /* Neither replay nor byte counter should ever decrease. */ if (pt->rpl < tdb->tdb_rpl || pt->cur_bytes < tdb->tdb_cur_bytes) { goto bad; } tdb->tdb_rpl = pt->rpl; tdb->tdb_cur_bytes = pt->cur_bytes; } return; bad: if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: " "invalid value\n"); V_pfsyncstats.pfsyncs_badstate++; return; } #endif static int pfsync_in_eof(struct mbuf *m, int offset, int count, int flags) { /* check if we are at the right place in the packet */ if (offset != m->m_pkthdr.len) V_pfsyncstats.pfsyncs_badlen++; /* we're done. 
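/*
 * [Editor's illustration] The callout_reset() above sizes the bulk
 * deadline from the transfer itself: four seconds of grace plus one
 * tick for every MTU-sized pfsync packet a full state-table dump would
 * need.  The arithmetic isolated in userland form; parameter names are
 * mine and a zero-divide guard is added:
 */
static unsigned int
bulk_fail_ticks(unsigned int hz, unsigned int state_limit,
    unsigned int mtu, unsigned int minpkt, unsigned int state_sz)
{
        unsigned int per_pkt = (mtu - minpkt) / state_sz;

        if (per_pkt == 0)       /* degenerate MTU */
                per_pkt = 1;
        /* 4 * hz + limit / ((mtu - PFSYNC_MINPKT) / sizeof(state)) */
        return (4 * hz + state_limit / per_pkt);
}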
free and let the caller return */ m_freem(m); return (-1); } static int pfsync_in_error(struct mbuf *m, int offset, int count, int flags) { V_pfsyncstats.pfsyncs_badact++; m_freem(m); return (-1); } static int pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *rt) { m_freem(m); return (0); } /* ARGSUSED */ static int pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct pfsync_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct pfsyncreq pfsyncr; size_t nvbuflen; int error; int c; switch (cmd) { case SIOCSIFFLAGS: PFSYNC_LOCK(sc); if (ifp->if_flags & IFF_UP) { ifp->if_drv_flags |= IFF_DRV_RUNNING; PFSYNC_UNLOCK(sc); pfsync_pointers_init(); } else { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; PFSYNC_UNLOCK(sc); pfsync_pointers_uninit(); } break; case SIOCSIFMTU: if (!sc->sc_sync_if || ifr->ifr_mtu <= PFSYNC_MINPKT || ifr->ifr_mtu > sc->sc_sync_if->if_mtu) return (EINVAL); if (ifr->ifr_mtu < ifp->if_mtu) { for (c = 0; c < pfsync_buckets; c++) { PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT) pfsync_sendout(1, c); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); } } ifp->if_mtu = ifr->ifr_mtu; break; case SIOCGETPFSYNC: bzero(&pfsyncr, sizeof(pfsyncr)); PFSYNC_LOCK(sc); if (sc->sc_sync_if) { strlcpy(pfsyncr.pfsyncr_syncdev, sc->sc_sync_if->if_xname, IFNAMSIZ); } pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr; pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates; pfsyncr.pfsyncr_defer = sc->sc_flags; PFSYNC_UNLOCK(sc); return (copyout(&pfsyncr, ifr_data_get_ptr(ifr), sizeof(pfsyncr))); case SIOCGETPFSYNCNV: { nvlist_t *nvl_syncpeer; nvlist_t *nvl = nvlist_create(0); if (nvl == NULL) return (ENOMEM); if (sc->sc_sync_if) nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname); nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates); nvlist_add_number(nvl, "flags", sc->sc_flags); if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL) nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer); void *packed = NULL; packed = nvlist_pack(nvl, &nvbuflen); if (packed == NULL) { free(packed, M_NVLIST); nvlist_destroy(nvl); return (ENOMEM); } if (nvbuflen > ifr->ifr_cap_nv.buf_length) { ifr->ifr_cap_nv.length = nvbuflen; ifr->ifr_cap_nv.buffer = NULL; free(packed, M_NVLIST); nvlist_destroy(nvl); return (EFBIG); } ifr->ifr_cap_nv.length = nvbuflen; error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen); nvlist_destroy(nvl); nvlist_destroy(nvl_syncpeer); free(packed, M_NVLIST); break; } case SIOCSETPFSYNC: { struct pfsync_kstatus status; if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) return (error); if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr, sizeof(pfsyncr)))) return (error); memset((char *)&status, 0, sizeof(struct pfsync_kstatus)); pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status); error = pfsync_kstatus_to_softc(&status, sc); return (error); } case SIOCSETPFSYNCNV: { struct pfsync_kstatus status; void *data; nvlist_t *nvl; if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) return (error); if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE) return (EINVAL); data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK); if ((error = copyin(ifr->ifr_cap_nv.buffer, data, ifr->ifr_cap_nv.length)) != 0) { free(data, M_TEMP); return (error); } if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) { free(data, M_TEMP); return (EINVAL); } memset((char *)&status, 0, sizeof(struct pfsync_kstatus)); pfsync_nvstatus_to_kstatus(nvl, 
&status); nvlist_destroy(nvl); free(data, M_TEMP); error = pfsync_kstatus_to_softc(&status, sc); return (error); } default: return (ENOTTY); } return (0); } static void pfsync_out_state(struct pf_kstate *st, void *buf) { struct pfsync_state *sp = buf; pfsync_state_export(sp, st); } static void pfsync_out_iack(struct pf_kstate *st, void *buf) { struct pfsync_ins_ack *iack = buf; iack->id = st->id; iack->creatorid = st->creatorid; } static void pfsync_out_upd_c(struct pf_kstate *st, void *buf) { struct pfsync_upd_c *up = buf; bzero(up, sizeof(*up)); up->id = st->id; pf_state_peer_hton(&st->src, &up->src); pf_state_peer_hton(&st->dst, &up->dst); up->creatorid = st->creatorid; up->timeout = st->timeout; } static void pfsync_out_del(struct pf_kstate *st, void *buf) { struct pfsync_del_c *dp = buf; dp->id = st->id; dp->creatorid = st->creatorid; st->state_flags |= PFSTATE_NOSYNC; } static void pfsync_drop(struct pfsync_softc *sc) { struct pf_kstate *st, *next; struct pfsync_upd_req_item *ur; struct pfsync_bucket *b; int c, q; for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; for (q = 0; q < PFSYNC_S_COUNT; q++) { if (TAILQ_EMPTY(&b->b_qs[q])) continue; TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) { KASSERT(st->sync_state == q, ("%s: st->sync_state == q", __func__)); st->sync_state = PFSYNC_S_NONE; pf_release_state(st); } TAILQ_INIT(&b->b_qs[q]); } while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); free(ur, M_PFSYNC); } b->b_len = PFSYNC_MINPKT; b->b_plus = NULL; } } static void pfsync_sendout(int schedswi, int c) { struct pfsync_softc *sc = V_pfsyncif; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; struct pfsync_header *ph; struct pfsync_subheader *subh; struct pf_kstate *st, *st_next; struct pfsync_upd_req_item *ur; struct pfsync_bucket *b = &sc->sc_buckets[c]; int aflen, offset; int q, count = 0; KASSERT(sc != NULL, ("%s: null sc", __func__)); KASSERT(b->b_len > PFSYNC_MINPKT, ("%s: sc_len %zu", __func__, b->b_len)); PFSYNC_BUCKET_LOCK_ASSERT(b); if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) { pfsync_drop(sc); return; } m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); V_pfsyncstats.pfsyncs_onomem++; return; } m->m_data += max_linkhdr; m->m_len = m->m_pkthdr.len = b->b_len; /* build the ip header */ switch (sc->sc_sync_peer.ss_family) { #ifdef INET case AF_INET: { struct ip *ip; ip = mtod(m, struct ip *); bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip)); aflen = offset = sizeof(*ip); ip->ip_len = htons(m->m_pkthdr.len); ip_fillid(ip); break; } #endif default: m_freem(m); return; } /* build the pfsync header */ ph = (struct pfsync_header *)(m->m_data + offset); bzero(ph, sizeof(*ph)); offset += sizeof(*ph); ph->version = PFSYNC_VERSION; ph->len = htons(b->b_len - aflen); bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); /* walk the queues */ for (q = 0; q < PFSYNC_S_COUNT; q++) { if (TAILQ_EMPTY(&b->b_qs[q])) continue; subh = (struct pfsync_subheader *)(m->m_data + offset); offset += sizeof(*subh); count = 0; TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) { KASSERT(st->sync_state == q, ("%s: st->sync_state == q", __func__)); /* * XXXGL: some of write methods do unlocked reads * of state data :( */ pfsync_qs[q].write(st, m->m_data + offset); offset += pfsync_qs[q].len; st->sync_state = PFSYNC_S_NONE; pf_release_state(st); count++; } TAILQ_INIT(&b->b_qs[q]); bzero(subh, sizeof(*subh)); subh->action = 
pfsync_qs[q].action; subh->count = htons(count); V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count; } if (!TAILQ_EMPTY(&b->b_upd_req_list)) { subh = (struct pfsync_subheader *)(m->m_data + offset); offset += sizeof(*subh); count = 0; while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); bcopy(&ur->ur_msg, m->m_data + offset, sizeof(ur->ur_msg)); offset += sizeof(ur->ur_msg); free(ur, M_PFSYNC); count++; } bzero(subh, sizeof(*subh)); subh->action = PFSYNC_ACT_UPD_REQ; subh->count = htons(count); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count; } /* has someone built a custom region for us to add? */ if (b->b_plus != NULL) { bcopy(b->b_plus, m->m_data + offset, b->b_pluslen); offset += b->b_pluslen; b->b_plus = NULL; } subh = (struct pfsync_subheader *)(m->m_data + offset); offset += sizeof(*subh); bzero(subh, sizeof(*subh)); subh->action = PFSYNC_ACT_EOF; subh->count = htons(1); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++; /* we're done, let's put it on the wire */ if (ifp->if_bpf) { m->m_data += aflen; m->m_len = m->m_pkthdr.len = b->b_len - aflen; BPF_MTAP(ifp, m); m->m_data -= aflen; m->m_len = m->m_pkthdr.len = b->b_len; } if (sc->sc_sync_if == NULL) { b->b_len = PFSYNC_MINPKT; m_freem(m); return; } if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); b->b_len = PFSYNC_MINPKT; if (!_IF_QFULL(&b->b_snd)) _IF_ENQUEUE(&b->b_snd, m); else { m_freem(m); if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); } if (schedswi) swi_sched(V_pfsync_swi_cookie, 0); } static void pfsync_insert_state(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); if (st->state_flags & PFSTATE_NOSYNC) return; if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) || st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) { st->state_flags |= PFSTATE_NOSYNC; return; } KASSERT(st->sync_state == PFSYNC_S_NONE, ("%s: st->sync_state %u", __func__, st->sync_state)); PFSYNC_BUCKET_LOCK(b); if (b->b_len == PFSYNC_MINPKT) callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); pfsync_q_ins(st, PFSYNC_S_INS, true); PFSYNC_BUCKET_UNLOCK(b); st->sync_updates = 0; } static int pfsync_defer(struct pf_kstate *st, struct mbuf *m) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_deferral *pd; struct pfsync_bucket *b; if (m->m_flags & (M_BCAST|M_MCAST)) return (0); if (sc == NULL) return (0); b = pfsync_get_bucket(sc, st); PFSYNC_LOCK(sc); if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) || !(sc->sc_flags & PFSYNCF_DEFER)) { PFSYNC_UNLOCK(sc); return (0); } PFSYNC_BUCKET_LOCK(b); PFSYNC_UNLOCK(sc); if (b->b_deferred >= 128) pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0); pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT); if (pd == NULL) { PFSYNC_BUCKET_UNLOCK(b); return (0); } b->b_deferred++; m->m_flags |= M_SKIP_FIREWALL; st->state_flags |= PFSTATE_ACK; pd->pd_sc = sc; pd->pd_st = st; pf_ref_state(st); pd->pd_m = m; TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry); callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED); callout_reset(&pd->pd_tmo, PFSYNC_DEFER_TIMEOUT, pfsync_defer_tmo, pd); pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); return (1); } static void pfsync_undefer(struct pfsync_deferral *pd, int drop) { struct pfsync_softc *sc = pd->pd_sc; struct mbuf *m = pd->pd_m; struct pf_kstate *st = pd->pd_st; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); b->b_deferred--; 
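/*
 * [Editor's illustration] pfsync_defer() above keeps a bounded holding
 * queue: at most 128 packets per bucket wait for the peer's insert
 * ack, and a full queue releases its oldest entry early.  A minimal
 * userland model of the eviction policy, using the same sys/queue.h
 * macros as the kernel (the real code transmits the evicted packet
 * rather than dropping it):
 */
#include <sys/queue.h>
#include <stdlib.h>

struct sk_deferral {
        TAILQ_ENTRY(sk_deferral) entry;
};
TAILQ_HEAD(sk_defq, sk_deferral);

static struct sk_deferral *
sk_defer(struct sk_defq *q, unsigned int *depth)
{
        struct sk_deferral *pd;

        if (*depth >= 128) {
                /* Queue full: release the oldest deferral early. */
                pd = TAILQ_FIRST(q);
                TAILQ_REMOVE(q, pd, entry);
                free(pd);
                (*depth)--;
        }
        if ((pd = calloc(1, sizeof(*pd))) == NULL)
                return (NULL);
        TAILQ_INSERT_TAIL(q, pd, entry);
        (*depth)++;
        return (pd);
}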
pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ free(pd, M_PFSYNC); pf_release_state(st); if (drop) m_freem(m); else { _IF_ENQUEUE(&b->b_snd, m); pfsync_push(b); } } static void pfsync_defer_tmo(void *arg) { struct epoch_tracker et; struct pfsync_deferral *pd = arg; struct pfsync_softc *sc = pd->pd_sc; struct mbuf *m = pd->pd_m; struct pf_kstate *st = pd->pd_st; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); b->b_deferred--; pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ PFSYNC_BUCKET_UNLOCK(b); free(pd, M_PFSYNC); if (sc->sc_sync_if == NULL) { pf_release_state(st); m_freem(m); return; } NET_EPOCH_ENTER(et); CURVNET_SET(sc->sc_sync_if->if_vnet); pfsync_tx(sc, m); pf_release_state(st); CURVNET_RESTORE(); NET_EPOCH_EXIT(et); } static void pfsync_undefer_state_locked(struct pf_kstate *st, int drop) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_deferral *pd; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) { if (pd->pd_st == st) { if (callout_stop(&pd->pd_tmo) > 0) pfsync_undefer(pd, drop); return; } } panic("%s: unable to find deferred state", __func__); } static void pfsync_undefer_state(struct pf_kstate *st, int drop) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK(b); pfsync_undefer_state_locked(st, drop); PFSYNC_BUCKET_UNLOCK(b); } static struct pfsync_bucket* pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st) { int c = PF_IDHASH(st) % pfsync_buckets; return &sc->sc_buckets[c]; } static void pfsync_update_state(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; bool sync = false, ref = true; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PF_STATE_LOCK_ASSERT(st); PFSYNC_BUCKET_LOCK(b); if (st->state_flags & PFSTATE_ACK) pfsync_undefer_state_locked(st, 0); if (st->state_flags & PFSTATE_NOSYNC) { if (st->sync_state != PFSYNC_S_NONE) pfsync_q_del(st, true, b); PFSYNC_BUCKET_UNLOCK(b); return; } if (b->b_len == PFSYNC_MINPKT) callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); switch (st->sync_state) { case PFSYNC_S_UPD_C: case PFSYNC_S_UPD: case PFSYNC_S_INS: /* we're already handling it */ if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) { st->sync_updates++; if (st->sync_updates >= sc->sc_maxupdates) sync = true; } break; case PFSYNC_S_IACK: pfsync_q_del(st, false, b); ref = false; /* FALLTHROUGH */ case PFSYNC_S_NONE: pfsync_q_ins(st, PFSYNC_S_UPD_C, ref); st->sync_updates = 0; break; default: panic("%s: unexpected sync state %d", __func__, st->sync_state); } if (sync || (time_uptime - st->pfsync_time) < 2) pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); } static void pfsync_request_update(u_int32_t creatorid, u_int64_t id) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = &sc->sc_buckets[0]; struct pfsync_upd_req_item *item; size_t nlen = sizeof(struct pfsync_upd_req); PFSYNC_BUCKET_LOCK_ASSERT(b); /* * This code tries to prevent multiple update requests for the * same state from being generated. It searches the current subheader * queue, but it doesn't look into the queue of already packed datagrams.
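/*
 * [Editor's illustration] The queue-length accounting seen in
 * pfsync_request_update() above (and in pfsync_q_ins() further down)
 * follows one rule: a subheader is paid for only by the first record
 * of its kind in the packet, and the packet is flushed first whenever
 * the new record would overflow the MTU.  Isolated sketch; sizes are
 * parameters rather than the real structure sizes:
 */
static unsigned int
account_record(unsigned int pkt_len, unsigned int minpkt,
    unsigned int rec_len, unsigned int subh_len, unsigned int mtu,
    int queue_was_empty, void (*flush)(void))
{
        unsigned int nlen = rec_len;

        if (queue_was_empty)
                nlen += subh_len;       /* first record opens a subheader */
        if (pkt_len + nlen > mtu) {
                flush();                /* send what is queued so far */
                pkt_len = minpkt;       /* fresh packet: fixed header only */
                nlen = subh_len + rec_len;
        }
        return (pkt_len + nlen);
}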
*/ TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry) if (item->ur_msg.id == id && item->ur_msg.creatorid == creatorid) return; item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT); if (item == NULL) return; /* XXX stats */ item->ur_msg.id = id; item->ur_msg.creatorid = creatorid; if (TAILQ_EMPTY(&b->b_upd_req_list)) nlen += sizeof(struct pfsync_subheader); if (b->b_len + nlen > sc->sc_ifp->if_mtu) { pfsync_sendout(0, 0); nlen = sizeof(struct pfsync_subheader) + sizeof(struct pfsync_upd_req); } TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry); b->b_len += nlen; pfsync_push(b); } static bool pfsync_update_state_req(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; bool ref = true, full = false; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PF_STATE_LOCK_ASSERT(st); PFSYNC_BUCKET_LOCK(b); if (st->state_flags & PFSTATE_NOSYNC) { if (st->sync_state != PFSYNC_S_NONE) pfsync_q_del(st, true, b); PFSYNC_BUCKET_UNLOCK(b); return (full); } switch (st->sync_state) { case PFSYNC_S_UPD_C: case PFSYNC_S_IACK: pfsync_q_del(st, false, b); ref = false; /* FALLTHROUGH */ case PFSYNC_S_NONE: pfsync_q_ins(st, PFSYNC_S_UPD, ref); pfsync_push(b); break; case PFSYNC_S_INS: case PFSYNC_S_UPD: case PFSYNC_S_DEL: /* we're already handling it */ break; default: panic("%s: unexpected sync state %d", __func__, st->sync_state); } if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(struct pfsync_state)) full = true; PFSYNC_BUCKET_UNLOCK(b); return (full); } static void pfsync_delete_state(struct pf_kstate *st) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); bool ref = true; PFSYNC_BUCKET_LOCK(b); if (st->state_flags & PFSTATE_ACK) pfsync_undefer_state_locked(st, 1); if (st->state_flags & PFSTATE_NOSYNC) { if (st->sync_state != PFSYNC_S_NONE) pfsync_q_del(st, true, b); PFSYNC_BUCKET_UNLOCK(b); return; } if (b->b_len == PFSYNC_MINPKT) callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); switch (st->sync_state) { case PFSYNC_S_INS: /* We never got to tell the world so just forget about it. 
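/*
 * [Editor's illustration] pfsync_delete_state() below is a small
 * transition table keyed on sync_state: a state the peers never heard
 * about (still queued as an insert) is silently dropped, anything else
 * is (re)queued as a delete.  Simplified sketch with UPD_C folded into
 * UPD; the enum is a stand-in for the PFSYNC_S_* constants:
 */
enum sk_q { SK_NONE, SK_INS, SK_UPD, SK_IACK, SK_DEL };

static enum sk_q
on_delete(enum sk_q cur)
{
        switch (cur) {
        case SK_INS:
                return (SK_NONE);       /* never announced: just forget it */
        case SK_UPD:
        case SK_IACK:
        case SK_NONE:
                return (SK_DEL);        /* announce the deletion */
        default:
                return (cur);           /* SK_DEL: already queued */
        }
}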
*/ pfsync_q_del(st, true, b); break; case PFSYNC_S_UPD_C: case PFSYNC_S_UPD: case PFSYNC_S_IACK: pfsync_q_del(st, false, b); ref = false; /* FALLTHROUGH */ case PFSYNC_S_NONE: pfsync_q_ins(st, PFSYNC_S_DEL, ref); break; default: panic("%s: unexpected sync state %d", __func__, st->sync_state); } PFSYNC_BUCKET_UNLOCK(b); } static void pfsync_clear_states(u_int32_t creatorid, const char *ifname) { struct { struct pfsync_subheader subh; struct pfsync_clr clr; } __packed r; bzero(&r, sizeof(r)); r.subh.action = PFSYNC_ACT_CLR; r.subh.count = htons(1); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++; strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); r.clr.creatorid = creatorid; pfsync_send_plus(&r, sizeof(r)); } static void pfsync_q_ins(struct pf_kstate *st, int q, bool ref) { struct pfsync_softc *sc = V_pfsyncif; size_t nlen = pfsync_qs[q].len; struct pfsync_bucket *b = pfsync_get_bucket(sc, st); PFSYNC_BUCKET_LOCK_ASSERT(b); KASSERT(st->sync_state == PFSYNC_S_NONE, ("%s: st->sync_state %u", __func__, st->sync_state)); KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu", b->b_len)); if (TAILQ_EMPTY(&b->b_qs[q])) nlen += sizeof(struct pfsync_subheader); if (b->b_len + nlen > sc->sc_ifp->if_mtu) { pfsync_sendout(1, b->b_id); nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; } b->b_len += nlen; TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list); st->sync_state = q; if (ref) pf_ref_state(st); } static void pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b) { int q = st->sync_state; PFSYNC_BUCKET_LOCK_ASSERT(b); KASSERT(st->sync_state != PFSYNC_S_NONE, ("%s: st->sync_state != PFSYNC_S_NONE", __func__)); b->b_len -= pfsync_qs[q].len; TAILQ_REMOVE(&b->b_qs[q], st, sync_list); st->sync_state = PFSYNC_S_NONE; if (unref) pf_release_state(st); if (TAILQ_EMPTY(&b->b_qs[q])) b->b_len -= sizeof(struct pfsync_subheader); } static void pfsync_bulk_start(void) { struct pfsync_softc *sc = V_pfsyncif; if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: received bulk update request\n"); PFSYNC_BLOCK(sc); sc->sc_ureq_received = time_uptime; sc->sc_bulk_hashid = 0; sc->sc_bulk_stateid = 0; pfsync_bulk_status(PFSYNC_BUS_START); callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); PFSYNC_BUNLOCK(sc); } static void pfsync_bulk_update(void *arg) { struct pfsync_softc *sc = arg; struct pf_kstate *s; int i; PFSYNC_BLOCK_ASSERT(sc); CURVNET_SET(sc->sc_ifp->if_vnet); /* * Start with the last state from the previous invocation. * It may have gone away; in that case, start from the * hash slot. */ s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid); if (s != NULL) i = PF_IDHASH(s); else i = sc->sc_bulk_hashid; for (; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; if (s != NULL) PF_HASHROW_ASSERT(ih); else { PF_HASHROW_LOCK(ih); s = LIST_FIRST(&ih->states); } for (; s; s = LIST_NEXT(s, entry)) { if (s->sync_state == PFSYNC_S_NONE && s->timeout < PFTM_MAX && s->pfsync_time <= sc->sc_ureq_received) { if (pfsync_update_state_req(s)) { /* We've filled a packet. */ sc->sc_bulk_hashid = i; sc->sc_bulk_stateid = s->id; sc->sc_bulk_creatorid = s->creatorid; PF_HASHROW_UNLOCK(ih); callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); goto full; } } } PF_HASHROW_UNLOCK(ih); } /* We're done.
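/*
 * [Editor's illustration] pfsync_bulk_update() above is a resumable
 * walk: when a packet fills up it records (hash slot, state id) and the
 * next callout resumes there, falling back to the bare slot index if
 * the remembered state has since been freed.  The resume decision in
 * isolation; names and the slot_of callback are stand-ins:
 */
#include <stdint.h>

struct sk_cursor {
        uint32_t        hashid;         /* slot to fall back to */
        uint64_t        stateid;        /* state to resume after */
};

static uint32_t
resume_slot(const struct sk_cursor *c, int state_still_exists,
    uint32_t (*slot_of)(uint64_t))
{
        /* Prefer the surviving state's own slot, else the saved index. */
        return (state_still_exists ? slot_of(c->stateid) : c->hashid);
}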
*/ pfsync_bulk_status(PFSYNC_BUS_END); full: CURVNET_RESTORE(); } static void pfsync_bulk_status(u_int8_t status) { struct { struct pfsync_subheader subh; struct pfsync_bus bus; } __packed r; struct pfsync_softc *sc = V_pfsyncif; bzero(&r, sizeof(r)); r.subh.action = PFSYNC_ACT_BUS; r.subh.count = htons(1); V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++; r.bus.creatorid = V_pf_status.hostid; r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); r.bus.status = status; pfsync_send_plus(&r, sizeof(r)); } static void pfsync_bulk_fail(void *arg) { struct pfsync_softc *sc = arg; struct pfsync_bucket *b = &sc->sc_buckets[0]; CURVNET_SET(sc->sc_ifp->if_vnet); PFSYNC_BLOCK_ASSERT(sc); if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { /* Try again */ callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, V_pfsyncif); PFSYNC_BUCKET_LOCK(b); pfsync_request_update(0, 0); PFSYNC_BUCKET_UNLOCK(b); } else { /* Pretend like the transfer was ok. */ sc->sc_ureq_sent = 0; sc->sc_bulk_tries = 0; PFSYNC_LOCK(sc); if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync bulk fail"); sc->sc_flags |= PFSYNCF_OK; PFSYNC_UNLOCK(sc); if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: failed to receive bulk update\n"); } CURVNET_RESTORE(); } static void pfsync_send_plus(void *plus, size_t pluslen) { struct pfsync_softc *sc = V_pfsyncif; struct pfsync_bucket *b = &sc->sc_buckets[0]; PFSYNC_BUCKET_LOCK(b); if (b->b_len + pluslen > sc->sc_ifp->if_mtu) pfsync_sendout(1, b->b_id); b->b_plus = plus; b->b_len += (b->b_pluslen = pluslen); pfsync_sendout(1, b->b_id); PFSYNC_BUCKET_UNLOCK(b); } static void pfsync_timeout(void *arg) { struct pfsync_bucket *b = arg; CURVNET_SET(b->b_sc->sc_ifp->if_vnet); PFSYNC_BUCKET_LOCK(b); pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); CURVNET_RESTORE(); } static void pfsync_push(struct pfsync_bucket *b) { PFSYNC_BUCKET_LOCK_ASSERT(b); b->b_flags |= PFSYNCF_BUCKET_PUSH; swi_sched(V_pfsync_swi_cookie, 0); } static void pfsync_push_all(struct pfsync_softc *sc) { int c; struct pfsync_bucket *b; for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; PFSYNC_BUCKET_LOCK(b); pfsync_push(b); PFSYNC_BUCKET_UNLOCK(b); } } static void pfsync_tx(struct pfsync_softc *sc, struct mbuf *m) { struct ip *ip; int af, error = 0; ip = mtod(m, struct ip *); MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4)); af = ip->ip_v == IPVERSION ? AF_INET : AF_INET6; /* * We distinguish between a deferral packet and our * own pfsync packet based on M_SKIP_FIREWALL * flag. This is XXX. */ switch (af) { #ifdef INET case AF_INET: if (m->m_flags & M_SKIP_FIREWALL) { error = ip_output(m, NULL, NULL, 0, NULL, NULL); } else { error = ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL); } break; #endif #ifdef INET6 case AF_INET6: if (m->m_flags & M_SKIP_FIREWALL) { error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); } else { MPASS(false); /* We don't support pfsync over IPv6. 
*/ /*error = ip6_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo6, NULL);*/ break; #endif } if (error == 0) V_pfsyncstats.pfsyncs_opackets++; else V_pfsyncstats.pfsyncs_oerrors++; } static void pfsyncintr(void *arg) { struct epoch_tracker et; struct pfsync_softc *sc = arg; struct pfsync_bucket *b; struct mbuf *m, *n; int c; NET_EPOCH_ENTER(et); CURVNET_SET(sc->sc_ifp->if_vnet); for (c = 0; c < pfsync_buckets; c++) { b = &sc->sc_buckets[c]; PFSYNC_BUCKET_LOCK(b); if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) { pfsync_sendout(0, b->b_id); b->b_flags &= ~PFSYNCF_BUCKET_PUSH; } _IF_DEQUEUE_ALL(&b->b_snd, m); PFSYNC_BUCKET_UNLOCK(b); for (; m != NULL; m = n) { n = m->m_nextpkt; m->m_nextpkt = NULL; pfsync_tx(sc, m); } } CURVNET_RESTORE(); NET_EPOCH_EXIT(et); } static int pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, struct in_mfilter *imf) { struct ip_moptions *imo = &sc->sc_imo; int error; if (!(ifp->if_flags & IFF_MULTICAST)) return (EADDRNOTAVAIL); switch (sc->sc_sync_peer.ss_family) { #ifdef INET case AF_INET: { ip_mfilter_init(&imo->imo_head); imo->imo_multicast_vif = -1; if ((error = in_joingroup(ifp, &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL, &imf->imf_inm)) != 0) return (error); ip_mfilter_insert(&imo->imo_head, imf); imo->imo_multicast_ifp = ifp; imo->imo_multicast_ttl = PFSYNC_DFLTTL; imo->imo_multicast_loop = 0; break; } #endif } return (0); } static void pfsync_multicast_cleanup(struct pfsync_softc *sc) { struct ip_moptions *imo = &sc->sc_imo; struct in_mfilter *imf; while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) { ip_mfilter_remove(&imo->imo_head, imf); in_leavegroup(imf->imf_inm, NULL); ip_mfilter_free(imf); } imo->imo_multicast_ifp = NULL; } void pfsync_detach_ifnet(struct ifnet *ifp) { struct pfsync_softc *sc = V_pfsyncif; if (sc == NULL) return; PFSYNC_LOCK(sc); if (sc->sc_sync_if == ifp) { /* We don't need multicast cleanup here, because the interface * is going away. We do need to ensure we don't try to do * cleanup later.
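/*
 * [Editor's illustration] pfsync_multicast_setup() above is the
 * in-kernel analogue of a socket-level group join: membership on the
 * sync interface, TTL pinned to PFSYNC_DFLTTL and multicast loopback
 * disabled.  Userland equivalent, assuming the default group
 * 224.0.0.240 (INADDR_PFSYNC_GROUP):
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int
join_pfsync_group(int s, struct in_addr ifaddr)
{
        struct ip_mreq mreq;
        unsigned char ttl = 255, loop = 0;

        mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.240");
        mreq.imr_interface = ifaddr;
        if (setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq,
            sizeof(mreq)) == -1)
                return (-1);
        if (setsockopt(s, IPPROTO_IP, IP_MULTICAST_TTL, &ttl,
            sizeof(ttl)) == -1)
                return (-1);
        return (setsockopt(s, IPPROTO_IP, IP_MULTICAST_LOOP, &loop,
            sizeof(loop)));
}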
*/ ip_mfilter_init(&sc->sc_imo.imo_head); sc->sc_imo.imo_multicast_ifp = NULL; sc->sc_sync_if = NULL; } PFSYNC_UNLOCK(sc); } static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status) { struct sockaddr_storage sa; status->maxupdates = pfsyncr->pfsyncr_maxupdates; status->flags = pfsyncr->pfsyncr_defer; strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ); memset(&sa, 0, sizeof(sa)); if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) { struct sockaddr_in *in = (struct sockaddr_in *)&sa; in->sin_family = AF_INET; in->sin_len = sizeof(*in); in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr; } status->syncpeer = sa; return 0; } static int pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc) { struct in_mfilter *imf = NULL; struct ifnet *sifp; struct ip *ip; int error; int c; if ((status->maxupdates < 0) || (status->maxupdates > 255)) return (EINVAL); if (status->syncdev[0] == '\0') sifp = NULL; else if ((sifp = ifunit_ref(status->syncdev)) == NULL) return (EINVAL); struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer); if (sifp != NULL && (status_sin->sin_addr.s_addr == 0 || status_sin->sin_addr.s_addr == htonl(INADDR_PFSYNC_GROUP))) imf = ip_mfilter_alloc(M_WAITOK, 0, 0); PFSYNC_LOCK(sc); struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer; sc_sin->sin_family = AF_INET; sc_sin->sin_len = sizeof(*sc_sin); if (status_sin->sin_addr.s_addr == 0) { sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP); } else { sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr; } sc->sc_maxupdates = status->maxupdates; if (status->flags & PFSYNCF_DEFER) { sc->sc_flags |= PFSYNCF_DEFER; V_pfsync_defer_ptr = pfsync_defer; } else { sc->sc_flags &= ~PFSYNCF_DEFER; V_pfsync_defer_ptr = NULL; } if (sifp == NULL) { if (sc->sc_sync_if) if_rele(sc->sc_sync_if); sc->sc_sync_if = NULL; pfsync_multicast_cleanup(sc); PFSYNC_UNLOCK(sc); return (0); } for (c = 0; c < pfsync_buckets; c++) { PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT && (sifp->if_mtu < sc->sc_ifp->if_mtu || (sc->sc_sync_if != NULL && sifp->if_mtu < sc->sc_sync_if->if_mtu) || sifp->if_mtu < MCLBYTES - sizeof(struct ip))) pfsync_sendout(1, c); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); } pfsync_multicast_cleanup(sc); if (sc_sin->sin_addr.s_addr == htonl(INADDR_PFSYNC_GROUP)) { error = pfsync_multicast_setup(sc, sifp, imf); if (error) { if_rele(sifp); ip_mfilter_free(imf); PFSYNC_UNLOCK(sc); return (error); } } if (sc->sc_sync_if) if_rele(sc->sc_sync_if); sc->sc_sync_if = sifp; ip = &sc->sc_template.ipv4; bzero(ip, sizeof(*ip)); ip->ip_v = IPVERSION; ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2; ip->ip_tos = IPTOS_LOWDELAY; /* len and id are set later. */ ip->ip_off = htons(IP_DF); ip->ip_ttl = PFSYNC_DFLTTL; ip->ip_p = IPPROTO_PFSYNC; ip->ip_src.s_addr = INADDR_ANY; ip->ip_dst.s_addr = sc_sin->sin_addr.s_addr; /* Request a full state table update. 
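/*
 * [Editor's illustration] The template filled in above lets every
 * outgoing pfsync packet start from a preformatted struct ip, with only
 * length and id stamped per packet.  Userland reconstruction of the
 * same header; 240 is IPPROTO_PFSYNC and 255 is PFSYNC_DFLTTL:
 */
#include <string.h>
#include <netinet/in.h>
#include <netinet/ip.h>

static void
fill_pfsync_template(struct ip *ip, struct in_addr dst)
{
        memset(ip, 0, sizeof(*ip));
        ip->ip_v = IPVERSION;
        ip->ip_hl = sizeof(*ip) >> 2;
        ip->ip_tos = IPTOS_LOWDELAY;
        /* ip_len and ip_id are stamped per packet. */
        ip->ip_off = htons(IP_DF);
        ip->ip_ttl = 255;
        ip->ip_p = 240;
        ip->ip_src.s_addr = INADDR_ANY;
        ip->ip_dst = dst;
}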
*/ if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) (*carp_demote_adj_p)(V_pfsync_carp_adj, "pfsync bulk start"); sc->sc_flags &= ~PFSYNCF_OK; if (V_pf_status.debug >= PF_DEBUG_MISC) printf("pfsync: requesting bulk update\n"); PFSYNC_UNLOCK(sc); PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); pfsync_request_update(0, 0); PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); PFSYNC_BLOCK(sc); sc->sc_ureq_sent = time_uptime; callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc); PFSYNC_BUNLOCK(sc); return (0); } static void pfsync_pointers_init(void) { PF_RULES_WLOCK(); V_pfsync_state_import_ptr = pfsync_state_import; V_pfsync_insert_state_ptr = pfsync_insert_state; V_pfsync_update_state_ptr = pfsync_update_state; V_pfsync_delete_state_ptr = pfsync_delete_state; V_pfsync_clear_states_ptr = pfsync_clear_states; V_pfsync_defer_ptr = pfsync_defer; PF_RULES_WUNLOCK(); } static void pfsync_pointers_uninit(void) { PF_RULES_WLOCK(); V_pfsync_state_import_ptr = NULL; V_pfsync_insert_state_ptr = NULL; V_pfsync_update_state_ptr = NULL; V_pfsync_delete_state_ptr = NULL; V_pfsync_clear_states_ptr = NULL; V_pfsync_defer_ptr = NULL; PF_RULES_WUNLOCK(); } static void vnet_pfsync_init(const void *unused __unused) { int error; V_pfsync_cloner = if_clone_simple(pfsyncname, pfsync_clone_create, pfsync_clone_destroy, 1); error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif, SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie); if (error) { if_clone_detach(V_pfsync_cloner); log(LOG_INFO, "swi_add() failed in %s\n", __func__); } pfsync_pointers_init(); } VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY, vnet_pfsync_init, NULL); static void vnet_pfsync_uninit(const void *unused __unused) { int ret __diagused; pfsync_pointers_uninit(); if_clone_detach(V_pfsync_cloner); ret = swi_remove(V_pfsync_swi_cookie); MPASS(ret == 0); ret = intr_event_destroy(V_pfsync_swi_ie); MPASS(ret == 0); } VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH, vnet_pfsync_uninit, NULL); static int pfsync_init(void) { #ifdef INET int error; pfsync_detach_ifnet_ptr = pfsync_detach_ifnet; error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL); if (error) return (error); #endif return (0); } static void pfsync_uninit(void) { pfsync_detach_ifnet_ptr = NULL; #ifdef INET ipproto_unregister(IPPROTO_PFSYNC); #endif } static int pfsync_modevent(module_t mod, int type, void *data) { int error = 0; switch (type) { case MOD_LOAD: error = pfsync_init(); break; case MOD_UNLOAD: pfsync_uninit(); break; default: error = EINVAL; break; } return (error); } static moduledata_t pfsync_mod = { pfsyncname, pfsync_modevent, 0 }; #define PFSYNC_MODVER 1 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */ DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); MODULE_VERSION(pfsync, PFSYNC_MODVER); MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER); diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c index f20851cb60e8..5eebd44c297d 100644 --- a/sys/netpfil/pf/pf.c +++ b/sys/netpfil/pf/pf.c @@ -1,7962 +1,8105 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002 - 2008 Henning Brauer * Copyright (c) 2012 Gleb Smirnoff * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * * $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_bpf.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_pf.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* dummynet */ #include #include #include #include #include #ifdef INET6 #include #include #include #include #include #include #include #endif /* INET6 */ #if defined(SCTP) || defined(SCTP_SUPPORT) #include #endif #include #include #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x SDT_PROVIDER_DEFINE(pf); SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *", "struct pf_kstate *"); SDT_PROBE_DEFINE4(pf, ip, test6, done, "int", "int", "struct pf_krule *", "struct pf_kstate *"); SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *", "struct pf_state_key_cmp *", "int", "struct pf_pdesc *", "struct pf_kstate *"); SDT_PROBE_DEFINE3(pf, eth, test_rule, entry, "int", "struct ifnet *", "struct mbuf *"); SDT_PROBE_DEFINE2(pf, eth, test_rule, test, "int", "struct pf_keth_rule *"); SDT_PROBE_DEFINE3(pf, eth, test_rule, mismatch, "int", "struct pf_keth_rule *", "char *"); SDT_PROBE_DEFINE2(pf, eth, test_rule, match, "int", "struct pf_keth_rule *"); SDT_PROBE_DEFINE2(pf, eth, test_rule, final_match, "int", "struct pf_keth_rule *"); /* * Global variables */ /* state tables */ VNET_DEFINE(struct pf_altqqueue, pf_altqs[4]); VNET_DEFINE(struct pf_kpalist, pf_pabuf); VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active); VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_active); VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive); VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_inactive); VNET_DEFINE(struct pf_kstatus, 
pf_status); VNET_DEFINE(u_int32_t, ticket_altqs_active); VNET_DEFINE(u_int32_t, ticket_altqs_inactive); VNET_DEFINE(int, altqs_inactive_open); VNET_DEFINE(u_int32_t, ticket_pabuf); VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx); #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx) VNET_DEFINE(u_char, pf_tcp_secret[16]); #define V_pf_tcp_secret VNET(pf_tcp_secret) VNET_DEFINE(int, pf_tcp_secret_init); #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init) VNET_DEFINE(int, pf_tcp_iss_off); #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off) VNET_DECLARE(int, pf_vnet_active); #define V_pf_vnet_active VNET(pf_vnet_active) VNET_DEFINE_STATIC(uint32_t, pf_purge_idx); #define V_pf_purge_idx VNET(pf_purge_idx) #ifdef PF_WANT_32_TO_64_COUNTER VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter); #define V_pf_counter_periodic_iter VNET(pf_counter_periodic_iter) VNET_DEFINE(struct allrulelist_head, pf_allrulelist); VNET_DEFINE(size_t, pf_allrulecount); VNET_DEFINE(struct pf_krule *, pf_rulemarker); #endif /* * Queue for pf_intr() sends. */ static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations"); struct pf_send_entry { STAILQ_ENTRY(pf_send_entry) pfse_next; struct mbuf *pfse_m; enum { PFSE_IP, PFSE_IP6, PFSE_ICMP, PFSE_ICMP6, } pfse_type; struct { int type; int code; int mtu; } icmpopts; }; STAILQ_HEAD(pf_send_head, pf_send_entry); VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue); #define V_pf_sendqueue VNET(pf_sendqueue) static struct mtx_padalign pf_sendqueue_mtx; MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF); #define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx) #define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx) /* * Queue for pf_overload_task() tasks. */ struct pf_overload_entry { SLIST_ENTRY(pf_overload_entry) next; struct pf_addr addr; sa_family_t af; uint8_t dir; struct pf_krule *rule; }; SLIST_HEAD(pf_overload_head, pf_overload_entry); VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue); #define V_pf_overloadqueue VNET(pf_overloadqueue) VNET_DEFINE_STATIC(struct task, pf_overloadtask); #define V_pf_overloadtask VNET(pf_overloadtask) static struct mtx_padalign pf_overloadqueue_mtx; MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx, "pf overload/flush queue", MTX_DEF); #define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx) #define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx) VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules); struct mtx_padalign pf_unlnkdrules_mtx; MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules", MTX_DEF); struct sx pf_config_lock; SX_SYSINIT(pf_config_lock, &pf_config_lock, "pf config"); struct mtx_padalign pf_table_stats_lock; MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats", MTX_DEF); VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z); #define V_pf_sources_z VNET(pf_sources_z) uma_zone_t pf_mtag_z; VNET_DEFINE(uma_zone_t, pf_state_z); VNET_DEFINE(uma_zone_t, pf_state_key_z); VNET_DEFINE(struct unrhdr64, pf_stateid); static void pf_src_tree_remove_state(struct pf_kstate *); static void pf_init_threshold(struct pf_threshold *, u_int32_t, u_int32_t); static void pf_add_threshold(struct pf_threshold *); static int pf_check_threshold(struct pf_threshold *); static void pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *, u_int16_t *, u_int16_t *, struct pf_addr *, u_int16_t, u_int8_t, sa_family_t); static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *, struct tcphdr *, struct pf_state_peer *); static void pf_change_icmp(struct pf_addr *, u_int16_t 
*, struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t *, u_int16_t *, u_int16_t *, u_int16_t *, u_int8_t, sa_family_t); static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, - sa_family_t, struct pf_krule *); + sa_family_t, struct pf_krule *, int); static void pf_detach_state(struct pf_kstate *); static int pf_state_key_attach(struct pf_state_key *, struct pf_state_key *, struct pf_kstate *); static void pf_state_key_detach(struct pf_kstate *, int); static int pf_state_key_ctor(void *, int, void *, int); static u_int32_t pf_tcp_iss(struct pf_pdesc *); void pf_rule_to_actions(struct pf_krule *, struct pf_rule_actions *); static int pf_dummynet(struct pf_pdesc *, int, struct pf_kstate *, struct pf_krule *, struct mbuf **); static int pf_dummynet_route(struct pf_pdesc *, int, struct pf_kstate *, struct pf_krule *, struct ifnet *, struct sockaddr *, struct mbuf **); static int pf_test_eth_rule(int, struct pfi_kkif *, struct mbuf **); static int pf_test_rule(struct pf_krule **, struct pf_kstate **, int, struct pfi_kkif *, struct mbuf *, int, struct pf_pdesc *, struct pf_krule **, struct pf_kruleset **, struct inpcb *); static int pf_create_state(struct pf_krule *, struct pf_krule *, struct pf_krule *, struct pf_pdesc *, struct pf_ksrc_node *, struct pf_state_key *, struct pf_state_key *, struct mbuf *, int, u_int16_t, u_int16_t, int *, struct pfi_kkif *, struct pf_kstate **, int, u_int16_t, u_int16_t, - int); + int, struct pf_krule_slist *); static int pf_test_fragment(struct pf_krule **, int, struct pfi_kkif *, struct mbuf *, void *, struct pf_pdesc *, struct pf_krule **, struct pf_kruleset **); static int pf_tcp_track_full(struct pf_kstate **, struct pfi_kkif *, struct mbuf *, int, struct pf_pdesc *, u_short *, int *); static int pf_tcp_track_sloppy(struct pf_kstate **, struct pf_pdesc *, u_short *); static int pf_test_state_tcp(struct pf_kstate **, int, struct pfi_kkif *, struct mbuf *, int, void *, struct pf_pdesc *, u_short *); static int pf_test_state_udp(struct pf_kstate **, int, struct pfi_kkif *, struct mbuf *, int, void *, struct pf_pdesc *); static int pf_test_state_icmp(struct pf_kstate **, int, struct pfi_kkif *, struct mbuf *, int, void *, struct pf_pdesc *, u_short *); static int pf_test_state_other(struct pf_kstate **, int, struct pfi_kkif *, struct mbuf *, struct pf_pdesc *); static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, int, u_int16_t); static int pf_check_proto_cksum(struct mbuf *, int, int, u_int8_t, sa_family_t); static void pf_print_state_parts(struct pf_kstate *, struct pf_state_key *, struct pf_state_key *); static int pf_addr_wrap_neq(struct pf_addr_wrap *, struct pf_addr_wrap *); static void pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t, bool, u_int8_t); static struct pf_kstate *pf_find_state(struct pfi_kkif *, struct pf_state_key_cmp *, u_int); static int pf_src_connlimit(struct pf_kstate **); static void pf_overload_task(void *v, int pending); static int pf_insert_src_node(struct pf_ksrc_node **, struct pf_krule *, struct pf_addr *, sa_family_t); static u_int pf_purge_expired_states(u_int, int); static void pf_purge_unlinked_rules(void); static int pf_mtag_uminit(void *, int, int); static void pf_mtag_free(struct m_tag *); static void pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *, int, struct pf_state_key *); #ifdef INET static void pf_route(struct mbuf **, struct pf_krule *, int, struct ifnet *, struct pf_kstate *, struct pf_pdesc *, struct inpcb *); #endif /* INET */ #ifdef INET6 static void pf_change_a6(struct 
pf_addr *, u_int16_t *, struct pf_addr *, u_int8_t); static void pf_route6(struct mbuf **, struct pf_krule *, int, struct ifnet *, struct pf_kstate *, struct pf_pdesc *, struct inpcb *); #endif /* INET6 */ static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t); int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len); extern int pf_end_threads; extern struct proc *pf_purge_proc; VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]); #define PACKET_UNDO_NAT(_m, _pd, _off, _s, _dir) \ do { \ struct pf_state_key *nk; \ if ((_dir) == PF_OUT) \ nk = (_s)->key[PF_SK_STACK]; \ else \ nk = (_s)->key[PF_SK_WIRE]; \ pf_packet_rework_nat(_m, _pd, _off, nk); \ } while (0) #define PACKET_LOOPED(pd) ((pd)->pf_mtag && \ (pd)->pf_mtag->flags & PF_PACKET_LOOPED) #define STATE_LOOKUP(i, k, d, s, pd) \ do { \ (s) = pf_find_state((i), (k), (d)); \ SDT_PROBE5(pf, ip, state, lookup, i, k, d, pd, (s)); \ if ((s) == NULL) \ return (PF_DROP); \ if (PACKET_LOOPED(pd)) \ return (PF_PASS); \ } while (0) #define BOUND_IFACE(r, k) \ ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all #define STATE_INC_COUNTERS(s) \ do { \ + struct pf_krule_item *mrm; \ counter_u64_add(s->rule.ptr->states_cur, 1); \ counter_u64_add(s->rule.ptr->states_tot, 1); \ if (s->anchor.ptr != NULL) { \ counter_u64_add(s->anchor.ptr->states_cur, 1); \ counter_u64_add(s->anchor.ptr->states_tot, 1); \ } \ if (s->nat_rule.ptr != NULL) { \ counter_u64_add(s->nat_rule.ptr->states_cur, 1);\ counter_u64_add(s->nat_rule.ptr->states_tot, 1);\ } \ + SLIST_FOREACH(mrm, &s->match_rules, entry) { \ + counter_u64_add(mrm->r->states_cur, 1); \ + counter_u64_add(mrm->r->states_tot, 1); \ + } \ } while (0) #define STATE_DEC_COUNTERS(s) \ do { \ + struct pf_krule_item *mrm; \ if (s->nat_rule.ptr != NULL) \ counter_u64_add(s->nat_rule.ptr->states_cur, -1);\ if (s->anchor.ptr != NULL) \ counter_u64_add(s->anchor.ptr->states_cur, -1); \ counter_u64_add(s->rule.ptr->states_cur, -1); \ + SLIST_FOREACH(mrm, &s->match_rules, entry) \ + counter_u64_add(mrm->r->states_cur, -1); \ } while (0) MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures"); +MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items"); VNET_DEFINE(struct pf_keyhash *, pf_keyhash); VNET_DEFINE(struct pf_idhash *, pf_idhash); VNET_DEFINE(struct pf_srchash *, pf_srchash); SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "pf(4)"); u_long pf_hashmask; u_long pf_srchashmask; static u_long pf_hashsize; static u_long pf_srchashsize; u_long pf_ioctl_maxcount = 65535; SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN, &pf_hashsize, 0, "Size of pf(4) states hashtable"); SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN, &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable"); SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN, &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... 
in a single ioctl() call"); VNET_DEFINE(void *, pf_swi_cookie); VNET_DEFINE(struct intr_event *, pf_swi_ie); VNET_DEFINE(uint32_t, pf_hashseed); #define V_pf_hashseed VNET(pf_hashseed) int pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: if (a->addr32[0] > b->addr32[0]) return (1); if (a->addr32[0] < b->addr32[0]) return (-1); break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (a->addr32[3] > b->addr32[3]) return (1); if (a->addr32[3] < b->addr32[3]) return (-1); if (a->addr32[2] > b->addr32[2]) return (1); if (a->addr32[2] < b->addr32[2]) return (-1); if (a->addr32[1] > b->addr32[1]) return (1); if (a->addr32[1] < b->addr32[1]) return (-1); if (a->addr32[0] > b->addr32[0]) return (1); if (a->addr32[0] < b->addr32[0]) return (-1); break; #endif /* INET6 */ default: panic("%s: unknown address family %u", __func__, af); } return (0); } static void pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off, struct pf_state_key *nk) { switch (pd->proto) { case IPPROTO_TCP: { struct tcphdr *th = &pd->hdr.tcp; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 0, pd->af); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum, &th->th_sum, &nk->addr[pd->didx], nk->port[pd->didx], 0, pd->af); m_copyback(m, off, sizeof(*th), (caddr_t)th); break; } case IPPROTO_UDP: { struct udphdr *uh = &pd->hdr.udp; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum, &uh->uh_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, pd->af); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum, &uh->uh_sum, &nk->addr[pd->didx], nk->port[pd->didx], 1, pd->af); m_copyback(m, off, sizeof(*uh), (caddr_t)uh); break; } case IPPROTO_ICMP: { struct icmp *ih = &pd->hdr.icmp; if (nk->port[pd->sidx] != ih->icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( ih->icmp_cksum, ih->icmp_id, nk->port[pd->sidx], 0); ih->icmp_id = nk->port[pd->sidx]; pd->sport = &ih->icmp_id; m_copyback(m, off, ICMP_MINLEN, (caddr_t)ih); } /* FALLTHROUGH */ } default: if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) { switch (pd->af) { case AF_INET: pf_change_a(&pd->src->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); break; case AF_INET6: PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); break; } } if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) { switch (pd->af) { case AF_INET: pf_change_a(&pd->dst->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); break; case AF_INET6: PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); break; } } break; } } static __inline uint32_t pf_hashkey(struct pf_state_key *sk) { uint32_t h; h = murmur3_32_hash32((uint32_t *)sk, sizeof(struct pf_state_key_cmp)/sizeof(uint32_t), V_pf_hashseed); return (h & pf_hashmask); } static __inline uint32_t pf_hashsrc(struct pf_addr *addr, sa_family_t af) { uint32_t h; switch (af) { case AF_INET: h = murmur3_32_hash32((uint32_t *)&addr->v4, sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed); break; case AF_INET6: h = murmur3_32_hash32((uint32_t *)&addr->v6, sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed); break; default: panic("%s: unknown address family %u", __func__, af); } return (h & pf_srchashmask); } #ifdef ALTQ static int pf_state_hash(struct pf_kstate *s) { u_int32_t hv = (intptr_t)s / sizeof(*s); hv ^= crc32(&s->src, sizeof(s->src)); hv ^= 
crc32(&s->dst, sizeof(s->dst)); if (hv == 0) hv = 1; return (hv); } #endif static __inline void pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate) { if (which == PF_PEER_DST || which == PF_PEER_BOTH) s->dst.state = newstate; if (which == PF_PEER_DST) return; if (s->src.state == newstate) return; if (s->creatorid == V_pf_status.hostid && s->key[PF_SK_STACK] != NULL && s->key[PF_SK_STACK]->proto == IPPROTO_TCP && !(TCPS_HAVEESTABLISHED(s->src.state) || s->src.state == TCPS_CLOSED) && (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED)) atomic_add_32(&V_pf_status.states_halfopen, -1); s->src.state = newstate; } #ifdef INET6 void pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: dst->addr32[0] = src->addr32[0]; break; #endif /* INET */ case AF_INET6: dst->addr32[0] = src->addr32[0]; dst->addr32[1] = src->addr32[1]; dst->addr32[2] = src->addr32[2]; dst->addr32[3] = src->addr32[3]; break; } } #endif /* INET6 */ static void pf_init_threshold(struct pf_threshold *threshold, u_int32_t limit, u_int32_t seconds) { threshold->limit = limit * PF_THRESHOLD_MULT; threshold->seconds = seconds; threshold->count = 0; threshold->last = time_uptime; } static void pf_add_threshold(struct pf_threshold *threshold) { u_int32_t t = time_uptime, diff = t - threshold->last; if (diff >= threshold->seconds) threshold->count = 0; else threshold->count -= threshold->count * diff / threshold->seconds; threshold->count += PF_THRESHOLD_MULT; threshold->last = t; } static int pf_check_threshold(struct pf_threshold *threshold) { return (threshold->count > threshold->limit); } static int pf_src_connlimit(struct pf_kstate **state) { struct pf_overload_entry *pfoe; int bad = 0; PF_STATE_LOCK_ASSERT(*state); (*state)->src_node->conn++; (*state)->src.tcp_est = 1; pf_add_threshold(&(*state)->src_node->conn_rate); if ((*state)->rule.ptr->max_src_conn && (*state)->rule.ptr->max_src_conn < (*state)->src_node->conn) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1); bad++; } if ((*state)->rule.ptr->max_src_conn_rate.limit && pf_check_threshold(&(*state)->src_node->conn_rate)) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1); bad++; } if (!bad) return (0); /* Kill this state. */ (*state)->timeout = PFTM_PURGE; pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED); if ((*state)->rule.ptr->overload_tbl == NULL) return (1); /* Schedule overloading and flushing task. 
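 * The table insertion and the flush are done from pf_overload_task()
 * rather than here, as they need the rules write lock, which must not
 * be acquired while the state lock is held.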
*/ pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT); if (pfoe == NULL) return (1); /* too bad :( */ bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr)); pfoe->af = (*state)->key[PF_SK_WIRE]->af; pfoe->rule = (*state)->rule.ptr; pfoe->dir = (*state)->direction; PF_OVERLOADQ_LOCK(); SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next); PF_OVERLOADQ_UNLOCK(); taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask); return (1); } static void pf_overload_task(void *v, int pending) { struct pf_overload_head queue; struct pfr_addr p; struct pf_overload_entry *pfoe, *pfoe1; uint32_t killed = 0; CURVNET_SET((struct vnet *)v); PF_OVERLOADQ_LOCK(); queue = V_pf_overloadqueue; SLIST_INIT(&V_pf_overloadqueue); PF_OVERLOADQ_UNLOCK(); bzero(&p, sizeof(p)); SLIST_FOREACH(pfoe, &queue, next) { counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1); if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("%s: blocking address ", __func__); pf_print_host(&pfoe->addr, 0, pfoe->af); printf("\n"); } p.pfra_af = pfoe->af; switch (pfoe->af) { #ifdef INET case AF_INET: p.pfra_net = 32; p.pfra_ip4addr = pfoe->addr.v4; break; #endif #ifdef INET6 case AF_INET6: p.pfra_net = 128; p.pfra_ip6addr = pfoe->addr.v6; break; #endif } PF_RULES_WLOCK(); pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second); PF_RULES_WUNLOCK(); } /* * Remove those entries that don't need flushing. */ SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1) if (pfoe->rule->flush == 0) { SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next); free(pfoe, M_PFTEMP); } else counter_u64_add( V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1); /* If nothing to flush, return. */ if (SLIST_EMPTY(&queue)) { CURVNET_RESTORE(); return; } for (int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_state_key *sk; struct pf_kstate *s; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { sk = s->key[PF_SK_WIRE]; SLIST_FOREACH(pfoe, &queue, next) if (sk->af == pfoe->af && ((pfoe->rule->flush & PF_FLUSH_GLOBAL) || pfoe->rule == s->rule.ptr) && ((pfoe->dir == PF_OUT && PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) || (pfoe->dir == PF_IN && PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) { s->timeout = PFTM_PURGE; pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED); killed++; } } PF_HASHROW_UNLOCK(ih); } SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1) free(pfoe, M_PFTEMP); if (V_pf_status.debug >= PF_DEBUG_MISC) printf("%s: %u states killed", __func__, killed); CURVNET_RESTORE(); } /* * Can return locked on failure, so that we can consistently * allocate and insert a new one.
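 * (With returnlocked set, the source hash row stays locked when no
 * node is found, so the caller may insert without a lookup/insert
 * race.)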
*/ struct pf_ksrc_node * pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af, int returnlocked) { struct pf_srchash *sh; struct pf_ksrc_node *n; counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1); sh = &V_pf_srchash[pf_hashsrc(src, af)]; PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) if (n->rule.ptr == rule && n->af == af && ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) || (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0))) break; if (n != NULL) { n->states++; PF_HASHROW_UNLOCK(sh); } else if (returnlocked == 0) PF_HASHROW_UNLOCK(sh); return (n); } static void pf_free_src_node(struct pf_ksrc_node *sn) { for (int i = 0; i < 2; i++) { counter_u64_free(sn->bytes[i]); counter_u64_free(sn->packets[i]); } uma_zfree(V_pf_sources_z, sn); } static int pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_krule *rule, struct pf_addr *src, sa_family_t af) { KASSERT((rule->rule_flag & PFRULE_SRCTRACK || rule->rpool.opts & PF_POOL_STICKYADDR), ("%s for non-tracking rule %p", __func__, rule)); if (*sn == NULL) *sn = pf_find_src_node(src, rule, af, 1); if (*sn == NULL) { struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)]; PF_HASHROW_ASSERT(sh); if (!rule->max_src_nodes || counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes) (*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO); else counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES], 1); if ((*sn) == NULL) { PF_HASHROW_UNLOCK(sh); return (-1); } for (int i = 0; i < 2; i++) { (*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT); (*sn)->packets[i] = counter_u64_alloc(M_NOWAIT); if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) { pf_free_src_node(*sn); PF_HASHROW_UNLOCK(sh); return (-1); } } pf_init_threshold(&(*sn)->conn_rate, rule->max_src_conn_rate.limit, rule->max_src_conn_rate.seconds); (*sn)->af = af; (*sn)->rule.ptr = rule; PF_ACPY(&(*sn)->addr, src, af); LIST_INSERT_HEAD(&sh->nodes, *sn, entry); (*sn)->creation = time_uptime; (*sn)->ruletype = rule->action; (*sn)->states = 1; if ((*sn)->rule.ptr != NULL) counter_u64_add((*sn)->rule.ptr->src_nodes, 1); PF_HASHROW_UNLOCK(sh); counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1); } else { if (rule->max_src_states && (*sn)->states >= rule->max_src_states) { counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES], 1); return (-1); } } return (0); } void pf_unlink_src_node(struct pf_ksrc_node *src) { PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]); LIST_REMOVE(src, entry); if (src->rule.ptr) counter_u64_add(src->rule.ptr->src_nodes, -1); } u_int pf_free_src_nodes(struct pf_ksrc_node_list *head) { struct pf_ksrc_node *sn, *tmp; u_int count = 0; LIST_FOREACH_SAFE(sn, head, entry, tmp) { pf_free_src_node(sn); count++; } counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count); return (count); } void pf_mtag_initialize(void) { pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) + sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL, UMA_ALIGN_PTR, 0); } /* Per-vnet data storage structures initialization. */ void pf_initialize(void) { struct pf_keyhash *kh; struct pf_idhash *ih; struct pf_srchash *sh; u_int i; if (pf_hashsize == 0 || !powerof2(pf_hashsize)) pf_hashsize = PF_HASHSIZ; if (pf_srchashsize == 0 || !powerof2(pf_srchashsize)) pf_srchashsize = PF_SRCHASHSIZ; V_pf_hashseed = arc4random(); /* States and state keys storage. 
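 * Both live in UMA zones; the states zone is capped at PFSTATE_HIWAT
 * via uma_zone_set_max() below.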
*/ V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z; uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT); uma_zone_set_warning(V_pf_state_z, "PF states limit reached"); V_pf_state_key_z = uma_zcreate("pf state keys", sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash), M_PFHASH, M_NOWAIT | M_ZERO); V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash), M_PFHASH, M_NOWAIT | M_ZERO); if (V_pf_keyhash == NULL || V_pf_idhash == NULL) { printf("pf: Unable to allocate memory for " "state_hashsize %lu.\n", pf_hashsize); free(V_pf_keyhash, M_PFHASH); free(V_pf_idhash, M_PFHASH); pf_hashsize = PF_HASHSIZ; V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO); V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO); } pf_hashmask = pf_hashsize - 1; for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask; i++, kh++, ih++) { mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF); } /* Source nodes. */ V_pf_sources_z = uma_zcreate("pf source nodes", sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z; uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT); uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached"); V_pf_srchash = mallocarray(pf_srchashsize, sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO); if (V_pf_srchash == NULL) { printf("pf: Unable to allocate memory for " "source_hashsize %lu.\n", pf_srchashsize); pf_srchashsize = PF_SRCHASHSIZ; V_pf_srchash = mallocarray(pf_srchashsize, sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO); } pf_srchashmask = pf_srchashsize - 1; for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF); /* ALTQ */ TAILQ_INIT(&V_pf_altqs[0]); TAILQ_INIT(&V_pf_altqs[1]); TAILQ_INIT(&V_pf_altqs[2]); TAILQ_INIT(&V_pf_altqs[3]); TAILQ_INIT(&V_pf_pabuf); V_pf_altqs_active = &V_pf_altqs[0]; V_pf_altq_ifs_active = &V_pf_altqs[1]; V_pf_altqs_inactive = &V_pf_altqs[2]; V_pf_altq_ifs_inactive = &V_pf_altqs[3]; /* Send & overload+flush queues. */ STAILQ_INIT(&V_pf_sendqueue); SLIST_INIT(&V_pf_overloadqueue); TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet); /* Unlinked, but may be referenced rules. 
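 * They are freed by pf_purge_unlinked_rules() once its mark-and-sweep
 * pass finds no state or source node still referencing them.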
*/ TAILQ_INIT(&V_pf_unlinked_rules); } void pf_mtag_cleanup(void) { uma_zdestroy(pf_mtag_z); } void pf_cleanup(void) { struct pf_keyhash *kh; struct pf_idhash *ih; struct pf_srchash *sh; struct pf_send_entry *pfse, *next; u_int i; for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask; i++, kh++, ih++) { KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty", __func__)); KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty", __func__)); mtx_destroy(&kh->lock); mtx_destroy(&ih->lock); } free(V_pf_keyhash, M_PFHASH); free(V_pf_idhash, M_PFHASH); for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { KASSERT(LIST_EMPTY(&sh->nodes), ("%s: source node hash not empty", __func__)); mtx_destroy(&sh->lock); } free(V_pf_srchash, M_PFHASH); STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) { m_freem(pfse->pfse_m); free(pfse, M_PFTEMP); } uma_zdestroy(V_pf_sources_z); uma_zdestroy(V_pf_state_z); uma_zdestroy(V_pf_state_key_z); } static int pf_mtag_uminit(void *mem, int size, int how) { struct m_tag *t; t = (struct m_tag *)mem; t->m_tag_cookie = MTAG_ABI_COMPAT; t->m_tag_id = PACKET_TAG_PF; t->m_tag_len = sizeof(struct pf_mtag); t->m_tag_free = pf_mtag_free; return (0); } static void pf_mtag_free(struct m_tag *t) { uma_zfree(pf_mtag_z, t); } struct pf_mtag * pf_get_mtag(struct mbuf *m) { struct m_tag *mtag; if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL) return ((struct pf_mtag *)(mtag + 1)); mtag = uma_zalloc(pf_mtag_z, M_NOWAIT); if (mtag == NULL) return (NULL); bzero(mtag + 1, sizeof(struct pf_mtag)); m_tag_prepend(m, mtag); return ((struct pf_mtag *)(mtag + 1)); } static int pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s) { struct pf_keyhash *khs, *khw, *kh; struct pf_state_key *sk, *cur; struct pf_kstate *si, *olds = NULL; int idx; KASSERT(s->refs == 0, ("%s: state not pristine", __func__)); KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__)); KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__)); /* * We need to lock hash slots of both keys. To avoid deadlock * we always lock the slot with lower address first. Unlock order * isn't important. * * We also need to lock ID hash slot before dropping key * locks. On success we return with ID hash slot locked. */ if (skw == sks) { khs = khw = &V_pf_keyhash[pf_hashkey(skw)]; PF_HASHROW_LOCK(khs); } else { khs = &V_pf_keyhash[pf_hashkey(sks)]; khw = &V_pf_keyhash[pf_hashkey(skw)]; if (khs == khw) { PF_HASHROW_LOCK(khs); } else if (khs < khw) { PF_HASHROW_LOCK(khs); PF_HASHROW_LOCK(khw); } else { PF_HASHROW_LOCK(khw); PF_HASHROW_LOCK(khs); } } #define KEYS_UNLOCK() do { \ if (khs != khw) { \ PF_HASHROW_UNLOCK(khs); \ PF_HASHROW_UNLOCK(khw); \ } else \ PF_HASHROW_UNLOCK(khs); \ } while (0) /* * First run: start with wire key. */ sk = skw; kh = khw; idx = PF_SK_WIRE; MPASS(s->lock == NULL); s->lock = &V_pf_idhash[PF_IDHASH(s)].lock; keyattach: LIST_FOREACH(cur, &kh->keys, entry) if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0) break; if (cur != NULL) { /* Key exists. Check for same kif, if none, add to key. */ TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) { struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)]; PF_HASHROW_LOCK(ih); if (si->kif == s->kif && si->direction == s->direction) { if (sk->proto == IPPROTO_TCP && si->src.state >= TCPS_FIN_WAIT_2 && si->dst.state >= TCPS_FIN_WAIT_2) { /* * New state matches an old >FIN_WAIT_2 * state. We can't drop key hash locks, * thus we can't unlink it properly. 
* * As a workaround we drop it into * TCPS_CLOSED state, schedule purge * ASAP and push it into the very end * of the slot TAILQ, so that it won't * conflict with our new state. */ pf_set_protostate(si, PF_PEER_BOTH, TCPS_CLOSED); si->timeout = PFTM_PURGE; olds = si; } else { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: %s key attach " "failed on %s: ", (idx == PF_SK_WIRE) ? "wire" : "stack", s->kif->pfik_name); pf_print_state_parts(s, (idx == PF_SK_WIRE) ? sk : NULL, (idx == PF_SK_STACK) ? sk : NULL); printf(", existing: "); pf_print_state_parts(si, (idx == PF_SK_WIRE) ? sk : NULL, (idx == PF_SK_STACK) ? sk : NULL); printf("\n"); } PF_HASHROW_UNLOCK(ih); KEYS_UNLOCK(); uma_zfree(V_pf_state_key_z, sk); if (idx == PF_SK_STACK) pf_detach_state(s); return (EEXIST); /* collision! */ } } PF_HASHROW_UNLOCK(ih); } uma_zfree(V_pf_state_key_z, sk); s->key[idx] = cur; } else { LIST_INSERT_HEAD(&kh->keys, sk, entry); s->key[idx] = sk; } stateattach: /* List is sorted, if-bound states before floating. */ if (s->kif == V_pfi_all) TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]); else TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]); if (olds) { TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]); TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds, key_list[idx]); olds = NULL; } /* * Attach done. Decide whether (and how) we should * attach a second key. */ if (sks == skw) { s->key[PF_SK_STACK] = s->key[PF_SK_WIRE]; idx = PF_SK_STACK; sks = NULL; goto stateattach; } else if (sks != NULL) { /* * Continue attaching with stack key. */ sk = sks; kh = khs; idx = PF_SK_STACK; sks = NULL; goto keyattach; } PF_STATE_LOCK(s); KEYS_UNLOCK(); KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL, ("%s failure", __func__)); return (0); #undef KEYS_UNLOCK } static void pf_detach_state(struct pf_kstate *s) { struct pf_state_key *sks = s->key[PF_SK_STACK]; struct pf_keyhash *kh; if (sks != NULL) { kh = &V_pf_keyhash[pf_hashkey(sks)]; PF_HASHROW_LOCK(kh); if (s->key[PF_SK_STACK] != NULL) pf_state_key_detach(s, PF_SK_STACK); /* * If both point to the same key, then we are done.
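 * (The wire and stack slots alias a single pf_state_key when no
 * translation rewrote the packet, so the key must only be detached
 * once.)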
*/ if (sks == s->key[PF_SK_WIRE]) { pf_state_key_detach(s, PF_SK_WIRE); PF_HASHROW_UNLOCK(kh); return; } PF_HASHROW_UNLOCK(kh); } if (s->key[PF_SK_WIRE] != NULL) { kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])]; PF_HASHROW_LOCK(kh); if (s->key[PF_SK_WIRE] != NULL) pf_state_key_detach(s, PF_SK_WIRE); PF_HASHROW_UNLOCK(kh); } } static void pf_state_key_detach(struct pf_kstate *s, int idx) { struct pf_state_key *sk = s->key[idx]; #ifdef INVARIANTS struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)]; PF_HASHROW_ASSERT(kh); #endif TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]); s->key[idx] = NULL; if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) { LIST_REMOVE(sk, entry); uma_zfree(V_pf_state_key_z, sk); } } static int pf_state_key_ctor(void *mem, int size, void *arg, int flags) { struct pf_state_key *sk = mem; bzero(sk, sizeof(struct pf_state_key_cmp)); TAILQ_INIT(&sk->states[PF_SK_WIRE]); TAILQ_INIT(&sk->states[PF_SK_STACK]); return (0); } struct pf_state_key * pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t sport, u_int16_t dport) { struct pf_state_key *sk; sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); if (sk == NULL) return (NULL); PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af); PF_ACPY(&sk->addr[pd->didx], daddr, pd->af); sk->port[pd->sidx] = sport; sk->port[pd->didx] = dport; sk->proto = pd->proto; sk->af = pd->af; return (sk); } struct pf_state_key * pf_state_key_clone(struct pf_state_key *orig) { struct pf_state_key *sk; sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); if (sk == NULL) return (NULL); bcopy(orig, sk, sizeof(struct pf_state_key_cmp)); return (sk); } int pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif, struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s) { struct pf_idhash *ih; struct pf_kstate *cur; int error; KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]), ("%s: sks not pristine", __func__)); KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]), ("%s: skw not pristine", __func__)); KASSERT(s->refs == 0, ("%s: state not pristine", __func__)); s->kif = kif; s->orig_kif = orig_kif; if (s->id == 0 && s->creatorid == 0) { s->id = alloc_unr64(&V_pf_stateid); s->id = htobe64(s->id); s->creatorid = V_pf_status.hostid; } /* Returns with ID locked on success. */ if ((error = pf_state_key_attach(skw, sks, s)) != 0) return (error); ih = &V_pf_idhash[PF_IDHASH(s)]; PF_HASHROW_ASSERT(ih); LIST_FOREACH(cur, &ih->states, entry) if (cur->id == s->id && cur->creatorid == s->creatorid) break; if (cur != NULL) { PF_HASHROW_UNLOCK(ih); if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: state ID collision: " "id: %016llx creatorid: %08x\n", (unsigned long long)be64toh(s->id), ntohl(s->creatorid)); } pf_detach_state(s); return (EEXIST); } LIST_INSERT_HEAD(&ih->states, s, entry); /* One for keys, one for ID hash. */ refcount_init(&s->refs, 2); pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1); if (V_pfsync_insert_state_ptr != NULL) V_pfsync_insert_state_ptr(s); /* Returns locked. */ return (0); } /* * Find state by ID: returns with locked row on success. 
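 * (The row mutex doubles as the state lock, so callers release it
 * with PF_STATE_UNLOCK().)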
*/ struct pf_kstate * pf_find_state_byid(uint64_t id, uint32_t creatorid) { struct pf_idhash *ih; struct pf_kstate *s; pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1); ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))]; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) if (s->id == id && s->creatorid == creatorid) break; if (s == NULL) PF_HASHROW_UNLOCK(ih); return (s); } /* * Find state by key. * Returns with ID hash slot locked on success. */ static struct pf_kstate * pf_find_state(struct pfi_kkif *kif, struct pf_state_key_cmp *key, u_int dir) { struct pf_keyhash *kh; struct pf_state_key *sk; struct pf_kstate *s; int idx; pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1); kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)]; PF_HASHROW_LOCK(kh); LIST_FOREACH(sk, &kh->keys, entry) if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0) break; if (sk == NULL) { PF_HASHROW_UNLOCK(kh); return (NULL); } idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK); /* List is sorted, if-bound states before floating ones. */ TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) if (s->kif == V_pfi_all || s->kif == kif) { PF_STATE_LOCK(s); PF_HASHROW_UNLOCK(kh); if (__predict_false(s->timeout >= PFTM_MAX)) { /* * State is either being processed by * pf_unlink_state() in another thread, or * is scheduled for immediate expiry. */ PF_STATE_UNLOCK(s); return (NULL); } return (s); } PF_HASHROW_UNLOCK(kh); return (NULL); } /* * Returns with ID hash slot locked on success. */ struct pf_kstate * pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) { struct pf_keyhash *kh; struct pf_state_key *sk; struct pf_kstate *s, *ret = NULL; int idx, inout = 0; pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1); kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)]; PF_HASHROW_LOCK(kh); LIST_FOREACH(sk, &kh->keys, entry) if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0) break; if (sk == NULL) { PF_HASHROW_UNLOCK(kh); return (NULL); } switch (dir) { case PF_IN: idx = PF_SK_WIRE; break; case PF_OUT: idx = PF_SK_STACK; break; case PF_INOUT: idx = PF_SK_WIRE; inout = 1; break; default: panic("%s: dir %u", __func__, dir); } second_run: TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) { if (more == NULL) { PF_STATE_LOCK(s); PF_HASHROW_UNLOCK(kh); return (s); } if (ret) (*more)++; else { ret = s; PF_STATE_LOCK(s); } } if (inout == 1) { inout = 0; idx = PF_SK_STACK; goto second_run; } PF_HASHROW_UNLOCK(kh); return (ret); } /* * FIXME * This routine is inefficient -- locks the state only to unlock immediately on * return. * It is racy -- after the state is unlocked nothing stops other threads from * removing it. */ bool pf_find_state_all_exists(struct pf_state_key_cmp *key, u_int dir) { struct pf_kstate *s; s = pf_find_state_all(key, dir, NULL); if (s != NULL) { PF_STATE_UNLOCK(s); return (true); } return (false); } /* END state table stuff */ static void pf_send(struct pf_send_entry *pfse) { PF_SENDQ_LOCK(); STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next); PF_SENDQ_UNLOCK(); swi_sched(V_pf_swi_cookie, 0); } static bool pf_isforlocal(struct mbuf *m, int af) { switch (af) { #ifdef INET case AF_INET: { struct ip *ip = mtod(m, struct ip *); return (in_localip(ip->ip_dst)); } #endif #ifdef INET6 case AF_INET6: { struct ip6_hdr *ip6; struct in6_ifaddr *ia; ip6 = mtod(m, struct ip6_hdr *); ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false); if (ia == NULL) return (false); return (!
(ia->ia6_flags & IN6_IFF_NOTREADY)); } #endif default: panic("Unsupported af %d", af); } return (false); } void pf_intr(void *v) { struct epoch_tracker et; struct pf_send_head queue; struct pf_send_entry *pfse, *next; CURVNET_SET((struct vnet *)v); PF_SENDQ_LOCK(); queue = V_pf_sendqueue; STAILQ_INIT(&V_pf_sendqueue); PF_SENDQ_UNLOCK(); NET_EPOCH_ENTER(et); STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) { switch (pfse->pfse_type) { #ifdef INET case PFSE_IP: { if (pf_isforlocal(pfse->pfse_m, AF_INET)) { pfse->pfse_m->m_flags |= M_SKIP_FIREWALL; pfse->pfse_m->m_pkthdr.csum_flags |= CSUM_IP_VALID | CSUM_IP_CHECKED; ip_input(pfse->pfse_m); } else { ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL); } break; } case PFSE_ICMP: icmp_error(pfse->pfse_m, pfse->icmpopts.type, pfse->icmpopts.code, 0, pfse->icmpopts.mtu); break; #endif /* INET */ #ifdef INET6 case PFSE_IP6: if (pf_isforlocal(pfse->pfse_m, AF_INET6)) { pfse->pfse_m->m_flags |= M_SKIP_FIREWALL; ip6_input(pfse->pfse_m); } else { ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL, NULL); } break; case PFSE_ICMP6: icmp6_error(pfse->pfse_m, pfse->icmpopts.type, pfse->icmpopts.code, pfse->icmpopts.mtu); break; #endif /* INET6 */ default: panic("%s: unknown type", __func__); } free(pfse, M_PFTEMP); } NET_EPOCH_EXIT(et); CURVNET_RESTORE(); } #define pf_purge_thread_period (hz / 10) #ifdef PF_WANT_32_TO_64_COUNTER static void pf_status_counter_u64_periodic(void) { PF_RULES_RASSERT(); if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) { return; } for (int i = 0; i < FCNT_MAX; i++) { pf_counter_u64_periodic(&V_pf_status.fcounters[i]); } } static void pf_kif_counter_u64_periodic(void) { struct pfi_kkif *kif; size_t r, run; PF_RULES_RASSERT(); if (__predict_false(V_pf_allkifcount == 0)) { return; } if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) { return; } run = V_pf_allkifcount / 10; if (run < 5) run = 5; for (r = 0; r < run; r++) { kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist); if (kif == NULL) { LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); break; } LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist); for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 2; k++) { pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]); pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]); } } } } } static void pf_rule_counter_u64_periodic(void) { struct pf_krule *rule; size_t r, run; PF_RULES_RASSERT(); if (__predict_false(V_pf_allrulecount == 0)) { return; } if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) { return; } run = V_pf_allrulecount / 10; if (run < 5) run = 5; for (r = 0; r < run; r++) { rule = LIST_NEXT(V_pf_rulemarker, allrulelist); if (rule == NULL) { LIST_REMOVE(V_pf_rulemarker, allrulelist); LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); break; } LIST_REMOVE(V_pf_rulemarker, allrulelist); LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist); pf_counter_u64_periodic(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_periodic(&rule->packets[i]); pf_counter_u64_periodic(&rule->bytes[i]); } } } static void pf_counter_u64_periodic_main(void) { PF_RULES_RLOCK_TRACKER; V_pf_counter_periodic_iter++; PF_RULES_RLOCK(); pf_counter_u64_critical_enter(); pf_status_counter_u64_periodic(); pf_kif_counter_u64_periodic(); pf_rule_counter_u64_periodic(); pf_counter_u64_critical_exit(); 
PF_RULES_RUNLOCK(); } #else #define pf_counter_u64_periodic_main() do { } while (0) #endif void pf_purge_thread(void *unused __unused) { VNET_ITERATOR_DECL(vnet_iter); sx_xlock(&pf_end_lock); while (pf_end_threads == 0) { sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period); VNET_LIST_RLOCK(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); /* Wait until V_pf_default_rule is initialized. */ if (V_pf_vnet_active == 0) { CURVNET_RESTORE(); continue; } pf_counter_u64_periodic_main(); /* * Process 1/interval fraction of the state * table every run. */ V_pf_purge_idx = pf_purge_expired_states(V_pf_purge_idx, pf_hashmask / (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10)); /* * Purge other expired types every * PFTM_INTERVAL seconds. */ if (V_pf_purge_idx == 0) { /* * Order is important: * - states and src nodes reference rules * - states and rules reference kifs */ pf_purge_expired_fragments(); pf_purge_expired_src_nodes(); pf_purge_unlinked_rules(); pfi_kkif_purge(); } CURVNET_RESTORE(); } VNET_LIST_RUNLOCK(); } pf_end_threads++; sx_xunlock(&pf_end_lock); kproc_exit(0); } void pf_unload_vnet_purge(void) { /* * To clean up all kifs and rules we need * two runs: the first one clears reference flags, * after which pf_purge_expired_states() doesn't * raise them again, and the second run frees. */ pf_purge_unlinked_rules(); pfi_kkif_purge(); /* * Now purge everything. */ pf_purge_expired_states(0, pf_hashmask); pf_purge_fragments(UINT_MAX); pf_purge_expired_src_nodes(); /* * Now all kifs & rules should be unreferenced, * thus should be successfully freed. */ pf_purge_unlinked_rules(); pfi_kkif_purge(); } u_int32_t pf_state_expires(const struct pf_kstate *state) { u_int32_t timeout; u_int32_t start; u_int32_t end; u_int32_t states; /* handle all PFTM_* > PFTM_MAX here */ if (state->timeout == PFTM_PURGE) return (time_uptime); KASSERT(state->timeout != PFTM_UNLINKED, ("pf_state_expires: timeout == PFTM_UNLINKED")); KASSERT((state->timeout < PFTM_MAX), ("pf_state_expires: timeout > PFTM_MAX")); timeout = state->rule.ptr->timeout[state->timeout]; if (!timeout) timeout = V_pf_default_rule.timeout[state->timeout]; start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START]; if (start && state->rule.ptr != &V_pf_default_rule) { end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END]; states = counter_u64_fetch(state->rule.ptr->states_cur); } else { start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START]; end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END]; states = V_pf_status.states; } if (end && states > start && start < end) { if (states < end) { timeout = (u_int64_t)timeout * (end - states) / (end - start); return (state->expire + timeout); } else return (time_uptime); } return (state->expire + timeout); } void pf_purge_expired_src_nodes(void) { struct pf_ksrc_node_list freelist; struct pf_srchash *sh; struct pf_ksrc_node *cur, *next; int i; LIST_INIT(&freelist); for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next) if (cur->states == 0 && cur->expire <= time_uptime) { pf_unlink_src_node(cur); LIST_INSERT_HEAD(&freelist, cur, entry); } else if (cur->rule.ptr != NULL) cur->rule.ptr->rule_ref |= PFRULE_REFS; PF_HASHROW_UNLOCK(sh); } pf_free_src_nodes(&freelist); V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z); } static void pf_src_tree_remove_state(struct pf_kstate *s) { struct pf_ksrc_node *sn; struct pf_srchash *sh; uint32_t timeout; timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
s->rule.ptr->timeout[PFTM_SRC_NODE] : V_pf_default_rule.timeout[PFTM_SRC_NODE]; if (s->src_node != NULL) { sn = s->src_node; sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)]; PF_HASHROW_LOCK(sh); if (s->src.tcp_est) --sn->conn; if (--sn->states == 0) sn->expire = time_uptime + timeout; PF_HASHROW_UNLOCK(sh); } if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) { sn = s->nat_src_node; sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)]; PF_HASHROW_LOCK(sh); if (--sn->states == 0) sn->expire = time_uptime + timeout; PF_HASHROW_UNLOCK(sh); } s->src_node = s->nat_src_node = NULL; } /* * Unlink and potentially free a state. Function may be * called with ID hash row locked, but always returns * unlocked, since it needs to go through key hash locking. */ int pf_unlink_state(struct pf_kstate *s) { struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)]; PF_HASHROW_ASSERT(ih); if (s->timeout == PFTM_UNLINKED) { /* * State is being processed * by pf_unlink_state() in * another thread. */ PF_HASHROW_UNLOCK(ih); return (0); /* XXXGL: undefined actually */ } if (s->src.state == PF_TCPS_PROXY_DST) { /* XXX wire key the right one? */ pf_send_tcp(s->rule.ptr, s->key[PF_SK_WIRE]->af, &s->key[PF_SK_WIRE]->addr[1], &s->key[PF_SK_WIRE]->addr[0], s->key[PF_SK_WIRE]->port[1], s->key[PF_SK_WIRE]->port[0], s->src.seqhi, s->src.seqlo + 1, - TH_RST|TH_ACK, 0, 0, 0, 1, s->tag); + TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, s->rtableid); } LIST_REMOVE(s, entry); pf_src_tree_remove_state(s); if (V_pfsync_delete_state_ptr != NULL) V_pfsync_delete_state_ptr(s); STATE_DEC_COUNTERS(s); s->timeout = PFTM_UNLINKED; /* Ensure we remove it from the list of halfopen states, if needed. */ if (s->key[PF_SK_STACK] != NULL && s->key[PF_SK_STACK]->proto == IPPROTO_TCP) pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED); PF_HASHROW_UNLOCK(ih); pf_detach_state(s); /* pf_state_insert() initialises refs to 2 */ return (pf_release_staten(s, 2)); } struct pf_kstate * pf_alloc_state(int flags) { return (uma_zalloc(V_pf_state_z, flags | M_ZERO)); } void pf_free_state(struct pf_kstate *cur) { + struct pf_krule_item *ri; KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur)); KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__, cur->timeout)); + while ((ri = SLIST_FIRST(&cur->match_rules))) { + SLIST_REMOVE_HEAD(&cur->match_rules, entry); + free(ri, M_PF_RULE_ITEM); + } + pf_normalize_tcp_cleanup(cur); uma_zfree(V_pf_state_z, cur); pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1); } /* * Called only from pf_purge_thread(), thus serialized. */ static u_int pf_purge_expired_states(u_int i, int maxcheck) { struct pf_idhash *ih; struct pf_kstate *s; + struct pf_krule_item *mrm; V_pf_status.states = uma_zone_get_cur(V_pf_state_z); /* * Go through hash and unlink states that expire now. */ while (maxcheck > 0) { ih = &V_pf_idhash[i]; /* only take the lock if we expect to do work */ if (!LIST_EMPTY(&ih->states)) { relock: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (pf_state_expires(s) <= time_uptime) { V_pf_status.states -= pf_unlink_state(s); goto relock; } s->rule.ptr->rule_ref |= PFRULE_REFS; if (s->nat_rule.ptr != NULL) s->nat_rule.ptr->rule_ref |= PFRULE_REFS; if (s->anchor.ptr != NULL) s->anchor.ptr->rule_ref |= PFRULE_REFS; s->kif->pfik_flags |= PFI_IFLAG_REFS; + SLIST_FOREACH(mrm, &s->match_rules, entry) + mrm->r->rule_ref |= PFRULE_REFS; if (s->rt_kif) s->rt_kif->pfik_flags |= PFI_IFLAG_REFS; } PF_HASHROW_UNLOCK(ih); } /* Return when we hit end of hash.
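 * Returning 0 also makes pf_purge_thread() run the per-interval
 * purges (fragments, source nodes, rules, kifs) on its next pass.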
*/ if (++i > pf_hashmask) { V_pf_status.states = uma_zone_get_cur(V_pf_state_z); return (0); } maxcheck--; } V_pf_status.states = uma_zone_get_cur(V_pf_state_z); return (i); } static void pf_purge_unlinked_rules(void) { struct pf_krulequeue tmpq; struct pf_krule *r, *r1; /* * If we have an overloading task pending, then we'd * better skip purging this time: there is a tiny * probability that the overloading task references * an already unlinked rule. */ PF_OVERLOADQ_LOCK(); if (!SLIST_EMPTY(&V_pf_overloadqueue)) { PF_OVERLOADQ_UNLOCK(); return; } PF_OVERLOADQ_UNLOCK(); /* * Do naive mark-and-sweep garbage collection of old rules. * The reference flag is raised by pf_purge_expired_states() * and pf_purge_expired_src_nodes(). * * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK, * use a temporary queue. */ TAILQ_INIT(&tmpq); PF_UNLNKDRULES_LOCK(); TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) { if (!(r->rule_ref & PFRULE_REFS)) { TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries); TAILQ_INSERT_TAIL(&tmpq, r, entries); } else r->rule_ref &= ~PFRULE_REFS; } PF_UNLNKDRULES_UNLOCK(); if (!TAILQ_EMPTY(&tmpq)) { PF_CONFIG_LOCK(); PF_RULES_WLOCK(); TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) { TAILQ_REMOVE(&tmpq, r, entries); pf_free_rule(r); } PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); } } void pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: { u_int32_t a = ntohl(addr->addr32[0]); printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255, (a>>8)&255, a&255); if (p) { p = ntohs(p); printf(":%u", p); } break; } #endif /* INET */ #ifdef INET6 case AF_INET6: { u_int16_t b; u_int8_t i, curstart, curend, maxstart, maxend; curstart = curend = maxstart = maxend = 255; for (i = 0; i < 8; i++) { if (!addr->addr16[i]) { if (curstart == 255) curstart = i; curend = i; } else { if ((curend - curstart) > (maxend - maxstart)) { maxstart = curstart; maxend = curend; } curstart = curend = 255; } } if ((curend - curstart) > (maxend - maxstart)) { maxstart = curstart; maxend = curend; } for (i = 0; i < 8; i++) { if (i >= maxstart && i <= maxend) { if (i == 0) printf(":"); if (i == maxend) printf(":"); } else { b = ntohs(addr->addr16[i]); printf("%x", b); if (i < 7) printf(":"); } } if (p) { p = ntohs(p); printf("[%u]", p); } break; } #endif /* INET6 */ } } void pf_print_state(struct pf_kstate *s) { pf_print_state_parts(s, NULL, NULL); } static void pf_print_state_parts(struct pf_kstate *s, struct pf_state_key *skwp, struct pf_state_key *sksp) { struct pf_state_key *skw, *sks; u_int8_t proto, dir; /* Do our best to fill these, but they're skipped if NULL */ skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL); sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL); proto = skw ? skw->proto : (sks ? sks->proto : 0); dir = s ?
s->direction : 0; switch (proto) { case IPPROTO_IPV4: printf("IPv4"); break; case IPPROTO_IPV6: printf("IPv6"); break; case IPPROTO_TCP: printf("TCP"); break; case IPPROTO_UDP: printf("UDP"); break; case IPPROTO_ICMP: printf("ICMP"); break; case IPPROTO_ICMPV6: printf("ICMPv6"); break; default: printf("%u", proto); break; } switch (dir) { case PF_IN: printf(" in"); break; case PF_OUT: printf(" out"); break; } if (skw) { printf(" wire: "); pf_print_host(&skw->addr[0], skw->port[0], skw->af); printf(" "); pf_print_host(&skw->addr[1], skw->port[1], skw->af); } if (sks) { printf(" stack: "); if (sks != skw) { pf_print_host(&sks->addr[0], sks->port[0], sks->af); printf(" "); pf_print_host(&sks->addr[1], sks->port[1], sks->af); } else printf("-"); } if (s) { if (proto == IPPROTO_TCP) { printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo, s->src.seqhi, s->src.max_win, s->src.seqdiff); if (s->src.wscale && s->dst.wscale) printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK); printf("]"); printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo, s->dst.seqhi, s->dst.max_win, s->dst.seqdiff); if (s->src.wscale && s->dst.wscale) printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK); printf("]"); } printf(" %u:%u", s->src.state, s->dst.state); } } void pf_print_flags(u_int8_t f) { if (f) printf(" "); if (f & TH_FIN) printf("F"); if (f & TH_SYN) printf("S"); if (f & TH_RST) printf("R"); if (f & TH_PUSH) printf("P"); if (f & TH_ACK) printf("A"); if (f & TH_URG) printf("U"); if (f & TH_ECE) printf("E"); if (f & TH_CWR) printf("W"); } #define PF_SET_SKIP_STEPS(i) \ do { \ while (head[i] != cur) { \ head[i]->skip[i].ptr = cur; \ head[i] = TAILQ_NEXT(head[i], entries); \ } \ } while (0) void pf_calc_skip_steps(struct pf_krulequeue *rules) { struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT]; int i; cur = TAILQ_FIRST(rules); prev = cur; for (i = 0; i < PF_SKIP_COUNT; ++i) head[i] = cur; while (cur != NULL) { if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) PF_SET_SKIP_STEPS(PF_SKIP_IFP); if (cur->direction != prev->direction) PF_SET_SKIP_STEPS(PF_SKIP_DIR); if (cur->af != prev->af) PF_SET_SKIP_STEPS(PF_SKIP_AF); if (cur->proto != prev->proto) PF_SET_SKIP_STEPS(PF_SKIP_PROTO); if (cur->src.neg != prev->src.neg || pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR); if (cur->src.port[0] != prev->src.port[0] || cur->src.port[1] != prev->src.port[1] || cur->src.port_op != prev->src.port_op) PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT); if (cur->dst.neg != prev->dst.neg || pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR); if (cur->dst.port[0] != prev->dst.port[0] || cur->dst.port[1] != prev->dst.port[1] || cur->dst.port_op != prev->dst.port_op) PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); prev = cur; cur = TAILQ_NEXT(cur, entries); } for (i = 0; i < PF_SKIP_COUNT; ++i) PF_SET_SKIP_STEPS(i); } static int pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2) { if (aw1->type != aw2->type) return (1); switch (aw1->type) { case PF_ADDR_ADDRMASK: case PF_ADDR_RANGE: if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6)) return (1); if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6)) return (1); return (0); case PF_ADDR_DYNIFTL: return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt); case PF_ADDR_NOROUTE: case PF_ADDR_URPFFAILED: return (0); case PF_ADDR_TABLE: return (aw1->p.tbl != aw2->p.tbl); default: printf("invalid address type: %d\n", aw1->type); return (1); } } /** * Checksum updates are a little complicated because the checksum 
in the TCP/UDP * header isn't always a full checksum. In some cases (i.e. output) it's a * pseudo-header checksum, which is a partial checksum over src/dst IP * addresses, protocol number and length. * * That means we have the following cases: * * Input or forwarding: we don't have TSO, the checksum fields are full * checksums, we need to update the checksum whenever we change anything. * * Output (i.e. the checksum is a pseudo-header checksum): * x The field being updated is src/dst address or affects the length of * the packet. We need to update the pseudo-header checksum (note that this * checksum is not ones' complement). * x Some other field is being modified (e.g. src/dst port numbers): We * don't have to update anything. **/ u_int16_t pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp) { u_int32_t x; x = cksum + old - new; x = (x + (x >> 16)) & 0xffff; /* optimise: eliminate a branch when not udp */ if (udp && cksum == 0x0000) return cksum; if (udp && x == 0x0000) x = 0xffff; return (u_int16_t)(x); } static void pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi, u_int8_t udp) { u_int16_t old = htons(hi ? (*f << 8) : *f); u_int16_t new = htons(hi ? ( v << 8) : v); if (*f == v) return; *f = v; if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) return; *cksum = pf_cksum_fixup(*cksum, old, new, udp); } void pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v, bool hi, u_int8_t udp) { u_int8_t *fb = (u_int8_t *)f; u_int8_t *vb = (u_int8_t *)&v; pf_patch_8(m, cksum, fb++, *vb++, hi, udp); pf_patch_8(m, cksum, fb++, *vb++, !hi, udp); } void pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v, bool hi, u_int8_t udp) { u_int8_t *fb = (u_int8_t *)f; u_int8_t *vb = (u_int8_t *)&v; pf_patch_8(m, cksum, fb++, *vb++, hi, udp); pf_patch_8(m, cksum, fb++, *vb++, !hi, udp); pf_patch_8(m, cksum, fb++, *vb++, hi, udp); pf_patch_8(m, cksum, fb++, *vb++, !hi, udp); } u_int16_t pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp) { if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) return (cksum); return (pf_cksum_fixup(cksum, old, new, udp)); } static void pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af) { struct pf_addr ao; u_int16_t po = *p; PF_ACPY(&ao, a, af); PF_ACPY(a, an, af); if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) *pc = ~*pc; *p = pn; switch (af) { #ifdef INET case AF_INET: *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, ao.addr16[0], an->addr16[0], 0), ao.addr16[1], an->addr16[1], 0); *p = pn; *pc = pf_cksum_fixup(pf_cksum_fixup(*pc, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u); *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u); break; #endif /* INET */ #ifdef INET6 case AF_INET6: *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*pc, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u), ao.addr16[2], an->addr16[2], u), ao.addr16[3], an->addr16[3], u), ao.addr16[4], an->addr16[4], u), ao.addr16[5], an->addr16[5], u), ao.addr16[6], an->addr16[6], u), ao.addr16[7], an->addr16[7], u); *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u); break; #endif /* INET6 */ } if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) { *pc = ~*pc; if (! 
*pc) *pc = 0xffff; } } /* Changes a u_int32_t. Uses a void * so there are no align restrictions */ void pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) { u_int32_t ao; memcpy(&ao, a, sizeof(ao)); memcpy(a, &an, sizeof(u_int32_t)); *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u), ao % 65536, an % 65536, u); } void pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp) { u_int32_t ao; memcpy(&ao, a, sizeof(ao)); memcpy(a, &an, sizeof(u_int32_t)); *c = pf_proto_cksum_fixup(m, pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp), ao % 65536, an % 65536, udp); } #ifdef INET6 static void pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) { struct pf_addr ao; PF_ACPY(&ao, a, AF_INET6); PF_ACPY(a, an, AF_INET6); *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*c, ao.addr16[0], an->addr16[0], u), ao.addr16[1], an->addr16[1], u), ao.addr16[2], an->addr16[2], u), ao.addr16[3], an->addr16[3], u), ao.addr16[4], an->addr16[4], u), ao.addr16[5], an->addr16[5], u), ao.addr16[6], an->addr16[6], u), ao.addr16[7], an->addr16[7], u); } #endif /* INET6 */ static void pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af) { struct pf_addr oia, ooa; PF_ACPY(&oia, ia, af); if (oa) PF_ACPY(&ooa, oa, af); /* Change inner protocol port, fix inner protocol checksum. */ if (ip != NULL) { u_int16_t oip = *ip; u_int32_t opc; if (pc != NULL) opc = *pc; *ip = np; if (pc != NULL) *pc = pf_cksum_fixup(*pc, oip, *ip, u); *ic = pf_cksum_fixup(*ic, oip, *ip, 0); if (pc != NULL) *ic = pf_cksum_fixup(*ic, opc, *pc, 0); } /* Change inner ip address, fix inner ip and icmp checksums. */ PF_ACPY(ia, na, af); switch (af) { #ifdef INET case AF_INET: { u_int32_t oh2c = *h2c; *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c, oia.addr16[0], ia->addr16[0], 0), oia.addr16[1], ia->addr16[1], 0); *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, oia.addr16[0], ia->addr16[0], 0), oia.addr16[1], ia->addr16[1], 0); *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0); break; } #endif /* INET */ #ifdef INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*ic, oia.addr16[0], ia->addr16[0], u), oia.addr16[1], ia->addr16[1], u), oia.addr16[2], ia->addr16[2], u), oia.addr16[3], ia->addr16[3], u), oia.addr16[4], ia->addr16[4], u), oia.addr16[5], ia->addr16[5], u), oia.addr16[6], ia->addr16[6], u), oia.addr16[7], ia->addr16[7], u); break; #endif /* INET6 */ } /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. 
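 * (IPv6 carries no IP header checksum, so for AF_INET6 the address
 * delta is folded into the ICMPv6 checksum instead.)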
*/ if (oa) { PF_ACPY(oa, na, af); switch (af) { #ifdef INET case AF_INET: *hc = pf_cksum_fixup(pf_cksum_fixup(*hc, ooa.addr16[0], oa->addr16[0], 0), ooa.addr16[1], oa->addr16[1], 0); break; #endif /* INET */ #ifdef INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(*ic, ooa.addr16[0], oa->addr16[0], u), ooa.addr16[1], oa->addr16[1], u), ooa.addr16[2], oa->addr16[2], u), ooa.addr16[3], oa->addr16[3], u), ooa.addr16[4], oa->addr16[4], u), ooa.addr16[5], oa->addr16[5], u), ooa.addr16[6], oa->addr16[6], u), ooa.addr16[7], oa->addr16[7], u); break; #endif /* INET6 */ } } } /* * Need to modulate the sequence numbers in the TCP SACK option * (credits to Krzysztof Pfaff for report and patch) */ static int pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd, struct tcphdr *th, struct pf_state_peer *dst) { int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen; u_int8_t opts[TCP_MAXOLEN], *opt = opts; int copyback = 0, i, olen; struct sackblk sack; #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) if (hlen < TCPOLEN_SACKLEN || !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af)) return 0; while (hlen >= TCPOLEN_SACKLEN) { size_t startoff = opt - opts; olen = opt[1]; switch (*opt) { case TCPOPT_EOL: /* FALLTHROUGH */ case TCPOPT_NOP: opt++; hlen--; break; case TCPOPT_SACK: if (olen > hlen) olen = hlen; if (olen >= TCPOLEN_SACKLEN) { for (i = 2; i + TCPOLEN_SACK <= olen; i += TCPOLEN_SACK) { memcpy(&sack, &opt[i], sizeof(sack)); pf_patch_32_unaligned(m, &th->th_sum, &sack.start, htonl(ntohl(sack.start) - dst->seqdiff), PF_ALGNMNT(startoff), 0); pf_patch_32_unaligned(m, &th->th_sum, &sack.end, htonl(ntohl(sack.end) - dst->seqdiff), PF_ALGNMNT(startoff), 0); memcpy(&opt[i], &sack, sizeof(sack)); } copyback = 1; } /* FALLTHROUGH */ default: if (olen < 2) olen = 2; hlen -= olen; opt += olen; } } if (copyback) m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts); return (copyback); } struct mbuf * pf_build_tcp(const struct pf_krule *r, sa_family_t af, const struct pf_addr *saddr, const struct pf_addr *daddr, u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, - u_int16_t rtag) + u_int16_t rtag, int rtableid) { struct mbuf *m; int len, tlen; #ifdef INET struct ip *h = NULL; #endif /* INET */ #ifdef INET6 struct ip6_hdr *h6 = NULL; #endif /* INET6 */ struct tcphdr *th; char *opt; struct pf_mtag *pf_mtag; len = 0; th = NULL; /* maximum segment size tcp option */ tlen = sizeof(struct tcphdr); if (mss) tlen += 4; switch (af) { #ifdef INET case AF_INET: len = sizeof(struct ip) + tlen; break; #endif /* INET */ #ifdef INET6 case AF_INET6: len = sizeof(struct ip6_hdr) + tlen; break; #endif /* INET6 */ default: panic("%s: unsupported af %d", __func__, af); } m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) return (NULL); #ifdef MAC mac_netinet_firewall_send(m); #endif if ((pf_mtag = pf_get_mtag(m)) == NULL) { m_freem(m); return (NULL); } if (tag) m->m_flags |= M_SKIP_FIREWALL; pf_mtag->tag = rtag; - if (r != NULL && r->rtableid >= 0) - M_SETFIB(m, r->rtableid); + if (rtableid >= 0) + M_SETFIB(m, rtableid); #ifdef ALTQ if (r != NULL && r->qid) { pf_mtag->qid = r->qid; /* add hints for ecn */ pf_mtag->hdr = mtod(m, struct ip *); } #endif /* ALTQ */ m->m_data += max_linkhdr; m->m_pkthdr.len = m->m_len = len; /* The rest of the stack assumes a rcvif, so provide one. * This is a locally generated packet, so .. 
close enough. */ m->m_pkthdr.rcvif = V_loif; bzero(m->m_data, len); switch (af) { #ifdef INET case AF_INET: h = mtod(m, struct ip *); /* IP header fields included in the TCP checksum */ h->ip_p = IPPROTO_TCP; h->ip_len = htons(tlen); h->ip_src.s_addr = saddr->v4.s_addr; h->ip_dst.s_addr = daddr->v4.s_addr; th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); break; #endif /* INET */ #ifdef INET6 case AF_INET6: h6 = mtod(m, struct ip6_hdr *); /* IP header fields included in the TCP checksum */ h6->ip6_nxt = IPPROTO_TCP; h6->ip6_plen = htons(tlen); memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); break; #endif /* INET6 */ } /* TCP header */ th->th_sport = sport; th->th_dport = dport; th->th_seq = htonl(seq); th->th_ack = htonl(ack); th->th_off = tlen >> 2; th->th_flags = flags; th->th_win = htons(win); if (mss) { opt = (char *)(th + 1); opt[0] = TCPOPT_MAXSEG; opt[1] = 4; HTONS(mss); bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2); } switch (af) { #ifdef INET case AF_INET: /* TCP checksum */ th->th_sum = in_cksum(m, len); /* Finish the IP header */ h->ip_v = 4; h->ip_hl = sizeof(*h) >> 2; h->ip_tos = IPTOS_LOWDELAY; h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0); h->ip_len = htons(len); h->ip_ttl = ttl ? ttl : V_ip_defttl; h->ip_sum = 0; break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* TCP checksum */ th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr), tlen); h6->ip6_vfc |= IPV6_VERSION; h6->ip6_hlim = IPV6_DEFHLIM; break; #endif /* INET6 */ } return (m); } void pf_send_tcp(const struct pf_krule *r, sa_family_t af, const struct pf_addr *saddr, const struct pf_addr *daddr, u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, - u_int16_t rtag) + u_int16_t rtag, int rtableid) { struct pf_send_entry *pfse; struct mbuf *m; m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, flags, - win, mss, ttl, tag, rtag); + win, mss, ttl, tag, rtag, rtableid); if (m == NULL) return; /* Allocate outgoing queue entry, mbuf and mbuf tag. 
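 * The packet is not transmitted here: pf_send() only enqueues it and
 * schedules the pf_intr() software interrupt, which performs the
 * actual ip_output()/ip6_output().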
*/ pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); if (pfse == NULL) { m_freem(m); return; } switch (af) { #ifdef INET case AF_INET: pfse->pfse_type = PFSE_IP; break; #endif /* INET */ #ifdef INET6 case AF_INET6: pfse->pfse_type = PFSE_IP6; break; #endif /* INET6 */ } pfse->pfse_m = m; pf_send(pfse); } static void pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd, struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th, struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen, - u_short *reason) + u_short *reason, int rtableid) { struct pf_addr * const saddr = pd->src; struct pf_addr * const daddr = pd->dst; sa_family_t af = pd->af; /* undo NAT changes, if they have taken place */ if (nr != NULL) { PF_ACPY(saddr, &sk->addr[pd->sidx], af); PF_ACPY(daddr, &sk->addr[pd->didx], af); if (pd->sport) *pd->sport = sk->port[pd->sidx]; if (pd->dport) *pd->dport = sk->port[pd->didx]; if (pd->proto_sum) *pd->proto_sum = bproto_sum; if (pd->ip_sum) *pd->ip_sum = bip_sum; m_copyback(m, off, hdrlen, pd->hdr.any); } if (pd->proto == IPPROTO_TCP && ((r->rule_flag & PFRULE_RETURNRST) || (r->rule_flag & PFRULE_RETURN)) && !(th->th_flags & TH_RST)) { u_int32_t ack = ntohl(th->th_seq) + pd->p_len; int len = 0; #ifdef INET struct ip *h4; #endif #ifdef INET6 struct ip6_hdr *h6; #endif switch (af) { #ifdef INET case AF_INET: h4 = mtod(m, struct ip *); len = ntohs(h4->ip_len) - off; break; #endif #ifdef INET6 case AF_INET6: h6 = mtod(m, struct ip6_hdr *); len = ntohs(h6->ip6_plen) - (off - sizeof(*h6)); break; #endif } if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) REASON_SET(reason, PFRES_PROTCKSUM); else { if (th->th_flags & TH_SYN) ack++; if (th->th_flags & TH_FIN) ack++; pf_send_tcp(r, af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, - r->return_ttl, 1, 0); + r->return_ttl, 1, 0, rtableid); } } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && r->return_icmp) pf_send_icmp(m, r->return_icmp >> 8, - r->return_icmp & 255, af, r); + r->return_icmp & 255, af, r, rtableid); else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && r->return_icmp6) pf_send_icmp(m, r->return_icmp6 >> 8, - r->return_icmp6 & 255, af, r); + r->return_icmp6 & 255, af, r, rtableid); } static int pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m) { struct m_tag *mtag; u_int8_t mpcp; mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL); if (mtag == NULL) return (0); if (prio == PF_PRIO_ZERO) prio = 0; mpcp = *(uint8_t *)(mtag + 1); return (mpcp == prio); } static int pf_icmp_to_bandlim(uint8_t type) { switch (type) { case ICMP_ECHO: case ICMP_ECHOREPLY: return (BANDLIM_ICMP_ECHO); case ICMP_TSTAMP: case ICMP_TSTAMPREPLY: return (BANDLIM_ICMP_TSTAMP); case ICMP_UNREACH: default: return (BANDLIM_ICMP_UNREACH); } } static void pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af, - struct pf_krule *r) + struct pf_krule *r, int rtableid) { struct pf_send_entry *pfse; struct mbuf *m0; struct pf_mtag *pf_mtag; /* ICMP packet rate limitation. */ #ifdef INET6 if (af == AF_INET6) { if (icmp6_ratelimit(NULL, type, code)) return; } #endif #ifdef INET if (af == AF_INET) { if (badport_bandlim(pf_icmp_to_bandlim(type)) != 0) return; } #endif /* Allocate outgoing queue entry, mbuf and mbuf tag. 
*/ pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); if (pfse == NULL) return; if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) { free(pfse, M_PFTEMP); return; } if ((pf_mtag = pf_get_mtag(m0)) == NULL) { free(pfse, M_PFTEMP); return; } /* XXX: revisit */ m0->m_flags |= M_SKIP_FIREWALL; - if (r->rtableid >= 0) - M_SETFIB(m0, r->rtableid); + if (rtableid >= 0) + M_SETFIB(m0, rtableid); #ifdef ALTQ if (r->qid) { pf_mtag->qid = r->qid; /* add hints for ecn */ pf_mtag->hdr = mtod(m0, struct ip *); } #endif /* ALTQ */ switch (af) { #ifdef INET case AF_INET: pfse->pfse_type = PFSE_ICMP; break; #endif /* INET */ #ifdef INET6 case AF_INET6: pfse->pfse_type = PFSE_ICMP6; break; #endif /* INET6 */ } pfse->pfse_m = m0; pfse->icmpopts.type = type; pfse->icmpopts.code = code; pf_send(pfse); } /* * Return 1 if the addresses a and b match (with mask m), otherwise return 0. * If n is 0, they match if they are equal. If n is != 0, they match if they * are different. */ int pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, struct pf_addr *b, sa_family_t af) { int match = 0; switch (af) { #ifdef INET case AF_INET: if ((a->addr32[0] & m->addr32[0]) == (b->addr32[0] & m->addr32[0])) match++; break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (((a->addr32[0] & m->addr32[0]) == (b->addr32[0] & m->addr32[0])) && ((a->addr32[1] & m->addr32[1]) == (b->addr32[1] & m->addr32[1])) && ((a->addr32[2] & m->addr32[2]) == (b->addr32[2] & m->addr32[2])) && ((a->addr32[3] & m->addr32[3]) == (b->addr32[3] & m->addr32[3]))) match++; break; #endif /* INET6 */ } if (match) { if (n) return (0); else return (1); } else { if (n) return (1); else return (0); } } /* * Return 1 if b <= a <= e, otherwise return 0. */ int pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, struct pf_addr *a, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) || (ntohl(a->addr32[0]) > ntohl(e->addr32[0]))) return (0); break; #endif /* INET */ #ifdef INET6 case AF_INET6: { int i; /* check a >= b */ for (i = 0; i < 4; ++i) if (ntohl(a->addr32[i]) > ntohl(b->addr32[i])) break; else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i])) return (0); /* check a <= e */ for (i = 0; i < 4; ++i) if (ntohl(a->addr32[i]) < ntohl(e->addr32[i])) break; else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i])) return (0); break; } #endif /* INET6 */ } return (1); } static int pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) { switch (op) { case PF_OP_IRG: return ((p > a1) && (p < a2)); case PF_OP_XRG: return ((p < a1) || (p > a2)); case PF_OP_RRG: return ((p >= a1) && (p <= a2)); case PF_OP_EQ: return (p == a1); case PF_OP_NE: return (p != a1); case PF_OP_LT: return (p < a1); case PF_OP_LE: return (p <= a1); case PF_OP_GT: return (p > a1); case PF_OP_GE: return (p >= a1); } return (0); /* never reached */ } int pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) { NTOHS(a1); NTOHS(a2); NTOHS(p); return (pf_match(op, a1, a2, p)); } static int pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) { if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) return (0); return (pf_match(op, a1, a2, u)); } static int pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) { if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) return (0); return (pf_match(op, a1, a2, g)); } int pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag) { if (*tag == -1) *tag = mtag; return ((!r->match_tag_not && r->match_tag == *tag) || (r->match_tag_not && r->match_tag != *tag)); } int 
pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag) { KASSERT(tag > 0, ("%s: tag %d", __func__, tag)); if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL)) return (ENOMEM); pd->pf_mtag->tag = tag; return (0); } #define PF_ANCHOR_STACKSIZE 32 struct pf_kanchor_stackframe { struct pf_kruleset *rs; struct pf_krule *r; /* XXX: + match bit */ struct pf_kanchor *child; }; /* * XXX: We rely on malloc(9) returning pointer aligned addresses. */ #define PF_ANCHORSTACK_MATCH 0x00000001 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH) #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH) #define PF_ANCHOR_RULE(f) (struct pf_krule *) \ ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK) #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \ ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \ } while (0) void pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth, struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a, int *match) { struct pf_kanchor_stackframe *f; PF_RULES_RASSERT(); if (match) *match = 0; if (*depth >= PF_ANCHOR_STACKSIZE) { printf("%s: anchor stack overflow on %s\n", __func__, (*r)->anchor->name); *r = TAILQ_NEXT(*r, entries); return; } else if (*depth == 0 && a != NULL) *a = *r; f = stack + (*depth)++; f->rs = *rs; f->r = *r; if ((*r)->anchor_wildcard) { struct pf_kanchor_node *parent = &(*r)->anchor->children; if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) { *r = NULL; return; } *rs = &f->child->ruleset; } else { f->child = NULL; *rs = &(*r)->anchor->ruleset; } *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); } int pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth, struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a, int *match) { struct pf_kanchor_stackframe *f; struct pf_krule *fr; int quick = 0; PF_RULES_RASSERT(); do { if (*depth <= 0) break; f = stack + *depth - 1; fr = PF_ANCHOR_RULE(f); if (f->child != NULL) { /* * This block traverses through * a wildcard anchor. */ if (match != NULL && *match) { /* * If any of "*" matched, then * "foo/ *" matched, mark frame * appropriately. 
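* The mark is the low bit of the frame's rule pointer (PF_ANCHORSTACK_MATCH), which is why we depend on malloc(9) returning aligned addresses above.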
*/ PF_ANCHOR_SET_MATCH(f); *match = 0; } f->child = RB_NEXT(pf_kanchor_node, &fr->anchor->children, f->child); if (f->child != NULL) { *rs = &f->child->ruleset; *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); if (*r == NULL) continue; else break; } } (*depth)--; if (*depth == 0 && a != NULL) *a = NULL; *rs = f->rs; if (PF_ANCHOR_MATCH(f) || (match != NULL && *match)) quick = fr->quick; *r = TAILQ_NEXT(fr, entries); } while (*r == NULL); return (quick); } struct pf_keth_anchor_stackframe { struct pf_keth_ruleset *rs; struct pf_keth_rule *r; /* XXX: + match bit */ struct pf_keth_anchor *child; }; #define PF_ETH_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH) #define PF_ETH_ANCHOR_RULE(f) (struct pf_keth_rule *) \ ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK) #define PF_ETH_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \ ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \ } while (0) void pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth, struct pf_keth_ruleset **rs, struct pf_keth_rule **r, struct pf_keth_rule **a, int *match) { struct pf_keth_anchor_stackframe *f; NET_EPOCH_ASSERT(); if (match) *match = 0; if (*depth >= PF_ANCHOR_STACKSIZE) { printf("%s: anchor stack overflow on %s\n", __func__, (*r)->anchor->name); *r = TAILQ_NEXT(*r, entries); return; } else if (*depth == 0 && a != NULL) *a = *r; f = stack + (*depth)++; f->rs = *rs; f->r = *r; if ((*r)->anchor_wildcard) { struct pf_keth_anchor_node *parent = &(*r)->anchor->children; if ((f->child = RB_MIN(pf_keth_anchor_node, parent)) == NULL) { *r = NULL; return; } *rs = &f->child->ruleset; } else { f->child = NULL; *rs = &(*r)->anchor->ruleset; } *r = TAILQ_FIRST((*rs)->active.rules); } int pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth, struct pf_keth_ruleset **rs, struct pf_keth_rule **r, struct pf_keth_rule **a, int *match) { struct pf_keth_anchor_stackframe *f; struct pf_keth_rule *fr; int quick = 0; NET_EPOCH_ASSERT(); do { if (*depth <= 0) break; f = stack + *depth - 1; fr = PF_ETH_ANCHOR_RULE(f); if (f->child != NULL) { /* * This block traverses through * a wildcard anchor. */ if (match != NULL && *match) { /* * If any of "*" matched, then * "foo/ *" matched, mark frame * appropriately. 
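* This mirrors pf_step_out_of_anchor() above, walking the Ethernet rulesets instead of the IP ones.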
*/ PF_ETH_ANCHOR_SET_MATCH(f); *match = 0; } f->child = RB_NEXT(pf_keth_anchor_node, &fr->anchor->children, f->child); if (f->child != NULL) { *rs = &f->child->ruleset; *r = TAILQ_FIRST((*rs)->active.rules); if (*r == NULL) continue; else break; } } (*depth)--; if (*depth == 0 && a != NULL) *a = NULL; *rs = f->rs; if (PF_ETH_ANCHOR_MATCH(f) || (match != NULL && *match)) quick = fr->quick; *r = TAILQ_NEXT(fr, entries); } while (*r == NULL); return (quick); } #ifdef INET6 void pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); break; #endif /* INET */ case AF_INET6: naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); break; } } void pf_addr_inc(struct pf_addr *addr, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); break; #endif /* INET */ case AF_INET6: if (addr->addr32[3] == 0xffffffff) { addr->addr32[3] = 0; if (addr->addr32[2] == 0xffffffff) { addr->addr32[2] = 0; if (addr->addr32[1] == 0xffffffff) { addr->addr32[1] = 0; addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); } else addr->addr32[1] = htonl(ntohl(addr->addr32[1]) + 1); } else addr->addr32[2] = htonl(ntohl(addr->addr32[2]) + 1); } else addr->addr32[3] = htonl(ntohl(addr->addr32[3]) + 1); break; } } #endif /* INET6 */ void pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a) { if (r->qid) a->qid = r->qid; if (r->pqid) a->pqid = r->pqid; + if (r->rtableid >= 0) + a->rtableid = r->rtableid; + a->log |= r->log; + if (r->scrub_flags & PFSTATE_SETTOS) + a->set_tos = r->set_tos; + if (r->min_ttl) + a->min_ttl = r->min_ttl; + if (r->max_mss) + a->max_mss = r->max_mss; + a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID| + PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO)); if (r->dnpipe) a->dnpipe = r->dnpipe; if (r->dnrpipe) a->dnrpipe = r->dnrpipe; if (r->dnpipe || r->dnrpipe) { if (r->free_flags & PFRULE_DN_IS_PIPE) - a->flags |= PFRULE_DN_IS_PIPE; + a->flags |= PFSTATE_DN_IS_PIPE; else - a->flags &= ~PFRULE_DN_IS_PIPE; + a->flags &= ~PFSTATE_DN_IS_PIPE; } } int pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m) { struct pf_addr *saddr, *daddr; u_int16_t sport, dport; struct inpcbinfo *pi; struct inpcb *inp; pd->lookup.uid = UID_MAX; pd->lookup.gid = GID_MAX; switch (pd->proto) { case IPPROTO_TCP: sport = pd->hdr.tcp.th_sport; dport = pd->hdr.tcp.th_dport; pi = &V_tcbinfo; break; case IPPROTO_UDP: sport = pd->hdr.udp.uh_sport; dport = pd->hdr.udp.uh_dport; pi = &V_udbinfo; break; default: return (-1); } if (direction == PF_IN) { saddr = pd->src; daddr = pd->dst; } else { u_int16_t p; p = sport; sport = dport; dport = p; saddr = pd->dst; daddr = pd->src; } switch (pd->af) { #ifdef INET case AF_INET: inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4, dport, INPLOOKUP_RLOCKPCB, NULL, m); if (inp == NULL) { inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4, dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, 
NULL, m); if (inp == NULL) return (-1); } break; #endif /* INET */ #ifdef INET6 case AF_INET6: inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6, dport, INPLOOKUP_RLOCKPCB, NULL, m); if (inp == NULL) { inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6, dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL, m); if (inp == NULL) return (-1); } break; #endif /* INET6 */ default: return (-1); } INP_RLOCK_ASSERT(inp); pd->lookup.uid = inp->inp_cred->cr_uid; pd->lookup.gid = inp->inp_cred->cr_groups[0]; INP_RUNLOCK(inp); return (1); } u_int8_t pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) { int hlen; u_int8_t hdr[60]; u_int8_t *opt, optlen; u_int8_t wscale = 0; hlen = th_off << 2; /* hlen <= sizeof(hdr) */ if (hlen <= sizeof(struct tcphdr)) return (0); if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) return (0); opt = hdr + sizeof(struct tcphdr); hlen -= sizeof(struct tcphdr); while (hlen >= 3) { switch (*opt) { case TCPOPT_EOL: case TCPOPT_NOP: ++opt; --hlen; break; case TCPOPT_WINDOW: wscale = opt[2]; if (wscale > TCP_MAX_WINSHIFT) wscale = TCP_MAX_WINSHIFT; wscale |= PF_WSCALE_FLAG; /* FALLTHROUGH */ default: optlen = opt[1]; if (optlen < 2) optlen = 2; hlen -= optlen; opt += optlen; break; } } return (wscale); } u_int16_t pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) { int hlen; u_int8_t hdr[60]; u_int8_t *opt, optlen; u_int16_t mss = V_tcp_mssdflt; hlen = th_off << 2; /* hlen <= sizeof(hdr) */ if (hlen <= sizeof(struct tcphdr)) return (0); if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) return (0); opt = hdr + sizeof(struct tcphdr); hlen -= sizeof(struct tcphdr); while (hlen >= TCPOLEN_MAXSEG) { switch (*opt) { case TCPOPT_EOL: case TCPOPT_NOP: ++opt; --hlen; break; case TCPOPT_MAXSEG: bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); NTOHS(mss); /* FALLTHROUGH */ default: optlen = opt[1]; if (optlen < 2) optlen = 2; hlen -= optlen; opt += optlen; break; } } return (mss); } static u_int16_t pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer) { struct nhop_object *nh; #ifdef INET6 struct in6_addr dst6; uint32_t scopeid; #endif /* INET6 */ int hlen = 0; uint16_t mss = 0; NET_EPOCH_ASSERT(); switch (af) { #ifdef INET case AF_INET: hlen = sizeof(struct ip); nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0); if (nh != NULL) mss = nh->nh_mtu - hlen - sizeof(struct tcphdr); break; #endif /* INET */ #ifdef INET6 case AF_INET6: hlen = sizeof(struct ip6_hdr); in6_splitscope(&addr->v6, &dst6, &scopeid); nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0); if (nh != NULL) mss = nh->nh_mtu - hlen - sizeof(struct tcphdr); break; #endif /* INET6 */ } mss = max(V_tcp_mssdflt, mss); mss = min(mss, offer); mss = max(mss, 64); /* sanity - at least max opt space */ return (mss); } static u_int32_t pf_tcp_iss(struct pf_pdesc *pd) { MD5_CTX ctx; u_int32_t digest[4]; if (V_pf_tcp_secret_init == 0) { arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret)); MD5Init(&V_pf_tcp_secret_ctx); MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret, sizeof(V_pf_tcp_secret)); V_pf_tcp_secret_init = 1; } ctx = V_pf_tcp_secret_ctx; MD5Update(&ctx, (char *)&pd->hdr.tcp.th_sport, sizeof(u_short)); MD5Update(&ctx, (char *)&pd->hdr.tcp.th_dport, sizeof(u_short)); if (pd->af == AF_INET6) { MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); } else { MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct 
in_addr)); } MD5Final((u_char *)digest, &ctx); V_pf_tcp_iss_off += 4096; #define ISN_RANDOM_INCREMENT (4096 - 1) return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) + V_pf_tcp_iss_off); #undef ISN_RANDOM_INCREMENT } static bool pf_match_eth_addr(const uint8_t *a, const struct pf_keth_rule_addr *r) { bool match = true; /* Always matches if not set */ if (! r->isset) return (!r->neg); for (int i = 0; i < ETHER_ADDR_LEN; i++) { if ((a[i] & r->mask[i]) != (r->addr[i] & r->mask[i])) { match = false; break; } } return (match ^ r->neg); } static int pf_match_eth_tag(struct mbuf *m, struct pf_keth_rule *r, int *tag, int mtag) { if (*tag == -1) *tag = mtag; return ((!r->match_tag_not && r->match_tag == *tag) || (r->match_tag_not && r->match_tag != *tag)); } static void pf_bridge_to(struct pfi_kkif *kif, struct mbuf *m) { struct ifnet *ifp = kif->pfik_ifp; /* If we don't have the interface drop the packet. */ if (ifp == NULL) { m_freem(m); return; } switch (ifp->if_type) { case IFT_ETHER: case IFT_XETHER: case IFT_L2VLAN: case IFT_BRIDGE: case IFT_IEEE8023ADLAG: break; default: m_freem(m); return; } kif->pfik_ifp->if_transmit(kif->pfik_ifp, m); } static int pf_test_eth_rule(int dir, struct pfi_kkif *kif, struct mbuf **m0) { #ifdef INET struct ip ip; #endif #ifdef INET6 struct ip6_hdr ip6; #endif struct mbuf *m = *m0; struct ether_header *e; struct pf_keth_rule *r, *rm, *a = NULL; struct pf_keth_ruleset *ruleset = NULL; struct pf_mtag *mtag; struct pf_keth_ruleq *rules; struct pf_addr *src = NULL, *dst = NULL; sa_family_t af = 0; uint16_t proto; int asd = 0, match = 0; int tag = -1; uint8_t action; struct pf_keth_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; MPASS(kif->pfik_ifp->if_vnet == curvnet); NET_EPOCH_ASSERT(); PF_RULES_RLOCK_TRACKER; SDT_PROBE3(pf, eth, test_rule, entry, dir, kif->pfik_ifp, m); mtag = pf_find_mtag(m); if (mtag != NULL && mtag->flags & PF_TAG_DUMMYNET) { /* Dummynet re-injects packets after they've * completed their delay. We've already * processed them, so pass unconditionally. */ /* But only once. We may see the packet multiple times (e.g. * PFIL_IN/PFIL_OUT). */ mtag->flags &= ~PF_TAG_DUMMYNET; return (PF_PASS); } ruleset = V_pf_keth; rules = ck_pr_load_ptr(&ruleset->active.rules); r = TAILQ_FIRST(rules); rm = NULL; e = mtod(m, struct ether_header *); proto = ntohs(e->ether_type); switch (proto) { #ifdef INET case ETHERTYPE_IP: { if (m_length(m, NULL) < (sizeof(struct ether_header) + sizeof(ip))) return (PF_DROP); af = AF_INET; m_copydata(m, sizeof(struct ether_header), sizeof(ip), (caddr_t)&ip); src = (struct pf_addr *)&ip.ip_src; dst = (struct pf_addr *)&ip.ip_dst; break; } #endif /* INET */ #ifdef INET6 case ETHERTYPE_IPV6: { if (m_length(m, NULL) < (sizeof(struct ether_header) + sizeof(ip6))) return (PF_DROP); af = AF_INET6; m_copydata(m, sizeof(struct ether_header), sizeof(ip6), (caddr_t)&ip6); src = (struct pf_addr *)&ip6.ip6_src; dst = (struct pf_addr *)&ip6.ip6_dst; break; } #endif /* INET6 */ } PF_RULES_RLOCK(); while (r != NULL) { counter_u64_add(r->evaluations, 1); SDT_PROBE2(pf, eth, test_rule, test, r->nr, r); if (pfi_kkif_match(r->kif, kif) == r->ifnot) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "kif"); r = r->skip[PFE_SKIP_IFP].ptr; } else if (r->direction && r->direction != dir) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "dir"); r = r->skip[PFE_SKIP_DIR].ptr; } else if (r->proto && r->proto != proto) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "proto"); r = r->skip[PFE_SKIP_PROTO].ptr; } else if (! 
pf_match_eth_addr(e->ether_shost, &r->src)) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "src"); r = r->skip[PFE_SKIP_SRC_ADDR].ptr; } else if (! pf_match_eth_addr(e->ether_dhost, &r->dst)) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "dst"); r = TAILQ_NEXT(r, entries); } else if (src != NULL && PF_MISMATCHAW(&r->ipsrc.addr, src, af, r->ipsrc.neg, kif, M_GETFIB(m))) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "ip_src"); r = TAILQ_NEXT(r, entries); } else if (dst != NULL && PF_MISMATCHAW(&r->ipdst.addr, dst, af, r->ipdst.neg, kif, M_GETFIB(m))) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "ip_dst"); r = TAILQ_NEXT(r, entries); } else if (r->match_tag && !pf_match_eth_tag(m, r, &tag, mtag ? mtag->tag : 0)) { SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r, "match_tag"); r = TAILQ_NEXT(r, entries); } else { if (r->tag) tag = r->tag; if (r->anchor == NULL) { /* Rule matches */ rm = r; SDT_PROBE2(pf, eth, test_rule, match, r->nr, r); if (r->quick) break; r = TAILQ_NEXT(r, entries); } else { pf_step_into_keth_anchor(anchor_stack, &asd, &ruleset, &r, &a, &match); } } if (r == NULL && pf_step_out_of_keth_anchor(anchor_stack, &asd, &ruleset, &r, &a, &match)) break; } r = rm; SDT_PROBE2(pf, eth, test_rule, final_match, (r != NULL ? r->nr : -1), r); /* Default to pass. */ if (r == NULL) { PF_RULES_RUNLOCK(); return (PF_PASS); } /* Execute action. */ counter_u64_add(r->packets[dir == PF_OUT], 1); counter_u64_add(r->bytes[dir == PF_OUT], m_length(m, NULL)); pf_update_timestamp(r); /* Shortcut. Don't tag if we're just going to drop anyway. */ if (r->action == PF_DROP) { PF_RULES_RUNLOCK(); return (PF_DROP); } if (tag > 0) { if (mtag == NULL) mtag = pf_get_mtag(m); if (mtag == NULL) { PF_RULES_RUNLOCK(); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } mtag->tag = tag; } if (r->qid != 0) { if (mtag == NULL) mtag = pf_get_mtag(m); if (mtag == NULL) { PF_RULES_RUNLOCK(); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } mtag->qid = r->qid; } /* Dummynet */ if (r->dnpipe) { struct ip_fw_args dnflow; /* Drop packet if dummynet is not loaded. */ if (ip_dn_io_ptr == NULL) { PF_RULES_RUNLOCK(); m_freem(m); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } if (mtag == NULL) mtag = pf_get_mtag(m); if (mtag == NULL) { PF_RULES_RUNLOCK(); counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1); return (PF_DROP); } bzero(&dnflow, sizeof(dnflow)); /* We don't have port numbers here, so we set 0. That means * that we'll be somewhat limited in distinguishing flows (i.e. * only based on IP addresses, not based on port numbers), but * it's better than nothing. */ dnflow.f_id.dst_port = 0; dnflow.f_id.src_port = 0; dnflow.f_id.proto = 0; dnflow.rule.info = r->dnpipe; dnflow.rule.info |= IPFW_IS_DUMMYNET; if (r->dnflags & PFRULE_DN_IS_PIPE) dnflow.rule.info |= IPFW_IS_PIPE; dnflow.f_id.extra = dnflow.rule.info; dnflow.flags = dir == PF_IN ? 
IPFW_ARGS_IN : IPFW_ARGS_OUT; dnflow.flags |= IPFW_ARGS_ETHER; dnflow.ifp = kif->pfik_ifp; switch (af) { case AF_INET: dnflow.f_id.addr_type = 4; dnflow.f_id.src_ip = src->v4.s_addr; dnflow.f_id.dst_ip = dst->v4.s_addr; break; case AF_INET6: dnflow.flags |= IPFW_ARGS_IP6; dnflow.f_id.addr_type = 6; dnflow.f_id.src_ip6 = src->v6; dnflow.f_id.dst_ip6 = dst->v6; break; } mtag->flags |= PF_TAG_DUMMYNET; ip_dn_io_ptr(m0, &dnflow); if (*m0 != NULL) mtag->flags &= ~PF_TAG_DUMMYNET; } action = r->action; if (action == PF_PASS && r->bridge_to) { pf_bridge_to(r->bridge_to, *m0); *m0 = NULL; /* We've eaten the packet. */ } PF_RULES_RUNLOCK(); return (action); } static int pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction, struct pfi_kkif *kif, struct mbuf *m, int off, struct pf_pdesc *pd, struct pf_krule **am, struct pf_kruleset **rsm, struct inpcb *inp) { struct pf_krule *nr = NULL; struct pf_addr * const saddr = pd->src; struct pf_addr * const daddr = pd->dst; sa_family_t af = pd->af; struct pf_krule *r, *a = NULL; struct pf_kruleset *ruleset = NULL; + struct pf_krule_slist match_rules; + struct pf_krule_item *ri; struct pf_ksrc_node *nsn = NULL; struct tcphdr *th = &pd->hdr.tcp; struct pf_state_key *sk = NULL, *nk = NULL; u_short reason; int rewrite = 0, hdrlen = 0; - int tag = -1, rtableid = -1; + int tag = -1; int asd = 0; int match = 0; int state_icmp = 0; u_int16_t sport = 0, dport = 0; u_int16_t bproto_sum = 0, bip_sum = 0; u_int8_t icmptype = 0, icmpcode = 0; struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; PF_RULES_RASSERT(); if (inp != NULL) { INP_LOCK_ASSERT(inp); pd->lookup.uid = inp->inp_cred->cr_uid; pd->lookup.gid = inp->inp_cred->cr_groups[0]; pd->lookup.done = 1; } switch (pd->proto) { case IPPROTO_TCP: sport = th->th_sport; dport = th->th_dport; hdrlen = sizeof(*th); break; case IPPROTO_UDP: sport = pd->hdr.udp.uh_sport; dport = pd->hdr.udp.uh_dport; hdrlen = sizeof(pd->hdr.udp); break; #ifdef INET case IPPROTO_ICMP: if (pd->af != AF_INET) break; sport = dport = pd->hdr.icmp.icmp_id; hdrlen = sizeof(pd->hdr.icmp); icmptype = pd->hdr.icmp.icmp_type; icmpcode = pd->hdr.icmp.icmp_code; if (icmptype == ICMP_UNREACH || icmptype == ICMP_SOURCEQUENCH || icmptype == ICMP_REDIRECT || icmptype == ICMP_TIMXCEED || icmptype == ICMP_PARAMPROB) state_icmp++; break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: if (af != AF_INET6) break; sport = dport = pd->hdr.icmp6.icmp6_id; hdrlen = sizeof(pd->hdr.icmp6); icmptype = pd->hdr.icmp6.icmp6_type; icmpcode = pd->hdr.icmp6.icmp6_code; if (icmptype == ICMP6_DST_UNREACH || icmptype == ICMP6_PACKET_TOO_BIG || icmptype == ICMP6_TIME_EXCEEDED || icmptype == ICMP6_PARAM_PROB) state_icmp++; break; #endif /* INET6 */ default: sport = dport = hdrlen = 0; break; } r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); /* check packet for BINAT/NAT/RDR */ if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk, &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) { KASSERT(sk != NULL, ("%s: null sk", __func__)); KASSERT(nk != NULL, ("%s: null nk", __func__)); if (nr->log) { PFLOG_PACKET(kif, m, af, direction, PFRES_MATCH, nr, a, ruleset, pd, 1); } if (pd->ip_sum) bip_sum = *pd->ip_sum; switch (pd->proto) { case IPPROTO_TCP: bproto_sum = th->th_sum; pd->proto_sum = &th->th_sum; if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || nk->port[pd->sidx] != sport) { pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 0, af); pd->sport = &th->th_sport; 
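/* pf_change_ap() also fixed up the IP and TCP checksums; keep the cached source port in sync with the rewritten header. */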
sport = th->th_sport; } if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || nk->port[pd->didx] != dport) { pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum, &th->th_sum, &nk->addr[pd->didx], nk->port[pd->didx], 0, af); dport = th->th_dport; pd->dport = &th->th_dport; } rewrite++; break; case IPPROTO_UDP: bproto_sum = pd->hdr.udp.uh_sum; pd->proto_sum = &pd->hdr.udp.uh_sum; if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || nk->port[pd->sidx] != sport) { pf_change_ap(m, saddr, &pd->hdr.udp.uh_sport, pd->ip_sum, &pd->hdr.udp.uh_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, af); sport = pd->hdr.udp.uh_sport; pd->sport = &pd->hdr.udp.uh_sport; } if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || nk->port[pd->didx] != dport) { pf_change_ap(m, daddr, &pd->hdr.udp.uh_dport, pd->ip_sum, &pd->hdr.udp.uh_sum, &nk->addr[pd->didx], nk->port[pd->didx], 1, af); dport = pd->hdr.udp.uh_dport; pd->dport = &pd->hdr.udp.uh_dport; } rewrite++; break; #ifdef INET case IPPROTO_ICMP: nk->port[0] = nk->port[1]; if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) pf_change_a(&saddr->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) pf_change_a(&daddr->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); if (nk->port[1] != pd->hdr.icmp.icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( pd->hdr.icmp.icmp_cksum, sport, nk->port[1], 0); pd->hdr.icmp.icmp_id = nk->port[1]; pd->sport = &pd->hdr.icmp.icmp_id; } m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp); break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: nk->port[0] = nk->port[1]; if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6)) pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[pd->sidx], 0); if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6)) pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[pd->didx], 0); rewrite++; break; #endif /* INET6 */ default: switch (af) { #ifdef INET case AF_INET: if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) pf_change_a(&saddr->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) pf_change_a(&daddr->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6)) PF_ACPY(saddr, &nk->addr[pd->sidx], af); if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6)) PF_ACPY(daddr, &nk->addr[pd->didx], af); break; #endif /* INET6 */ } break; } if (nr->natpass) r = NULL; pd->nat_rule = nr; } + SLIST_INIT(&match_rules); while (r != NULL) { pf_counter_u64_add(&r->evaluations, 1); if (pfi_kkif_match(r->kif, kif) == r->ifnot) r = r->skip[PF_SKIP_IFP].ptr; else if (r->direction && r->direction != direction) r = r->skip[PF_SKIP_DIR].ptr; else if (r->af && r->af != af) r = r->skip[PF_SKIP_AF].ptr; else if (r->proto && r->proto != pd->proto) r = r->skip[PF_SKIP_PROTO].ptr; else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.neg, kif, M_GETFIB(m))) r = r->skip[PF_SKIP_SRC_ADDR].ptr; /* tcp/udp only. port_op always 0 in other cases */ else if (r->src.port_op && !pf_match_port(r->src.port_op, r->src.port[0], r->src.port[1], sport)) r = r->skip[PF_SKIP_SRC_PORT].ptr; else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.neg, NULL, M_GETFIB(m))) r = r->skip[PF_SKIP_DST_ADDR].ptr; /* tcp/udp only. port_op always 0 in other cases */ else if (r->dst.port_op && !pf_match_port(r->dst.port_op, r->dst.port[0], r->dst.port[1], dport)) r = r->skip[PF_SKIP_DST_PORT].ptr; /* icmp only.
type always 0 in other cases */ else if (r->type && r->type != icmptype + 1) r = TAILQ_NEXT(r, entries); /* icmp only. type always 0 in other cases */ else if (r->code && r->code != icmpcode + 1) r = TAILQ_NEXT(r, entries); else if (r->tos && !(r->tos == pd->tos)) r = TAILQ_NEXT(r, entries); else if (r->rule_flag & PFRULE_FRAGMENT) r = TAILQ_NEXT(r, entries); else if (pd->proto == IPPROTO_TCP && (r->flagset & th->th_flags) != r->flags) r = TAILQ_NEXT(r, entries); /* tcp/udp only. uid.op always 0 in other cases */ else if (r->uid.op && (pd->lookup.done || (pd->lookup.done = pf_socket_lookup(direction, pd, m), 1)) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], pd->lookup.uid)) r = TAILQ_NEXT(r, entries); /* tcp/udp only. gid.op always 0 in other cases */ else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = pf_socket_lookup(direction, pd, m), 1)) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], pd->lookup.gid)) r = TAILQ_NEXT(r, entries); else if (r->prio && !pf_match_ieee8021q_pcp(r->prio, m)) r = TAILQ_NEXT(r, entries); else if (r->prob && r->prob <= arc4random()) r = TAILQ_NEXT(r, entries); else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag ? pd->pf_mtag->tag : 0)) r = TAILQ_NEXT(r, entries); else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != IPPROTO_TCP || !pf_osfp_match( pf_osfp_fingerprint(pd, m, off, th), r->os_fingerprint))) r = TAILQ_NEXT(r, entries); else { if (r->tag) tag = r->tag; - if (r->rtableid >= 0) - rtableid = r->rtableid; if (r->anchor == NULL) { if (r->action == PF_MATCH) { + ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO); + if (ri == NULL) { + REASON_SET(&reason, PFRES_MEMORY); + goto cleanup; + } + ri->r = r; + SLIST_INSERT_HEAD(&match_rules, ri, entry); pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1); pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len); pf_counter_u64_critical_exit(); pf_rule_to_actions(r, &pd->act); if (r->log) PFLOG_PACKET(kif, m, af, direction, PFRES_MATCH, r, a, ruleset, pd, 1); } else { match = 1; *rm = r; *am = a; *rsm = ruleset; } if ((*rm)->quick) break; r = TAILQ_NEXT(r, entries); } else pf_step_into_anchor(anchor_stack, &asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match); } if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match)) break; } r = *rm; a = *am; ruleset = *rsm; REASON_SET(&reason, PFRES_MATCH); /* apply actions for last matching pass/block rule */ pf_rule_to_actions(r, &pd->act); if (r->log) { if (rewrite) m_copyback(m, off, hdrlen, pd->hdr.any); PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd, 1); } if ((r->action == PF_DROP) && ((r->rule_flag & PFRULE_RETURNRST) || (r->rule_flag & PFRULE_RETURNICMP) || (r->rule_flag & PFRULE_RETURN))) { pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum, - bip_sum, hdrlen, &reason); + bip_sum, hdrlen, &reason, r->rtableid); } if (r->action == PF_DROP) goto cleanup; if (tag > 0 && pf_tag_packet(m, pd, tag)) { REASON_SET(&reason, PFRES_MEMORY); goto cleanup; } - if (rtableid >= 0) - M_SETFIB(m, rtableid); + if (pd->act.rtableid >= 0) + M_SETFIB(m, pd->act.rtableid); if (!state_icmp && (r->keep_state || nr != NULL || (pd->flags & PFDESC_TCP_NORM))) { int action; action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off, sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum, - hdrlen); + hdrlen, &match_rules); if (action != PF_PASS) { if (action == PF_DROP && (r->rule_flag & 
PFRULE_RETURN)) pf_return(r, nr, pd, sk, off, m, th, kif, - bproto_sum, bip_sum, hdrlen, &reason); + bproto_sum, bip_sum, hdrlen, &reason, + pd->act.rtableid); return (action); } } else { if (sk != NULL) uma_zfree(V_pf_state_key_z, sk); if (nk != NULL) uma_zfree(V_pf_state_key_z, nk); } /* copy back packet headers if we performed NAT operations */ if (rewrite) m_copyback(m, off, hdrlen, pd->hdr.any); if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) && direction == PF_OUT && V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m)) /* * We want the state created, but we don't * want to send this in case a partner * firewall has to know about it to allow * replies through it. */ return (PF_DEFER); return (PF_PASS); cleanup: + while ((ri = SLIST_FIRST(&match_rules))) { + SLIST_REMOVE_HEAD(&match_rules, entry); + free(ri, M_PF_RULE_ITEM); + } + if (sk != NULL) uma_zfree(V_pf_state_key_z, sk); if (nk != NULL) uma_zfree(V_pf_state_key_z, nk); return (PF_DROP); } static int pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a, struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk, struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm, - int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen) + int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen, + struct pf_krule_slist *match_rules) { struct pf_kstate *s = NULL; struct pf_ksrc_node *sn = NULL; struct tcphdr *th = &pd->hdr.tcp; u_int16_t mss = V_tcp_mssdflt; u_short reason; /* check maximums */ if (r->max_states && (counter_u64_fetch(r->states_cur) >= r->max_states)) { counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1); REASON_SET(&reason, PFRES_MAXSTATES); goto csfailed; } /* src node for filter rule */ if ((r->rule_flag & PFRULE_SRCTRACK || r->rpool.opts & PF_POOL_STICKYADDR) && pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { REASON_SET(&reason, PFRES_SRCLIMIT); goto csfailed; } /* src node for translation rule */ if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { REASON_SET(&reason, PFRES_SRCLIMIT); goto csfailed; } s = pf_alloc_state(M_NOWAIT); if (s == NULL) { REASON_SET(&reason, PFRES_MEMORY); goto csfailed; } s->rule.ptr = r; s->nat_rule.ptr = nr; s->anchor.ptr = a; + bcopy(match_rules, &s->match_rules, sizeof(s->match_rules)); STATE_INC_COUNTERS(s); if (r->allow_opts) s->state_flags |= PFSTATE_ALLOWOPTS; if (r->rule_flag & PFRULE_STATESLOPPY) s->state_flags |= PFSTATE_SLOPPY; - s->log = r->log & PF_LOG_ALL; + if (pd->flags & PFDESC_TCP_NORM) /* Set by old-style scrub rules */ + s->state_flags |= PFSTATE_SCRUB_TCP; + s->log = pd->act.log & PF_LOG_ALL; + s->qid = pd->act.qid; + s->pqid = pd->act.pqid; + s->rtableid = pd->act.rtableid; + s->min_ttl = pd->act.min_ttl; + s->set_tos = pd->act.set_tos; + s->max_mss = pd->act.max_mss; s->sync_state = PFSYNC_S_NONE; s->qid = pd->act.qid; s->pqid = pd->act.pqid; s->dnpipe = pd->act.dnpipe; s->dnrpipe = pd->act.dnrpipe; s->state_flags |= pd->act.flags; if (nr != NULL) s->log |= nr->log & PF_LOG_ALL; switch (pd->proto) { case IPPROTO_TCP: s->src.seqlo = ntohl(th->th_seq); s->src.seqhi = s->src.seqlo + pd->p_len + 1; if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && r->keep_state == PF_STATE_MODULATE) { /* Generate sequence number modulator */ if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 0) s->src.seqdiff = 1; pf_change_proto_a(m, &th->th_seq, &th->th_sum,
htonl(s->src.seqlo + s->src.seqdiff), 0); *rewrite = 1; } else s->src.seqdiff = 0; if (th->th_flags & TH_SYN) { s->src.seqhi++; s->src.wscale = pf_get_wscale(m, off, th->th_off, pd->af); } s->src.max_win = MAX(ntohs(th->th_win), 1); if (s->src.wscale & PF_WSCALE_MASK) { /* Remove scale factor from initial window */ int win = s->src.max_win; win += 1 << (s->src.wscale & PF_WSCALE_MASK); s->src.max_win = (win - 1) >> (s->src.wscale & PF_WSCALE_MASK); } if (th->th_flags & TH_FIN) s->src.seqhi++; s->dst.seqhi = 1; s->dst.max_win = 1; pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT); pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED); s->timeout = PFTM_TCP_FIRST_PACKET; atomic_add_32(&V_pf_status.states_halfopen, 1); break; case IPPROTO_UDP: pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE); pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC); s->timeout = PFTM_UDP_FIRST_PACKET; break; case IPPROTO_ICMP: #ifdef INET6 case IPPROTO_ICMPV6: #endif s->timeout = PFTM_ICMP_FIRST_PACKET; break; default: pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE); pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC); s->timeout = PFTM_OTHER_FIRST_PACKET; } if (r->rt) { if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) { REASON_SET(&reason, PFRES_MAPFAILED); pf_src_tree_remove_state(s); s->timeout = PFTM_UNLINKED; STATE_DEC_COUNTERS(s); pf_free_state(s); goto csfailed; } s->rt_kif = r->rpool.cur->kif; } s->creation = time_uptime; s->expire = time_uptime; if (sn != NULL) s->src_node = sn; if (nsn != NULL) { /* XXX We only modify one side for now. */ PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); s->nat_src_node = nsn; } if (pd->proto == IPPROTO_TCP) { - if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, - off, pd, th, &s->src, &s->dst)) { + if (s->state_flags & PFSTATE_SCRUB_TCP && + pf_normalize_tcp_init(m, off, pd, th, &s->src, &s->dst)) { REASON_SET(&reason, PFRES_MEMORY); pf_src_tree_remove_state(s); s->timeout = PFTM_UNLINKED; STATE_DEC_COUNTERS(s); pf_free_state(s); return (PF_DROP); } - if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && + if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub && pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, &s->src, &s->dst, rewrite)) { /* This really shouldn't happen!!! */ DPFPRINTF(PF_DEBUG_URGENT, ("pf_normalize_tcp_stateful failed on first " "pkt\n")); pf_src_tree_remove_state(s); s->timeout = PFTM_UNLINKED; STATE_DEC_COUNTERS(s); pf_free_state(s); return (PF_DROP); } } s->direction = pd->dir; /* * sk/nk could already have been set up by pf_get_translation(). */ if (nr == NULL) { KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p", __func__, nr, sk, nk)); sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport); if (sk == NULL) goto csfailed; nk = sk; } else KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p", __func__, nr, sk, nk)); /* Swap sk/nk for PF_OUT. */ if (pf_state_insert(BOUND_IFACE(r, kif), kif, (pd->dir == PF_IN) ? sk : nk, (pd->dir == PF_IN) ?
nk : sk, s)) { REASON_SET(&reason, PFRES_STATEINS); pf_src_tree_remove_state(s); s->timeout = PFTM_UNLINKED; STATE_DEC_COUNTERS(s); pf_free_state(s); return (PF_DROP); } else *sm = s; if (tag > 0) s->tag = tag; if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC); /* undo NAT changes, if they have taken place */ if (nr != NULL) { struct pf_state_key *skt = s->key[PF_SK_WIRE]; if (pd->dir == PF_OUT) skt = s->key[PF_SK_STACK]; PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); if (pd->sport) *pd->sport = skt->port[pd->sidx]; if (pd->dport) *pd->dport = skt->port[pd->didx]; if (pd->proto_sum) *pd->proto_sum = bproto_sum; if (pd->ip_sum) *pd->ip_sum = bip_sum; m_copyback(m, off, hdrlen, pd->hdr.any); } s->src.seqhi = htonl(arc4random()); /* Find mss option */ int rtid = M_GETFIB(m); mss = pf_get_mss(m, off, th->th_off, pd->af); mss = pf_calc_mss(pd->src, pd->af, rtid, mss); mss = pf_calc_mss(pd->dst, pd->af, rtid, mss); s->src.mss = mss; pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, - TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0); + TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, pd->act.rtableid); REASON_SET(&reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } return (PF_PASS); csfailed: if (sk != NULL) uma_zfree(V_pf_state_key_z, sk); if (nk != NULL) uma_zfree(V_pf_state_key_z, nk); if (sn != NULL) { struct pf_srchash *sh; sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)]; PF_HASHROW_LOCK(sh); if (--sn->states == 0 && sn->expire == 0) { pf_unlink_src_node(sn); uma_zfree(V_pf_sources_z, sn); counter_u64_add( V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1); } PF_HASHROW_UNLOCK(sh); } if (nsn != sn && nsn != NULL) { struct pf_srchash *sh; sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)]; PF_HASHROW_LOCK(sh); if (--nsn->states == 0 && nsn->expire == 0) { pf_unlink_src_node(nsn); uma_zfree(V_pf_sources_z, nsn); counter_u64_add( V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1); } PF_HASHROW_UNLOCK(sh); } return (PF_DROP); } static int pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif, struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_krule **am, struct pf_kruleset **rsm) { struct pf_krule *r, *a = NULL; struct pf_kruleset *ruleset = NULL; + struct pf_krule_slist match_rules; + struct pf_krule_item *ri; sa_family_t af = pd->af; u_short reason; int tag = -1; int asd = 0; int match = 0; struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; PF_RULES_RASSERT(); r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); + SLIST_INIT(&match_rules); while (r != NULL) { pf_counter_u64_add(&r->evaluations, 1); if (pfi_kkif_match(r->kif, kif) == r->ifnot) r = r->skip[PF_SKIP_IFP].ptr; else if (r->direction && r->direction != direction) r = r->skip[PF_SKIP_DIR].ptr; else if (r->af && r->af != af) r = r->skip[PF_SKIP_AF].ptr; else if (r->proto && r->proto != pd->proto) r = r->skip[PF_SKIP_PROTO].ptr; else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.neg, kif, M_GETFIB(m))) r = r->skip[PF_SKIP_SRC_ADDR].ptr; else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.neg, NULL, M_GETFIB(m))) r = r->skip[PF_SKIP_DST_ADDR].ptr; else if (r->tos && !(r->tos == pd->tos)) r = TAILQ_NEXT(r, entries); else if (r->os_fingerprint != PF_OSFP_ANY) r = TAILQ_NEXT(r, entries); else if (pd->proto == IPPROTO_UDP && (r->src.port_op || r->dst.port_op)) r = 
TAILQ_NEXT(r, entries); else if (pd->proto == IPPROTO_TCP && (r->src.port_op || r->dst.port_op || r->flagset)) r = TAILQ_NEXT(r, entries); else if ((pd->proto == IPPROTO_ICMP || pd->proto == IPPROTO_ICMPV6) && (r->type || r->code)) r = TAILQ_NEXT(r, entries); else if (r->prio && !pf_match_ieee8021q_pcp(r->prio, m)) r = TAILQ_NEXT(r, entries); else if (r->prob && r->prob <= (arc4random() % (UINT_MAX - 1) + 1)) r = TAILQ_NEXT(r, entries); else if (r->match_tag && !pf_match_tag(m, r, &tag, pd->pf_mtag ? pd->pf_mtag->tag : 0)) r = TAILQ_NEXT(r, entries); else { if (r->anchor == NULL) { if (r->action == PF_MATCH) { + ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO); + if (ri == NULL) { + REASON_SET(&reason, PFRES_MEMORY); + goto cleanup; + } + ri->r = r; + SLIST_INSERT_HEAD(&match_rules, ri, entry); pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1); pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len); pf_counter_u64_critical_exit(); pf_rule_to_actions(r, &pd->act); if (r->log) PFLOG_PACKET(kif, m, af, direction, PFRES_MATCH, r, a, ruleset, pd, 1); } else { match = 1; *rm = r; *am = a; *rsm = ruleset; } if ((*rm)->quick) break; r = TAILQ_NEXT(r, entries); } else pf_step_into_anchor(anchor_stack, &asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match); } if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match)) break; } r = *rm; a = *am; ruleset = *rsm; REASON_SET(&reason, PFRES_MATCH); /* apply actions for last matching pass/block rule */ pf_rule_to_actions(r, &pd->act); if (r->log) - PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd, - 1); + PFLOG_PACKET(kif, m, af, direction, reason, r, a, + ruleset, pd, 1); if (r->action != PF_PASS) return (PF_DROP); if (tag > 0 && pf_tag_packet(m, pd, tag)) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + goto cleanup; } return (PF_PASS); + +cleanup: + while ((ri = SLIST_FIRST(&match_rules))) { + SLIST_REMOVE_HEAD(&match_rules, entry); + free(ri, M_PF_RULE_ITEM); + } + + return (PF_DROP); } static int pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif, struct mbuf *m, int off, struct pf_pdesc *pd, u_short *reason, int *copyback) { struct tcphdr *th = &pd->hdr.tcp; struct pf_state_peer *src, *dst; u_int16_t win = ntohs(th->th_win); u_int32_t ack, end, seq, orig_seq; u_int8_t sws, dws, psrc, pdst; int ackskew; if (pd->dir == (*state)->direction) { src = &(*state)->src; dst = &(*state)->dst; psrc = PF_PEER_SRC; pdst = PF_PEER_DST; } else { src = &(*state)->dst; dst = &(*state)->src; psrc = PF_PEER_DST; pdst = PF_PEER_SRC; } if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { sws = src->wscale & PF_WSCALE_MASK; dws = dst->wscale & PF_WSCALE_MASK; } else sws = dws = 0; /* * Sequence tracking algorithm from Guido van Rooij's paper: * http://www.madison-gurkha.com/publications/tcp_filtering/ * tcp_filtering.ps */ orig_seq = seq = ntohl(th->th_seq); if (src->seqlo == 0) { /* First packet from this end. 
Set its state */ - if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) && + if (((*state)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) && src->scrub == NULL) { if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) { REASON_SET(reason, PFRES_MEMORY); return (PF_DROP); } } /* Deferred generation of sequence number modulator */ if (dst->seqdiff && !src->seqdiff) { /* use random iss for the TCP server */ while ((src->seqdiff = arc4random() - seq) == 0) ; ack = ntohl(th->th_ack) - dst->seqdiff; pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq + src->seqdiff), 0); pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0); *copyback = 1; } else { ack = ntohl(th->th_ack); } end = seq + pd->p_len; if (th->th_flags & TH_SYN) { end++; if (dst->wscale & PF_WSCALE_FLAG) { src->wscale = pf_get_wscale(m, off, th->th_off, pd->af); if (src->wscale & PF_WSCALE_FLAG) { /* Remove scale factor from initial * window */ sws = src->wscale & PF_WSCALE_MASK; win = ((u_int32_t)win + (1 << sws) - 1) >> sws; dws = dst->wscale & PF_WSCALE_MASK; } else { /* fixup other window */ dst->max_win <<= dst->wscale & PF_WSCALE_MASK; /* in case of a retrans SYN|ACK */ dst->wscale = 0; } } } if (th->th_flags & TH_FIN) end++; src->seqlo = seq; if (src->state < TCPS_SYN_SENT) pf_set_protostate(*state, psrc, TCPS_SYN_SENT); /* * May need to slide the window (seqhi may have been set by * the crappy stack check or if we picked up the connection * after establishment) */ if (src->seqhi == 1 || SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) src->seqhi = end + MAX(1, dst->max_win << dws); if (win > src->max_win) src->max_win = win; } else { ack = ntohl(th->th_ack) - dst->seqdiff; if (src->seqdiff) { /* Modulate sequence numbers */ pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq + src->seqdiff), 0); pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0); *copyback = 1; } end = seq + pd->p_len; if (th->th_flags & TH_SYN) end++; if (th->th_flags & TH_FIN) end++; } if ((th->th_flags & TH_ACK) == 0) { /* Let it pass through the ack skew check */ ack = dst->seqlo; } else if ((ack == 0 && (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || /* broken tcp stacks do not set ack */ (dst->state < TCPS_SYN_SENT)) { /* * Many stacks (ours included) will set the ACK number in an * FIN|ACK if the SYN times out -- no sequence to ACK. */ ack = dst->seqlo; } if (seq == end) { /* Ease sequencing restrictions on no data packets */ seq = src->seqlo; end = seq; } ackskew = dst->seqlo - ack; /* * Need to demodulate the sequence numbers in any TCP SACK options * (Selective ACK). We could optionally validate the SACK values * against the current ACK window, either forwards or backwards, but * I'm not confident that SACK has been implemented properly * everywhere. It wouldn't surprise me if several stacks accidentally * SACK too far backwards of previously ACKed data. There really aren't * any security implications of bad SACKing unless the target stack * doesn't validate the option length correctly. Someone trying to * spoof into a TCP connection won't bother blindly sending SACK * options anyway. 
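* pf_modulate_sack() below walks the options and applies dst->seqdiff to each SACK edge, matching the adjustment made to th_ack above.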
*/ if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { if (pf_modulate_sack(m, off, pd, th, dst)) *copyback = 1; } #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ if (SEQ_GEQ(src->seqhi, end) && /* Last octet inside other's window space */ SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && /* Retrans: not more than one window back */ (ackskew >= -MAXACKWINDOW) && /* Acking not more than one reassembled fragment backwards */ (ackskew <= (MAXACKWINDOW << sws)) && /* Acking not more than one window forward */ ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || (pd->flags & PFDESC_IP_REAS) == 0)) { /* Require an exact/+1 sequence match on resets when possible */ if (dst->scrub || src->scrub) { if (pf_normalize_tcp_stateful(m, off, pd, reason, th, *state, src, dst, copyback)) return (PF_DROP); } /* update max window */ if (src->max_win < win) src->max_win = win; /* synchronize sequencing */ if (SEQ_GT(end, src->seqlo)) src->seqlo = end; /* slide the window of what the other end can send */ if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) dst->seqhi = ack + MAX((win << sws), 1); /* update states */ if (th->th_flags & TH_SYN) if (src->state < TCPS_SYN_SENT) pf_set_protostate(*state, psrc, TCPS_SYN_SENT); if (th->th_flags & TH_FIN) if (src->state < TCPS_CLOSING) pf_set_protostate(*state, psrc, TCPS_CLOSING); if (th->th_flags & TH_ACK) { if (dst->state == TCPS_SYN_SENT) { pf_set_protostate(*state, pdst, TCPS_ESTABLISHED); if (src->state == TCPS_ESTABLISHED && (*state)->src_node != NULL && pf_src_connlimit(state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } } else if (dst->state == TCPS_CLOSING) pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2); } if (th->th_flags & TH_RST) pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT); /* update expire time */ (*state)->expire = time_uptime; if (src->state >= TCPS_FIN_WAIT_2 && dst->state >= TCPS_FIN_WAIT_2) (*state)->timeout = PFTM_TCP_CLOSED; else if (src->state >= TCPS_CLOSING && dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_FIN_WAIT; else if (src->state < TCPS_ESTABLISHED || dst->state < TCPS_ESTABLISHED) (*state)->timeout = PFTM_TCP_OPENING; else if (src->state >= TCPS_CLOSING || dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_CLOSING; else (*state)->timeout = PFTM_TCP_ESTABLISHED; /* Fall through to PASS packet */ } else if ((dst->state < TCPS_SYN_SENT || dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) && SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && /* Within a window forward of the originating packet */ SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { /* Within a window backward of the originating packet */ /* * This currently handles three situations: * 1) Stupid stacks will shotgun SYNs before their peer * replies. * 2) When PF catches an already established stream (the * firewall rebooted, the state table was flushed, routes * changed...) * 3) Packets get funky immediately after the connection * closes (this should catch Solaris spurious ACK|FINs * that web servers like to spew after a close) * * This must be a little more careful than the above code * since packet floods will also be caught here. We don't * update the TTL here to mitigate the damage of a packet * flood and so the same code can handle awkward establishment * and a loosened connection close. 
* In the establishment case, a correct peer response will * validate the connection, go through the normal state code * and keep updating the state TTL. */ if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: loose state match: "); pf_print_state(*state); pf_print_flags(th->th_flags); printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, ackskew, (unsigned long long)(*state)->packets[0], (unsigned long long)(*state)->packets[1], pd->dir == PF_IN ? "in" : "out", pd->dir == (*state)->direction ? "fwd" : "rev"); } if (dst->scrub || src->scrub) { if (pf_normalize_tcp_stateful(m, off, pd, reason, th, *state, src, dst, copyback)) return (PF_DROP); } /* update max window */ if (src->max_win < win) src->max_win = win; /* synchronize sequencing */ if (SEQ_GT(end, src->seqlo)) src->seqlo = end; /* slide the window of what the other end can send */ if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) dst->seqhi = ack + MAX((win << sws), 1); /* * Cannot set dst->seqhi here since this could be a shotgunned * SYN and not an already established connection. */ if (th->th_flags & TH_FIN) if (src->state < TCPS_CLOSING) pf_set_protostate(*state, psrc, TCPS_CLOSING); if (th->th_flags & TH_RST) pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT); /* Fall through to PASS packet */ } else { if ((*state)->dst.state == TCPS_SYN_SENT && (*state)->src.state == TCPS_SYN_SENT) { /* Send RST for state mismatches during handshake */ if (!(th->th_flags & TH_RST)) pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), 0, TH_RST, 0, 0, - (*state)->rule.ptr->return_ttl, 1, 0); + (*state)->rule.ptr->return_ttl, 1, 0, + (*state)->rtableid); src->seqlo = 0; src->seqhi = 1; src->max_win = 1; } else if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: BAD state: "); pf_print_state(*state); pf_print_flags(th->th_flags); printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, ackskew, (unsigned long long)(*state)->packets[0], (unsigned long long)(*state)->packets[1], pd->dir == PF_IN ? "in" : "out", pd->dir == (*state)->direction ? "fwd" : "rev"); printf("pf: State failure on: %c %c %c %c | %c %c\n", SEQ_GEQ(src->seqhi, end) ? ' ' : '1', SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? ' ': '2', (ackskew >= -MAXACKWINDOW) ? ' ' : '3', (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' 
' :'6'); } REASON_SET(reason, PFRES_BADSTATE); return (PF_DROP); } return (PF_PASS); } static int pf_tcp_track_sloppy(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason) { struct tcphdr *th = &pd->hdr.tcp; struct pf_state_peer *src, *dst; u_int8_t psrc, pdst; if (pd->dir == (*state)->direction) { src = &(*state)->src; dst = &(*state)->dst; psrc = PF_PEER_SRC; pdst = PF_PEER_DST; } else { src = &(*state)->dst; dst = &(*state)->src; psrc = PF_PEER_DST; pdst = PF_PEER_SRC; } if (th->th_flags & TH_SYN) if (src->state < TCPS_SYN_SENT) pf_set_protostate(*state, psrc, TCPS_SYN_SENT); if (th->th_flags & TH_FIN) if (src->state < TCPS_CLOSING) pf_set_protostate(*state, psrc, TCPS_CLOSING); if (th->th_flags & TH_ACK) { if (dst->state == TCPS_SYN_SENT) { pf_set_protostate(*state, pdst, TCPS_ESTABLISHED); if (src->state == TCPS_ESTABLISHED && (*state)->src_node != NULL && pf_src_connlimit(state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } } else if (dst->state == TCPS_CLOSING) { pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2); } else if (src->state == TCPS_SYN_SENT && dst->state < TCPS_SYN_SENT) { /* * Handle a special sloppy case where we only see one * half of the connection. If there is an ACK after * the initial SYN without ever seeing a packet from * the destination, set the connection to established. */ pf_set_protostate(*state, PF_PEER_BOTH, TCPS_ESTABLISHED); dst->state = src->state = TCPS_ESTABLISHED; if ((*state)->src_node != NULL && pf_src_connlimit(state)) { REASON_SET(reason, PFRES_SRCLIMIT); return (PF_DROP); } } else if (src->state == TCPS_CLOSING && dst->state == TCPS_ESTABLISHED && dst->seqlo == 0) { /* * Handle the closing of half connections where we * don't see the full bidirectional FIN/ACK+ACK * handshake. */ pf_set_protostate(*state, pdst, TCPS_CLOSING); } } if (th->th_flags & TH_RST) pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT); /* update expire time */ (*state)->expire = time_uptime; if (src->state >= TCPS_FIN_WAIT_2 && dst->state >= TCPS_FIN_WAIT_2) (*state)->timeout = PFTM_TCP_CLOSED; else if (src->state >= TCPS_CLOSING && dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_FIN_WAIT; else if (src->state < TCPS_ESTABLISHED || dst->state < TCPS_ESTABLISHED) (*state)->timeout = PFTM_TCP_OPENING; else if (src->state >= TCPS_CLOSING || dst->state >= TCPS_CLOSING) (*state)->timeout = PFTM_TCP_CLOSING; else (*state)->timeout = PFTM_TCP_ESTABLISHED; return (PF_PASS); } static int pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason) { struct pf_state_key *sk = (*state)->key[pd->didx]; struct tcphdr *th = &pd->hdr.tcp; if ((*state)->src.state == PF_TCPS_PROXY_SRC) { if (pd->dir != (*state)->direction) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } if (th->th_flags & TH_SYN) { if (ntohl(th->th_seq) != (*state)->src.seqlo) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, (*state)->src.seqhi, ntohl(th->th_seq) + 1, - TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0); + TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, + (*state)->rtableid); REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK || (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } else if ((*state)->src_node != NULL && pf_src_connlimit(state)) { REASON_SET(reason, PFRES_SRCLIMIT);
return (PF_DROP); } else pf_set_protostate(*state, PF_PEER_SRC, PF_TCPS_PROXY_DST); } if ((*state)->src.state == PF_TCPS_PROXY_DST) { if (pd->dir == (*state)->direction) { if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } (*state)->src.max_win = MAX(ntohs(th->th_win), 1); if ((*state)->dst.seqhi == 1) (*state)->dst.seqhi = htonl(arc4random()); pf_send_tcp((*state)->rule.ptr, pd->af, &sk->addr[pd->sidx], &sk->addr[pd->didx], sk->port[pd->sidx], sk->port[pd->didx], (*state)->dst.seqhi, 0, TH_SYN, 0, - (*state)->src.mss, 0, 0, (*state)->tag); + (*state)->src.mss, 0, 0, (*state)->tag, + (*state)->rtableid); REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } else if (((th->th_flags & (TH_SYN|TH_ACK)) != (TH_SYN|TH_ACK)) || (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { REASON_SET(reason, PFRES_SYNPROXY); return (PF_DROP); } else { (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); (*state)->dst.seqlo = ntohl(th->th_seq); pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), ntohl(th->th_seq) + 1, TH_ACK, (*state)->src.max_win, 0, 0, 0, - (*state)->tag); + (*state)->tag, (*state)->rtableid); pf_send_tcp((*state)->rule.ptr, pd->af, &sk->addr[pd->sidx], &sk->addr[pd->didx], sk->port[pd->sidx], sk->port[pd->didx], (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, - TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0); + TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, + (*state)->rtableid); (*state)->src.seqdiff = (*state)->dst.seqhi - (*state)->src.seqlo; (*state)->dst.seqdiff = (*state)->src.seqhi - (*state)->dst.seqlo; (*state)->src.seqhi = (*state)->src.seqlo + (*state)->dst.max_win; (*state)->dst.seqhi = (*state)->dst.seqlo + (*state)->src.max_win; (*state)->src.wscale = (*state)->dst.wscale = 0; pf_set_protostate(*state, PF_PEER_BOTH, TCPS_ESTABLISHED); REASON_SET(reason, PFRES_SYNPROXY); return (PF_SYNPROXY_DROP); } } return (PF_PASS); } static int pf_test_state_tcp(struct pf_kstate **state, int direction, struct pfi_kkif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) { struct pf_state_key_cmp key; struct tcphdr *th = &pd->hdr.tcp; int copyback = 0; int action; struct pf_state_peer *src, *dst; bzero(&key, sizeof(key)); key.af = pd->af; key.proto = IPPROTO_TCP; if (direction == PF_IN) { /* wire side, straight */ PF_ACPY(&key.addr[0], pd->src, key.af); PF_ACPY(&key.addr[1], pd->dst, key.af); key.port[0] = th->th_sport; key.port[1] = th->th_dport; } else { /* stack side, reverse */ PF_ACPY(&key.addr[1], pd->src, key.af); PF_ACPY(&key.addr[0], pd->dst, key.af); key.port[1] = th->th_sport; key.port[0] = th->th_dport; } STATE_LOOKUP(kif, &key, direction, *state, pd); if (direction == (*state)->direction) { src = &(*state)->src; dst = &(*state)->dst; } else { src = &(*state)->dst; dst = &(*state)->src; } if ((action = pf_synproxy(pd, state, reason)) != PF_PASS) return (action); if (dst->state >= TCPS_FIN_WAIT_2 && src->state >= TCPS_FIN_WAIT_2 && (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) || ((th->th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK && pf_syncookie_check(pd) && pd->dir == PF_IN))) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: state reuse "); pf_print_state(*state); pf_print_flags(th->th_flags); printf("\n"); } /* XXX make sure it's the same direction ?? 
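* For now the stale state is torn down unconditionally; the peer's retransmitted SYN will then set up a fresh one.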
*/ pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED); pf_unlink_state(*state); *state = NULL; return (PF_DROP); } if ((*state)->state_flags & PFSTATE_SLOPPY) { if (pf_tcp_track_sloppy(state, pd, reason) == PF_DROP) return (PF_DROP); } else { if (pf_tcp_track_full(state, kif, m, off, pd, reason, &copyback) == PF_DROP) return (PF_DROP); } /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || nk->port[pd->sidx] != th->th_sport) pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 0, pd->af); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || nk->port[pd->didx] != th->th_dport) pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum, &th->th_sum, &nk->addr[pd->didx], nk->port[pd->didx], 0, pd->af); copyback = 1; } /* Copyback sequence modulation or stateful scrub changes if needed */ if (copyback) m_copyback(m, off, sizeof(*th), (caddr_t)th); return (PF_PASS); } static int pf_test_state_udp(struct pf_kstate **state, int direction, struct pfi_kkif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd) { struct pf_state_peer *src, *dst; struct pf_state_key_cmp key; struct udphdr *uh = &pd->hdr.udp; uint8_t psrc, pdst; bzero(&key, sizeof(key)); key.af = pd->af; key.proto = IPPROTO_UDP; if (direction == PF_IN) { /* wire side, straight */ PF_ACPY(&key.addr[0], pd->src, key.af); PF_ACPY(&key.addr[1], pd->dst, key.af); key.port[0] = uh->uh_sport; key.port[1] = uh->uh_dport; } else { /* stack side, reverse */ PF_ACPY(&key.addr[1], pd->src, key.af); PF_ACPY(&key.addr[0], pd->dst, key.af); key.port[1] = uh->uh_sport; key.port[0] = uh->uh_dport; } STATE_LOOKUP(kif, &key, direction, *state, pd); if (direction == (*state)->direction) { src = &(*state)->src; dst = &(*state)->dst; psrc = PF_PEER_SRC; pdst = PF_PEER_DST; } else { src = &(*state)->dst; dst = &(*state)->src; psrc = PF_PEER_DST; pdst = PF_PEER_SRC; } /* update states */ if (src->state < PFUDPS_SINGLE) pf_set_protostate(*state, psrc, PFUDPS_SINGLE); if (dst->state == PFUDPS_SINGLE) pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE); /* update expire time */ (*state)->expire = time_uptime; if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) (*state)->timeout = PFTM_UDP_MULTIPLE; else (*state)->timeout = PFTM_UDP_SINGLE; /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || nk->port[pd->sidx] != uh->uh_sport) pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum, &uh->uh_sum, &nk->addr[pd->sidx], nk->port[pd->sidx], 1, pd->af); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || nk->port[pd->didx] != uh->uh_dport) pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum, &uh->uh_sum, &nk->addr[pd->didx], nk->port[pd->didx], 1, pd->af); m_copyback(m, off, sizeof(*uh), (caddr_t)uh); } return (PF_PASS); } static int pf_test_state_icmp(struct pf_kstate **state, int direction, struct pfi_kkif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) { struct pf_addr *saddr = pd->src, *daddr = pd->dst; u_int16_t icmpid = 0, *icmpsum; u_int8_t icmptype, icmpcode; int state_icmp = 0; struct pf_state_key_cmp key; bzero(&key, sizeof(key)); switch (pd->proto) { #ifdef INET case IPPROTO_ICMP: icmptype = pd->hdr.icmp.icmp_type; icmpcode = 
pd->hdr.icmp.icmp_code; icmpid = pd->hdr.icmp.icmp_id; icmpsum = &pd->hdr.icmp.icmp_cksum; if (icmptype == ICMP_UNREACH || icmptype == ICMP_SOURCEQUENCH || icmptype == ICMP_REDIRECT || icmptype == ICMP_TIMXCEED || icmptype == ICMP_PARAMPROB) state_icmp++; break; #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: icmptype = pd->hdr.icmp6.icmp6_type; icmpcode = pd->hdr.icmp6.icmp6_code; icmpid = pd->hdr.icmp6.icmp6_id; icmpsum = &pd->hdr.icmp6.icmp6_cksum; if (icmptype == ICMP6_DST_UNREACH || icmptype == ICMP6_PACKET_TOO_BIG || icmptype == ICMP6_TIME_EXCEEDED || icmptype == ICMP6_PARAM_PROB) state_icmp++; break; #endif /* INET6 */ } if (!state_icmp) { /* * ICMP query/reply message not related to a TCP/UDP packet. * Search for an ICMP state. */ key.af = pd->af; key.proto = pd->proto; key.port[0] = key.port[1] = icmpid; if (direction == PF_IN) { /* wire side, straight */ PF_ACPY(&key.addr[0], pd->src, key.af); PF_ACPY(&key.addr[1], pd->dst, key.af); } else { /* stack side, reverse */ PF_ACPY(&key.addr[1], pd->src, key.af); PF_ACPY(&key.addr[0], pd->dst, key.af); } STATE_LOOKUP(kif, &key, direction, *state, pd); (*state)->expire = time_uptime; (*state)->timeout = PFTM_ICMP_ERROR_REPLY; /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; switch (pd->af) { #ifdef INET case AF_INET: if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) pf_change_a(&saddr->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) pf_change_a(&daddr->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); if (nk->port[0] != pd->hdr.icmp.icmp_id) { pd->hdr.icmp.icmp_cksum = pf_cksum_fixup( pd->hdr.icmp.icmp_cksum, icmpid, nk->port[pd->sidx], 0); pd->hdr.icmp.icmp_id = nk->port[pd->sidx]; } m_copyback(m, off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6)) pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[pd->sidx], 0); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6)) pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum, &nk->addr[pd->didx], 0); m_copyback(m, off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); break; #endif /* INET6 */ } } return (PF_PASS); } else { /* * ICMP error message in response to a TCP/UDP packet. * Extract the inner TCP/UDP header and search for that state. */ struct pf_pdesc pd2; bzero(&pd2, sizeof pd2); #ifdef INET struct ip h2; #endif /* INET */ #ifdef INET6 struct ip6_hdr h2_6; int terminal = 0; #endif /* INET6 */ int ipoff2 = 0; int off2 = 0; pd2.af = pd->af; /* Payload packet is from the opposite direction. */ pd2.sidx = (direction == PF_IN) ? 1 : 0; pd2.didx = (direction == PF_IN) ? 
0 : 1; switch (pd->af) { #ifdef INET case AF_INET: /* offset of h2 in mbuf chain */ ipoff2 = off + ICMP_MINLEN; if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(ip)\n")); return (PF_DROP); } /* * ICMP error messages don't refer to non-first * fragments */ if (h2.ip_off & htons(IP_OFFMASK)) { REASON_SET(reason, PFRES_FRAG); return (PF_DROP); } /* offset of protocol header that follows h2 */ off2 = ipoff2 + (h2.ip_hl << 2); pd2.proto = h2.ip_p; pd2.src = (struct pf_addr *)&h2.ip_src; pd2.dst = (struct pf_addr *)&h2.ip_dst; pd2.ip_sum = &h2.ip_sum; break; #endif /* INET */ #ifdef INET6 case AF_INET6: ipoff2 = off + sizeof(struct icmp6_hdr); if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(ip6)\n")); return (PF_DROP); } pd2.proto = h2_6.ip6_nxt; pd2.src = (struct pf_addr *)&h2_6.ip6_src; pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; pd2.ip_sum = NULL; off2 = ipoff2 + sizeof(h2_6); do { switch (pd2.proto) { case IPPROTO_FRAGMENT: /* * ICMPv6 error messages for * non-first fragments */ REASON_SET(reason, PFRES_FRAG); return (PF_DROP); case IPPROTO_AH: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: case IPPROTO_DSTOPTS: { /* get next header and header length */ struct ip6_ext opt6; if (!pf_pull_hdr(m, off2, &opt6, sizeof(opt6), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMPv6 short opt\n")); return (PF_DROP); } if (pd2.proto == IPPROTO_AH) off2 += (opt6.ip6e_len + 2) * 4; else off2 += (opt6.ip6e_len + 1) * 8; pd2.proto = opt6.ip6e_nxt; /* goto the next header */ break; } default: terminal++; break; } } while (!terminal); break; #endif /* INET6 */ } if (PF_ANEQ(pd->dst, pd2.src, pd->af)) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: BAD ICMP %d:%d outer dst: ", icmptype, icmpcode); pf_print_host(pd->src, 0, pd->af); printf(" -> "); pf_print_host(pd->dst, 0, pd->af); printf(" inner src: "); pf_print_host(pd2.src, 0, pd2.af); printf(" -> "); pf_print_host(pd2.dst, 0, pd2.af); printf("\n"); } REASON_SET(reason, PFRES_BADSTATE); return (PF_DROP); } switch (pd2.proto) { case IPPROTO_TCP: { struct tcphdr th; u_int32_t seq; struct pf_state_peer *src, *dst; u_int8_t dws; int copyback = 0; /* * Only the first 8 bytes of the TCP header can be * expected. Don't access any TCP header fields after * th_seq, an ackskew test is not possible. 
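 *
 * Editor's note: RFC 792 only guarantees that an ICMP error quotes the
 * offending IP header plus the first 8 octets of its payload.  For TCP
 * those 8 octets cover exactly (sketch, hypothetical struct name):
 *
 *	struct tcp_first8 {
 *		u_int16_t th_sport;
 *		u_int16_t th_dport;
 *		u_int32_t th_seq;
 *	};
 *
 * (octets 0-1, 2-3 and 4-7 respectively).  th_ack at octets 8-11 and
 * everything after it may simply be absent from the quoted packet,
 * hence the 8-byte pull below and the restriction noted above.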
*/ if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(tcp)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_TCP; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[pd2.sidx] = th.th_sport; key.port[pd2.didx] = th.th_dport; STATE_LOOKUP(kif, &key, direction, *state, pd); if (direction == (*state)->direction) { src = &(*state)->dst; dst = &(*state)->src; } else { src = &(*state)->src; dst = &(*state)->dst; } if (src->wscale && dst->wscale) dws = dst->wscale & PF_WSCALE_MASK; else dws = 0; /* Demodulate sequence number */ seq = ntohl(th.th_seq) - src->seqdiff; if (src->seqdiff) { pf_change_a(&th.th_seq, icmpsum, htonl(seq), 0); copyback = 1; } if (!((*state)->state_flags & PFSTATE_SLOPPY) && (!SEQ_GEQ(src->seqhi, seq) || !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: BAD ICMP %d:%d ", icmptype, icmpcode); pf_print_host(pd->src, 0, pd->af); printf(" -> "); pf_print_host(pd->dst, 0, pd->af); printf(" state: "); pf_print_state(*state); printf(" seq=%u\n", seq); } REASON_SET(reason, PFRES_BADSTATE); return (PF_DROP); } else { if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: OK ICMP %d:%d ", icmptype, icmpcode); pf_print_host(pd->src, 0, pd->af); printf(" -> "); pf_print_host(pd->dst, 0, pd->af); printf(" state: "); pf_print_state(*state); printf(" seq=%u\n", seq); } } /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != th.th_sport) pf_change_icmp(pd2.src, &th.th_sport, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != th.th_dport) pf_change_icmp(pd2.dst, &th.th_dport, saddr, &nk->addr[pd2.didx], nk->port[pd2.didx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); copyback = 1; } if (copyback) { switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(m, off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); m_copyback(m, ipoff2, sizeof(h2), (caddr_t )&h2); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(m, off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ } m_copyback(m, off2, 8, (caddr_t)&th); } return (PF_PASS); break; } case IPPROTO_UDP: { struct udphdr uh; if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(udp)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_UDP; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[pd2.sidx] = uh.uh_sport; key.port[pd2.didx] = uh.uh_dport; STATE_LOOKUP(kif, &key, direction, *state, pd); /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != uh.uh_sport) pf_change_icmp(pd2.src, &uh.uh_sport, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], &uh.uh_sum, pd2.ip_sum, icmpsum, pd->ip_sum, 1, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != uh.uh_dport) pf_change_icmp(pd2.dst, &uh.uh_dport, saddr, 
&nk->addr[pd2.didx], nk->port[pd2.didx], &uh.uh_sum, pd2.ip_sum, icmpsum, pd->ip_sum, 1, pd2.af); switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(m, off, ICMP_MINLEN, (caddr_t )&pd->hdr.icmp); m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(m, off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ } m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); } return (PF_PASS); break; } #ifdef INET case IPPROTO_ICMP: { struct icmp iih; if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short i" "(icmp)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_ICMP; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[0] = key.port[1] = iih.icmp_id; STATE_LOOKUP(kif, &key, direction, *state, pd); /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != iih.icmp_id) pf_change_icmp(pd2.src, &iih.icmp_id, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != iih.icmp_id) pf_change_icmp(pd2.dst, &iih.icmp_id, saddr, &nk->addr[pd2.didx], nk->port[pd2.didx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET); m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp); m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); } return (PF_PASS); break; } #endif /* INET */ #ifdef INET6 case IPPROTO_ICMPV6: { struct icmp6_hdr iih; if (!pf_pull_hdr(m, off2, &iih, sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(icmp6)\n")); return (PF_DROP); } key.af = pd2.af; key.proto = IPPROTO_ICMPV6; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[0] = key.port[1] = iih.icmp6_id; STATE_LOOKUP(kif, &key, direction, *state, pd); /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd2.src, &nk->addr[pd2.sidx], pd2.af) || nk->port[pd2.sidx] != iih.icmp6_id) pf_change_icmp(pd2.src, &iih.icmp6_id, daddr, &nk->addr[pd2.sidx], nk->port[pd2.sidx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET6); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af) || nk->port[pd2.didx] != iih.icmp6_id) pf_change_icmp(pd2.dst, &iih.icmp6_id, saddr, &nk->addr[pd2.didx], nk->port[pd2.didx], NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, AF_INET6); m_copyback(m, off, sizeof(struct icmp6_hdr), (caddr_t)&pd->hdr.icmp6); m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); m_copyback(m, off2, sizeof(struct icmp6_hdr), (caddr_t)&iih); } return (PF_PASS); break; } #endif /* INET6 */ default: { key.af = pd2.af; key.proto = pd2.proto; PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); key.port[0] = key.port[1] = 0; STATE_LOOKUP(kif, &key, direction, *state, pd); /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; if (PF_ANEQ(pd2.src, 
&nk->addr[pd2.sidx], pd2.af)) pf_change_icmp(pd2.src, NULL, daddr, &nk->addr[pd2.sidx], 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], pd2.af)) pf_change_icmp(pd2.dst, NULL, saddr, &nk->addr[pd2.didx], 0, NULL, pd2.ip_sum, icmpsum, pd->ip_sum, 0, pd2.af); switch (pd2.af) { #ifdef INET case AF_INET: m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp); m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); break; #endif /* INET */ #ifdef INET6 case AF_INET6: m_copyback(m, off, sizeof(struct icmp6_hdr), (caddr_t )&pd->hdr.icmp6); m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t )&h2_6); break; #endif /* INET6 */ } } return (PF_PASS); break; } } } } static int pf_test_state_other(struct pf_kstate **state, int direction, struct pfi_kkif *kif, struct mbuf *m, struct pf_pdesc *pd) { struct pf_state_peer *src, *dst; struct pf_state_key_cmp key; uint8_t psrc, pdst; bzero(&key, sizeof(key)); key.af = pd->af; key.proto = pd->proto; if (direction == PF_IN) { PF_ACPY(&key.addr[0], pd->src, key.af); PF_ACPY(&key.addr[1], pd->dst, key.af); key.port[0] = key.port[1] = 0; } else { PF_ACPY(&key.addr[1], pd->src, key.af); PF_ACPY(&key.addr[0], pd->dst, key.af); key.port[1] = key.port[0] = 0; } STATE_LOOKUP(kif, &key, direction, *state, pd); if (direction == (*state)->direction) { src = &(*state)->src; dst = &(*state)->dst; psrc = PF_PEER_SRC; pdst = PF_PEER_DST; } else { src = &(*state)->dst; dst = &(*state)->src; psrc = PF_PEER_DST; pdst = PF_PEER_SRC; } /* update states */ if (src->state < PFOTHERS_SINGLE) pf_set_protostate(*state, psrc, PFOTHERS_SINGLE); if (dst->state == PFOTHERS_SINGLE) pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE); /* update expire time */ (*state)->expire = time_uptime; if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) (*state)->timeout = PFTM_OTHER_MULTIPLE; else (*state)->timeout = PFTM_OTHER_SINGLE; /* translate source/destination address, if necessary */ if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { struct pf_state_key *nk = (*state)->key[pd->didx]; KASSERT(nk, ("%s: nk is null", __func__)); KASSERT(pd, ("%s: pd is null", __func__)); KASSERT(pd->src, ("%s: pd->src is null", __func__)); KASSERT(pd->dst, ("%s: pd->dst is null", __func__)); switch (pd->af) { #ifdef INET case AF_INET: if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) pf_change_a(&pd->src->v4.s_addr, pd->ip_sum, nk->addr[pd->sidx].v4.s_addr, 0); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) pf_change_a(&pd->dst->v4.s_addr, pd->ip_sum, nk->addr[pd->didx].v4.s_addr, 0); break; #endif /* INET */ #ifdef INET6 case AF_INET6: if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); #endif /* INET6 */ } } return (PF_PASS); } /* * ipoff and off are measured from the start of the mbuf chain. * h must be at "ipoff" on the mbuf chain. 
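 *
 * Editor's note: a typical call, as used throughout this file (sketch
 * for a TCP header at mbuf-chain offset "off"):
 *
 *	struct tcphdr th;
 *	u_short action, reason;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason,
 *	    AF_INET))
 *		goto done;
 *
 * On failure the helper has already chosen the verdict: PF_PASS when a
 * fragment's offset shows the header cannot be in this fragment,
 * otherwise PF_DROP with PFRES_FRAG or PFRES_SHORT.  On success it
 * copies the header out of the mbuf chain with m_copydata() and
 * returns the caller's buffer.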
*/ void * pf_pull_hdr(struct mbuf *m, int off, void *p, int len, u_short *actionp, u_short *reasonp, sa_family_t af) { switch (af) { #ifdef INET case AF_INET: { struct ip *h = mtod(m, struct ip *); u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; if (fragoff) { if (fragoff >= len) ACTION_SET(actionp, PF_PASS); else { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_FRAG); } return (NULL); } if (m->m_pkthdr.len < off + len || ntohs(h->ip_len) < off + len) { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_SHORT); return (NULL); } break; } #endif /* INET */ #ifdef INET6 case AF_INET6: { struct ip6_hdr *h = mtod(m, struct ip6_hdr *); if (m->m_pkthdr.len < off + len || (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < (unsigned)(off + len)) { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_SHORT); return (NULL); } break; } #endif /* INET6 */ } m_copydata(m, off, len, p); return (p); } int pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif, int rtableid) { struct ifnet *ifp; /* * Skip check for addresses with embedded interface scope, * as they would always match anyway. */ if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6)) return (1); if (af != AF_INET && af != AF_INET6) return (0); /* Skip checks for ipsec interfaces */ if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) return (1); ifp = (kif != NULL) ? kif->pfik_ifp : NULL; switch (af) { #ifdef INET6 case AF_INET6: return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE, ifp)); #endif #ifdef INET case AF_INET: return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE, ifp)); #endif } return (0); } #ifdef INET static void pf_route(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp, struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp) { struct mbuf *m0, *m1, *md; struct sockaddr_in dst; struct ip *ip; struct ifnet *ifp = NULL; struct pf_addr naddr; struct pf_ksrc_node *sn = NULL; int error = 0; uint16_t ip_len, ip_off; KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction", __func__)); if ((pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || pd->pf_mtag->routed++ > 3) { m0 = *m; *m = NULL; goto bad_locked; } if (r->rt == PF_DUPTO) { if ((pd->pf_mtag->flags & PF_DUPLICATED)) { if (s == NULL) { ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; } else { ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; /* If pfsync'd */ if (ifp == NULL) ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; PF_STATE_UNLOCK(s); } if (ifp == oifp) { /* When the 2nd interface is not skipped */ return; } else { m0 = *m; *m = NULL; goto bad; } } else { pd->pf_mtag->flags |= PF_DUPLICATED; if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) { if (s) PF_STATE_UNLOCK(s); return; } } } else { if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { pf_dummynet(pd, dir, s, r, m); if (s) PF_STATE_UNLOCK(s); return; } m0 = *m; } ip = mtod(m0, struct ip *); bzero(&dst, sizeof(dst)); dst.sin_family = AF_INET; dst.sin_len = sizeof(dst); dst.sin_addr = ip->ip_dst; bzero(&naddr, sizeof(naddr)); if (s == NULL) { if (TAILQ_EMPTY(&r->rpool.list)) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__)); goto bad_locked; } pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, &naddr, NULL, &sn); if (!PF_AZERO(&naddr, AF_INET)) dst.sin_addr.s_addr = naddr.v4.s_addr; ifp = r->rpool.cur->kif ? 
r->rpool.cur->kif->pfik_ifp : NULL; } else { if (!PF_AZERO(&s->rt_addr, AF_INET)) dst.sin_addr.s_addr = s->rt_addr.v4.s_addr; ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; PF_STATE_UNLOCK(s); } /* If pfsync'd */ if (ifp == NULL) ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; if (ifp == NULL) goto bad; if (dir == PF_IN) { if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS) goto bad; else if (m0 == NULL) goto done; if (m0->m_len < sizeof(struct ip)) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: m0->m_len < sizeof(struct ip)\n", __func__)); goto bad; } ip = mtod(m0, struct ip *); } if (ifp->if_flags & IFF_LOOPBACK) m0->m_flags |= M_SKIP_FIREWALL; ip_len = ntohs(ip->ip_len); ip_off = ntohs(ip->ip_off); /* Copied from FreeBSD 10.0-CURRENT ip_output. */ m0->m_pkthdr.csum_flags |= CSUM_IP; if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) { in_delayed_cksum(m0); m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } #if defined(SCTP) || defined(SCTP_SUPPORT) if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) { sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2)); m0->m_pkthdr.csum_flags &= ~CSUM_SCTP; } #endif /* * If small enough for interface, or the interface will take * care of the fragmentation for us, we can just send directly. */ if (ip_len <= ifp->if_mtu || (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) { ip->ip_sum = 0; if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) { ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); m0->m_pkthdr.csum_flags &= ~CSUM_IP; } m_clrprotoflags(m0); /* Avoid confusing lower layers. */ md = m0; error = pf_dummynet_route(pd, dir, s, r, ifp, sintosa(&dst), &md); if (md != NULL) error = (*ifp->if_output)(ifp, md, sintosa(&dst), NULL); goto done; } /* Balk when DF bit is set or the interface didn't support TSO. */ if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { error = EMSGSIZE; KMOD_IPSTAT_INC(ips_cantfrag); if (r->rt != PF_DUPTO) { if (s && pd->nat_rule != NULL) PACKET_UNDO_NAT(m0, pd, (ip->ip_hl << 2) + (ip_off & IP_OFFMASK), s, dir); icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, ifp->if_mtu); goto done; } else goto bad; } error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist); if (error) goto bad; for (; m0; m0 = m1) { m1 = m0->m_nextpkt; m0->m_nextpkt = NULL; if (error == 0) { m_clrprotoflags(m0); md = m0; error = pf_dummynet_route(pd, dir, s, r, ifp, sintosa(&dst), &md); if (md != NULL) error = (*ifp->if_output)(ifp, md, sintosa(&dst), NULL); } else m_freem(m0); } if (error == 0) KMOD_IPSTAT_INC(ips_fragmented); done: if (r->rt != PF_DUPTO) *m = NULL; return; bad_locked: if (s) PF_STATE_UNLOCK(s); bad: m_freem(m0); goto done; } #endif /* INET */ #ifdef INET6 static void pf_route6(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp, struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp) { struct mbuf *m0, *md; struct sockaddr_in6 dst; struct ip6_hdr *ip6; struct ifnet *ifp = NULL; struct pf_addr naddr; struct pf_ksrc_node *sn = NULL; KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction", __func__)); if ((pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || pd->pf_mtag->routed++ > 3) { m0 = *m; *m = NULL; goto bad_locked; } if (r->rt == PF_DUPTO) { if ((pd->pf_mtag->flags & PF_DUPLICATED)) { if (s == NULL) { ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; } else { ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; /* If pfsync'd */ if (ifp == NULL) ifp = r->rpool.cur->kif ? 
r->rpool.cur->kif->pfik_ifp : NULL; PF_STATE_UNLOCK(s); } if (ifp == oifp) { /* When the 2nd interface is not skipped */ return; } else { m0 = *m; *m = NULL; goto bad; } } else { pd->pf_mtag->flags |= PF_DUPLICATED; if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) { if (s) PF_STATE_UNLOCK(s); return; } } } else { if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { pf_dummynet(pd, dir, s, r, m); if (s) PF_STATE_UNLOCK(s); return; } m0 = *m; } ip6 = mtod(m0, struct ip6_hdr *); bzero(&dst, sizeof(dst)); dst.sin6_family = AF_INET6; dst.sin6_len = sizeof(dst); dst.sin6_addr = ip6->ip6_dst; bzero(&naddr, sizeof(naddr)); if (s == NULL) { if (TAILQ_EMPTY(&r->rpool.list)) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__)); goto bad_locked; } pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, &naddr, NULL, &sn); if (!PF_AZERO(&naddr, AF_INET6)) PF_ACPY((struct pf_addr *)&dst.sin6_addr, &naddr, AF_INET6); ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; } else { if (!PF_AZERO(&s->rt_addr, AF_INET6)) PF_ACPY((struct pf_addr *)&dst.sin6_addr, &s->rt_addr, AF_INET6); ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; } if (s) PF_STATE_UNLOCK(s); /* If pfsync'd */ if (ifp == NULL) ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; if (ifp == NULL) goto bad; if (dir == PF_IN) { if (pf_test6(PF_OUT, 0, ifp, &m0, inp) != PF_PASS) goto bad; else if (m0 == NULL) goto done; if (m0->m_len < sizeof(struct ip6_hdr)) { DPFPRINTF(PF_DEBUG_URGENT, ("%s: m0->m_len < sizeof(struct ip6_hdr)\n", __func__)); goto bad; } ip6 = mtod(m0, struct ip6_hdr *); } if (ifp->if_flags & IFF_LOOPBACK) m0->m_flags |= M_SKIP_FIREWALL; if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 & ~ifp->if_hwassist) { uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6); in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr)); m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6; } /* * If the packet is too large for the outgoing interface, * send back an icmp6 error. */ if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr)) dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index); if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { md = m0; pf_dummynet_route(pd, dir, s, r, ifp, sintosa(&dst), &md); if (md != NULL) nd6_output_ifp(ifp, ifp, md, &dst, NULL); } else { in6_ifstat_inc(ifp, ifs6_in_toobig); if (r->rt != PF_DUPTO) { if (s && pd->nat_rule != NULL) PACKET_UNDO_NAT(m0, pd, ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr), s, dir); icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); } else goto bad; } done: if (r->rt != PF_DUPTO) *m = NULL; return; bad_locked: if (s) PF_STATE_UNLOCK(s); bad: m_freem(m0); goto done; } #endif /* INET6 */ /* * FreeBSD supports cksum offloads for the following drivers. * em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4) * * CSUM_DATA_VALID | CSUM_PSEUDO_HDR : * network driver performed cksum including pseudo header, need to verify * csum_data * CSUM_DATA_VALID : * network driver performed cksum, needs to additional pseudo header * cksum computation with partial csum_data(i.e. lack of H/W support for * pseudo header, for instance sk(4) and possibly gem(4)) * * After validating the cksum of packet, set both flag CSUM_DATA_VALID and * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper * TCP/UDP layer. * Also, set csum_data to 0xffff to force cksum validation. 
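 *
 * Editor's note: in the CSUM_DATA_VALID-without-CSUM_PSEUDO_HDR case
 * the driver left a partial sum in csum_data and the pseudo header
 * still has to be folded in by software.  The completion step used
 * below, shown in isolation (sketch for an IPv4 TCP segment of "len"
 * bytes starting at the TCP header):
 *
 *	struct ip *ip = mtod(m, struct ip *);
 *	u_int16_t sum;
 *
 *	sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP));
 *	sum ^= 0xffff;
 *
 * A final sum of zero means the segment checksummed correctly, which
 * is why the code below only counts a bad checksum when sum is
 * non-zero.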
*/ static int pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af) { u_int16_t sum = 0; int hw_assist = 0; struct ip *ip; if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) return (1); if (m->m_pkthdr.len < off + len) return (1); switch (p) { case IPPROTO_TCP: if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { sum = m->m_pkthdr.csum_data; } else { ip = mtod(m, struct ip *); sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_TCP)); } sum ^= 0xffff; ++hw_assist; } break; case IPPROTO_UDP: if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { sum = m->m_pkthdr.csum_data; } else { ip = mtod(m, struct ip *); sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl((u_short)len + m->m_pkthdr.csum_data + IPPROTO_UDP)); } sum ^= 0xffff; ++hw_assist; } break; case IPPROTO_ICMP: #ifdef INET6 case IPPROTO_ICMPV6: #endif /* INET6 */ break; default: return (1); } if (!hw_assist) { switch (af) { case AF_INET: if (p == IPPROTO_ICMP) { if (m->m_len < off) return (1); m->m_data += off; m->m_len -= off; sum = in_cksum(m, len); m->m_data -= off; m->m_len += off; } else { if (m->m_len < sizeof(struct ip)) return (1); sum = in4_cksum(m, p, off, len); } break; #ifdef INET6 case AF_INET6: if (m->m_len < sizeof(struct ip6_hdr)) return (1); sum = in6_cksum(m, p, off, len); break; #endif /* INET6 */ default: return (1); } } if (sum) { switch (p) { case IPPROTO_TCP: { KMOD_TCPSTAT_INC(tcps_rcvbadsum); break; } case IPPROTO_UDP: { KMOD_UDPSTAT_INC(udps_badsum); break; } #ifdef INET case IPPROTO_ICMP: { KMOD_ICMPSTAT_INC(icps_checksum); break; } #endif #ifdef INET6 case IPPROTO_ICMPV6: { KMOD_ICMP6STAT_INC(icp6s_checksum); break; } #endif /* INET6 */ } return (1); } else { if (p == IPPROTO_TCP || p == IPPROTO_UDP) { m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } } return (0); } static bool pf_pdesc_to_dnflow(int dir, const struct pf_pdesc *pd, const struct pf_krule *r, const struct pf_kstate *s, struct ip_fw_args *dnflow) { int dndir = r->direction; if (s && dndir == PF_INOUT) { dndir = s->direction; } else if (dndir == PF_INOUT) { /* Assume primary direction. Happens when we've set dnpipe in * the ethernet level code. 
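 *
 * Editor's note: the dir/dndir comparison below reduces to a small
 * rule (sketch, hypothetical helper; assumes pd->act is the
 * struct pf_rule_actions used elsewhere in this patch):
 *
 *	static uint32_t
 *	dn_pick_pipe(int dir, int dndir, const struct pf_rule_actions *a)
 *	{
 *		return (dir == dndir ? a->dnpipe : a->dnrpipe);
 *	}
 *
 * with the added twist that a pipe number of zero means "no pipe", in
 * which case the function below returns false and the packet bypasses
 * dummynet entirely.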
*/ dndir = dir; } memset(dnflow, 0, sizeof(*dnflow)); if (pd->dport != NULL) dnflow->f_id.dst_port = ntohs(*pd->dport); if (pd->sport != NULL) dnflow->f_id.src_port = ntohs(*pd->sport); if (dir == PF_IN) dnflow->flags |= IPFW_ARGS_IN; else dnflow->flags |= IPFW_ARGS_OUT; if (dir != dndir && pd->act.dnrpipe) { dnflow->rule.info = pd->act.dnrpipe; } else if (dir == dndir && pd->act.dnpipe) { dnflow->rule.info = pd->act.dnpipe; } else { return (false); } dnflow->rule.info |= IPFW_IS_DUMMYNET; - if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFRULE_DN_IS_PIPE) + if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFSTATE_DN_IS_PIPE) dnflow->rule.info |= IPFW_IS_PIPE; dnflow->f_id.proto = pd->proto; dnflow->f_id.extra = dnflow->rule.info; switch (pd->af) { case AF_INET: dnflow->f_id.addr_type = 4; dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr); dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr); break; case AF_INET6: dnflow->flags |= IPFW_ARGS_IP6; dnflow->f_id.addr_type = 6; dnflow->f_id.src_ip6 = pd->src->v6; dnflow->f_id.dst_ip6 = pd->dst->v6; break; default: panic("Invalid AF"); break; } return (true); } int pf_test_eth(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) { struct pfi_kkif *kif; struct mbuf *m = *m0; M_ASSERTPKTHDR(m); MPASS(ifp->if_vnet == curvnet); NET_EPOCH_ASSERT(); if (!V_pf_status.running) return (PF_PASS); kif = (struct pfi_kkif *)ifp->if_pf_kif; if (kif == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); return (PF_DROP); } if (kif->pfik_flags & PFI_IFLAG_SKIP) return (PF_PASS); if (m->m_flags & M_SKIP_FIREWALL) return (PF_PASS); /* Stateless! */ return (pf_test_eth_rule(dir, kif, m0)); } static int pf_dummynet(struct pf_pdesc *pd, int dir, struct pf_kstate *s, struct pf_krule *r, struct mbuf **m0) { return (pf_dummynet_route(pd, dir, s, r, NULL, NULL, m0)); } static int pf_dummynet_route(struct pf_pdesc *pd, int dir, struct pf_kstate *s, struct pf_krule *r, struct ifnet *ifp, struct sockaddr *sa, struct mbuf **m0) { NET_EPOCH_ASSERT(); if (s && (s->dnpipe || s->dnrpipe)) { pd->act.dnpipe = s->dnpipe; pd->act.dnrpipe = s->dnrpipe; pd->act.flags = s->state_flags; } else if (r->dnpipe || r->dnrpipe) { pd->act.dnpipe = r->dnpipe; pd->act.dnrpipe = r->dnrpipe; pd->act.flags = r->free_flags; } if (pd->act.dnpipe || pd->act.dnrpipe) { struct ip_fw_args dnflow; if (ip_dn_io_ptr == NULL) { m_freem(*m0); *m0 = NULL; return (ENOMEM); } if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(*m0)) == NULL)) { m_freem(*m0); *m0 = NULL; return (ENOMEM); } if (ifp != NULL) { pd->pf_mtag->flags |= PF_TAG_ROUTE_TO; pd->pf_mtag->if_index = ifp->if_index; pd->pf_mtag->if_idxgen = ifp->if_idxgen; MPASS(sa != NULL); if (pd->af == AF_INET) memcpy(&pd->pf_mtag->dst, sa, sizeof(struct sockaddr_in)); else memcpy(&pd->pf_mtag->dst, sa, sizeof(struct sockaddr_in6)); } if (pf_pdesc_to_dnflow(dir, pd, r, s, &dnflow)) { pd->pf_mtag->flags |= PF_TAG_DUMMYNET; ip_dn_io_ptr(m0, &dnflow); if (*m0 != NULL) pd->pf_mtag->flags &= ~PF_TAG_DUMMYNET; } } return (0); } #ifdef INET int pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) { struct pfi_kkif *kif; u_short action, reason = 0, log = 0; struct mbuf *m = *m0; struct ip *h = NULL; struct m_tag *ipfwtag; struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr; struct pf_kstate *s = NULL; struct pf_kruleset *ruleset = NULL; struct pf_pdesc pd; int off, dirndx, pqid = 0; PF_RULES_RLOCK_TRACKER; KASSERT(dir == PF_IN || dir == PF_OUT, 
("%s: bad direction %d\n", __func__, dir)); M_ASSERTPKTHDR(m); if (!V_pf_status.running) return (PF_PASS); PF_RULES_RLOCK(); kif = (struct pfi_kkif *)ifp->if_pf_kif; if (__predict_false(kif == NULL)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); PF_RULES_RUNLOCK(); return (PF_DROP); } if (kif->pfik_flags & PFI_IFLAG_SKIP) { PF_RULES_RUNLOCK(); return (PF_PASS); } if (m->m_flags & M_SKIP_FIREWALL) { PF_RULES_RUNLOCK(); return (PF_PASS); } memset(&pd, 0, sizeof(pd)); pd.pf_mtag = pf_find_mtag(m); if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_TAG_ROUTE_TO)) { pd.pf_mtag->flags &= ~PF_TAG_ROUTE_TO; ifp = ifnet_byindexgen(pd.pf_mtag->if_index, pd.pf_mtag->if_idxgen); if (ifp == NULL || ifp->if_flags & IFF_DYING) { PF_RULES_RUNLOCK(); m_freem(*m0); *m0 = NULL; return (PF_PASS); } PF_RULES_RUNLOCK(); (ifp->if_output)(ifp, m, sintosa(&pd.pf_mtag->dst), NULL); *m0 = NULL; return (PF_PASS); } if (pd.pf_mtag && pd.pf_mtag->dnpipe) { pd.act.dnpipe = pd.pf_mtag->dnpipe; pd.act.flags = pd.pf_mtag->dnflags; } if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL && pd.pf_mtag->flags & PF_TAG_DUMMYNET) { /* Dummynet re-injects packets after they've * completed their delay. We've already * processed them, so pass unconditionally. */ /* But only once. We may see the packet multiple times (e.g. * PFIL_IN/PFIL_OUT). */ pd.pf_mtag->flags &= ~PF_TAG_DUMMYNET; PF_RULES_RUNLOCK(); return (PF_PASS); } if (__predict_false(ip_divert_ptr != NULL) && ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) { struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1); if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { action = PF_DROP; goto done; } pd.pf_mtag->flags |= PF_PACKET_LOOPED; m_tag_delete(m, ipfwtag); } if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) { m->m_flags |= M_FASTFWD_OURS; pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT; } } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { /* We do IP header normalization and packet reassembly here */ action = PF_DROP; goto done; } m = *m0; /* pf_normalize messes with m0 */ h = mtod(m, struct ip *); off = h->ip_hl << 2; if (off < (int)sizeof(struct ip)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); - log = 1; + log = PF_LOG_FORCE; goto done; } pd.src = (struct pf_addr *)&h->ip_src; pd.dst = (struct pf_addr *)&h->ip_dst; pd.sport = pd.dport = NULL; pd.ip_sum = &h->ip_sum; pd.proto_sum = NULL; pd.proto = h->ip_p; pd.dir = dir; pd.sidx = (dir == PF_IN) ? 0 : 1; pd.didx = (dir == PF_IN) ? 1 : 0; pd.af = AF_INET; pd.tos = h->ip_tos & ~IPTOS_ECN_MASK; pd.tot_len = ntohs(h->ip_len); + pd.act.rtableid = -1; /* handle fragments that didn't get reassembled by normalization */ if (h->ip_off & htons(IP_MF | IP_OFFMASK)) { action = pf_test_fragment(&r, dir, kif, m, h, &pd, &a, &ruleset); goto done; } switch (h->ip_p) { case IPPROTO_TCP: { if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp), &action, &reason, AF_INET)) { - log = action != PF_PASS; + if (action != PF_PASS) + log = PF_LOG_FORCE; goto done; } pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2); pd.sport = &pd.hdr.tcp.th_sport; pd.dport = &pd.hdr.tcp.th_dport; /* Respond to SYN with a syncookie. 
*/ if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN && pd.dir == PF_IN && pf_synflood_check(&pd)) { pf_syncookie_send(m, off, &pd); action = PF_DROP; break; } if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0) pqid = 1; action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); if (action == PF_DROP) goto done; action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, &reason); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) { /* Validate remote SYN|ACK, re-create original SYN if * valid. */ if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK && pf_syncookie_validate(&pd) && pd.dir == PF_IN) { struct mbuf *msyn; msyn = pf_syncookie_recreate_syn(h->ip_ttl, off,&pd); if (msyn == NULL) { action = PF_DROP; break; } action = pf_test(dir, pflags, ifp, &msyn, inp); m_freem(msyn); if (action == PF_PASS) { action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, &reason); if (action != PF_PASS || s == NULL) { action = PF_DROP; break; } s->src.seqhi = ntohl(pd.hdr.tcp.th_ack) - 1; s->src.seqlo = ntohl(pd.hdr.tcp.th_seq) - 1; pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_DST); action = pf_synproxy(&pd, &s, &reason); if (action != PF_PASS) break; } break; } else { action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); } } + if (s) { + if (s->max_mss) + pf_normalize_mss(m, off, &pd, s->max_mss); + } else if (r->max_mss) + pf_normalize_mss(m, off, &pd, r->max_mss); break; } case IPPROTO_UDP: { if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp), &action, &reason, AF_INET)) { - log = action != PF_PASS; + if (action != PF_PASS) + log = PF_LOG_FORCE; goto done; } pd.sport = &pd.hdr.udp.uh_sport; pd.dport = &pd.hdr.udp.uh_dport; if (pd.hdr.udp.uh_dport == 0 || ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off || ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); goto done; } action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); break; } case IPPROTO_ICMP: { if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN, &action, &reason, AF_INET)) { - log = action != PF_PASS; + if (action != PF_PASS) + log = PF_LOG_FORCE; goto done; } action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, &reason); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); break; } #ifdef INET6 case IPPROTO_ICMPV6: { action = PF_DROP; DPFPRINTF(PF_DEBUG_MISC, ("pf: dropping IPv4 packet with ICMPv6 payload\n")); goto done; } #endif default: action = pf_test_state_other(&s, dir, kif, m, &pd); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); break; } done: PF_RULES_RUNLOCK(); if (action == PF_PASS && h->ip_hl > 5 && !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { action = PF_DROP; REASON_SET(&reason, PFRES_IPOPTIONS); - log = r->log; + log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: dropping packet with 
ip options\n")); } + if (s) { + pf_scrub_ip(&m, s->state_flags, s->min_ttl, s->set_tos); + if (s->rtableid >= 0) + M_SETFIB(m, s->rtableid); +#ifdef ALTQ + if (s->qid) { + pd.act.pqid = s->pqid; + pd.act.qid = s->qid; + } +#endif + } else { + pf_scrub_ip(&m, r->scrub_flags, r->min_ttl, r->set_tos); + if (r->rtableid >= 0) + M_SETFIB(m, r->rtableid); +#ifdef ALTQ + if (r->qid) { + pd.act.pqid = r->pqid; + pd.act.qid = r->qid; + } +#endif + } + if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } - if (r->rtableid >= 0) - M_SETFIB(m, r->rtableid); if (r->scrub_flags & PFSTATE_SETPRIO) { if (pd.tos & IPTOS_LOWDELAY) pqid = 1; if (vlan_set_pcp(m, r->set_prio[pqid])) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); - log = 1; + log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate 802.1q mtag\n")); } } #ifdef ALTQ - if (s && s->qid) { - pd.act.pqid = s->pqid; - pd.act.qid = s->qid; - } else if (r->qid) { - pd.act.pqid = r->pqid; - pd.act.qid = r->qid; - } if (action == PF_PASS && pd.act.qid) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } else { if (s != NULL) pd.pf_mtag->qid_hash = pf_state_hash(s); if (pqid || (pd.tos & IPTOS_LOWDELAY)) pd.pf_mtag->qid = pd.act.pqid; else pd.pf_mtag->qid = pd.act.qid; /* Add hints for ecn. */ pd.pf_mtag->hdr = h; } } #endif /* ALTQ */ /* * connections redirected to loopback should not match sockets * bound specifically to loopback due to security implications, * see tcp_input() and in_pcblookup_listen(). */ if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && (s->nat_rule.ptr->action == PF_RDR || s->nat_rule.ptr->action == PF_BINAT) && IN_LOOPBACK(ntohl(pd.dst->v4.s_addr))) m->m_flags |= M_SKIP_FIREWALL; if (__predict_false(ip_divert_ptr != NULL) && action == PF_PASS && r->divert.port && !PACKET_LOOPED(&pd)) { ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0, sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO); if (ipfwtag != NULL) { ((struct ipfw_rule_ref *)(ipfwtag+1))->info = ntohs(r->divert.port); ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir; if (s) PF_STATE_UNLOCK(s); m_tag_prepend(m, ipfwtag); if (m->m_flags & M_FASTFWD_OURS) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); - log = 1; + log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate tag\n")); } else { pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT; m->m_flags &= ~M_FASTFWD_OURS; } } ip_divert_ptr(*m0, dir == PF_IN); *m0 = NULL; return (action); } else { /* XXX: ipfw has the same behaviour! 
*/ action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); - log = 1; + log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate divert tag\n")); } } if (log) { - struct pf_krule *lr; + struct pf_krule *lr; + struct pf_krule_item *ri; if (s != NULL && s->nat_rule.ptr != NULL && s->nat_rule.ptr->log & PF_LOG_ALL) lr = s->nat_rule.ptr; else lr = r; - PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd, - (s == NULL)); + + if (log & PF_LOG_FORCE || lr->log & PF_LOG_ALL) + PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, + ruleset, &pd, (s == NULL)); + if (s) { + SLIST_FOREACH(ri, &s->match_rules, entry) + if (ri->r->log & PF_LOG_ALL) + PFLOG_PACKET(kif, m, AF_INET, dir, + reason, ri->r, a, ruleset, &pd, 0); + } } pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS], pd.tot_len); pf_counter_u64_add_protected(&kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS], 1); if (action == PF_PASS || r->action == PF_DROP) { dirndx = (dir == PF_OUT); pf_counter_u64_add_protected(&r->packets[dirndx], 1); pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len); pf_update_timestamp(r); if (a != NULL) { pf_counter_u64_add_protected(&a->packets[dirndx], 1); pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len); } if (s != NULL) { + struct pf_krule_item *ri; + if (s->nat_rule.ptr != NULL) { pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx], 1); pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx], pd.tot_len); } if (s->src_node != NULL) { counter_u64_add(s->src_node->packets[dirndx], 1); counter_u64_add(s->src_node->bytes[dirndx], pd.tot_len); } if (s->nat_src_node != NULL) { counter_u64_add(s->nat_src_node->packets[dirndx], 1); counter_u64_add(s->nat_src_node->bytes[dirndx], pd.tot_len); } dirndx = (dir == s->direction) ? 0 : 1; s->packets[dirndx]++; s->bytes[dirndx] += pd.tot_len; + SLIST_FOREACH(ri, &s->match_rules, entry) { + pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1); + pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd.tot_len); + } } tr = r; nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; if (nr != NULL && r == &V_pf_default_rule) tr = nr; if (tr->src.addr.type == PF_ADDR_TABLE) pfr_update_stats(tr->src.addr.p.tbl, (s == NULL) ? pd.src : &s->key[(s->direction == PF_IN)]-> addr[(s->direction == PF_OUT)], pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->src.neg); if (tr->dst.addr.type == PF_ADDR_TABLE) pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL) ? pd.dst : &s->key[(s->direction == PF_IN)]-> addr[(s->direction == PF_IN)], pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->dst.neg); } pf_counter_u64_critical_exit(); switch (action) { case PF_SYNPROXY_DROP: m_freem(*m0); case PF_DEFER: *m0 = NULL; action = PF_PASS; break; case PF_DROP: m_freem(*m0); *m0 = NULL; break; default: /* pf_route() returns unlocked. 
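 *
 * Editor's note: in the switch above, PF_SYNPROXY_DROP intentionally
 * falls through into PF_DEFER; both consume the packet (*m0 = NULL)
 * while reporting PF_PASS to the caller, so the stack treats the
 * swallowed packet as delivered.  In this default case, route-to
 * rules hand the packet to pf_route(), which takes over both the mbuf
 * and the state lock; that is why the route-to path returns early,
 * skipping the PF_STATE_UNLOCK() done on the normal path below.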
*/ if (r->rt) { pf_route(m0, r, dir, kif->pfik_ifp, s, &pd, inp); return (action); } if (pf_dummynet(&pd, dir, s, r, m0) != 0) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } break; } SDT_PROBE4(pf, ip, test, done, action, reason, r, s); if (s) PF_STATE_UNLOCK(s); return (action); } #endif /* INET */ #ifdef INET6 int pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) { struct pfi_kkif *kif; u_short action, reason = 0, log = 0; struct mbuf *m = *m0, *n = NULL; struct m_tag *mtag; struct ip6_hdr *h = NULL; struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr; struct pf_kstate *s = NULL; struct pf_kruleset *ruleset = NULL; struct pf_pdesc pd; int off, terminal = 0, dirndx, rh_cnt = 0, pqid = 0; PF_RULES_RLOCK_TRACKER; KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir)); M_ASSERTPKTHDR(m); if (!V_pf_status.running) return (PF_PASS); PF_RULES_RLOCK(); kif = (struct pfi_kkif *)ifp->if_pf_kif; if (__predict_false(kif == NULL)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname)); PF_RULES_RUNLOCK(); return (PF_DROP); } if (kif->pfik_flags & PFI_IFLAG_SKIP) { PF_RULES_RUNLOCK(); return (PF_PASS); } if (m->m_flags & M_SKIP_FIREWALL) { PF_RULES_RUNLOCK(); return (PF_PASS); } memset(&pd, 0, sizeof(pd)); pd.pf_mtag = pf_find_mtag(m); if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_TAG_ROUTE_TO)) { pd.pf_mtag->flags &= ~PF_TAG_ROUTE_TO; ifp = ifnet_byindexgen(pd.pf_mtag->if_index, pd.pf_mtag->if_idxgen); if (ifp == NULL || ifp->if_flags & IFF_DYING) { PF_RULES_RUNLOCK(); m_freem(*m0); *m0 = NULL; return (PF_PASS); } PF_RULES_RUNLOCK(); nd6_output_ifp(ifp, ifp, m, (struct sockaddr_in6 *)&pd.pf_mtag->dst, NULL); *m0 = NULL; return (PF_PASS); } if (pd.pf_mtag && pd.pf_mtag->dnpipe) { pd.act.dnpipe = pd.pf_mtag->dnpipe; pd.act.flags = pd.pf_mtag->dnflags; } if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL && pd.pf_mtag->flags & PF_TAG_DUMMYNET) { pd.pf_mtag->flags &= ~PF_TAG_DUMMYNET; /* Dummynet re-injects packets after they've * completed their delay. We've already * processed them, so pass unconditionally. */ PF_RULES_RUNLOCK(); return (PF_PASS); } /* We do IP header normalization and packet reassembly here */ if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) { action = PF_DROP; goto done; } m = *m0; /* pf_normalize messes with m0 */ h = mtod(m, struct ip6_hdr *); /* * we do not support jumbogram. if we keep going, zero ip6_plen * will do something bad, so drop the packet for now. */ if (htons(h->ip6_plen) == 0) { action = PF_DROP; REASON_SET(&reason, PFRES_NORM); /*XXX*/ goto done; } pd.src = (struct pf_addr *)&h->ip6_src; pd.dst = (struct pf_addr *)&h->ip6_dst; pd.sport = pd.dport = NULL; pd.ip_sum = NULL; pd.proto_sum = NULL; pd.dir = dir; pd.sidx = (dir == PF_IN) ? 0 : 1; pd.didx = (dir == PF_IN) ? 
1 : 0; pd.af = AF_INET6; pd.tos = IPV6_DSCP(h); pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); + pd.act.rtableid = -1; off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr); pd.proto = h->ip6_nxt; do { switch (pd.proto) { case IPPROTO_FRAGMENT: action = pf_test_fragment(&r, dir, kif, m, h, &pd, &a, &ruleset); if (action == PF_DROP) REASON_SET(&reason, PFRES_FRAG); goto done; case IPPROTO_ROUTING: { struct ip6_rthdr rthdr; if (rh_cnt++) { DPFPRINTF(PF_DEBUG_MISC, ("pf: IPv6 more than one rthdr\n")); action = PF_DROP; REASON_SET(&reason, PFRES_IPOPTIONS); - log = 1; + log = PF_LOG_FORCE; goto done; } if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL, &reason, pd.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: IPv6 short rthdr\n")); action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); - log = 1; + log = PF_LOG_FORCE; goto done; } if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) { DPFPRINTF(PF_DEBUG_MISC, ("pf: IPv6 rthdr0\n")); action = PF_DROP; REASON_SET(&reason, PFRES_IPOPTIONS); - log = 1; + log = PF_LOG_FORCE; goto done; } /* FALLTHROUGH */ } case IPPROTO_AH: case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: { /* get next header and header length */ struct ip6_ext opt6; if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6), NULL, &reason, pd.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: IPv6 short opt\n")); action = PF_DROP; - log = 1; + log = PF_LOG_FORCE; goto done; } if (pd.proto == IPPROTO_AH) off += (opt6.ip6e_len + 2) * 4; else off += (opt6.ip6e_len + 1) * 8; pd.proto = opt6.ip6e_nxt; /* goto the next header */ break; } default: terminal++; break; } } while (!terminal); /* if there's no routing header, use unmodified mbuf for checksumming */ if (!n) n = m; switch (pd.proto) { case IPPROTO_TCP: { if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp), &action, &reason, AF_INET6)) { - log = action != PF_PASS; + if (action != PF_PASS) + log |= PF_LOG_FORCE; goto done; } pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2); pd.sport = &pd.hdr.tcp.th_sport; pd.dport = &pd.hdr.tcp.th_dport; action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); if (action == PF_DROP) goto done; action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, &reason); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); + if (s) { + if (s->max_mss) + pf_normalize_mss(m, off, &pd, s->max_mss); + } else if (r->max_mss) + pf_normalize_mss(m, off, &pd, r->max_mss); break; } case IPPROTO_UDP: { if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp), &action, &reason, AF_INET6)) { - log = action != PF_PASS; + if (action != PF_PASS) + log |= PF_LOG_FORCE; goto done; } pd.sport = &pd.hdr.udp.uh_sport; pd.dport = &pd.hdr.udp.uh_dport; if (pd.hdr.udp.uh_dport == 0 || ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off || ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); goto done; } action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); break; } case IPPROTO_ICMP: { action = PF_DROP; DPFPRINTF(PF_DEBUG_MISC, ("pf: dropping IPv6 packet with ICMPv4 payload\n")); goto done; } case IPPROTO_ICMPV6: { if (!pf_pull_hdr(m, off, &pd.hdr.icmp6, sizeof(pd.hdr.icmp6), &action, 
&reason, AF_INET6)) { - log = action != PF_PASS; + if (action != PF_PASS) + log |= PF_LOG_FORCE; goto done; } action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, &reason); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); break; } default: action = pf_test_state_other(&s, dir, kif, m, &pd); if (action == PF_PASS) { if (V_pfsync_update_state_ptr != NULL) V_pfsync_update_state_ptr(s); r = s->rule.ptr; a = s->anchor.ptr; log = s->log; } else if (s == NULL) action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, &a, &ruleset, inp); break; } done: PF_RULES_RUNLOCK(); if (n != m) { m_freem(n); n = NULL; } /* handle dangerous IPv6 extension headers. */ if (action == PF_PASS && rh_cnt && !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { action = PF_DROP; REASON_SET(&reason, PFRES_IPOPTIONS); log = r->log; DPFPRINTF(PF_DEBUG_MISC, ("pf: dropping packet with dangerous v6 headers\n")); } if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } - if (r->rtableid >= 0) - M_SETFIB(m, r->rtableid); + + if (s) { + pf_scrub_ip6(&m, s->state_flags, s->min_ttl, s->set_tos); + if (s->rtableid >= 0) + M_SETFIB(m, s->rtableid); +#ifdef ALTQ + if (s->qid) { + pd.act.pqid = s->pqid; + pd.act.qid = s->qid; + } +#endif + } else { + pf_scrub_ip6(&m, r->scrub_flags, r->min_ttl, r->set_tos); + if (r->rtableid >= 0) + M_SETFIB(m, r->rtableid); +#ifdef ALTQ + if (r->qid) { + pd.act.pqid = r->pqid; + pd.act.qid = r->qid; + } +#endif + } if (r->scrub_flags & PFSTATE_SETPRIO) { if (pd.tos & IPTOS_LOWDELAY) pqid = 1; if (vlan_set_pcp(m, r->set_prio[pqid])) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); - log = 1; + log = PF_LOG_FORCE; DPFPRINTF(PF_DEBUG_MISC, ("pf: failed to allocate 802.1q mtag\n")); } } #ifdef ALTQ - if (s && s->qid) { - pd.act.pqid = s->pqid; - pd.act.qid = s->qid; - } else if (r->qid) { - pd.act.pqid = r->pqid; - pd.act.qid = r->qid; - } if (action == PF_PASS && pd.act.qid) { if (pd.pf_mtag == NULL && ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } else { if (s != NULL) pd.pf_mtag->qid_hash = pf_state_hash(s); if (pd.tos & IPTOS_LOWDELAY) pd.pf_mtag->qid = pd.act.pqid; else pd.pf_mtag->qid = pd.act.qid; /* Add hints for ecn. */ pd.pf_mtag->hdr = h; } } #endif /* ALTQ */ if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && (s->nat_rule.ptr->action == PF_RDR || s->nat_rule.ptr->action == PF_BINAT) && IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)) m->m_flags |= M_SKIP_FIREWALL; /* XXX: Anybody working on it?! 
*/ if (r->divert.port) printf("pf: divert(9) is not supported for IPv6\n"); if (log) { - struct pf_krule *lr; + struct pf_krule *lr; + struct pf_krule_item *ri; if (s != NULL && s->nat_rule.ptr != NULL && s->nat_rule.ptr->log & PF_LOG_ALL) lr = s->nat_rule.ptr; else lr = r; - PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset, - &pd, (s == NULL)); + + if (log & PF_LOG_FORCE || lr->log & PF_LOG_ALL) + PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, + ruleset, &pd, (s == NULL)); + if (s) { + SLIST_FOREACH(ri, &s->match_rules, entry) + if (ri->r->log & PF_LOG_ALL) + PFLOG_PACKET(kif, m, AF_INET6, dir, + reason, ri->r, a, ruleset, &pd, 0); + } } pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS], pd.tot_len); pf_counter_u64_add_protected(&kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS], 1); if (action == PF_PASS || r->action == PF_DROP) { dirndx = (dir == PF_OUT); pf_counter_u64_add_protected(&r->packets[dirndx], 1); pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len); if (a != NULL) { pf_counter_u64_add_protected(&a->packets[dirndx], 1); pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len); } if (s != NULL) { if (s->nat_rule.ptr != NULL) { pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx], 1); pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx], pd.tot_len); } if (s->src_node != NULL) { counter_u64_add(s->src_node->packets[dirndx], 1); counter_u64_add(s->src_node->bytes[dirndx], pd.tot_len); } if (s->nat_src_node != NULL) { counter_u64_add(s->nat_src_node->packets[dirndx], 1); counter_u64_add(s->nat_src_node->bytes[dirndx], pd.tot_len); } dirndx = (dir == s->direction) ? 0 : 1; s->packets[dirndx]++; s->bytes[dirndx] += pd.tot_len; } tr = r; nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; if (nr != NULL && r == &V_pf_default_rule) tr = nr; if (tr->src.addr.type == PF_ADDR_TABLE) pfr_update_stats(tr->src.addr.p.tbl, (s == NULL) ? pd.src : &s->key[(s->direction == PF_IN)]->addr[0], pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->src.neg); if (tr->dst.addr.type == PF_ADDR_TABLE) pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL) ? pd.dst : &s->key[(s->direction == PF_IN)]->addr[1], pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->dst.neg); } pf_counter_u64_critical_exit(); switch (action) { case PF_SYNPROXY_DROP: m_freem(*m0); case PF_DEFER: *m0 = NULL; action = PF_PASS; break; case PF_DROP: m_freem(*m0); *m0 = NULL; break; default: /* pf_route6() returns unlocked. */ if (r->rt) { pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd, inp); return (action); } if (pf_dummynet(&pd, dir, s, r, m0) != 0) { action = PF_DROP; REASON_SET(&reason, PFRES_MEMORY); } break; } if (s) PF_STATE_UNLOCK(s); /* If reassembled packet passed, create new fragments. */ if (action == PF_PASS && *m0 && dir == PF_OUT && (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL) action = pf_refragment6(ifp, m0, mtag, pflags & PFIL_FWD); SDT_PROBE4(pf, ip, test6, done, action, reason, r, s); return (action); } #endif /* INET6 */ diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h index 3e3ba977285a..f226232419f8 100644 --- a/sys/netpfil/pf/pf.h +++ b/sys/netpfil/pf/pf.h @@ -1,682 +1,705 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $OpenBSD: pfvar.h,v 1.282 2009/01/29 15:12:28 pyr Exp $ * $FreeBSD$ */ #ifndef _NET_PF_H_ #define _NET_PF_H_ #include #define PF_TCPS_PROXY_SRC ((TCP_NSTATES)+0) #define PF_TCPS_PROXY_DST ((TCP_NSTATES)+1) #define PF_MD5_DIGEST_LENGTH 16 #ifdef MD5_DIGEST_LENGTH #if PF_MD5_DIGEST_LENGTH != MD5_DIGEST_LENGTH #error #endif #endif enum { PF_INOUT, PF_IN, PF_OUT }; enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT, PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP, PF_DEFER, PF_MATCH }; enum { PF_RULESET_SCRUB, PF_RULESET_FILTER, PF_RULESET_NAT, PF_RULESET_BINAT, PF_RULESET_RDR, PF_RULESET_MAX }; enum { PF_OP_NONE, PF_OP_IRG, PF_OP_EQ, PF_OP_NE, PF_OP_LT, PF_OP_LE, PF_OP_GT, PF_OP_GE, PF_OP_XRG, PF_OP_RRG }; enum { PF_DEBUG_NONE, PF_DEBUG_URGENT, PF_DEBUG_MISC, PF_DEBUG_NOISY }; enum { PF_CHANGE_NONE, PF_CHANGE_ADD_HEAD, PF_CHANGE_ADD_TAIL, PF_CHANGE_ADD_BEFORE, PF_CHANGE_ADD_AFTER, PF_CHANGE_REMOVE, PF_CHANGE_GET_TICKET }; enum { PF_GET_NONE, PF_GET_CLR_CNTR }; enum { PF_SK_WIRE, PF_SK_STACK, PF_SK_BOTH }; enum { PF_PEER_SRC, PF_PEER_DST, PF_PEER_BOTH }; /* * Note about PFTM_*: real indices into pf_rule.timeout[] come before * PFTM_MAX, special cases afterwards. See pf_state_expires(). 
*/ enum { PFTM_TCP_FIRST_PACKET, PFTM_TCP_OPENING, PFTM_TCP_ESTABLISHED, PFTM_TCP_CLOSING, PFTM_TCP_FIN_WAIT, PFTM_TCP_CLOSED, PFTM_UDP_FIRST_PACKET, PFTM_UDP_SINGLE, PFTM_UDP_MULTIPLE, PFTM_ICMP_FIRST_PACKET, PFTM_ICMP_ERROR_REPLY, PFTM_OTHER_FIRST_PACKET, PFTM_OTHER_SINGLE, PFTM_OTHER_MULTIPLE, PFTM_FRAG, PFTM_INTERVAL, PFTM_ADAPTIVE_START, PFTM_ADAPTIVE_END, PFTM_SRC_NODE, PFTM_TS_DIFF, PFTM_MAX, PFTM_PURGE, PFTM_UNLINKED }; /* PFTM default values */ #define PFTM_TCP_FIRST_PACKET_VAL 120 /* First TCP packet */ #define PFTM_TCP_OPENING_VAL 30 /* No response yet */ #define PFTM_TCP_ESTABLISHED_VAL 24*60*60 /* Established */ #define PFTM_TCP_CLOSING_VAL 15 * 60 /* Half closed */ #define PFTM_TCP_FIN_WAIT_VAL 45 /* Got both FINs */ #define PFTM_TCP_CLOSED_VAL 90 /* Got a RST */ #define PFTM_UDP_FIRST_PACKET_VAL 60 /* First UDP packet */ #define PFTM_UDP_SINGLE_VAL 30 /* Unidirectional */ #define PFTM_UDP_MULTIPLE_VAL 60 /* Bidirectional */ #define PFTM_ICMP_FIRST_PACKET_VAL 20 /* First ICMP packet */ #define PFTM_ICMP_ERROR_REPLY_VAL 10 /* Got error response */ #define PFTM_OTHER_FIRST_PACKET_VAL 60 /* First packet */ #define PFTM_OTHER_SINGLE_VAL 30 /* Unidirectional */ #define PFTM_OTHER_MULTIPLE_VAL 60 /* Bidirectional */ #define PFTM_FRAG_VAL 30 /* Fragment expire */ #define PFTM_INTERVAL_VAL 10 /* Expire interval */ #define PFTM_SRC_NODE_VAL 0 /* Source tracking */ #define PFTM_TS_DIFF_VAL 30 /* Allowed TS diff */ enum { PF_NOPFROUTE, PF_FASTROUTE, PF_ROUTETO, PF_DUPTO, PF_REPLYTO }; enum { PF_LIMIT_STATES, PF_LIMIT_SRC_NODES, PF_LIMIT_FRAGS, PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_MAX }; #define PF_POOL_IDMASK 0x0f enum { PF_POOL_NONE, PF_POOL_BITMASK, PF_POOL_RANDOM, PF_POOL_SRCHASH, PF_POOL_ROUNDROBIN }; enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL, PF_ADDR_TABLE, PF_ADDR_URPFFAILED, PF_ADDR_RANGE }; #define PF_POOL_TYPEMASK 0x0f #define PF_POOL_STICKYADDR 0x20 #define PF_WSCALE_FLAG 0x80 #define PF_WSCALE_MASK 0x0f #define PF_LOG 0x01 #define PF_LOG_ALL 0x02 #define PF_LOG_SOCKET_LOOKUP 0x04 +#define PF_LOG_FORCE 0x08 /* Reason codes for passing/dropping a packet */ #define PFRES_MATCH 0 /* Explicit match of a rule */ #define PFRES_BADOFF 1 /* Bad offset for pull_hdr */ #define PFRES_FRAG 2 /* Dropping following fragment */ #define PFRES_SHORT 3 /* Dropping short packet */ #define PFRES_NORM 4 /* Dropping by normalizer */ #define PFRES_MEMORY 5 /* Dropped due to lacking mem */ #define PFRES_TS 6 /* Bad TCP Timestamp (RFC1323) */ #define PFRES_CONGEST 7 /* Congestion (of ipintrq) */ #define PFRES_IPOPTIONS 8 /* IP option */ #define PFRES_PROTCKSUM 9 /* Protocol checksum invalid */ #define PFRES_BADSTATE 10 /* State mismatch */ #define PFRES_STATEINS 11 /* State insertion failure */ #define PFRES_MAXSTATES 12 /* State limit */ #define PFRES_SRCLIMIT 13 /* Source node/conn limit */ #define PFRES_SYNPROXY 14 /* SYN proxy */ #define PFRES_MAPFAILED 15 /* pf_map_addr() failed */ #define PFRES_MAX 16 /* total+1 */ #define PFRES_NAMES { \ "match", \ "bad-offset", \ "fragment", \ "short", \ "normalize", \ "memory", \ "bad-timestamp", \ "congestion", \ "ip-option", \ "proto-cksum", \ "state-mismatch", \ "state-insert", \ "state-limit", \ "src-limit", \ "synproxy", \ "map-failed", \ NULL \ } /* Counters for other things we want to keep track of */ #define LCNT_STATES 0 /* states */ #define LCNT_SRCSTATES 1 /* max-src-states */ #define LCNT_SRCNODES 2 /* max-src-nodes */ #define LCNT_SRCCONN 3 /* max-src-conn */ #define LCNT_SRCCONNRATE 4 /* max-src-conn-rate */ #define
LCNT_OVERLOAD_TABLE 5 /* entry added to overload table */ #define LCNT_OVERLOAD_FLUSH 6 /* state entries flushed */ #define LCNT_MAX 7 /* total+1 */ /* Only available via the nvlist-based API */ #define KLCNT_SYNFLOODS 7 /* synfloods detected */ #define KLCNT_SYNCOOKIES_SENT 8 /* syncookies sent */ #define KLCNT_SYNCOOKIES_VALID 9 /* syncookies validated */ #define KLCNT_MAX 10 /* total+1 */ #define LCNT_NAMES { \ "max states per rule", \ "max-src-states", \ "max-src-nodes", \ "max-src-conn", \ "max-src-conn-rate", \ "overload table insertion", \ "overload flush states", \ NULL \ } #define KLCNT_NAMES { \ "max states per rule", \ "max-src-states", \ "max-src-nodes", \ "max-src-conn", \ "max-src-conn-rate", \ "overload table insertion", \ "overload flush states", \ "synfloods detected", \ "syncookies sent", \ "syncookies validated", \ NULL \ } /* state operation counters */ #define FCNT_STATE_SEARCH 0 #define FCNT_STATE_INSERT 1 #define FCNT_STATE_REMOVALS 2 #define FCNT_MAX 3 #ifdef _KERNEL #define FCNT_NAMES { \ "searches", \ "inserts", \ "removals", \ NULL \ } #endif /* src_node operation counters */ #define SCNT_SRC_NODE_SEARCH 0 #define SCNT_SRC_NODE_INSERT 1 #define SCNT_SRC_NODE_REMOVALS 2 #define SCNT_MAX 3 #define PF_TABLE_NAME_SIZE 32 #define PF_QNAME_SIZE 64 struct pfioc_nv { void *data; size_t len; /* The length of the nvlist data. */ size_t size; /* The total size of the data buffer. */ }; struct pf_rule; /* keep synced with pfi_kif, used in RB_FIND */ struct pfi_kif_cmp { char pfik_name[IFNAMSIZ]; }; struct pfi_kif { char pfik_name[IFNAMSIZ]; union { RB_ENTRY(pfi_kif) _pfik_tree; LIST_ENTRY(pfi_kif) _pfik_list; } _pfik_glue; #define pfik_tree _pfik_glue._pfik_tree #define pfik_list _pfik_glue._pfik_list u_int64_t pfik_packets[2][2][2]; u_int64_t pfik_bytes[2][2][2]; u_int32_t pfik_tzero; u_int pfik_flags; struct ifnet *pfik_ifp; struct ifg_group *pfik_group; u_int pfik_rulerefs; TAILQ_HEAD(, pfi_dynaddr) pfik_dynaddrs; }; struct pf_status { uint64_t counters[PFRES_MAX]; uint64_t lcounters[LCNT_MAX]; uint64_t fcounters[FCNT_MAX]; uint64_t scounters[SCNT_MAX]; uint64_t pcounters[2][2][3]; uint64_t bcounters[2][2]; uint32_t running; uint32_t states; uint32_t src_nodes; uint32_t since; uint32_t debug; uint32_t hostid; char ifname[IFNAMSIZ]; uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; }; +#define PF_REASS_ENABLED 0x01 +#define PF_REASS_NODF 0x02 + struct pf_addr { union { struct in_addr v4; struct in6_addr v6; u_int8_t addr8[16]; u_int16_t addr16[8]; u_int32_t addr32[4]; } pfa; /* 128-bit address */ #define v4 pfa.v4 #define v6 pfa.v6 #define addr8 pfa.addr8 #define addr16 pfa.addr16 #define addr32 pfa.addr32 }; #define PFI_AFLAG_NETWORK 0x01 #define PFI_AFLAG_BROADCAST 0x02 #define PFI_AFLAG_PEER 0x04 #define PFI_AFLAG_MODEMASK 0x07 #define PFI_AFLAG_NOALIAS 0x08 struct pf_addr_wrap { union { struct { struct pf_addr addr; struct pf_addr mask; } a; char ifname[IFNAMSIZ]; char tblname[PF_TABLE_NAME_SIZE]; } v; union { struct pfi_dynaddr *dyn; struct pfr_ktable *tbl; int dyncnt; int tblcnt; } p; u_int8_t type; /* PF_ADDR_* */ u_int8_t iflags; /* PFI_AFLAG_* */ }; union pf_rule_ptr { struct pf_rule *ptr; u_int32_t nr; }; struct pf_rule_uid { uid_t uid[2]; u_int8_t op; }; struct pf_rule_gid { uid_t gid[2]; u_int8_t op; }; struct pf_rule_addr { struct pf_addr_wrap addr; u_int16_t port[2]; u_int8_t neg; u_int8_t port_op; }; struct pf_pooladdr { struct pf_addr_wrap addr; TAILQ_ENTRY(pf_pooladdr) entries; char ifname[IFNAMSIZ]; struct pfi_kif *kif; }; TAILQ_HEAD(pf_palist, pf_pooladdr); 
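The struct pf_addr defined above overlays an IPv4 address, an IPv6 address, and 8/16/32-bit views of the same 16 bytes, so address-handling code can operate on the addr32[] words regardless of family; pf's PF_AEQ()/PF_ANEQ() macros in pfvar.h rely on this layout. A minimal illustrative sketch, not part of this diff (pf_addr_words_equal() is a hypothetical helper, not a pf function):

static int
pf_addr_words_equal(const struct pf_addr *a, const struct pf_addr *b,
    sa_family_t af)
{
	/* An AF_INET address occupies only the first 32-bit word. */
	if (af == AF_INET)
		return (a->addr32[0] == b->addr32[0]);
	/* An AF_INET6 address uses all four 32-bit words. */
	return (a->addr32[0] == b->addr32[0] &&
	    a->addr32[1] == b->addr32[1] &&
	    a->addr32[2] == b->addr32[2] &&
	    a->addr32[3] == b->addr32[3]);
}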
struct pf_poolhashkey { union { u_int8_t key8[16]; u_int16_t key16[8]; u_int32_t key32[4]; } pfk; /* 128-bit hash key */ #define key8 pfk.key8 #define key16 pfk.key16 #define key32 pfk.key32 }; struct pf_mape_portset { u_int8_t offset; u_int8_t psidlen; u_int16_t psid; }; struct pf_pool { struct pf_palist list; struct pf_pooladdr *cur; struct pf_poolhashkey key; struct pf_addr counter; int tblidx; u_int16_t proxy_port[2]; u_int8_t opts; }; /* A packed Operating System description for fingerprinting */ typedef u_int32_t pf_osfp_t; #define PF_OSFP_ANY ((pf_osfp_t)0) #define PF_OSFP_UNKNOWN ((pf_osfp_t)-1) #define PF_OSFP_NOMATCH ((pf_osfp_t)-2) struct pf_osfp_entry { SLIST_ENTRY(pf_osfp_entry) fp_entry; pf_osfp_t fp_os; int fp_enflags; #define PF_OSFP_EXPANDED 0x001 /* expanded entry */ #define PF_OSFP_GENERIC 0x002 /* generic signature */ #define PF_OSFP_NODETAIL 0x004 /* no p0f details */ #define PF_OSFP_LEN 32 char fp_class_nm[PF_OSFP_LEN]; char fp_version_nm[PF_OSFP_LEN]; char fp_subtype_nm[PF_OSFP_LEN]; }; #define PF_OSFP_ENTRY_EQ(a, b) \ ((a)->fp_os == (b)->fp_os && \ memcmp((a)->fp_class_nm, (b)->fp_class_nm, PF_OSFP_LEN) == 0 && \ memcmp((a)->fp_version_nm, (b)->fp_version_nm, PF_OSFP_LEN) == 0 && \ memcmp((a)->fp_subtype_nm, (b)->fp_subtype_nm, PF_OSFP_LEN) == 0) /* handle pf_osfp_t packing */ #define _FP_RESERVED_BIT 1 /* For the special negative #defines */ #define _FP_UNUSED_BITS 1 #define _FP_CLASS_BITS 10 /* OS Class (Windows, Linux) */ #define _FP_VERSION_BITS 10 /* OS version (95, 98, NT, 2.4.54, 3.2) */ #define _FP_SUBTYPE_BITS 10 /* patch level (NT SP4, SP3, ECN patch) */ #define PF_OSFP_UNPACK(osfp, class, version, subtype) do { \ (class) = ((osfp) >> (_FP_VERSION_BITS+_FP_SUBTYPE_BITS)) & \ ((1 << _FP_CLASS_BITS) - 1); \ (version) = ((osfp) >> _FP_SUBTYPE_BITS) & \ ((1 << _FP_VERSION_BITS) - 1);\ (subtype) = (osfp) & ((1 << _FP_SUBTYPE_BITS) - 1); \ } while(0) #define PF_OSFP_PACK(osfp, class, version, subtype) do { \ (osfp) = ((class) & ((1 << _FP_CLASS_BITS) - 1)) << (_FP_VERSION_BITS \ + _FP_SUBTYPE_BITS); \ (osfp) |= ((version) & ((1 << _FP_VERSION_BITS) - 1)) << \ _FP_SUBTYPE_BITS; \ (osfp) |= (subtype) & ((1 << _FP_SUBTYPE_BITS) - 1); \ } while(0) /* the fingerprint of an OSes TCP SYN packet */ typedef u_int64_t pf_tcpopts_t; struct pf_os_fingerprint { SLIST_HEAD(pf_osfp_enlist, pf_osfp_entry) fp_oses; /* list of matches */ pf_tcpopts_t fp_tcpopts; /* packed TCP options */ u_int16_t fp_wsize; /* TCP window size */ u_int16_t fp_psize; /* ip->ip_len */ u_int16_t fp_mss; /* TCP MSS */ u_int16_t fp_flags; #define PF_OSFP_WSIZE_MOD 0x0001 /* Window modulus */ #define PF_OSFP_WSIZE_DC 0x0002 /* Window don't care */ #define PF_OSFP_WSIZE_MSS 0x0004 /* Window multiple of MSS */ #define PF_OSFP_WSIZE_MTU 0x0008 /* Window multiple of MTU */ #define PF_OSFP_PSIZE_MOD 0x0010 /* packet size modulus */ #define PF_OSFP_PSIZE_DC 0x0020 /* packet size don't care */ #define PF_OSFP_WSCALE 0x0040 /* TCP window scaling */ #define PF_OSFP_WSCALE_MOD 0x0080 /* TCP window scale modulus */ #define PF_OSFP_WSCALE_DC 0x0100 /* TCP window scale dont-care */ #define PF_OSFP_MSS 0x0200 /* TCP MSS */ #define PF_OSFP_MSS_MOD 0x0400 /* TCP MSS modulus */ #define PF_OSFP_MSS_DC 0x0800 /* TCP MSS dont-care */ #define PF_OSFP_DF 0x1000 /* IPv4 don't fragment bit */ #define PF_OSFP_TS0 0x2000 /* Zero timestamp */ #define PF_OSFP_INET6 0x4000 /* IPv6 */ u_int8_t fp_optcnt; /* TCP option count */ u_int8_t fp_wscale; /* TCP window scaling */ u_int8_t fp_ttl; /* IPv4 TTL */ #define PF_OSFP_MAXTTL_OFFSET 
40 /* TCP options packing */ #define PF_OSFP_TCPOPT_NOP 0x0 /* TCP NOP option */ #define PF_OSFP_TCPOPT_WSCALE 0x1 /* TCP window scaling option */ #define PF_OSFP_TCPOPT_MSS 0x2 /* TCP max segment size opt */ #define PF_OSFP_TCPOPT_SACK 0x3 /* TCP SACK OK option */ #define PF_OSFP_TCPOPT_TS 0x4 /* TCP timestamp option */ #define PF_OSFP_TCPOPT_BITS 3 /* bits used by each option */ #define PF_OSFP_MAX_OPTS \ (sizeof(((struct pf_os_fingerprint *)0)->fp_tcpopts) * 8) \ / PF_OSFP_TCPOPT_BITS SLIST_ENTRY(pf_os_fingerprint) fp_next; }; struct pf_osfp_ioctl { struct pf_osfp_entry fp_os; pf_tcpopts_t fp_tcpopts; /* packed TCP options */ u_int16_t fp_wsize; /* TCP window size */ u_int16_t fp_psize; /* ip->ip_len */ u_int16_t fp_mss; /* TCP MSS */ u_int16_t fp_flags; u_int8_t fp_optcnt; /* TCP option count */ u_int8_t fp_wscale; /* TCP window scaling */ u_int8_t fp_ttl; /* IPv4 TTL */ int fp_getnum; /* DIOCOSFPGET number */ }; #define PF_ANCHOR_NAME_SIZE 64 struct pf_rule { struct pf_rule_addr src; struct pf_rule_addr dst; #define PF_SKIP_IFP 0 #define PF_SKIP_DIR 1 #define PF_SKIP_AF 2 #define PF_SKIP_PROTO 3 #define PF_SKIP_SRC_ADDR 4 #define PF_SKIP_SRC_PORT 5 #define PF_SKIP_DST_ADDR 6 #define PF_SKIP_DST_PORT 7 #define PF_SKIP_COUNT 8 union pf_rule_ptr skip[PF_SKIP_COUNT]; #define PF_RULE_LABEL_SIZE 64 #define PF_RULE_MAX_LABEL_COUNT 5 char label[PF_RULE_LABEL_SIZE]; char ifname[IFNAMSIZ]; char qname[PF_QNAME_SIZE]; char pqname[PF_QNAME_SIZE]; #define PF_TAG_NAME_SIZE 64 char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; char overload_tblname[PF_TABLE_NAME_SIZE]; TAILQ_ENTRY(pf_rule) entries; struct pf_pool rpool; u_int64_t evaluations; u_int64_t packets[2]; u_int64_t bytes[2]; struct pfi_kif *kif; struct pf_anchor *anchor; struct pfr_ktable *overload_tbl; pf_osfp_t os_fingerprint; int rtableid; u_int32_t timeout[PFTM_MAX]; u_int32_t max_states; u_int32_t max_src_nodes; u_int32_t max_src_states; u_int32_t max_src_conn; struct { u_int32_t limit; u_int32_t seconds; } max_src_conn_rate; u_int32_t qid; u_int32_t pqid; u_int32_t rt_listid; u_int32_t nr; u_int32_t prob; uid_t cuid; pid_t cpid; counter_u64_t states_cur; counter_u64_t states_tot; counter_u64_t src_nodes; u_int16_t return_icmp; u_int16_t return_icmp6; u_int16_t max_mss; u_int16_t tag; u_int16_t match_tag; u_int16_t scrub_flags; struct pf_rule_uid uid; struct pf_rule_gid gid; u_int32_t rule_flag; u_int8_t action; u_int8_t direction; u_int8_t log; u_int8_t logif; u_int8_t quick; u_int8_t ifnot; u_int8_t match_tag_not; u_int8_t natpass; #define PF_STATE_NORMAL 0x1 #define PF_STATE_MODULATE 0x2 #define PF_STATE_SYNPROXY 0x3 u_int8_t keep_state; sa_family_t af; u_int8_t proto; u_int8_t type; u_int8_t code; u_int8_t flags; u_int8_t flagset; u_int8_t min_ttl; u_int8_t allow_opts; u_int8_t rt; u_int8_t return_ttl; u_int8_t tos; u_int8_t set_tos; u_int8_t anchor_relative; u_int8_t anchor_wildcard; #define PF_FLUSH 0x01 #define PF_FLUSH_GLOBAL 0x02 u_int8_t flush; #define PF_PRIO_ZERO 0xff /* match "prio 0" packets */ #define PF_PRIO_MAX 7 u_int8_t prio; u_int8_t set_prio[2]; struct { struct pf_addr addr; u_int16_t port; } divert; uint64_t u_states_cur; uint64_t u_states_tot; uint64_t u_src_nodes; }; -/* rule flags */ -#define PFRULE_DROP 0x0000 -#define PFRULE_RETURNRST 0x0001 -#define PFRULE_FRAGMENT 0x0002 -#define PFRULE_RETURNICMP 0x0004 -#define PFRULE_RETURN 0x0008 -#define PFRULE_NOSYNC 0x0010 -#define PFRULE_SRCTRACK 0x0020 /* track source states */ -#define PFRULE_RULESRCTRACK 0x0040 /* per rule */ +/* 
pf_krule->rule_flag and old-style scrub flags */ +#define PFRULE_DROP 0x00000000 +#define PFRULE_RETURNRST 0x00000001 +#define PFRULE_FRAGMENT 0x00000002 +#define PFRULE_RETURNICMP 0x00000004 +#define PFRULE_RETURN 0x00000008 +#define PFRULE_NOSYNC 0x00000010 +#define PFRULE_SRCTRACK 0x00000020 /* track source states */ +#define PFRULE_RULESRCTRACK 0x00000040 /* per rule */ +#define PFRULE_NODF 0x00000100 +#define PFRULE_FRAGMENT_NOREASS 0x00000200 +#define PFRULE_RANDOMID 0x00000800 +#define PFRULE_REASSEMBLE_TCP 0x00001000 +#define PFRULE_SET_TOS 0x00002000 +#define PFRULE_IFBOUND 0x00010000 /* if-bound */ +#define PFRULE_STATESLOPPY 0x00020000 /* sloppy state tracking */ #ifdef _KERNEL #define PFRULE_REFS 0x0080 /* rule has references */ #endif -/* scrub flags */ -#define PFRULE_NODF 0x0100 -#define PFRULE_FRAGMENT_NOREASS 0x0200 -#define PFRULE_RANDOMID 0x0800 -#define PFRULE_REASSEMBLE_TCP 0x1000 -#define PFRULE_SET_TOS 0x2000 - -/* rule flags again */ -#define PFRULE_IFBOUND 0x00010000 /* if-bound */ -#define PFRULE_STATESLOPPY 0x00020000 /* sloppy state tracking */ +/* pf_rule_actions->dnflags */ +#define PFRULE_DN_IS_PIPE 0x0040 +#define PFRULE_DN_IS_QUEUE 0x0080 + +/* pf_state->state_flags, pf_rule_actions->flags, pf_krule->scrub_flags */ +#define PFSTATE_ALLOWOPTS 0x0001 +#define PFSTATE_SLOPPY 0x0002 +/* was PFSTATE_PFLOW 0x0004 */ +#define PFSTATE_NOSYNC 0x0008 +#define PFSTATE_ACK 0x0010 +#define PFSTATE_NODF 0x0020 +#define PFSTATE_SETTOS 0x0040 +#define PFSTATE_RANDOMID 0x0080 +#define PFSTATE_SCRUB_TCP 0x0100 +#define PFSTATE_SETPRIO 0x0200 +/* was PFSTATE_INP_UNLINKED 0x0400 */ +/* FreeBSD-specific flags are added from the end to keep space for porting + * flags from OpenBSD */ +#define PFSTATE_DN_IS_PIPE 0x4000 +#define PFSTATE_DN_IS_QUEUE 0x8000 +#define PFSTATE_SCRUBMASK (PFSTATE_NODF|PFSTATE_RANDOMID|PFSTATE_SCRUB_TCP) +#define PFSTATE_SETMASK (PFSTATE_SETTOS|PFSTATE_SETPRIO) #define PFSTATE_HIWAT 100000 /* default state table size */ #define PFSTATE_ADAPT_START 60000 /* default adaptive timeout start */ #define PFSTATE_ADAPT_END 120000 /* default adaptive timeout end */ struct pf_threshold { u_int32_t limit; #define PF_THRESHOLD_MULT 1000 #define PF_THRESHOLD_MAX 0xffffffff / PF_THRESHOLD_MULT u_int32_t seconds; u_int32_t count; u_int32_t last; }; struct pf_src_node { LIST_ENTRY(pf_src_node) entry; struct pf_addr addr; struct pf_addr raddr; union pf_rule_ptr rule; struct pfi_kif *kif; u_int64_t bytes[2]; u_int64_t packets[2]; u_int32_t states; u_int32_t conn; struct pf_threshold conn_rate; u_int32_t creation; u_int32_t expire; sa_family_t af; u_int8_t ruletype; }; #define PFSNODE_HIWAT 10000 /* default source node table size */ TAILQ_HEAD(pf_rulequeue, pf_rule); struct pf_anchor; struct pf_ruleset { struct { struct pf_rulequeue queues[2]; struct { struct pf_rulequeue *ptr; struct pf_rule **ptr_array; u_int32_t rcount; u_int32_t ticket; int open; } active, inactive; } rules[PF_RULESET_MAX]; struct pf_anchor *anchor; u_int32_t tticket; int tables; int topen; }; RB_HEAD(pf_anchor_global, pf_anchor); RB_HEAD(pf_anchor_node, pf_anchor); struct pf_anchor { RB_ENTRY(pf_anchor) entry_global; RB_ENTRY(pf_anchor) entry_node; struct pf_anchor *parent; struct pf_anchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pf_ruleset ruleset; int refcnt; /* anchor rules */ int match; /* XXX: used for pfctl black magic */ }; RB_PROTOTYPE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare); RB_PROTOTYPE(pf_anchor_node, pf_anchor, entry_node, 
pf_anchor_compare); int pf_get_ruleset_number(u_int8_t); #endif /* _NET_PF_H_ */ diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c index cc1f5a5c2138..5dc0072451a7 100644 --- a/sys/netpfil/pf/pf_ioctl.c +++ b/sys/netpfil/pf/pf_ioctl.c @@ -1,6863 +1,6889 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002,2003 Henning Brauer * Copyright (c) 2012 Gleb Smirnoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
* * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_bpf.h" #include "opt_pf.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif /* INET6 */ #ifdef ALTQ #include #endif SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int"); SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int"); SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int"); SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int"); static struct pf_kpool *pf_get_kpool(const char *, u_int32_t, u_int8_t, u_int32_t, u_int8_t, u_int8_t, u_int8_t); static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *); static void pf_empty_kpool(struct pf_kpalist *); static int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); static int pf_begin_eth(uint32_t *, const char *); static void pf_rollback_eth_cb(struct epoch_context *); static int pf_rollback_eth(uint32_t, const char *); static int pf_commit_eth(uint32_t, const char *); static void pf_free_eth_rule(struct pf_keth_rule *); #ifdef ALTQ static int pf_begin_altq(u_int32_t *); static int pf_rollback_altq(u_int32_t); static int pf_commit_altq(u_int32_t); static int pf_enable_altq(struct pf_altq *); static int pf_disable_altq(struct pf_altq *); static uint16_t pf_qname2qid(const char *); static void pf_qid_unref(uint16_t); #endif /* ALTQ */ static int pf_begin_rules(u_int32_t *, int, const char *); static int pf_rollback_rules(u_int32_t, int, char *); static int pf_setup_pfsync_matching(struct pf_kruleset *); static void pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *); static void pf_hash_rule(struct pf_krule *); static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); static int pf_commit_rules(u_int32_t, int, char *); static int pf_addr_setup(struct pf_kruleset *, struct pf_addr_wrap *, sa_family_t); static void pf_addr_copyout(struct pf_addr_wrap *); static void pf_src_node_copy(const struct pf_ksrc_node *, struct pf_src_node *); #ifdef ALTQ static int pf_export_kaltq(struct pf_altq *, struct pfioc_altq_v1 *, size_t); static int pf_import_kaltq(struct pfioc_altq_v1 *, struct pf_altq *, size_t); #endif /* ALTQ */ VNET_DEFINE(struct pf_krule, pf_default_rule); static __inline int pf_krule_compare(struct pf_krule *, struct pf_krule *); RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare); #ifdef ALTQ VNET_DEFINE_STATIC(int, pf_altq_running); #define V_pf_altq_running VNET(pf_altq_running) #endif #define TAGID_MAX 50000 struct pf_tagname { TAILQ_ENTRY(pf_tagname) namehash_entries; TAILQ_ENTRY(pf_tagname) taghash_entries; char name[PF_TAG_NAME_SIZE]; uint16_t tag; int ref; }; struct pf_tagset { TAILQ_HEAD(, pf_tagname) *namehash; TAILQ_HEAD(, pf_tagname) *taghash; unsigned int mask; uint32_t seed; BITSET_DEFINE(, TAGID_MAX) avail; }; VNET_DEFINE(struct pf_tagset, pf_tags); #define V_pf_tags VNET(pf_tags) static unsigned int pf_rule_tag_hashsize; #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN, &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT, "Size of pf(4) rule tag hashtable"); #ifdef ALTQ VNET_DEFINE(struct pf_tagset, 
pf_qids); #define V_pf_qids VNET(pf_qids) static unsigned int pf_queue_tag_hashsize; #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN, &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT, "Size of pf(4) queue tag hashtable"); #endif VNET_DEFINE(uma_zone_t, pf_tag_z); #define V_pf_tag_z VNET(pf_tag_z) static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db"); static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules"); #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE #endif static void pf_init_tagset(struct pf_tagset *, unsigned int *, unsigned int); static void pf_cleanup_tagset(struct pf_tagset *); static uint16_t tagname2hashindex(const struct pf_tagset *, const char *); static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t); static u_int16_t tagname2tag(struct pf_tagset *, const char *); static u_int16_t pf_tagname2tag(const char *); static void tag_unref(struct pf_tagset *, u_int16_t); #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x struct cdev *pf_dev; /* * XXX - These are new and need to be checked when moving to a new version */ static void pf_clear_all_states(void); static unsigned int pf_clear_states(const struct pf_kstate_kill *); static void pf_killstates(struct pf_kstate_kill *, unsigned int *); static int pf_killstates_row(struct pf_kstate_kill *, struct pf_idhash *); static int pf_killstates_nv(struct pfioc_nv *); static int pf_clearstates_nv(struct pfioc_nv *); static int pf_getstate(struct pfioc_nv *); static int pf_getstatus(struct pfioc_nv *); static int pf_clear_tables(void); static void pf_clear_srcnodes(struct pf_ksrc_node *); static void pf_kill_srcnodes(struct pfioc_src_node_kill *); static int pf_keepcounters(struct pfioc_nv *); static void pf_tbladdr_copyout(struct pf_addr_wrap *); /* * Wrapper functions for pfil(9) hooks */ static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); #ifdef INET static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); #endif #ifdef INET6 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); #endif static void hook_pf_eth(void); static void hook_pf(void); static void dehook_pf_eth(void); static void dehook_pf(void); static int shutdown_pf(void); static int pf_load(void); static void pf_unload(void); static struct cdevsw pf_cdevsw = { .d_ioctl = pfioctl, .d_name = PF_NAME, .d_version = D_VERSION, }; VNET_DEFINE_STATIC(bool, pf_pfil_hooked); #define V_pf_pfil_hooked VNET(pf_pfil_hooked) VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked); #define V_pf_pfil_eth_hooked VNET(pf_pfil_eth_hooked) /* * We need a flag that is neither hooked nor running to know when * the VNET is "valid". We primarily need this to control (global) * external events, e.g., eventhandlers.
*/ VNET_DEFINE(int, pf_vnet_active); #define V_pf_vnet_active VNET(pf_vnet_active) int pf_end_threads; struct proc *pf_purge_proc; struct rmlock pf_rules_lock; struct sx pf_ioctl_lock; struct sx pf_end_lock; /* pfsync */ VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr); VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr); VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr); VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr); VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr); VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr); pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr; /* pflog */ pflog_packet_t *pflog_packet_ptr = NULL; /* * Copy a user-provided string, returning an error if truncation would occur. * Avoid scanning past "sz" bytes in the source string since there's no * guarantee that it's nul-terminated. */ static int pf_user_strcpy(char *dst, const char *src, size_t sz) { if (strnlen(src, sz) == sz) return (EINVAL); (void)strlcpy(dst, src, sz); return (0); } static void pfattach_vnet(void) { u_int32_t *my_timeout = V_pf_default_rule.timeout; bzero(&V_pf_status, sizeof(V_pf_status)); pf_initialize(); pfr_initialize(); pfi_initialize_vnet(); pf_normalize_init(); pf_syncookies_init(); V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; RB_INIT(&V_pf_anchors); pf_init_kruleset(&pf_main_ruleset); pf_init_keth(V_pf_keth); /* default rule should never be garbage collected */ V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; #ifdef PF_DEFAULT_TO_DROP V_pf_default_rule.action = PF_DROP; #else V_pf_default_rule.action = PF_PASS; #endif V_pf_default_rule.nr = -1; V_pf_default_rule.rtableid = -1; pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK); for (int i = 0; i < 2; i++) { pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK); pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK); } V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK); V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK); V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK); V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, M_WAITOK | M_ZERO); #ifdef PF_WANT_32_TO_64_COUNTER V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO); V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO); PF_RULES_WLOCK(); LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist); V_pf_allrulecount++; LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); PF_RULES_WUNLOCK(); #endif /* initialize default timeouts */ my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 
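	/*
	 * Illustrative sketch, not part of this change: pf_user_strcpy()
	 * above rejects, rather than silently truncates, an over-long
	 * source string. strnlen(src, sz) == sz means no NUL terminator
	 * was found within sz bytes, so strlcpy() would truncate; EINVAL
	 * is returned instead. A hypothetical caller validating an
	 * interface name copied in from userland:
	 *
	 *	char ifname[IFNAMSIZ];
	 *	if (pf_user_strcpy(ifname, user_ifname, sizeof(ifname)) != 0)
	 *		return (EINVAL);
	 */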
my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; V_pf_status.debug = PF_DEBUG_URGENT; + /* + * XXX This is different from OpenBSD, where reassembly is enabled by + * default. In FreeBSD we expect people to still use scrub rules and + * switch to the new syntax later. Only when they switch must they + * explicitly enable reassembly. We could change the default once the + * scrub rule functionality is hopefully removed some day in the future. + */ + V_pf_status.reass = 0; V_pf_pfil_hooked = false; V_pf_pfil_eth_hooked = false; /* XXX do our best to avoid a conflict */ V_pf_status.hostid = arc4random(); for (int i = 0; i < PFRES_MAX; i++) V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK); for (int i = 0; i < KLCNT_MAX; i++) V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK); for (int i = 0; i < FCNT_MAX; i++) pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK); for (int i = 0; i < SCNT_MAX; i++) V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK); if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET, INTR_MPSAFE, &V_pf_swi_cookie) != 0) /* XXXGL: leaked all above. */ return; } static struct pf_kpool * pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action, u_int32_t rule_number, u_int8_t r_last, u_int8_t active, u_int8_t check_ticket) { struct pf_kruleset *ruleset; struct pf_krule *rule; int rs_num; ruleset = pf_find_kruleset(anchor); if (ruleset == NULL) return (NULL); rs_num = pf_get_ruleset_number(rule_action); if (rs_num >= PF_RULESET_MAX) return (NULL); if (active) { if (check_ticket && ticket != ruleset->rules[rs_num].active.ticket) return (NULL); if (r_last) rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, pf_krulequeue); else rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); } else { if (check_ticket && ticket != ruleset->rules[rs_num].inactive.ticket) return (NULL); if (r_last) rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_krulequeue); else rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); } if (!r_last) { while ((rule != NULL) && (rule->nr != rule_number)) rule = TAILQ_NEXT(rule, entries); } if (rule == NULL) return (NULL); return (&rule->rpool); } static void pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb) { struct pf_kpooladdr *mv_pool_pa; while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { TAILQ_REMOVE(poola, mv_pool_pa, entries); TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); } } static void pf_empty_kpool(struct pf_kpalist *poola) { struct pf_kpooladdr *pa; while ((pa = TAILQ_FIRST(poola)) != NULL) { switch (pa->addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(pa->addr.p.dyn); break; case PF_ADDR_TABLE: /* XXX: this could be unfinished pooladdr on pabuf */ if (pa->addr.p.tbl != NULL) pfr_detach_table(pa->addr.p.tbl); break; } if (pa->kif) pfi_kkif_unref(pa->kif); TAILQ_REMOVE(poola, pa, entries); free(pa, M_PFRULE); } } static void pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule) { PF_RULES_WASSERT(); PF_UNLNKDRULES_ASSERT(); TAILQ_REMOVE(rulequeue, rule, entries); rule->rule_ref |= PFRULE_REFS; TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries); } static void pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule) { PF_RULES_WASSERT(); PF_UNLNKDRULES_LOCK(); pf_unlink_rule_locked(rulequeue, rule);
PF_UNLNKDRULES_UNLOCK(); } static void pf_free_eth_rule(struct pf_keth_rule *rule) { PF_RULES_WASSERT(); if (rule == NULL) return; if (rule->tag) tag_unref(&V_pf_tags, rule->tag); if (rule->match_tag) tag_unref(&V_pf_tags, rule->match_tag); #ifdef ALTQ pf_qid_unref(rule->qid); #endif if (rule->bridge_to) pfi_kkif_unref(rule->bridge_to); if (rule->kif) pfi_kkif_unref(rule->kif); if (rule->ipsrc.addr.type == PF_ADDR_TABLE) pfr_detach_table(rule->ipsrc.addr.p.tbl); if (rule->ipdst.addr.type == PF_ADDR_TABLE) pfr_detach_table(rule->ipdst.addr.p.tbl); counter_u64_free(rule->evaluations); for (int i = 0; i < 2; i++) { counter_u64_free(rule->packets[i]); counter_u64_free(rule->bytes[i]); } uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); pf_keth_anchor_remove(rule); free(rule, M_PFRULE); } void pf_free_rule(struct pf_krule *rule) { PF_RULES_WASSERT(); PF_CONFIG_ASSERT(); if (rule->tag) tag_unref(&V_pf_tags, rule->tag); if (rule->match_tag) tag_unref(&V_pf_tags, rule->match_tag); #ifdef ALTQ if (rule->pqid != rule->qid) pf_qid_unref(rule->pqid); pf_qid_unref(rule->qid); #endif switch (rule->src.addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(rule->src.addr.p.dyn); break; case PF_ADDR_TABLE: pfr_detach_table(rule->src.addr.p.tbl); break; } switch (rule->dst.addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(rule->dst.addr.p.dyn); break; case PF_ADDR_TABLE: pfr_detach_table(rule->dst.addr.p.tbl); break; } if (rule->overload_tbl) pfr_detach_table(rule->overload_tbl); if (rule->kif) pfi_kkif_unref(rule->kif); pf_kanchor_remove(rule); pf_empty_kpool(&rule->rpool.list); pf_krule_free(rule); } static void pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size, unsigned int default_size) { unsigned int i; unsigned int hashsize; if (*tunable_size == 0 || !powerof2(*tunable_size)) *tunable_size = default_size; hashsize = *tunable_size; ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH, M_WAITOK); ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH, M_WAITOK); ts->mask = hashsize - 1; ts->seed = arc4random(); for (i = 0; i < hashsize; i++) { TAILQ_INIT(&ts->namehash[i]); TAILQ_INIT(&ts->taghash[i]); } BIT_FILL(TAGID_MAX, &ts->avail); } static void pf_cleanup_tagset(struct pf_tagset *ts) { unsigned int i; unsigned int hashsize; struct pf_tagname *t, *tmp; /* * Only need to clean up one of the hashes as each tag is hashed * into each table. */ hashsize = ts->mask + 1; for (i = 0; i < hashsize; i++) TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp) uma_zfree(V_pf_tag_z, t); free(ts->namehash, M_PFHASH); free(ts->taghash, M_PFHASH); } static uint16_t tagname2hashindex(const struct pf_tagset *ts, const char *tagname) { size_t len; len = strnlen(tagname, PF_TAG_NAME_SIZE - 1); return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask); } static uint16_t tag2hashindex(const struct pf_tagset *ts, uint16_t tag) { return (tag & ts->mask); } static u_int16_t tagname2tag(struct pf_tagset *ts, const char *tagname) { struct pf_tagname *tag; u_int32_t index; u_int16_t new_tagid; PF_RULES_WASSERT(); index = tagname2hashindex(ts, tagname); TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries) if (strcmp(tagname, tag->name) == 0) { tag->ref++; return (tag->tag); } /* * new entry * * to avoid fragmentation, we do a linear search from the beginning * and take the first free slot we find. */ new_tagid = BIT_FFS(TAGID_MAX, &ts->avail); /* * Tags are 1-based, with valid tags in the range [1..TAGID_MAX]. 
* BIT_FFS() returns a 1-based bit number, with 0 indicating no bits * set. It may also return a bit number greater than TAGID_MAX due * to rounding of the number of bits in the vector up to a multiple * of the vector word size at declaration/allocation time. */ if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) return (0); /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */ BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); /* allocate and fill new struct pf_tagname */ tag = uma_zalloc(V_pf_tag_z, M_NOWAIT); if (tag == NULL) return (0); strlcpy(tag->name, tagname, sizeof(tag->name)); tag->tag = new_tagid; tag->ref = 1; /* Insert into namehash */ TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); /* Insert into taghash */ index = tag2hashindex(ts, new_tagid); TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); return (tag->tag); } static void tag_unref(struct pf_tagset *ts, u_int16_t tag) { struct pf_tagname *t; uint16_t index; PF_RULES_WASSERT(); index = tag2hashindex(ts, tag); TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) if (tag == t->tag) { if (--t->ref == 0) { TAILQ_REMOVE(&ts->taghash[index], t, taghash_entries); index = tagname2hashindex(ts, t->name); TAILQ_REMOVE(&ts->namehash[index], t, namehash_entries); /* Bits are 0-based for BIT_SET() */ BIT_SET(TAGID_MAX, tag - 1, &ts->avail); uma_zfree(V_pf_tag_z, t); } break; } } static uint16_t pf_tagname2tag(const char *tagname) { return (tagname2tag(&V_pf_tags, tagname)); } static int pf_begin_eth(uint32_t *ticket, const char *anchor) { struct pf_keth_rule *rule, *tmp; struct pf_keth_ruleset *rs; PF_RULES_WASSERT(); rs = pf_find_or_create_keth_ruleset(anchor); if (rs == NULL) return (EINVAL); /* Purge old inactive rules. */ TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) { TAILQ_REMOVE(rs->inactive.rules, rule, entries); pf_free_eth_rule(rule); } *ticket = ++rs->inactive.ticket; rs->inactive.open = 1; return (0); } static void pf_rollback_eth_cb(struct epoch_context *ctx) { struct pf_keth_ruleset *rs; rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx); CURVNET_SET(rs->vnet); PF_RULES_WLOCK(); pf_rollback_eth(rs->inactive.ticket, rs->anchor ? rs->anchor->path : ""); PF_RULES_WUNLOCK(); CURVNET_RESTORE(); } static int pf_rollback_eth(uint32_t ticket, const char *anchor) { struct pf_keth_rule *rule, *tmp; struct pf_keth_ruleset *rs; PF_RULES_WASSERT(); rs = pf_find_keth_ruleset(anchor); if (rs == NULL) return (EINVAL); if (!rs->inactive.open || ticket != rs->inactive.ticket) return (0); /* Purge old inactive rules. 
*/ TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) { TAILQ_REMOVE(rs->inactive.rules, rule, entries); pf_free_eth_rule(rule); } rs->inactive.open = 0; pf_remove_if_empty_keth_ruleset(rs); return (0); } #define PF_SET_SKIP_STEPS(i) \ do { \ while (head[i] != cur) { \ head[i]->skip[i].ptr = cur; \ head[i] = TAILQ_NEXT(head[i], entries); \ } \ } while (0) static void pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules) { struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT]; int i; cur = TAILQ_FIRST(rules); prev = cur; for (i = 0; i < PFE_SKIP_COUNT; ++i) head[i] = cur; while (cur != NULL) { if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) PF_SET_SKIP_STEPS(PFE_SKIP_IFP); if (cur->direction != prev->direction) PF_SET_SKIP_STEPS(PFE_SKIP_DIR); if (cur->proto != prev->proto) PF_SET_SKIP_STEPS(PFE_SKIP_PROTO); if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0) PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR); if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0) PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR); prev = cur; cur = TAILQ_NEXT(cur, entries); } for (i = 0; i < PFE_SKIP_COUNT; ++i) PF_SET_SKIP_STEPS(i); } static int pf_commit_eth(uint32_t ticket, const char *anchor) { struct pf_keth_ruleq *rules; struct pf_keth_ruleset *rs; rs = pf_find_keth_ruleset(anchor); if (rs == NULL) { return (EINVAL); } if (!rs->inactive.open || ticket != rs->inactive.ticket) return (EBUSY); PF_RULES_WASSERT(); pf_eth_calc_skip_steps(rs->inactive.rules); rules = rs->active.rules; ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules); rs->inactive.rules = rules; rs->inactive.ticket = rs->active.ticket; /* Clean up inactive rules (i.e. previously active rules), only when * we're sure they're no longer used. */ NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx); return (0); } #ifdef ALTQ static uint16_t pf_qname2qid(const char *qname) { return (tagname2tag(&V_pf_qids, qname)); } static void pf_qid_unref(uint16_t qid) { tag_unref(&V_pf_qids, qid); } static int pf_begin_altq(u_int32_t *ticket) { struct pf_altq *altq, *tmp; int error = 0; PF_RULES_WASSERT(); /* Purge the old altq lists */ TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* detach and destroy the discipline */ error = altq_remove(altq); } free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altq_ifs_inactive); TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { pf_qid_unref(altq->qid); free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altqs_inactive); if (error) return (error); *ticket = ++V_ticket_altqs_inactive; V_altqs_inactive_open = 1; return (0); } static int pf_rollback_altq(u_int32_t ticket) { struct pf_altq *altq, *tmp; int error = 0; PF_RULES_WASSERT(); if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) return (0); /* Purge the old altq lists */ TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* detach and destroy the discipline */ error = altq_remove(altq); } free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altq_ifs_inactive); TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { pf_qid_unref(altq->qid); free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altqs_inactive); V_altqs_inactive_open = 0; return (error); } static int pf_commit_altq(u_int32_t ticket) { struct pf_altqqueue *old_altqs, *old_altq_ifs; struct pf_altq *altq, *tmp; int err, error = 0; PF_RULES_WASSERT(); if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) return (EBUSY); /* swap altqs, keep the old. 
*/ old_altqs = V_pf_altqs_active; old_altq_ifs = V_pf_altq_ifs_active; V_pf_altqs_active = V_pf_altqs_inactive; V_pf_altq_ifs_active = V_pf_altq_ifs_inactive; V_pf_altqs_inactive = old_altqs; V_pf_altq_ifs_inactive = old_altq_ifs; V_ticket_altqs_active = V_ticket_altqs_inactive; /* Attach new disciplines */ TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* attach the discipline */ error = altq_pfattach(altq); if (error == 0 && V_pf_altq_running) error = pf_enable_altq(altq); if (error != 0) return (error); } } /* Purge the old altq lists */ TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* detach and destroy the discipline */ if (V_pf_altq_running) error = pf_disable_altq(altq); err = altq_pfdetach(altq); if (err != 0 && error == 0) error = err; err = altq_remove(altq); if (err != 0 && error == 0) error = err; } free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altq_ifs_inactive); TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { pf_qid_unref(altq->qid); free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altqs_inactive); V_altqs_inactive_open = 0; return (error); } static int pf_enable_altq(struct pf_altq *altq) { struct ifnet *ifp; struct tb_profile tb; int error = 0; if ((ifp = ifunit(altq->ifname)) == NULL) return (EINVAL); if (ifp->if_snd.altq_type != ALTQT_NONE) error = altq_enable(&ifp->if_snd); /* set tokenbucket regulator */ if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { tb.rate = altq->ifbandwidth; tb.depth = altq->tbrsize; error = tbr_set(&ifp->if_snd, &tb); } return (error); } static int pf_disable_altq(struct pf_altq *altq) { struct ifnet *ifp; struct tb_profile tb; int error; if ((ifp = ifunit(altq->ifname)) == NULL) return (EINVAL); /* * when the discipline is no longer referenced, it was overridden * by a new one. if so, just return. */ if (altq->altq_disc != ifp->if_snd.altq_disc) return (0); error = altq_disable(&ifp->if_snd); if (error == 0) { /* clear tokenbucket regulator */ tb.rate = 0; error = tbr_set(&ifp->if_snd, &tb); } return (error); } static int pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket, struct pf_altq *altq) { struct ifnet *ifp1; int error = 0; /* Deactivate the interface in question */ altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; if ((ifp1 = ifunit(altq->ifname)) == NULL || (remove && ifp1 == ifp)) { altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; } else { error = altq_add(ifp1, altq); if (ticket != V_ticket_altqs_inactive) error = EBUSY; if (error) free(altq, M_PFALTQ); } return (error); } void pf_altq_ifnet_event(struct ifnet *ifp, int remove) { struct pf_altq *a1, *a2, *a3; u_int32_t ticket; int error = 0; /* * No need to re-evaluate the configuration for events on interfaces * that do not support ALTQ, as it's not possible for such * interfaces to be part of the configuration. 
*/ if (!ALTQ_IS_READY(&ifp->if_snd)) return; /* Interrupt userland queue modifications */ if (V_altqs_inactive_open) pf_rollback_altq(V_ticket_altqs_inactive); /* Start new altq ruleset */ if (pf_begin_altq(&ticket)) return; /* Copy the current active set */ TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); if (a2 == NULL) { error = ENOMEM; break; } bcopy(a1, a2, sizeof(struct pf_altq)); error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); if (error) break; TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); } if (error) goto out; TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); if (a2 == NULL) { error = ENOMEM; break; } bcopy(a1, a2, sizeof(struct pf_altq)); if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { error = EBUSY; free(a2, M_PFALTQ); break; } a2->altq_disc = NULL; TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { if (strncmp(a3->ifname, a2->ifname, IFNAMSIZ) == 0) { a2->altq_disc = a3->altq_disc; break; } } error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); if (error) break; TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); } out: if (error != 0) pf_rollback_altq(ticket); else pf_commit_altq(ticket); } #endif /* ALTQ */ static struct pf_krule_global * pf_rule_tree_alloc(int flags) { struct pf_krule_global *tree; tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags); if (tree == NULL) return (NULL); RB_INIT(tree); return (tree); } static void pf_rule_tree_free(struct pf_krule_global *tree) { free(tree, M_TEMP); } static int pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) { struct pf_krule_global *tree; struct pf_kruleset *rs; struct pf_krule *rule; PF_RULES_WASSERT(); if (rs_num < 0 || rs_num >= PF_RULESET_MAX) return (EINVAL); tree = pf_rule_tree_alloc(M_NOWAIT); if (tree == NULL) return (ENOMEM); rs = pf_find_or_create_kruleset(anchor); if (rs == NULL) { free(tree, M_TEMP); return (EINVAL); } pf_rule_tree_free(rs->rules[rs_num].inactive.tree); rs->rules[rs_num].inactive.tree = tree; while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); rs->rules[rs_num].inactive.rcount--; } *ticket = ++rs->rules[rs_num].inactive.ticket; rs->rules[rs_num].inactive.open = 1; return (0); } static int pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) { struct pf_kruleset *rs; struct pf_krule *rule; PF_RULES_WASSERT(); if (rs_num < 0 || rs_num >= PF_RULESET_MAX) return (EINVAL); rs = pf_find_kruleset(anchor); if (rs == NULL || !rs->rules[rs_num].inactive.open || rs->rules[rs_num].inactive.ticket != ticket) return (0); while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); rs->rules[rs_num].inactive.rcount--; } rs->rules[rs_num].inactive.open = 0; return (0); } #define PF_MD5_UPD(st, elm) \ MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) #define PF_MD5_UPD_STR(st, elm) \ MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ (stor) = htonl((st)->elm); \ MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ } while (0) #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ (stor) = htons((st)->elm); \ MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ } while (0) static void pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) { PF_MD5_UPD(pfr, addr.type); switch (pfr->addr.type) { case PF_ADDR_DYNIFTL: PF_MD5_UPD(pfr, addr.v.ifname); PF_MD5_UPD(pfr, 
addr.iflags); break; case PF_ADDR_TABLE: PF_MD5_UPD(pfr, addr.v.tblname); break; case PF_ADDR_ADDRMASK: /* XXX ignore af? */ PF_MD5_UPD(pfr, addr.v.a.addr.addr32); PF_MD5_UPD(pfr, addr.v.a.mask.addr32); break; } PF_MD5_UPD(pfr, port[0]); PF_MD5_UPD(pfr, port[1]); PF_MD5_UPD(pfr, neg); PF_MD5_UPD(pfr, port_op); } static void pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule) { u_int16_t x; u_int32_t y; pf_hash_rule_addr(ctx, &rule->src); pf_hash_rule_addr(ctx, &rule->dst); for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) PF_MD5_UPD_STR(rule, label[i]); PF_MD5_UPD_STR(rule, ifname); PF_MD5_UPD_STR(rule, match_tagname); PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ PF_MD5_UPD_HTONL(rule, os_fingerprint, y); PF_MD5_UPD_HTONL(rule, prob, y); PF_MD5_UPD_HTONL(rule, uid.uid[0], y); PF_MD5_UPD_HTONL(rule, uid.uid[1], y); PF_MD5_UPD(rule, uid.op); PF_MD5_UPD_HTONL(rule, gid.gid[0], y); PF_MD5_UPD_HTONL(rule, gid.gid[1], y); PF_MD5_UPD(rule, gid.op); PF_MD5_UPD_HTONL(rule, rule_flag, y); PF_MD5_UPD(rule, action); PF_MD5_UPD(rule, direction); PF_MD5_UPD(rule, af); PF_MD5_UPD(rule, quick); PF_MD5_UPD(rule, ifnot); PF_MD5_UPD(rule, match_tag_not); PF_MD5_UPD(rule, natpass); PF_MD5_UPD(rule, keep_state); PF_MD5_UPD(rule, proto); PF_MD5_UPD(rule, type); PF_MD5_UPD(rule, code); PF_MD5_UPD(rule, flags); PF_MD5_UPD(rule, flagset); PF_MD5_UPD(rule, allow_opts); PF_MD5_UPD(rule, rt); PF_MD5_UPD(rule, tos); + PF_MD5_UPD(rule, scrub_flags); + PF_MD5_UPD(rule, min_ttl); + PF_MD5_UPD(rule, set_tos); if (rule->anchor != NULL) PF_MD5_UPD_STR(rule, anchor->path); } static void pf_hash_rule(struct pf_krule *rule) { MD5_CTX ctx; MD5Init(&ctx); pf_hash_rule_rolling(&ctx, rule); MD5Final(rule->md5sum, &ctx); } static int pf_krule_compare(struct pf_krule *a, struct pf_krule *b) { return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH)); } static int pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) { struct pf_kruleset *rs; struct pf_krule *rule, **old_array, *old_rule; struct pf_krulequeue *old_rules; struct pf_krule_global *old_tree; int error; u_int32_t old_rcount; PF_RULES_WASSERT(); if (rs_num < 0 || rs_num >= PF_RULESET_MAX) return (EINVAL); rs = pf_find_kruleset(anchor); if (rs == NULL || !rs->rules[rs_num].inactive.open || ticket != rs->rules[rs_num].inactive.ticket) return (EBUSY); /* Calculate checksum for the main ruleset */ if (rs == &pf_main_ruleset) { error = pf_setup_pfsync_matching(rs); if (error != 0) return (error); } /* Swap rules, keep the old. */ old_rules = rs->rules[rs_num].active.ptr; old_rcount = rs->rules[rs_num].active.rcount; old_array = rs->rules[rs_num].active.ptr_array; old_tree = rs->rules[rs_num].active.tree; rs->rules[rs_num].active.ptr = rs->rules[rs_num].inactive.ptr; rs->rules[rs_num].active.ptr_array = rs->rules[rs_num].inactive.ptr_array; rs->rules[rs_num].active.tree = rs->rules[rs_num].inactive.tree; rs->rules[rs_num].active.rcount = rs->rules[rs_num].inactive.rcount; /* Attempt to preserve counter information. 
*/ if (V_pf_status.keep_counters && old_tree != NULL) { TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr, entries) { old_rule = RB_FIND(pf_krule_global, old_tree, rule); if (old_rule == NULL) { continue; } pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&rule->evaluations, pf_counter_u64_fetch(&old_rule->evaluations)); pf_counter_u64_add_protected(&rule->packets[0], pf_counter_u64_fetch(&old_rule->packets[0])); pf_counter_u64_add_protected(&rule->packets[1], pf_counter_u64_fetch(&old_rule->packets[1])); pf_counter_u64_add_protected(&rule->bytes[0], pf_counter_u64_fetch(&old_rule->bytes[0])); pf_counter_u64_add_protected(&rule->bytes[1], pf_counter_u64_fetch(&old_rule->bytes[1])); pf_counter_u64_critical_exit(); } } rs->rules[rs_num].inactive.ptr = old_rules; rs->rules[rs_num].inactive.ptr_array = old_array; rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */ rs->rules[rs_num].inactive.rcount = old_rcount; rs->rules[rs_num].active.ticket = rs->rules[rs_num].inactive.ticket; pf_calc_skip_steps(rs->rules[rs_num].active.ptr); /* Purge the old rule list. */ PF_UNLNKDRULES_LOCK(); while ((rule = TAILQ_FIRST(old_rules)) != NULL) pf_unlink_rule_locked(old_rules, rule); PF_UNLNKDRULES_UNLOCK(); if (rs->rules[rs_num].inactive.ptr_array) free(rs->rules[rs_num].inactive.ptr_array, M_TEMP); rs->rules[rs_num].inactive.ptr_array = NULL; rs->rules[rs_num].inactive.rcount = 0; rs->rules[rs_num].inactive.open = 0; pf_remove_if_empty_kruleset(rs); free(old_tree, M_TEMP); return (0); } static int pf_setup_pfsync_matching(struct pf_kruleset *rs) { MD5_CTX ctx; struct pf_krule *rule; int rs_cnt; u_int8_t digest[PF_MD5_DIGEST_LENGTH]; MD5Init(&ctx); for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { /* XXX PF_RULESET_SCRUB as well? 
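* For now scrub rules are skipped entirely when computing the pfsync checksum.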
*/ if (rs_cnt == PF_RULESET_SCRUB) continue; if (rs->rules[rs_cnt].inactive.ptr_array) free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); rs->rules[rs_cnt].inactive.ptr_array = NULL; if (rs->rules[rs_cnt].inactive.rcount) { rs->rules[rs_cnt].inactive.ptr_array = malloc(sizeof(caddr_t) * rs->rules[rs_cnt].inactive.rcount, M_TEMP, M_NOWAIT); if (!rs->rules[rs_cnt].inactive.ptr_array) return (ENOMEM); } TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, entries) { pf_hash_rule_rolling(&ctx, rule); (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; } } MD5Final(digest, &ctx); memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum)); return (0); } static int pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr) { int error = 0; switch (addr->type) { case PF_ADDR_TABLE: addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname); if (addr->p.tbl == NULL) error = ENOMEM; break; default: error = EINVAL; } return (error); } static int pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr, sa_family_t af) { int error = 0; switch (addr->type) { case PF_ADDR_TABLE: addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname); if (addr->p.tbl == NULL) error = ENOMEM; break; case PF_ADDR_DYNIFTL: error = pfi_dynaddr_setup(addr, af); break; } return (error); } static void pf_addr_copyout(struct pf_addr_wrap *addr) { switch (addr->type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_copyout(addr); break; case PF_ADDR_TABLE: pf_tbladdr_copyout(addr); break; } } static void pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out) { int secs = time_uptime, diff; bzero(out, sizeof(struct pf_src_node)); bcopy(&in->addr, &out->addr, sizeof(struct pf_addr)); bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr)); if (in->rule.ptr != NULL) out->rule.nr = in->rule.ptr->nr; for (int i = 0; i < 2; i++) { out->bytes[i] = counter_u64_fetch(in->bytes[i]); out->packets[i] = counter_u64_fetch(in->packets[i]); } out->states = in->states; out->conn = in->conn; out->af = in->af; out->ruletype = in->ruletype; out->creation = secs - in->creation; if (out->expire > secs) out->expire -= secs; else out->expire = 0; /* Adjust the connection rate estimate. */ diff = secs - in->conn_rate.last; if (diff >= in->conn_rate.seconds) out->conn_rate.count = 0; else out->conn_rate.count -= in->conn_rate.count * diff / in->conn_rate.seconds; } #ifdef ALTQ /* * Handle export of struct pf_kaltq to user binaries that may be using any * version of struct pf_altq. 
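* Fields that are wider in the kernel struct are saturated to the narrower v0 fields rather than truncated; see the SATU16/SATU32 helpers below.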
*/ static int pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) { u_int32_t version; if (ioc_size == sizeof(struct pfioc_altq_v0)) version = 0; else version = pa->version; if (version > PFIOC_ALTQ_VERSION) return (EINVAL); #define ASSIGN(x) exported_q->x = q->x #define COPY(x) \ bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) switch (version) { case 0: { struct pf_altq_v0 *exported_q = &((struct pfioc_altq_v0 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); exported_q->tbrsize = SATU16(q->tbrsize); exported_q->ifbandwidth = SATU32(q->ifbandwidth); COPY(qname); COPY(parent); ASSIGN(parent_qid); exported_q->bandwidth = SATU32(q->bandwidth); ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); if (q->scheduler == ALTQT_HFSC) { #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ SATU32(q->pq_u.hfsc_opts.x) ASSIGN_OPT_SATU32(rtsc_m1); ASSIGN_OPT(rtsc_d); ASSIGN_OPT_SATU32(rtsc_m2); ASSIGN_OPT_SATU32(lssc_m1); ASSIGN_OPT(lssc_d); ASSIGN_OPT_SATU32(lssc_m2); ASSIGN_OPT_SATU32(ulsc_m1); ASSIGN_OPT(ulsc_d); ASSIGN_OPT_SATU32(ulsc_m2); ASSIGN_OPT(flags); #undef ASSIGN_OPT #undef ASSIGN_OPT_SATU32 } else COPY(pq_u); ASSIGN(qid); break; } case 1: { struct pf_altq_v1 *exported_q = &((struct pfioc_altq_v1 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); ASSIGN(ifbandwidth); COPY(qname); COPY(parent); ASSIGN(parent_qid); ASSIGN(bandwidth); ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); COPY(pq_u); ASSIGN(qid); break; } default: panic("%s: unhandled struct pfioc_altq version", __func__); break; } #undef ASSIGN #undef COPY #undef SATU16 #undef SATU32 return (0); } /* * Handle import to struct pf_kaltq of struct pf_altq from user binaries * that may be using any version of it. */ static int pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) { u_int32_t version; if (ioc_size == sizeof(struct pfioc_altq_v0)) version = 0; else version = pa->version; if (version > PFIOC_ALTQ_VERSION) return (EINVAL); #define ASSIGN(x) q->x = imported_q->x #define COPY(x) \ bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) switch (version) { case 0: { struct pf_altq_v0 *imported_q = &((struct pfioc_altq_v0 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); /* 16-bit -> 32-bit */ ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ COPY(qname); COPY(parent); ASSIGN(parent_qid); ASSIGN(bandwidth); /* 32-bit -> 64-bit */ ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); if (imported_q->scheduler == ALTQT_HFSC) { #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x /* * The m1 and m2 parameters are being copied from * 32-bit to 64-bit. 
*/ ASSIGN_OPT(rtsc_m1); ASSIGN_OPT(rtsc_d); ASSIGN_OPT(rtsc_m2); ASSIGN_OPT(lssc_m1); ASSIGN_OPT(lssc_d); ASSIGN_OPT(lssc_m2); ASSIGN_OPT(ulsc_m1); ASSIGN_OPT(ulsc_d); ASSIGN_OPT(ulsc_m2); ASSIGN_OPT(flags); #undef ASSIGN_OPT } else COPY(pq_u); ASSIGN(qid); break; } case 1: { struct pf_altq_v1 *imported_q = &((struct pfioc_altq_v1 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); ASSIGN(ifbandwidth); COPY(qname); COPY(parent); ASSIGN(parent_qid); ASSIGN(bandwidth); ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); COPY(pq_u); ASSIGN(qid); break; } default: panic("%s: unhandled struct pfioc_altq version", __func__); break; } #undef ASSIGN #undef COPY return (0); } static struct pf_altq * pf_altq_get_nth_active(u_int32_t n) { struct pf_altq *altq; u_int32_t nr; nr = 0; TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { if (nr == n) return (altq); nr++; } TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { if (nr == n) return (altq); nr++; } return (NULL); } #endif /* ALTQ */ struct pf_krule * pf_krule_alloc(void) { struct pf_krule *rule; rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, M_WAITOK | M_ZERO); return (rule); } void pf_krule_free(struct pf_krule *rule) { #ifdef PF_WANT_32_TO_64_COUNTER bool wowned; #endif if (rule == NULL) return; #ifdef PF_WANT_32_TO_64_COUNTER if (rule->allrulelinked) { wowned = PF_RULES_WOWNED(); if (!wowned) PF_RULES_WLOCK(); LIST_REMOVE(rule, allrulelist); V_pf_allrulecount--; if (!wowned) PF_RULES_WUNLOCK(); } #endif pf_counter_u64_deinit(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_deinit(&rule->packets[i]); pf_counter_u64_deinit(&rule->bytes[i]); } counter_u64_free(rule->states_cur); counter_u64_free(rule->states_tot); counter_u64_free(rule->src_nodes); uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); mtx_destroy(&rule->rpool.mtx); free(rule, M_PFRULE); } static void pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool, struct pf_pooladdr *pool) { bzero(pool, sizeof(*pool)); bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); } static int pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool, struct pf_kpooladdr *kpool) { int ret; bzero(kpool, sizeof(*kpool)); bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); ret = pf_user_strcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname)); return (ret); } static void pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool) { bzero(pool, sizeof(*pool)); bcopy(&kpool->key, &pool->key, sizeof(pool->key)); bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter)); pool->tblidx = kpool->tblidx; pool->proxy_port[0] = kpool->proxy_port[0]; pool->proxy_port[1] = kpool->proxy_port[1]; pool->opts = kpool->opts; } static void pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) { _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); kpool->tblidx = pool->tblidx; kpool->proxy_port[0] = pool->proxy_port[0]; kpool->proxy_port[1] = pool->proxy_port[1]; kpool->opts = pool->opts; } static void pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule) { bzero(rule, sizeof(*rule)); bcopy(&krule->src, &rule->src, sizeof(rule->src)); 
bcopy(&krule->dst, &rule->dst, sizeof(rule->dst)); for (int i = 0; i < PF_SKIP_COUNT; ++i) { if (rule->skip[i].ptr == NULL) rule->skip[i].nr = -1; else rule->skip[i].nr = krule->skip[i].ptr->nr; } strlcpy(rule->label, krule->label[0], sizeof(rule->label)); strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname)); strlcpy(rule->qname, krule->qname, sizeof(rule->qname)); strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname)); strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname)); strlcpy(rule->match_tagname, krule->match_tagname, sizeof(rule->match_tagname)); strlcpy(rule->overload_tblname, krule->overload_tblname, sizeof(rule->overload_tblname)); pf_kpool_to_pool(&krule->rpool, &rule->rpool); rule->evaluations = pf_counter_u64_fetch(&krule->evaluations); for (int i = 0; i < 2; i++) { rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]); rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]); } /* kif, anchor, overload_tbl are not copied over. */ rule->os_fingerprint = krule->os_fingerprint; rule->rtableid = krule->rtableid; bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout)); rule->max_states = krule->max_states; rule->max_src_nodes = krule->max_src_nodes; rule->max_src_states = krule->max_src_states; rule->max_src_conn = krule->max_src_conn; rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit; rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds; rule->qid = krule->qid; rule->pqid = krule->pqid; rule->nr = krule->nr; rule->prob = krule->prob; rule->cuid = krule->cuid; rule->cpid = krule->cpid; rule->return_icmp = krule->return_icmp; rule->return_icmp6 = krule->return_icmp6; rule->max_mss = krule->max_mss; rule->tag = krule->tag; rule->match_tag = krule->match_tag; rule->scrub_flags = krule->scrub_flags; bcopy(&krule->uid, &rule->uid, sizeof(krule->uid)); bcopy(&krule->gid, &rule->gid, sizeof(krule->gid)); rule->rule_flag = krule->rule_flag; rule->action = krule->action; rule->direction = krule->direction; rule->log = krule->log; rule->logif = krule->logif; rule->quick = krule->quick; rule->ifnot = krule->ifnot; rule->match_tag_not = krule->match_tag_not; rule->natpass = krule->natpass; rule->keep_state = krule->keep_state; rule->af = krule->af; rule->proto = krule->proto; rule->type = krule->type; rule->code = krule->code; rule->flags = krule->flags; rule->flagset = krule->flagset; rule->min_ttl = krule->min_ttl; rule->allow_opts = krule->allow_opts; rule->rt = krule->rt; rule->return_ttl = krule->return_ttl; rule->tos = krule->tos; rule->set_tos = krule->set_tos; rule->anchor_relative = krule->anchor_relative; rule->anchor_wildcard = krule->anchor_wildcard; rule->flush = krule->flush; rule->prio = krule->prio; rule->set_prio[0] = krule->set_prio[0]; rule->set_prio[1] = krule->set_prio[1]; bcopy(&krule->divert, &rule->divert, sizeof(krule->divert)); rule->u_states_cur = counter_u64_fetch(krule->states_cur); rule->u_states_tot = counter_u64_fetch(krule->states_tot); rule->u_src_nodes = counter_u64_fetch(krule->src_nodes); } static int pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule) { int ret; #ifndef INET if (rule->af == AF_INET) { return (EAFNOSUPPORT); } #endif /* INET */ #ifndef INET6 if (rule->af == AF_INET6) { return (EAFNOSUPPORT); } #endif /* INET6 */ ret = pf_check_rule_addr(&rule->src); if (ret != 0) return (ret); ret = pf_check_rule_addr(&rule->dst); if (ret != 0) return (ret); bcopy(&rule->src, &krule->src, sizeof(rule->src)); bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); ret = 
pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->tagname, rule->tagname, sizeof(rule->tagname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, sizeof(rule->match_tagname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, sizeof(rule->overload_tblname)); if (ret != 0) return (ret); pf_pool_to_kpool(&rule->rpool, &krule->rpool); /* Don't allow userspace to set evaluations, packets or bytes. */ /* kif, anchor, overload_tbl are not copied over. */ krule->os_fingerprint = rule->os_fingerprint; krule->rtableid = rule->rtableid; bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); krule->max_states = rule->max_states; krule->max_src_nodes = rule->max_src_nodes; krule->max_src_states = rule->max_src_states; krule->max_src_conn = rule->max_src_conn; krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; krule->qid = rule->qid; krule->pqid = rule->pqid; krule->nr = rule->nr; krule->prob = rule->prob; krule->cuid = rule->cuid; krule->cpid = rule->cpid; krule->return_icmp = rule->return_icmp; krule->return_icmp6 = rule->return_icmp6; krule->max_mss = rule->max_mss; krule->tag = rule->tag; krule->match_tag = rule->match_tag; krule->scrub_flags = rule->scrub_flags; bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); krule->rule_flag = rule->rule_flag; krule->action = rule->action; krule->direction = rule->direction; krule->log = rule->log; krule->logif = rule->logif; krule->quick = rule->quick; krule->ifnot = rule->ifnot; krule->match_tag_not = rule->match_tag_not; krule->natpass = rule->natpass; krule->keep_state = rule->keep_state; krule->af = rule->af; krule->proto = rule->proto; krule->type = rule->type; krule->code = rule->code; krule->flags = rule->flags; krule->flagset = rule->flagset; krule->min_ttl = rule->min_ttl; krule->allow_opts = rule->allow_opts; krule->rt = rule->rt; krule->return_ttl = rule->return_ttl; krule->tos = rule->tos; krule->set_tos = rule->set_tos; krule->flush = rule->flush; krule->prio = rule->prio; krule->set_prio[0] = rule->set_prio[0]; krule->set_prio[1] = rule->set_prio[1]; bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); return (0); } static int pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk, struct pf_kstate_kill *kill) { int ret; bzero(kill, sizeof(*kill)); bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp)); kill->psk_af = psk->psk_af; kill->psk_proto = psk->psk_proto; bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src)); bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst)); ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname, sizeof(kill->psk_ifname)); if (ret != 0) return (ret); ret = pf_user_strcpy(kill->psk_label, psk->psk_label, sizeof(kill->psk_label)); if (ret != 0) return (ret); return (0); } static int pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, uint32_t pool_ticket, const char *anchor, const char *anchor_call, struct thread *td) { struct pf_kruleset *ruleset; struct pf_krule *tail; struct
pf_kpooladdr *pa; struct pfi_kkif *kif = NULL; int rs_num; int error = 0; if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { error = EINVAL; goto errout_unlocked; } #define ERROUT(x) ERROUT_FUNCTION(errout, x) if (rule->ifname[0]) kif = pf_kkif_create(M_WAITOK); pf_counter_u64_init(&rule->evaluations, M_WAITOK); for (int i = 0; i < 2; i++) { pf_counter_u64_init(&rule->packets[i], M_WAITOK); pf_counter_u64_init(&rule->bytes[i], M_WAITOK); } rule->states_cur = counter_u64_alloc(M_WAITOK); rule->states_tot = counter_u64_alloc(M_WAITOK); rule->src_nodes = counter_u64_alloc(M_WAITOK); rule->cuid = td->td_ucred->cr_ruid; rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; TAILQ_INIT(&rule->rpool.list); PF_CONFIG_LOCK(); PF_RULES_WLOCK(); #ifdef PF_WANT_32_TO_64_COUNTER LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); MPASS(!rule->allrulelinked); rule->allrulelinked = true; V_pf_allrulecount++; #endif ruleset = pf_find_kruleset(anchor); if (ruleset == NULL) ERROUT(EINVAL); rs_num = pf_get_ruleset_number(rule->action); if (rs_num >= PF_RULESET_MAX) ERROUT(EINVAL); if (ticket != ruleset->rules[rs_num].inactive.ticket) { DPFPRINTF(PF_DEBUG_MISC, ("ticket: %d != [%d]%d\n", ticket, rs_num, ruleset->rules[rs_num].inactive.ticket)); ERROUT(EBUSY); } if (pool_ticket != V_ticket_pabuf) { DPFPRINTF(PF_DEBUG_MISC, ("pool_ticket: %d != %d\n", pool_ticket, V_ticket_pabuf)); ERROUT(EBUSY); } /* * XXXMJG hack: there is no mechanism to ensure they started the * transaction. Ticket checked above may happen to match by accident, * even if nobody called DIOCXBEGIN, let alone this process. * Partially work around it by checking if the RB tree got allocated, * see pf_begin_rules. */ if (ruleset->rules[rs_num].inactive.tree == NULL) { ERROUT(EINVAL); } tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_krulequeue); if (tail) rule->nr = tail->nr + 1; else rule->nr = 0; if (rule->ifname[0]) { rule->kif = pfi_kkif_attach(kif, rule->ifname); kif = NULL; pfi_kkif_ref(rule->kif); } else rule->kif = NULL; if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) error = EBUSY; #ifdef ALTQ /* set queue IDs */ if (rule->qname[0] != 0) { if ((rule->qid = pf_qname2qid(rule->qname)) == 0) error = EBUSY; else if (rule->pqname[0] != 0) { if ((rule->pqid = pf_qname2qid(rule->pqname)) == 0) error = EBUSY; } else rule->pqid = rule->qid; } #endif if (rule->tagname[0]) if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) error = EBUSY; if (rule->match_tagname[0]) if ((rule->match_tag = pf_tagname2tag(rule->match_tagname)) == 0) error = EBUSY; if (rule->rt && !rule->direction) error = EINVAL; if (!rule->log) rule->logif = 0; if (rule->logif >= PFLOGIFS_MAX) error = EINVAL; if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) error = ENOMEM; if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) error = ENOMEM; if (pf_kanchor_setup(rule, ruleset, anchor_call)) error = EINVAL; if (rule->scrub_flags & PFSTATE_SETPRIO && (rule->set_prio[0] > PF_PRIO_MAX || rule->set_prio[1] > PF_PRIO_MAX)) error = EINVAL; TAILQ_FOREACH(pa, &V_pf_pabuf, entries) if (pa->addr.type == PF_ADDR_TABLE) { pa->addr.p.tbl = pfr_attach_table(ruleset, pa->addr.v.tblname); if (pa->addr.p.tbl == NULL) error = ENOMEM; } rule->overload_tbl = NULL; if (rule->overload_tblname[0]) { if ((rule->overload_tbl = pfr_attach_table(ruleset, rule->overload_tblname)) == NULL) error = EINVAL; else rule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; } pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || (rule->action == 
PF_BINAT)) && rule->anchor == NULL) || (rule->rt > PF_NOPFROUTE)) && (TAILQ_FIRST(&rule->rpool.list) == NULL)) error = EINVAL; if (error) { pf_free_rule(rule); rule = NULL; ERROUT(error); } rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, rule, entries); ruleset->rules[rs_num].inactive.rcount++; PF_RULES_WUNLOCK(); pf_hash_rule(rule); if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { PF_RULES_WLOCK(); TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); ruleset->rules[rs_num].inactive.rcount--; pf_free_rule(rule); rule = NULL; ERROUT(EEXIST); } PF_CONFIG_UNLOCK(); return (0); #undef ERROUT errout: PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); errout_unlocked: pf_kkif_free(kif); pf_krule_free(rule); return (error); } static bool pf_label_match(const struct pf_krule *rule, const char *label) { int i = 0; while (*rule->label[i]) { if (strcmp(rule->label[i], label) == 0) return (true); i++; } return (false); } static unsigned int pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) { struct pf_kstate *s; int more = 0; s = pf_find_state_all(key, dir, &more); if (s == NULL) return (0); if (more) { PF_STATE_UNLOCK(s); return (0); } pf_unlink_state(s); return (1); } static int pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) { struct pf_kstate *s; struct pf_state_key *sk; struct pf_addr *srcaddr, *dstaddr; struct pf_state_key_cmp match_key; int idx, killed = 0; unsigned int dir; u_int16_t srcport, dstport; struct pfi_kkif *kif; relock_DIOCKILLSTATES: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { /* For floating states look at the original kif. */ kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; sk = s->key[PF_SK_WIRE]; if (s->direction == PF_OUT) { srcaddr = &sk->addr[1]; dstaddr = &sk->addr[0]; srcport = sk->port[1]; dstport = sk->port[0]; } else { srcaddr = &sk->addr[0]; dstaddr = &sk->addr[1]; srcport = sk->port[0]; dstport = sk->port[1]; } if (psk->psk_af && sk->af != psk->psk_af) continue; if (psk->psk_proto && psk->psk_proto != sk->proto) continue; if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) continue; if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) continue; if (! PF_MATCHA(psk->psk_rt_addr.neg, &psk->psk_rt_addr.addr.v.a.addr, &psk->psk_rt_addr.addr.v.a.mask, &s->rt_addr, sk->af)) continue; if (psk->psk_src.port_op != 0 && ! pf_match_port(psk->psk_src.port_op, psk->psk_src.port[0], psk->psk_src.port[1], srcport)) continue; if (psk->psk_dst.port_op != 0 && ! pf_match_port(psk->psk_dst.port_op, psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) continue; if (psk->psk_label[0] && ! pf_label_match(s->rule.ptr, psk->psk_label)) continue; if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, kif->pfik_name)) continue; if (psk->psk_kill_match) { /* Create the key to find matching states, with lock * held. 
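* Direction and the stack/wire key side are swapped so that the companion state created for the opposite direction is found and killed too.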
*/ bzero(&match_key, sizeof(match_key)); if (s->direction == PF_OUT) { dir = PF_IN; idx = PF_SK_STACK; } else { dir = PF_OUT; idx = PF_SK_WIRE; } match_key.af = s->key[idx]->af; match_key.proto = s->key[idx]->proto; PF_ACPY(&match_key.addr[0], &s->key[idx]->addr[1], match_key.af); match_key.port[0] = s->key[idx]->port[1]; PF_ACPY(&match_key.addr[1], &s->key[idx]->addr[0], match_key.af); match_key.port[1] = s->key[idx]->port[0]; } pf_unlink_state(s); killed++; if (psk->psk_kill_match) killed += pf_kill_matching_state(&match_key, dir); goto relock_DIOCKILLSTATES; } PF_HASHROW_UNLOCK(ih); return (killed); } static int pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { int error = 0; PF_RULES_RLOCK_TRACKER; #define ERROUT_IOCTL(target, x) \ do { \ error = (x); \ SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ goto target; \ } while (0) /* XXX keep in sync with switch() below */ if (securelevel_gt(td->td_ucred, 2)) switch (cmd) { case DIOCGETRULES: case DIOCGETRULE: case DIOCGETRULENV: case DIOCGETADDRS: case DIOCGETADDR: case DIOCGETSTATE: case DIOCGETSTATENV: case DIOCSETSTATUSIF: case DIOCGETSTATUS: case DIOCGETSTATUSNV: case DIOCCLRSTATUS: case DIOCNATLOOK: case DIOCSETDEBUG: case DIOCGETSTATES: case DIOCGETSTATESV2: case DIOCGETTIMEOUT: case DIOCCLRRULECTRS: case DIOCGETLIMIT: case DIOCGETALTQSV0: case DIOCGETALTQSV1: case DIOCGETALTQV0: case DIOCGETALTQV1: case DIOCGETQSTATSV0: case DIOCGETQSTATSV1: case DIOCGETRULESETS: case DIOCGETRULESET: case DIOCRGETTABLES: case DIOCRGETTSTATS: case DIOCRCLRTSTATS: case DIOCRCLRADDRS: case DIOCRADDADDRS: case DIOCRDELADDRS: case DIOCRSETADDRS: case DIOCRGETADDRS: case DIOCRGETASTATS: case DIOCRCLRASTATS: case DIOCRTSTADDRS: case DIOCOSFPGET: case DIOCGETSRCNODES: case DIOCCLRSRCNODES: case DIOCGETSYNCOOKIES: case DIOCIGETIFACES: case DIOCGIFSPEEDV0: case DIOCGIFSPEEDV1: case DIOCSETIFFLAG: case DIOCCLRIFFLAG: case DIOCGETETHRULES: case DIOCGETETHRULE: case DIOCGETETHRULESETS: case DIOCGETETHRULESET: break; case DIOCRCLRTABLES: case DIOCRADDTABLES: case DIOCRDELTABLES: case DIOCRSETTFLAGS: if (((struct pfioc_table *)addr)->pfrio_flags & PFR_FLAG_DUMMY) break; /* dummy operation ok */ return (EPERM); default: return (EPERM); } if (!(flags & FWRITE)) switch (cmd) { case DIOCGETRULES: case DIOCGETADDRS: case DIOCGETADDR: case DIOCGETSTATE: case DIOCGETSTATENV: case DIOCGETSTATUS: case DIOCGETSTATUSNV: case DIOCGETSTATES: case DIOCGETSTATESV2: case DIOCGETTIMEOUT: case DIOCGETLIMIT: case DIOCGETALTQSV0: case DIOCGETALTQSV1: case DIOCGETALTQV0: case DIOCGETALTQV1: case DIOCGETQSTATSV0: case DIOCGETQSTATSV1: case DIOCGETRULESETS: case DIOCGETRULESET: case DIOCNATLOOK: case DIOCRGETTABLES: case DIOCRGETTSTATS: case DIOCRGETADDRS: case DIOCRGETASTATS: case DIOCRTSTADDRS: case DIOCOSFPGET: case DIOCGETSRCNODES: case DIOCGETSYNCOOKIES: case DIOCIGETIFACES: case DIOCGIFSPEEDV1: case DIOCGIFSPEEDV0: case DIOCGETRULENV: case DIOCGETETHRULES: case DIOCGETETHRULE: case DIOCGETETHRULESETS: case DIOCGETETHRULESET: break; case DIOCRCLRTABLES: case DIOCRADDTABLES: case DIOCRDELTABLES: case DIOCRCLRTSTATS: case DIOCRCLRADDRS: case DIOCRADDADDRS: case DIOCRDELADDRS: case DIOCRSETADDRS: case DIOCRSETTFLAGS: if (((struct pfioc_table *)addr)->pfrio_flags & PFR_FLAG_DUMMY) { flags |= FWRITE; /* need write lock for dummy */ break; /* dummy operation ok */ } return (EACCES); case DIOCGETRULE: if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR) return (EACCES); break; default: return (EACCES); } CURVNET_SET(TD_TO_VNET(td)); 
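/* Main command dispatch: each ioctl below runs in the caller's vnet context, with the securelevel and FWRITE permission gating already done above. */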
switch (cmd) { case DIOCSTART: sx_xlock(&pf_ioctl_lock); if (V_pf_status.running) error = EEXIST; else { hook_pf(); if (! TAILQ_EMPTY(V_pf_keth->active.rules)) hook_pf_eth(); V_pf_status.running = 1; V_pf_status.since = time_second; new_unrhdr64(&V_pf_stateid, time_second); DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); } break; case DIOCSTOP: sx_xlock(&pf_ioctl_lock); if (!V_pf_status.running) error = ENOENT; else { V_pf_status.running = 0; dehook_pf(); dehook_pf_eth(); V_pf_status.since = time_second; DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); } break; case DIOCGETETHRULES: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl; void *packed; struct pf_keth_rule *tail; struct pf_keth_ruleset *rs; u_int32_t ticket, nr; const char *anchor = ""; nvl = NULL; packed = NULL; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); /* Copy the request in */ packed = malloc(nv->len, M_NVLIST, M_WAITOK); if (packed == NULL) ERROUT(ENOMEM); error = copyin(nv->data, packed, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(packed, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "anchor")) ERROUT(EBADMSG); anchor = nvlist_get_string(nvl, "anchor"); rs = pf_find_keth_ruleset(anchor); nvlist_destroy(nvl); nvl = NULL; free(packed, M_NVLIST); packed = NULL; if (rs == NULL) ERROUT(ENOENT); /* Reply */ nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); PF_RULES_RLOCK(); ticket = rs->active.ticket; tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); if (tail) nr = tail->nr + 1; else nr = 0; PF_RULES_RUNLOCK(); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_number(nvl, "nr", nr); packed = nvlist_pack(nvl, &nv->len); if (packed == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(packed, nv->data, nv->len); #undef ERROUT DIOCGETETHRULES_error: free(packed, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCGETETHRULE: { struct epoch_tracker et; struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_rule *rule = NULL; struct pf_keth_ruleset *rs; u_int32_t ticket, nr; bool clear = false; const char *anchor; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EBADMSG); ticket = nvlist_get_number(nvl, "ticket"); if (! nvlist_exists_string(nvl, "anchor")) ERROUT(EBADMSG); anchor = nvlist_get_string(nvl, "anchor"); if (nvlist_exists_bool(nvl, "clear")) clear = nvlist_get_bool(nvl, "clear"); if (clear && !(flags & FWRITE)) ERROUT(EACCES); if (! nvlist_exists_number(nvl, "nr")) ERROUT(EBADMSG); nr = nvlist_get_number(nvl, "nr"); PF_RULES_RLOCK(); rs = pf_find_keth_ruleset(anchor); if (rs == NULL) { PF_RULES_RUNLOCK(); ERROUT(ENOENT); } if (ticket != rs->active.ticket) { PF_RULES_RUNLOCK(); ERROUT(EBUSY); } nvlist_destroy(nvl); nvl = NULL; free(nvlpacked, M_NVLIST); nvlpacked = NULL; rule = TAILQ_FIRST(rs->active.rules); while ((rule != NULL) && (rule->nr != nr)) rule = TAILQ_NEXT(rule, entries); if (rule == NULL) { PF_RULES_RUNLOCK(); ERROUT(ENOENT); } /* Make sure rule can't go away. 
*/ NET_EPOCH_ENTER(et); PF_RULES_RUNLOCK(); nvl = pf_keth_rule_to_nveth_rule(rule); if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) ERROUT(EBUSY); NET_EPOCH_EXIT(et); if (nvl == NULL) ERROUT(ENOMEM); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); if (error == 0 && clear) { counter_u64_zero(rule->evaluations); for (int i = 0; i < 2; i++) { counter_u64_zero(rule->packets[i]); counter_u64_zero(rule->bytes[i]); } } #undef ERROUT DIOCGETETHRULE_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCADDETHRULE: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_rule *rule = NULL, *tail = NULL; struct pf_keth_ruleset *ruleset = NULL; struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL; const char *anchor = "", *anchor_call = ""; #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EBADMSG); if (nvlist_exists_string(nvl, "anchor")) anchor = nvlist_get_string(nvl, "anchor"); if (nvlist_exists_string(nvl, "anchor_call")) anchor_call = nvlist_get_string(nvl, "anchor_call"); ruleset = pf_find_keth_ruleset(anchor); if (ruleset == NULL) ERROUT(EINVAL); if (nvlist_get_number(nvl, "ticket") != ruleset->inactive.ticket) { DPFPRINTF(PF_DEBUG_MISC, ("ticket: %d != %d\n", (u_int32_t)nvlist_get_number(nvl, "ticket"), ruleset->inactive.ticket)); ERROUT(EBUSY); } rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); if (rule == NULL) ERROUT(ENOMEM); rule->timestamp = NULL; error = pf_nveth_rule_to_keth_rule(nvl, rule); if (error != 0) ERROUT(error); if (rule->ifname[0]) kif = pf_kkif_create(M_WAITOK); if (rule->bridge_to_name[0]) bridge_to_kif = pf_kkif_create(M_WAITOK); rule->evaluations = counter_u64_alloc(M_WAITOK); for (int i = 0; i < 2; i++) { rule->packets[i] = counter_u64_alloc(M_WAITOK); rule->bytes[i] = counter_u64_alloc(M_WAITOK); } rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, M_WAITOK | M_ZERO); PF_RULES_WLOCK(); if (rule->ifname[0]) { rule->kif = pfi_kkif_attach(kif, rule->ifname); pfi_kkif_ref(rule->kif); } else rule->kif = NULL; if (rule->bridge_to_name[0]) { rule->bridge_to = pfi_kkif_attach(bridge_to_kif, rule->bridge_to_name); pfi_kkif_ref(rule->bridge_to); } else rule->bridge_to = NULL; #ifdef ALTQ /* set queue IDs */ if (rule->qname[0] != 0) { if ((rule->qid = pf_qname2qid(rule->qname)) == 0) error = EBUSY; else rule->qid = rule->qid; } #endif if (rule->tagname[0]) if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) error = EBUSY; if (rule->match_tagname[0]) if ((rule->match_tag = pf_tagname2tag( rule->match_tagname)) == 0) error = EBUSY; if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); if (error) { pf_free_eth_rule(rule); PF_RULES_WUNLOCK(); ERROUT(error); } if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { pf_free_eth_rule(rule); PF_RULES_WUNLOCK(); ERROUT(EINVAL); } tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); if (tail) rule->nr = 
tail->nr + 1; else rule->nr = 0; TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); PF_RULES_WUNLOCK(); #undef ERROUT DIOCADDETHRULE_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); break; } case DIOCGETETHRULESETS: { struct epoch_tracker et; struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_ruleset *ruleset; struct pf_keth_anchor *anchor; int nr = 0; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "path")) ERROUT(EBADMSG); NET_EPOCH_ENTER(et); if ((ruleset = pf_find_keth_ruleset( nvlist_get_string(nvl, "path"))) == NULL) { NET_EPOCH_EXIT(et); ERROUT(ENOENT); } if (ruleset->anchor == NULL) { RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) if (anchor->parent == NULL) nr++; } else { RB_FOREACH(anchor, pf_keth_anchor_node, &ruleset->anchor->children) nr++; } NET_EPOCH_EXIT(et); nvlist_destroy(nvl); nvl = NULL; free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvlist_add_number(nvl, "nr", nr); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT DIOCGETETHRULESETS_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCGETETHRULESET: { struct epoch_tracker et; struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_ruleset *ruleset; struct pf_keth_anchor *anchor; int nr = 0, req_nr = 0; bool found = false; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "path")) ERROUT(EBADMSG); if (! 
nvlist_exists_number(nvl, "nr")) ERROUT(EBADMSG); req_nr = nvlist_get_number(nvl, "nr"); NET_EPOCH_ENTER(et); if ((ruleset = pf_find_keth_ruleset( nvlist_get_string(nvl, "path"))) == NULL) { NET_EPOCH_EXIT(et); ERROUT(ENOENT); } nvlist_destroy(nvl); nvl = NULL; free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvl = nvlist_create(0); if (nvl == NULL) { NET_EPOCH_EXIT(et); ERROUT(ENOMEM); } if (ruleset->anchor == NULL) { RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) { if (anchor->parent == NULL && nr++ == req_nr) { found = true; break; } } } else { RB_FOREACH(anchor, pf_keth_anchor_node, &ruleset->anchor->children) { if (nr++ == req_nr) { found = true; break; } } } NET_EPOCH_EXIT(et); if (found) { nvlist_add_number(nvl, "nr", nr); nvlist_add_string(nvl, "name", anchor->name); if (ruleset->anchor) nvlist_add_string(nvl, "path", ruleset->anchor->path); else nvlist_add_string(nvl, "path", ""); } else { ERROUT(EBUSY); } nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT DIOCGETETHRULESET_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCADDRULENV: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_krule *rule = NULL; const char *anchor = "", *anchor_call = ""; uint32_t ticket = 0, pool_ticket = 0; #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EINVAL); ticket = nvlist_get_number(nvl, "ticket"); if (! nvlist_exists_number(nvl, "pool_ticket")) ERROUT(EINVAL); pool_ticket = nvlist_get_number(nvl, "pool_ticket"); if (! 
nvlist_exists_nvlist(nvl, "rule")) ERROUT(EINVAL); rule = pf_krule_alloc(); error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), rule); if (error) ERROUT(error); if (nvlist_exists_string(nvl, "anchor")) anchor = nvlist_get_string(nvl, "anchor"); if (nvlist_exists_string(nvl, "anchor_call")) anchor_call = nvlist_get_string(nvl, "anchor_call"); if ((error = nvlist_error(nvl))) ERROUT(error); /* Frees rule on error */ error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, anchor_call, td); nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); break; #undef ERROUT DIOCADDRULENV_error: pf_krule_free(rule); nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); break; } case DIOCADDRULE: { struct pfioc_rule *pr = (struct pfioc_rule *)addr; struct pf_krule *rule; rule = pf_krule_alloc(); error = pf_rule_to_krule(&pr->rule, rule); if (error != 0) { pf_krule_free(rule); break; } pr->anchor[sizeof(pr->anchor) - 1] = 0; /* Frees rule on error */ error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, pr->anchor, pr->anchor_call, td); break; } case DIOCGETRULES: { struct pfioc_rule *pr = (struct pfioc_rule *)addr; struct pf_kruleset *ruleset; struct pf_krule *tail; int rs_num; pr->anchor[sizeof(pr->anchor) - 1] = 0; PF_RULES_WLOCK(); ruleset = pf_find_kruleset(pr->anchor); if (ruleset == NULL) { PF_RULES_WUNLOCK(); error = EINVAL; break; } rs_num = pf_get_ruleset_number(pr->rule.action); if (rs_num >= PF_RULESET_MAX) { PF_RULES_WUNLOCK(); error = EINVAL; break; } tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, pf_krulequeue); if (tail) pr->nr = tail->nr + 1; else pr->nr = 0; pr->ticket = ruleset->rules[rs_num].active.ticket; PF_RULES_WUNLOCK(); break; } case DIOCGETRULE: { struct pfioc_rule *pr = (struct pfioc_rule *)addr; struct pf_kruleset *ruleset; struct pf_krule *rule; int rs_num; pr->anchor[sizeof(pr->anchor) - 1] = 0; PF_RULES_WLOCK(); ruleset = pf_find_kruleset(pr->anchor); if (ruleset == NULL) { PF_RULES_WUNLOCK(); error = EINVAL; break; } rs_num = pf_get_ruleset_number(pr->rule.action); if (rs_num >= PF_RULESET_MAX) { PF_RULES_WUNLOCK(); error = EINVAL; break; } if (pr->ticket != ruleset->rules[rs_num].active.ticket) { PF_RULES_WUNLOCK(); error = EBUSY; break; } rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); while ((rule != NULL) && (rule->nr != pr->nr)) rule = TAILQ_NEXT(rule, entries); if (rule == NULL) { PF_RULES_WUNLOCK(); error = EBUSY; break; } pf_krule_to_rule(rule, &pr->rule); if (pf_kanchor_copyout(ruleset, rule, pr)) { PF_RULES_WUNLOCK(); error = EBUSY; break; } pf_addr_copyout(&pr->rule.src.addr); pf_addr_copyout(&pr->rule.dst.addr); if (pr->action == PF_GET_CLR_CNTR) { pf_counter_u64_zero(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_zero(&rule->packets[i]); pf_counter_u64_zero(&rule->bytes[i]); } counter_u64_zero(rule->states_tot); } PF_RULES_WUNLOCK(); break; } case DIOCGETRULENV: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvrule = NULL; nvlist_t *nvl = NULL; struct pf_kruleset *ruleset; struct pf_krule *rule; void *nvlpacked = NULL; int rs_num, nr; bool clear_counter = false; #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); /* Copy the request in */ nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "anchor")) ERROUT(EBADMSG); if (! 
nvlist_exists_number(nvl, "ruleset")) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "nr")) ERROUT(EBADMSG); if (nvlist_exists_bool(nvl, "clear_counter")) clear_counter = nvlist_get_bool(nvl, "clear_counter"); if (clear_counter && !(flags & FWRITE)) ERROUT(EACCES); nr = nvlist_get_number(nvl, "nr"); PF_RULES_WLOCK(); ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); if (ruleset == NULL) { PF_RULES_WUNLOCK(); ERROUT(ENOENT); } rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); if (rs_num >= PF_RULESET_MAX) { PF_RULES_WUNLOCK(); ERROUT(EINVAL); } if (nvlist_get_number(nvl, "ticket") != ruleset->rules[rs_num].active.ticket) { PF_RULES_WUNLOCK(); ERROUT(EBUSY); } if ((error = nvlist_error(nvl))) { PF_RULES_WUNLOCK(); ERROUT(error); } rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); while ((rule != NULL) && (rule->nr != nr)) rule = TAILQ_NEXT(rule, entries); if (rule == NULL) { PF_RULES_WUNLOCK(); ERROUT(EBUSY); } nvrule = pf_krule_to_nvrule(rule); nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) { PF_RULES_WUNLOCK(); ERROUT(ENOMEM); } nvlist_add_number(nvl, "nr", nr); nvlist_add_nvlist(nvl, "rule", nvrule); nvlist_destroy(nvrule); nvrule = NULL; if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { PF_RULES_WUNLOCK(); ERROUT(EBUSY); } free(nvlpacked, M_NVLIST); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) { PF_RULES_WUNLOCK(); ERROUT(ENOMEM); } if (nv->size == 0) { PF_RULES_WUNLOCK(); ERROUT(0); } else if (nv->size < nv->len) { PF_RULES_WUNLOCK(); ERROUT(ENOSPC); } if (clear_counter) { pf_counter_u64_zero(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_zero(&rule->packets[i]); pf_counter_u64_zero(&rule->bytes[i]); } counter_u64_zero(rule->states_tot); } PF_RULES_WUNLOCK(); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT DIOCGETRULENV_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvrule); nvlist_destroy(nvl); break; } case DIOCCHANGERULE: { struct pfioc_rule *pcr = (struct pfioc_rule *)addr; struct pf_kruleset *ruleset; struct pf_krule *oldrule = NULL, *newrule = NULL; struct pfi_kkif *kif = NULL; struct pf_kpooladdr *pa; u_int32_t nr = 0; int rs_num; pcr->anchor[sizeof(pcr->anchor) - 1] = 0; if (pcr->action < PF_CHANGE_ADD_HEAD || pcr->action > PF_CHANGE_GET_TICKET) { error = EINVAL; break; } if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { error = EINVAL; break; } if (pcr->action != PF_CHANGE_REMOVE) { newrule = pf_krule_alloc(); error = pf_rule_to_krule(&pcr->rule, newrule); if (error != 0) { pf_krule_free(newrule); break; } if (newrule->ifname[0]) kif = pf_kkif_create(M_WAITOK); pf_counter_u64_init(&newrule->evaluations, M_WAITOK); for (int i = 0; i < 2; i++) { pf_counter_u64_init(&newrule->packets[i], M_WAITOK); pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); } newrule->states_cur = counter_u64_alloc(M_WAITOK); newrule->states_tot = counter_u64_alloc(M_WAITOK); newrule->src_nodes = counter_u64_alloc(M_WAITOK); newrule->cuid = td->td_ucred->cr_ruid; newrule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; TAILQ_INIT(&newrule->rpool.list); } #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) PF_CONFIG_LOCK(); PF_RULES_WLOCK(); #ifdef PF_WANT_32_TO_64_COUNTER if (newrule != NULL) { LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); newrule->allrulelinked = true; V_pf_allrulecount++; } #endif if (!(pcr->action == PF_CHANGE_REMOVE || pcr->action == PF_CHANGE_GET_TICKET) && pcr->pool_ticket != V_ticket_pabuf) ERROUT(EBUSY); ruleset = pf_find_kruleset(pcr->anchor); if (ruleset == NULL) ERROUT(EINVAL); rs_num = pf_get_ruleset_number(pcr->rule.action); if (rs_num >= PF_RULESET_MAX) ERROUT(EINVAL); /* * XXXMJG: there is no guarantee that the ruleset was * created by the usual route of calling DIOCXBEGIN. * As a result it is possible the rule tree will not * be allocated yet. Hack around it by doing it here. * Note it is fine to let the tree persist in case of * error as it will be freed down the road on future * updates (if need be). */ if (ruleset->rules[rs_num].active.tree == NULL) { ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); if (ruleset->rules[rs_num].active.tree == NULL) { ERROUT(ENOMEM); } } if (pcr->action == PF_CHANGE_GET_TICKET) { pcr->ticket = ++ruleset->rules[rs_num].active.ticket; ERROUT(0); } else if (pcr->ticket != ruleset->rules[rs_num].active.ticket) ERROUT(EINVAL); if (pcr->action != PF_CHANGE_REMOVE) { if (newrule->ifname[0]) { newrule->kif = pfi_kkif_attach(kif, newrule->ifname); kif = NULL; pfi_kkif_ref(newrule->kif); } else newrule->kif = NULL; if (newrule->rtableid > 0 && newrule->rtableid >= rt_numfibs) error = EBUSY; #ifdef ALTQ /* set queue IDs */ if (newrule->qname[0] != 0) { if ((newrule->qid = pf_qname2qid(newrule->qname)) == 0) error = EBUSY; else if (newrule->pqname[0] != 0) { if ((newrule->pqid = pf_qname2qid(newrule->pqname)) == 0) error = EBUSY; } else newrule->pqid = newrule->qid; } #endif /* ALTQ */ if (newrule->tagname[0]) if ((newrule->tag = pf_tagname2tag(newrule->tagname)) == 0) error = EBUSY; if (newrule->match_tagname[0]) if ((newrule->match_tag = pf_tagname2tag( newrule->match_tagname)) == 0) error = EBUSY; if (newrule->rt && !newrule->direction) error = EINVAL; if (!newrule->log) newrule->logif = 0; if (newrule->logif >= PFLOGIFS_MAX) error = EINVAL; if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) error = ENOMEM; if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) error = ENOMEM; if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) error = EINVAL; TAILQ_FOREACH(pa, &V_pf_pabuf, entries) if (pa->addr.type == PF_ADDR_TABLE) { pa->addr.p.tbl = pfr_attach_table(ruleset, pa->addr.v.tblname); if (pa->addr.p.tbl == NULL) error = ENOMEM; } newrule->overload_tbl = NULL; if (newrule->overload_tblname[0]) { if ((newrule->overload_tbl = pfr_attach_table( ruleset, newrule->overload_tblname)) == NULL) error = EINVAL; else newrule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; } pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); if (((((newrule->action == PF_NAT) || (newrule->action == PF_RDR) || (newrule->action == PF_BINAT) || (newrule->rt > PF_NOPFROUTE)) && !newrule->anchor)) && (TAILQ_FIRST(&newrule->rpool.list) == NULL)) error = EINVAL; if (error) { pf_free_rule(newrule); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); break; } newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); } pf_empty_kpool(&V_pf_pabuf); if (pcr->action == PF_CHANGE_ADD_HEAD) oldrule = TAILQ_FIRST( ruleset->rules[rs_num].active.ptr); else if (pcr->action == PF_CHANGE_ADD_TAIL) oldrule = TAILQ_LAST( 
ruleset->rules[rs_num].active.ptr, pf_krulequeue); else { oldrule = TAILQ_FIRST( ruleset->rules[rs_num].active.ptr); while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) oldrule = TAILQ_NEXT(oldrule, entries); if (oldrule == NULL) { if (newrule != NULL) pf_free_rule(newrule); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); error = EINVAL; break; } } if (pcr->action == PF_CHANGE_REMOVE) { pf_unlink_rule(ruleset->rules[rs_num].active.ptr, oldrule); RB_REMOVE(pf_krule_global, ruleset->rules[rs_num].active.tree, oldrule); ruleset->rules[rs_num].active.rcount--; } else { pf_hash_rule(newrule); if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].active.tree, newrule) != NULL) { pf_free_rule(newrule); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); error = EEXIST; break; } if (oldrule == NULL) TAILQ_INSERT_TAIL( ruleset->rules[rs_num].active.ptr, newrule, entries); else if (pcr->action == PF_CHANGE_ADD_HEAD || pcr->action == PF_CHANGE_ADD_BEFORE) TAILQ_INSERT_BEFORE(oldrule, newrule, entries); else TAILQ_INSERT_AFTER( ruleset->rules[rs_num].active.ptr, oldrule, newrule, entries); ruleset->rules[rs_num].active.rcount++; } nr = 0; TAILQ_FOREACH(oldrule, ruleset->rules[rs_num].active.ptr, entries) oldrule->nr = nr++; ruleset->rules[rs_num].active.ticket++; pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); pf_remove_if_empty_kruleset(ruleset); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); break; #undef ERROUT DIOCCHANGERULE_error: PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); pf_krule_free(newrule); pf_kkif_free(kif); break; } case DIOCCLRSTATES: { struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; struct pf_kstate_kill kill; error = pf_state_kill_to_kstate_kill(psk, &kill); if (error) break; psk->psk_killed = pf_clear_states(&kill); break; } case DIOCCLRSTATESNV: { error = pf_clearstates_nv((struct pfioc_nv *)addr); break; } case DIOCKILLSTATES: { struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; struct pf_kstate_kill kill; error = pf_state_kill_to_kstate_kill(psk, &kill); if (error) break; psk->psk_killed = 0; pf_killstates(&kill, &psk->psk_killed); break; } case DIOCKILLSTATESNV: { error = pf_killstates_nv((struct pfioc_nv *)addr); break; } case DIOCADDSTATE: { struct pfioc_state *ps = (struct pfioc_state *)addr; struct pfsync_state *sp = &ps->state; if (sp->timeout >= PFTM_MAX) { error = EINVAL; break; } if (V_pfsync_state_import_ptr != NULL) { PF_RULES_RLOCK(); error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); PF_RULES_RUNLOCK(); } else error = EOPNOTSUPP; break; } case DIOCGETSTATE: { struct pfioc_state *ps = (struct pfioc_state *)addr; struct pf_kstate *s; s = pf_find_state_byid(ps->state.id, ps->state.creatorid); if (s == NULL) { error = ENOENT; break; } pfsync_state_export(&ps->state, s); PF_STATE_UNLOCK(s); break; } case DIOCGETSTATENV: { error = pf_getstate((struct pfioc_nv *)addr); break; } case DIOCGETSTATES: { struct pfioc_states *ps = (struct pfioc_states *)addr; struct pf_kstate *s; struct pfsync_state *pstore, *p; int i, nr; size_t slice_count = 16, count; void *out; if (ps->ps_len <= 0) { nr = uma_zone_get_cur(V_pf_state_z); ps->ps_len = sizeof(struct pfsync_state) * nr; break; } out = ps->ps_states; pstore = mallocarray(slice_count, sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); nr = 0; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; DIOCGETSTATES_retry: p = pstore; if (LIST_EMPTY(&ih->states)) continue; PF_HASHROW_LOCK(ih); count = 0; LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; count++; } if 
(count > slice_count) { PF_HASHROW_UNLOCK(ih); free(pstore, M_TEMP); slice_count = count * 2; pstore = mallocarray(slice_count, sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); goto DIOCGETSTATES_retry; } if ((nr+count) * sizeof(*p) > ps->ps_len) { PF_HASHROW_UNLOCK(ih); goto DIOCGETSTATES_full; } LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; pfsync_state_export(p, s); p++; nr++; } PF_HASHROW_UNLOCK(ih); error = copyout(pstore, out, sizeof(struct pfsync_state) * count); if (error) break; out = ps->ps_states + nr; } DIOCGETSTATES_full: ps->ps_len = sizeof(struct pfsync_state) * nr; free(pstore, M_TEMP); break; } case DIOCGETSTATESV2: { struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; struct pf_kstate *s; struct pf_state_export *pstore, *p; int i, nr; size_t slice_count = 16, count; void *out; if (ps->ps_req_version > PF_STATE_VERSION) { error = ENOTSUP; break; } if (ps->ps_len <= 0) { nr = uma_zone_get_cur(V_pf_state_z); ps->ps_len = sizeof(struct pf_state_export) * nr; break; } out = ps->ps_states; pstore = mallocarray(slice_count, sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); nr = 0; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; DIOCGETSTATESV2_retry: p = pstore; if (LIST_EMPTY(&ih->states)) continue; PF_HASHROW_LOCK(ih); count = 0; LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; count++; } if (count > slice_count) { PF_HASHROW_UNLOCK(ih); free(pstore, M_TEMP); slice_count = count * 2; pstore = mallocarray(slice_count, sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); goto DIOCGETSTATESV2_retry; } if ((nr+count) * sizeof(*p) > ps->ps_len) { PF_HASHROW_UNLOCK(ih); goto DIOCGETSTATESV2_full; } LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; pf_state_export(p, s); p++; nr++; } PF_HASHROW_UNLOCK(ih); error = copyout(pstore, out, sizeof(struct pf_state_export) * count); if (error) break; out = ps->ps_states + nr; } DIOCGETSTATESV2_full: ps->ps_len = nr * sizeof(struct pf_state_export); free(pstore, M_TEMP); break; } case DIOCGETSTATUS: { struct pf_status *s = (struct pf_status *)addr; PF_RULES_RLOCK(); s->running = V_pf_status.running; s->since = V_pf_status.since; s->debug = V_pf_status.debug; s->hostid = V_pf_status.hostid; s->states = V_pf_status.states; s->src_nodes = V_pf_status.src_nodes; for (int i = 0; i < PFRES_MAX; i++) s->counters[i] = counter_u64_fetch(V_pf_status.counters[i]); for (int i = 0; i < LCNT_MAX; i++) s->lcounters[i] = counter_u64_fetch(V_pf_status.lcounters[i]); for (int i = 0; i < FCNT_MAX; i++) s->fcounters[i] = pf_counter_u64_fetch(&V_pf_status.fcounters[i]); for (int i = 0; i < SCNT_MAX; i++) s->scounters[i] = counter_u64_fetch(V_pf_status.scounters[i]); bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); bcopy(V_pf_status.pf_chksum, s->pf_chksum, PF_MD5_DIGEST_LENGTH); pfi_update_status(s->ifname, s); PF_RULES_RUNLOCK(); break; } case DIOCGETSTATUSNV: { error = pf_getstatus((struct pfioc_nv *)addr); break; } case DIOCSETSTATUSIF: { struct pfioc_if *pi = (struct pfioc_if *)addr; if (pi->ifname[0] == 0) { bzero(V_pf_status.ifname, IFNAMSIZ); break; } PF_RULES_WLOCK(); error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); PF_RULES_WUNLOCK(); break; } case DIOCCLRSTATUS: { PF_RULES_WLOCK(); for (int i = 0; i < PFRES_MAX; i++) counter_u64_zero(V_pf_status.counters[i]); for (int i = 0; i < FCNT_MAX; i++) pf_counter_u64_zero(&V_pf_status.fcounters[i]); for (int i = 0; i < SCNT_MAX; i++) 
counter_u64_zero(V_pf_status.scounters[i]); for (int i = 0; i < KLCNT_MAX; i++) counter_u64_zero(V_pf_status.lcounters[i]); V_pf_status.since = time_second; if (*V_pf_status.ifname) pfi_update_status(V_pf_status.ifname, NULL); PF_RULES_WUNLOCK(); break; } case DIOCNATLOOK: { struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; struct pf_state_key *sk; struct pf_kstate *state; struct pf_state_key_cmp key; int m = 0, direction = pnl->direction; int sidx, didx; /* NATLOOK src and dst are reversed, so reverse sidx/didx */ sidx = (direction == PF_IN) ? 1 : 0; didx = (direction == PF_IN) ? 0 : 1; if (!pnl->proto || PF_AZERO(&pnl->saddr, pnl->af) || PF_AZERO(&pnl->daddr, pnl->af) || ((pnl->proto == IPPROTO_TCP || pnl->proto == IPPROTO_UDP) && (!pnl->dport || !pnl->sport))) error = EINVAL; else { bzero(&key, sizeof(key)); key.af = pnl->af; key.proto = pnl->proto; PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); key.port[sidx] = pnl->sport; PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); key.port[didx] = pnl->dport; state = pf_find_state_all(&key, direction, &m); if (state == NULL) { error = ENOENT; } else { if (m > 1) { PF_STATE_UNLOCK(state); error = E2BIG; /* more than one state */ } else { sk = state->key[sidx]; PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); pnl->rsport = sk->port[sidx]; PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); pnl->rdport = sk->port[didx]; PF_STATE_UNLOCK(state); } } } break; } case DIOCSETTIMEOUT: { struct pfioc_tm *pt = (struct pfioc_tm *)addr; int old; if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || pt->seconds < 0) { error = EINVAL; break; } PF_RULES_WLOCK(); old = V_pf_default_rule.timeout[pt->timeout]; if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) pt->seconds = 1; V_pf_default_rule.timeout[pt->timeout] = pt->seconds; if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) wakeup(pf_purge_thread); pt->seconds = old; PF_RULES_WUNLOCK(); break; } case DIOCGETTIMEOUT: { struct pfioc_tm *pt = (struct pfioc_tm *)addr; if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { error = EINVAL; break; } PF_RULES_RLOCK(); pt->seconds = V_pf_default_rule.timeout[pt->timeout]; PF_RULES_RUNLOCK(); break; } case DIOCGETLIMIT: { struct pfioc_limit *pl = (struct pfioc_limit *)addr; if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { error = EINVAL; break; } PF_RULES_RLOCK(); pl->limit = V_pf_limits[pl->index].limit; PF_RULES_RUNLOCK(); break; } case DIOCSETLIMIT: { struct pfioc_limit *pl = (struct pfioc_limit *)addr; int old_limit; PF_RULES_WLOCK(); if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || V_pf_limits[pl->index].zone == NULL) { PF_RULES_WUNLOCK(); error = EINVAL; break; } uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); old_limit = V_pf_limits[pl->index].limit; V_pf_limits[pl->index].limit = pl->limit; pl->limit = old_limit; PF_RULES_WUNLOCK(); break; } case DIOCSETDEBUG: { u_int32_t *level = (u_int32_t *)addr; PF_RULES_WLOCK(); V_pf_status.debug = *level; PF_RULES_WUNLOCK(); break; } case DIOCCLRRULECTRS: { /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ struct pf_kruleset *ruleset = &pf_main_ruleset; struct pf_krule *rule; PF_RULES_WLOCK(); TAILQ_FOREACH(rule, ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { pf_counter_u64_zero(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_zero(&rule->packets[i]); pf_counter_u64_zero(&rule->bytes[i]); } } PF_RULES_WUNLOCK(); break; } case DIOCGIFSPEEDV0: case DIOCGIFSPEEDV1: { struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr; struct pf_ifspeed_v1 ps; struct ifnet *ifp; if 
	case DIOCSETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
		int old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pl->limit = V_pf_limits[pl->index].limit;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
		int old_limit;

		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t *level = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		V_pf_status.debug = *level;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_kruleset	*ruleset = &pf_main_ruleset;
		struct pf_krule		*rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
		}
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGIFSPEEDV0:
	case DIOCGIFSPEEDV1: {
		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
		struct pf_ifspeed_v1	ps;
		struct ifnet		*ifp;

		if (psp->ifname[0] == '\0') {
			error = EINVAL;
			break;
		}

		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
		if (error != 0)
			break;
		ifp = ifunit(ps.ifname);
		if (ifp != NULL) {
			psp->baudrate32 =
			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
			if (cmd == DIOCGIFSPEEDV1)
				psp->baudrate = ifp->if_baudrate;
		} else {
			error = EINVAL;
		}
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq, *a;
		struct ifnet		*ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error)
			break;
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq,
			    entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETALTQSV0:
	case DIOCGETALTQSV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
			pa->nr++;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}
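	/*
	 * The V0/V1 pairs above share one handler: pf_import_kaltq() and
	 * pf_export_kaltq() use IOCPARM_LEN(cmd) to tell which layout of
	 * the ioctl structure the caller passed, so binaries built against
	 * the older struct keep working against the newer kernel.
	 */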
	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq		*altq;
		int			 nbytes;
		u_int32_t		 version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpooladdr *pa;
		struct pfi_kkif *kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
		if (error != 0)
			break;
		if (pa->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				pf_kkif_free(kif);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kkif_attach(kif, pa->ifname);
			kif = NULL;
			pfi_kkif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kkif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}
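	/*
	 * Pool addresses follow a ticket protocol: DIOCBEGINADDRS empties
	 * the staging buffer and hands out a ticket, and each DIOCADDADDR
	 * must quote that ticket.  Enumeration is count-then-index; a rough
	 * sketch (rule-identifying fields such as anchor, ticket, r_action
	 * and r_num elided):
	 *
	 *	struct pfioc_pooladdr pp = { 0 };
	 *	ioctl(dev, DIOCGETADDRS, &pp);		// pp.nr = count
	 *	u_int32_t count = pp.nr;
	 *	for (u_int32_t i = 0; i < count; i++) {
	 *		pp.nr = i;
	 *		ioctl(dev, DIOCGETADDR, &pp);	// fills pp.addr
	 *	}
	 */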
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr   *pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_kruleset	*ruleset;
		struct pfi_kkif		*kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}

#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif,
				    newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}
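	/*
	 * Same count-then-index idiom for anchors: DIOCGETRULESETS reports
	 * how many children a ruleset path has, and DIOCGETRULESET (below)
	 * returns the name of the pr->nr'th child.
	 */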
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;
		u_int32_t		 nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL &&
				    nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}
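	/*
	 * Note the allocation pattern above: the table count is taken and
	 * the buffer sized while the rules read lock is already held, so
	 * mallocarray() uses M_NOWAIT rather than sleeping under the lock.
	 * A caller that passes too small an io->pfrio_size simply gets a
	 * truncated listing, since the size is clamped with min().
	 */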
	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
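	/*
	 * When the caller sets PFR_FLAG_FEEDBACK, the address array is
	 * copied back out after the operation so the per-entry result
	 * codes (pfra_fback) are visible to userspace; without the flag
	 * only the summary counters (pfrio_nadd and friends) come back.
	 */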
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}
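	/*
	 * DIOCOSFPADD/DIOCOSFPGET maintain the passive OS fingerprint
	 * list that "os" match criteria in filter rules compare TCP SYN
	 * signatures against; pfctl populates it from pf.os when the
	 * ruleset is loaded.
	 */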
	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* Ensure there are no more Ethernet rules to clean up. */
		NET_EPOCH_DRAIN_CALLBACKS();
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_rollback_eth(ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
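	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement an all-or-nothing
	 * ruleset swap: XBEGIN hands out one ticket per ruleset element,
	 * the replacement rules are staged against those tickets, and
	 * XCOMMIT (next case) validates every ticket before flipping
	 * anything, so a stale ticket aborts the whole transaction rather
	 * than half of it.
	 */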
	case DIOCXCOMMIT: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		struct pf_kruleset	*rs;
		struct pf_keth_ruleset	*ers;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				ers = pf_find_keth_ruleset(ioe->anchor);
				if (ers == NULL || ioe->ticket == 0 ||
				    ioe->ticket != ers->inactive.ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();

		/* Only hook into Ethernet traffic if we've got rules for it. */
		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		else
			dehook_pf_eth();

		free(ioes, M_TEMP);
		break;
	}
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash	*sh;
		struct pf_ksrc_node	*n;
		struct pf_src_node	*p, *pstore;
		uint32_t		 i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
				i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {

				if ((nr + 1) * sizeof(*p) >
				    (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

#ifdef COMPAT_FREEBSD13
	case DIOCKEEPCOUNTERS_FREEBSD13:
#endif
	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		if (io->pfiio_size < 0 ||
		    io->pfiio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
			error = EINVAL;
			break;
		}

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
		    M_TEMP, M_WAITOK | M_ZERO);

		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

+	case DIOCSETREASS: {
+		u_int32_t *reass = (u_int32_t *)addr;
+
+		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
+		/*
+		 * Clearing the DF flag without reassembly enabled is not a
+		 * valid combination; disable reassembly in that case.
+		 */
+		if (!(V_pf_status.reass & PF_REASS_ENABLED))
+			V_pf_status.reass = 0;
+		break;
+	}
+
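+	/*
+	 * A hedged sketch of driving the new ioctl from userspace (the
+	 * "dev" descriptor and error reporting are assumptions, not part
+	 * of this change):
+	 *
+	 *	u_int32_t reass = PF_REASS_ENABLED | PF_REASS_NODF;
+	 *	if (ioctl(dev, DIOCSETREASS, &reass) == -1)
+	 *		err(1, "DIOCSETREASS");
+	 */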
	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&pf_ioctl_lock))
		sx_xunlock(&pf_ioctl_lock);
	CURVNET_RESTORE();

#undef ERROUT_IOCTL

	return (error);
}

void
pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
-	sp->state_flags = st->state_flags;
+	sp->state_flags_compat = st->state_flags;
+	sp->state_flags = htons(st->state_flags);
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}
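/*
 * state_flags grew from 8 to 16 bits here: the old one-byte slot is kept
 * as state_flags_compat so existing pfsync peers still parse the message,
 * while updated peers read the full 16-bit field in network byte order.
 * The same dual-width export is used in pf_state_export() below.
 */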
void
pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));

	sp->version = PF_STATE_VERSION;

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
	    sizeof(sp->orig_ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
+	/* 8 bits for old peers, 16 bits for new peers */
+	sp->state_flags_compat = st->state_flags;
	sp->state_flags = htons(st->state_flags);
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	sp->packets[0] = st->packets[0];
	sp->packets[1] = st->packets[1];
	sp->bytes[0] = st->bytes[0];
	sp->bytes[1] = st->bytes[1];
}

static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__,
	    aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
		kt->pfrkt_cnt : -1;
}

static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t        *nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names",
		    names[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}
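/*
 * Each counter group is exported as three parallel nvlist arrays
 * ("counters", "names", "ids") nested under one key; the userspace side
 * can walk the three arrays in lockstep to rebuild the per-counter table.
 */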
nvlist_append_number_array(nvl, "bcounters", s.bcounters[i][j]); } } nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); PF_RULES_RUNLOCK(); error = copyout(nvlpacked, nv->data, nv->len); goto done; #undef ERROUT errout: PF_RULES_RUNLOCK(); done: free(nvlpacked, M_NVLIST); nvlist_destroy(nvc); nvlist_destroy(nvl); return (error); } /* * XXX - Check for version mismatch!!! */ static void pf_clear_all_states(void) { struct pf_kstate *s; u_int i; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { s->timeout = PFTM_PURGE; /* Don't send out individual delete messages. */ s->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(s); goto relock; } PF_HASHROW_UNLOCK(ih); } } static int pf_clear_tables(void) { struct pfioc_table io; int error; bzero(&io, sizeof(io)); error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, io.pfrio_flags); return (error); } static void pf_clear_srcnodes(struct pf_ksrc_node *n) { struct pf_kstate *s; int i; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (n == NULL || n == s->src_node) s->src_node = NULL; if (n == NULL || n == s->nat_src_node) s->nat_src_node = NULL; } PF_HASHROW_UNLOCK(ih); } if (n == NULL) { struct pf_srchash *sh; for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) { n->expire = 1; n->states = 0; } PF_HASHROW_UNLOCK(sh); } } else { /* XXX: hash slot should already be locked here. */ n->expire = 1; n->states = 0; } } static void pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) { struct pf_ksrc_node_list kill; LIST_INIT(&kill); for (int i = 0; i <= pf_srchashmask; i++) { struct pf_srchash *sh = &V_pf_srchash[i]; struct pf_ksrc_node *sn, *tmp; PF_HASHROW_LOCK(sh); LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) if (PF_MATCHA(psnk->psnk_src.neg, &psnk->psnk_src.addr.v.a.addr, &psnk->psnk_src.addr.v.a.mask, &sn->addr, sn->af) && PF_MATCHA(psnk->psnk_dst.neg, &psnk->psnk_dst.addr.v.a.addr, &psnk->psnk_dst.addr.v.a.mask, &sn->raddr, sn->af)) { pf_unlink_src_node(sn); LIST_INSERT_HEAD(&kill, sn, entry); sn->expire = 1; } PF_HASHROW_UNLOCK(sh); } for (int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_kstate *s; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (s->src_node && s->src_node->expire == 1) s->src_node = NULL; if (s->nat_src_node && s->nat_src_node->expire == 1) s->nat_src_node = NULL; } PF_HASHROW_UNLOCK(ih); } psnk->psnk_killed = pf_free_src_nodes(&kill); } static int pf_keepcounters(struct pfioc_nv *nv) { nvlist_t *nvl = NULL; void *nvlpacked = NULL; int error = 0; #define ERROUT(x) ERROUT_FUNCTION(on_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! 
nvlist_exists_bool(nvl, "keep_counters")) ERROUT(EBADMSG); V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); on_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); return (error); } static unsigned int pf_clear_states(const struct pf_kstate_kill *kill) { struct pf_state_key_cmp match_key; struct pf_kstate *s; struct pfi_kkif *kif; int idx; unsigned int killed = 0, dir; for (unsigned int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock_DIOCCLRSTATES: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { /* For floating states look at the original kif. */ kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; if (kill->psk_ifname[0] && strcmp(kill->psk_ifname, kif->pfik_name)) continue; if (kill->psk_kill_match) { bzero(&match_key, sizeof(match_key)); if (s->direction == PF_OUT) { dir = PF_IN; idx = PF_SK_STACK; } else { dir = PF_OUT; idx = PF_SK_WIRE; } match_key.af = s->key[idx]->af; match_key.proto = s->key[idx]->proto; PF_ACPY(&match_key.addr[0], &s->key[idx]->addr[1], match_key.af); match_key.port[0] = s->key[idx]->port[1]; PF_ACPY(&match_key.addr[1], &s->key[idx]->addr[0], match_key.af); match_key.port[1] = s->key[idx]->port[0]; } /* * Don't send out individual * delete messages. */ s->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(s); killed++; if (kill->psk_kill_match) killed += pf_kill_matching_state(&match_key, dir); goto relock_DIOCCLRSTATES; } PF_HASHROW_UNLOCK(ih); } if (V_pfsync_clear_states_ptr != NULL) V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); return (killed); } static void pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) { struct pf_kstate *s; if (kill->psk_pfcmp.id) { if (kill->psk_pfcmp.creatorid == 0) kill->psk_pfcmp.creatorid = V_pf_status.hostid; if ((s = pf_find_state_byid(kill->psk_pfcmp.id, kill->psk_pfcmp.creatorid))) { pf_unlink_state(s); *killed = 1; } return; } for (unsigned int i = 0; i <= pf_hashmask; i++) *killed += pf_killstates_row(kill, &V_pf_idhash[i]); return; } static int pf_killstates_nv(struct pfioc_nv *nv) { struct pf_kstate_kill kill; nvlist_t *nvl = NULL; void *nvlpacked = NULL; int error = 0; unsigned int killed = 0; #define ERROUT(x) ERROUT_FUNCTION(on_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); if (error) ERROUT(error); pf_killstates(&kill, &killed); free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvlist_add_number(nvl, "killed", killed); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); on_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); return (error); } static int pf_clearstates_nv(struct pfioc_nv *nv) { struct pf_kstate_kill kill; nvlist_t *nvl = NULL; void *nvlpacked = NULL; int error = 0; unsigned int killed; #define ERROUT(x) ERROUT_FUNCTION(on_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) 
static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill	 kill;
	nvlist_t		*nvl = NULL;
	void			*nvlpacked = NULL;
	int			 error = 0;
	unsigned int		 killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t		*nvl = NULL, *nvls;
	void			*nvlpacked = NULL;
	struct pf_kstate	*s = NULL;
	int			 error = 0;
	uint64_t		 id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));