diff --git a/lib/libpfctl/libpfctl.c b/lib/libpfctl/libpfctl.c index 25bb77d9c021..0360c0c63be7 100644 --- a/lib/libpfctl/libpfctl.c +++ b/lib/libpfctl/libpfctl.c @@ -1,1772 +1,1773 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/param.h> #include <sys/ioctl.h> #include <sys/nv.h> #include <sys/queue.h> #include <sys/types.h> #include <netinet/in.h> #include <net/if.h> #include <net/pfvar.h> #include <netlink/netlink.h> #include <netlink/netlink_generic.h> #include <netlink/netlink_snl.h> #include <netlink/netlink_snl_generic.h> #include <netlink/netlink_snl_route.h> #include <netpfil/pf/pf_nl.h> #include <assert.h> #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libpfctl.h" const char* PFCTL_SYNCOOKIES_MODE_NAMES[] = { "never", "always", "adaptive" }; static int _pfctl_clear_states(int , const struct pfctl_kill *, unsigned int *, uint64_t); static int pfctl_do_ioctl(int dev, uint cmd, size_t size, nvlist_t **nvl) { struct pfioc_nv nv; void *data; size_t nvlen; int ret; data = nvlist_pack(*nvl, &nvlen); if (nvlen > size) size = nvlen; retry: nv.data = malloc(size); memcpy(nv.data, data, nvlen); nv.len = nvlen; nv.size = size; ret = ioctl(dev, cmd, &nv); if (ret == -1 && errno == ENOSPC) { size *= 2; free(nv.data); goto retry; } /* Only free the packed copy once no retry can re-read it. */ free(data); nvlist_destroy(*nvl); *nvl = NULL; if (ret == 0) { *nvl = nvlist_unpack(nv.data, nv.len, 0); if (*nvl == NULL) { free(nv.data); return (EIO); } } else { ret = errno; } free(nv.data); return (ret); } static void pf_nvuint_8_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint8_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void pf_nvuint_16_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint16_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void pf_nvuint_32_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint32_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } static void
pf_nvuint_64_array(const nvlist_t *nvl, const char *name, size_t maxelems, uint64_t *numbers, size_t *nelems) { const uint64_t *tmp; size_t elems; tmp = nvlist_get_number_array(nvl, name, &elems); assert(elems <= maxelems); for (size_t i = 0; i < elems; i++) numbers[i] = tmp[i]; if (nelems) *nelems = elems; } int pfctl_startstop(int start) { struct snl_state ss = {}; struct snl_errmsg_data e = {}; struct snl_writer nw; struct nlmsghdr *hdr; uint32_t seq_id; int family_id; snl_init(&ss, NETLINK_GENERIC); family_id = snl_get_genl_family(&ss, PFNL_FAMILY_NAME); snl_init_writer(&ss, &nw); hdr = snl_create_genl_msg_request(&nw, family_id, start ? PFNL_CMD_START : PFNL_CMD_STOP); hdr = snl_finalize_msg(&nw); seq_id = hdr->nlmsg_seq; snl_send_message(&ss, hdr); while ((hdr = snl_read_reply_multi(&ss, seq_id, &e)) != NULL) { } return (e.error); } static void _pfctl_get_status_counters(const nvlist_t *nvl, struct pfctl_status_counters *counters) { const uint64_t *ids, *counts; const char *const *names; size_t id_len, counter_len, names_len; ids = nvlist_get_number_array(nvl, "ids", &id_len); counts = nvlist_get_number_array(nvl, "counters", &counter_len); names = nvlist_get_string_array(nvl, "names", &names_len); assert(id_len == counter_len); assert(counter_len == names_len); TAILQ_INIT(counters); for (size_t i = 0; i < id_len; i++) { struct pfctl_status_counter *c; c = malloc(sizeof(*c)); c->id = ids[i]; c->counter = counts[i]; c->name = strdup(names[i]); TAILQ_INSERT_TAIL(counters, c, entry); } } struct pfctl_status * pfctl_get_status(int dev) { struct pfctl_status *status; nvlist_t *nvl; size_t len; const void *chksum; status = calloc(1, sizeof(*status)); if (status == NULL) return (NULL); nvl = nvlist_create(0); if (pfctl_do_ioctl(dev, DIOCGETSTATUSNV, 4096, &nvl)) { free(status); return (NULL); } status->running = nvlist_get_bool(nvl, "running"); status->since = nvlist_get_number(nvl, "since"); status->debug = nvlist_get_number(nvl, "debug"); status->hostid = ntohl(nvlist_get_number(nvl, "hostid")); status->states = nvlist_get_number(nvl, "states"); status->src_nodes = nvlist_get_number(nvl, "src_nodes"); status->syncookies_active = nvlist_get_bool(nvl, "syncookies_active"); status->reass = nvlist_get_number(nvl, "reass"); strlcpy(status->ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); chksum = nvlist_get_binary(nvl, "chksum", &len); assert(len == PF_MD5_DIGEST_LENGTH); memcpy(status->pf_chksum, chksum, len); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "counters"), &status->counters); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "lcounters"), &status->lcounters); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "fcounters"), &status->fcounters); _pfctl_get_status_counters(nvlist_get_nvlist(nvl, "scounters"), &status->scounters); pf_nvuint_64_array(nvl, "pcounters", 2 * 2 * 3, (uint64_t *)status->pcounters, NULL); pf_nvuint_64_array(nvl, "bcounters", 2 * 2, (uint64_t *)status->bcounters, NULL); nvlist_destroy(nvl); return (status); } static uint64_t _pfctl_status_counter(struct pfctl_status_counters *counters, uint64_t id) { struct pfctl_status_counter *c; TAILQ_FOREACH(c, counters, entry) { if (c->id == id) return (c->counter); } return (0); } uint64_t pfctl_status_counter(struct pfctl_status *status, int id) { return (_pfctl_status_counter(&status->counters, id)); } uint64_t pfctl_status_fcounter(struct pfctl_status *status, int id) { return (_pfctl_status_counter(&status->fcounters, id)); } uint64_t pfctl_status_scounter(struct pfctl_status *status, int id) { return 
(_pfctl_status_counter(&status->scounters, id)); } void pfctl_free_status(struct pfctl_status *status) { struct pfctl_status_counter *c, *tmp; if (status == NULL) return; TAILQ_FOREACH_SAFE(c, &status->counters, entry, tmp) { free(c->name); free(c); } TAILQ_FOREACH_SAFE(c, &status->lcounters, entry, tmp) { free(c->name); free(c); } TAILQ_FOREACH_SAFE(c, &status->fcounters, entry, tmp) { free(c->name); free(c); } TAILQ_FOREACH_SAFE(c, &status->scounters, entry, tmp) { free(c->name); free(c); } free(status); } static void pfctl_nv_add_addr(nvlist_t *nvparent, const char *name, const struct pf_addr *addr) { nvlist_t *nvl = nvlist_create(0); nvlist_add_binary(nvl, "addr", addr, sizeof(*addr)); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *addr) { size_t len; const void *data; data = nvlist_get_binary(nvl, "addr", &len); assert(len == sizeof(struct pf_addr)); memcpy(addr, data, len); } static void pfctl_nv_add_addr_wrap(nvlist_t *nvparent, const char *name, const struct pf_addr_wrap *addr) { nvlist_t *nvl = nvlist_create(0); nvlist_add_number(nvl, "type", addr->type); nvlist_add_number(nvl, "iflags", addr->iflags); if (addr->type == PF_ADDR_DYNIFTL) nvlist_add_string(nvl, "ifname", addr->v.ifname); if (addr->type == PF_ADDR_TABLE) nvlist_add_string(nvl, "tblname", addr->v.tblname); pfctl_nv_add_addr(nvl, "addr", &addr->v.a.addr); pfctl_nv_add_addr(nvl, "mask", &addr->v.a.mask); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr) { bzero(addr, sizeof(*addr)); addr->type = nvlist_get_number(nvl, "type"); addr->iflags = nvlist_get_number(nvl, "iflags"); if (addr->type == PF_ADDR_DYNIFTL) { strlcpy(addr->v.ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); addr->p.dyncnt = nvlist_get_number(nvl, "dyncnt"); } if (addr->type == PF_ADDR_TABLE) { strlcpy(addr->v.tblname, nvlist_get_string(nvl, "tblname"), PF_TABLE_NAME_SIZE); addr->p.tblcnt = nvlist_get_number(nvl, "tblcnt"); } pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"), &addr->v.a.addr); pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"), &addr->v.a.mask); } static void pfctl_nv_add_rule_addr(nvlist_t *nvparent, const char *name, const struct pf_rule_addr *addr) { uint64_t ports[2]; nvlist_t *nvl = nvlist_create(0); pfctl_nv_add_addr_wrap(nvl, "addr", &addr->addr); ports[0] = addr->port[0]; ports[1] = addr->port[1]; nvlist_add_number_array(nvl, "port", ports, 2); nvlist_add_number(nvl, "neg", addr->neg); nvlist_add_number(nvl, "port_op", addr->port_op); nvlist_add_nvlist(nvparent, name, nvl); nvlist_destroy(nvl); } static void pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr) { pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"), &addr->addr); pf_nvuint_16_array(nvl, "port", 2, addr->port, NULL); addr->neg = nvlist_get_number(nvl, "neg"); addr->port_op = nvlist_get_number(nvl, "port_op"); } static void pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape) { mape->offset = nvlist_get_number(nvl, "offset"); mape->psidlen = nvlist_get_number(nvl, "psidlen"); mape->psid = nvlist_get_number(nvl, "psid"); } static void pf_nvpool_to_pool(const nvlist_t *nvl, struct pfctl_pool *pool) { size_t len; const void *data; data = nvlist_get_binary(nvl, "key", &len); assert(len == sizeof(pool->key)); memcpy(&pool->key, data, len); pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"), &pool->counter); pool->tblidx = 
nvlist_get_number(nvl, "tblidx"); pf_nvuint_16_array(nvl, "proxy_port", 2, pool->proxy_port, NULL); pool->opts = nvlist_get_number(nvl, "opts"); if (nvlist_exists_nvlist(nvl, "mape")) pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"), &pool->mape); } static void pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid) { pf_nvuint_32_array(nvl, "uid", 2, uid->uid, NULL); uid->op = nvlist_get_number(nvl, "op"); } static void pf_nvdivert_to_divert(const nvlist_t *nvl, struct pfctl_rule *rule) { pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"), &rule->divert.addr); rule->divert.port = nvlist_get_number(nvl, "port"); } static void pf_nvrule_to_rule(const nvlist_t *nvl, struct pfctl_rule *rule) { const uint64_t *skip; const char *const *labels; size_t skipcount, labelcount; rule->nr = nvlist_get_number(nvl, "nr"); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"), &rule->src); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"), &rule->dst); skip = nvlist_get_number_array(nvl, "skip", &skipcount); assert(skip); assert(skipcount == PF_SKIP_COUNT); for (int i = 0; i < PF_SKIP_COUNT; i++) rule->skip[i].nr = skip[i]; labels = nvlist_get_string_array(nvl, "labels", &labelcount); assert(labelcount <= PF_RULE_MAX_LABEL_COUNT); for (size_t i = 0; i < labelcount; i++) strlcpy(rule->label[i], labels[i], PF_RULE_LABEL_SIZE); rule->ridentifier = nvlist_get_number(nvl, "ridentifier"); strlcpy(rule->ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); strlcpy(rule->qname, nvlist_get_string(nvl, "qname"), PF_QNAME_SIZE); strlcpy(rule->pqname, nvlist_get_string(nvl, "pqname"), PF_QNAME_SIZE); strlcpy(rule->tagname, nvlist_get_string(nvl, "tagname"), PF_TAG_NAME_SIZE); strlcpy(rule->match_tagname, nvlist_get_string(nvl, "match_tagname"), PF_TAG_NAME_SIZE); strlcpy(rule->overload_tblname, nvlist_get_string(nvl, "overload_tblname"), PF_TABLE_NAME_SIZE); pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"), &rule->rpool); rule->evaluations = nvlist_get_number(nvl, "evaluations"); pf_nvuint_64_array(nvl, "packets", 2, rule->packets, NULL); pf_nvuint_64_array(nvl, "bytes", 2, rule->bytes, NULL); if (nvlist_exists_number(nvl, "timestamp")) { rule->last_active_timestamp = nvlist_get_number(nvl, "timestamp"); } rule->os_fingerprint = nvlist_get_number(nvl, "os_fingerprint"); rule->rtableid = nvlist_get_number(nvl, "rtableid"); pf_nvuint_32_array(nvl, "timeout", PFTM_MAX, rule->timeout, NULL); rule->max_states = nvlist_get_number(nvl, "max_states"); rule->max_src_nodes = nvlist_get_number(nvl, "max_src_nodes"); rule->max_src_states = nvlist_get_number(nvl, "max_src_states"); rule->max_src_conn = nvlist_get_number(nvl, "max_src_conn"); rule->max_src_conn_rate.limit = nvlist_get_number(nvl, "max_src_conn_rate.limit"); rule->max_src_conn_rate.seconds = nvlist_get_number(nvl, "max_src_conn_rate.seconds"); rule->qid = nvlist_get_number(nvl, "qid"); rule->pqid = nvlist_get_number(nvl, "pqid"); rule->dnpipe = nvlist_get_number(nvl, "dnpipe"); rule->dnrpipe = nvlist_get_number(nvl, "dnrpipe"); rule->free_flags = nvlist_get_number(nvl, "dnflags"); rule->prob = nvlist_get_number(nvl, "prob"); rule->cuid = nvlist_get_number(nvl, "cuid"); rule->cpid = nvlist_get_number(nvl, "cpid"); rule->return_icmp = nvlist_get_number(nvl, "return_icmp"); rule->return_icmp6 = nvlist_get_number(nvl, "return_icmp6"); rule->max_mss = nvlist_get_number(nvl, "max_mss"); rule->scrub_flags = nvlist_get_number(nvl, "scrub_flags"); pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"), &rule->uid); 
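/* * Added note (not in the original patch): struct pf_rule_gid has the * same layout as struct pf_rule_uid, which is why the gid nvlist on the * next line can safely be parsed by the uid helper through a cast. */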
pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "gid"), (struct pf_rule_uid *)&rule->gid); rule->rule_flag = nvlist_get_number(nvl, "rule_flag"); rule->action = nvlist_get_number(nvl, "action"); rule->direction = nvlist_get_number(nvl, "direction"); rule->log = nvlist_get_number(nvl, "log"); rule->logif = nvlist_get_number(nvl, "logif"); rule->quick = nvlist_get_number(nvl, "quick"); rule->ifnot = nvlist_get_number(nvl, "ifnot"); rule->match_tag_not = nvlist_get_number(nvl, "match_tag_not"); rule->natpass = nvlist_get_number(nvl, "natpass"); rule->keep_state = nvlist_get_number(nvl, "keep_state"); rule->af = nvlist_get_number(nvl, "af"); rule->proto = nvlist_get_number(nvl, "proto"); rule->type = nvlist_get_number(nvl, "type"); rule->code = nvlist_get_number(nvl, "code"); rule->flags = nvlist_get_number(nvl, "flags"); rule->flagset = nvlist_get_number(nvl, "flagset"); rule->min_ttl = nvlist_get_number(nvl, "min_ttl"); rule->allow_opts = nvlist_get_number(nvl, "allow_opts"); rule->rt = nvlist_get_number(nvl, "rt"); rule->return_ttl = nvlist_get_number(nvl, "return_ttl"); rule->tos = nvlist_get_number(nvl, "tos"); rule->set_tos = nvlist_get_number(nvl, "set_tos"); rule->anchor_relative = nvlist_get_number(nvl, "anchor_relative"); rule->anchor_wildcard = nvlist_get_number(nvl, "anchor_wildcard"); rule->flush = nvlist_get_number(nvl, "flush"); rule->prio = nvlist_get_number(nvl, "prio"); pf_nvuint_8_array(nvl, "set_prio", 2, rule->set_prio, NULL); pf_nvdivert_to_divert(nvlist_get_nvlist(nvl, "divert"), rule); rule->states_cur = nvlist_get_number(nvl, "states_cur"); rule->states_tot = nvlist_get_number(nvl, "states_tot"); rule->src_nodes = nvlist_get_number(nvl, "src_nodes"); } static void pfctl_nveth_addr_to_eth_addr(const nvlist_t *nvl, struct pfctl_eth_addr *addr) { static const u_int8_t EMPTY_MAC[ETHER_ADDR_LEN] = { 0 }; size_t len; const void *data; data = nvlist_get_binary(nvl, "addr", &len); assert(len == sizeof(addr->addr)); memcpy(addr->addr, data, sizeof(addr->addr)); data = nvlist_get_binary(nvl, "mask", &len); assert(len == sizeof(addr->mask)); memcpy(addr->mask, data, sizeof(addr->mask)); addr->neg = nvlist_get_bool(nvl, "neg"); /* To make checks for 'is this address set?' easier. 
*/ addr->isset = memcmp(addr->addr, EMPTY_MAC, ETHER_ADDR_LEN) != 0; } static nvlist_t * pfctl_eth_addr_to_nveth_addr(const struct pfctl_eth_addr *addr) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_bool(nvl, "neg", addr->neg); nvlist_add_binary(nvl, "addr", &addr->addr, ETHER_ADDR_LEN); nvlist_add_binary(nvl, "mask", &addr->mask, ETHER_ADDR_LEN); return (nvl); } static void pfctl_nveth_rule_to_eth_rule(const nvlist_t *nvl, struct pfctl_eth_rule *rule) { const char *const *labels; size_t labelcount, i; rule->nr = nvlist_get_number(nvl, "nr"); rule->quick = nvlist_get_bool(nvl, "quick"); strlcpy(rule->ifname, nvlist_get_string(nvl, "ifname"), IFNAMSIZ); rule->ifnot = nvlist_get_bool(nvl, "ifnot"); rule->direction = nvlist_get_number(nvl, "direction"); rule->proto = nvlist_get_number(nvl, "proto"); strlcpy(rule->match_tagname, nvlist_get_string(nvl, "match_tagname"), PF_TAG_NAME_SIZE); rule->match_tag = nvlist_get_number(nvl, "match_tag"); rule->match_tag_not = nvlist_get_bool(nvl, "match_tag_not"); labels = nvlist_get_string_array(nvl, "labels", &labelcount); assert(labelcount <= PF_RULE_MAX_LABEL_COUNT); for (i = 0; i < labelcount; i++) strlcpy(rule->label[i], labels[i], PF_RULE_LABEL_SIZE); rule->ridentifier = nvlist_get_number(nvl, "ridentifier"); pfctl_nveth_addr_to_eth_addr(nvlist_get_nvlist(nvl, "src"), &rule->src); pfctl_nveth_addr_to_eth_addr(nvlist_get_nvlist(nvl, "dst"), &rule->dst); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "ipsrc"), &rule->ipsrc); pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "ipdst"), &rule->ipdst); rule->evaluations = nvlist_get_number(nvl, "evaluations"); rule->packets[0] = nvlist_get_number(nvl, "packets-in"); rule->packets[1] = nvlist_get_number(nvl, "packets-out"); rule->bytes[0] = nvlist_get_number(nvl, "bytes-in"); rule->bytes[1] = nvlist_get_number(nvl, "bytes-out"); if (nvlist_exists_number(nvl, "timestamp")) { rule->last_active_timestamp = nvlist_get_number(nvl, "timestamp"); } strlcpy(rule->qname, nvlist_get_string(nvl, "qname"), PF_QNAME_SIZE); strlcpy(rule->tagname, nvlist_get_string(nvl, "tagname"), PF_TAG_NAME_SIZE); rule->dnpipe = nvlist_get_number(nvl, "dnpipe"); rule->dnflags = nvlist_get_number(nvl, "dnflags"); rule->anchor_relative = nvlist_get_number(nvl, "anchor_relative"); rule->anchor_wildcard = nvlist_get_number(nvl, "anchor_wildcard"); strlcpy(rule->bridge_to, nvlist_get_string(nvl, "bridge_to"), IFNAMSIZ); rule->action = nvlist_get_number(nvl, "action"); } int pfctl_get_eth_rulesets_info(int dev, struct pfctl_eth_rulesets_info *ri, const char *path) { nvlist_t *nvl; int ret; bzero(ri, sizeof(*ri)); nvl = nvlist_create(0); nvlist_add_string(nvl, "path", path); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULESETS, 256, &nvl)) != 0) return (ret); ri->nr = nvlist_get_number(nvl, "nr"); nvlist_destroy(nvl); return (0); } int pfctl_get_eth_ruleset(int dev, const char *path, int nr, struct pfctl_eth_ruleset_info *ri) { nvlist_t *nvl; int ret; bzero(ri, sizeof(*ri)); nvl = nvlist_create(0); nvlist_add_string(nvl, "path", path); nvlist_add_number(nvl, "nr", nr); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULESET, 1024, &nvl)) != 0) return (ret); ri->nr = nvlist_get_number(nvl, "nr"); strlcpy(ri->path, nvlist_get_string(nvl, "path"), MAXPATHLEN); strlcpy(ri->name, nvlist_get_string(nvl, "name"), PF_ANCHOR_NAME_SIZE); return (0); } int pfctl_get_eth_rules_info(int dev, struct pfctl_eth_rules_info *rules, const char *path) { nvlist_t *nvl; int ret; bzero(rules, sizeof(*rules)); nvl = 
nvlist_create(0); nvlist_add_string(nvl, "anchor", path); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULES, 1024, &nvl)) != 0) return (ret); rules->nr = nvlist_get_number(nvl, "nr"); rules->ticket = nvlist_get_number(nvl, "ticket"); nvlist_destroy(nvl); return (0); } int pfctl_get_eth_rule(int dev, uint32_t nr, uint32_t ticket, const char *path, struct pfctl_eth_rule *rule, bool clear, char *anchor_call) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); nvlist_add_string(nvl, "anchor", path); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_number(nvl, "nr", nr); nvlist_add_bool(nvl, "clear", clear); if ((ret = pfctl_do_ioctl(dev, DIOCGETETHRULE, 4096, &nvl)) != 0) return (ret); pfctl_nveth_rule_to_eth_rule(nvl, rule); if (anchor_call) strlcpy(anchor_call, nvlist_get_string(nvl, "anchor_call"), MAXPATHLEN); nvlist_destroy(nvl); return (0); } int pfctl_add_eth_rule(int dev, const struct pfctl_eth_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket) { struct pfioc_nv nv; nvlist_t *nvl, *addr; void *packed; int error = 0; size_t labelcount, size; nvl = nvlist_create(0); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_string(nvl, "anchor", anchor); nvlist_add_string(nvl, "anchor_call", anchor_call); nvlist_add_number(nvl, "nr", r->nr); nvlist_add_bool(nvl, "quick", r->quick); nvlist_add_string(nvl, "ifname", r->ifname); nvlist_add_bool(nvl, "ifnot", r->ifnot); nvlist_add_number(nvl, "direction", r->direction); nvlist_add_number(nvl, "proto", r->proto); nvlist_add_string(nvl, "match_tagname", r->match_tagname); nvlist_add_bool(nvl, "match_tag_not", r->match_tag_not); addr = pfctl_eth_addr_to_nveth_addr(&r->src); if (addr == NULL) { nvlist_destroy(nvl); return (ENOMEM); } nvlist_add_nvlist(nvl, "src", addr); nvlist_destroy(addr); addr = pfctl_eth_addr_to_nveth_addr(&r->dst); if (addr == NULL) { nvlist_destroy(nvl); return (ENOMEM); } nvlist_add_nvlist(nvl, "dst", addr); nvlist_destroy(addr); pfctl_nv_add_rule_addr(nvl, "ipsrc", &r->ipsrc); pfctl_nv_add_rule_addr(nvl, "ipdst", &r->ipdst); labelcount = 0; while (r->label[labelcount][0] != 0 && labelcount < PF_RULE_MAX_LABEL_COUNT) { nvlist_append_string_array(nvl, "labels", r->label[labelcount]); labelcount++; } nvlist_add_number(nvl, "ridentifier", r->ridentifier); nvlist_add_string(nvl, "qname", r->qname); nvlist_add_string(nvl, "tagname", r->tagname); nvlist_add_number(nvl, "dnpipe", r->dnpipe); nvlist_add_number(nvl, "dnflags", r->dnflags); nvlist_add_string(nvl, "bridge_to", r->bridge_to); nvlist_add_number(nvl, "action", r->action); packed = nvlist_pack(nvl, &size); if (packed == NULL) { nvlist_destroy(nvl); return (ENOMEM); } nv.len = size; nv.size = size; nv.data = packed; if (ioctl(dev, DIOCADDETHRULE, &nv) != 0) error = errno; free(packed); nvlist_destroy(nvl); return (error); } static void snl_add_msg_attr_addr_wrap(struct snl_writer *nw, uint32_t type, const struct pf_addr_wrap *addr) { int off; off = snl_add_msg_attr_nested(nw, type); snl_add_msg_attr_ip6(nw, PF_AT_ADDR, &addr->v.a.addr.v6); snl_add_msg_attr_ip6(nw, PF_AT_MASK, &addr->v.a.mask.v6); if (addr->type == PF_ADDR_DYNIFTL) snl_add_msg_attr_string(nw, PF_AT_IFNAME, addr->v.ifname); if (addr->type == PF_ADDR_TABLE) snl_add_msg_attr_string(nw, PF_AT_TABLENAME, addr->v.tblname); snl_add_msg_attr_u8(nw, PF_AT_TYPE, addr->type); snl_add_msg_attr_u8(nw, PF_AT_IFLAGS, addr->iflags); snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_rule_addr(struct snl_writer *nw, uint32_t type, const struct pf_rule_addr *addr) { int off; off = 
snl_add_msg_attr_nested(nw, type); snl_add_msg_attr_addr_wrap(nw, PF_RAT_ADDR, &addr->addr); snl_add_msg_attr_u16(nw, PF_RAT_SRC_PORT, addr->port[0]); snl_add_msg_attr_u16(nw, PF_RAT_DST_PORT, addr->port[1]); snl_add_msg_attr_u8(nw, PF_RAT_NEG, addr->neg); snl_add_msg_attr_u8(nw, PF_RAT_OP, addr->port_op); snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_rule_labels(struct snl_writer *nw, uint32_t type, const char labels[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]) { int off, i = 0; off = snl_add_msg_attr_nested(nw, type); while (labels[i][0] != 0 && i < PF_RULE_MAX_LABEL_COUNT) { snl_add_msg_attr_string(nw, PF_LT_LABEL, labels[i]); i++; } snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_mape(struct snl_writer *nw, uint32_t type, const struct pf_mape_portset *me) { int off; off = snl_add_msg_attr_nested(nw, type); snl_add_msg_attr_u8(nw, PF_MET_OFFSET, me->offset); snl_add_msg_attr_u8(nw, PF_MET_PSID_LEN, me->psidlen); snl_add_msg_attr_u16(nw, PF_MET_PSID, me->psid); snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_rpool(struct snl_writer *nw, uint32_t type, const struct pfctl_pool *pool) { int off; off = snl_add_msg_attr_nested(nw, type); snl_add_msg_attr(nw, PF_PT_KEY, sizeof(pool->key), &pool->key); snl_add_msg_attr_ip6(nw, PF_PT_COUNTER, &pool->counter.v6); snl_add_msg_attr_u32(nw, PF_PT_TBLIDX, pool->tblidx); snl_add_msg_attr_u16(nw, PF_PT_PROXY_SRC_PORT, pool->proxy_port[0]); snl_add_msg_attr_u16(nw, PF_PT_PROXY_DST_PORT, pool->proxy_port[1]); snl_add_msg_attr_u8(nw, PF_PT_OPTS, pool->opts); snl_add_msg_attr_mape(nw, PF_PT_MAPE, &pool->mape); snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_timeouts(struct snl_writer *nw, uint32_t type, const uint32_t *timeouts) { int off; off = snl_add_msg_attr_nested(nw, type); for (int i = 0; i < PFTM_MAX; i++) snl_add_msg_attr_u32(nw, PF_TT_TIMEOUT, timeouts[i]); snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_uid(struct snl_writer *nw, uint32_t type, const struct pf_rule_uid *uid) { int off; off = snl_add_msg_attr_nested(nw, type); snl_add_msg_attr_u32(nw, PF_RUT_UID_LOW, uid->uid[0]); snl_add_msg_attr_u32(nw, PF_RUT_UID_HIGH, uid->uid[1]); snl_add_msg_attr_u8(nw, PF_RUT_OP, uid->op); snl_end_attr_nested(nw, off); } static void snl_add_msg_attr_pf_rule(struct snl_writer *nw, uint32_t type, const struct pfctl_rule *r) { int off; off = snl_add_msg_attr_nested(nw, type); snl_add_msg_attr_rule_addr(nw, PF_RT_SRC, &r->src); snl_add_msg_attr_rule_addr(nw, PF_RT_DST, &r->dst); snl_add_msg_attr_rule_labels(nw, PF_RT_LABELS, r->label); snl_add_msg_attr_u32(nw, PF_RT_RIDENTIFIER, r->ridentifier); snl_add_msg_attr_string(nw, PF_RT_IFNAME, r->ifname); snl_add_msg_attr_string(nw, PF_RT_QNAME, r->qname); snl_add_msg_attr_string(nw, PF_RT_PQNAME, r->pqname); snl_add_msg_attr_string(nw, PF_RT_TAGNAME, r->tagname); snl_add_msg_attr_string(nw, PF_RT_MATCH_TAGNAME, r->match_tagname); snl_add_msg_attr_string(nw, PF_RT_OVERLOAD_TBLNAME, r->overload_tblname); snl_add_msg_attr_rpool(nw, PF_RT_RPOOL, &r->rpool); snl_add_msg_attr_u32(nw, PF_RT_OS_FINGERPRINT, r->os_fingerprint); snl_add_msg_attr_u32(nw, PF_RT_RTABLEID, r->rtableid); snl_add_msg_attr_timeouts(nw, PF_RT_TIMEOUT, r->timeout); snl_add_msg_attr_u32(nw, PF_RT_MAX_STATES, r->max_states); snl_add_msg_attr_u32(nw, PF_RT_MAX_SRC_NODES, r->max_src_nodes); snl_add_msg_attr_u32(nw, PF_RT_MAX_SRC_STATES, r->max_src_states); snl_add_msg_attr_u32(nw, PF_RT_MAX_SRC_CONN_RATE_LIMIT, r->max_src_conn_rate.limit); snl_add_msg_attr_u32(nw, 
PF_RT_MAX_SRC_CONN_RATE_SECS, r->max_src_conn_rate.seconds); snl_add_msg_attr_u16(nw, PF_RT_DNPIPE, r->dnpipe); snl_add_msg_attr_u16(nw, PF_RT_DNRPIPE, r->dnrpipe); snl_add_msg_attr_u32(nw, PF_RT_DNFLAGS, r->free_flags); snl_add_msg_attr_u32(nw, PF_RT_NR, r->nr); snl_add_msg_attr_u32(nw, PF_RT_PROB, r->prob); snl_add_msg_attr_u32(nw, PF_RT_CUID, r->cuid); snl_add_msg_attr_u32(nw, PF_RT_CPID, r->cpid); snl_add_msg_attr_u16(nw, PF_RT_RETURN_ICMP, r->return_icmp); snl_add_msg_attr_u16(nw, PF_RT_RETURN_ICMP6, r->return_icmp6); snl_add_msg_attr_u16(nw, PF_RT_MAX_MSS, r->max_mss); snl_add_msg_attr_u16(nw, PF_RT_SCRUB_FLAGS, r->scrub_flags); snl_add_msg_attr_uid(nw, PF_RT_UID, &r->uid); snl_add_msg_attr_uid(nw, PF_RT_GID, (const struct pf_rule_uid *)&r->gid); snl_add_msg_attr_u32(nw, PF_RT_RULE_FLAG, r->rule_flag); snl_add_msg_attr_u8(nw, PF_RT_ACTION, r->action); snl_add_msg_attr_u8(nw, PF_RT_DIRECTION, r->direction); snl_add_msg_attr_u8(nw, PF_RT_LOG, r->log); snl_add_msg_attr_u8(nw, PF_RT_LOGIF, r->logif); snl_add_msg_attr_u8(nw, PF_RT_QUICK, r->quick); snl_add_msg_attr_u8(nw, PF_RT_IF_NOT, r->ifnot); snl_add_msg_attr_u8(nw, PF_RT_MATCH_TAG_NOT, r->match_tag_not); snl_add_msg_attr_u8(nw, PF_RT_NATPASS, r->natpass); snl_add_msg_attr_u8(nw, PF_RT_KEEP_STATE, r->keep_state); snl_add_msg_attr_u8(nw, PF_RT_AF, r->af); snl_add_msg_attr_u8(nw, PF_RT_PROTO, r->proto); snl_add_msg_attr_u8(nw, PF_RT_TYPE, r->type); snl_add_msg_attr_u8(nw, PF_RT_CODE, r->code); snl_add_msg_attr_u8(nw, PF_RT_FLAGS, r->flags); snl_add_msg_attr_u8(nw, PF_RT_FLAGSET, r->flagset); snl_add_msg_attr_u8(nw, PF_RT_MIN_TTL, r->min_ttl); snl_add_msg_attr_u8(nw, PF_RT_ALLOW_OPTS, r->allow_opts); snl_add_msg_attr_u8(nw, PF_RT_RT, r->rt); snl_add_msg_attr_u8(nw, PF_RT_RETURN_TTL, r->return_ttl); snl_add_msg_attr_u8(nw, PF_RT_TOS, r->tos); snl_add_msg_attr_u8(nw, PF_RT_SET_TOS, r->set_tos); snl_add_msg_attr_u8(nw, PF_RT_ANCHOR_RELATIVE, r->anchor_relative); snl_add_msg_attr_u8(nw, PF_RT_ANCHOR_WILDCARD, r->anchor_wildcard); snl_add_msg_attr_u8(nw, PF_RT_FLUSH, r->flush); snl_add_msg_attr_u8(nw, PF_RT_PRIO, r->prio); snl_add_msg_attr_u8(nw, PF_RT_SET_PRIO, r->set_prio[0]); snl_add_msg_attr_u8(nw, PF_RT_SET_PRIO_REPLY, r->set_prio[1]); snl_add_msg_attr_ip6(nw, PF_RT_DIVERT_ADDRESS, &r->divert.addr.v6); snl_add_msg_attr_u16(nw, PF_RT_DIVERT_PORT, r->divert.port); snl_end_attr_nested(nw, off); } int pfctl_add_rule(int dev __unused, const struct pfctl_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket, uint32_t pool_ticket) { struct snl_writer nw; struct snl_state ss = {}; struct snl_errmsg_data e = {}; struct nlmsghdr *hdr; uint32_t seq_id; int family_id; snl_init(&ss, NETLINK_GENERIC); family_id = snl_get_genl_family(&ss, PFNL_FAMILY_NAME); snl_init_writer(&ss, &nw); hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_ADDRULE); hdr->nlmsg_flags |= NLM_F_DUMP; snl_add_msg_attr_u32(&nw, PF_ART_TICKET, ticket); snl_add_msg_attr_u32(&nw, PF_ART_POOL_TICKET, pool_ticket); snl_add_msg_attr_string(&nw, PF_ART_ANCHOR, anchor); snl_add_msg_attr_string(&nw, PF_ART_ANCHOR_CALL, anchor_call); snl_add_msg_attr_pf_rule(&nw, PF_ART_RULE, r); if ((hdr = snl_finalize_msg(&nw)) == NULL) return (ENXIO); seq_id = hdr->nlmsg_seq; if (! 
snl_send_message(&ss, hdr)) { printf("Send failed\n"); return (ENXIO); } while ((hdr = snl_read_reply_multi(&ss, seq_id, &e)) != NULL) { } return (e.error); } int pfctl_get_rules_info(int dev, struct pfctl_rules_info *rules, uint32_t ruleset, const char *path) { struct pfioc_rule pr; int ret; bzero(&pr, sizeof(pr)); if (strlcpy(pr.anchor, path, sizeof(pr.anchor)) >= sizeof(pr.anchor)) return (E2BIG); pr.rule.action = ruleset; ret = ioctl(dev, DIOCGETRULES, &pr); if (ret != 0) return (ret); rules->nr = pr.nr; rules->ticket = pr.ticket; return (0); } int pfctl_get_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call) { return (pfctl_get_clear_rule(dev, nr, ticket, anchor, ruleset, rule, anchor_call, false)); } int pfctl_get_clear_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call, bool clear) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); if (nvl == 0) return (ENOMEM); nvlist_add_number(nvl, "nr", nr); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_string(nvl, "anchor", anchor); nvlist_add_number(nvl, "ruleset", ruleset); if (clear) nvlist_add_bool(nvl, "clear_counter", true); if ((ret = pfctl_do_ioctl(dev, DIOCGETRULENV, 8192, &nvl)) != 0) return (ret); pf_nvrule_to_rule(nvlist_get_nvlist(nvl, "rule"), rule); if (anchor_call) strlcpy(anchor_call, nvlist_get_string(nvl, "anchor_call"), MAXPATHLEN); nvlist_destroy(nvl); return (0); } int pfctl_set_keepcounters(int dev, bool keep) { struct pfioc_nv nv; nvlist_t *nvl; int ret; nvl = nvlist_create(0); nvlist_add_bool(nvl, "keep_counters", keep); nv.data = nvlist_pack(nvl, &nv.len); nv.size = nv.len; nvlist_destroy(nvl); ret = ioctl(dev, DIOCKEEPCOUNTERS, &nv); free(nv.data); return (ret); } struct pfctl_creator { uint32_t id; }; #define _IN(_field) offsetof(struct genlmsghdr, _field) #define _OUT(_field) offsetof(struct pfctl_creator, _field) static struct snl_attr_parser ap_creators[] = { { .type = PF_ST_CREATORID, .off = _OUT(id), .cb = snl_attr_get_uint32 }, }; static struct snl_field_parser fp_creators[] = { }; #undef _IN #undef _OUT SNL_DECLARE_PARSER(creator_parser, struct genlmsghdr, fp_creators, ap_creators); static int pfctl_get_creators_nl(struct snl_state *ss, uint32_t *creators, size_t *len) { int family_id = snl_get_genl_family(ss, PFNL_FAMILY_NAME); size_t i = 0; struct nlmsghdr *hdr; struct snl_writer nw; snl_init_writer(ss, &nw); hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_GETCREATORS); hdr->nlmsg_flags |= NLM_F_DUMP; hdr = snl_finalize_msg(&nw); uint32_t seq_id = hdr->nlmsg_seq; snl_send_message(ss, hdr); struct snl_errmsg_data e = {}; while ((hdr = snl_read_reply_multi(ss, seq_id, &e)) != NULL) { struct pfctl_creator c; bzero(&c, sizeof(c)); if (!snl_parse_nlmsg(ss, hdr, &creator_parser, &c)) continue; creators[i] = c.id; i++; if (i > *len) return (E2BIG); } *len = i; return (0); } int pfctl_get_creatorids(uint32_t *creators, size_t *len) { struct snl_state ss = {}; int error; snl_init(&ss, NETLINK_GENERIC); error = pfctl_get_creators_nl(&ss, creators, len); snl_free(&ss); return (error); } static void pfctl_nv_add_state_cmp(nvlist_t *nvl, const char *name, const struct pfctl_state_cmp *cmp) { nvlist_t *nv; nv = nvlist_create(0); nvlist_add_number(nv, "id", cmp->id); nvlist_add_number(nv, "creatorid", htonl(cmp->creatorid)); nvlist_add_number(nv, "direction", cmp->direction); nvlist_add_nvlist(nvl, name, nv); nvlist_destroy(nv); } static inline bool 
snl_attr_get_pfaddr(struct snl_state *ss __unused, struct nlattr *nla, const void *arg __unused, void *target) { memcpy(target, NLA_DATA(nla), NLA_DATA_LEN(nla)); return (true); } static inline bool snl_attr_store_ifname(struct snl_state *ss __unused, struct nlattr *nla, const void *arg __unused, void *target) { size_t maxlen = NLA_DATA_LEN(nla); if (strnlen((char *)NLA_DATA(nla), maxlen) < maxlen) { strlcpy(target, (char *)NLA_DATA(nla), maxlen); return (true); } return (false); } #define _OUT(_field) offsetof(struct pfctl_state_peer, _field) static const struct snl_attr_parser nla_p_speer[] = { { .type = PF_STP_SEQLO, .off = _OUT(seqlo), .cb = snl_attr_get_uint32 }, { .type = PF_STP_SEQHI, .off = _OUT(seqhi), .cb = snl_attr_get_uint32 }, { .type = PF_STP_SEQDIFF, .off = _OUT(seqdiff), .cb = snl_attr_get_uint32 }, { .type = PF_STP_STATE, .off = _OUT(state), .cb = snl_attr_get_uint8 }, { .type = PF_STP_WSCALE, .off = _OUT(wscale), .cb = snl_attr_get_uint8 }, }; SNL_DECLARE_ATTR_PARSER(speer_parser, nla_p_speer); #undef _OUT #define _OUT(_field) offsetof(struct pf_state_key_export, _field) static const struct snl_attr_parser nla_p_skey[] = { { .type = PF_STK_ADDR0, .off = _OUT(addr[0]), .cb = snl_attr_get_pfaddr }, { .type = PF_STK_ADDR1, .off = _OUT(addr[1]), .cb = snl_attr_get_pfaddr }, { .type = PF_STK_PORT0, .off = _OUT(port[0]), .cb = snl_attr_get_uint16 }, { .type = PF_STK_PORT1, .off = _OUT(port[1]), .cb = snl_attr_get_uint16 }, }; SNL_DECLARE_ATTR_PARSER(skey_parser, nla_p_skey); #undef _OUT #define _IN(_field) offsetof(struct genlmsghdr, _field) #define _OUT(_field) offsetof(struct pfctl_state, _field) static struct snl_attr_parser ap_state[] = { { .type = PF_ST_ID, .off = _OUT(id), .cb = snl_attr_get_uint64 }, { .type = PF_ST_CREATORID, .off = _OUT(creatorid), .cb = snl_attr_get_uint32 }, { .type = PF_ST_IFNAME, .off = _OUT(ifname), .cb = snl_attr_store_ifname }, { .type = PF_ST_ORIG_IFNAME, .off = _OUT(orig_ifname), .cb = snl_attr_store_ifname }, { .type = PF_ST_KEY_WIRE, .off = _OUT(key[0]), .arg = &skey_parser, .cb = snl_attr_get_nested }, { .type = PF_ST_KEY_STACK, .off = _OUT(key[1]), .arg = &skey_parser, .cb = snl_attr_get_nested }, { .type = PF_ST_PEER_SRC, .off = _OUT(src), .arg = &speer_parser, .cb = snl_attr_get_nested }, { .type = PF_ST_PEER_DST, .off = _OUT(dst), .arg = &speer_parser, .cb = snl_attr_get_nested }, { .type = PF_ST_RT_ADDR, .off = _OUT(rt_addr), .cb = snl_attr_get_pfaddr }, { .type = PF_ST_RULE, .off = _OUT(rule), .cb = snl_attr_get_uint32 }, { .type = PF_ST_ANCHOR, .off = _OUT(anchor), .cb = snl_attr_get_uint32 }, { .type = PF_ST_NAT_RULE, .off = _OUT(nat_rule), .cb = snl_attr_get_uint32 }, { .type = PF_ST_CREATION, .off = _OUT(creation), .cb = snl_attr_get_uint32 }, { .type = PF_ST_EXPIRE, .off = _OUT(expire), .cb = snl_attr_get_uint32 }, { .type = PF_ST_PACKETS0, .off = _OUT(packets[0]), .cb = snl_attr_get_uint64 }, { .type = PF_ST_PACKETS1, .off = _OUT(packets[1]), .cb = snl_attr_get_uint64 }, { .type = PF_ST_BYTES0, .off = _OUT(bytes[0]), .cb = snl_attr_get_uint64 }, { .type = PF_ST_BYTES1, .off = _OUT(bytes[1]), .cb = snl_attr_get_uint64 }, { .type = PF_ST_AF, .off = _OUT(key[0].af), .cb = snl_attr_get_uint8 }, { .type = PF_ST_PROTO, .off = _OUT(key[0].proto), .cb = snl_attr_get_uint8 }, { .type = PF_ST_DIRECTION, .off = _OUT(direction), .cb = snl_attr_get_uint8 }, { .type = PF_ST_LOG, .off = _OUT(log), .cb = snl_attr_get_uint8 }, { .type = PF_ST_STATE_FLAGS, .off = _OUT(state_flags), .cb = snl_attr_get_uint16 }, { .type = PF_ST_SYNC_FLAGS, .off = 
_OUT(sync_flags), .cb = snl_attr_get_uint8 }, }; static struct snl_field_parser fp_state[] = { }; #undef _IN #undef _OUT SNL_DECLARE_PARSER(state_parser, struct genlmsghdr, fp_state, ap_state); static const struct snl_hdr_parser *all_parsers[] = { &state_parser, &skey_parser, &speer_parser, &creator_parser, }; static int pfctl_get_states_nl(struct pfctl_state_filter *filter, struct snl_state *ss, pfctl_get_state_fn f, void *arg) { SNL_VERIFY_PARSERS(all_parsers); int family_id = snl_get_genl_family(ss, PFNL_FAMILY_NAME); int ret; struct nlmsghdr *hdr; struct snl_writer nw; snl_init_writer(ss, &nw); hdr = snl_create_genl_msg_request(&nw, family_id, PFNL_CMD_GETSTATES); hdr->nlmsg_flags |= NLM_F_DUMP; snl_add_msg_attr_string(&nw, PF_ST_IFNAME, filter->ifname); snl_add_msg_attr_u16(&nw, PF_ST_PROTO, filter->proto); snl_add_msg_attr_u8(&nw, PF_ST_AF, filter->af); snl_add_msg_attr_ip6(&nw, PF_ST_FILTER_ADDR, &filter->addr.v6); snl_add_msg_attr_ip6(&nw, PF_ST_FILTER_MASK, &filter->mask.v6); hdr = snl_finalize_msg(&nw); uint32_t seq_id = hdr->nlmsg_seq; snl_send_message(ss, hdr); struct snl_errmsg_data e = {}; while ((hdr = snl_read_reply_multi(ss, seq_id, &e)) != NULL) { struct pfctl_state s; bzero(&s, sizeof(s)); if (!snl_parse_nlmsg(ss, hdr, &state_parser, &s)) continue; s.key[1].af = s.key[0].af; s.key[1].proto = s.key[0].proto; ret = f(&s, arg); if (ret != 0) return (ret); } return (0); } int pfctl_get_states_iter(pfctl_get_state_fn f, void *arg) { struct pfctl_state_filter filter = {}; return (pfctl_get_filtered_states_iter(&filter, f, arg)); } int pfctl_get_filtered_states_iter(struct pfctl_state_filter *filter, pfctl_get_state_fn f, void *arg) { struct snl_state ss = {}; int error; snl_init(&ss, NETLINK_GENERIC); error = pfctl_get_states_nl(filter, &ss, f, arg); snl_free(&ss); return (error); } static int pfctl_append_states(struct pfctl_state *s, void *arg) { struct pfctl_state *new; struct pfctl_states *states = (struct pfctl_states *)arg; new = malloc(sizeof(*s)); if (new == NULL) return (ENOMEM); memcpy(new, s, sizeof(*s)); TAILQ_INSERT_TAIL(&states->states, new, entry); return (0); } int pfctl_get_states(int dev __unused, struct pfctl_states *states) { int ret; bzero(states, sizeof(*states)); TAILQ_INIT(&states->states); ret = pfctl_get_states_iter(pfctl_append_states, states); if (ret != 0) { pfctl_free_states(states); return (ret); } return (0); } void pfctl_free_states(struct pfctl_states *states) { struct pfctl_state *s, *tmp; TAILQ_FOREACH_SAFE(s, &states->states, entry, tmp) { free(s); } bzero(states, sizeof(*states)); } static int _pfctl_clear_states(int dev, const struct pfctl_kill *kill, unsigned int *killed, uint64_t ioctlval) { nvlist_t *nvl; int ret; nvl = nvlist_create(0); pfctl_nv_add_state_cmp(nvl, "cmp", &kill->cmp); nvlist_add_number(nvl, "af", kill->af); nvlist_add_number(nvl, "proto", kill->proto); pfctl_nv_add_rule_addr(nvl, "src", &kill->src); pfctl_nv_add_rule_addr(nvl, "dst", &kill->dst); pfctl_nv_add_rule_addr(nvl, "rt_addr", &kill->rt_addr); nvlist_add_string(nvl, "ifname", kill->ifname); nvlist_add_string(nvl, "label", kill->label); nvlist_add_bool(nvl, "kill_match", kill->kill_match); + nvlist_add_bool(nvl, "nat", kill->nat); if ((ret = pfctl_do_ioctl(dev, ioctlval, 1024, &nvl)) != 0) return (ret); if (killed) *killed = nvlist_get_number(nvl, "killed"); nvlist_destroy(nvl); return (ret); } int pfctl_clear_states(int dev, const struct pfctl_kill *kill, unsigned int *killed) { return (_pfctl_clear_states(dev, kill, killed, DIOCCLRSTATESNV)); } int
pfctl_kill_states(int dev, const struct pfctl_kill *kill, unsigned int *killed) { return (_pfctl_clear_states(dev, kill, killed, DIOCKILLSTATESNV)); } int pfctl_clear_rules(int dev, const char *anchorname) { struct pfioc_trans trans; struct pfioc_trans_e transe[2]; int ret; bzero(&trans, sizeof(trans)); bzero(&transe, sizeof(transe)); transe[0].rs_num = PF_RULESET_SCRUB; if (strlcpy(transe[0].anchor, anchorname, sizeof(transe[0].anchor)) >= sizeof(transe[0].anchor)) return (E2BIG); transe[1].rs_num = PF_RULESET_FILTER; if (strlcpy(transe[1].anchor, anchorname, sizeof(transe[1].anchor)) >= sizeof(transe[1].anchor)) return (E2BIG); trans.size = 2; trans.esize = sizeof(transe[0]); trans.array = transe; ret = ioctl(dev, DIOCXBEGIN, &trans); if (ret != 0) return (ret); return ioctl(dev, DIOCXCOMMIT, &trans); } int pfctl_clear_nat(int dev, const char *anchorname) { struct pfioc_trans trans; struct pfioc_trans_e transe[3]; int ret; bzero(&trans, sizeof(trans)); bzero(&transe, sizeof(transe)); transe[0].rs_num = PF_RULESET_NAT; if (strlcpy(transe[0].anchor, anchorname, sizeof(transe[0].anchor)) >= sizeof(transe[0].anchor)) return (E2BIG); transe[1].rs_num = PF_RULESET_BINAT; if (strlcpy(transe[1].anchor, anchorname, sizeof(transe[1].anchor)) >= sizeof(transe[0].anchor)) return (E2BIG); transe[2].rs_num = PF_RULESET_RDR; if (strlcpy(transe[2].anchor, anchorname, sizeof(transe[2].anchor)) >= sizeof(transe[2].anchor)) return (E2BIG); trans.size = 3; trans.esize = sizeof(transe[0]); trans.array = transe; ret = ioctl(dev, DIOCXBEGIN, &trans); if (ret != 0) return (ret); return ioctl(dev, DIOCXCOMMIT, &trans); } int pfctl_clear_eth_rules(int dev, const char *anchorname) { struct pfioc_trans trans; struct pfioc_trans_e transe; int ret; bzero(&trans, sizeof(trans)); bzero(&transe, sizeof(transe)); transe.rs_num = PF_RULESET_ETH; if (strlcpy(transe.anchor, anchorname, sizeof(transe.anchor)) >= sizeof(transe.anchor)) return (E2BIG); trans.size = 1; trans.esize = sizeof(transe); trans.array = &transe; ret = ioctl(dev, DIOCXBEGIN, &trans); if (ret != 0) return (ret); return ioctl(dev, DIOCXCOMMIT, &trans); } static int pfctl_get_limit(int dev, const int index, uint *limit) { struct pfioc_limit pl; bzero(&pl, sizeof(pl)); pl.index = index; if (ioctl(dev, DIOCGETLIMIT, &pl) == -1) return (errno); *limit = pl.limit; return (0); } int pfctl_set_syncookies(int dev, const struct pfctl_syncookies *s) { struct pfioc_nv nv; nvlist_t *nvl; int ret; uint state_limit; uint64_t lim, hi, lo; ret = pfctl_get_limit(dev, PF_LIMIT_STATES, &state_limit); if (ret != 0) return (ret); lim = state_limit; hi = lim * s->highwater / 100; lo = lim * s->lowwater / 100; if (lo == hi) hi++; nvl = nvlist_create(0); nvlist_add_bool(nvl, "enabled", s->mode != PFCTL_SYNCOOKIES_NEVER); nvlist_add_bool(nvl, "adaptive", s->mode == PFCTL_SYNCOOKIES_ADAPTIVE); nvlist_add_number(nvl, "highwater", hi); nvlist_add_number(nvl, "lowwater", lo); nv.data = nvlist_pack(nvl, &nv.len); nv.size = nv.len; nvlist_destroy(nvl); nvl = NULL; ret = ioctl(dev, DIOCSETSYNCOOKIES, &nv); free(nv.data); return (ret); } int pfctl_get_syncookies(int dev, struct pfctl_syncookies *s) { nvlist_t *nvl; int ret; uint state_limit; bool enabled, adaptive; ret = pfctl_get_limit(dev, PF_LIMIT_STATES, &state_limit); if (ret != 0) return (ret); bzero(s, sizeof(*s)); nvl = nvlist_create(0); if ((ret = pfctl_do_ioctl(dev, DIOCGETSYNCOOKIES, 256, &nvl)) != 0) return (errno); enabled = nvlist_get_bool(nvl, "enabled"); adaptive = nvlist_get_bool(nvl, "adaptive"); if (enabled) { if 
(adaptive) s->mode = PFCTL_SYNCOOKIES_ADAPTIVE; else s->mode = PFCTL_SYNCOOKIES_ALWAYS; } else { s->mode = PFCTL_SYNCOOKIES_NEVER; } s->highwater = nvlist_get_number(nvl, "highwater") * 100 / state_limit; s->lowwater = nvlist_get_number(nvl, "lowwater") * 100 / state_limit; nvlist_destroy(nvl); return (0); } int pfctl_table_add_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *nadd, int flags) { struct pfioc_table io; if (tbl == NULL || size < 0 || (size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = size; if (ioctl(dev, DIOCRADDADDRS, &io)) return (errno); if (nadd != NULL) *nadd = io.pfrio_nadd; return (0); } int pfctl_table_del_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *ndel, int flags) { struct pfioc_table io; if (tbl == NULL || size < 0 || (size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = size; if (ioctl(dev, DIOCRDELADDRS, &io)) return (errno); if (ndel != NULL) *ndel = io.pfrio_ndel; return (0); } int pfctl_table_set_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *size2, int *nadd, int *ndel, int *nchange, int flags) { struct pfioc_table io; if (tbl == NULL || size < 0 || (size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = size; io.pfrio_size2 = (size2 != NULL) ? *size2 : 0; if (ioctl(dev, DIOCRSETADDRS, &io)) return (-1); if (nadd != NULL) *nadd = io.pfrio_nadd; if (ndel != NULL) *ndel = io.pfrio_ndel; if (nchange != NULL) *nchange = io.pfrio_nchange; if (size2 != NULL) *size2 = io.pfrio_size2; return (0); } int pfctl_table_get_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int *size, int flags) { struct pfioc_table io; if (tbl == NULL || size == NULL || *size < 0 || (*size && addr == NULL)) { return (EINVAL); } bzero(&io, sizeof io); io.pfrio_flags = flags; io.pfrio_table = *tbl; io.pfrio_buffer = addr; io.pfrio_esize = sizeof(*addr); io.pfrio_size = *size; if (ioctl(dev, DIOCRGETADDRS, &io)) return (-1); *size = io.pfrio_size; return (0); } diff --git a/lib/libpfctl/libpfctl.h b/lib/libpfctl/libpfctl.h index ad6fde89771c..0b50cc054060 100644 --- a/lib/libpfctl/libpfctl.h +++ b/lib/libpfctl/libpfctl.h @@ -1,449 +1,450 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _PFCTL_IOCTL_H_ #define _PFCTL_IOCTL_H_ #include <netpfil/pf/pf.h> struct pfctl_anchor; struct pfctl_eth_anchor; struct pfctl_status_counter { uint64_t id; uint64_t counter; char *name; TAILQ_ENTRY(pfctl_status_counter) entry; }; TAILQ_HEAD(pfctl_status_counters, pfctl_status_counter); struct pfctl_status { bool running; uint32_t since; uint32_t debug; uint32_t hostid; uint64_t states; uint64_t src_nodes; char ifname[IFNAMSIZ]; uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; bool syncookies_active; uint32_t reass; struct pfctl_status_counters counters; struct pfctl_status_counters lcounters; struct pfctl_status_counters fcounters; struct pfctl_status_counters scounters; uint64_t pcounters[2][2][3]; uint64_t bcounters[2][2]; }; struct pfctl_eth_rulesets_info { uint32_t nr; }; struct pfctl_eth_rules_info { uint32_t nr; uint32_t ticket; }; struct pfctl_eth_addr { uint8_t addr[ETHER_ADDR_LEN]; uint8_t mask[ETHER_ADDR_LEN]; bool neg; bool isset; }; struct pfctl_eth_rule { uint32_t nr; char label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; uint32_t ridentifier; bool quick; /* Filter */ char ifname[IFNAMSIZ]; uint8_t ifnot; uint8_t direction; uint16_t proto; struct pfctl_eth_addr src, dst; struct pf_rule_addr ipsrc, ipdst; char match_tagname[PF_TAG_NAME_SIZE]; uint16_t match_tag; bool match_tag_not; /* Stats */ uint64_t evaluations; uint64_t packets[2]; uint64_t bytes[2]; time_t last_active_timestamp; /* Action */ char qname[PF_QNAME_SIZE]; char tagname[PF_TAG_NAME_SIZE]; uint16_t dnpipe; uint32_t dnflags; char bridge_to[IFNAMSIZ]; uint8_t action; struct pfctl_eth_anchor *anchor; uint8_t anchor_relative; uint8_t anchor_wildcard; TAILQ_ENTRY(pfctl_eth_rule) entries; }; TAILQ_HEAD(pfctl_eth_rules, pfctl_eth_rule); struct pfctl_eth_ruleset_info { uint32_t nr; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; }; struct pfctl_eth_ruleset { struct pfctl_eth_rules rules; struct pfctl_eth_anchor *anchor; }; struct pfctl_eth_anchor { struct pfctl_eth_anchor *parent; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pfctl_eth_ruleset ruleset; int refcnt; /* anchor rules */ int match; /* XXX: used for pfctl black magic */ }; struct pfctl_pool { struct pf_palist list; struct pf_pooladdr *cur; struct pf_poolhashkey key; struct pf_addr counter; struct pf_mape_portset mape; int tblidx; uint16_t proxy_port[2]; uint8_t opts; }; struct pfctl_rules_info { uint32_t nr; uint32_t ticket; }; struct pfctl_rule { struct pf_rule_addr src; struct pf_rule_addr dst; union pf_rule_ptr skip[PF_SKIP_COUNT]; char label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; uint32_t ridentifier; char ifname[IFNAMSIZ]; char qname[PF_QNAME_SIZE]; char pqname[PF_QNAME_SIZE]; char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; char overload_tblname[PF_TABLE_NAME_SIZE]; TAILQ_ENTRY(pfctl_rule) entries; struct pfctl_pool rpool; uint64_t evaluations; uint64_t packets[2]; uint64_t bytes[2]; time_t last_active_timestamp; struct pfi_kif *kif; struct pfctl_anchor *anchor; struct pfr_ktable *overload_tbl;
pf_osfp_t os_fingerprint; int rtableid; uint32_t timeout[PFTM_MAX]; uint32_t max_states; uint32_t max_src_nodes; uint32_t max_src_states; uint32_t max_src_conn; struct { uint32_t limit; uint32_t seconds; } max_src_conn_rate; uint32_t qid; uint32_t pqid; uint16_t dnpipe; uint16_t dnrpipe; uint32_t free_flags; uint32_t nr; uint32_t prob; uid_t cuid; pid_t cpid; uint64_t states_cur; uint64_t states_tot; uint64_t src_nodes; uint16_t return_icmp; uint16_t return_icmp6; uint16_t max_mss; uint16_t tag; uint16_t match_tag; uint16_t scrub_flags; struct pf_rule_uid uid; struct pf_rule_gid gid; uint32_t rule_flag; uint8_t action; uint8_t direction; uint8_t log; uint8_t logif; uint8_t quick; uint8_t ifnot; uint8_t match_tag_not; uint8_t natpass; uint8_t keep_state; sa_family_t af; uint8_t proto; uint8_t type; uint8_t code; uint8_t flags; uint8_t flagset; uint8_t min_ttl; uint8_t allow_opts; uint8_t rt; uint8_t return_ttl; uint8_t tos; uint8_t set_tos; uint8_t anchor_relative; uint8_t anchor_wildcard; uint8_t flush; uint8_t prio; uint8_t set_prio[2]; struct { struct pf_addr addr; uint16_t port; } divert; }; TAILQ_HEAD(pfctl_rulequeue, pfctl_rule); struct pfctl_ruleset { struct { struct pfctl_rulequeue queues[2]; struct { struct pfctl_rulequeue *ptr; struct pfctl_rule **ptr_array; uint32_t rcount; uint32_t ticket; int open; } active, inactive; } rules[PF_RULESET_MAX]; struct pfctl_anchor *anchor; uint32_t tticket; int tables; int topen; }; RB_HEAD(pfctl_anchor_global, pfctl_anchor); RB_HEAD(pfctl_anchor_node, pfctl_anchor); struct pfctl_anchor { RB_ENTRY(pfctl_anchor) entry_global; RB_ENTRY(pfctl_anchor) entry_node; struct pfctl_anchor *parent; struct pfctl_anchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pfctl_ruleset ruleset; int refcnt; /* anchor rules */ int match; /* XXX: used for pfctl black magic */ }; RB_PROTOTYPE(pfctl_anchor_global, pfctl_anchor, entry_global, pf_anchor_compare); RB_PROTOTYPE(pfctl_anchor_node, pfctl_anchor, entry_node, pf_anchor_compare); struct pfctl_state_cmp { uint64_t id; uint32_t creatorid; uint8_t direction; }; struct pfctl_kill { struct pfctl_state_cmp cmp; sa_family_t af; int proto; struct pf_rule_addr src; struct pf_rule_addr dst; struct pf_rule_addr rt_addr; char ifname[IFNAMSIZ]; char label[PF_RULE_LABEL_SIZE]; bool kill_match; + bool nat; }; struct pfctl_state_peer { uint32_t seqlo; uint32_t seqhi; uint32_t seqdiff; uint8_t state; uint8_t wscale; }; struct pfctl_state_key { struct pf_addr addr[2]; uint16_t port[2]; sa_family_t af; uint8_t proto; }; struct pfctl_state { TAILQ_ENTRY(pfctl_state) entry; uint64_t id; uint32_t creatorid; uint8_t direction; struct pfctl_state_peer src; struct pfctl_state_peer dst; uint32_t rule; uint32_t anchor; uint32_t nat_rule; struct pf_addr rt_addr; struct pfctl_state_key key[2]; /* addresses stack and wire */ char ifname[IFNAMSIZ]; char orig_ifname[IFNAMSIZ]; uint64_t packets[2]; uint64_t bytes[2]; uint32_t creation; uint32_t expire; uint32_t pfsync_time; uint16_t state_flags; uint32_t sync_flags; uint16_t qid; uint16_t pqid; uint16_t dnpipe; uint16_t dnrpipe; uint8_t log; int32_t rtableid; uint8_t min_ttl; uint8_t set_tos; uint16_t max_mss; uint8_t set_prio[2]; uint8_t rt; char rt_ifname[IFNAMSIZ]; }; TAILQ_HEAD(pfctl_statelist, pfctl_state); struct pfctl_states { struct pfctl_statelist states; size_t count; }; enum pfctl_syncookies_mode { PFCTL_SYNCOOKIES_NEVER, PFCTL_SYNCOOKIES_ALWAYS, PFCTL_SYNCOOKIES_ADAPTIVE }; extern const char* PFCTL_SYNCOOKIES_MODE_NAMES[]; struct pfctl_syncookies { enum 
pfctl_syncookies_mode mode; uint8_t highwater; /* Percent */ uint8_t lowwater; /* Percent */ }; int pfctl_startstop(int start); struct pfctl_status* pfctl_get_status(int dev); uint64_t pfctl_status_counter(struct pfctl_status *status, int id); uint64_t pfctl_status_fcounter(struct pfctl_status *status, int id); uint64_t pfctl_status_scounter(struct pfctl_status *status, int id); void pfctl_free_status(struct pfctl_status *status); int pfctl_get_eth_rulesets_info(int dev, struct pfctl_eth_rulesets_info *ri, const char *path); int pfctl_get_eth_ruleset(int dev, const char *path, int nr, struct pfctl_eth_ruleset_info *ri); int pfctl_get_eth_rules_info(int dev, struct pfctl_eth_rules_info *rules, const char *path); int pfctl_get_eth_rule(int dev, uint32_t nr, uint32_t ticket, const char *path, struct pfctl_eth_rule *rule, bool clear, char *anchor_call); int pfctl_add_eth_rule(int dev, const struct pfctl_eth_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket); int pfctl_get_rules_info(int dev, struct pfctl_rules_info *rules, uint32_t ruleset, const char *path); int pfctl_get_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call); int pfctl_get_clear_rule(int dev, uint32_t nr, uint32_t ticket, const char *anchor, uint32_t ruleset, struct pfctl_rule *rule, char *anchor_call, bool clear); int pfctl_add_rule(int dev, const struct pfctl_rule *r, const char *anchor, const char *anchor_call, uint32_t ticket, uint32_t pool_ticket); int pfctl_set_keepcounters(int dev, bool keep); int pfctl_get_creatorids(uint32_t *creators, size_t *len); struct pfctl_state_filter { char ifname[IFNAMSIZ]; uint16_t proto; sa_family_t af; struct pf_addr addr; struct pf_addr mask; }; typedef int (*pfctl_get_state_fn)(struct pfctl_state *, void *); int pfctl_get_states_iter(pfctl_get_state_fn f, void *arg); int pfctl_get_filtered_states_iter(struct pfctl_state_filter *filter, pfctl_get_state_fn f, void *arg); int pfctl_get_states(int dev, struct pfctl_states *states); void pfctl_free_states(struct pfctl_states *states); int pfctl_clear_states(int dev, const struct pfctl_kill *kill, unsigned int *killed); int pfctl_kill_states(int dev, const struct pfctl_kill *kill, unsigned int *killed); int pfctl_clear_rules(int dev, const char *anchorname); int pfctl_clear_nat(int dev, const char *anchorname); int pfctl_clear_eth_rules(int dev, const char *anchorname); int pfctl_set_syncookies(int dev, const struct pfctl_syncookies *s); int pfctl_get_syncookies(int dev, struct pfctl_syncookies *s); int pfctl_table_add_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *nadd, int flags); int pfctl_table_del_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *ndel, int flags); int pfctl_table_set_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int size, int *size2, int *nadd, int *ndel, int *nchange, int flags); int pfctl_table_get_addrs(int dev, struct pfr_table *tbl, struct pfr_addr *addr, int *size, int flags); #endif diff --git a/sbin/pfctl/pfctl.8 b/sbin/pfctl/pfctl.8 index 41eb2bea9f94..6c9a9f3b2ca4 100644 --- a/sbin/pfctl/pfctl.8 +++ b/sbin/pfctl/pfctl.8 @@ -1,723 +1,728 @@ .\" $OpenBSD: pfctl.8,v 1.138 2008/06/10 20:55:02 mcbride Exp $ .\" .\" Copyright (c) 2001 Kjell Wooding. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. 
Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" -.Dd February 22, 2021 +.Dd October 20, 2023 .Dt PFCTL 8 .Os .Sh NAME .Nm pfctl .Nd control the packet filter (PF) device .Sh SYNOPSIS .Nm pfctl .Bk -words .Op Fl AdeghMmNnOPqRrvz .Op Fl a Ar anchor .Oo Fl D Ar macro Ns = .Ar value Oc .Op Fl F Ar modifier .Op Fl f Ar file .Op Fl i Ar interface .Op Fl K Ar host | network .Xo .Oo Fl k -.Ar host | network | label | id | gateway +.Ar host | network | label | id | gateway | nat .Oc Xc .Op Fl o Ar level .Op Fl p Ar device .Op Fl s Ar modifier .Xo .Oo Fl t Ar table .Fl T Ar command .Op Ar address ... .Oc Xc .Op Fl x Ar level .Ek .Sh DESCRIPTION The .Nm utility communicates with the packet filter device using the ioctl interface described in .Xr pf 4 . It allows ruleset and parameter configuration and retrieval of status information from the packet filter. .Pp Packet filtering restricts the types of packets that pass through network interfaces entering or leaving the host based on filter rules as described in .Xr pf.conf 5 . The packet filter can also replace addresses and ports of packets. Replacing source addresses and ports of outgoing packets is called NAT (Network Address Translation) and is used to connect an internal network (usually reserved address space) to an external one (the Internet) by making all connections to external hosts appear to come from the gateway. Replacing destination addresses and ports of incoming packets is used to redirect connections to different hosts and/or ports. A combination of both translations, bidirectional NAT, is also supported. Translation rules are described in .Xr pf.conf 5 . .Pp When the variable .Va pf is set to .Dv YES in .Xr rc.conf 5 , the rule file specified with the variable .Va pf_rules is loaded automatically by the .Xr rc 8 scripts and the packet filter is enabled. .Pp The packet filter does not itself forward packets between interfaces. Forwarding can be enabled by setting the .Xr sysctl 8 variables .Em net.inet.ip.forwarding and/or .Em net.inet6.ip6.forwarding to 1. Set them permanently in .Xr sysctl.conf 5 . .Pp The .Nm utility provides several commands. The options are as follows: .Bl -tag -width Ds .It Fl A Load only the queue rules present in the rule file. Other rules and options are ignored. 
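.Pp
For example, to load only the queue rules from
.Pa /etc/pf.conf :
.Bd -literal -offset indent
# pfctl -A -f /etc/pf.conf
.Ed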
.It Fl a Ar anchor Apply flags .Fl f , .Fl F , and .Fl s only to the rules in the specified .Ar anchor . In addition to the main ruleset, .Nm can load and manipulate additional rulesets by name, called anchors. The main ruleset is the default anchor. .Pp Anchors are referenced by name and may be nested, with the various components of the anchor path separated by .Sq / characters, similar to how file system hierarchies are laid out. The last component of the anchor path is where ruleset operations are performed. .Pp Evaluation of .Ar anchor rules from the main ruleset is described in .Xr pf.conf 5 . .Pp For example, the following will show all filter rules (see the .Fl s flag below) inside the anchor .Dq authpf/smith(1234) , which would have been created for user .Dq smith by .Xr authpf 8 , PID 1234: .Bd -literal -offset indent # pfctl -a "authpf/smith(1234)" -s rules .Ed .Pp Private tables can also be put inside anchors, either by having table statements in the .Xr pf.conf 5 file that is loaded in the anchor, or by using regular table commands, as in: .Bd -literal -offset indent # pfctl -a foo/bar -t mytable -T add 1.2.3.4 5.6.7.8 .Ed .Pp When a rule referring to a table is loaded in an anchor, the rule will use the private table if one is defined, and then fall back to the table defined in the main ruleset, if there is one. This is similar to C rules for variable scope. It is possible to create distinct tables with the same name in the global ruleset and in an anchor, but this is often bad design and a warning will be issued in that case. .Pp By default, recursive inline printing of anchors applies only to unnamed anchors specified inline in the ruleset. If the anchor name is terminated with a .Sq * character, the .Fl s flag will recursively print all anchors in a brace delimited block. For example the following will print the .Dq authpf ruleset recursively: .Bd -literal -offset indent # pfctl -a 'authpf/*' -sr .Ed .Pp To print the main ruleset recursively, specify only .Sq * as the anchor name: .Bd -literal -offset indent # pfctl -a '*' -sr .Ed .It Fl D Ar macro Ns = Ns Ar value Define .Ar macro to be set to .Ar value on the command line. Overrides the definition of .Ar macro in the ruleset. .It Fl d Disable the packet filter. .It Fl e Enable the packet filter. .It Fl F Ar modifier Flush the filter parameters specified by .Ar modifier (may be abbreviated): .Pp .Bl -tag -width xxxxxxxxxxxx -compact .It Fl F Cm nat Flush the NAT rules. .It Fl F Cm queue Flush the queue rules. .It Fl F Cm ethernet Flush the Ethernet filter rules. .It Fl F Cm rules Flush the filter rules. .It Fl F Cm states Flush the state table (NAT and filter). .It Fl F Cm Sources Flush the source tracking table. .It Fl F Cm info Flush the filter information (statistics that are not bound to rules). .It Fl F Cm Tables Flush the tables. .It Fl F Cm osfp Flush the passive operating system fingerprints. .It Fl F Cm all Flush all of the above. .El .It Fl f Ar file Load the rules contained in .Ar file . This .Ar file may contain macros, tables, options, and normalization, queueing, translation, and filtering rules. With the exception of macros and tables, the statements must appear in that order. .It Fl g Include output helpful for debugging. .It Fl h Help. .It Fl i Ar interface Restrict the operation to the given .Ar interface . .It Fl K Ar host | network Kill all of the source tracking entries originating from the specified .Ar host or .Ar network . 
A second
.Fl K Ar host
or
.Fl K Ar network
option may be specified, which will kill all the source tracking
entries from the first host/network to the second.
.It Xo
.Fl k
-.Ar host | network | label | id | gateway
+.Ar host | network | label | id | gateway | nat
.Xc
Kill all of the state entries matching the specified
.Ar host ,
.Ar network ,
.Ar label ,
.Ar id ,
+.Ar gateway ,
or
-.Ar gateway .
+.Ar nat .
.Pp
For example, to kill all of the state entries originating from
.Dq host :
.Pp
.Dl # pfctl -k host
.Pp
A second
.Fl k Ar host
or
.Fl k Ar network
option may be specified, which will kill all the state entries
from the first host/network to the second.
To kill all of the state entries from
.Dq host1
to
.Dq host2 :
.Pp
.Dl # pfctl -k host1 -k host2
.Pp
To kill all states originating from 192.168.1.0/24 to 172.16.0.0/16:
.Pp
.Dl # pfctl -k 192.168.1.0/24 -k 172.16.0.0/16
.Pp
A network prefix length of 0 can be used as a wildcard.
To kill all states with the target
.Dq host2 :
.Pp
.Dl # pfctl -k 0.0.0.0/0 -k host2
.Pp
It is also possible to kill states by rule label or state ID.
In this mode the first
.Fl k
argument is used to specify the type of the second argument.
The following command would kill all states that have been created
from rules carrying the label
.Dq foobar :
.Pp
.Dl # pfctl -k label -k foobar
.Pp
To kill one specific state by its unique state ID
(as shown by pfctl -s state -vv), use the
.Ar id
modifier and as a second argument the state ID and optional
creator ID.
To kill a state with ID 4823e84500000003 use:
.Pp
.Dl # pfctl -k id -k 4823e84500000003
.Pp
To kill a state with ID 4823e84500000018 created from a backup
firewall with hostid 00000002 use:
.Pp
.Dl # pfctl -k id -k 4823e84500000018/2
.Pp
It is also possible to kill states created from a rule with the
route-to/reply-to parameter set to route the connection through a
particular gateway.
Note that rules routing via the default routing table (not via a
route-to rule) will have their rt_addr set as 0.0.0.0 or ::.
To kill all states using a gateway of 192.168.0.1 use:
.Pp
.Dl # pfctl -k gateway -k 192.168.0.1
.Pp
A network prefix length can also be specified.
To kill all states using a gateway in 192.168.0.0/24:
.Pp
.Dl # pfctl -k gateway -k 192.168.0.0/24
.Pp
+States can also be killed based on their pre-NAT address:
+.Pp
+.Dl # pfctl -k nat -k 192.168.0.1
+.Pp
.It Fl M
Kill matching states in the opposite direction (on other interfaces)
when killing states.
This applies to states killed using the -k option and also will apply
to the flush command when flushing states.
This is useful when an interface is specified when flushing states.
Example:
.Pp
.Dl # pfctl -M -i interface -Fs
.Pp
.It Fl m
Merge in explicitly given options without resetting those
which are omitted.
Allows single options to be modified without disturbing the others:
.Bd -literal -offset indent
# echo "set loginterface fxp0" | pfctl -mf -
.Ed
.It Fl N
Load only the NAT rules present in the rule file.
Other rules and options are ignored.
.It Fl n
Do not actually load rules, just parse them.
.It Fl O
Load only the options present in the rule file.
Other rules and options are ignored.
.It Fl o Ar level
Control the ruleset optimizer, overriding any rule file settings.
.Pp
.Bl -tag -width xxxxxxxxxxxx -compact
.It Fl o Cm none
Disable the ruleset optimizer.
.It Fl o Cm basic
Enable basic ruleset optimizations.
This is the default behaviour.
.It Fl o Cm profile
Enable basic ruleset optimizations with profiling.
.El For further information on the ruleset optimizer, see .Xr pf.conf 5 . .It Fl P Do not perform service name lookup for port specific rules, instead display the ports numerically. .It Fl p Ar device Use the device file .Ar device instead of the default .Pa /dev/pf . .It Fl q Only print errors and warnings. .It Fl R Load only the filter rules present in the rule file. Other rules and options are ignored. .It Fl r Perform reverse DNS lookups on states when displaying them. .It Fl s Ar modifier Show the filter parameters specified by .Ar modifier (may be abbreviated): .Pp .Bl -tag -width xxxxxxxxxxxxx -compact .It Fl s Cm nat Show the currently loaded NAT rules. .It Fl s Cm queue Show the currently loaded queue rules. When used together with .Fl v , per-queue statistics are also shown. When used together with .Fl v v , .Nm will loop and show updated queue statistics every five seconds, including measured bandwidth and packets per second. .It Fl s Cm ether Show the currently loaded Ethernet rules. When used together with .Fl v , the per-rule statistics (number of evaluations, packets, and bytes) are also shown. .It Fl s Cm rules Show the currently loaded filter rules. When used together with .Fl v , the per-rule statistics (number of evaluations, packets, and bytes) are also shown. Note that the .Dq skip step optimization done automatically by the kernel will skip evaluation of rules where possible. Packets passed statefully are counted in the rule that created the state (even though the rule is not evaluated more than once for the entire connection). .It Fl s Cm Anchors Show the currently loaded anchors directly attached to the main ruleset. If .Fl a Ar anchor is specified as well, the anchors loaded directly below the given .Ar anchor are shown instead. If .Fl v is specified, all anchors attached under the target anchor will be displayed recursively. .It Fl s Cm states Show the contents of the state table. .It Fl s Cm Sources Show the contents of the source tracking table. .It Fl s Cm info Show filter information (statistics and counters). When used together with .Fl v , source tracking statistics are also shown. .It Fl s Cm Running Show the running status and provide a non-zero exit status when disabled. .It Fl s Cm labels Show per-rule statistics (label, evaluations, packets total, bytes total, packets in, bytes in, packets out, bytes out, state creations) of filter rules with labels, useful for accounting. .It Fl s Cm timeouts Show the current global timeouts. .It Fl s Cm memory Show the current pool memory hard limits. .It Fl s Cm Tables Show the list of tables. .It Fl s Cm osfp Show the list of operating system fingerprints. .It Fl s Cm Interfaces Show the list of interfaces and interface drivers available to PF. When used together with .Fl v , it additionally lists which interfaces have skip rules activated. When used together with .Fl vv , interface statistics are also shown. .Fl i can be used to select an interface or a group of interfaces. .It Fl s Cm all Show all of the above, except for the lists of interfaces and operating system fingerprints. .El .It Fl T Ar command Op Ar address ... Specify the .Ar command (may be abbreviated) to apply to the table. Commands include: .Pp .Bl -tag -width xxxxxxxxxxxx -compact .It Fl T Cm kill Kill a table. .It Fl T Cm flush Flush all addresses of a table. .It Fl T Cm add Add one or more addresses in a table. Automatically create a nonexisting table. .It Fl T Cm delete Delete one or more addresses from a table. 
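.Pp
For example, to delete a single address from the
.Dq mytable
table created above:
.Pp
.Dl # pfctl -t mytable -T delete 1.2.3.4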
.It Fl T Cm expire Ar number
Delete addresses which had their statistics cleared more than
.Ar number
seconds ago.
For entries which have never had their statistics cleared,
.Ar number
refers to the time they were added to the table.
.It Fl T Cm replace
Replace the addresses of the table.
Automatically create a nonexisting table.
.It Fl T Cm show
Show the content (addresses) of a table.
.It Fl T Cm test
Test if the given addresses match a table.
.It Fl T Cm zero
Clear all the statistics of a table.
.It Fl T Cm load
Load only the table definitions from
.Xr pf.conf 5 .
This is used in conjunction with the
.Fl f
flag, as in:
.Bd -literal -offset indent
# pfctl -Tl -f pf.conf
.Ed
.El
.Pp
For the
.Cm add ,
.Cm delete ,
.Cm replace ,
and
.Cm test
commands, the list of addresses can be specified either directly on
the command line and/or in an unformatted text file, using the
.Fl f
flag.
Comments starting with a
.Sq #
are allowed in the text file.
With these commands, the
.Fl v
flag can also be used once or twice, in which case
.Nm
will print the detailed result of the operation for each individual
address, prefixed by one of the following letters:
.Pp
.Bl -tag -width XXX -compact
.It A
The address/network has been added.
.It C
The address/network has been changed (negated).
.It D
The address/network has been deleted.
.It M
The address matches
.Po
.Cm test
operation only
.Pc .
.It X
The address/network is duplicated and therefore ignored.
.It Y
The address/network cannot be added/deleted due to conflicting
.Sq \&!
attributes.
.It Z
The address/network has been cleared (statistics).
.El
.Pp
Each table can maintain a set of counters that can be retrieved using
the
.Fl v
flag of
.Nm .
For example, the following commands define a wide open firewall which
will keep track of packets going to or coming from the
.Ox
FTP server.
The following commands configure the firewall and send 10 pings to the
FTP server:
.Bd -literal -offset indent
# printf "table <test> counters { ftp.openbsd.org }\en \e
pass out to <test>\en" | pfctl -f-
# ping -qc10 ftp.openbsd.org
.Ed
.Pp
We can now use the table
.Cm show
command to output, for each address and packet direction, the number
of packets and bytes that are being passed or blocked by rules
referencing the table.
The time at which the current accounting started is also shown with
the
.Dq Cleared
line.
.Bd -literal -offset indent
# pfctl -t test -vTshow
129.128.5.191
	Cleared:     Thu Feb 13 18:55:18 2003
	In/Block:    [ Packets: 0        Bytes: 0        ]
	In/Pass:     [ Packets: 10       Bytes: 840      ]
	Out/Block:   [ Packets: 0        Bytes: 0        ]
	Out/Pass:    [ Packets: 10       Bytes: 840      ]
.Ed
.Pp
Similarly, it is possible to view global information about the tables
by using the
.Fl v
modifier twice and the
.Fl s
.Cm Tables
command.
This will display the number of addresses on each table, the number of
rules which reference the table, and the global packet statistics for
the whole table:
.Bd -literal -offset indent
# pfctl -vvsTables
--a-r-C test
	Addresses:   1
	Cleared:     Thu Feb 13 18:55:18 2003
	References:  [ Anchors: 0        Rules: 1        ]
	Evaluations: [ NoMatch: 3496     Match: 1        ]
	In/Block:    [ Packets: 0        Bytes: 0        ]
	In/Pass:     [ Packets: 10       Bytes: 840      ]
	In/XPass:    [ Packets: 0        Bytes: 0        ]
	Out/Block:   [ Packets: 0        Bytes: 0        ]
	Out/Pass:    [ Packets: 10       Bytes: 840      ]
	Out/XPass:   [ Packets: 0        Bytes: 0        ]
.Ed
.Pp
As we can see here, only one packet \- the initial ping request \-
matched the table, but all packets passing as the result of the state
are correctly accounted for.
Reloading the table(s) or ruleset will not affect packet accounting in
any way.
The two .Dq XPass counters are incremented instead of the .Dq Pass counters when a .Dq stateful packet is passed but does not match the table anymore. This will happen in our example if someone flushes the table while the .Xr ping 8 command is running. .Pp When used with a single .Fl v , .Nm will only display the first line containing the table flags and name. The flags are defined as follows: .Pp .Bl -tag -width XXX -compact .It c For constant tables, which cannot be altered outside .Xr pf.conf 5 . .It p For persistent tables, which do not get automatically killed when no rules refer to them. .It a For tables which are part of the .Em active tableset. Tables without this flag do not really exist, cannot contain addresses, and are only listed if the .Fl g flag is given. .It i For tables which are part of the .Em inactive tableset. This flag can only be witnessed briefly during the loading of .Xr pf.conf 5 . .It r For tables which are referenced (used) by rules. .It h This flag is set when a table in the main ruleset is hidden by one or more tables of the same name from anchors attached below it. .It C This flag is set when per-address counters are enabled on the table. .El .It Fl t Ar table Specify the name of the table. .It Fl v Produce more verbose output. A second use of .Fl v will produce even more verbose output including ruleset warnings. See the previous section for its effect on table commands. .It Fl x Ar level Set the debug .Ar level (may be abbreviated) to one of the following: .Pp .Bl -tag -width xxxxxxxxxxxx -compact .It Fl x Cm none Do not generate debug messages. .It Fl x Cm urgent Generate debug messages only for serious errors. .It Fl x Cm misc Generate debug messages for various errors. .It Fl x Cm loud Generate debug messages for common conditions. .El .It Fl z Clear per-rule statistics. .El .Sh FILES .Bl -tag -width "/etc/pf.conf" -compact .It Pa /etc/pf.conf Packet filter rules file. .It Pa /etc/pf.os Passive operating system fingerprint database. .El .Sh SEE ALSO .Xr pf 4 , .Xr pf.conf 5 , .Xr pf.os 5 , .Xr rc.conf 5 , .Xr services 5 , .Xr sysctl.conf 5 , .Xr authpf 8 , .Xr ftp-proxy 8 , .Xr rc 8 , .Xr sysctl 8 .Sh HISTORY The .Nm program and the .Xr pf 4 filter mechanism appeared in .Ox 3.0 . They first appeared in .Fx 5.3 ported from the version in .Ox 3.5 diff --git a/sbin/pfctl/pfctl.c b/sbin/pfctl/pfctl.c index c3f3d82ff767..03b7f24ce60a 100644 --- a/sbin/pfctl/pfctl.c +++ b/sbin/pfctl/pfctl.c @@ -1,3285 +1,3291 @@ /* $OpenBSD: pfctl.c,v 1.278 2008/08/31 20:18:17 jmc Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002,2003 Henning Brauer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include #define PFIOC_USE_LATEST #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pfctl_parser.h" #include "pfctl.h" void usage(void); int pfctl_enable(int, int); int pfctl_disable(int, int); int pfctl_clear_stats(int, int); int pfctl_get_skip_ifaces(void); int pfctl_check_skip_ifaces(char *); int pfctl_adjust_skip_ifaces(struct pfctl *); int pfctl_clear_interface_flags(int, int); int pfctl_flush_eth_rules(int, int, char *); int pfctl_flush_rules(int, int, char *); int pfctl_flush_nat(int, int, char *); int pfctl_clear_altq(int, int); int pfctl_clear_src_nodes(int, int); int pfctl_clear_iface_states(int, const char *, int); void pfctl_addrprefix(char *, struct pf_addr *); int pfctl_kill_src_nodes(int, const char *, int); int pfctl_net_kill_states(int, const char *, int); int pfctl_gateway_kill_states(int, const char *, int); int pfctl_label_kill_states(int, const char *, int); int pfctl_id_kill_states(int, const char *, int); void pfctl_init_options(struct pfctl *); int pfctl_load_options(struct pfctl *); int pfctl_load_limit(struct pfctl *, unsigned int, unsigned int); int pfctl_load_timeout(struct pfctl *, unsigned int, unsigned int); int pfctl_load_debug(struct pfctl *, unsigned int); int pfctl_load_logif(struct pfctl *, char *); int pfctl_load_hostid(struct pfctl *, u_int32_t); int pfctl_load_reassembly(struct pfctl *, u_int32_t); int pfctl_load_syncookies(struct pfctl *, u_int8_t); int pfctl_get_pool(int, struct pfctl_pool *, u_int32_t, u_int32_t, int, char *); void pfctl_print_eth_rule_counters(struct pfctl_eth_rule *, int); void pfctl_print_rule_counters(struct pfctl_rule *, int); int pfctl_show_eth_rules(int, char *, int, enum pfctl_show, char *, int, int); int pfctl_show_rules(int, char *, int, enum pfctl_show, char *, int, int); int pfctl_show_nat(int, char *, int, char *, int); int pfctl_show_src_nodes(int, int); int pfctl_show_states(int, const char *, int); int pfctl_show_status(int, int); int pfctl_show_running(int); int pfctl_show_timeouts(int, int); int pfctl_show_limits(int, int); void pfctl_debug(int, u_int32_t, int); int pfctl_test_altqsupport(int, int); int pfctl_show_anchors(int, int, char *); int pfctl_show_eth_anchors(int, int, char *); int pfctl_ruleset_trans(struct pfctl *, char *, struct pfctl_anchor *, bool); int pfctl_eth_ruleset_trans(struct pfctl *, char *, struct pfctl_eth_anchor *); int pfctl_load_eth_ruleset(struct pfctl *, char *, struct pfctl_eth_ruleset *, int); int pfctl_load_eth_rule(struct pfctl *, char *, struct pfctl_eth_rule *, int); int pfctl_load_ruleset(struct pfctl *, char *, struct pfctl_ruleset *, int, int); int pfctl_load_rule(struct pfctl *, char *, struct pfctl_rule *, int); const char *pfctl_lookup_option(char *, const char * const *); static struct pfctl_anchor_global pf_anchors; struct pfctl_anchor pf_main_anchor; struct pfctl_eth_anchor pf_eth_main_anchor; 
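/*
 * Illustrative sketch only, not part of this change: how a libpfctl
 * consumer could use the new 'nat' member of struct pfctl_kill to
 * remove states by their pre-NAT address, mirroring what
 * "pfctl -k nat -k 192.168.0.1" does below.  The helper name and
 * address are hypothetical; pfctl_kill_states() and struct pfctl_kill
 * are declared in libpfctl.h, and inet_addr() needs <arpa/inet.h>.
 */
#if 0
static void
kill_pre_nat_example(int dev)
{
	struct pfctl_kill kill;
	unsigned int killed;

	memset(&kill, 0, sizeof(kill));
	kill.af = AF_INET;
	kill.nat = true;	/* match against the pre-NAT address */
	kill.src.addr.v.a.addr.v4.s_addr = inet_addr("192.168.0.1");
	memset(&kill.src.addr.v.a.mask, 0xff,
	    sizeof(kill.src.addr.v.a.mask));	/* exact host match */

	if (pfctl_kill_states(dev, &kill, &killed))
		err(1, "pfctl_kill_states");
	fprintf(stderr, "killed %u states\n", killed);
}
#endif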
static struct pfr_buffer skip_b; static const char *clearopt; static char *rulesopt; static const char *showopt; static const char *debugopt; static char *anchoropt; static const char *optiopt = NULL; static const char *pf_device = "/dev/pf"; static char *ifaceopt; static char *tableopt; static const char *tblcmdopt; static int src_node_killers; static char *src_node_kill[2]; static int state_killers; static char *state_kill[2]; int loadopt; int altqsupport; int dev = -1; static int first_title = 1; static int labels = 0; #define INDENT(d, o) do { \ if (o) { \ int i; \ for (i=0; i < d; i++) \ printf(" "); \ } \ } while (0); \ static const struct { const char *name; int index; } pf_limits[] = { { "states", PF_LIMIT_STATES }, { "src-nodes", PF_LIMIT_SRC_NODES }, { "frags", PF_LIMIT_FRAGS }, { "table-entries", PF_LIMIT_TABLE_ENTRIES }, { NULL, 0 } }; struct pf_hint { const char *name; int timeout; }; static const struct pf_hint pf_hint_normal[] = { { "tcp.first", 2 * 60 }, { "tcp.opening", 30 }, { "tcp.established", 24 * 60 * 60 }, { "tcp.closing", 15 * 60 }, { "tcp.finwait", 45 }, { "tcp.closed", 90 }, { "tcp.tsdiff", 30 }, { NULL, 0 } }; static const struct pf_hint pf_hint_satellite[] = { { "tcp.first", 3 * 60 }, { "tcp.opening", 30 + 5 }, { "tcp.established", 24 * 60 * 60 }, { "tcp.closing", 15 * 60 + 5 }, { "tcp.finwait", 45 + 5 }, { "tcp.closed", 90 + 5 }, { "tcp.tsdiff", 60 }, { NULL, 0 } }; static const struct pf_hint pf_hint_conservative[] = { { "tcp.first", 60 * 60 }, { "tcp.opening", 15 * 60 }, { "tcp.established", 5 * 24 * 60 * 60 }, { "tcp.closing", 60 * 60 }, { "tcp.finwait", 10 * 60 }, { "tcp.closed", 3 * 60 }, { "tcp.tsdiff", 60 }, { NULL, 0 } }; static const struct pf_hint pf_hint_aggressive[] = { { "tcp.first", 30 }, { "tcp.opening", 5 }, { "tcp.established", 5 * 60 * 60 }, { "tcp.closing", 60 }, { "tcp.finwait", 30 }, { "tcp.closed", 30 }, { "tcp.tsdiff", 10 }, { NULL, 0 } }; static const struct { const char *name; const struct pf_hint *hint; } pf_hints[] = { { "normal", pf_hint_normal }, { "satellite", pf_hint_satellite }, { "high-latency", pf_hint_satellite }, { "conservative", pf_hint_conservative }, { "aggressive", pf_hint_aggressive }, { NULL, NULL } }; static const char * const clearopt_list[] = { "nat", "queue", "rules", "Sources", "states", "info", "Tables", "osfp", "all", "ethernet", NULL }; static const char * const showopt_list[] = { "ether", "nat", "queue", "rules", "Anchors", "Sources", "states", "info", "Interfaces", "labels", "timeouts", "memory", "Tables", "osfp", "Running", "all", "creatorids", NULL }; static const char * const tblcmdopt_list[] = { "kill", "flush", "add", "delete", "load", "replace", "show", "test", "zero", "expire", NULL }; static const char * const debugopt_list[] = { "none", "urgent", "misc", "loud", NULL }; static const char * const optiopt_list[] = { "none", "basic", "profile", NULL }; void usage(void) { extern char *__progname; fprintf(stderr, "usage: %s [-AdeghMmNnOPqRrvz] [-a anchor] [-D macro=value] [-F modifier]\n" "\t[-f file] [-i interface] [-K host | network]\n" "\t[-k host | network | gateway | label | id] [-o level] [-p device]\n" "\t[-s modifier] [-t table -T command [address ...]] [-x level]\n", __progname); exit(1); } /* * Cache protocol number to name translations. * * Translation is performed a lot e.g., when dumping states and * getprotobynumber is incredibly expensive. 
* * Note from the getprotobynumber(3) manpage: * * These functions use a thread-specific data space; if the data is needed * for future use, it should be copied before any subsequent calls overwrite * it. Only the Internet protocols are currently understood. * * * Consequently we only cache the name and strdup it for safety. * * At the time of writing this comment the last entry in /etc/protocols is: * divert 258 DIVERT # Divert pseudo-protocol [non IANA] */ const char * pfctl_proto2name(int proto) { static const char *pfctl_proto_cache[259]; struct protoent *p; if (proto >= nitems(pfctl_proto_cache)) { p = getprotobynumber(proto); if (p == NULL) { return (NULL); } return (p->p_name); } if (pfctl_proto_cache[proto] == NULL) { p = getprotobynumber(proto); if (p == NULL) { return (NULL); } pfctl_proto_cache[proto] = strdup(p->p_name); } return (pfctl_proto_cache[proto]); } int pfctl_enable(int dev, int opts) { int ret; if ((ret = pfctl_startstop(1)) != 0) { if (ret == EEXIST) errx(1, "pf already enabled"); else if (ret == ESRCH) errx(1, "pfil registeration failed"); else err(1, "DIOCSTART"); } if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf enabled\n"); if (altqsupport && ioctl(dev, DIOCSTARTALTQ)) if (errno != EEXIST) err(1, "DIOCSTARTALTQ"); return (0); } int pfctl_disable(int dev, int opts) { int ret; if ((ret = pfctl_startstop(0)) != 0) { if (ret == ENOENT) errx(1, "pf not enabled"); else err(1, "DIOCSTOP"); } if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf disabled\n"); if (altqsupport && ioctl(dev, DIOCSTOPALTQ)) if (errno != ENOENT) err(1, "DIOCSTOPALTQ"); return (0); } int pfctl_clear_stats(int dev, int opts) { if (ioctl(dev, DIOCCLRSTATUS)) err(1, "DIOCCLRSTATUS"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf: statistics cleared\n"); return (0); } int pfctl_get_skip_ifaces(void) { bzero(&skip_b, sizeof(skip_b)); skip_b.pfrb_type = PFRB_IFACES; for (;;) { pfr_buf_grow(&skip_b, skip_b.pfrb_size); skip_b.pfrb_size = skip_b.pfrb_msize; if (pfi_get_ifaces(NULL, skip_b.pfrb_caddr, &skip_b.pfrb_size)) err(1, "pfi_get_ifaces"); if (skip_b.pfrb_size <= skip_b.pfrb_msize) break; } return (0); } int pfctl_check_skip_ifaces(char *ifname) { struct pfi_kif *p; struct node_host *h = NULL, *n = NULL; PFRB_FOREACH(p, &skip_b) { if (!strcmp(ifname, p->pfik_name) && (p->pfik_flags & PFI_IFLAG_SKIP)) p->pfik_flags &= ~PFI_IFLAG_SKIP; if (!strcmp(ifname, p->pfik_name) && p->pfik_group != NULL) { if ((h = ifa_grouplookup(p->pfik_name, 0)) == NULL) continue; for (n = h; n != NULL; n = n->next) { if (p->pfik_ifp == NULL) continue; if (strncmp(p->pfik_name, ifname, IFNAMSIZ)) continue; p->pfik_flags &= ~PFI_IFLAG_SKIP; } } } return (0); } int pfctl_adjust_skip_ifaces(struct pfctl *pf) { struct pfi_kif *p, *pp; struct node_host *h = NULL, *n = NULL; PFRB_FOREACH(p, &skip_b) { if (p->pfik_group == NULL || !(p->pfik_flags & PFI_IFLAG_SKIP)) continue; pfctl_set_interface_flags(pf, p->pfik_name, PFI_IFLAG_SKIP, 0); if ((h = ifa_grouplookup(p->pfik_name, 0)) == NULL) continue; for (n = h; n != NULL; n = n->next) PFRB_FOREACH(pp, &skip_b) { if (pp->pfik_ifp == NULL) continue; if (strncmp(pp->pfik_name, n->ifname, IFNAMSIZ)) continue; if (!(pp->pfik_flags & PFI_IFLAG_SKIP)) pfctl_set_interface_flags(pf, pp->pfik_name, PFI_IFLAG_SKIP, 1); if (pp->pfik_flags & PFI_IFLAG_SKIP) pp->pfik_flags &= ~PFI_IFLAG_SKIP; } } PFRB_FOREACH(p, &skip_b) { if (p->pfik_ifp == NULL || ! 
(p->pfik_flags & PFI_IFLAG_SKIP)) continue; pfctl_set_interface_flags(pf, p->pfik_name, PFI_IFLAG_SKIP, 0); } return (0); } int pfctl_clear_interface_flags(int dev, int opts) { struct pfioc_iface pi; if ((opts & PF_OPT_NOACTION) == 0) { bzero(&pi, sizeof(pi)); pi.pfiio_flags = PFI_IFLAG_SKIP; if (ioctl(dev, DIOCCLRIFFLAG, &pi)) err(1, "DIOCCLRIFFLAG"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "pf: interface flags reset\n"); } return (0); } int pfctl_flush_eth_rules(int dev, int opts, char *anchorname) { int ret; ret = pfctl_clear_eth_rules(dev, anchorname); if (ret != 0) err(1, "pfctl_clear_eth_rules"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "Ethernet rules cleared\n"); return (ret); } int pfctl_flush_rules(int dev, int opts, char *anchorname) { int ret; ret = pfctl_clear_rules(dev, anchorname); if (ret != 0) err(1, "pfctl_clear_rules"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "rules cleared\n"); return (0); } int pfctl_flush_nat(int dev, int opts, char *anchorname) { int ret; ret = pfctl_clear_nat(dev, anchorname); if (ret != 0) err(1, "pfctl_clear_nat"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "nat cleared\n"); return (0); } int pfctl_clear_altq(int dev, int opts) { struct pfr_buffer t; if (!altqsupport) return (-1); memset(&t, 0, sizeof(t)); t.pfrb_type = PFRB_TRANS; if (pfctl_add_trans(&t, PF_RULESET_ALTQ, "") || pfctl_trans(dev, &t, DIOCXBEGIN, 0) || pfctl_trans(dev, &t, DIOCXCOMMIT, 0)) err(1, "pfctl_clear_altq"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "altq cleared\n"); return (0); } int pfctl_clear_src_nodes(int dev, int opts) { if (ioctl(dev, DIOCCLRSRCNODES)) err(1, "DIOCCLRSRCNODES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "source tracking entries cleared\n"); return (0); } int pfctl_clear_iface_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; unsigned int killed; memset(&kill, 0, sizeof(kill)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if (pfctl_clear_states(dev, &kill, &killed)) err(1, "DIOCCLRSTATES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "%d states cleared\n", killed); return (0); } void pfctl_addrprefix(char *addr, struct pf_addr *mask) { char *p; const char *errstr; int prefix, ret_ga, q, r; struct addrinfo hints, *res; if ((p = strchr(addr, '/')) == NULL) return; *p++ = '\0'; prefix = strtonum(p, 0, 128, &errstr); if (errstr) errx(1, "prefix is %s: %s", errstr, p); bzero(&hints, sizeof(hints)); /* prefix only with numeric addresses */ hints.ai_flags |= AI_NUMERICHOST; if ((ret_ga = getaddrinfo(addr, NULL, &hints, &res))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } if (res->ai_family == AF_INET && prefix > 32) errx(1, "prefix too long for AF_INET"); else if (res->ai_family == AF_INET6 && prefix > 128) errx(1, "prefix too long for AF_INET6"); q = prefix >> 3; r = prefix & 7; switch (res->ai_family) { case AF_INET: bzero(&mask->v4, sizeof(mask->v4)); mask->v4.s_addr = htonl((u_int32_t) (0xffffffffffULL << (32 - prefix))); break; case AF_INET6: bzero(&mask->v6, sizeof(mask->v6)); if (q > 0) memset((void *)&mask->v6, 0xff, q); if (r > 0) *((u_char *)&mask->v6 + q) = (0xff00 >> r) & 0xff; break; } freeaddrinfo(res); } int pfctl_kill_src_nodes(int dev, const char *iface, int opts) { struct pfioc_src_node_kill psnk; struct addrinfo *res[2], *resp[2]; struct sockaddr last_src, last_dst; int killed, sources, dests; int ret_ga; 
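	/*
	 * Resolve the first -K argument with getaddrinfo(3); for each
	 * address it yields, either issue DIOCKILLSRCNODES directly or,
	 * if a second -K argument was given, resolve that too and issue
	 * one ioctl per source/destination pair, summing psnk_killed.
	 */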
killed = sources = dests = 0; memset(&psnk, 0, sizeof(psnk)); memset(&psnk.psnk_src.addr.v.a.mask, 0xff, sizeof(psnk.psnk_src.addr.v.a.mask)); memset(&last_src, 0xff, sizeof(last_src)); memset(&last_dst, 0xff, sizeof(last_dst)); pfctl_addrprefix(src_node_kill[0], &psnk.psnk_src.addr.v.a.mask); if ((ret_ga = getaddrinfo(src_node_kill[0], NULL, NULL, &res[0]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[0] = res[0]; resp[0]; resp[0] = resp[0]->ai_next) { if (resp[0]->ai_addr == NULL) continue; /* We get lots of duplicates. Catch the easy ones */ if (memcmp(&last_src, resp[0]->ai_addr, sizeof(last_src)) == 0) continue; last_src = *(struct sockaddr *)resp[0]->ai_addr; psnk.psnk_af = resp[0]->ai_family; sources++; if (psnk.psnk_af == AF_INET) psnk.psnk_src.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[0]->ai_addr)->sin_addr; else if (psnk.psnk_af == AF_INET6) psnk.psnk_src.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[0]->ai_addr)-> sin6_addr; else errx(1, "Unknown address family %d", psnk.psnk_af); if (src_node_killers > 1) { dests = 0; memset(&psnk.psnk_dst.addr.v.a.mask, 0xff, sizeof(psnk.psnk_dst.addr.v.a.mask)); memset(&last_dst, 0xff, sizeof(last_dst)); pfctl_addrprefix(src_node_kill[1], &psnk.psnk_dst.addr.v.a.mask); if ((ret_ga = getaddrinfo(src_node_kill[1], NULL, NULL, &res[1]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[1] = res[1]; resp[1]; resp[1] = resp[1]->ai_next) { if (resp[1]->ai_addr == NULL) continue; if (psnk.psnk_af != resp[1]->ai_family) continue; if (memcmp(&last_dst, resp[1]->ai_addr, sizeof(last_dst)) == 0) continue; last_dst = *(struct sockaddr *)resp[1]->ai_addr; dests++; if (psnk.psnk_af == AF_INET) psnk.psnk_dst.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[1]-> ai_addr)->sin_addr; else if (psnk.psnk_af == AF_INET6) psnk.psnk_dst.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[1]-> ai_addr)->sin6_addr; else errx(1, "Unknown address family %d", psnk.psnk_af); if (ioctl(dev, DIOCKILLSRCNODES, &psnk)) err(1, "DIOCKILLSRCNODES"); killed += psnk.psnk_killed; } freeaddrinfo(res[1]); } else { if (ioctl(dev, DIOCKILLSRCNODES, &psnk)) err(1, "DIOCKILLSRCNODES"); killed += psnk.psnk_killed; } } freeaddrinfo(res[0]); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d src nodes from %d sources and %d " "destinations\n", killed, sources, dests); return (0); } int pfctl_net_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; struct addrinfo *res[2], *resp[2]; struct sockaddr last_src, last_dst; unsigned int newkilled; int killed, sources, dests; int ret_ga; killed = sources = dests = 0; memset(&kill, 0, sizeof(kill)); memset(&kill.src.addr.v.a.mask, 0xff, sizeof(kill.src.addr.v.a.mask)); memset(&last_src, 0xff, sizeof(last_src)); memset(&last_dst, 0xff, sizeof(last_dst)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); + if (state_killers == 2 && (strcmp(state_kill[0], "nat") == 0)) { + kill.nat = true; + state_kill[0] = state_kill[1]; + state_killers = 1; + } + pfctl_addrprefix(state_kill[0], &kill.src.addr.v.a.mask); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if ((ret_ga = getaddrinfo(state_kill[0], NULL, NULL, &res[0]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[0] = res[0]; resp[0]; resp[0] = resp[0]->ai_next) { if (resp[0]->ai_addr == NULL) continue; /* We get lots of duplicates. 
Catch the easy ones */ if (memcmp(&last_src, resp[0]->ai_addr, sizeof(last_src)) == 0) continue; last_src = *(struct sockaddr *)resp[0]->ai_addr; kill.af = resp[0]->ai_family; sources++; if (kill.af == AF_INET) kill.src.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[0]->ai_addr)->sin_addr; else if (kill.af == AF_INET6) kill.src.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[0]->ai_addr)-> sin6_addr; else errx(1, "Unknown address family %d", kill.af); if (state_killers > 1) { dests = 0; memset(&kill.dst.addr.v.a.mask, 0xff, sizeof(kill.dst.addr.v.a.mask)); memset(&last_dst, 0xff, sizeof(last_dst)); pfctl_addrprefix(state_kill[1], &kill.dst.addr.v.a.mask); if ((ret_ga = getaddrinfo(state_kill[1], NULL, NULL, &res[1]))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp[1] = res[1]; resp[1]; resp[1] = resp[1]->ai_next) { if (resp[1]->ai_addr == NULL) continue; if (kill.af != resp[1]->ai_family) continue; if (memcmp(&last_dst, resp[1]->ai_addr, sizeof(last_dst)) == 0) continue; last_dst = *(struct sockaddr *)resp[1]->ai_addr; dests++; if (kill.af == AF_INET) kill.dst.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp[1]-> ai_addr)->sin_addr; else if (kill.af == AF_INET6) kill.dst.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp[1]-> ai_addr)->sin6_addr; else errx(1, "Unknown address family %d", kill.af); if (pfctl_kill_states(dev, &kill, &newkilled)) err(1, "DIOCKILLSTATES"); killed += newkilled; } freeaddrinfo(res[1]); } else { if (pfctl_kill_states(dev, &kill, &newkilled)) err(1, "DIOCKILLSTATES"); killed += newkilled; } } freeaddrinfo(res[0]); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states from %d sources and %d " "destinations\n", killed, sources, dests); return (0); } int pfctl_gateway_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; struct addrinfo *res, *resp; struct sockaddr last_src; unsigned int newkilled; int killed = 0; int ret_ga; if (state_killers != 2 || (strlen(state_kill[1]) == 0)) { warnx("no gateway specified"); usage(); } memset(&kill, 0, sizeof(kill)); memset(&kill.rt_addr.addr.v.a.mask, 0xff, sizeof(kill.rt_addr.addr.v.a.mask)); memset(&last_src, 0xff, sizeof(last_src)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; pfctl_addrprefix(state_kill[1], &kill.rt_addr.addr.v.a.mask); if ((ret_ga = getaddrinfo(state_kill[1], NULL, NULL, &res))) { errx(1, "getaddrinfo: %s", gai_strerror(ret_ga)); /* NOTREACHED */ } for (resp = res; resp; resp = resp->ai_next) { if (resp->ai_addr == NULL) continue; /* We get lots of duplicates. 
Catch the easy ones */ if (memcmp(&last_src, resp->ai_addr, sizeof(last_src)) == 0) continue; last_src = *(struct sockaddr *)resp->ai_addr; kill.af = resp->ai_family; if (kill.af == AF_INET) kill.rt_addr.addr.v.a.addr.v4 = ((struct sockaddr_in *)resp->ai_addr)->sin_addr; else if (kill.af == AF_INET6) kill.rt_addr.addr.v.a.addr.v6 = ((struct sockaddr_in6 *)resp->ai_addr)-> sin6_addr; else errx(1, "Unknown address family %d", kill.af); if (pfctl_kill_states(dev, &kill, &newkilled)) err(1, "DIOCKILLSTATES"); killed += newkilled; } freeaddrinfo(res); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states\n", killed); return (0); } int pfctl_label_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; unsigned int killed; if (state_killers != 2 || (strlen(state_kill[1]) == 0)) { warnx("no label specified"); usage(); } memset(&kill, 0, sizeof(kill)); if (iface != NULL && strlcpy(kill.ifname, iface, sizeof(kill.ifname)) >= sizeof(kill.ifname)) errx(1, "invalid interface: %s", iface); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if (strlcpy(kill.label, state_kill[1], sizeof(kill.label)) >= sizeof(kill.label)) errx(1, "label too long: %s", state_kill[1]); if (pfctl_kill_states(dev, &kill, &killed)) err(1, "DIOCKILLSTATES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states\n", killed); return (0); } int pfctl_id_kill_states(int dev, const char *iface, int opts) { struct pfctl_kill kill; unsigned int killed; if (state_killers != 2 || (strlen(state_kill[1]) == 0)) { warnx("no id specified"); usage(); } memset(&kill, 0, sizeof(kill)); if (opts & PF_OPT_KILLMATCH) kill.kill_match = true; if ((sscanf(state_kill[1], "%jx/%x", &kill.cmp.id, &kill.cmp.creatorid)) == 2) { } else if ((sscanf(state_kill[1], "%jx", &kill.cmp.id)) == 1) { kill.cmp.creatorid = 0; } else { warnx("wrong id format specified"); usage(); } if (kill.cmp.id == 0) { warnx("cannot kill id 0"); usage(); } if (pfctl_kill_states(dev, &kill, &killed)) err(1, "DIOCKILLSTATES"); if ((opts & PF_OPT_QUIET) == 0) fprintf(stderr, "killed %d states\n", killed); return (0); } int pfctl_get_pool(int dev, struct pfctl_pool *pool, u_int32_t nr, u_int32_t ticket, int r_action, char *anchorname) { struct pfioc_pooladdr pp; struct pf_pooladdr *pa; u_int32_t pnr, mpnr; memset(&pp, 0, sizeof(pp)); memcpy(pp.anchor, anchorname, sizeof(pp.anchor)); pp.r_action = r_action; pp.r_num = nr; pp.ticket = ticket; if (ioctl(dev, DIOCGETADDRS, &pp)) { warn("DIOCGETADDRS"); return (-1); } mpnr = pp.nr; TAILQ_INIT(&pool->list); for (pnr = 0; pnr < mpnr; ++pnr) { pp.nr = pnr; if (ioctl(dev, DIOCGETADDR, &pp)) { warn("DIOCGETADDR"); return (-1); } pa = calloc(1, sizeof(struct pf_pooladdr)); if (pa == NULL) err(1, "calloc"); bcopy(&pp.addr, pa, sizeof(struct pf_pooladdr)); TAILQ_INSERT_TAIL(&pool->list, pa, entries); } return (0); } void pfctl_move_pool(struct pfctl_pool *src, struct pfctl_pool *dst) { struct pf_pooladdr *pa; while ((pa = TAILQ_FIRST(&src->list)) != NULL) { TAILQ_REMOVE(&src->list, pa, entries); TAILQ_INSERT_TAIL(&dst->list, pa, entries); } } void pfctl_clear_pool(struct pfctl_pool *pool) { struct pf_pooladdr *pa; while ((pa = TAILQ_FIRST(&pool->list)) != NULL) { TAILQ_REMOVE(&pool->list, pa, entries); free(pa); } } void pfctl_print_eth_rule_counters(struct pfctl_eth_rule *rule, int opts) { if (opts & PF_OPT_VERBOSE) { printf(" [ Evaluations: %-8llu Packets: %-8llu " "Bytes: %-10llu]\n", (unsigned long long)rule->evaluations, (unsigned long long)(rule->packets[0] + rule->packets[1]), (unsigned long 
long)(rule->bytes[0] + rule->bytes[1])); } if (opts & PF_OPT_VERBOSE2) { char timestr[30]; if (rule->last_active_timestamp != 0) { bcopy(ctime(&rule->last_active_timestamp), timestr, sizeof(timestr)); *strchr(timestr, '\n') = '\0'; } else { snprintf(timestr, sizeof(timestr), "N/A"); } printf(" [ Last Active Time: %s ]\n", timestr); } } void pfctl_print_rule_counters(struct pfctl_rule *rule, int opts) { if (opts & PF_OPT_DEBUG) { const char *t[PF_SKIP_COUNT] = { "i", "d", "f", "p", "sa", "sp", "da", "dp" }; int i; printf(" [ Skip steps: "); for (i = 0; i < PF_SKIP_COUNT; ++i) { if (rule->skip[i].nr == rule->nr + 1) continue; printf("%s=", t[i]); if (rule->skip[i].nr == -1) printf("end "); else printf("%u ", rule->skip[i].nr); } printf("]\n"); printf(" [ queue: qname=%s qid=%u pqname=%s pqid=%u ]\n", rule->qname, rule->qid, rule->pqname, rule->pqid); } if (opts & PF_OPT_VERBOSE) { printf(" [ Evaluations: %-8llu Packets: %-8llu " "Bytes: %-10llu States: %-6ju]\n", (unsigned long long)rule->evaluations, (unsigned long long)(rule->packets[0] + rule->packets[1]), (unsigned long long)(rule->bytes[0] + rule->bytes[1]), (uintmax_t)rule->states_cur); if (!(opts & PF_OPT_DEBUG)) printf(" [ Inserted: uid %u pid %u " "State Creations: %-6ju]\n", (unsigned)rule->cuid, (unsigned)rule->cpid, (uintmax_t)rule->states_tot); } if (opts & PF_OPT_VERBOSE2) { char timestr[30]; if (rule->last_active_timestamp != 0) { bcopy(ctime(&rule->last_active_timestamp), timestr, sizeof(timestr)); *strchr(timestr, '\n') = '\0'; } else { snprintf(timestr, sizeof(timestr), "N/A"); } printf(" [ Last Active Time: %s ]\n", timestr); } } void pfctl_print_title(char *title) { if (!first_title) printf("\n"); first_title = 0; printf("%s\n", title); } int pfctl_show_eth_rules(int dev, char *path, int opts, enum pfctl_show format, char *anchorname, int depth, int wildcard) { char anchor_call[MAXPATHLEN]; struct pfctl_eth_rules_info info; struct pfctl_eth_rule rule; int brace; int dotitle = opts & PF_OPT_SHOWALL; int len = strlen(path); char *npath, *p; /* * Truncate a trailing / and * on an anchorname before searching for * the ruleset, this is syntactic sugar that doesn't actually make it * to the kernel. */ if ((p = strrchr(anchorname, '/')) != NULL && p[1] == '*' && p[2] == '\0') { p[0] = '\0'; } if (anchorname[0] == '/') { if ((npath = calloc(1, MAXPATHLEN)) == NULL) errx(1, "pfctl_rules: calloc"); snprintf(npath, MAXPATHLEN, "%s", anchorname); } else { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", anchorname); else snprintf(&path[len], MAXPATHLEN - len, "%s", anchorname); npath = path; } /* * If this anchor was called with a wildcard path, go through * the rulesets in the anchor rather than the rules. 
*/ if (wildcard && (opts & PF_OPT_RECURSE)) { struct pfctl_eth_rulesets_info ri; u_int32_t mnr, nr; if (pfctl_get_eth_rulesets_info(dev, &ri, npath)) { if (errno == EINVAL) { fprintf(stderr, "Anchor '%s' " "not found.\n", anchorname); } else { warn("DIOCGETETHRULESETS"); return (-1); } } mnr = ri.nr; pfctl_print_eth_rule_counters(&rule, opts); for (nr = 0; nr < mnr; ++nr) { struct pfctl_eth_ruleset_info rs; if (pfctl_get_eth_ruleset(dev, npath, nr, &rs)) err(1, "DIOCGETETHRULESET"); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("anchor \"%s\" all {\n", rs.name); pfctl_show_eth_rules(dev, npath, opts, format, rs.name, depth + 1, 0); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); } if (pfctl_get_eth_rules_info(dev, &info, path)) { warn("DIOCGETETHRULES"); return (-1); } for (int nr = 0; nr < info.nr; nr++) { brace = 0; INDENT(depth, !(opts & PF_OPT_VERBOSE)); if (pfctl_get_eth_rule(dev, nr, info.ticket, path, &rule, opts & PF_OPT_CLRRULECTRS, anchor_call) != 0) { warn("DIOCGETETHRULE"); return (-1); } if (anchor_call[0] && ((((p = strrchr(anchor_call, '_')) != NULL) && (p == anchor_call || *(--p) == '/')) || (opts & PF_OPT_RECURSE))) { brace++; int aclen = strlen(anchor_call); if (anchor_call[aclen - 1] == '*') anchor_call[aclen - 2] = '\0'; } p = &anchor_call[0]; if (dotitle) { pfctl_print_title("ETH RULES:"); dotitle = 0; } print_eth_rule(&rule, anchor_call, opts & (PF_OPT_VERBOSE2 | PF_OPT_DEBUG)); if (brace) printf(" {\n"); else printf("\n"); pfctl_print_eth_rule_counters(&rule, opts); if (brace) { pfctl_show_eth_rules(dev, path, opts, format, p, depth + 1, rule.anchor_wildcard); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } } path[len] = '\0'; return (0); } int pfctl_show_rules(int dev, char *path, int opts, enum pfctl_show format, char *anchorname, int depth, int wildcard) { struct pfctl_rules_info ri; struct pfctl_rule rule; char anchor_call[MAXPATHLEN]; u_int32_t nr, header = 0; int rule_numbers = opts & (PF_OPT_VERBOSE2 | PF_OPT_DEBUG); int numeric = opts & PF_OPT_NUMERIC; int len = strlen(path), ret = 0; char *npath, *p; /* * Truncate a trailing / and * on an anchorname before searching for * the ruleset, this is syntactic sugar that doesn't actually make it * to the kernel. */ if ((p = strrchr(anchorname, '/')) != NULL && p[1] == '*' && p[2] == '\0') { p[0] = '\0'; } if (anchorname[0] == '/') { if ((npath = calloc(1, MAXPATHLEN)) == NULL) errx(1, "pfctl_rules: calloc"); snprintf(npath, MAXPATHLEN, "%s", anchorname); } else { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", anchorname); else snprintf(&path[len], MAXPATHLEN - len, "%s", anchorname); npath = path; } /* * If this anchor was called with a wildcard path, go through * the rulesets in the anchor rather than the rules. 
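	 * (A wildcard path is an anchor reference whose last component
	 * is "*"; it was stripped above, and 'wildcard', set from
	 * rule.anchor_wildcard when recursing, tells us to walk each
	 * child ruleset and print it as a brace block.)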
*/ if (wildcard && (opts & PF_OPT_RECURSE)) { struct pfioc_ruleset prs; u_int32_t mnr, nr; memset(&prs, 0, sizeof(prs)); memcpy(prs.path, npath, sizeof(prs.path)); if (ioctl(dev, DIOCGETRULESETS, &prs)) { if (errno == EINVAL) fprintf(stderr, "Anchor '%s' " "not found.\n", anchorname); else err(1, "DIOCGETRULESETS"); } mnr = prs.nr; pfctl_print_rule_counters(&rule, opts); for (nr = 0; nr < mnr; ++nr) { prs.nr = nr; if (ioctl(dev, DIOCGETRULESET, &prs)) err(1, "DIOCGETRULESET"); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("anchor \"%s\" all {\n", prs.name); pfctl_show_rules(dev, npath, opts, format, prs.name, depth + 1, 0); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); } if (opts & PF_OPT_SHOWALL) { ret = pfctl_get_rules_info(dev, &ri, PF_PASS, path); if (ret != 0) { warn("DIOCGETRULES"); goto error; } header++; } ret = pfctl_get_rules_info(dev, &ri, PF_SCRUB, path); if (ret != 0) { warn("DIOCGETRULES"); goto error; } if (opts & PF_OPT_SHOWALL) { if (format == PFCTL_SHOW_RULES && (ri.nr > 0 || header)) pfctl_print_title("FILTER RULES:"); else if (format == PFCTL_SHOW_LABELS && labels) pfctl_print_title("LABEL COUNTERS:"); } for (nr = 0; nr < ri.nr; ++nr) { if (pfctl_get_clear_rule(dev, nr, ri.ticket, path, PF_SCRUB, &rule, anchor_call, opts & PF_OPT_CLRRULECTRS)) { warn("DIOCGETRULENV"); goto error; } if (pfctl_get_pool(dev, &rule.rpool, nr, ri.ticket, PF_SCRUB, path) != 0) goto error; switch (format) { case PFCTL_SHOW_LABELS: break; case PFCTL_SHOW_RULES: if (rule.label[0][0] && (opts & PF_OPT_SHOWALL)) labels = 1; print_rule(&rule, anchor_call, rule_numbers, numeric); printf("\n"); pfctl_print_rule_counters(&rule, opts); break; case PFCTL_SHOW_NOTHING: break; } pfctl_clear_pool(&rule.rpool); } ret = pfctl_get_rules_info(dev, &ri, PF_PASS, path); if (ret != 0) { warn("DIOCGETRULES"); goto error; } for (nr = 0; nr < ri.nr; ++nr) { if (pfctl_get_clear_rule(dev, nr, ri.ticket, path, PF_PASS, &rule, anchor_call, opts & PF_OPT_CLRRULECTRS)) { warn("DIOCGETRULE"); goto error; } if (pfctl_get_pool(dev, &rule.rpool, nr, ri.ticket, PF_PASS, path) != 0) goto error; switch (format) { case PFCTL_SHOW_LABELS: { bool show = false; int i = 0; while (rule.label[i][0]) { printf("%s ", rule.label[i++]); show = true; } if (show) { printf("%llu %llu %llu %llu" " %llu %llu %llu %ju\n", (unsigned long long)rule.evaluations, (unsigned long long)(rule.packets[0] + rule.packets[1]), (unsigned long long)(rule.bytes[0] + rule.bytes[1]), (unsigned long long)rule.packets[0], (unsigned long long)rule.bytes[0], (unsigned long long)rule.packets[1], (unsigned long long)rule.bytes[1], (uintmax_t)rule.states_tot); } break; } case PFCTL_SHOW_RULES: if (rule.label[0][0] && (opts & PF_OPT_SHOWALL)) labels = 1; INDENT(depth, !(opts & PF_OPT_VERBOSE)); print_rule(&rule, anchor_call, rule_numbers, numeric); /* * If this is a 'unnamed' brace notation * anchor, OR the user has explicitly requested * recursion, print it recursively. */ if (anchor_call[0] && (((p = strrchr(anchor_call, '/')) ? 
p[1] == '_' : anchor_call[0] == '_') || opts & PF_OPT_RECURSE)) { printf(" {\n"); pfctl_print_rule_counters(&rule, opts); pfctl_show_rules(dev, npath, opts, format, anchor_call, depth + 1, rule.anchor_wildcard); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } else { printf("\n"); pfctl_print_rule_counters(&rule, opts); } break; case PFCTL_SHOW_NOTHING: break; } pfctl_clear_pool(&rule.rpool); } error: path[len] = '\0'; return (ret); } int pfctl_show_nat(int dev, char *path, int opts, char *anchorname, int depth) { struct pfctl_rules_info ri; struct pfctl_rule rule; char anchor_call[MAXPATHLEN]; u_int32_t nr; static int nattype[3] = { PF_NAT, PF_RDR, PF_BINAT }; int i, dotitle = opts & PF_OPT_SHOWALL; int brace, ret; int len = strlen(path); char *p; if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", anchorname); else snprintf(&path[len], MAXPATHLEN - len, "%s", anchorname); for (i = 0; i < 3; i++) { ret = pfctl_get_rules_info(dev, &ri, nattype[i], path); if (ret != 0) { warn("DIOCGETRULES"); return (-1); } for (nr = 0; nr < ri.nr; ++nr) { brace = 0; INDENT(depth, !(opts & PF_OPT_VERBOSE)); if (pfctl_get_rule(dev, nr, ri.ticket, path, nattype[i], &rule, anchor_call)) { warn("DIOCGETRULE"); return (-1); } if (pfctl_get_pool(dev, &rule.rpool, nr, ri.ticket, nattype[i], path) != 0) return (-1); if (anchor_call[0] && ((((p = strrchr(anchor_call, '_')) != NULL) && (p == anchor_call || *(--p) == '/')) || (opts & PF_OPT_RECURSE))) { brace++; if ((p = strrchr(anchor_call, '/')) != NULL) p++; else p = &anchor_call[0]; } else p = &anchor_call[0]; if (dotitle) { pfctl_print_title("TRANSLATION RULES:"); dotitle = 0; } print_rule(&rule, anchor_call, opts & PF_OPT_VERBOSE2, opts & PF_OPT_NUMERIC); if (brace) printf(" {\n"); else printf("\n"); pfctl_print_rule_counters(&rule, opts); pfctl_clear_pool(&rule.rpool); if (brace) { pfctl_show_nat(dev, path, opts, p, depth + 1); INDENT(depth, !(opts & PF_OPT_VERBOSE)); printf("}\n"); } } } return (0); } int pfctl_show_src_nodes(int dev, int opts) { struct pfioc_src_nodes psn; struct pf_src_node *p; char *inbuf = NULL, *newinbuf = NULL; unsigned int len = 0; int i; memset(&psn, 0, sizeof(psn)); for (;;) { psn.psn_len = len; if (len) { newinbuf = realloc(inbuf, len); if (newinbuf == NULL) err(1, "realloc"); psn.psn_buf = inbuf = newinbuf; } if (ioctl(dev, DIOCGETSRCNODES, &psn) < 0) { warn("DIOCGETSRCNODES"); free(inbuf); return (-1); } if (psn.psn_len + sizeof(struct pfioc_src_nodes) < len) break; if (len == 0 && psn.psn_len == 0) goto done; if (len == 0 && psn.psn_len != 0) len = psn.psn_len; if (psn.psn_len == 0) goto done; /* no src_nodes */ len *= 2; } p = psn.psn_src_nodes; if (psn.psn_len > 0 && (opts & PF_OPT_SHOWALL)) pfctl_print_title("SOURCE TRACKING NODES:"); for (i = 0; i < psn.psn_len; i += sizeof(*p)) { print_src_node(p, opts); p++; } done: free(inbuf); return (0); } struct pfctl_show_state_arg { int opts; int dotitle; const char *iface; }; static int pfctl_show_state(struct pfctl_state *s, void *arg) { struct pfctl_show_state_arg *a = (struct pfctl_show_state_arg *)arg; if (a->dotitle) { pfctl_print_title("STATES:"); a->dotitle = 0; } print_state(s, a->opts); return (0); } int pfctl_show_states(int dev, const char *iface, int opts) { struct pfctl_show_state_arg arg; struct pfctl_state_filter filter = {}; if (iface != NULL) strncpy(filter.ifname, iface, IFNAMSIZ); arg.opts = opts; arg.dotitle = opts & PF_OPT_SHOWALL; arg.iface = iface; if (pfctl_get_filtered_states_iter(&filter, pfctl_show_state, &arg)) return (-1); return (0); } int 
pfctl_show_status(int dev, int opts) { struct pfctl_status *status; struct pfctl_syncookies cookies; if ((status = pfctl_get_status(dev)) == NULL) { warn("DIOCGETSTATUS"); return (-1); } if (pfctl_get_syncookies(dev, &cookies)) { pfctl_free_status(status); warn("DIOCGETSYNCOOKIES"); return (-1); } if (opts & PF_OPT_SHOWALL) pfctl_print_title("INFO:"); print_status(status, &cookies, opts); pfctl_free_status(status); return (0); } int pfctl_show_running(int dev) { struct pfctl_status *status; int running; if ((status = pfctl_get_status(dev)) == NULL) { warn("DIOCGETSTATUS"); return (-1); } running = status->running; print_running(status); pfctl_free_status(status); return (!running); } int pfctl_show_timeouts(int dev, int opts) { struct pfioc_tm pt; int i; if (opts & PF_OPT_SHOWALL) pfctl_print_title("TIMEOUTS:"); memset(&pt, 0, sizeof(pt)); for (i = 0; pf_timeouts[i].name; i++) { pt.timeout = pf_timeouts[i].timeout; if (ioctl(dev, DIOCGETTIMEOUT, &pt)) err(1, "DIOCGETTIMEOUT"); printf("%-20s %10d", pf_timeouts[i].name, pt.seconds); if (pf_timeouts[i].timeout >= PFTM_ADAPTIVE_START && pf_timeouts[i].timeout <= PFTM_ADAPTIVE_END) printf(" states"); else printf("s"); printf("\n"); } return (0); } int pfctl_show_limits(int dev, int opts) { struct pfioc_limit pl; int i; if (opts & PF_OPT_SHOWALL) pfctl_print_title("LIMITS:"); memset(&pl, 0, sizeof(pl)); for (i = 0; pf_limits[i].name; i++) { pl.index = pf_limits[i].index; if (ioctl(dev, DIOCGETLIMIT, &pl)) err(1, "DIOCGETLIMIT"); printf("%-13s ", pf_limits[i].name); if (pl.limit == UINT_MAX) printf("unlimited\n"); else printf("hard limit %8u\n", pl.limit); } return (0); } void pfctl_show_creators(int opts) { int ret; uint32_t creators[16]; size_t count = nitems(creators); ret = pfctl_get_creatorids(creators, &count); if (ret != 0) errx(ret, "Failed to retrieve creators"); printf("Creator IDs:\n"); for (size_t i = 0; i < count; i++) printf("%08x\n", creators[i]); } /* callbacks for rule/nat/rdr/addr */ int pfctl_add_pool(struct pfctl *pf, struct pfctl_pool *p, sa_family_t af) { struct pf_pooladdr *pa; if ((pf->opts & PF_OPT_NOACTION) == 0) { if (ioctl(pf->dev, DIOCBEGINADDRS, &pf->paddr)) err(1, "DIOCBEGINADDRS"); } pf->paddr.af = af; TAILQ_FOREACH(pa, &p->list, entries) { memcpy(&pf->paddr.addr, pa, sizeof(struct pf_pooladdr)); if ((pf->opts & PF_OPT_NOACTION) == 0) { if (ioctl(pf->dev, DIOCADDADDR, &pf->paddr)) err(1, "DIOCADDADDR"); } } return (0); } int pfctl_append_rule(struct pfctl *pf, struct pfctl_rule *r, const char *anchor_call) { u_int8_t rs_num; struct pfctl_rule *rule; struct pfctl_ruleset *rs; char *p; rs_num = pf_get_ruleset_number(r->action); if (rs_num == PF_RULESET_MAX) errx(1, "Invalid rule type %d", r->action); rs = &pf->anchor->ruleset; if (anchor_call[0] && r->anchor == NULL) { /* * Don't make non-brace anchors part of the main anchor pool. 
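	 * Instead, allocate a standalone anchor for this rule below,
	 * carrying the called anchor's path and name without entering
	 * it into the global anchor tree.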
*/ if ((r->anchor = calloc(1, sizeof(*r->anchor))) == NULL) err(1, "pfctl_append_rule: calloc"); pf_init_ruleset(&r->anchor->ruleset); r->anchor->ruleset.anchor = r->anchor; if (strlcpy(r->anchor->path, anchor_call, sizeof(rule->anchor->path)) >= sizeof(rule->anchor->path)) errx(1, "pfctl_append_rule: strlcpy"); if ((p = strrchr(anchor_call, '/')) != NULL) { if (!strlen(p)) err(1, "pfctl_append_rule: bad anchor name %s", anchor_call); } else p = (char *)anchor_call; if (strlcpy(r->anchor->name, p, sizeof(rule->anchor->name)) >= sizeof(rule->anchor->name)) errx(1, "pfctl_append_rule: strlcpy"); } if ((rule = calloc(1, sizeof(*rule))) == NULL) err(1, "calloc"); bcopy(r, rule, sizeof(*rule)); TAILQ_INIT(&rule->rpool.list); pfctl_move_pool(&r->rpool, &rule->rpool); TAILQ_INSERT_TAIL(rs->rules[rs_num].active.ptr, rule, entries); return (0); } int pfctl_append_eth_rule(struct pfctl *pf, struct pfctl_eth_rule *r, const char *anchor_call) { struct pfctl_eth_rule *rule; struct pfctl_eth_ruleset *rs; char *p; rs = &pf->eanchor->ruleset; if (anchor_call[0] && r->anchor == NULL) { /* * Don't make non-brace anchors part of the main anchor pool. */ if ((r->anchor = calloc(1, sizeof(*r->anchor))) == NULL) err(1, "pfctl_append_rule: calloc"); pf_init_eth_ruleset(&r->anchor->ruleset); r->anchor->ruleset.anchor = r->anchor; if (strlcpy(r->anchor->path, anchor_call, sizeof(rule->anchor->path)) >= sizeof(rule->anchor->path)) errx(1, "pfctl_append_rule: strlcpy"); if ((p = strrchr(anchor_call, '/')) != NULL) { if (!strlen(p)) err(1, "pfctl_append_eth_rule: bad anchor name %s", anchor_call); } else p = (char *)anchor_call; if (strlcpy(r->anchor->name, p, sizeof(rule->anchor->name)) >= sizeof(rule->anchor->name)) errx(1, "pfctl_append_eth_rule: strlcpy"); } if ((rule = calloc(1, sizeof(*rule))) == NULL) err(1, "calloc"); bcopy(r, rule, sizeof(*rule)); TAILQ_INSERT_TAIL(&rs->rules, rule, entries); return (0); } int pfctl_eth_ruleset_trans(struct pfctl *pf, char *path, struct pfctl_eth_anchor *a) { int osize = pf->trans->pfrb_size; if ((pf->loadopt & PFCTL_FLAG_ETH) != 0) { if (pfctl_add_trans(pf->trans, PF_RULESET_ETH, path)) return (1); } if (pfctl_trans(pf->dev, pf->trans, DIOCXBEGIN, osize)) return (5); return (0); } int pfctl_ruleset_trans(struct pfctl *pf, char *path, struct pfctl_anchor *a, bool do_eth) { int osize = pf->trans->pfrb_size; if ((pf->loadopt & PFCTL_FLAG_ETH) != 0 && do_eth) { if (pfctl_add_trans(pf->trans, PF_RULESET_ETH, path)) return (1); } if ((pf->loadopt & PFCTL_FLAG_NAT) != 0) { if (pfctl_add_trans(pf->trans, PF_RULESET_NAT, path) || pfctl_add_trans(pf->trans, PF_RULESET_BINAT, path) || pfctl_add_trans(pf->trans, PF_RULESET_RDR, path)) return (1); } if (a == pf->astack[0] && ((altqsupport && (pf->loadopt & PFCTL_FLAG_ALTQ) != 0))) { if (pfctl_add_trans(pf->trans, PF_RULESET_ALTQ, path)) return (2); } if ((pf->loadopt & PFCTL_FLAG_FILTER) != 0) { if (pfctl_add_trans(pf->trans, PF_RULESET_SCRUB, path) || pfctl_add_trans(pf->trans, PF_RULESET_FILTER, path)) return (3); } if (pf->loadopt & PFCTL_FLAG_TABLE) if (pfctl_add_trans(pf->trans, PF_RULESET_TABLE, path)) return (4); if (pfctl_trans(pf->dev, pf->trans, DIOCXBEGIN, osize)) return (5); return (0); } int pfctl_load_eth_ruleset(struct pfctl *pf, char *path, struct pfctl_eth_ruleset *rs, int depth) { struct pfctl_eth_rule *r; int error, len = strlen(path); int brace = 0; pf->eanchor = rs->anchor; if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", pf->eanchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", 
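/*
 * The recursive loaders share one MAXPATHLEN scratch buffer: each level
 * appends "/<name>" (or the bare name at the root), recurses, and
 * restores its prefix with path[len] = '\0' on the way out.
 */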
pf->eanchor->name); if (depth) { if (TAILQ_FIRST(&rs->rules) != NULL) { brace++; if (pf->opts & PF_OPT_VERBOSE) printf(" {\n"); if ((pf->opts & PF_OPT_NOACTION) == 0 && (error = pfctl_eth_ruleset_trans(pf, path, rs->anchor))) { printf("pfctl_load_eth_rulesets: " "pfctl_eth_ruleset_trans %d\n", error); goto error; } } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); } while ((r = TAILQ_FIRST(&rs->rules)) != NULL) { TAILQ_REMOVE(&rs->rules, r, entries); error = pfctl_load_eth_rule(pf, path, r, depth); if (error) return (error); if (r->anchor) { if ((error = pfctl_load_eth_ruleset(pf, path, &r->anchor->ruleset, depth + 1))) return (error); } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); free(r); } if (brace && pf->opts & PF_OPT_VERBOSE) { INDENT(depth - 1, (pf->opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); error: path[len] = '\0'; return (error); } int pfctl_load_eth_rule(struct pfctl *pf, char *path, struct pfctl_eth_rule *r, int depth) { char *name; char anchor[PF_ANCHOR_NAME_SIZE]; int len = strlen(path); if (strlcpy(anchor, path, sizeof(anchor)) >= sizeof(anchor)) errx(1, "pfctl_load_eth_rule: strlcpy"); if (r->anchor) { if (r->anchor->match) { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", r->anchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", r->anchor->name); name = r->anchor->name; } else name = r->anchor->path; } else name = ""; if ((pf->opts & PF_OPT_NOACTION) == 0) if (pfctl_add_eth_rule(pf->dev, r, anchor, name, pf->eth_ticket)) err(1, "DIOCADDETHRULENV"); if (pf->opts & PF_OPT_VERBOSE) { INDENT(depth, !(pf->opts & PF_OPT_VERBOSE2)); print_eth_rule(r, r->anchor ? r->anchor->name : "", pf->opts & (PF_OPT_VERBOSE2 | PF_OPT_DEBUG)); } path[len] = '\0'; return (0); } int pfctl_load_ruleset(struct pfctl *pf, char *path, struct pfctl_ruleset *rs, int rs_num, int depth) { struct pfctl_rule *r; int error, len = strlen(path); int brace = 0; pf->anchor = rs->anchor; if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", pf->anchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", pf->anchor->name); if (depth) { if (TAILQ_FIRST(rs->rules[rs_num].active.ptr) != NULL) { brace++; if (pf->opts & PF_OPT_VERBOSE) printf(" {\n"); if ((pf->opts & PF_OPT_NOACTION) == 0 && (error = pfctl_ruleset_trans(pf, path, rs->anchor, false))) { printf("pfctl_load_rulesets: " "pfctl_ruleset_trans %d\n", error); goto error; } } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); } if (pf->optimize && rs_num == PF_RULESET_FILTER) pfctl_optimize_ruleset(pf, rs); while ((r = TAILQ_FIRST(rs->rules[rs_num].active.ptr)) != NULL) { TAILQ_REMOVE(rs->rules[rs_num].active.ptr, r, entries); for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) expand_label(r->label[i], PF_RULE_LABEL_SIZE, r); expand_label(r->tagname, PF_TAG_NAME_SIZE, r); expand_label(r->match_tagname, PF_TAG_NAME_SIZE, r); if ((error = pfctl_load_rule(pf, path, r, depth))) goto error; if (r->anchor) { if ((error = pfctl_load_ruleset(pf, path, &r->anchor->ruleset, rs_num, depth + 1))) goto error; } else if (pf->opts & PF_OPT_VERBOSE) printf("\n"); free(r); } if (brace && pf->opts & PF_OPT_VERBOSE) { INDENT(depth - 1, (pf->opts & PF_OPT_VERBOSE)); printf("}\n"); } path[len] = '\0'; return (0); error: path[len] = '\0'; return (error); } int pfctl_load_rule(struct pfctl *pf, char *path, struct pfctl_rule *r, int depth) { u_int8_t rs_num = pf_get_ruleset_number(r->action); char *name; u_int32_t ticket; char anchor[PF_ANCHOR_NAME_SIZE]; int len = strlen(path); int error; bool was_present; /* set up 
anchor before adding to path for anchor_call */ if ((pf->opts & PF_OPT_NOACTION) == 0) ticket = pfctl_get_ticket(pf->trans, rs_num, path); if (strlcpy(anchor, path, sizeof(anchor)) >= sizeof(anchor)) errx(1, "pfctl_load_rule: strlcpy"); if (r->anchor) { if (r->anchor->match) { if (path[0]) snprintf(&path[len], MAXPATHLEN - len, "/%s", r->anchor->name); else snprintf(&path[len], MAXPATHLEN - len, "%s", r->anchor->name); name = r->anchor->name; } else name = r->anchor->path; } else name = ""; was_present = false; if ((pf->opts & PF_OPT_NOACTION) == 0) { if (pfctl_add_pool(pf, &r->rpool, r->af)) return (1); error = pfctl_add_rule(pf->dev, r, anchor, name, ticket, pf->paddr.ticket); switch (error) { case 0: /* things worked, do nothing */ break; case EEXIST: /* an identical rule is already present */ was_present = true; break; default: err(1, "DIOCADDRULENV"); } } if (pf->opts & PF_OPT_VERBOSE) { INDENT(depth, !(pf->opts & PF_OPT_VERBOSE2)); print_rule(r, name, pf->opts & PF_OPT_VERBOSE2, pf->opts & PF_OPT_NUMERIC); if (was_present) printf(" -- rule was already present"); } path[len] = '\0'; pfctl_clear_pool(&r->rpool); return (0); } int pfctl_add_altq(struct pfctl *pf, struct pf_altq *a) { if (altqsupport && (loadopt & PFCTL_FLAG_ALTQ) != 0) { memcpy(&pf->paltq->altq, a, sizeof(struct pf_altq)); if ((pf->opts & PF_OPT_NOACTION) == 0) { if (ioctl(pf->dev, DIOCADDALTQ, pf->paltq)) { if (errno == ENXIO) errx(1, "qtype not configured"); else if (errno == ENODEV) errx(1, "%s: driver does not support " "altq", a->ifname); else err(1, "DIOCADDALTQ"); } } pfaltq_store(&pf->paltq->altq); } return (0); } int pfctl_rules(int dev, char *filename, int opts, int optimize, char *anchorname, struct pfr_buffer *trans) { #define ERR(x) do { warn(x); goto _error; } while(0) #define ERRX(x) do { warnx(x); goto _error; } while(0) struct pfr_buffer *t, buf; struct pfioc_altq pa; struct pfctl pf; struct pfctl_ruleset *rs; struct pfctl_eth_ruleset *ethrs; struct pfr_table trs; char *path; int osize; RB_INIT(&pf_anchors); memset(&pf_main_anchor, 0, sizeof(pf_main_anchor)); pf_init_ruleset(&pf_main_anchor.ruleset); pf_main_anchor.ruleset.anchor = &pf_main_anchor; memset(&pf_eth_main_anchor, 0, sizeof(pf_eth_main_anchor)); pf_init_eth_ruleset(&pf_eth_main_anchor.ruleset); pf_eth_main_anchor.ruleset.anchor = &pf_eth_main_anchor; if (trans == NULL) { bzero(&buf, sizeof(buf)); buf.pfrb_type = PFRB_TRANS; t = &buf; osize = 0; } else { t = trans; osize = t->pfrb_size; } memset(&pa, 0, sizeof(pa)); pa.version = PFIOC_ALTQ_VERSION; memset(&pf, 0, sizeof(pf)); memset(&trs, 0, sizeof(trs)); if ((path = calloc(1, MAXPATHLEN)) == NULL) ERRX("pfctl_rules: calloc"); if (strlcpy(trs.pfrt_anchor, anchorname, sizeof(trs.pfrt_anchor)) >= sizeof(trs.pfrt_anchor)) ERRX("pfctl_rules: strlcpy"); pf.dev = dev; pf.opts = opts; pf.optimize = optimize; pf.loadopt = loadopt; /* non-brace anchor, create without resolving the path */ if ((pf.anchor = calloc(1, sizeof(*pf.anchor))) == NULL) ERRX("pfctl_rules: calloc"); rs = &pf.anchor->ruleset; pf_init_ruleset(rs); rs->anchor = pf.anchor; if (strlcpy(pf.anchor->path, anchorname, sizeof(pf.anchor->path)) >= sizeof(pf.anchor->path)) errx(1, "pfctl_rules: strlcpy"); if (strlcpy(pf.anchor->name, anchorname, sizeof(pf.anchor->name)) >= sizeof(pf.anchor->name)) errx(1, "pfctl_rules: strlcpy"); pf.astack[0] = pf.anchor; pf.asd = 0; if (anchorname[0]) pf.loadopt &= ~PFCTL_FLAG_ALTQ; pf.paltq = &pa; pf.trans = t; pfctl_init_options(&pf); /* Set up ethernet anchor */ if ((pf.eanchor = calloc(1, 
sizeof(*pf.eanchor))) == NULL) ERRX("pfctl_rules: calloc"); if (strlcpy(pf.eanchor->path, anchorname, sizeof(pf.eanchor->path)) >= sizeof(pf.eanchor->path)) errx(1, "pfctl_rules: strlcpy"); if (strlcpy(pf.eanchor->name, anchorname, sizeof(pf.eanchor->name)) >= sizeof(pf.eanchor->name)) errx(1, "pfctl_rules: strlcpy"); ethrs = &pf.eanchor->ruleset; pf_init_eth_ruleset(ethrs); ethrs->anchor = pf.eanchor; pf.eastack[0] = pf.eanchor; if ((opts & PF_OPT_NOACTION) == 0) { /* * XXX For the time being we need to open transactions for * the main ruleset before parsing, because tables are still * loaded at parse time. */ if (pfctl_ruleset_trans(&pf, anchorname, pf.anchor, true)) ERRX("pfctl_rules"); if (pf.loadopt & PFCTL_FLAG_ETH) pf.eth_ticket = pfctl_get_ticket(t, PF_RULESET_ETH, anchorname); if (altqsupport && (pf.loadopt & PFCTL_FLAG_ALTQ)) pa.ticket = pfctl_get_ticket(t, PF_RULESET_ALTQ, anchorname); if (pf.loadopt & PFCTL_FLAG_TABLE) pf.astack[0]->ruleset.tticket = pfctl_get_ticket(t, PF_RULESET_TABLE, anchorname); } if (parse_config(filename, &pf) < 0) { if ((opts & PF_OPT_NOACTION) == 0) ERRX("Syntax error in config file: " "pf rules not loaded"); else goto _error; } if (loadopt & PFCTL_FLAG_OPTION) pfctl_adjust_skip_ifaces(&pf); if ((pf.loadopt & PFCTL_FLAG_FILTER && (pfctl_load_ruleset(&pf, path, rs, PF_RULESET_SCRUB, 0))) || (pf.loadopt & PFCTL_FLAG_ETH && (pfctl_load_eth_ruleset(&pf, path, ethrs, 0))) || (pf.loadopt & PFCTL_FLAG_NAT && (pfctl_load_ruleset(&pf, path, rs, PF_RULESET_NAT, 0) || pfctl_load_ruleset(&pf, path, rs, PF_RULESET_RDR, 0) || pfctl_load_ruleset(&pf, path, rs, PF_RULESET_BINAT, 0))) || (pf.loadopt & PFCTL_FLAG_FILTER && pfctl_load_ruleset(&pf, path, rs, PF_RULESET_FILTER, 0))) { if ((opts & PF_OPT_NOACTION) == 0) ERRX("Unable to load rules into kernel"); else goto _error; } if ((altqsupport && (pf.loadopt & PFCTL_FLAG_ALTQ) != 0)) if (check_commit_altq(dev, opts) != 0) ERRX("errors in altq config"); /* process "load anchor" directives */ if (!anchorname[0]) if (pfctl_load_anchors(dev, &pf, t) == -1) ERRX("load anchors"); if (trans == NULL && (opts & PF_OPT_NOACTION) == 0) { if (!anchorname[0]) if (pfctl_load_options(&pf)) goto _error; if (pfctl_trans(dev, t, DIOCXCOMMIT, osize)) ERR("DIOCXCOMMIT"); } free(path); return (0); _error: if (trans == NULL) { /* main ruleset */ if ((opts & PF_OPT_NOACTION) == 0) if (pfctl_trans(dev, t, DIOCXROLLBACK, osize)) err(1, "DIOCXROLLBACK"); exit(1); } else { /* sub ruleset */ free(path); return (-1); } #undef ERR #undef ERRX } FILE * pfctl_fopen(const char *name, const char *mode) { struct stat st; FILE *fp; fp = fopen(name, mode); if (fp == NULL) return (NULL); if (fstat(fileno(fp), &st)) { fclose(fp); return (NULL); } if (S_ISDIR(st.st_mode)) { fclose(fp); errno = EISDIR; return (NULL); } return (fp); } void pfctl_init_options(struct pfctl *pf) { pf->timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; pf->timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; pf->timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; pf->timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; pf->timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; pf->timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; pf->timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; pf->timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; pf->timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; pf->timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; pf->timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; pf->timeout[PFTM_OTHER_FIRST_PACKET] = 
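/*
 * These defaults mirror the kernel's compile-time PFTM_*_VAL and *_HIWAT
 * constants; pfctl_load_options() pushes each one to the kernel unless
 * -m (merge) was given and the option was never set explicitly.
 */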
PFTM_OTHER_FIRST_PACKET_VAL; pf->timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; pf->timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; pf->timeout[PFTM_FRAG] = PFTM_FRAG_VAL; pf->timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; pf->timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; pf->timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; pf->timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; pf->timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; pf->limit[PF_LIMIT_STATES] = PFSTATE_HIWAT; pf->limit[PF_LIMIT_FRAGS] = PFFRAG_FRENT_HIWAT; pf->limit[PF_LIMIT_SRC_NODES] = PFSNODE_HIWAT; pf->limit[PF_LIMIT_TABLE_ENTRIES] = PFR_KENTRY_HIWAT; pf->debug = PF_DEBUG_URGENT; pf->reassemble = 0; pf->syncookies = false; pf->syncookieswat[0] = PF_SYNCOOKIES_LOWATPCT; pf->syncookieswat[1] = PF_SYNCOOKIES_HIWATPCT; } int pfctl_load_options(struct pfctl *pf) { int i, error = 0; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); /* load limits */ for (i = 0; i < PF_LIMIT_MAX; i++) { if ((pf->opts & PF_OPT_MERGE) && !pf->limit_set[i]) continue; if (pfctl_load_limit(pf, i, pf->limit[i])) error = 1; } /* * If we've set the limit, but haven't explicitly set adaptive * timeouts, do it now with a start of 60% and end of 120%. */ if (pf->limit_set[PF_LIMIT_STATES] && !pf->timeout_set[PFTM_ADAPTIVE_START] && !pf->timeout_set[PFTM_ADAPTIVE_END]) { pf->timeout[PFTM_ADAPTIVE_START] = (pf->limit[PF_LIMIT_STATES] / 10) * 6; pf->timeout_set[PFTM_ADAPTIVE_START] = 1; pf->timeout[PFTM_ADAPTIVE_END] = (pf->limit[PF_LIMIT_STATES] / 10) * 12; pf->timeout_set[PFTM_ADAPTIVE_END] = 1; } /* load timeouts */ for (i = 0; i < PFTM_MAX; i++) { if ((pf->opts & PF_OPT_MERGE) && !pf->timeout_set[i]) continue; if (pfctl_load_timeout(pf, i, pf->timeout[i])) error = 1; } /* load debug */ if (!(pf->opts & PF_OPT_MERGE) || pf->debug_set) if (pfctl_load_debug(pf, pf->debug)) error = 1; /* load logif */ if (!(pf->opts & PF_OPT_MERGE) || pf->ifname_set) if (pfctl_load_logif(pf, pf->ifname)) error = 1; /* load hostid */ if (!(pf->opts & PF_OPT_MERGE) || pf->hostid_set) if (pfctl_load_hostid(pf, pf->hostid)) error = 1; /* load reassembly settings */ if (!(pf->opts & PF_OPT_MERGE) || pf->reass_set) if (pfctl_load_reassembly(pf, pf->reassemble)) error = 1; /* load keepcounters */ if (pfctl_set_keepcounters(pf->dev, pf->keep_counters)) error = 1; /* load syncookies settings */ if (pfctl_load_syncookies(pf, pf->syncookies)) error = 1; return (error); } int pfctl_set_limit(struct pfctl *pf, const char *opt, unsigned int limit) { int i; for (i = 0; pf_limits[i].name; i++) { if (strcasecmp(opt, pf_limits[i].name) == 0) { pf->limit[pf_limits[i].index] = limit; pf->limit_set[pf_limits[i].index] = 1; break; } } if (pf_limits[i].name == NULL) { warnx("Bad pool name."); return (1); } if (pf->opts & PF_OPT_VERBOSE) printf("set limit %s %d\n", opt, limit); return (0); } int pfctl_load_limit(struct pfctl *pf, unsigned int index, unsigned int limit) { struct pfioc_limit pl; memset(&pl, 0, sizeof(pl)); pl.index = index; pl.limit = limit; if (ioctl(pf->dev, DIOCSETLIMIT, &pl)) { if (errno == EBUSY) warnx("Current pool size exceeds requested hard limit"); else warnx("DIOCSETLIMIT"); return (1); } return (0); } int pfctl_set_timeout(struct pfctl *pf, const char *opt, int seconds, int quiet) { int i; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); for (i = 0; pf_timeouts[i].name; i++) { if (strcasecmp(opt, pf_timeouts[i].name) == 0) { pf->timeout[pf_timeouts[i].timeout] = seconds; pf->timeout_set[pf_timeouts[i].timeout] = 1; break; } } if (pf_timeouts[i].name == NULL) { 
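/* Ran into the NULL-name sentinel: opt matched no pf_timeouts[] entry. */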
warnx("Bad timeout name."); return (1); } if (pf->opts & PF_OPT_VERBOSE && ! quiet) printf("set timeout %s %d\n", opt, seconds); return (0); } int pfctl_load_timeout(struct pfctl *pf, unsigned int timeout, unsigned int seconds) { struct pfioc_tm pt; memset(&pt, 0, sizeof(pt)); pt.timeout = timeout; pt.seconds = seconds; if (ioctl(pf->dev, DIOCSETTIMEOUT, &pt)) { warnx("DIOCSETTIMEOUT"); return (1); } return (0); } int pfctl_set_reassembly(struct pfctl *pf, int on, int nodf) { if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); pf->reass_set = 1; if (on) { pf->reassemble = PF_REASS_ENABLED; if (nodf) pf->reassemble |= PF_REASS_NODF; } else { pf->reassemble = 0; } if (pf->opts & PF_OPT_VERBOSE) printf("set reassemble %s %s\n", on ? "yes" : "no", nodf ? "no-df" : ""); return (0); } int pfctl_set_optimization(struct pfctl *pf, const char *opt) { const struct pf_hint *hint; int i, r; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); for (i = 0; pf_hints[i].name; i++) if (strcasecmp(opt, pf_hints[i].name) == 0) break; hint = pf_hints[i].hint; if (hint == NULL) { warnx("invalid state timeouts optimization"); return (1); } for (i = 0; hint[i].name; i++) if ((r = pfctl_set_timeout(pf, hint[i].name, hint[i].timeout, 1))) return (r); if (pf->opts & PF_OPT_VERBOSE) printf("set optimization %s\n", opt); return (0); } int pfctl_set_logif(struct pfctl *pf, char *ifname) { if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); if (!strcmp(ifname, "none")) { free(pf->ifname); pf->ifname = NULL; } else { pf->ifname = strdup(ifname); if (!pf->ifname) errx(1, "pfctl_set_logif: strdup"); } pf->ifname_set = 1; if (pf->opts & PF_OPT_VERBOSE) printf("set loginterface %s\n", ifname); return (0); } int pfctl_load_logif(struct pfctl *pf, char *ifname) { struct pfioc_if pi; memset(&pi, 0, sizeof(pi)); if (ifname && strlcpy(pi.ifname, ifname, sizeof(pi.ifname)) >= sizeof(pi.ifname)) { warnx("pfctl_load_logif: strlcpy"); return (1); } if (ioctl(pf->dev, DIOCSETSTATUSIF, &pi)) { warnx("DIOCSETSTATUSIF"); return (1); } return (0); } int pfctl_set_hostid(struct pfctl *pf, u_int32_t hostid) { if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); HTONL(hostid); pf->hostid = hostid; pf->hostid_set = 1; if (pf->opts & PF_OPT_VERBOSE) printf("set hostid 0x%08x\n", ntohl(hostid)); return (0); } int pfctl_load_hostid(struct pfctl *pf, u_int32_t hostid) { if (ioctl(dev, DIOCSETHOSTID, &hostid)) { warnx("DIOCSETHOSTID"); return (1); } return (0); } int pfctl_load_reassembly(struct pfctl *pf, u_int32_t reassembly) { if (ioctl(dev, DIOCSETREASS, &reassembly)) { warnx("DIOCSETREASS"); return (1); } return (0); } int pfctl_load_syncookies(struct pfctl *pf, u_int8_t val) { struct pfctl_syncookies cookies; bzero(&cookies, sizeof(cookies)); cookies.mode = val; cookies.lowwater = pf->syncookieswat[0]; cookies.highwater = pf->syncookieswat[1]; if (pfctl_set_syncookies(dev, &cookies)) { warnx("DIOCSETSYNCOOKIES"); return (1); } return (0); } int pfctl_cfg_syncookies(struct pfctl *pf, uint8_t val, struct pfctl_watermarks *w) { if (val != PF_SYNCOOKIES_ADAPTIVE && w != NULL) { warnx("syncookies start/end only apply to adaptive"); return (1); } if (val == PF_SYNCOOKIES_ADAPTIVE && w != NULL) { if (!w->hi) w->hi = PF_SYNCOOKIES_HIWATPCT; if (!w->lo) w->lo = w->hi / 2; if (w->lo >= w->hi) { warnx("start must be higher than end"); return (1); } pf->syncookieswat[0] = w->lo; pf->syncookieswat[1] = w->hi; pf->syncookieswat_set = 1; } if (pf->opts & PF_OPT_VERBOSE) { if (val == PF_SYNCOOKIES_NEVER) printf("set syncookies never\n"); else if (val == 
PF_SYNCOOKIES_ALWAYS) printf("set syncookies always\n"); else if (val == PF_SYNCOOKIES_ADAPTIVE) { if (pf->syncookieswat_set) printf("set syncookies adaptive (start %u%%, " "end %u%%)\n", pf->syncookieswat[1], pf->syncookieswat[0]); else printf("set syncookies adaptive\n"); } else { /* cannot happen */ warnx("king bula ate all syncookies"); return (1); } } pf->syncookies = val; return (0); } int pfctl_set_debug(struct pfctl *pf, char *d) { u_int32_t level; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); if (!strcmp(d, "none")) pf->debug = PF_DEBUG_NONE; else if (!strcmp(d, "urgent")) pf->debug = PF_DEBUG_URGENT; else if (!strcmp(d, "misc")) pf->debug = PF_DEBUG_MISC; else if (!strcmp(d, "loud")) pf->debug = PF_DEBUG_NOISY; else { warnx("unknown debug level \"%s\"", d); return (-1); } pf->debug_set = 1; level = pf->debug; if ((pf->opts & PF_OPT_NOACTION) == 0) if (ioctl(dev, DIOCSETDEBUG, &level)) err(1, "DIOCSETDEBUG"); if (pf->opts & PF_OPT_VERBOSE) printf("set debug %s\n", d); return (0); } int pfctl_load_debug(struct pfctl *pf, unsigned int level) { if (ioctl(pf->dev, DIOCSETDEBUG, &level)) { warnx("DIOCSETDEBUG"); return (1); } return (0); } int pfctl_set_interface_flags(struct pfctl *pf, char *ifname, int flags, int how) { struct pfioc_iface pi; struct node_host *h = NULL, *n = NULL; if ((loadopt & PFCTL_FLAG_OPTION) == 0) return (0); bzero(&pi, sizeof(pi)); pi.pfiio_flags = flags; /* Make sure our cache matches the kernel. If we set or clear the flag * for a group this applies to all members. */ h = ifa_grouplookup(ifname, 0); for (n = h; n != NULL; n = n->next) pfctl_set_interface_flags(pf, n->ifname, flags, how); if (strlcpy(pi.pfiio_name, ifname, sizeof(pi.pfiio_name)) >= sizeof(pi.pfiio_name)) errx(1, "pfctl_set_interface_flags: strlcpy"); if ((pf->opts & PF_OPT_NOACTION) == 0) { if (how == 0) { if (ioctl(pf->dev, DIOCCLRIFFLAG, &pi)) err(1, "DIOCCLRIFFLAG"); } else { if (ioctl(pf->dev, DIOCSETIFFLAG, &pi)) err(1, "DIOCSETIFFLAG"); pfctl_check_skip_ifaces(ifname); } } return (0); } void pfctl_debug(int dev, u_int32_t level, int opts) { if (ioctl(dev, DIOCSETDEBUG, &level)) err(1, "DIOCSETDEBUG"); if ((opts & PF_OPT_QUIET) == 0) { fprintf(stderr, "debug level set to '"); switch (level) { case PF_DEBUG_NONE: fprintf(stderr, "none"); break; case PF_DEBUG_URGENT: fprintf(stderr, "urgent"); break; case PF_DEBUG_MISC: fprintf(stderr, "misc"); break; case PF_DEBUG_NOISY: fprintf(stderr, "loud"); break; default: fprintf(stderr, "<invalid>"); break; } fprintf(stderr, "'\n"); } } int pfctl_test_altqsupport(int dev, int opts) { struct pfioc_altq pa; pa.version = PFIOC_ALTQ_VERSION; if (ioctl(dev, DIOCGETALTQS, &pa)) { if (errno == ENODEV) { if (opts & PF_OPT_VERBOSE) fprintf(stderr, "No ALTQ support in kernel\n" "ALTQ related functions disabled\n"); return (0); } else err(1, "DIOCGETALTQS"); } return (1); } int pfctl_show_anchors(int dev, int opts, char *anchorname) { struct pfioc_ruleset pr; u_int32_t mnr, nr; memset(&pr, 0, sizeof(pr)); memcpy(pr.path, anchorname, sizeof(pr.path)); if (ioctl(dev, DIOCGETRULESETS, &pr)) { if (errno == EINVAL) fprintf(stderr, "Anchor '%s' not found.\n", anchorname); else err(1, "DIOCGETRULESETS"); return (-1); } mnr = pr.nr; for (nr = 0; nr < mnr; ++nr) { char sub[MAXPATHLEN]; pr.nr = nr; if (ioctl(dev, DIOCGETRULESET, &pr)) err(1, "DIOCGETRULESET"); if (!strcmp(pr.name, PF_RESERVED_ANCHOR)) continue; sub[0] = 0; if (pr.path[0]) { strlcat(sub, pr.path, sizeof(sub)); strlcat(sub, "/", sizeof(sub)); } strlcat(sub, pr.name, sizeof(sub)); if (sub[0] != '_' || (opts 
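/* Names starting with '_' are internal anchors; list them only with -v. */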
& PF_OPT_VERBOSE)) printf(" %s\n", sub); if ((opts & PF_OPT_VERBOSE) && pfctl_show_anchors(dev, opts, sub)) return (-1); } return (0); } int pfctl_show_eth_anchors(int dev, int opts, char *anchorname) { struct pfctl_eth_rulesets_info ri; struct pfctl_eth_ruleset_info rs; int ret; if ((ret = pfctl_get_eth_rulesets_info(dev, &ri, anchorname)) != 0) { if (ret == ENOENT) fprintf(stderr, "Anchor '%s' not found.\n", anchorname); else err(1, "DIOCGETETHRULESETS"); return (-1); } for (int nr = 0; nr < ri.nr; nr++) { char sub[MAXPATHLEN]; if (pfctl_get_eth_ruleset(dev, anchorname, nr, &rs) != 0) err(1, "DIOCGETETHRULESET"); if (!strcmp(rs.name, PF_RESERVED_ANCHOR)) continue; sub[0] = 0; if (rs.path[0]) { strlcat(sub, rs.path, sizeof(sub)); strlcat(sub, "/", sizeof(sub)); } strlcat(sub, rs.name, sizeof(sub)); if (sub[0] != '_' || (opts & PF_OPT_VERBOSE)) printf(" %s\n", sub); if ((opts & PF_OPT_VERBOSE) && pfctl_show_eth_anchors(dev, opts, sub)) return (-1); } return (0); } const char * pfctl_lookup_option(char *cmd, const char * const *list) { if (cmd != NULL && *cmd) for (; *list; list++) if (!strncmp(cmd, *list, strlen(cmd))) return (*list); return (NULL); } int main(int argc, char *argv[]) { int error = 0; int ch; int mode = O_RDONLY; int opts = 0; int optimize = PF_OPTIMIZE_BASIC; char anchorname[MAXPATHLEN]; char *path; if (argc < 2) usage(); while ((ch = getopt(argc, argv, "a:AdD:eqf:F:ghi:k:K:mMnNOo:Pp:rRs:t:T:vx:z")) != -1) { switch (ch) { case 'a': anchoropt = optarg; break; case 'd': opts |= PF_OPT_DISABLE; mode = O_RDWR; break; case 'D': if (pfctl_cmdline_symset(optarg) < 0) warnx("could not parse macro definition %s", optarg); break; case 'e': opts |= PF_OPT_ENABLE; mode = O_RDWR; break; case 'q': opts |= PF_OPT_QUIET; break; case 'F': clearopt = pfctl_lookup_option(optarg, clearopt_list); if (clearopt == NULL) { warnx("Unknown flush modifier '%s'", optarg); usage(); } mode = O_RDWR; break; case 'i': ifaceopt = optarg; break; case 'k': if (state_killers >= 2) { warnx("can only specify -k twice"); usage(); /* NOTREACHED */ } state_kill[state_killers++] = optarg; mode = O_RDWR; break; case 'K': if (src_node_killers >= 2) { warnx("can only specify -K twice"); usage(); /* NOTREACHED */ } src_node_kill[src_node_killers++] = optarg; mode = O_RDWR; break; case 'm': opts |= PF_OPT_MERGE; break; case 'M': opts |= PF_OPT_KILLMATCH; break; case 'n': opts |= PF_OPT_NOACTION; break; case 'N': loadopt |= PFCTL_FLAG_NAT; break; case 'r': opts |= PF_OPT_USEDNS; break; case 'f': rulesopt = optarg; mode = O_RDWR; break; case 'g': opts |= PF_OPT_DEBUG; break; case 'A': loadopt |= PFCTL_FLAG_ALTQ; break; case 'R': loadopt |= PFCTL_FLAG_FILTER; break; case 'o': optiopt = pfctl_lookup_option(optarg, optiopt_list); if (optiopt == NULL) { warnx("Unknown optimization '%s'", optarg); usage(); } opts |= PF_OPT_OPTIMIZE; break; case 'O': loadopt |= PFCTL_FLAG_OPTION; break; case 'p': pf_device = optarg; break; case 'P': opts |= PF_OPT_NUMERIC; break; case 's': showopt = pfctl_lookup_option(optarg, showopt_list); if (showopt == NULL) { warnx("Unknown show modifier '%s'", optarg); usage(); } break; case 't': tableopt = optarg; break; case 'T': tblcmdopt = pfctl_lookup_option(optarg, tblcmdopt_list); if (tblcmdopt == NULL) { warnx("Unknown table command '%s'", optarg); usage(); } break; case 'v': if (opts & PF_OPT_VERBOSE) opts |= PF_OPT_VERBOSE2; opts |= PF_OPT_VERBOSE; break; case 'x': debugopt = pfctl_lookup_option(optarg, debugopt_list); if (debugopt == NULL) { warnx("Unknown debug level '%s'", optarg); usage(); 
} mode = O_RDWR; break; case 'z': opts |= PF_OPT_CLRRULECTRS; mode = O_RDWR; break; case 'h': /* FALLTHROUGH */ default: usage(); /* NOTREACHED */ } } if (tblcmdopt != NULL) { argc -= optind; argv += optind; ch = *tblcmdopt; if (ch == 'l') { loadopt |= PFCTL_FLAG_TABLE; tblcmdopt = NULL; } else mode = strchr("acdefkrz", ch) ? O_RDWR : O_RDONLY; } else if (argc != optind) { warnx("unknown command line argument: %s ...", argv[optind]); usage(); /* NOTREACHED */ } if (loadopt == 0) loadopt = ~0; if ((path = calloc(1, MAXPATHLEN)) == NULL) errx(1, "pfctl: calloc"); memset(anchorname, 0, sizeof(anchorname)); if (anchoropt != NULL) { int len = strlen(anchoropt); if (len >= 1 && anchoropt[len - 1] == '*') { if (len >= 2 && anchoropt[len - 2] == '/') anchoropt[len - 2] = '\0'; else anchoropt[len - 1] = '\0'; opts |= PF_OPT_RECURSE; } if (strlcpy(anchorname, anchoropt, sizeof(anchorname)) >= sizeof(anchorname)) errx(1, "anchor name '%s' too long", anchoropt); loadopt &= PFCTL_FLAG_FILTER|PFCTL_FLAG_NAT|PFCTL_FLAG_TABLE|PFCTL_FLAG_ETH; } if ((opts & PF_OPT_NOACTION) == 0) { dev = open(pf_device, mode); if (dev == -1) err(1, "%s", pf_device); altqsupport = pfctl_test_altqsupport(dev, opts); } else { dev = open(pf_device, O_RDONLY); if (dev >= 0) opts |= PF_OPT_DUMMYACTION; /* turn off options */ opts &= ~ (PF_OPT_DISABLE | PF_OPT_ENABLE); clearopt = showopt = debugopt = NULL; #if !defined(ENABLE_ALTQ) altqsupport = 0; #else altqsupport = 1; #endif } if (opts & PF_OPT_DISABLE) if (pfctl_disable(dev, opts)) error = 1; if (showopt != NULL) { switch (*showopt) { case 'A': pfctl_show_anchors(dev, opts, anchorname); if (opts & PF_OPT_VERBOSE2) printf("Ethernet:\n"); pfctl_show_eth_anchors(dev, opts, anchorname); break; case 'r': pfctl_load_fingerprints(dev, opts); pfctl_show_rules(dev, path, opts, PFCTL_SHOW_RULES, anchorname, 0, 0); break; case 'l': pfctl_load_fingerprints(dev, opts); pfctl_show_rules(dev, path, opts, PFCTL_SHOW_LABELS, anchorname, 0, 0); break; case 'n': pfctl_load_fingerprints(dev, opts); pfctl_show_nat(dev, path, opts, anchorname, 0); break; case 'q': pfctl_show_altq(dev, ifaceopt, opts, opts & PF_OPT_VERBOSE2); break; case 's': pfctl_show_states(dev, ifaceopt, opts); break; case 'S': pfctl_show_src_nodes(dev, opts); break; case 'i': pfctl_show_status(dev, opts); break; case 'R': error = pfctl_show_running(dev); break; case 't': pfctl_show_timeouts(dev, opts); break; case 'm': pfctl_show_limits(dev, opts); break; case 'e': pfctl_show_eth_rules(dev, path, opts, 0, anchorname, 0, 0); break; case 'a': opts |= PF_OPT_SHOWALL; pfctl_load_fingerprints(dev, opts); pfctl_show_eth_rules(dev, path, opts, 0, anchorname, 0, 0); pfctl_show_nat(dev, path, opts, anchorname, 0); pfctl_show_rules(dev, path, opts, 0, anchorname, 0, 0); pfctl_show_altq(dev, ifaceopt, opts, 0); pfctl_show_states(dev, ifaceopt, opts); pfctl_show_src_nodes(dev, opts); pfctl_show_status(dev, opts); pfctl_show_rules(dev, path, opts, 1, anchorname, 0, 0); pfctl_show_timeouts(dev, opts); pfctl_show_limits(dev, opts); pfctl_show_tables(anchorname, opts); pfctl_show_fingerprints(opts); break; case 'T': pfctl_show_tables(anchorname, opts); break; case 'o': pfctl_load_fingerprints(dev, opts); pfctl_show_fingerprints(opts); break; case 'I': pfctl_show_ifaces(ifaceopt, opts); break; case 'c': pfctl_show_creators(opts); break; } } if ((opts & PF_OPT_CLRRULECTRS) && showopt == NULL) { pfctl_show_eth_rules(dev, path, opts, PFCTL_SHOW_NOTHING, anchorname, 0, 0); pfctl_show_rules(dev, path, opts, PFCTL_SHOW_NOTHING, anchorname, 0, 0); } 
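/* -F <modifier>: flush the requested subsystem(s). */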
if (clearopt != NULL) { if (anchorname[0] == '_' || strstr(anchorname, "/_") != NULL) errx(1, "anchor names beginning with '_' cannot " "be modified from the command line"); switch (*clearopt) { case 'e': pfctl_flush_eth_rules(dev, opts, anchorname); break; case 'r': pfctl_flush_rules(dev, opts, anchorname); break; case 'n': pfctl_flush_nat(dev, opts, anchorname); break; case 'q': pfctl_clear_altq(dev, opts); break; case 's': pfctl_clear_iface_states(dev, ifaceopt, opts); break; case 'S': pfctl_clear_src_nodes(dev, opts); break; case 'i': pfctl_clear_stats(dev, opts); break; case 'a': pfctl_flush_eth_rules(dev, opts, anchorname); pfctl_flush_rules(dev, opts, anchorname); pfctl_flush_nat(dev, opts, anchorname); pfctl_clear_tables(anchorname, opts); if (!*anchorname) { pfctl_clear_altq(dev, opts); pfctl_clear_iface_states(dev, ifaceopt, opts); pfctl_clear_src_nodes(dev, opts); pfctl_clear_stats(dev, opts); pfctl_clear_fingerprints(dev, opts); pfctl_clear_interface_flags(dev, opts); } break; case 'o': pfctl_clear_fingerprints(dev, opts); break; case 'T': pfctl_clear_tables(anchorname, opts); break; } } if (state_killers) { if (!strcmp(state_kill[0], "label")) pfctl_label_kill_states(dev, ifaceopt, opts); else if (!strcmp(state_kill[0], "id")) pfctl_id_kill_states(dev, ifaceopt, opts); else if (!strcmp(state_kill[0], "gateway")) pfctl_gateway_kill_states(dev, ifaceopt, opts); else pfctl_net_kill_states(dev, ifaceopt, opts); } if (src_node_killers) pfctl_kill_src_nodes(dev, ifaceopt, opts); if (tblcmdopt != NULL) { error = pfctl_command_tables(argc, argv, tableopt, tblcmdopt, rulesopt, anchorname, opts); rulesopt = NULL; } if (optiopt != NULL) { switch (*optiopt) { case 'n': optimize = 0; break; case 'b': optimize |= PF_OPTIMIZE_BASIC; break; case 'o': case 'p': optimize |= PF_OPTIMIZE_PROFILE; break; } } if ((rulesopt != NULL) && (loadopt & PFCTL_FLAG_OPTION) && !anchorname[0] && !(opts & PF_OPT_NOACTION)) if (pfctl_get_skip_ifaces()) error = 1; if (rulesopt != NULL && !(opts & (PF_OPT_MERGE|PF_OPT_NOACTION)) && !anchorname[0] && (loadopt & PFCTL_FLAG_OPTION)) if (pfctl_file_fingerprints(dev, opts, PF_OSFP_FILE)) error = 1; if (rulesopt != NULL) { if (anchorname[0] == '_' || strstr(anchorname, "/_") != NULL) errx(1, "anchor names beginning with '_' cannot " "be modified from the command line"); if (pfctl_rules(dev, rulesopt, opts, optimize, anchorname, NULL)) error = 1; else if (!(opts & PF_OPT_NOACTION) && (loadopt & PFCTL_FLAG_TABLE)) warn_namespace_collision(NULL); } if (opts & PF_OPT_ENABLE) if (pfctl_enable(dev, opts)) error = 1; if (debugopt != NULL) { switch (*debugopt) { case 'n': pfctl_debug(dev, PF_DEBUG_NONE, opts); break; case 'u': pfctl_debug(dev, PF_DEBUG_URGENT, opts); break; case 'm': pfctl_debug(dev, PF_DEBUG_MISC, opts); break; case 'l': pfctl_debug(dev, PF_DEBUG_NOISY, opts); break; } } exit(error); } diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h index b2aa1c450c50..6a5f8761755d 100644 --- a/sys/net/pfvar.h +++ b/sys/net/pfvar.h @@ -1,2533 +1,2534 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $OpenBSD: pfvar.h,v 1.282 2009/01/29 15:12:28 pyr Exp $ */ #ifndef _NET_PFVAR_H_ #define _NET_PFVAR_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #include #include #endif #include #include #include #ifdef _KERNEL #if defined(__arm__) #define PF_WANT_32_TO_64_COUNTER #endif /* * A hybrid of 32-bit and 64-bit counters which can be used on platforms where * counter(9) is very expensive. * * As 32-bit counters are expected to overflow, a periodic job sums them up to * a saved 64-bit state. Fetching the value still walks all CPUs to get the most * current snapshot. */ #ifdef PF_WANT_32_TO_64_COUNTER struct pf_counter_u64_pcpu { u_int32_t current; u_int32_t snapshot; }; struct pf_counter_u64 { struct pf_counter_u64_pcpu *pfcu64_pcpu; u_int64_t pfcu64_value; seqc_t pfcu64_seqc; }; static inline int pf_counter_u64_init(struct pf_counter_u64 *pfcu64, int flags) { pfcu64->pfcu64_value = 0; pfcu64->pfcu64_seqc = 0; pfcu64->pfcu64_pcpu = uma_zalloc_pcpu(pcpu_zone_8, flags | M_ZERO); if (__predict_false(pfcu64->pfcu64_pcpu == NULL)) return (ENOMEM); return (0); } static inline void pf_counter_u64_deinit(struct pf_counter_u64 *pfcu64) { uma_zfree_pcpu(pcpu_zone_8, pfcu64->pfcu64_pcpu); } static inline void pf_counter_u64_critical_enter(void) { critical_enter(); } static inline void pf_counter_u64_critical_exit(void) { critical_exit(); } static inline void pf_counter_u64_add_protected(struct pf_counter_u64 *pfcu64, uint32_t n) { struct pf_counter_u64_pcpu *pcpu; u_int32_t val; MPASS(curthread->td_critnest > 0); pcpu = zpcpu_get(pfcu64->pfcu64_pcpu); val = atomic_load_int(&pcpu->current); atomic_store_int(&pcpu->current, val + n); } static inline void pf_counter_u64_add(struct pf_counter_u64 *pfcu64, uint32_t n) { critical_enter(); pf_counter_u64_add_protected(pfcu64, n); critical_exit(); } static inline u_int64_t pf_counter_u64_periodic(struct pf_counter_u64 *pfcu64) { struct pf_counter_u64_pcpu *pcpu; u_int64_t sum; u_int32_t val; int cpu; MPASS(curthread->td_critnest > 0); seqc_write_begin(&pfcu64->pfcu64_seqc); sum = pfcu64->pfcu64_value; CPU_FOREACH(cpu) { pcpu = zpcpu_get_cpu(pfcu64->pfcu64_pcpu, cpu); val = atomic_load_int(&pcpu->current); sum += (uint32_t)(val - pcpu->snapshot); pcpu->snapshot = val; } pfcu64->pfcu64_value = sum; seqc_write_end(&pfcu64->pfcu64_seqc); return (sum); } static inline u_int64_t pf_counter_u64_fetch(const 
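/*
 * Lockless reader for the 32/64 hybrid counter: sum the per-CPU deltas
 * on top of the saved 64-bit value and retry until the seqc generation
 * is stable, so a concurrent pf_counter_u64_periodic() rollup is never
 * observed half-applied.
 */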
struct pf_counter_u64 *pfcu64) { struct pf_counter_u64_pcpu *pcpu; u_int64_t sum; seqc_t seqc; int cpu; for (;;) { seqc = seqc_read(&pfcu64->pfcu64_seqc); sum = 0; CPU_FOREACH(cpu) { pcpu = zpcpu_get_cpu(pfcu64->pfcu64_pcpu, cpu); sum += (uint32_t)(atomic_load_int(&pcpu->current) -pcpu->snapshot); } sum += pfcu64->pfcu64_value; if (seqc_consistent(&pfcu64->pfcu64_seqc, seqc)) break; } return (sum); } static inline void pf_counter_u64_zero_protected(struct pf_counter_u64 *pfcu64) { struct pf_counter_u64_pcpu *pcpu; int cpu; MPASS(curthread->td_critnest > 0); seqc_write_begin(&pfcu64->pfcu64_seqc); CPU_FOREACH(cpu) { pcpu = zpcpu_get_cpu(pfcu64->pfcu64_pcpu, cpu); pcpu->snapshot = atomic_load_int(&pcpu->current); } pfcu64->pfcu64_value = 0; seqc_write_end(&pfcu64->pfcu64_seqc); } static inline void pf_counter_u64_zero(struct pf_counter_u64 *pfcu64) { critical_enter(); pf_counter_u64_zero_protected(pfcu64); critical_exit(); } #else struct pf_counter_u64 { counter_u64_t counter; }; static inline int pf_counter_u64_init(struct pf_counter_u64 *pfcu64, int flags) { pfcu64->counter = counter_u64_alloc(flags); if (__predict_false(pfcu64->counter == NULL)) return (ENOMEM); return (0); } static inline void pf_counter_u64_deinit(struct pf_counter_u64 *pfcu64) { counter_u64_free(pfcu64->counter); } static inline void pf_counter_u64_critical_enter(void) { } static inline void pf_counter_u64_critical_exit(void) { } static inline void pf_counter_u64_add_protected(struct pf_counter_u64 *pfcu64, uint32_t n) { counter_u64_add(pfcu64->counter, n); } static inline void pf_counter_u64_add(struct pf_counter_u64 *pfcu64, uint32_t n) { pf_counter_u64_add_protected(pfcu64, n); } static inline u_int64_t pf_counter_u64_fetch(const struct pf_counter_u64 *pfcu64) { return (counter_u64_fetch(pfcu64->counter)); } static inline void pf_counter_u64_zero_protected(struct pf_counter_u64 *pfcu64) { counter_u64_zero(pfcu64->counter); } static inline void pf_counter_u64_zero(struct pf_counter_u64 *pfcu64) { pf_counter_u64_zero_protected(pfcu64); } #endif #define pf_get_timestamp(prule)({ \ uint32_t _ts = 0; \ uint32_t __ts; \ int cpu; \ CPU_FOREACH(cpu) { \ __ts = *zpcpu_get_cpu(prule->timestamp, cpu); \ if (__ts > _ts) \ _ts = __ts; \ } \ _ts; \ }) #define pf_update_timestamp(prule) \ do { \ critical_enter(); \ *zpcpu_get((prule)->timestamp) = time_second; \ critical_exit(); \ } while (0) #define pf_timestamp_pcpu_zone (sizeof(time_t) == 4 ? 
pcpu_zone_4 : pcpu_zone_8) _Static_assert(sizeof(time_t) == 4 || sizeof(time_t) == 8, "unexpected time_t size"); SYSCTL_DECL(_net_pf); MALLOC_DECLARE(M_PFHASH); MALLOC_DECLARE(M_PF_RULE_ITEM); SDT_PROVIDER_DECLARE(pf); struct pfi_dynaddr { TAILQ_ENTRY(pfi_dynaddr) entry; struct pf_addr pfid_addr4; struct pf_addr pfid_mask4; struct pf_addr pfid_addr6; struct pf_addr pfid_mask6; struct pfr_ktable *pfid_kt; struct pfi_kkif *pfid_kif; int pfid_net; /* mask or 128 */ int pfid_acnt4; /* address count IPv4 */ int pfid_acnt6; /* address count IPv6 */ sa_family_t pfid_af; /* rule af */ u_int8_t pfid_iflags; /* PFI_AFLAG_* */ }; /* * Address manipulation macros */ #define HTONL(x) (x) = htonl((__uint32_t)(x)) #define HTONS(x) (x) = htons((__uint16_t)(x)) #define NTOHL(x) (x) = ntohl((__uint32_t)(x)) #define NTOHS(x) (x) = ntohs((__uint16_t)(x)) #define PF_NAME "pf" #define PF_HASHROW_ASSERT(h) mtx_assert(&(h)->lock, MA_OWNED) #define PF_HASHROW_LOCK(h) mtx_lock(&(h)->lock) #define PF_HASHROW_UNLOCK(h) mtx_unlock(&(h)->lock) #ifdef INVARIANTS #define PF_STATE_LOCK(s) \ do { \ struct pf_kstate *_s = (s); \ struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(_s)]; \ MPASS(_s->lock == &_ih->lock); \ mtx_lock(_s->lock); \ } while (0) #define PF_STATE_UNLOCK(s) \ do { \ struct pf_kstate *_s = (s); \ struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(_s)]; \ MPASS(_s->lock == &_ih->lock); \ mtx_unlock(_s->lock); \ } while (0) #else #define PF_STATE_LOCK(s) mtx_lock(s->lock) #define PF_STATE_UNLOCK(s) mtx_unlock(s->lock) #endif #ifdef INVARIANTS #define PF_STATE_LOCK_ASSERT(s) \ do { \ struct pf_kstate *_s = (s); \ struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(_s)]; \ MPASS(_s->lock == &_ih->lock); \ PF_HASHROW_ASSERT(_ih); \ } while (0) #else /* !INVARIANTS */ #define PF_STATE_LOCK_ASSERT(s) do {} while (0) #endif /* INVARIANTS */ #ifdef INVARIANTS #define PF_SRC_NODE_LOCK(sn) \ do { \ struct pf_ksrc_node *_sn = (sn); \ struct pf_srchash *_sh = &V_pf_srchash[ \ pf_hashsrc(&_sn->addr, _sn->af)]; \ MPASS(_sn->lock == &_sh->lock); \ mtx_lock(_sn->lock); \ } while (0) #define PF_SRC_NODE_UNLOCK(sn) \ do { \ struct pf_ksrc_node *_sn = (sn); \ struct pf_srchash *_sh = &V_pf_srchash[ \ pf_hashsrc(&_sn->addr, _sn->af)]; \ MPASS(_sn->lock == &_sh->lock); \ mtx_unlock(_sn->lock); \ } while (0) #else #define PF_SRC_NODE_LOCK(sn) mtx_lock((sn)->lock) #define PF_SRC_NODE_UNLOCK(sn) mtx_unlock((sn)->lock) #endif #ifdef INVARIANTS #define PF_SRC_NODE_LOCK_ASSERT(sn) \ do { \ struct pf_ksrc_node *_sn = (sn); \ struct pf_srchash *_sh = &V_pf_srchash[ \ pf_hashsrc(&_sn->addr, _sn->af)]; \ MPASS(_sn->lock == &_sh->lock); \ PF_HASHROW_ASSERT(_sh); \ } while (0) #else /* !INVARIANTS */ #define PF_SRC_NODE_LOCK_ASSERT(sn) do {} while (0) #endif /* INVARIANTS */ extern struct mtx_padalign pf_unlnkdrules_mtx; #define PF_UNLNKDRULES_LOCK() mtx_lock(&pf_unlnkdrules_mtx) #define PF_UNLNKDRULES_UNLOCK() mtx_unlock(&pf_unlnkdrules_mtx) #define PF_UNLNKDRULES_ASSERT() mtx_assert(&pf_unlnkdrules_mtx, MA_OWNED) extern struct sx pf_config_lock; #define PF_CONFIG_LOCK() sx_xlock(&pf_config_lock) #define PF_CONFIG_UNLOCK() sx_xunlock(&pf_config_lock) #define PF_CONFIG_ASSERT() sx_assert(&pf_config_lock, SA_XLOCKED) VNET_DECLARE(struct rmlock, pf_rules_lock); #define V_pf_rules_lock VNET(pf_rules_lock) #define PF_RULES_RLOCK_TRACKER struct rm_priotracker _pf_rules_tracker #define PF_RULES_RLOCK() rm_rlock(&V_pf_rules_lock, &_pf_rules_tracker) #define PF_RULES_RUNLOCK() rm_runlock(&V_pf_rules_lock, &_pf_rules_tracker) #define PF_RULES_WLOCK() 
rm_wlock(&V_pf_rules_lock) #define PF_RULES_WUNLOCK() rm_wunlock(&V_pf_rules_lock) #define PF_RULES_WOWNED() rm_wowned(&V_pf_rules_lock) #define PF_RULES_ASSERT() rm_assert(&V_pf_rules_lock, RA_LOCKED) #define PF_RULES_RASSERT() rm_assert(&V_pf_rules_lock, RA_RLOCKED) #define PF_RULES_WASSERT() rm_assert(&V_pf_rules_lock, RA_WLOCKED) extern struct mtx_padalign pf_table_stats_lock; #define PF_TABLE_STATS_LOCK() mtx_lock(&pf_table_stats_lock) #define PF_TABLE_STATS_UNLOCK() mtx_unlock(&pf_table_stats_lock) #define PF_TABLE_STATS_OWNED() mtx_owned(&pf_table_stats_lock) #define PF_TABLE_STATS_ASSERT() mtx_assert(&pf_table_stats_lock, MA_OWNED) extern struct sx pf_end_lock; #define PF_MODVER 1 #define PFLOG_MODVER 1 #define PFSYNC_MODVER 1 #define PFLOG_MINVER 1 #define PFLOG_PREFVER PFLOG_MODVER #define PFLOG_MAXVER 1 #define PFSYNC_MINVER 1 #define PFSYNC_PREFVER PFSYNC_MODVER #define PFSYNC_MAXVER 1 #ifdef INET #ifndef INET6 #define PF_INET_ONLY #endif /* ! INET6 */ #endif /* INET */ #ifdef INET6 #ifndef INET #define PF_INET6_ONLY #endif /* ! INET */ #endif /* INET6 */ #ifdef INET #ifdef INET6 #define PF_INET_INET6 #endif /* INET6 */ #endif /* INET */ #else #define PF_INET_INET6 #endif /* _KERNEL */ /* Both IPv4 and IPv6 */ #ifdef PF_INET_INET6 #define PF_AEQ(a, b, c) \ ((c == AF_INET && (a)->addr32[0] == (b)->addr32[0]) || \ (c == AF_INET6 && (a)->addr32[3] == (b)->addr32[3] && \ (a)->addr32[2] == (b)->addr32[2] && \ (a)->addr32[1] == (b)->addr32[1] && \ (a)->addr32[0] == (b)->addr32[0])) \ #define PF_ANEQ(a, b, c) \ ((c == AF_INET && (a)->addr32[0] != (b)->addr32[0]) || \ (c == AF_INET6 && ((a)->addr32[0] != (b)->addr32[0] || \ (a)->addr32[1] != (b)->addr32[1] || \ (a)->addr32[2] != (b)->addr32[2] || \ (a)->addr32[3] != (b)->addr32[3]))) \ #define PF_AZERO(a, c) \ ((c == AF_INET && !(a)->addr32[0]) || \ (c == AF_INET6 && !(a)->addr32[0] && !(a)->addr32[1] && \ !(a)->addr32[2] && !(a)->addr32[3] )) \ #define PF_MATCHA(n, a, m, b, f) \ pf_match_addr(n, a, m, b, f) #define PF_ACPY(a, b, f) \ pf_addrcpy(a, b, f) #define PF_AINC(a, f) \ pf_addr_inc(a, f) #define PF_POOLMASK(a, b, c, d, f) \ pf_poolmask(a, b, c, d, f) #else /* Just IPv6 */ #ifdef PF_INET6_ONLY #define PF_AEQ(a, b, c) \ ((a)->addr32[3] == (b)->addr32[3] && \ (a)->addr32[2] == (b)->addr32[2] && \ (a)->addr32[1] == (b)->addr32[1] && \ (a)->addr32[0] == (b)->addr32[0]) \ #define PF_ANEQ(a, b, c) \ ((a)->addr32[3] != (b)->addr32[3] || \ (a)->addr32[2] != (b)->addr32[2] || \ (a)->addr32[1] != (b)->addr32[1] || \ (a)->addr32[0] != (b)->addr32[0]) \ #define PF_AZERO(a, c) \ (!(a)->addr32[0] && \ !(a)->addr32[1] && \ !(a)->addr32[2] && \ !(a)->addr32[3] ) \ #define PF_MATCHA(n, a, m, b, f) \ pf_match_addr(n, a, m, b, f) #define PF_ACPY(a, b, f) \ pf_addrcpy(a, b, f) #define PF_AINC(a, f) \ pf_addr_inc(a, f) #define PF_POOLMASK(a, b, c, d, f) \ pf_poolmask(a, b, c, d, f) #else /* Just IPv4 */ #ifdef PF_INET_ONLY #define PF_AEQ(a, b, c) \ ((a)->addr32[0] == (b)->addr32[0]) #define PF_ANEQ(a, b, c) \ ((a)->addr32[0] != (b)->addr32[0]) #define PF_AZERO(a, c) \ (!(a)->addr32[0]) #define PF_MATCHA(n, a, m, b, f) \ pf_match_addr(n, a, m, b, f) #define PF_ACPY(a, b, f) \ (a)->v4.s_addr = (b)->v4.s_addr #define PF_AINC(a, f) \ do { \ (a)->addr32[0] = htonl(ntohl((a)->addr32[0]) + 1); \ } while (0) #define PF_POOLMASK(a, b, c, d, f) \ do { \ (a)->addr32[0] = ((b)->addr32[0] & (c)->addr32[0]) | \ (((c)->addr32[0] ^ 0xffffffff ) & (d)->addr32[0]); \ } while (0) #endif /* PF_INET_ONLY */ #endif /* PF_INET6_ONLY */ #endif /* PF_INET_INET6 */ /* * XXX 
callers not FIB-aware in our version of pf yet. * OpenBSD fixed it later it seems, 2010/05/07 13:33:16 claudio. */ #define PF_MISMATCHAW(aw, x, af, neg, ifp, rtid) \ ( \ (((aw)->type == PF_ADDR_NOROUTE && \ pf_routable((x), (af), NULL, (rtid))) || \ (((aw)->type == PF_ADDR_URPFFAILED && (ifp) != NULL && \ pf_routable((x), (af), (ifp), (rtid))) || \ ((aw)->type == PF_ADDR_TABLE && \ !pfr_match_addr((aw)->p.tbl, (x), (af))) || \ ((aw)->type == PF_ADDR_DYNIFTL && \ !pfi_match_addr((aw)->p.dyn, (x), (af))) || \ ((aw)->type == PF_ADDR_RANGE && \ !pf_match_addr_range(&(aw)->v.a.addr, \ &(aw)->v.a.mask, (x), (af))) || \ ((aw)->type == PF_ADDR_ADDRMASK && \ !PF_AZERO(&(aw)->v.a.mask, (af)) && \ !PF_MATCHA(0, &(aw)->v.a.addr, \ &(aw)->v.a.mask, (x), (af))))) != \ (neg) \ ) #define PF_ALGNMNT(off) (((off) % 2) == 0) #ifdef _KERNEL struct pf_kpooladdr { struct pf_addr_wrap addr; TAILQ_ENTRY(pf_kpooladdr) entries; char ifname[IFNAMSIZ]; struct pfi_kkif *kif; }; TAILQ_HEAD(pf_kpalist, pf_kpooladdr); struct pf_kpool { struct mtx mtx; struct pf_kpalist list; struct pf_kpooladdr *cur; struct pf_poolhashkey key; struct pf_addr counter; struct pf_mape_portset mape; int tblidx; u_int16_t proxy_port[2]; u_int8_t opts; }; struct pf_rule_actions { int32_t rtableid; uint16_t qid; uint16_t pqid; uint16_t max_mss; uint8_t log; uint8_t set_tos; uint8_t min_ttl; uint16_t dnpipe; uint16_t dnrpipe; /* Reverse direction pipe */ uint32_t flags; uint8_t set_prio[2]; }; union pf_keth_rule_ptr { struct pf_keth_rule *ptr; uint32_t nr; }; struct pf_keth_rule_addr { uint8_t addr[ETHER_ADDR_LEN]; uint8_t mask[ETHER_ADDR_LEN]; bool neg; uint8_t isset; }; struct pf_keth_anchor; TAILQ_HEAD(pf_keth_ruleq, pf_keth_rule); struct pf_keth_ruleset { struct pf_keth_ruleq rules[2]; struct pf_keth_rules { struct pf_keth_ruleq *rules; int open; uint32_t ticket; } active, inactive; struct epoch_context epoch_ctx; struct vnet *vnet; struct pf_keth_anchor *anchor; }; RB_HEAD(pf_keth_anchor_global, pf_keth_anchor); RB_HEAD(pf_keth_anchor_node, pf_keth_anchor); struct pf_keth_anchor { RB_ENTRY(pf_keth_anchor) entry_node; RB_ENTRY(pf_keth_anchor) entry_global; struct pf_keth_anchor *parent; struct pf_keth_anchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pf_keth_ruleset ruleset; int refcnt; /* anchor rules */ uint8_t anchor_relative; uint8_t anchor_wildcard; }; RB_PROTOTYPE(pf_keth_anchor_node, pf_keth_anchor, entry_node, pf_keth_anchor_compare); RB_PROTOTYPE(pf_keth_anchor_global, pf_keth_anchor, entry_global, pf_keth_anchor_compare); struct pf_keth_rule { #define PFE_SKIP_IFP 0 #define PFE_SKIP_DIR 1 #define PFE_SKIP_PROTO 2 #define PFE_SKIP_SRC_ADDR 3 #define PFE_SKIP_DST_ADDR 4 #define PFE_SKIP_SRC_IP_ADDR 5 #define PFE_SKIP_DST_IP_ADDR 6 #define PFE_SKIP_COUNT 7 union pf_keth_rule_ptr skip[PFE_SKIP_COUNT]; TAILQ_ENTRY(pf_keth_rule) entries; struct pf_keth_anchor *anchor; u_int8_t anchor_relative; u_int8_t anchor_wildcard; uint32_t nr; bool quick; /* Filter */ char ifname[IFNAMSIZ]; struct pfi_kkif *kif; bool ifnot; uint8_t direction; uint16_t proto; struct pf_keth_rule_addr src, dst; struct pf_rule_addr ipsrc, ipdst; char match_tagname[PF_TAG_NAME_SIZE]; uint16_t match_tag; bool match_tag_not; /* Stats */ counter_u64_t evaluations; counter_u64_t packets[2]; counter_u64_t bytes[2]; time_t *timestamp; /* Action */ char qname[PF_QNAME_SIZE]; int qid; char tagname[PF_TAG_NAME_SIZE]; uint16_t tag; char bridge_to_name[IFNAMSIZ]; struct pfi_kkif *bridge_to; uint8_t action; uint16_t dnpipe; uint32_t dnflags; char 
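/* User-assigned rule labels and numeric identifier for logging/reporting. */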
label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; uint32_t ridentifier; }; union pf_krule_ptr { struct pf_krule *ptr; u_int32_t nr; }; RB_HEAD(pf_krule_global, pf_krule); RB_PROTOTYPE(pf_krule_global, pf_krule, entry_global, pf_krule_compare); struct pf_krule { struct pf_rule_addr src; struct pf_rule_addr dst; union pf_krule_ptr skip[PF_SKIP_COUNT]; char label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE]; uint32_t ridentifier; char ifname[IFNAMSIZ]; char qname[PF_QNAME_SIZE]; char pqname[PF_QNAME_SIZE]; char tagname[PF_TAG_NAME_SIZE]; char match_tagname[PF_TAG_NAME_SIZE]; char overload_tblname[PF_TABLE_NAME_SIZE]; TAILQ_ENTRY(pf_krule) entries; struct pf_kpool rpool; struct pf_counter_u64 evaluations; struct pf_counter_u64 packets[2]; struct pf_counter_u64 bytes[2]; time_t *timestamp; struct pfi_kkif *kif; struct pf_kanchor *anchor; struct pfr_ktable *overload_tbl; pf_osfp_t os_fingerprint; int32_t rtableid; u_int32_t timeout[PFTM_MAX]; u_int32_t max_states; u_int32_t max_src_nodes; u_int32_t max_src_states; u_int32_t max_src_conn; struct { u_int32_t limit; u_int32_t seconds; } max_src_conn_rate; u_int16_t qid; u_int16_t pqid; u_int16_t dnpipe; u_int16_t dnrpipe; u_int32_t free_flags; u_int32_t nr; u_int32_t prob; uid_t cuid; pid_t cpid; counter_u64_t states_cur; counter_u64_t states_tot; counter_u64_t src_nodes; u_int16_t return_icmp; u_int16_t return_icmp6; u_int16_t max_mss; u_int16_t tag; u_int16_t match_tag; u_int16_t scrub_flags; struct pf_rule_uid uid; struct pf_rule_gid gid; u_int32_t rule_flag; uint32_t rule_ref; u_int8_t action; u_int8_t direction; u_int8_t log; u_int8_t logif; u_int8_t quick; u_int8_t ifnot; u_int8_t match_tag_not; u_int8_t natpass; u_int8_t keep_state; sa_family_t af; u_int8_t proto; u_int8_t type; u_int8_t code; u_int8_t flags; u_int8_t flagset; u_int8_t min_ttl; u_int8_t allow_opts; u_int8_t rt; u_int8_t return_ttl; u_int8_t tos; u_int8_t set_tos; u_int8_t anchor_relative; u_int8_t anchor_wildcard; u_int8_t flush; u_int8_t prio; u_int8_t set_prio[2]; struct { struct pf_addr addr; u_int16_t port; } divert; u_int8_t md5sum[PF_MD5_DIGEST_LENGTH]; RB_ENTRY(pf_krule) entry_global; #ifdef PF_WANT_32_TO_64_COUNTER LIST_ENTRY(pf_krule) allrulelist; bool allrulelinked; #endif }; struct pf_krule_item { SLIST_ENTRY(pf_krule_item) entry; struct pf_krule *r; }; SLIST_HEAD(pf_krule_slist, pf_krule_item); struct pf_ksrc_node { LIST_ENTRY(pf_ksrc_node) entry; struct pf_addr addr; struct pf_addr raddr; struct pf_krule_slist match_rules; union pf_krule_ptr rule; struct pfi_kkif *rkif; counter_u64_t bytes[2]; counter_u64_t packets[2]; u_int32_t states; u_int32_t conn; struct pf_threshold conn_rate; u_int32_t creation; u_int32_t expire; sa_family_t af; u_int8_t ruletype; struct mtx *lock; }; #endif struct pf_state_scrub { struct timeval pfss_last; /* time received last packet */ u_int32_t pfss_tsecr; /* last echoed timestamp */ u_int32_t pfss_tsval; /* largest timestamp */ u_int32_t pfss_tsval0; /* original timestamp */ u_int16_t pfss_flags; #define PFSS_TIMESTAMP 0x0001 /* modulate timestamp */ #define PFSS_PAWS 0x0010 /* stricter PAWS checks */ #define PFSS_PAWS_IDLED 0x0020 /* was idle too long. 
no PAWS */ #define PFSS_DATA_TS 0x0040 /* timestamp on data packets */ #define PFSS_DATA_NOTS 0x0080 /* no timestamp on data packets */ u_int8_t pfss_ttl; /* stashed TTL */ u_int8_t pad; union { u_int32_t pfss_ts_mod; /* timestamp modulation */ u_int32_t pfss_v_tag; /* SCTP verification tag */ }; }; struct pf_state_host { struct pf_addr addr; u_int16_t port; u_int16_t pad; }; struct pf_state_peer { struct pf_state_scrub *scrub; /* state is scrubbed */ u_int32_t seqlo; /* Max sequence number sent */ u_int32_t seqhi; /* Max the other end ACKd + win */ u_int32_t seqdiff; /* Sequence number modulator */ u_int16_t max_win; /* largest window (pre scaling) */ u_int16_t mss; /* Maximum segment size option */ u_int8_t state; /* active state level */ u_int8_t wscale; /* window scaling factor */ u_int8_t tcp_est; /* Did we reach TCPS_ESTABLISHED */ u_int8_t pad[1]; }; /* Keep synced with struct pf_state_key. */ struct pf_state_key_cmp { struct pf_addr addr[2]; u_int16_t port[2]; sa_family_t af; u_int8_t proto; u_int8_t pad[2]; }; struct pf_state_key { struct pf_addr addr[2]; u_int16_t port[2]; sa_family_t af; u_int8_t proto; u_int8_t pad[2]; LIST_ENTRY(pf_state_key) entry; TAILQ_HEAD(, pf_kstate) states[2]; }; /* Keep synced with struct pf_kstate. */ struct pf_state_cmp { u_int64_t id; u_int32_t creatorid; u_int8_t direction; u_int8_t pad[3]; }; struct pf_state_scrub_export { uint16_t pfss_flags; uint8_t pfss_ttl; /* stashed TTL */ #define PF_SCRUB_FLAG_VALID 0x01 uint8_t scrub_flag; uint32_t pfss_ts_mod; /* timestamp modulation */ }; struct pf_state_key_export { struct pf_addr addr[2]; uint16_t port[2]; }; struct pf_state_peer_export { struct pf_state_scrub_export scrub; /* state is scrubbed */ uint32_t seqlo; /* Max sequence number sent */ uint32_t seqhi; /* Max the other end ACKd + win */ uint32_t seqdiff; /* Sequence number modulator */ uint16_t max_win; /* largest window (pre scaling) */ uint16_t mss; /* Maximum segment size option */ uint8_t state; /* active state level */ uint8_t wscale; /* window scaling factor */ uint8_t dummy[6]; }; _Static_assert(sizeof(struct pf_state_peer_export) == 32, "size incorrect"); struct pf_state_export { uint64_t version; #define PF_STATE_VERSION 20230404 uint64_t id; char ifname[IFNAMSIZ]; char orig_ifname[IFNAMSIZ]; struct pf_state_key_export key[2]; struct pf_state_peer_export src; struct pf_state_peer_export dst; struct pf_addr rt_addr; uint32_t rule; uint32_t anchor; uint32_t nat_rule; uint32_t creation; uint32_t expire; uint32_t spare0; uint64_t packets[2]; uint64_t bytes[2]; uint32_t creatorid; uint32_t spare1; sa_family_t af; uint8_t proto; uint8_t direction; uint8_t log; uint8_t state_flags_compat; uint8_t timeout; uint8_t sync_flags; uint8_t updates; uint16_t state_flags; uint16_t qid; uint16_t pqid; uint16_t dnpipe; uint16_t dnrpipe; int32_t rtableid; uint8_t min_ttl; uint8_t set_tos; uint16_t max_mss; uint8_t set_prio[2]; uint8_t rt; char rt_ifname[IFNAMSIZ]; uint8_t spare[72]; }; _Static_assert(sizeof(struct pf_state_export) == 384, "size incorrect"); #ifdef _KERNEL struct pf_kstate { /* * Area shared with pf_state_cmp */ u_int64_t id; u_int32_t creatorid; u_int8_t direction; u_int8_t pad[3]; /* * end of the area */ u_int16_t state_flags; u_int8_t timeout; u_int8_t sync_state; /* PFSYNC_S_x */ u_int8_t sync_updates; /* XXX */ u_int refs; struct mtx *lock; TAILQ_ENTRY(pf_kstate) sync_list; TAILQ_ENTRY(pf_kstate) key_list[2]; LIST_ENTRY(pf_kstate) entry; struct pf_state_peer src; struct pf_state_peer dst; struct pf_krule_slist match_rules; union 
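/*
 * The union pf_krule_ptr fields below hold a kernel pointer while the
 * state is live and are exported to userspace as a plain rule number.
 */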
pf_krule_ptr rule; union pf_krule_ptr anchor; union pf_krule_ptr nat_rule; struct pf_addr rt_addr; struct pf_state_key *key[2]; /* addresses stack and wire */ struct pfi_kkif *kif; struct pfi_kkif *orig_kif; /* The real kif, even if we're a floating state (i.e. if == V_pfi_all). */ struct pfi_kkif *rt_kif; struct pf_ksrc_node *src_node; struct pf_ksrc_node *nat_src_node; u_int64_t packets[2]; u_int64_t bytes[2]; u_int32_t creation; u_int32_t expire; u_int32_t pfsync_time; struct pf_rule_actions act; u_int16_t tag; u_int8_t rt; }; /* * A size of <= 368 bytes fits 11 objects per page on LP64; try not to grow the struct beyond that. */ _Static_assert(sizeof(struct pf_kstate) <= 368, "pf_kstate size crosses 368 bytes"); #endif /* * Unified state structures for pulling states out of the kernel * used by pfsync(4) and the pf(4) ioctl. */ struct pfsync_state_scrub { u_int16_t pfss_flags; u_int8_t pfss_ttl; /* stashed TTL */ #define PFSYNC_SCRUB_FLAG_VALID 0x01 u_int8_t scrub_flag; u_int32_t pfss_ts_mod; /* timestamp modulation */ } __packed; struct pfsync_state_peer { struct pfsync_state_scrub scrub; /* state is scrubbed */ u_int32_t seqlo; /* Max sequence number sent */ u_int32_t seqhi; /* Max the other end ACKd + win */ u_int32_t seqdiff; /* Sequence number modulator */ u_int16_t max_win; /* largest window (pre scaling) */ u_int16_t mss; /* Maximum segment size option */ u_int8_t state; /* active state level */ u_int8_t wscale; /* window scaling factor */ u_int8_t pad[6]; } __packed; struct pfsync_state_key { struct pf_addr addr[2]; u_int16_t port[2]; }; struct pfsync_state_1301 { u_int64_t id; char ifname[IFNAMSIZ]; struct pfsync_state_key key[2]; struct pfsync_state_peer src; struct pfsync_state_peer dst; struct pf_addr rt_addr; u_int32_t rule; u_int32_t anchor; u_int32_t nat_rule; u_int32_t creation; u_int32_t expire; u_int32_t packets[2][2]; u_int32_t bytes[2][2]; u_int32_t creatorid; sa_family_t af; u_int8_t proto; u_int8_t direction; u_int8_t __spare[2]; u_int8_t log; u_int8_t state_flags; u_int8_t timeout; u_int8_t sync_flags; u_int8_t updates; } __packed; struct pfsync_state_1400 { /* The beginning of the struct is compatible with previous versions */ u_int64_t id; char ifname[IFNAMSIZ]; struct pfsync_state_key key[2]; struct pfsync_state_peer src; struct pfsync_state_peer dst; struct pf_addr rt_addr; u_int32_t rule; u_int32_t anchor; u_int32_t nat_rule; u_int32_t creation; u_int32_t expire; u_int32_t packets[2][2]; u_int32_t bytes[2][2]; u_int32_t creatorid; sa_family_t af; u_int8_t proto; u_int8_t direction; u_int16_t state_flags; u_int8_t log; u_int8_t __spare; u_int8_t timeout; u_int8_t sync_flags; u_int8_t updates; /* The rest is not compatible */ u_int16_t qid; u_int16_t pqid; u_int16_t dnpipe; u_int16_t dnrpipe; int32_t rtableid; u_int8_t min_ttl; u_int8_t set_tos; u_int16_t max_mss; u_int8_t set_prio[2]; u_int8_t rt; char rt_ifname[IFNAMSIZ]; } __packed; union pfsync_state_union { struct pfsync_state_1301 pfs_1301; struct pfsync_state_1400 pfs_1400; } __packed; #ifdef _KERNEL /* pfsync */ typedef int pfsync_state_import_t(union pfsync_state_union *, int, int); typedef void pfsync_insert_state_t(struct pf_kstate *); typedef void pfsync_update_state_t(struct pf_kstate *); typedef void pfsync_delete_state_t(struct pf_kstate *); typedef void pfsync_clear_states_t(u_int32_t, const char *); typedef int pfsync_defer_t(struct pf_kstate *, struct mbuf *); typedef void pfsync_detach_ifnet_t(struct ifnet *); VNET_DECLARE(pfsync_state_import_t *, pfsync_state_import_ptr); #define V_pfsync_state_import_ptr 
VNET(pfsync_state_import_ptr) VNET_DECLARE(pfsync_insert_state_t *, pfsync_insert_state_ptr); #define V_pfsync_insert_state_ptr VNET(pfsync_insert_state_ptr) VNET_DECLARE(pfsync_update_state_t *, pfsync_update_state_ptr); #define V_pfsync_update_state_ptr VNET(pfsync_update_state_ptr) VNET_DECLARE(pfsync_delete_state_t *, pfsync_delete_state_ptr); #define V_pfsync_delete_state_ptr VNET(pfsync_delete_state_ptr) VNET_DECLARE(pfsync_clear_states_t *, pfsync_clear_states_ptr); #define V_pfsync_clear_states_ptr VNET(pfsync_clear_states_ptr) VNET_DECLARE(pfsync_defer_t *, pfsync_defer_ptr); #define V_pfsync_defer_ptr VNET(pfsync_defer_ptr) extern pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr; void pfsync_state_export(union pfsync_state_union *, struct pf_kstate *, int); void pf_state_export(struct pf_state_export *, struct pf_kstate *); /* pflog */ struct pf_kruleset; struct pf_pdesc; typedef int pflog_packet_t(struct pfi_kkif *, struct mbuf *, sa_family_t, u_int8_t, struct pf_krule *, struct pf_krule *, struct pf_kruleset *, struct pf_pdesc *, int); extern pflog_packet_t *pflog_packet_ptr; #endif /* _KERNEL */ #define PFSYNC_FLAG_SRCNODE 0x04 #define PFSYNC_FLAG_NATSRCNODE 0x08 /* for copies to/from network byte order */ /* ioctl interface also uses network byte order */ #define pf_state_peer_hton(s,d) do { \ (d)->seqlo = htonl((s)->seqlo); \ (d)->seqhi = htonl((s)->seqhi); \ (d)->seqdiff = htonl((s)->seqdiff); \ (d)->max_win = htons((s)->max_win); \ (d)->mss = htons((s)->mss); \ (d)->state = (s)->state; \ (d)->wscale = (s)->wscale; \ if ((s)->scrub) { \ (d)->scrub.pfss_flags = \ htons((s)->scrub->pfss_flags & PFSS_TIMESTAMP); \ (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \ (d)->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);\ (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \ } \ } while (0) #define pf_state_peer_ntoh(s,d) do { \ (d)->seqlo = ntohl((s)->seqlo); \ (d)->seqhi = ntohl((s)->seqhi); \ (d)->seqdiff = ntohl((s)->seqdiff); \ (d)->max_win = ntohs((s)->max_win); \ (d)->mss = ntohs((s)->mss); \ (d)->state = (s)->state; \ (d)->wscale = (s)->wscale; \ if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \ (d)->scrub != NULL) { \ (d)->scrub->pfss_flags = \ ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \ (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \ (d)->scrub->pfss_ts_mod = ntohl((s)->scrub.pfss_ts_mod);\ } \ } while (0) #define pf_state_counter_hton(s,d) do { \ d[0] = htonl((s>>32)&0xffffffff); \ d[1] = htonl(s&0xffffffff); \ } while (0) #define pf_state_counter_from_pfsync(s) \ (((u_int64_t)(s[0])<<32) | (u_int64_t)(s[1])) #define pf_state_counter_ntoh(s,d) do { \ d = ntohl(s[0]); \ d = d<<32; \ d += ntohl(s[1]); \ } while (0) TAILQ_HEAD(pf_krulequeue, pf_krule); struct pf_kanchor; struct pf_kruleset { struct { struct pf_krulequeue queues[2]; struct { struct pf_krulequeue *ptr; struct pf_krule **ptr_array; u_int32_t rcount; u_int32_t ticket; int open; struct pf_krule_global *tree; } active, inactive; } rules[PF_RULESET_MAX]; struct pf_kanchor *anchor; u_int32_t tticket; int tables; int topen; }; RB_HEAD(pf_kanchor_global, pf_kanchor); RB_HEAD(pf_kanchor_node, pf_kanchor); struct pf_kanchor { RB_ENTRY(pf_kanchor) entry_global; RB_ENTRY(pf_kanchor) entry_node; struct pf_kanchor *parent; struct pf_kanchor_node children; char name[PF_ANCHOR_NAME_SIZE]; char path[MAXPATHLEN]; struct pf_kruleset ruleset; int refcnt; /* anchor rules */ }; RB_PROTOTYPE(pf_kanchor_global, pf_kanchor, entry_global, pf_anchor_compare); RB_PROTOTYPE(pf_kanchor_node, pf_kanchor, entry_node, 
pf_kanchor_compare); #define PF_RESERVED_ANCHOR "_pf" #define PFR_TFLAG_PERSIST 0x00000001 #define PFR_TFLAG_CONST 0x00000002 #define PFR_TFLAG_ACTIVE 0x00000004 #define PFR_TFLAG_INACTIVE 0x00000008 #define PFR_TFLAG_REFERENCED 0x00000010 #define PFR_TFLAG_REFDANCHOR 0x00000020 #define PFR_TFLAG_COUNTERS 0x00000040 /* Adjust masks below when adding flags. */ #define PFR_TFLAG_USRMASK (PFR_TFLAG_PERSIST | \ PFR_TFLAG_CONST | \ PFR_TFLAG_COUNTERS) #define PFR_TFLAG_SETMASK (PFR_TFLAG_ACTIVE | \ PFR_TFLAG_INACTIVE | \ PFR_TFLAG_REFERENCED | \ PFR_TFLAG_REFDANCHOR) #define PFR_TFLAG_ALLMASK (PFR_TFLAG_PERSIST | \ PFR_TFLAG_CONST | \ PFR_TFLAG_ACTIVE | \ PFR_TFLAG_INACTIVE | \ PFR_TFLAG_REFERENCED | \ PFR_TFLAG_REFDANCHOR | \ PFR_TFLAG_COUNTERS) struct pf_kanchor_stackframe; struct pf_keth_anchor_stackframe; struct pfr_table { char pfrt_anchor[MAXPATHLEN]; char pfrt_name[PF_TABLE_NAME_SIZE]; u_int32_t pfrt_flags; u_int8_t pfrt_fback; }; enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED, PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_DUPLICATE, PFR_FB_NOTMATCH, PFR_FB_CONFLICT, PFR_FB_NOCOUNT, PFR_FB_MAX }; struct pfr_addr { union { struct in_addr _pfra_ip4addr; struct in6_addr _pfra_ip6addr; } pfra_u; u_int8_t pfra_af; u_int8_t pfra_net; u_int8_t pfra_not; u_int8_t pfra_fback; }; #define pfra_ip4addr pfra_u._pfra_ip4addr #define pfra_ip6addr pfra_u._pfra_ip6addr enum { PFR_DIR_IN, PFR_DIR_OUT, PFR_DIR_MAX }; enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX }; enum { PFR_TYPE_PACKETS, PFR_TYPE_BYTES, PFR_TYPE_MAX }; #define PFR_NUM_COUNTERS (PFR_DIR_MAX * PFR_OP_ADDR_MAX * PFR_TYPE_MAX) #define PFR_OP_XPASS PFR_OP_ADDR_MAX struct pfr_astats { struct pfr_addr pfras_a; u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; long pfras_tzero; }; enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX }; struct pfr_tstats { struct pfr_table pfrts_t; u_int64_t pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; u_int64_t pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; u_int64_t pfrts_match; u_int64_t pfrts_nomatch; long pfrts_tzero; int pfrts_cnt; int pfrts_refcnt[PFR_REFCNT_MAX]; }; #ifdef _KERNEL struct pfr_kstate_counter { counter_u64_t pkc_pcpu; u_int64_t pkc_zero; }; static inline int pfr_kstate_counter_init(struct pfr_kstate_counter *pfrc, int flags) { pfrc->pkc_zero = 0; pfrc->pkc_pcpu = counter_u64_alloc(flags); if (pfrc->pkc_pcpu == NULL) return (ENOMEM); return (0); } static inline void pfr_kstate_counter_deinit(struct pfr_kstate_counter *pfrc) { counter_u64_free(pfrc->pkc_pcpu); } static inline u_int64_t pfr_kstate_counter_fetch(struct pfr_kstate_counter *pfrc) { u_int64_t c; c = counter_u64_fetch(pfrc->pkc_pcpu); c -= pfrc->pkc_zero; return (c); } static inline void pfr_kstate_counter_zero(struct pfr_kstate_counter *pfrc) { u_int64_t c; c = counter_u64_fetch(pfrc->pkc_pcpu); pfrc->pkc_zero = c; } static inline void pfr_kstate_counter_add(struct pfr_kstate_counter *pfrc, int64_t n) { counter_u64_add(pfrc->pkc_pcpu, n); } struct pfr_ktstats { struct pfr_table pfrts_t; struct pfr_kstate_counter pfrkts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; struct pfr_kstate_counter pfrkts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; struct pfr_kstate_counter pfrkts_match; struct pfr_kstate_counter pfrkts_nomatch; long pfrkts_tzero; int pfrkts_cnt; int pfrkts_refcnt[PFR_REFCNT_MAX]; }; #endif /* _KERNEL */ #define pfrts_name pfrts_t.pfrt_name #define pfrts_flags pfrts_t.pfrt_flags #ifndef _SOCKADDR_UNION_DEFINED #define _SOCKADDR_UNION_DEFINED 
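/*
 * Illustrative sketch (not part of the diff): the pfr_kstate_counter inlines
 * above implement "zeroing" without clearing the underlying per-CPU
 * counter(9) object. pfr_kstate_counter_zero() snapshots the current
 * aggregate into pkc_zero, and pfr_kstate_counter_fetch() reports the
 * aggregate minus that baseline. The helper below is hypothetical and only
 * demonstrates the intended call pattern (kernel context assumed).
 */
#if 0
static inline void
example_report_and_clear(struct pfr_kstate_counter *c)
{
	u_int64_t since_zero;

	/* Value accumulated since the last zero (aggregate - pkc_zero). */
	since_zero = pfr_kstate_counter_fetch(c);
	printf("count since last clear: %ju\n", (uintmax_t)since_zero);

	/* Start a new interval; the pcpu counter itself is never reset. */
	pfr_kstate_counter_zero(c);
}
#endif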
union sockaddr_union { struct sockaddr sa; struct sockaddr_in sin; struct sockaddr_in6 sin6; }; #endif /* _SOCKADDR_UNION_DEFINED */ struct pfr_kcounters { counter_u64_t pfrkc_counters; long pfrkc_tzero; }; #define pfr_kentry_counter(kc, dir, op, t) \ ((kc)->pfrkc_counters + \ (dir) * PFR_OP_ADDR_MAX * PFR_TYPE_MAX + (op) * PFR_TYPE_MAX + (t)) #ifdef _KERNEL SLIST_HEAD(pfr_kentryworkq, pfr_kentry); struct pfr_kentry { struct radix_node pfrke_node[2]; union sockaddr_union pfrke_sa; SLIST_ENTRY(pfr_kentry) pfrke_workq; struct pfr_kcounters pfrke_counters; u_int8_t pfrke_af; u_int8_t pfrke_net; u_int8_t pfrke_not; u_int8_t pfrke_mark; }; SLIST_HEAD(pfr_ktableworkq, pfr_ktable); RB_HEAD(pfr_ktablehead, pfr_ktable); struct pfr_ktable { struct pfr_ktstats pfrkt_kts; RB_ENTRY(pfr_ktable) pfrkt_tree; SLIST_ENTRY(pfr_ktable) pfrkt_workq; struct radix_node_head *pfrkt_ip4; struct radix_node_head *pfrkt_ip6; struct pfr_ktable *pfrkt_shadow; struct pfr_ktable *pfrkt_root; struct pf_kruleset *pfrkt_rs; long pfrkt_larg; int pfrkt_nflags; }; #define pfrkt_t pfrkt_kts.pfrts_t #define pfrkt_name pfrkt_t.pfrt_name #define pfrkt_anchor pfrkt_t.pfrt_anchor #define pfrkt_ruleset pfrkt_t.pfrt_ruleset #define pfrkt_flags pfrkt_t.pfrt_flags #define pfrkt_cnt pfrkt_kts.pfrkts_cnt #define pfrkt_refcnt pfrkt_kts.pfrkts_refcnt #define pfrkt_packets pfrkt_kts.pfrkts_packets #define pfrkt_bytes pfrkt_kts.pfrkts_bytes #define pfrkt_match pfrkt_kts.pfrkts_match #define pfrkt_nomatch pfrkt_kts.pfrkts_nomatch #define pfrkt_tzero pfrkt_kts.pfrkts_tzero #endif #ifdef _KERNEL struct pfi_kkif { char pfik_name[IFNAMSIZ]; union { RB_ENTRY(pfi_kkif) _pfik_tree; LIST_ENTRY(pfi_kkif) _pfik_list; } _pfik_glue; #define pfik_tree _pfik_glue._pfik_tree #define pfik_list _pfik_glue._pfik_list struct pf_counter_u64 pfik_packets[2][2][2]; struct pf_counter_u64 pfik_bytes[2][2][2]; u_int32_t pfik_tzero; u_int pfik_flags; struct ifnet *pfik_ifp; struct ifg_group *pfik_group; u_int pfik_rulerefs; TAILQ_HEAD(, pfi_dynaddr) pfik_dynaddrs; #ifdef PF_WANT_32_TO_64_COUNTER LIST_ENTRY(pfi_kkif) pfik_allkiflist; #endif }; #endif #define PFI_IFLAG_REFS 0x0001 /* has state references */ #define PFI_IFLAG_SKIP 0x0100 /* skip filtering on interface */ #ifdef _KERNEL struct pf_sctp_multihome_job; TAILQ_HEAD(pf_sctp_multihome_jobs, pf_sctp_multihome_job); struct pf_pdesc { struct { int done; uid_t uid; gid_t gid; } lookup; u_int64_t tot_len; /* Make Mickey money */ union pf_headers { struct tcphdr tcp; struct udphdr udp; struct sctphdr sctp; struct icmp icmp; #ifdef INET6 struct icmp6_hdr icmp6; #endif /* INET6 */ char any[0]; } hdr; struct pf_krule *nat_rule; /* nat/rdr rule applied to packet */ struct pf_addr *src; /* src address */ struct pf_addr *dst; /* dst address */ u_int16_t *sport; u_int16_t *dport; struct pf_mtag *pf_mtag; struct pf_rule_actions act; u_int32_t p_len; /* total length of payload */ u_int16_t *ip_sum; u_int16_t *proto_sum; u_int16_t flags; /* Let SCRUB trigger behavior in * state code. 
Easier than tags */ #define PFDESC_TCP_NORM 0x0001 /* TCP shall be statefully scrubbed */ #define PFDESC_IP_REAS 0x0002 /* IP frags would've been reassembled */ sa_family_t af; u_int8_t proto; u_int8_t tos; u_int8_t dir; /* direction */ u_int8_t sidx; /* key index for source */ u_int8_t didx; /* key index for destination */ #define PFDESC_SCTP_INIT 0x0001 #define PFDESC_SCTP_INIT_ACK 0x0002 #define PFDESC_SCTP_COOKIE 0x0004 #define PFDESC_SCTP_ABORT 0x0008 #define PFDESC_SCTP_SHUTDOWN 0x0010 #define PFDESC_SCTP_SHUTDOWN_COMPLETE 0x0020 #define PFDESC_SCTP_DATA 0x0040 #define PFDESC_SCTP_ASCONF 0x0080 #define PFDESC_SCTP_OTHER 0x0100 #define PFDESC_SCTP_ADD_IP 0x0200 u_int16_t sctp_flags; u_int32_t sctp_initiate_tag; struct pf_sctp_multihome_jobs sctp_multihome_jobs; }; struct pf_sctp_multihome_job { TAILQ_ENTRY(pf_sctp_multihome_job) next; struct pf_pdesc pd; struct pf_addr src; struct pf_addr dst; struct mbuf *m; int op; }; #endif /* flags for RDR options */ #define PF_DPORT_RANGE 0x01 /* Dest port uses range */ #define PF_RPORT_RANGE 0x02 /* RDR'ed port uses range */ /* UDP state enumeration */ #define PFUDPS_NO_TRAFFIC 0 #define PFUDPS_SINGLE 1 #define PFUDPS_MULTIPLE 2 #define PFUDPS_NSTATES 3 /* number of state levels */ #define PFUDPS_NAMES { \ "NO_TRAFFIC", \ "SINGLE", \ "MULTIPLE", \ NULL \ } /* Other protocol state enumeration */ #define PFOTHERS_NO_TRAFFIC 0 #define PFOTHERS_SINGLE 1 #define PFOTHERS_MULTIPLE 2 #define PFOTHERS_NSTATES 3 /* number of state levels */ #define PFOTHERS_NAMES { \ "NO_TRAFFIC", \ "SINGLE", \ "MULTIPLE", \ NULL \ } #define ACTION_SET(a, x) \ do { \ if ((a) != NULL) \ *(a) = (x); \ } while (0) #define REASON_SET(a, x) \ do { \ if ((a) != NULL) \ *(a) = (x); \ if (x < PFRES_MAX) \ counter_u64_add(V_pf_status.counters[x], 1); \ } while (0) enum pf_syncookies_mode { PF_SYNCOOKIES_NEVER = 0, PF_SYNCOOKIES_ALWAYS = 1, PF_SYNCOOKIES_ADAPTIVE = 2, PF_SYNCOOKIES_MODE_MAX = PF_SYNCOOKIES_ADAPTIVE }; #define PF_SYNCOOKIES_HIWATPCT 25 #define PF_SYNCOOKIES_LOWATPCT (PF_SYNCOOKIES_HIWATPCT / 2) #ifdef _KERNEL struct pf_kstatus { counter_u64_t counters[PFRES_MAX]; /* reason for passing/dropping */ counter_u64_t lcounters[KLCNT_MAX]; /* limit counters */ struct pf_counter_u64 fcounters[FCNT_MAX]; /* state operation counters */ counter_u64_t scounters[SCNT_MAX]; /* src_node operation counters */ uint32_t states; uint32_t src_nodes; uint32_t running; uint32_t since; uint32_t debug; uint32_t hostid; char ifname[IFNAMSIZ]; uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; bool keep_counters; enum pf_syncookies_mode syncookies_mode; bool syncookies_active; uint64_t syncookies_inflight[2]; uint32_t states_halfopen; uint32_t reass; }; #endif struct pf_divert { union { struct in_addr ipv4; struct in6_addr ipv6; } addr; u_int16_t port; }; #define PFFRAG_FRENT_HIWAT 5000 /* Number of fragment entries */ #define PFR_KENTRY_HIWAT 200000 /* Number of table entries */ /* * Limit the length of the fragment queue traversal. Remember * search entry points based on the fragment offset. */ #define PF_FRAG_ENTRY_POINTS 16 /* * The number of entries in the fragment queue must be limited * to avoid DoS by linear searching. Instead of a global limit, * use a limit per entry point. For large packets these sum up. 
*/ #define PF_FRAG_ENTRY_LIMIT 64 /* * ioctl parameter structures */ struct pfioc_pooladdr { u_int32_t action; u_int32_t ticket; u_int32_t nr; u_int32_t r_num; u_int8_t r_action; u_int8_t r_last; u_int8_t af; char anchor[MAXPATHLEN]; struct pf_pooladdr addr; }; struct pfioc_rule { u_int32_t action; u_int32_t ticket; u_int32_t pool_ticket; u_int32_t nr; char anchor[MAXPATHLEN]; char anchor_call[MAXPATHLEN]; struct pf_rule rule; }; struct pfioc_natlook { struct pf_addr saddr; struct pf_addr daddr; struct pf_addr rsaddr; struct pf_addr rdaddr; u_int16_t sport; u_int16_t dport; u_int16_t rsport; u_int16_t rdport; sa_family_t af; u_int8_t proto; u_int8_t direction; }; struct pfioc_state { struct pfsync_state_1301 state; }; struct pfioc_src_node_kill { sa_family_t psnk_af; struct pf_rule_addr psnk_src; struct pf_rule_addr psnk_dst; u_int psnk_killed; }; #ifdef _KERNEL struct pf_kstate_kill { struct pf_state_cmp psk_pfcmp; sa_family_t psk_af; int psk_proto; struct pf_rule_addr psk_src; struct pf_rule_addr psk_dst; struct pf_rule_addr psk_rt_addr; char psk_ifname[IFNAMSIZ]; char psk_label[PF_RULE_LABEL_SIZE]; u_int psk_killed; bool psk_kill_match; + bool psk_nat; }; #endif struct pfioc_state_kill { struct pf_state_cmp psk_pfcmp; sa_family_t psk_af; int psk_proto; struct pf_rule_addr psk_src; struct pf_rule_addr psk_dst; char psk_ifname[IFNAMSIZ]; char psk_label[PF_RULE_LABEL_SIZE]; u_int psk_killed; }; struct pfioc_states { int ps_len; union { void *ps_buf; struct pfsync_state_1301 *ps_states; }; }; struct pfioc_states_v2 { int ps_len; uint64_t ps_req_version; union { void *ps_buf; struct pf_state_export *ps_states; }; }; struct pfioc_src_nodes { int psn_len; union { void *psn_buf; struct pf_src_node *psn_src_nodes; }; }; struct pfioc_if { char ifname[IFNAMSIZ]; }; struct pfioc_tm { int timeout; int seconds; }; struct pfioc_limit { int index; unsigned limit; }; struct pfioc_altq_v0 { u_int32_t action; u_int32_t ticket; u_int32_t nr; struct pf_altq_v0 altq; }; struct pfioc_altq_v1 { u_int32_t action; u_int32_t ticket; u_int32_t nr; /* * Placed here so code that only uses the above parameters can be * written entirely in terms of the v0 or v1 type. */ u_int32_t version; struct pf_altq_v1 altq; }; /* * Latest version of struct pfioc_altq_vX. This must move in lock-step with * the latest version of struct pf_altq_vX as it has that struct as a * member. */ #define PFIOC_ALTQ_VERSION PF_ALTQ_VERSION struct pfioc_qstats_v0 { u_int32_t ticket; u_int32_t nr; void *buf; int nbytes; u_int8_t scheduler; }; struct pfioc_qstats_v1 { u_int32_t ticket; u_int32_t nr; void *buf; int nbytes; u_int8_t scheduler; /* * Placed here so code that only uses the above parameters can be * written entirely in terms of the v0 or v1 type. 
*/ u_int32_t version; /* Requested version of stats struct */ }; /* Latest version of struct pfioc_qstats_vX */ #define PFIOC_QSTATS_VERSION 1 struct pfioc_ruleset { u_int32_t nr; char path[MAXPATHLEN]; char name[PF_ANCHOR_NAME_SIZE]; }; #define PF_RULESET_ALTQ (PF_RULESET_MAX) #define PF_RULESET_TABLE (PF_RULESET_MAX+1) #define PF_RULESET_ETH (PF_RULESET_MAX+2) struct pfioc_trans { int size; /* number of elements */ int esize; /* size of each element in bytes */ struct pfioc_trans_e { int rs_num; char anchor[MAXPATHLEN]; u_int32_t ticket; } *array; }; #define PFR_FLAG_ATOMIC 0x00000001 /* unused */ #define PFR_FLAG_DUMMY 0x00000002 #define PFR_FLAG_FEEDBACK 0x00000004 #define PFR_FLAG_CLSTATS 0x00000008 #define PFR_FLAG_ADDRSTOO 0x00000010 #define PFR_FLAG_REPLACE 0x00000020 #define PFR_FLAG_ALLRSETS 0x00000040 #define PFR_FLAG_ALLMASK 0x0000007F #ifdef _KERNEL #define PFR_FLAG_USERIOCTL 0x10000000 #endif struct pfioc_table { struct pfr_table pfrio_table; void *pfrio_buffer; int pfrio_esize; int pfrio_size; int pfrio_size2; int pfrio_nadd; int pfrio_ndel; int pfrio_nchange; int pfrio_flags; u_int32_t pfrio_ticket; }; #define pfrio_exists pfrio_nadd #define pfrio_nzero pfrio_nadd #define pfrio_nmatch pfrio_nadd #define pfrio_naddr pfrio_size2 #define pfrio_setflag pfrio_size2 #define pfrio_clrflag pfrio_nadd struct pfioc_iface { char pfiio_name[IFNAMSIZ]; void *pfiio_buffer; int pfiio_esize; int pfiio_size; int pfiio_nzero; int pfiio_flags; }; /* * ioctl operations */ #define DIOCSTART _IO ('D', 1) #define DIOCSTOP _IO ('D', 2) #define DIOCADDRULE _IOWR('D', 4, struct pfioc_rule) #define DIOCADDRULENV _IOWR('D', 4, struct pfioc_nv) #define DIOCGETRULES _IOWR('D', 6, struct pfioc_rule) #define DIOCGETRULENV _IOWR('D', 7, struct pfioc_nv) /* XXX cut 8 - 17 */ #define DIOCCLRSTATESNV _IOWR('D', 18, struct pfioc_nv) #define DIOCGETSTATE _IOWR('D', 19, struct pfioc_state) #define DIOCGETSTATENV _IOWR('D', 19, struct pfioc_nv) #define DIOCSETSTATUSIF _IOWR('D', 20, struct pfioc_if) #define DIOCGETSTATUSNV _IOWR('D', 21, struct pfioc_nv) #define DIOCCLRSTATUS _IO ('D', 22) #define DIOCNATLOOK _IOWR('D', 23, struct pfioc_natlook) #define DIOCSETDEBUG _IOWR('D', 24, u_int32_t) #define DIOCGETSTATES _IOWR('D', 25, struct pfioc_states) #define DIOCCHANGERULE _IOWR('D', 26, struct pfioc_rule) /* XXX cut 26 - 28 */ #define DIOCSETTIMEOUT _IOWR('D', 29, struct pfioc_tm) #define DIOCGETTIMEOUT _IOWR('D', 30, struct pfioc_tm) #define DIOCADDSTATE _IOWR('D', 37, struct pfioc_state) #define DIOCCLRRULECTRS _IO ('D', 38) #define DIOCGETLIMIT _IOWR('D', 39, struct pfioc_limit) #define DIOCSETLIMIT _IOWR('D', 40, struct pfioc_limit) #define DIOCKILLSTATESNV _IOWR('D', 41, struct pfioc_nv) #define DIOCSTARTALTQ _IO ('D', 42) #define DIOCSTOPALTQ _IO ('D', 43) #define DIOCADDALTQV0 _IOWR('D', 45, struct pfioc_altq_v0) #define DIOCADDALTQV1 _IOWR('D', 45, struct pfioc_altq_v1) #define DIOCGETALTQSV0 _IOWR('D', 47, struct pfioc_altq_v0) #define DIOCGETALTQSV1 _IOWR('D', 47, struct pfioc_altq_v1) #define DIOCGETALTQV0 _IOWR('D', 48, struct pfioc_altq_v0) #define DIOCGETALTQV1 _IOWR('D', 48, struct pfioc_altq_v1) #define DIOCCHANGEALTQV0 _IOWR('D', 49, struct pfioc_altq_v0) #define DIOCCHANGEALTQV1 _IOWR('D', 49, struct pfioc_altq_v1) #define DIOCGETQSTATSV0 _IOWR('D', 50, struct pfioc_qstats_v0) #define DIOCGETQSTATSV1 _IOWR('D', 50, struct pfioc_qstats_v1) #define DIOCBEGINADDRS _IOWR('D', 51, struct pfioc_pooladdr) #define DIOCADDADDR _IOWR('D', 52, struct pfioc_pooladdr) #define DIOCGETADDRS _IOWR('D', 53, 
struct pfioc_pooladdr) #define DIOCGETADDR _IOWR('D', 54, struct pfioc_pooladdr) #define DIOCCHANGEADDR _IOWR('D', 55, struct pfioc_pooladdr) /* XXX cut 55 - 57 */ #define DIOCGETRULESETS _IOWR('D', 58, struct pfioc_ruleset) #define DIOCGETRULESET _IOWR('D', 59, struct pfioc_ruleset) #define DIOCRCLRTABLES _IOWR('D', 60, struct pfioc_table) #define DIOCRADDTABLES _IOWR('D', 61, struct pfioc_table) #define DIOCRDELTABLES _IOWR('D', 62, struct pfioc_table) #define DIOCRGETTABLES _IOWR('D', 63, struct pfioc_table) #define DIOCRGETTSTATS _IOWR('D', 64, struct pfioc_table) #define DIOCRCLRTSTATS _IOWR('D', 65, struct pfioc_table) #define DIOCRCLRADDRS _IOWR('D', 66, struct pfioc_table) #define DIOCRADDADDRS _IOWR('D', 67, struct pfioc_table) #define DIOCRDELADDRS _IOWR('D', 68, struct pfioc_table) #define DIOCRSETADDRS _IOWR('D', 69, struct pfioc_table) #define DIOCRGETADDRS _IOWR('D', 70, struct pfioc_table) #define DIOCRGETASTATS _IOWR('D', 71, struct pfioc_table) #define DIOCRCLRASTATS _IOWR('D', 72, struct pfioc_table) #define DIOCRTSTADDRS _IOWR('D', 73, struct pfioc_table) #define DIOCRSETTFLAGS _IOWR('D', 74, struct pfioc_table) #define DIOCRINADEFINE _IOWR('D', 77, struct pfioc_table) #define DIOCOSFPFLUSH _IO('D', 78) #define DIOCOSFPADD _IOWR('D', 79, struct pf_osfp_ioctl) #define DIOCOSFPGET _IOWR('D', 80, struct pf_osfp_ioctl) #define DIOCXBEGIN _IOWR('D', 81, struct pfioc_trans) #define DIOCXCOMMIT _IOWR('D', 82, struct pfioc_trans) #define DIOCXROLLBACK _IOWR('D', 83, struct pfioc_trans) #define DIOCGETSRCNODES _IOWR('D', 84, struct pfioc_src_nodes) #define DIOCCLRSRCNODES _IO('D', 85) #define DIOCSETHOSTID _IOWR('D', 86, u_int32_t) #define DIOCIGETIFACES _IOWR('D', 87, struct pfioc_iface) #define DIOCSETIFFLAG _IOWR('D', 89, struct pfioc_iface) #define DIOCCLRIFFLAG _IOWR('D', 90, struct pfioc_iface) #define DIOCKILLSRCNODES _IOWR('D', 91, struct pfioc_src_node_kill) #define DIOCGIFSPEEDV0 _IOWR('D', 92, struct pf_ifspeed_v0) #define DIOCGIFSPEEDV1 _IOWR('D', 92, struct pf_ifspeed_v1) #define DIOCGETSTATESV2 _IOWR('D', 93, struct pfioc_states_v2) #define DIOCGETSYNCOOKIES _IOWR('D', 94, struct pfioc_nv) #define DIOCSETSYNCOOKIES _IOWR('D', 95, struct pfioc_nv) #define DIOCKEEPCOUNTERS _IOWR('D', 96, struct pfioc_nv) #define DIOCKEEPCOUNTERS_FREEBSD13 _IOWR('D', 92, struct pfioc_nv) #define DIOCADDETHRULE _IOWR('D', 97, struct pfioc_nv) #define DIOCGETETHRULE _IOWR('D', 98, struct pfioc_nv) #define DIOCGETETHRULES _IOWR('D', 99, struct pfioc_nv) #define DIOCGETETHRULESETS _IOWR('D', 100, struct pfioc_nv) #define DIOCGETETHRULESET _IOWR('D', 101, struct pfioc_nv) #define DIOCSETREASS _IOWR('D', 102, u_int32_t) struct pf_ifspeed_v0 { char ifname[IFNAMSIZ]; u_int32_t baudrate; }; struct pf_ifspeed_v1 { char ifname[IFNAMSIZ]; u_int32_t baudrate32; /* layout identical to struct pf_ifspeed_v0 up to this point */ u_int64_t baudrate; }; /* Latest version of struct pf_ifspeed_vX */ #define PF_IFSPEED_VERSION 1 /* * Compatibility and convenience macros */ #ifndef _KERNEL #ifdef PFIOC_USE_LATEST /* * Maintaining in-tree consumers of the ioctl interface is easier when that * code can be written in terms of old names that refer to the latest interface * version, as that reduces the required changes in the consumers to those * that are functionally necessary to accommodate a new interface version. 
*/ #define pfioc_altq __CONCAT(pfioc_altq_v, PFIOC_ALTQ_VERSION) #define pfioc_qstats __CONCAT(pfioc_qstats_v, PFIOC_QSTATS_VERSION) #define pf_ifspeed __CONCAT(pf_ifspeed_v, PF_IFSPEED_VERSION) #define DIOCADDALTQ __CONCAT(DIOCADDALTQV, PFIOC_ALTQ_VERSION) #define DIOCGETALTQS __CONCAT(DIOCGETALTQSV, PFIOC_ALTQ_VERSION) #define DIOCGETALTQ __CONCAT(DIOCGETALTQV, PFIOC_ALTQ_VERSION) #define DIOCCHANGEALTQ __CONCAT(DIOCCHANGEALTQV, PFIOC_ALTQ_VERSION) #define DIOCGETQSTATS __CONCAT(DIOCGETQSTATSV, PFIOC_QSTATS_VERSION) #define DIOCGIFSPEED __CONCAT(DIOCGIFSPEEDV, PF_IFSPEED_VERSION) #else /* * When building out-of-tree code that is written for the old interface, * such as may exist in ports for example, resolve the old struct tags and * ioctl command names to the v0 versions. */ #define pfioc_altq __CONCAT(pfioc_altq_v, 0) #define pfioc_qstats __CONCAT(pfioc_qstats_v, 0) #define pf_ifspeed __CONCAT(pf_ifspeed_v, 0) #define DIOCADDALTQ __CONCAT(DIOCADDALTQV, 0) #define DIOCGETALTQS __CONCAT(DIOCGETALTQSV, 0) #define DIOCGETALTQ __CONCAT(DIOCGETALTQV, 0) #define DIOCCHANGEALTQ __CONCAT(DIOCCHANGEALTQV, 0) #define DIOCGETQSTATS __CONCAT(DIOCGETQSTATSV, 0) #define DIOCGIFSPEED __CONCAT(DIOCGIFSPEEDV, 0) #endif /* PFIOC_USE_LATEST */ #endif /* _KERNEL */ #ifdef _KERNEL LIST_HEAD(pf_ksrc_node_list, pf_ksrc_node); struct pf_srchash { struct pf_ksrc_node_list nodes; struct mtx lock; }; struct pf_keyhash { LIST_HEAD(, pf_state_key) keys; struct mtx lock; }; struct pf_idhash { LIST_HEAD(, pf_kstate) states; struct mtx lock; }; extern u_long pf_ioctl_maxcount; extern u_long pf_hashmask; extern u_long pf_srchashmask; #define PF_HASHSIZ (131072) #define PF_SRCHASHSIZ (PF_HASHSIZ/4) VNET_DECLARE(struct pf_keyhash *, pf_keyhash); VNET_DECLARE(struct pf_idhash *, pf_idhash); #define V_pf_keyhash VNET(pf_keyhash) #define V_pf_idhash VNET(pf_idhash) VNET_DECLARE(struct pf_srchash *, pf_srchash); #define V_pf_srchash VNET(pf_srchash) #define PF_IDHASH(s) (be64toh((s)->id) % (pf_hashmask + 1)) VNET_DECLARE(void *, pf_swi_cookie); #define V_pf_swi_cookie VNET(pf_swi_cookie) VNET_DECLARE(struct intr_event *, pf_swi_ie); #define V_pf_swi_ie VNET(pf_swi_ie) VNET_DECLARE(struct unrhdr64, pf_stateid); #define V_pf_stateid VNET(pf_stateid) TAILQ_HEAD(pf_altqqueue, pf_altq); VNET_DECLARE(struct pf_altqqueue, pf_altqs[4]); #define V_pf_altqs VNET(pf_altqs) VNET_DECLARE(struct pf_kpalist, pf_pabuf); #define V_pf_pabuf VNET(pf_pabuf) VNET_DECLARE(u_int32_t, ticket_altqs_active); #define V_ticket_altqs_active VNET(ticket_altqs_active) VNET_DECLARE(u_int32_t, ticket_altqs_inactive); #define V_ticket_altqs_inactive VNET(ticket_altqs_inactive) VNET_DECLARE(int, altqs_inactive_open); #define V_altqs_inactive_open VNET(altqs_inactive_open) VNET_DECLARE(u_int32_t, ticket_pabuf); #define V_ticket_pabuf VNET(ticket_pabuf) VNET_DECLARE(struct pf_altqqueue *, pf_altqs_active); #define V_pf_altqs_active VNET(pf_altqs_active) VNET_DECLARE(struct pf_altqqueue *, pf_altq_ifs_active); #define V_pf_altq_ifs_active VNET(pf_altq_ifs_active) VNET_DECLARE(struct pf_altqqueue *, pf_altqs_inactive); #define V_pf_altqs_inactive VNET(pf_altqs_inactive) VNET_DECLARE(struct pf_altqqueue *, pf_altq_ifs_inactive); #define V_pf_altq_ifs_inactive VNET(pf_altq_ifs_inactive) VNET_DECLARE(struct pf_krulequeue, pf_unlinked_rules); #define V_pf_unlinked_rules VNET(pf_unlinked_rules) #ifdef PF_WANT_32_TO_64_COUNTER LIST_HEAD(allkiflist_head, pfi_kkif); VNET_DECLARE(struct allkiflist_head, pf_allkiflist); #define V_pf_allkiflist VNET(pf_allkiflist) 
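/*
 * Illustrative sketch (not part of the diff): how a userland consumer is
 * meant to use the PFIOC_USE_LATEST convenience macros earlier in this
 * header. Defining PFIOC_USE_LATEST before including the header resolves
 * the unversioned names to the newest layouts; otherwise they resolve to
 * v0. The function name and error handling below are hypothetical.
 */
#if 0
#define PFIOC_USE_LATEST
#include <sys/ioctl.h>
#include <string.h>
#include <net/pfvar.h>

static int
example_get_altq(int dev, u_int32_t ticket, u_int32_t nr)
{
	struct pfioc_altq pa;	/* resolves to struct pfioc_altq_v1 */

	memset(&pa, 0, sizeof(pa));
	pa.version = PFIOC_ALTQ_VERSION;	/* ask for the latest layout */
	pa.ticket = ticket;
	pa.nr = nr;
	/* DIOCGETALTQ resolves to DIOCGETALTQV1. */
	return (ioctl(dev, DIOCGETALTQ, &pa));
}
#endif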
VNET_DECLARE(size_t, pf_allkifcount); #define V_pf_allkifcount VNET(pf_allkifcount) VNET_DECLARE(struct pfi_kkif *, pf_kifmarker); #define V_pf_kifmarker VNET(pf_kifmarker) LIST_HEAD(allrulelist_head, pf_krule); VNET_DECLARE(struct allrulelist_head, pf_allrulelist); #define V_pf_allrulelist VNET(pf_allrulelist) VNET_DECLARE(size_t, pf_allrulecount); #define V_pf_allrulecount VNET(pf_allrulecount) VNET_DECLARE(struct pf_krule *, pf_rulemarker); #define V_pf_rulemarker VNET(pf_rulemarker) #endif int pf_start(void); int pf_stop(void); void pf_initialize(void); void pf_mtag_initialize(void); void pf_mtag_cleanup(void); void pf_cleanup(void); struct pf_mtag *pf_get_mtag(struct mbuf *); extern void pf_calc_skip_steps(struct pf_krulequeue *); #ifdef ALTQ extern void pf_altq_ifnet_event(struct ifnet *, int); #endif VNET_DECLARE(uma_zone_t, pf_state_z); #define V_pf_state_z VNET(pf_state_z) VNET_DECLARE(uma_zone_t, pf_state_key_z); #define V_pf_state_key_z VNET(pf_state_key_z) VNET_DECLARE(uma_zone_t, pf_state_scrub_z); #define V_pf_state_scrub_z VNET(pf_state_scrub_z) extern void pf_purge_thread(void *); extern void pf_unload_vnet_purge(void); extern void pf_intr(void *); extern void pf_purge_expired_src_nodes(void); extern int pf_unlink_state(struct pf_kstate *); extern int pf_state_insert(struct pfi_kkif *, struct pfi_kkif *, struct pf_state_key *, struct pf_state_key *, struct pf_kstate *); extern struct pf_kstate *pf_alloc_state(int); extern void pf_free_state(struct pf_kstate *); static __inline void pf_ref_state(struct pf_kstate *s) { refcount_acquire(&s->refs); } static __inline int pf_release_state(struct pf_kstate *s) { if (refcount_release(&s->refs)) { pf_free_state(s); return (1); } else return (0); } static __inline int pf_release_staten(struct pf_kstate *s, u_int n) { if (refcount_releasen(&s->refs, n)) { pf_free_state(s); return (1); } else return (0); } extern struct pf_kstate *pf_find_state_byid(uint64_t, uint32_t); extern struct pf_kstate *pf_find_state_all(struct pf_state_key_cmp *, u_int, int *); extern bool pf_find_state_all_exists(struct pf_state_key_cmp *, u_int); extern struct pf_ksrc_node *pf_find_src_node(struct pf_addr *, struct pf_krule *, sa_family_t, struct pf_srchash **, bool); extern void pf_unlink_src_node(struct pf_ksrc_node *); extern u_int pf_free_src_nodes(struct pf_ksrc_node_list *); extern void pf_print_state(struct pf_kstate *); extern void pf_print_flags(u_int8_t); extern int pf_addr_wrap_neq(struct pf_addr_wrap *, struct pf_addr_wrap *); extern u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t, u_int8_t); extern u_int16_t pf_proto_cksum_fixup(struct mbuf *, u_int16_t, u_int16_t, u_int16_t, u_int8_t); VNET_DECLARE(struct ifnet *, sync_ifp); #define V_sync_ifp VNET(sync_ifp); VNET_DECLARE(struct pf_krule, pf_default_rule); #define V_pf_default_rule VNET(pf_default_rule) extern void pf_addrcpy(struct pf_addr *, struct pf_addr *, sa_family_t); void pf_free_rule(struct pf_krule *); int pf_test_eth(int, int, struct ifnet *, struct mbuf **, struct inpcb *); #ifdef INET int pf_test(int, int, struct ifnet *, struct mbuf **, struct inpcb *, struct pf_rule_actions *); int pf_normalize_ip(struct mbuf **, struct pfi_kkif *, u_short *, struct pf_pdesc *); #endif /* INET */ #ifdef INET6 int pf_test6(int, int, struct ifnet *, struct mbuf **, struct inpcb *, struct pf_rule_actions *); int pf_normalize_ip6(struct mbuf **, struct pfi_kkif *, u_short *, struct pf_pdesc *); void pf_poolmask(struct pf_addr *, struct pf_addr*, struct pf_addr *, struct pf_addr *, 
sa_family_t); void pf_addr_inc(struct pf_addr *, sa_family_t); int pf_refragment6(struct ifnet *, struct mbuf **, struct m_tag *, bool); #endif /* INET6 */ int pf_multihome_scan_init(struct mbuf *, int, int, struct pf_pdesc *, struct pfi_kkif *); int pf_multihome_scan_asconf(struct mbuf *, int, int, struct pf_pdesc *, struct pfi_kkif *); u_int32_t pf_new_isn(struct pf_kstate *); void *pf_pull_hdr(struct mbuf *, int, void *, int, u_short *, u_short *, sa_family_t); void pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t); void pf_change_proto_a(struct mbuf *, void *, u_int16_t *, u_int32_t, u_int8_t); void pf_change_tcp_a(struct mbuf *, void *, u_int16_t *, u_int32_t); void pf_patch_16_unaligned(struct mbuf *, u_int16_t *, void *, u_int16_t, bool, u_int8_t); void pf_patch_32_unaligned(struct mbuf *, u_int16_t *, void *, u_int32_t, bool, u_int8_t); void pf_send_deferred_syn(struct pf_kstate *); int pf_match_addr(u_int8_t, struct pf_addr *, struct pf_addr *, struct pf_addr *, sa_family_t); int pf_match_addr_range(struct pf_addr *, struct pf_addr *, struct pf_addr *, sa_family_t); int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t); void pf_normalize_init(void); void pf_normalize_cleanup(void); int pf_normalize_tcp(struct pfi_kkif *, struct mbuf *, int, int, void *, struct pf_pdesc *); void pf_normalize_tcp_cleanup(struct pf_kstate *); int pf_normalize_tcp_init(struct mbuf *, int, struct pf_pdesc *, struct tcphdr *, struct pf_state_peer *, struct pf_state_peer *); int pf_normalize_tcp_stateful(struct mbuf *, int, struct pf_pdesc *, u_short *, struct tcphdr *, struct pf_kstate *, struct pf_state_peer *, struct pf_state_peer *, int *); int pf_normalize_sctp_init(struct mbuf *, int, struct pf_pdesc *, struct pf_state_peer *, struct pf_state_peer *); int pf_normalize_sctp(int, struct pfi_kkif *, struct mbuf *, int, int, void *, struct pf_pdesc *); u_int32_t pf_state_expires(const struct pf_kstate *); void pf_purge_expired_fragments(void); void pf_purge_fragments(uint32_t); int pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *, int); int pf_socket_lookup(struct pf_pdesc *, struct mbuf *); struct pf_state_key *pf_alloc_state_key(int); void pfr_initialize(void); void pfr_cleanup(void); int pfr_match_addr(struct pfr_ktable *, struct pf_addr *, sa_family_t); void pfr_update_stats(struct pfr_ktable *, struct pf_addr *, sa_family_t, u_int64_t, int, int, int); int pfr_pool_get(struct pfr_ktable *, int *, struct pf_addr *, sa_family_t); void pfr_dynaddr_update(struct pfr_ktable *, struct pfi_dynaddr *); struct pfr_ktable * pfr_attach_table(struct pf_kruleset *, char *); struct pfr_ktable * pfr_eth_attach_table(struct pf_keth_ruleset *, char *); void pfr_detach_table(struct pfr_ktable *); int pfr_clr_tables(struct pfr_table *, int *, int); int pfr_add_tables(struct pfr_table *, int, int *, int); int pfr_del_tables(struct pfr_table *, int, int *, int); int pfr_table_count(struct pfr_table *, int); int pfr_get_tables(struct pfr_table *, struct pfr_table *, int *, int); int pfr_get_tstats(struct pfr_table *, struct pfr_tstats *, int *, int); int pfr_clr_tstats(struct pfr_table *, int, int *, int); int pfr_set_tflags(struct pfr_table *, int, int, int, int *, int *, int); int pfr_clr_addrs(struct pfr_table *, int *, int); int pfr_insert_kentry(struct pfr_ktable *, struct pfr_addr *, long); int pfr_add_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_del_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_set_addrs(struct pfr_table *, struct 
pfr_addr *, int, int *, int *, int *, int *, int, u_int32_t); int pfr_get_addrs(struct pfr_table *, struct pfr_addr *, int *, int); int pfr_get_astats(struct pfr_table *, struct pfr_astats *, int *, int); int pfr_clr_astats(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_tst_addrs(struct pfr_table *, struct pfr_addr *, int, int *, int); int pfr_ina_begin(struct pfr_table *, u_int32_t *, int *, int); int pfr_ina_rollback(struct pfr_table *, u_int32_t, int *, int); int pfr_ina_commit(struct pfr_table *, u_int32_t, int *, int *, int); int pfr_ina_define(struct pfr_table *, struct pfr_addr *, int, int *, int *, u_int32_t, int); MALLOC_DECLARE(PFI_MTYPE); VNET_DECLARE(struct pfi_kkif *, pfi_all); #define V_pfi_all VNET(pfi_all) void pfi_initialize(void); void pfi_initialize_vnet(void); void pfi_cleanup(void); void pfi_cleanup_vnet(void); void pfi_kkif_ref(struct pfi_kkif *); void pfi_kkif_unref(struct pfi_kkif *); struct pfi_kkif *pfi_kkif_find(const char *); struct pfi_kkif *pfi_kkif_attach(struct pfi_kkif *, const char *); int pfi_kkif_match(struct pfi_kkif *, struct pfi_kkif *); void pfi_kkif_purge(void); int pfi_match_addr(struct pfi_dynaddr *, struct pf_addr *, sa_family_t); int pfi_dynaddr_setup(struct pf_addr_wrap *, sa_family_t); void pfi_dynaddr_remove(struct pfi_dynaddr *); void pfi_dynaddr_copyout(struct pf_addr_wrap *); void pfi_update_status(const char *, struct pf_status *); void pfi_get_ifaces(const char *, struct pfi_kif *, int *); int pfi_set_flags(const char *, int); int pfi_clear_flags(const char *, int); int pf_match_tag(struct mbuf *, struct pf_krule *, int *, int); int pf_tag_packet(struct mbuf *, struct pf_pdesc *, int); int pf_addr_cmp(struct pf_addr *, struct pf_addr *, sa_family_t); u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t, sa_family_t); u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t, sa_family_t); struct mbuf *pf_build_tcp(const struct pf_krule *, sa_family_t, const struct pf_addr *, const struct pf_addr *, u_int16_t, u_int16_t, u_int32_t, u_int32_t, u_int8_t, u_int16_t, u_int16_t, u_int8_t, bool, u_int16_t, u_int16_t, int); void pf_send_tcp(const struct pf_krule *, sa_family_t, const struct pf_addr *, const struct pf_addr *, u_int16_t, u_int16_t, u_int32_t, u_int32_t, u_int8_t, u_int16_t, u_int16_t, u_int8_t, bool, u_int16_t, u_int16_t, int); void pf_syncookies_init(void); void pf_syncookies_cleanup(void); int pf_get_syncookies(struct pfioc_nv *); int pf_set_syncookies(struct pfioc_nv *); int pf_synflood_check(struct pf_pdesc *); void pf_syncookie_send(struct mbuf *m, int off, struct pf_pdesc *); bool pf_syncookie_check(struct pf_pdesc *); u_int8_t pf_syncookie_validate(struct pf_pdesc *); struct mbuf * pf_syncookie_recreate_syn(uint8_t, int, struct pf_pdesc *); VNET_DECLARE(struct pf_kstatus, pf_status); #define V_pf_status VNET(pf_status) struct pf_limit { uma_zone_t zone; u_int limit; }; VNET_DECLARE(struct pf_limit, pf_limits[PF_LIMIT_MAX]); #define V_pf_limits VNET(pf_limits) #endif /* _KERNEL */ #ifdef _KERNEL VNET_DECLARE(struct pf_kanchor_global, pf_anchors); #define V_pf_anchors VNET(pf_anchors) VNET_DECLARE(struct pf_kanchor, pf_main_anchor); #define V_pf_main_anchor VNET(pf_main_anchor) VNET_DECLARE(struct pf_keth_anchor_global, pf_keth_anchors); #define V_pf_keth_anchors VNET(pf_keth_anchors) #define pf_main_ruleset V_pf_main_anchor.ruleset VNET_DECLARE(struct pf_keth_anchor, pf_main_keth_anchor); #define V_pf_main_keth_anchor VNET(pf_main_keth_anchor) VNET_DECLARE(struct pf_keth_ruleset*, pf_keth); #define V_pf_keth 
VNET(pf_keth) void pf_init_kruleset(struct pf_kruleset *); void pf_init_keth(struct pf_keth_ruleset *); int pf_kanchor_setup(struct pf_krule *, const struct pf_kruleset *, const char *); int pf_kanchor_nvcopyout(const struct pf_kruleset *, const struct pf_krule *, nvlist_t *); int pf_kanchor_copyout(const struct pf_kruleset *, const struct pf_krule *, struct pfioc_rule *); void pf_kanchor_remove(struct pf_krule *); void pf_remove_if_empty_kruleset(struct pf_kruleset *); struct pf_kruleset *pf_find_kruleset(const char *); struct pf_kruleset *pf_find_or_create_kruleset(const char *); void pf_rs_initialize(void); struct pf_krule *pf_krule_alloc(void); void pf_remove_if_empty_keth_ruleset( struct pf_keth_ruleset *); struct pf_keth_ruleset *pf_find_keth_ruleset(const char *); struct pf_keth_anchor *pf_find_keth_anchor(const char *); int pf_keth_anchor_setup(struct pf_keth_rule *, const struct pf_keth_ruleset *, const char *); int pf_keth_anchor_nvcopyout( const struct pf_keth_ruleset *, const struct pf_keth_rule *, nvlist_t *); struct pf_keth_ruleset *pf_find_or_create_keth_ruleset(const char *); void pf_keth_anchor_remove(struct pf_keth_rule *); int pf_ioctl_addrule(struct pf_krule *, uint32_t, uint32_t, const char *, const char *, uid_t uid, pid_t); void pf_krule_free(struct pf_krule *); #endif /* The fingerprint functions can be linked into userland programs (tcpdump) */ int pf_osfp_add(struct pf_osfp_ioctl *); #ifdef _KERNEL struct pf_osfp_enlist * pf_osfp_fingerprint(struct pf_pdesc *, struct mbuf *, int, const struct tcphdr *); #endif /* _KERNEL */ void pf_osfp_flush(void); int pf_osfp_get(struct pf_osfp_ioctl *); int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t); #ifdef _KERNEL void pf_print_host(struct pf_addr *, u_int16_t, sa_family_t); void pf_step_into_anchor(struct pf_kanchor_stackframe *, int *, struct pf_kruleset **, int, struct pf_krule **, struct pf_krule **, int *); int pf_step_out_of_anchor(struct pf_kanchor_stackframe *, int *, struct pf_kruleset **, int, struct pf_krule **, struct pf_krule **, int *); void pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *, int *, struct pf_keth_ruleset **, struct pf_keth_rule **, struct pf_keth_rule **, int *); int pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *, int *, struct pf_keth_ruleset **, struct pf_keth_rule **, struct pf_keth_rule **, int *); u_short pf_map_addr(u_int8_t, struct pf_krule *, struct pf_addr *, struct pf_addr *, struct pfi_kkif **nkif, struct pf_addr *, struct pf_ksrc_node **); struct pf_krule *pf_get_translation(struct pf_pdesc *, struct mbuf *, int, struct pfi_kkif *, struct pf_ksrc_node **, struct pf_state_key **, struct pf_state_key **, struct pf_addr *, struct pf_addr *, uint16_t, uint16_t, struct pf_kanchor_stackframe *); struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t); struct pf_state_key *pf_state_key_clone(struct pf_state_key *); void pf_rule_to_actions(struct pf_krule *, struct pf_rule_actions *); int pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd); #ifdef INET void pf_scrub_ip(struct mbuf **, struct pf_pdesc *); #endif /* INET */ #ifdef INET6 void pf_scrub_ip6(struct mbuf **, struct pf_pdesc *); #endif /* INET6 */ struct pfi_kkif *pf_kkif_create(int); void pf_kkif_free(struct pfi_kkif *); void pf_kkif_zero(struct pfi_kkif *); #endif /* _KERNEL */ #endif /* _NET_PFVAR_H_ */ diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c index 2eae03a908ec..851bf8ee5b63 100644 --- 
a/sys/netpfil/pf/pf_ioctl.c +++ b/sys/netpfil/pf/pf_ioctl.c @@ -1,6740 +1,6740 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Daniel Hartmeier * Copyright (c) 2002,2003 Henning Brauer * Copyright (c) 2012 Gleb Smirnoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ */ #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_bpf.h" #include "opt_pf.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif /* INET6 */ #ifdef ALTQ #include #endif SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int"); SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int"); SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int"); SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int"); static struct pf_kpool *pf_get_kpool(const char *, u_int32_t, u_int8_t, u_int32_t, u_int8_t, u_int8_t, u_int8_t); static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *); static void pf_empty_kpool(struct pf_kpalist *); static int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *); static int pf_begin_eth(uint32_t *, const char *); static void pf_rollback_eth_cb(struct epoch_context *); static int pf_rollback_eth(uint32_t, const char *); static int pf_commit_eth(uint32_t, const char *); static void pf_free_eth_rule(struct pf_keth_rule *); #ifdef ALTQ static int pf_begin_altq(u_int32_t *); static int pf_rollback_altq(u_int32_t); static int pf_commit_altq(u_int32_t); static int pf_enable_altq(struct pf_altq *); static int pf_disable_altq(struct pf_altq *); static uint16_t pf_qname2qid(const char *); static void pf_qid_unref(uint16_t); #endif /* ALTQ */ static int pf_begin_rules(u_int32_t *, int, const char *); static int 
pf_rollback_rules(u_int32_t, int, char *); static int pf_setup_pfsync_matching(struct pf_kruleset *); static void pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *); static void pf_hash_rule(struct pf_krule *); static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); static int pf_commit_rules(u_int32_t, int, char *); static int pf_addr_setup(struct pf_kruleset *, struct pf_addr_wrap *, sa_family_t); static void pf_addr_copyout(struct pf_addr_wrap *); static void pf_src_node_copy(const struct pf_ksrc_node *, struct pf_src_node *); #ifdef ALTQ static int pf_export_kaltq(struct pf_altq *, struct pfioc_altq_v1 *, size_t); static int pf_import_kaltq(struct pfioc_altq_v1 *, struct pf_altq *, size_t); #endif /* ALTQ */ VNET_DEFINE(struct pf_krule, pf_default_rule); static __inline int pf_krule_compare(struct pf_krule *, struct pf_krule *); RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare); #ifdef ALTQ VNET_DEFINE_STATIC(int, pf_altq_running); #define V_pf_altq_running VNET(pf_altq_running) #endif #define TAGID_MAX 50000 struct pf_tagname { TAILQ_ENTRY(pf_tagname) namehash_entries; TAILQ_ENTRY(pf_tagname) taghash_entries; char name[PF_TAG_NAME_SIZE]; uint16_t tag; int ref; }; struct pf_tagset { TAILQ_HEAD(, pf_tagname) *namehash; TAILQ_HEAD(, pf_tagname) *taghash; unsigned int mask; uint32_t seed; BITSET_DEFINE(, TAGID_MAX) avail; }; VNET_DEFINE(struct pf_tagset, pf_tags); #define V_pf_tags VNET(pf_tags) static unsigned int pf_rule_tag_hashsize; #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN, &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT, "Size of pf(4) rule tag hashtable"); #ifdef ALTQ VNET_DEFINE(struct pf_tagset, pf_qids); #define V_pf_qids VNET(pf_qids) static unsigned int pf_queue_tag_hashsize; #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN, &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT, "Size of pf(4) queue tag hashtable"); #endif VNET_DEFINE(uma_zone_t, pf_tag_z); #define V_pf_tag_z VNET(pf_tag_z) static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db"); static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules"); #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE #endif VNET_DEFINE_STATIC(bool, pf_filter_local) = false; #define V_pf_filter_local VNET(pf_filter_local) SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(pf_filter_local), false, "Enable filtering for packets delivered to local network stack"); #ifdef PF_DEFAULT_TO_DROP VNET_DEFINE_STATIC(bool, default_to_drop) = true; #else VNET_DEFINE_STATIC(bool, default_to_drop); #endif #define V_default_to_drop VNET(default_to_drop) SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET, &VNET_NAME(default_to_drop), false, "Make the default rule drop all packets."); static void pf_init_tagset(struct pf_tagset *, unsigned int *, unsigned int); static void pf_cleanup_tagset(struct pf_tagset *); static uint16_t tagname2hashindex(const struct pf_tagset *, const char *); static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t); static u_int16_t tagname2tag(struct pf_tagset *, const char *); static u_int16_t pf_tagname2tag(const char *); static void tag_unref(struct pf_tagset *, u_int16_t); /* Callers pass the printf(9) arguments as one parenthesized list, e.g. DPFPRINTF(PF_DEBUG_MISC, ("pf: bad ticket %u\n", ticket)); the message prints only when V_pf_status.debug is at least (n). */ #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x struct cdev *pf_dev; /* * XXX - These are new and need to be checked when moving to a new version */ static void 
pf_clear_all_states(void); static unsigned int pf_clear_states(const struct pf_kstate_kill *); static void pf_killstates(struct pf_kstate_kill *, unsigned int *); static int pf_killstates_row(struct pf_kstate_kill *, struct pf_idhash *); static int pf_killstates_nv(struct pfioc_nv *); static int pf_clearstates_nv(struct pfioc_nv *); static int pf_getstate(struct pfioc_nv *); static int pf_getstatus(struct pfioc_nv *); static int pf_clear_tables(void); static void pf_clear_srcnodes(struct pf_ksrc_node *); static void pf_kill_srcnodes(struct pfioc_src_node_kill *); static int pf_keepcounters(struct pfioc_nv *); static void pf_tbladdr_copyout(struct pf_addr_wrap *); /* * Wrapper functions for pfil(9) hooks */ static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); #ifdef INET static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); #endif #ifdef INET6 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp); #endif static void hook_pf_eth(void); static void hook_pf(void); static void dehook_pf_eth(void); static void dehook_pf(void); static int shutdown_pf(void); static int pf_load(void); static void pf_unload(void); static struct cdevsw pf_cdevsw = { .d_ioctl = pfioctl, .d_name = PF_NAME, .d_version = D_VERSION, }; VNET_DEFINE_STATIC(bool, pf_pfil_hooked); #define V_pf_pfil_hooked VNET(pf_pfil_hooked) VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked); #define V_pf_pfil_eth_hooked VNET(pf_pfil_eth_hooked) /* * We need a flag that is neither hooked nor running to know when * the VNET is "valid". We primarily need this to control (global) * external events, e.g., eventhandlers. */ VNET_DEFINE(int, pf_vnet_active); #define V_pf_vnet_active VNET(pf_vnet_active) int pf_end_threads; struct proc *pf_purge_proc; VNET_DEFINE(struct rmlock, pf_rules_lock); VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock); #define V_pf_ioctl_lock VNET(pf_ioctl_lock) struct sx pf_end_lock; /* pfsync */ VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr); VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr); VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr); VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr); VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr); VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr); pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr; /* pflog */ pflog_packet_t *pflog_packet_ptr = NULL; /* * Copy a user-provided string, returning an error if truncation would occur. * Avoid scanning past "sz" bytes in the source string since there's no * guarantee that it's nul-terminated. 
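 * For example (illustrative only): with sz == IFNAMSIZ, a name shorter than
 * IFNAMSIZ is copied whole, while a source with no NUL in its first IFNAMSIZ
 * bytes makes strnlen() return sz, so the call fails with EINVAL rather than
 * silently truncating.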
*/ static int pf_user_strcpy(char *dst, const char *src, size_t sz) { if (strnlen(src, sz) == sz) return (EINVAL); (void)strlcpy(dst, src, sz); return (0); } static void pfattach_vnet(void) { u_int32_t *my_timeout = V_pf_default_rule.timeout; bzero(&V_pf_status, sizeof(V_pf_status)); pf_initialize(); pfr_initialize(); pfi_initialize_vnet(); pf_normalize_init(); pf_syncookies_init(); V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; RB_INIT(&V_pf_anchors); pf_init_kruleset(&pf_main_ruleset); pf_init_keth(V_pf_keth); /* default rule should never be garbage collected */ V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS; V_pf_default_rule.nr = -1; V_pf_default_rule.rtableid = -1; pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK); for (int i = 0; i < 2; i++) { pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK); pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK); } V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK); V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK); V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK); V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, M_WAITOK | M_ZERO); #ifdef PF_WANT_32_TO_64_COUNTER V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO); V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO); PF_RULES_WLOCK(); LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist); V_pf_allrulecount++; LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); PF_RULES_WUNLOCK(); #endif /* initialize default timeouts */ my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; V_pf_status.debug = PF_DEBUG_URGENT; /* * XXX This is different from OpenBSD, where reassembly is enabled by * default. In FreeBSD we expect people to still use scrub rules and * switch to the new syntax later. Only when they switch must they * explicitly enable reassembly. We could change the default once the * scrub rule functionality is hopefully removed some day in the future. 
*/ V_pf_status.reass = 0; V_pf_pfil_hooked = false; V_pf_pfil_eth_hooked = false; /* XXX do our best to avoid a conflict */ V_pf_status.hostid = arc4random(); for (int i = 0; i < PFRES_MAX; i++) V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK); for (int i = 0; i < KLCNT_MAX; i++) V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK); for (int i = 0; i < FCNT_MAX; i++) pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK); for (int i = 0; i < SCNT_MAX; i++) V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK); if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET, INTR_MPSAFE, &V_pf_swi_cookie) != 0) /* XXXGL: leaked all above. */ return; } static struct pf_kpool * pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action, u_int32_t rule_number, u_int8_t r_last, u_int8_t active, u_int8_t check_ticket) { struct pf_kruleset *ruleset; struct pf_krule *rule; int rs_num; ruleset = pf_find_kruleset(anchor); if (ruleset == NULL) return (NULL); rs_num = pf_get_ruleset_number(rule_action); if (rs_num >= PF_RULESET_MAX) return (NULL); if (active) { if (check_ticket && ticket != ruleset->rules[rs_num].active.ticket) return (NULL); if (r_last) rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, pf_krulequeue); else rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); } else { if (check_ticket && ticket != ruleset->rules[rs_num].inactive.ticket) return (NULL); if (r_last) rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_krulequeue); else rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); } if (!r_last) { while ((rule != NULL) && (rule->nr != rule_number)) rule = TAILQ_NEXT(rule, entries); } if (rule == NULL) return (NULL); return (&rule->rpool); } static void pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb) { struct pf_kpooladdr *mv_pool_pa; while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { TAILQ_REMOVE(poola, mv_pool_pa, entries); TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); } } static void pf_empty_kpool(struct pf_kpalist *poola) { struct pf_kpooladdr *pa; while ((pa = TAILQ_FIRST(poola)) != NULL) { switch (pa->addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(pa->addr.p.dyn); break; case PF_ADDR_TABLE: /* XXX: this could be unfinished pooladdr on pabuf */ if (pa->addr.p.tbl != NULL) pfr_detach_table(pa->addr.p.tbl); break; } if (pa->kif) pfi_kkif_unref(pa->kif); TAILQ_REMOVE(poola, pa, entries); free(pa, M_PFRULE); } } static void pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule) { PF_RULES_WASSERT(); PF_UNLNKDRULES_ASSERT(); TAILQ_REMOVE(rulequeue, rule, entries); rule->rule_ref |= PFRULE_REFS; TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries); } static void pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule) { PF_RULES_WASSERT(); PF_UNLNKDRULES_LOCK(); pf_unlink_rule_locked(rulequeue, rule); PF_UNLNKDRULES_UNLOCK(); } static void pf_free_eth_rule(struct pf_keth_rule *rule) { PF_RULES_WASSERT(); if (rule == NULL) return; if (rule->tag) tag_unref(&V_pf_tags, rule->tag); if (rule->match_tag) tag_unref(&V_pf_tags, rule->match_tag); #ifdef ALTQ pf_qid_unref(rule->qid); #endif if (rule->bridge_to) pfi_kkif_unref(rule->bridge_to); if (rule->kif) pfi_kkif_unref(rule->kif); if (rule->ipsrc.addr.type == PF_ADDR_TABLE) pfr_detach_table(rule->ipsrc.addr.p.tbl); if (rule->ipdst.addr.type == PF_ADDR_TABLE) pfr_detach_table(rule->ipdst.addr.p.tbl); counter_u64_free(rule->evaluations); for (int i = 0; i < 2; i++) { counter_u64_free(rule->packets[i]); counter_u64_free(rule->bytes[i]); } 
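/* packets[] and bytes[] hold one counter per direction (in/out), hence the two elements freed above. */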
uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); pf_keth_anchor_remove(rule); free(rule, M_PFRULE); } void pf_free_rule(struct pf_krule *rule) { PF_RULES_WASSERT(); PF_CONFIG_ASSERT(); if (rule->tag) tag_unref(&V_pf_tags, rule->tag); if (rule->match_tag) tag_unref(&V_pf_tags, rule->match_tag); #ifdef ALTQ if (rule->pqid != rule->qid) pf_qid_unref(rule->pqid); pf_qid_unref(rule->qid); #endif switch (rule->src.addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(rule->src.addr.p.dyn); break; case PF_ADDR_TABLE: pfr_detach_table(rule->src.addr.p.tbl); break; } switch (rule->dst.addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(rule->dst.addr.p.dyn); break; case PF_ADDR_TABLE: pfr_detach_table(rule->dst.addr.p.tbl); break; } if (rule->overload_tbl) pfr_detach_table(rule->overload_tbl); if (rule->kif) pfi_kkif_unref(rule->kif); pf_kanchor_remove(rule); pf_empty_kpool(&rule->rpool.list); pf_krule_free(rule); } static void pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size, unsigned int default_size) { unsigned int i; unsigned int hashsize; if (*tunable_size == 0 || !powerof2(*tunable_size)) *tunable_size = default_size; hashsize = *tunable_size; ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH, M_WAITOK); ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH, M_WAITOK); ts->mask = hashsize - 1; ts->seed = arc4random(); for (i = 0; i < hashsize; i++) { TAILQ_INIT(&ts->namehash[i]); TAILQ_INIT(&ts->taghash[i]); } BIT_FILL(TAGID_MAX, &ts->avail); } static void pf_cleanup_tagset(struct pf_tagset *ts) { unsigned int i; unsigned int hashsize; struct pf_tagname *t, *tmp; /* * Only need to clean up one of the hashes as each tag is hashed * into each table. */ hashsize = ts->mask + 1; for (i = 0; i < hashsize; i++) TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp) uma_zfree(V_pf_tag_z, t); free(ts->namehash, M_PFHASH); free(ts->taghash, M_PFHASH); } static uint16_t tagname2hashindex(const struct pf_tagset *ts, const char *tagname) { size_t len; len = strnlen(tagname, PF_TAG_NAME_SIZE - 1); return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask); } static uint16_t tag2hashindex(const struct pf_tagset *ts, uint16_t tag) { return (tag & ts->mask); } static u_int16_t tagname2tag(struct pf_tagset *ts, const char *tagname) { struct pf_tagname *tag; u_int32_t index; u_int16_t new_tagid; PF_RULES_WASSERT(); index = tagname2hashindex(ts, tagname); TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries) if (strcmp(tagname, tag->name) == 0) { tag->ref++; return (tag->tag); } /* * new entry * * to avoid fragmentation, we do a linear search from the beginning * and take the first free slot we find. */ new_tagid = BIT_FFS(TAGID_MAX, &ts->avail); /* * Tags are 1-based, with valid tags in the range [1..TAGID_MAX]. * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits * set. It may also return a bit number greater than TAGID_MAX due * to rounding of the number of bits in the vector up to a multiple * of the vector word size at declaration/allocation time. */ if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) return (0); /* Mark the tag as in use. 
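* (BIT_FFS() above returned a 1-based index, so the bit to clear below is * new_tagid - 1.)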
Bits are 0-based for BIT_CLR() */ BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); /* allocate and fill new struct pf_tagname */ tag = uma_zalloc(V_pf_tag_z, M_NOWAIT); if (tag == NULL) return (0); strlcpy(tag->name, tagname, sizeof(tag->name)); tag->tag = new_tagid; tag->ref = 1; /* Insert into namehash */ TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); /* Insert into taghash */ index = tag2hashindex(ts, new_tagid); TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); return (tag->tag); } static void tag_unref(struct pf_tagset *ts, u_int16_t tag) { struct pf_tagname *t; uint16_t index; PF_RULES_WASSERT(); index = tag2hashindex(ts, tag); TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) if (tag == t->tag) { if (--t->ref == 0) { TAILQ_REMOVE(&ts->taghash[index], t, taghash_entries); index = tagname2hashindex(ts, t->name); TAILQ_REMOVE(&ts->namehash[index], t, namehash_entries); /* Bits are 0-based for BIT_SET() */ BIT_SET(TAGID_MAX, tag - 1, &ts->avail); uma_zfree(V_pf_tag_z, t); } break; } } static uint16_t pf_tagname2tag(const char *tagname) { return (tagname2tag(&V_pf_tags, tagname)); } static int pf_begin_eth(uint32_t *ticket, const char *anchor) { struct pf_keth_rule *rule, *tmp; struct pf_keth_ruleset *rs; PF_RULES_WASSERT(); rs = pf_find_or_create_keth_ruleset(anchor); if (rs == NULL) return (EINVAL); /* Purge old inactive rules. */ TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) { TAILQ_REMOVE(rs->inactive.rules, rule, entries); pf_free_eth_rule(rule); } *ticket = ++rs->inactive.ticket; rs->inactive.open = 1; return (0); } static void pf_rollback_eth_cb(struct epoch_context *ctx) { struct pf_keth_ruleset *rs; rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx); CURVNET_SET(rs->vnet); PF_RULES_WLOCK(); pf_rollback_eth(rs->inactive.ticket, rs->anchor ? rs->anchor->path : ""); PF_RULES_WUNLOCK(); CURVNET_RESTORE(); } static int pf_rollback_eth(uint32_t ticket, const char *anchor) { struct pf_keth_rule *rule, *tmp; struct pf_keth_ruleset *rs; PF_RULES_WASSERT(); rs = pf_find_keth_ruleset(anchor); if (rs == NULL) return (EINVAL); if (!rs->inactive.open || ticket != rs->inactive.ticket) return (0); /* Purge old inactive rules. 
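* (This mirrors the purge done when a transaction starts in pf_begin_eth().)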
*/ TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) { TAILQ_REMOVE(rs->inactive.rules, rule, entries); pf_free_eth_rule(rule); } rs->inactive.open = 0; pf_remove_if_empty_keth_ruleset(rs); return (0); } #define PF_SET_SKIP_STEPS(i) \ do { \ while (head[i] != cur) { \ head[i]->skip[i].ptr = cur; \ head[i] = TAILQ_NEXT(head[i], entries); \ } \ } while (0) static void pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules) { struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT]; int i; cur = TAILQ_FIRST(rules); prev = cur; for (i = 0; i < PFE_SKIP_COUNT; ++i) head[i] = cur; while (cur != NULL) { if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) PF_SET_SKIP_STEPS(PFE_SKIP_IFP); if (cur->direction != prev->direction) PF_SET_SKIP_STEPS(PFE_SKIP_DIR); if (cur->proto != prev->proto) PF_SET_SKIP_STEPS(PFE_SKIP_PROTO); if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0) PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR); if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0) PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR); if (cur->ipsrc.neg != prev->ipsrc.neg || pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr)) PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR); if (cur->ipdst.neg != prev->ipdst.neg || pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr)) PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR); prev = cur; cur = TAILQ_NEXT(cur, entries); } for (i = 0; i < PFE_SKIP_COUNT; ++i) PF_SET_SKIP_STEPS(i); } static int pf_commit_eth(uint32_t ticket, const char *anchor) { struct pf_keth_ruleq *rules; struct pf_keth_ruleset *rs; rs = pf_find_keth_ruleset(anchor); if (rs == NULL) { return (EINVAL); } if (!rs->inactive.open || ticket != rs->inactive.ticket) return (EBUSY); PF_RULES_WASSERT(); pf_eth_calc_skip_steps(rs->inactive.rules); rules = rs->active.rules; ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules); rs->inactive.rules = rules; rs->inactive.ticket = rs->active.ticket; /* Clean up inactive rules (i.e. previously active rules), only when * we're sure they're no longer used. 
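* NET_EPOCH_CALL() defers pf_rollback_eth_cb() until all current network * epoch readers have drained, so no packet can still be walking the old * rules when they are freed.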
*/ NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx); return (0); } #ifdef ALTQ static uint16_t pf_qname2qid(const char *qname) { return (tagname2tag(&V_pf_qids, qname)); } static void pf_qid_unref(uint16_t qid) { tag_unref(&V_pf_qids, qid); } static int pf_begin_altq(u_int32_t *ticket) { struct pf_altq *altq, *tmp; int error = 0; PF_RULES_WASSERT(); /* Purge the old altq lists */ TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* detach and destroy the discipline */ error = altq_remove(altq); } free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altq_ifs_inactive); TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { pf_qid_unref(altq->qid); free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altqs_inactive); if (error) return (error); *ticket = ++V_ticket_altqs_inactive; V_altqs_inactive_open = 1; return (0); } static int pf_rollback_altq(u_int32_t ticket) { struct pf_altq *altq, *tmp; int error = 0; PF_RULES_WASSERT(); if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) return (0); /* Purge the old altq lists */ TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* detach and destroy the discipline */ error = altq_remove(altq); } free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altq_ifs_inactive); TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { pf_qid_unref(altq->qid); free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altqs_inactive); V_altqs_inactive_open = 0; return (error); } static int pf_commit_altq(u_int32_t ticket) { struct pf_altqqueue *old_altqs, *old_altq_ifs; struct pf_altq *altq, *tmp; int err, error = 0; PF_RULES_WASSERT(); if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) return (EBUSY); /* swap altqs, keep the old. 
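* The inactive lists become active below; the old active lists are purged * once the new disciplines have been attached.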
*/ old_altqs = V_pf_altqs_active; old_altq_ifs = V_pf_altq_ifs_active; V_pf_altqs_active = V_pf_altqs_inactive; V_pf_altq_ifs_active = V_pf_altq_ifs_inactive; V_pf_altqs_inactive = old_altqs; V_pf_altq_ifs_inactive = old_altq_ifs; V_ticket_altqs_active = V_ticket_altqs_inactive; /* Attach new disciplines */ TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* attach the discipline */ error = altq_pfattach(altq); if (error == 0 && V_pf_altq_running) error = pf_enable_altq(altq); if (error != 0) return (error); } } /* Purge the old altq lists */ TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { /* detach and destroy the discipline */ if (V_pf_altq_running) error = pf_disable_altq(altq); err = altq_pfdetach(altq); if (err != 0 && error == 0) error = err; err = altq_remove(altq); if (err != 0 && error == 0) error = err; } free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altq_ifs_inactive); TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { pf_qid_unref(altq->qid); free(altq, M_PFALTQ); } TAILQ_INIT(V_pf_altqs_inactive); V_altqs_inactive_open = 0; return (error); } static int pf_enable_altq(struct pf_altq *altq) { struct ifnet *ifp; struct tb_profile tb; int error = 0; if ((ifp = ifunit(altq->ifname)) == NULL) return (EINVAL); if (ifp->if_snd.altq_type != ALTQT_NONE) error = altq_enable(&ifp->if_snd); /* set tokenbucket regulator */ if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { tb.rate = altq->ifbandwidth; tb.depth = altq->tbrsize; error = tbr_set(&ifp->if_snd, &tb); } return (error); } static int pf_disable_altq(struct pf_altq *altq) { struct ifnet *ifp; struct tb_profile tb; int error; if ((ifp = ifunit(altq->ifname)) == NULL) return (EINVAL); /* * If the discipline is no longer referenced, it was overridden * by a new one; in that case, just return. */ if (altq->altq_disc != ifp->if_snd.altq_disc) return (0); error = altq_disable(&ifp->if_snd); if (error == 0) { /* clear tokenbucket regulator */ tb.rate = 0; error = tbr_set(&ifp->if_snd, &tb); } return (error); } static int pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket, struct pf_altq *altq) { struct ifnet *ifp1; int error = 0; /* Deactivate the interface in question */ altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; if ((ifp1 = ifunit(altq->ifname)) == NULL || (remove && ifp1 == ifp)) { altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; } else { error = altq_add(ifp1, altq); if (ticket != V_ticket_altqs_inactive) error = EBUSY; if (error) free(altq, M_PFALTQ); } return (error); } void pf_altq_ifnet_event(struct ifnet *ifp, int remove) { struct pf_altq *a1, *a2, *a3; u_int32_t ticket; int error = 0; /* * No need to re-evaluate the configuration for events on interfaces * that do not support ALTQ, as it's not possible for such * interfaces to be part of the configuration.
*/ if (!ALTQ_IS_READY(&ifp->if_snd)) return; /* Interrupt userland queue modifications */ if (V_altqs_inactive_open) pf_rollback_altq(V_ticket_altqs_inactive); /* Start new altq ruleset */ if (pf_begin_altq(&ticket)) return; /* Copy the current active set */ TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); if (a2 == NULL) { error = ENOMEM; break; } bcopy(a1, a2, sizeof(struct pf_altq)); error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); if (error) break; TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); } if (error) goto out; TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); if (a2 == NULL) { error = ENOMEM; break; } bcopy(a1, a2, sizeof(struct pf_altq)); if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { error = EBUSY; free(a2, M_PFALTQ); break; } a2->altq_disc = NULL; TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { if (strncmp(a3->ifname, a2->ifname, IFNAMSIZ) == 0) { a2->altq_disc = a3->altq_disc; break; } } error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); if (error) break; TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); } out: if (error != 0) pf_rollback_altq(ticket); else pf_commit_altq(ticket); } #endif /* ALTQ */ static struct pf_krule_global * pf_rule_tree_alloc(int flags) { struct pf_krule_global *tree; tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags); if (tree == NULL) return (NULL); RB_INIT(tree); return (tree); } static void pf_rule_tree_free(struct pf_krule_global *tree) { free(tree, M_TEMP); } static int pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) { struct pf_krule_global *tree; struct pf_kruleset *rs; struct pf_krule *rule; PF_RULES_WASSERT(); if (rs_num < 0 || rs_num >= PF_RULESET_MAX) return (EINVAL); tree = pf_rule_tree_alloc(M_NOWAIT); if (tree == NULL) return (ENOMEM); rs = pf_find_or_create_kruleset(anchor); if (rs == NULL) { free(tree, M_TEMP); return (EINVAL); } pf_rule_tree_free(rs->rules[rs_num].inactive.tree); rs->rules[rs_num].inactive.tree = tree; while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); rs->rules[rs_num].inactive.rcount--; } *ticket = ++rs->rules[rs_num].inactive.ticket; rs->rules[rs_num].inactive.open = 1; return (0); } static int pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) { struct pf_kruleset *rs; struct pf_krule *rule; PF_RULES_WASSERT(); if (rs_num < 0 || rs_num >= PF_RULESET_MAX) return (EINVAL); rs = pf_find_kruleset(anchor); if (rs == NULL || !rs->rules[rs_num].inactive.open || rs->rules[rs_num].inactive.ticket != ticket) return (0); while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); rs->rules[rs_num].inactive.rcount--; } rs->rules[rs_num].inactive.open = 0; return (0); } #define PF_MD5_UPD(st, elm) \ MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) #define PF_MD5_UPD_STR(st, elm) \ MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ (stor) = htonl((st)->elm); \ MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ } while (0) #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ (stor) = htons((st)->elm); \ MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ } while (0) static void pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) { PF_MD5_UPD(pfr, addr.type); switch (pfr->addr.type) { case PF_ADDR_DYNIFTL: PF_MD5_UPD(pfr, addr.v.ifname); PF_MD5_UPD(pfr, 
addr.iflags); break; case PF_ADDR_TABLE: PF_MD5_UPD(pfr, addr.v.tblname); break; case PF_ADDR_ADDRMASK: /* XXX ignore af? */ PF_MD5_UPD(pfr, addr.v.a.addr.addr32); PF_MD5_UPD(pfr, addr.v.a.mask.addr32); break; } PF_MD5_UPD(pfr, port[0]); PF_MD5_UPD(pfr, port[1]); PF_MD5_UPD(pfr, neg); PF_MD5_UPD(pfr, port_op); } static void pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule) { u_int16_t x; u_int32_t y; pf_hash_rule_addr(ctx, &rule->src); pf_hash_rule_addr(ctx, &rule->dst); for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) PF_MD5_UPD_STR(rule, label[i]); PF_MD5_UPD_STR(rule, ifname); PF_MD5_UPD_STR(rule, match_tagname); PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ PF_MD5_UPD_HTONL(rule, os_fingerprint, y); PF_MD5_UPD_HTONL(rule, prob, y); PF_MD5_UPD_HTONL(rule, uid.uid[0], y); PF_MD5_UPD_HTONL(rule, uid.uid[1], y); PF_MD5_UPD(rule, uid.op); PF_MD5_UPD_HTONL(rule, gid.gid[0], y); PF_MD5_UPD_HTONL(rule, gid.gid[1], y); PF_MD5_UPD(rule, gid.op); PF_MD5_UPD_HTONL(rule, rule_flag, y); PF_MD5_UPD(rule, action); PF_MD5_UPD(rule, direction); PF_MD5_UPD(rule, af); PF_MD5_UPD(rule, quick); PF_MD5_UPD(rule, ifnot); PF_MD5_UPD(rule, match_tag_not); PF_MD5_UPD(rule, natpass); PF_MD5_UPD(rule, keep_state); PF_MD5_UPD(rule, proto); PF_MD5_UPD(rule, type); PF_MD5_UPD(rule, code); PF_MD5_UPD(rule, flags); PF_MD5_UPD(rule, flagset); PF_MD5_UPD(rule, allow_opts); PF_MD5_UPD(rule, rt); PF_MD5_UPD(rule, tos); PF_MD5_UPD(rule, scrub_flags); PF_MD5_UPD(rule, min_ttl); PF_MD5_UPD(rule, set_tos); if (rule->anchor != NULL) PF_MD5_UPD_STR(rule, anchor->path); } static void pf_hash_rule(struct pf_krule *rule) { MD5_CTX ctx; MD5Init(&ctx); pf_hash_rule_rolling(&ctx, rule); MD5Final(rule->md5sum, &ctx); } static int pf_krule_compare(struct pf_krule *a, struct pf_krule *b) { return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH)); } static int pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) { struct pf_kruleset *rs; struct pf_krule *rule, **old_array, *old_rule; struct pf_krulequeue *old_rules; struct pf_krule_global *old_tree; int error; u_int32_t old_rcount; PF_RULES_WASSERT(); if (rs_num < 0 || rs_num >= PF_RULESET_MAX) return (EINVAL); rs = pf_find_kruleset(anchor); if (rs == NULL || !rs->rules[rs_num].inactive.open || ticket != rs->rules[rs_num].inactive.ticket) return (EBUSY); /* Calculate checksum for the main ruleset */ if (rs == &pf_main_ruleset) { error = pf_setup_pfsync_matching(rs); if (error != 0) return (error); } /* Swap rules, keep the old. */ old_rules = rs->rules[rs_num].active.ptr; old_rcount = rs->rules[rs_num].active.rcount; old_array = rs->rules[rs_num].active.ptr_array; old_tree = rs->rules[rs_num].active.tree; rs->rules[rs_num].active.ptr = rs->rules[rs_num].inactive.ptr; rs->rules[rs_num].active.ptr_array = rs->rules[rs_num].inactive.ptr_array; rs->rules[rs_num].active.tree = rs->rules[rs_num].inactive.tree; rs->rules[rs_num].active.rcount = rs->rules[rs_num].inactive.rcount; /* Attempt to preserve counter information. 
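* When keep_counters is set, each rule in the new ruleset is looked up in * the old ruleset's RB tree by its MD5 hash (pf_krule_compare()) and * inherits the matching old rule's counter values.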
*/ if (V_pf_status.keep_counters && old_tree != NULL) { TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr, entries) { old_rule = RB_FIND(pf_krule_global, old_tree, rule); if (old_rule == NULL) { continue; } pf_counter_u64_critical_enter(); pf_counter_u64_add_protected(&rule->evaluations, pf_counter_u64_fetch(&old_rule->evaluations)); pf_counter_u64_add_protected(&rule->packets[0], pf_counter_u64_fetch(&old_rule->packets[0])); pf_counter_u64_add_protected(&rule->packets[1], pf_counter_u64_fetch(&old_rule->packets[1])); pf_counter_u64_add_protected(&rule->bytes[0], pf_counter_u64_fetch(&old_rule->bytes[0])); pf_counter_u64_add_protected(&rule->bytes[1], pf_counter_u64_fetch(&old_rule->bytes[1])); pf_counter_u64_critical_exit(); } } rs->rules[rs_num].inactive.ptr = old_rules; rs->rules[rs_num].inactive.ptr_array = old_array; rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */ rs->rules[rs_num].inactive.rcount = old_rcount; rs->rules[rs_num].active.ticket = rs->rules[rs_num].inactive.ticket; pf_calc_skip_steps(rs->rules[rs_num].active.ptr); /* Purge the old rule list. */ PF_UNLNKDRULES_LOCK(); while ((rule = TAILQ_FIRST(old_rules)) != NULL) pf_unlink_rule_locked(old_rules, rule); PF_UNLNKDRULES_UNLOCK(); if (rs->rules[rs_num].inactive.ptr_array) free(rs->rules[rs_num].inactive.ptr_array, M_TEMP); rs->rules[rs_num].inactive.ptr_array = NULL; rs->rules[rs_num].inactive.rcount = 0; rs->rules[rs_num].inactive.open = 0; pf_remove_if_empty_kruleset(rs); free(old_tree, M_TEMP); return (0); } static int pf_setup_pfsync_matching(struct pf_kruleset *rs) { MD5_CTX ctx; struct pf_krule *rule; int rs_cnt; u_int8_t digest[PF_MD5_DIGEST_LENGTH]; MD5Init(&ctx); for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { /* XXX PF_RULESET_SCRUB as well? 
*/ if (rs_cnt == PF_RULESET_SCRUB) continue; if (rs->rules[rs_cnt].inactive.ptr_array) free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); rs->rules[rs_cnt].inactive.ptr_array = NULL; if (rs->rules[rs_cnt].inactive.rcount) { rs->rules[rs_cnt].inactive.ptr_array = mallocarray(rs->rules[rs_cnt].inactive.rcount, sizeof(struct pf_rule **), M_TEMP, M_NOWAIT); if (!rs->rules[rs_cnt].inactive.ptr_array) return (ENOMEM); } TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, entries) { pf_hash_rule_rolling(&ctx, rule); (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; } } MD5Final(digest, &ctx); memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum)); return (0); } static int pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr) { int error = 0; switch (addr->type) { case PF_ADDR_TABLE: addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname); if (addr->p.tbl == NULL) error = ENOMEM; break; default: error = EINVAL; } return (error); } static int pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr, sa_family_t af) { int error = 0; switch (addr->type) { case PF_ADDR_TABLE: addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname); if (addr->p.tbl == NULL) error = ENOMEM; break; case PF_ADDR_DYNIFTL: error = pfi_dynaddr_setup(addr, af); break; } return (error); } static void pf_addr_copyout(struct pf_addr_wrap *addr) { switch (addr->type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_copyout(addr); break; case PF_ADDR_TABLE: pf_tbladdr_copyout(addr); break; } } static void pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out) { int secs = time_uptime, diff; bzero(out, sizeof(struct pf_src_node)); bcopy(&in->addr, &out->addr, sizeof(struct pf_addr)); bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr)); if (in->rule.ptr != NULL) out->rule.nr = in->rule.ptr->nr; for (int i = 0; i < 2; i++) { out->bytes[i] = counter_u64_fetch(in->bytes[i]); out->packets[i] = counter_u64_fetch(in->packets[i]); } out->states = in->states; out->conn = in->conn; out->af = in->af; out->ruletype = in->ruletype; out->creation = secs - in->creation; if (out->expire > secs) out->expire -= secs; else out->expire = 0; /* Adjust the connection rate estimate. */ diff = secs - in->conn_rate.last; if (diff >= in->conn_rate.seconds) out->conn_rate.count = 0; else out->conn_rate.count -= in->conn_rate.count * diff / in->conn_rate.seconds; } #ifdef ALTQ /* * Handle export of struct pf_kaltq to user binaries that may be using any * version of struct pf_altq. 
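* Version 0 used narrower fields, so values are saturated on export via * SATU16()/SATU32() rather than truncated; e.g., an ifbandwidth above * UINT_MAX is reported as UINT_MAX to v0 consumers.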
*/ static int pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) { u_int32_t version; if (ioc_size == sizeof(struct pfioc_altq_v0)) version = 0; else version = pa->version; if (version > PFIOC_ALTQ_VERSION) return (EINVAL); #define ASSIGN(x) exported_q->x = q->x #define COPY(x) \ bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) switch (version) { case 0: { struct pf_altq_v0 *exported_q = &((struct pfioc_altq_v0 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); exported_q->tbrsize = SATU16(q->tbrsize); exported_q->ifbandwidth = SATU32(q->ifbandwidth); COPY(qname); COPY(parent); ASSIGN(parent_qid); exported_q->bandwidth = SATU32(q->bandwidth); ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); if (q->scheduler == ALTQT_HFSC) { #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ SATU32(q->pq_u.hfsc_opts.x) ASSIGN_OPT_SATU32(rtsc_m1); ASSIGN_OPT(rtsc_d); ASSIGN_OPT_SATU32(rtsc_m2); ASSIGN_OPT_SATU32(lssc_m1); ASSIGN_OPT(lssc_d); ASSIGN_OPT_SATU32(lssc_m2); ASSIGN_OPT_SATU32(ulsc_m1); ASSIGN_OPT(ulsc_d); ASSIGN_OPT_SATU32(ulsc_m2); ASSIGN_OPT(flags); #undef ASSIGN_OPT #undef ASSIGN_OPT_SATU32 } else COPY(pq_u); ASSIGN(qid); break; } case 1: { struct pf_altq_v1 *exported_q = &((struct pfioc_altq_v1 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); ASSIGN(ifbandwidth); COPY(qname); COPY(parent); ASSIGN(parent_qid); ASSIGN(bandwidth); ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); COPY(pq_u); ASSIGN(qid); break; } default: panic("%s: unhandled struct pfioc_altq version", __func__); break; } #undef ASSIGN #undef COPY #undef SATU16 #undef SATU32 return (0); } /* * Handle import to struct pf_kaltq of struct pf_altq from user binaries * that may be using any version of it. */ static int pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) { u_int32_t version; if (ioc_size == sizeof(struct pfioc_altq_v0)) version = 0; else version = pa->version; if (version > PFIOC_ALTQ_VERSION) return (EINVAL); #define ASSIGN(x) q->x = imported_q->x #define COPY(x) \ bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) switch (version) { case 0: { struct pf_altq_v0 *imported_q = &((struct pfioc_altq_v0 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); /* 16-bit -> 32-bit */ ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ COPY(qname); COPY(parent); ASSIGN(parent_qid); ASSIGN(bandwidth); /* 32-bit -> 64-bit */ ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); if (imported_q->scheduler == ALTQT_HFSC) { #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x /* * The m1 and m2 parameters are being copied from * 32-bit to 64-bit. 
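* Since these are widening copies, no truncation or saturation is needed.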
*/ ASSIGN_OPT(rtsc_m1); ASSIGN_OPT(rtsc_d); ASSIGN_OPT(rtsc_m2); ASSIGN_OPT(lssc_m1); ASSIGN_OPT(lssc_d); ASSIGN_OPT(lssc_m2); ASSIGN_OPT(ulsc_m1); ASSIGN_OPT(ulsc_d); ASSIGN_OPT(ulsc_m2); ASSIGN_OPT(flags); #undef ASSIGN_OPT } else COPY(pq_u); ASSIGN(qid); break; } case 1: { struct pf_altq_v1 *imported_q = &((struct pfioc_altq_v1 *)pa)->altq; COPY(ifname); ASSIGN(scheduler); ASSIGN(tbrsize); ASSIGN(ifbandwidth); COPY(qname); COPY(parent); ASSIGN(parent_qid); ASSIGN(bandwidth); ASSIGN(priority); ASSIGN(local_flags); ASSIGN(qlimit); ASSIGN(flags); COPY(pq_u); ASSIGN(qid); break; } default: panic("%s: unhandled struct pfioc_altq version", __func__); break; } #undef ASSIGN #undef COPY return (0); } static struct pf_altq * pf_altq_get_nth_active(u_int32_t n) { struct pf_altq *altq; u_int32_t nr; nr = 0; TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { if (nr == n) return (altq); nr++; } TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { if (nr == n) return (altq); nr++; } return (NULL); } #endif /* ALTQ */ struct pf_krule * pf_krule_alloc(void) { struct pf_krule *rule; rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, M_WAITOK | M_ZERO); return (rule); } void pf_krule_free(struct pf_krule *rule) { #ifdef PF_WANT_32_TO_64_COUNTER bool wowned; #endif if (rule == NULL) return; #ifdef PF_WANT_32_TO_64_COUNTER if (rule->allrulelinked) { wowned = PF_RULES_WOWNED(); if (!wowned) PF_RULES_WLOCK(); LIST_REMOVE(rule, allrulelist); V_pf_allrulecount--; if (!wowned) PF_RULES_WUNLOCK(); } #endif pf_counter_u64_deinit(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_deinit(&rule->packets[i]); pf_counter_u64_deinit(&rule->bytes[i]); } counter_u64_free(rule->states_cur); counter_u64_free(rule->states_tot); counter_u64_free(rule->src_nodes); uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); mtx_destroy(&rule->rpool.mtx); free(rule, M_PFRULE); } static void pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool, struct pf_pooladdr *pool) { bzero(pool, sizeof(*pool)); bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); } static int pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool, struct pf_kpooladdr *kpool) { int ret; bzero(kpool, sizeof(*kpool)); bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); ret = pf_user_strcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname)); return (ret); } static void pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) { _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); kpool->tblidx = pool->tblidx; kpool->proxy_port[0] = pool->proxy_port[0]; kpool->proxy_port[1] = pool->proxy_port[1]; kpool->opts = pool->opts; } static int pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule) { int ret; #ifndef INET if (rule->af == AF_INET) { return (EAFNOSUPPORT); } #endif /* INET */ #ifndef INET6 if (rule->af == AF_INET6) { return (EAFNOSUPPORT); } #endif /* INET6 */ ret = pf_check_rule_addr(&rule->src); if (ret != 0) return (ret); ret = pf_check_rule_addr(&rule->dst); if (ret != 0) return (ret); bcopy(&rule->src, &krule->src, sizeof(rule->src)); bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); ret = pf_user_strcpy(krule->label[0], 
rule->label, sizeof(rule->label)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->tagname, rule->tagname, sizeof(rule->tagname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, sizeof(rule->match_tagname)); if (ret != 0) return (ret); ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, sizeof(rule->overload_tblname)); if (ret != 0) return (ret); pf_pool_to_kpool(&rule->rpool, &krule->rpool); /* Don't allow userspace to set evaluations, packets or bytes. */ /* kif, anchor, overload_tbl are not copied over. */ krule->os_fingerprint = rule->os_fingerprint; krule->rtableid = rule->rtableid; bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); krule->max_states = rule->max_states; krule->max_src_nodes = rule->max_src_nodes; krule->max_src_states = rule->max_src_states; krule->max_src_conn = rule->max_src_conn; krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; krule->qid = rule->qid; krule->pqid = rule->pqid; krule->nr = rule->nr; krule->prob = rule->prob; krule->cuid = rule->cuid; krule->cpid = rule->cpid; krule->return_icmp = rule->return_icmp; krule->return_icmp6 = rule->return_icmp6; krule->max_mss = rule->max_mss; krule->tag = rule->tag; krule->match_tag = rule->match_tag; krule->scrub_flags = rule->scrub_flags; bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); krule->rule_flag = rule->rule_flag; krule->action = rule->action; krule->direction = rule->direction; krule->log = rule->log; krule->logif = rule->logif; krule->quick = rule->quick; krule->ifnot = rule->ifnot; krule->match_tag_not = rule->match_tag_not; krule->natpass = rule->natpass; krule->keep_state = rule->keep_state; krule->af = rule->af; krule->proto = rule->proto; krule->type = rule->type; krule->code = rule->code; krule->flags = rule->flags; krule->flagset = rule->flagset; krule->min_ttl = rule->min_ttl; krule->allow_opts = rule->allow_opts; krule->rt = rule->rt; krule->return_ttl = rule->return_ttl; krule->tos = rule->tos; krule->set_tos = rule->set_tos; krule->flush = rule->flush; krule->prio = rule->prio; krule->set_prio[0] = rule->set_prio[0]; krule->set_prio[1] = rule->set_prio[1]; bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); return (0); } int pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, uint32_t pool_ticket, const char *anchor, const char *anchor_call, uid_t uid, pid_t pid) { struct pf_kruleset *ruleset; struct pf_krule *tail; struct pf_kpooladdr *pa; struct pfi_kkif *kif = NULL; int rs_num; int error = 0; if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { error = EINVAL; goto errout_unlocked; } #define ERROUT(x) ERROUT_FUNCTION(errout, x) if (rule->ifname[0]) kif = pf_kkif_create(M_WAITOK); pf_counter_u64_init(&rule->evaluations, M_WAITOK); for (int i = 0; i < 2; i++) { pf_counter_u64_init(&rule->packets[i], M_WAITOK); pf_counter_u64_init(&rule->bytes[i], M_WAITOK); } rule->states_cur = counter_u64_alloc(M_WAITOK); rule->states_tot = counter_u64_alloc(M_WAITOK); rule->src_nodes = counter_u64_alloc(M_WAITOK); rule->cuid = uid; rule->cpid = pid; TAILQ_INIT(&rule->rpool.list); 
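/* Note the lock ordering: the config lock is taken before the rules write lock. */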
PF_CONFIG_LOCK(); PF_RULES_WLOCK(); #ifdef PF_WANT_32_TO_64_COUNTER LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); MPASS(!rule->allrulelinked); rule->allrulelinked = true; V_pf_allrulecount++; #endif ruleset = pf_find_kruleset(anchor); if (ruleset == NULL) ERROUT(EINVAL); rs_num = pf_get_ruleset_number(rule->action); if (rs_num >= PF_RULESET_MAX) ERROUT(EINVAL); if (ticket != ruleset->rules[rs_num].inactive.ticket) { DPFPRINTF(PF_DEBUG_MISC, ("ticket: %d != [%d]%d\n", ticket, rs_num, ruleset->rules[rs_num].inactive.ticket)); ERROUT(EBUSY); } if (pool_ticket != V_ticket_pabuf) { DPFPRINTF(PF_DEBUG_MISC, ("pool_ticket: %d != %d\n", pool_ticket, V_ticket_pabuf)); ERROUT(EBUSY); } /* * XXXMJG hack: there is no mechanism to ensure they started the * transaction. Ticket checked above may happen to match by accident, * even if nobody called DIOCXBEGIN, let alone this process. * Partially work around it by checking if the RB tree got allocated, * see pf_begin_rules. */ if (ruleset->rules[rs_num].inactive.tree == NULL) { ERROUT(EINVAL); } tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_krulequeue); if (tail) rule->nr = tail->nr + 1; else rule->nr = 0; if (rule->ifname[0]) { rule->kif = pfi_kkif_attach(kif, rule->ifname); kif = NULL; pfi_kkif_ref(rule->kif); } else rule->kif = NULL; if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) error = EBUSY; #ifdef ALTQ /* set queue IDs */ if (rule->qname[0] != 0) { if ((rule->qid = pf_qname2qid(rule->qname)) == 0) error = EBUSY; else if (rule->pqname[0] != 0) { if ((rule->pqid = pf_qname2qid(rule->pqname)) == 0) error = EBUSY; } else rule->pqid = rule->qid; } #endif if (rule->tagname[0]) if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) error = EBUSY; if (rule->match_tagname[0]) if ((rule->match_tag = pf_tagname2tag(rule->match_tagname)) == 0) error = EBUSY; if (rule->rt && !rule->direction) error = EINVAL; if (!rule->log) rule->logif = 0; if (rule->logif >= PFLOGIFS_MAX) error = EINVAL; if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) error = ENOMEM; if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) error = ENOMEM; if (pf_kanchor_setup(rule, ruleset, anchor_call)) error = EINVAL; if (rule->scrub_flags & PFSTATE_SETPRIO && (rule->set_prio[0] > PF_PRIO_MAX || rule->set_prio[1] > PF_PRIO_MAX)) error = EINVAL; TAILQ_FOREACH(pa, &V_pf_pabuf, entries) if (pa->addr.type == PF_ADDR_TABLE) { pa->addr.p.tbl = pfr_attach_table(ruleset, pa->addr.v.tblname); if (pa->addr.p.tbl == NULL) error = ENOMEM; } rule->overload_tbl = NULL; if (rule->overload_tblname[0]) { if ((rule->overload_tbl = pfr_attach_table(ruleset, rule->overload_tblname)) == NULL) error = EINVAL; else rule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; } pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || (rule->action == PF_BINAT)) && rule->anchor == NULL) || (rule->rt > PF_NOPFROUTE)) && (TAILQ_FIRST(&rule->rpool.list) == NULL)) error = EINVAL; if (error) { pf_free_rule(rule); rule = NULL; ERROUT(error); } rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, rule, entries); ruleset->rules[rs_num].inactive.rcount++; PF_RULES_WUNLOCK(); pf_hash_rule(rule); if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { PF_RULES_WLOCK(); TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); ruleset->rules[rs_num].inactive.rcount--; pf_free_rule(rule); rule = NULL; ERROUT(EEXIST); } PF_CONFIG_UNLOCK(); return (0); #undef ERROUT 
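/* * Error paths: "errout" drops the locks taken above, then falls through to * "errout_unlocked", which frees the preallocated kif and the rule itself. */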
errout: PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); errout_unlocked: pf_kkif_free(kif); pf_krule_free(rule); return (error); } static bool pf_label_match(const struct pf_krule *rule, const char *label) { int i = 0; while (*rule->label[i]) { if (strcmp(rule->label[i], label) == 0) return (true); i++; } return (false); } static unsigned int pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) { struct pf_kstate *s; int more = 0; s = pf_find_state_all(key, dir, &more); if (s == NULL) return (0); if (more) { PF_STATE_UNLOCK(s); return (0); } pf_unlink_state(s); return (1); } static int pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) { struct pf_kstate *s; struct pf_state_key *sk; struct pf_addr *srcaddr, *dstaddr; struct pf_state_key_cmp match_key; int idx, killed = 0; unsigned int dir; u_int16_t srcport, dstport; struct pfi_kkif *kif; relock_DIOCKILLSTATES: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { /* For floating states look at the original kif. */ kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; - sk = s->key[PF_SK_WIRE]; + sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE]; if (s->direction == PF_OUT) { srcaddr = &sk->addr[1]; dstaddr = &sk->addr[0]; srcport = sk->port[1]; dstport = sk->port[0]; } else { srcaddr = &sk->addr[0]; dstaddr = &sk->addr[1]; srcport = sk->port[0]; dstport = sk->port[1]; } if (psk->psk_af && sk->af != psk->psk_af) continue; if (psk->psk_proto && psk->psk_proto != sk->proto) continue; if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) continue; if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) continue; if (! PF_MATCHA(psk->psk_rt_addr.neg, &psk->psk_rt_addr.addr.v.a.addr, &psk->psk_rt_addr.addr.v.a.mask, &s->rt_addr, sk->af)) continue; if (psk->psk_src.port_op != 0 && ! pf_match_port(psk->psk_src.port_op, psk->psk_src.port[0], psk->psk_src.port[1], srcport)) continue; if (psk->psk_dst.port_op != 0 && ! pf_match_port(psk->psk_dst.port_op, psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) continue; if (psk->psk_label[0] && ! pf_label_match(s->rule.ptr, psk->psk_label)) continue; if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, kif->pfik_name)) continue; if (psk->psk_kill_match) { /* Create the key to find matching states, with lock * held. */ bzero(&match_key, sizeof(match_key)); if (s->direction == PF_OUT) { dir = PF_IN; - idx = PF_SK_STACK; + idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK; } else { dir = PF_OUT; - idx = PF_SK_WIRE; + idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE; } match_key.af = s->key[idx]->af; match_key.proto = s->key[idx]->proto; PF_ACPY(&match_key.addr[0], &s->key[idx]->addr[1], match_key.af); match_key.port[0] = s->key[idx]->port[1]; PF_ACPY(&match_key.addr[1], &s->key[idx]->addr[0], match_key.af); match_key.port[1] = s->key[idx]->port[0]; } pf_unlink_state(s); killed++; if (psk->psk_kill_match) killed += pf_kill_matching_state(&match_key, dir); goto relock_DIOCKILLSTATES; } PF_HASHROW_UNLOCK(ih); return (killed); } int pf_start(void) { int error = 0; sx_xlock(&V_pf_ioctl_lock); if (V_pf_status.running) error = EEXIST; else { hook_pf(); if (! 
TAILQ_EMPTY(V_pf_keth->active.rules)) hook_pf_eth(); V_pf_status.running = 1; V_pf_status.since = time_second; new_unrhdr64(&V_pf_stateid, time_second); DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); } sx_xunlock(&V_pf_ioctl_lock); return (error); } int pf_stop(void) { int error = 0; sx_xlock(&V_pf_ioctl_lock); if (!V_pf_status.running) error = ENOENT; else { V_pf_status.running = 0; dehook_pf(); dehook_pf_eth(); V_pf_status.since = time_second; DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); } sx_xunlock(&V_pf_ioctl_lock); return (error); } static int pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { int error = 0; PF_RULES_RLOCK_TRACKER; #define ERROUT_IOCTL(target, x) \ do { \ error = (x); \ SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ goto target; \ } while (0) /* XXX keep in sync with switch() below */ if (securelevel_gt(td->td_ucred, 2)) switch (cmd) { case DIOCGETRULES: case DIOCGETRULENV: case DIOCGETADDRS: case DIOCGETADDR: case DIOCGETSTATE: case DIOCGETSTATENV: case DIOCSETSTATUSIF: case DIOCGETSTATUSNV: case DIOCCLRSTATUS: case DIOCNATLOOK: case DIOCSETDEBUG: #ifdef COMPAT_FREEBSD14 case DIOCGETSTATES: case DIOCGETSTATESV2: #endif case DIOCGETTIMEOUT: case DIOCCLRRULECTRS: case DIOCGETLIMIT: case DIOCGETALTQSV0: case DIOCGETALTQSV1: case DIOCGETALTQV0: case DIOCGETALTQV1: case DIOCGETQSTATSV0: case DIOCGETQSTATSV1: case DIOCGETRULESETS: case DIOCGETRULESET: case DIOCRGETTABLES: case DIOCRGETTSTATS: case DIOCRCLRTSTATS: case DIOCRCLRADDRS: case DIOCRADDADDRS: case DIOCRDELADDRS: case DIOCRSETADDRS: case DIOCRGETADDRS: case DIOCRGETASTATS: case DIOCRCLRASTATS: case DIOCRTSTADDRS: case DIOCOSFPGET: case DIOCGETSRCNODES: case DIOCCLRSRCNODES: case DIOCGETSYNCOOKIES: case DIOCIGETIFACES: case DIOCGIFSPEEDV0: case DIOCGIFSPEEDV1: case DIOCSETIFFLAG: case DIOCCLRIFFLAG: case DIOCGETETHRULES: case DIOCGETETHRULE: case DIOCGETETHRULESETS: case DIOCGETETHRULESET: break; case DIOCRCLRTABLES: case DIOCRADDTABLES: case DIOCRDELTABLES: case DIOCRSETTFLAGS: if (((struct pfioc_table *)addr)->pfrio_flags & PFR_FLAG_DUMMY) break; /* dummy operation ok */ return (EPERM); default: return (EPERM); } if (!(flags & FWRITE)) switch (cmd) { case DIOCGETRULES: case DIOCGETADDRS: case DIOCGETADDR: case DIOCGETSTATE: case DIOCGETSTATENV: case DIOCGETSTATUSNV: #ifdef COMPAT_FREEBSD14 case DIOCGETSTATES: case DIOCGETSTATESV2: #endif case DIOCGETTIMEOUT: case DIOCGETLIMIT: case DIOCGETALTQSV0: case DIOCGETALTQSV1: case DIOCGETALTQV0: case DIOCGETALTQV1: case DIOCGETQSTATSV0: case DIOCGETQSTATSV1: case DIOCGETRULESETS: case DIOCGETRULESET: case DIOCNATLOOK: case DIOCRGETTABLES: case DIOCRGETTSTATS: case DIOCRGETADDRS: case DIOCRGETASTATS: case DIOCRTSTADDRS: case DIOCOSFPGET: case DIOCGETSRCNODES: case DIOCGETSYNCOOKIES: case DIOCIGETIFACES: case DIOCGIFSPEEDV1: case DIOCGIFSPEEDV0: case DIOCGETRULENV: case DIOCGETETHRULES: case DIOCGETETHRULE: case DIOCGETETHRULESETS: case DIOCGETETHRULESET: break; case DIOCRCLRTABLES: case DIOCRADDTABLES: case DIOCRDELTABLES: case DIOCRCLRTSTATS: case DIOCRCLRADDRS: case DIOCRADDADDRS: case DIOCRDELADDRS: case DIOCRSETADDRS: case DIOCRSETTFLAGS: if (((struct pfioc_table *)addr)->pfrio_flags & PFR_FLAG_DUMMY) { flags |= FWRITE; /* need write lock for dummy */ break; /* dummy operation ok */ } return (EACCES); default: return (EACCES); } CURVNET_SET(TD_TO_VNET(td)); switch (cmd) { #ifdef COMPAT_FREEBSD14 case DIOCSTART: error = pf_start(); break; case DIOCSTOP: error = pf_stop(); break; #endif case DIOCGETETHRULES: { struct 
pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl; void *packed; struct pf_keth_rule *tail; struct pf_keth_ruleset *rs; u_int32_t ticket, nr; const char *anchor = ""; nvl = NULL; packed = NULL; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); /* Copy the request in */ packed = malloc(nv->len, M_NVLIST, M_WAITOK); if (packed == NULL) ERROUT(ENOMEM); error = copyin(nv->data, packed, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(packed, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "anchor")) ERROUT(EBADMSG); anchor = nvlist_get_string(nvl, "anchor"); rs = pf_find_keth_ruleset(anchor); nvlist_destroy(nvl); nvl = NULL; free(packed, M_NVLIST); packed = NULL; if (rs == NULL) ERROUT(ENOENT); /* Reply */ nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); PF_RULES_RLOCK(); ticket = rs->active.ticket; tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); if (tail) nr = tail->nr + 1; else nr = 0; PF_RULES_RUNLOCK(); nvlist_add_number(nvl, "ticket", ticket); nvlist_add_number(nvl, "nr", nr); packed = nvlist_pack(nvl, &nv->len); if (packed == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(packed, nv->data, nv->len); #undef ERROUT DIOCGETETHRULES_error: free(packed, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCGETETHRULE: { struct epoch_tracker et; struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_rule *rule = NULL; struct pf_keth_ruleset *rs; u_int32_t ticket, nr; bool clear = false; const char *anchor; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EBADMSG); ticket = nvlist_get_number(nvl, "ticket"); if (! nvlist_exists_string(nvl, "anchor")) ERROUT(EBADMSG); anchor = nvlist_get_string(nvl, "anchor"); if (nvlist_exists_bool(nvl, "clear")) clear = nvlist_get_bool(nvl, "clear"); if (clear && !(flags & FWRITE)) ERROUT(EACCES); if (! nvlist_exists_number(nvl, "nr")) ERROUT(EBADMSG); nr = nvlist_get_number(nvl, "nr"); PF_RULES_RLOCK(); rs = pf_find_keth_ruleset(anchor); if (rs == NULL) { PF_RULES_RUNLOCK(); ERROUT(ENOENT); } if (ticket != rs->active.ticket) { PF_RULES_RUNLOCK(); ERROUT(EBUSY); } nvlist_destroy(nvl); nvl = NULL; free(nvlpacked, M_NVLIST); nvlpacked = NULL; rule = TAILQ_FIRST(rs->active.rules); while ((rule != NULL) && (rule->nr != nr)) rule = TAILQ_NEXT(rule, entries); if (rule == NULL) { PF_RULES_RUNLOCK(); ERROUT(ENOENT); } /* Make sure rule can't go away. 
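* Entering the net epoch before dropping the rules read lock keeps the rule * valid while it is being copied out.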
*/ NET_EPOCH_ENTER(et); PF_RULES_RUNLOCK(); nvl = pf_keth_rule_to_nveth_rule(rule); if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) ERROUT(EBUSY); NET_EPOCH_EXIT(et); if (nvl == NULL) ERROUT(ENOMEM); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); if (error == 0 && clear) { counter_u64_zero(rule->evaluations); for (int i = 0; i < 2; i++) { counter_u64_zero(rule->packets[i]); counter_u64_zero(rule->bytes[i]); } } #undef ERROUT DIOCGETETHRULE_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCADDETHRULE: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_rule *rule = NULL, *tail = NULL; struct pf_keth_ruleset *ruleset = NULL; struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL; const char *anchor = "", *anchor_call = ""; #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EBADMSG); if (nvlist_exists_string(nvl, "anchor")) anchor = nvlist_get_string(nvl, "anchor"); if (nvlist_exists_string(nvl, "anchor_call")) anchor_call = nvlist_get_string(nvl, "anchor_call"); ruleset = pf_find_keth_ruleset(anchor); if (ruleset == NULL) ERROUT(EINVAL); if (nvlist_get_number(nvl, "ticket") != ruleset->inactive.ticket) { DPFPRINTF(PF_DEBUG_MISC, ("ticket: %d != %d\n", (u_int32_t)nvlist_get_number(nvl, "ticket"), ruleset->inactive.ticket)); ERROUT(EBUSY); } rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); if (rule == NULL) ERROUT(ENOMEM); rule->timestamp = NULL; error = pf_nveth_rule_to_keth_rule(nvl, rule); if (error != 0) ERROUT(error); if (rule->ifname[0]) kif = pf_kkif_create(M_WAITOK); if (rule->bridge_to_name[0]) bridge_to_kif = pf_kkif_create(M_WAITOK); rule->evaluations = counter_u64_alloc(M_WAITOK); for (int i = 0; i < 2; i++) { rule->packets[i] = counter_u64_alloc(M_WAITOK); rule->bytes[i] = counter_u64_alloc(M_WAITOK); } rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, M_WAITOK | M_ZERO); PF_RULES_WLOCK(); if (rule->ifname[0]) { rule->kif = pfi_kkif_attach(kif, rule->ifname); pfi_kkif_ref(rule->kif); } else rule->kif = NULL; if (rule->bridge_to_name[0]) { rule->bridge_to = pfi_kkif_attach(bridge_to_kif, rule->bridge_to_name); pfi_kkif_ref(rule->bridge_to); } else rule->bridge_to = NULL; #ifdef ALTQ /* set queue IDs */ if (rule->qname[0] != 0) { if ((rule->qid = pf_qname2qid(rule->qname)) == 0) error = EBUSY; else rule->qid = rule->qid; } #endif if (rule->tagname[0]) if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) error = EBUSY; if (rule->match_tagname[0]) if ((rule->match_tag = pf_tagname2tag( rule->match_tagname)) == 0) error = EBUSY; if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); if (error) { pf_free_eth_rule(rule); PF_RULES_WUNLOCK(); ERROUT(error); } if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { pf_free_eth_rule(rule); PF_RULES_WUNLOCK(); ERROUT(EINVAL); } tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); if (tail) rule->nr = 
tail->nr + 1; else rule->nr = 0; TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); PF_RULES_WUNLOCK(); #undef ERROUT DIOCADDETHRULE_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); break; } case DIOCGETETHRULESETS: { struct epoch_tracker et; struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_ruleset *ruleset; struct pf_keth_anchor *anchor; int nr = 0; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "path")) ERROUT(EBADMSG); NET_EPOCH_ENTER(et); if ((ruleset = pf_find_keth_ruleset( nvlist_get_string(nvl, "path"))) == NULL) { NET_EPOCH_EXIT(et); ERROUT(ENOENT); } if (ruleset->anchor == NULL) { RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) if (anchor->parent == NULL) nr++; } else { RB_FOREACH(anchor, pf_keth_anchor_node, &ruleset->anchor->children) nr++; } NET_EPOCH_EXIT(et); nvlist_destroy(nvl); nvl = NULL; free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvlist_add_number(nvl, "nr", nr); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT DIOCGETETHRULESETS_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCGETETHRULESET: { struct epoch_tracker et; struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_keth_ruleset *ruleset; struct pf_keth_anchor *anchor; int nr = 0, req_nr = 0; bool found = false; #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "path")) ERROUT(EBADMSG); if (! 
nvlist_exists_number(nvl, "nr")) ERROUT(EBADMSG); req_nr = nvlist_get_number(nvl, "nr"); NET_EPOCH_ENTER(et); if ((ruleset = pf_find_keth_ruleset( nvlist_get_string(nvl, "path"))) == NULL) { NET_EPOCH_EXIT(et); ERROUT(ENOENT); } nvlist_destroy(nvl); nvl = NULL; free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvl = nvlist_create(0); if (nvl == NULL) { NET_EPOCH_EXIT(et); ERROUT(ENOMEM); } if (ruleset->anchor == NULL) { RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) { if (anchor->parent == NULL && nr++ == req_nr) { found = true; break; } } } else { RB_FOREACH(anchor, pf_keth_anchor_node, &ruleset->anchor->children) { if (nr++ == req_nr) { found = true; break; } } } NET_EPOCH_EXIT(et); if (found) { nvlist_add_number(nvl, "nr", nr); nvlist_add_string(nvl, "name", anchor->name); if (ruleset->anchor) nvlist_add_string(nvl, "path", ruleset->anchor->path); else nvlist_add_string(nvl, "path", ""); } else { ERROUT(EBUSY); } nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT DIOCGETETHRULESET_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); break; } case DIOCADDRULENV: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvl = NULL; void *nvlpacked = NULL; struct pf_krule *rule = NULL; const char *anchor = "", *anchor_call = ""; uint32_t ticket = 0, pool_ticket = 0; #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EINVAL); ticket = nvlist_get_number(nvl, "ticket"); if (! nvlist_exists_number(nvl, "pool_ticket")) ERROUT(EINVAL); pool_ticket = nvlist_get_number(nvl, "pool_ticket"); if (! nvlist_exists_nvlist(nvl, "rule")) ERROUT(EINVAL); rule = pf_krule_alloc(); error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), rule); if (error) ERROUT(error); if (nvlist_exists_string(nvl, "anchor")) anchor = nvlist_get_string(nvl, "anchor"); if (nvlist_exists_string(nvl, "anchor_call")) anchor_call = nvlist_get_string(nvl, "anchor_call"); if ((error = nvlist_error(nvl))) ERROUT(error); /* Frees rule on error */ error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, anchor_call, td->td_ucred->cr_ruid, td->td_proc ? td->td_proc->p_pid : 0); nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); break; #undef ERROUT DIOCADDRULENV_error: pf_krule_free(rule); nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); break; } case DIOCADDRULE: { struct pfioc_rule *pr = (struct pfioc_rule *)addr; struct pf_krule *rule; rule = pf_krule_alloc(); error = pf_rule_to_krule(&pr->rule, rule); if (error != 0) { pf_krule_free(rule); break; } pr->anchor[sizeof(pr->anchor) - 1] = 0; /* Frees rule on error */ error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid, td->td_proc ? 
td->td_proc->p_pid : 0); break; } case DIOCGETRULES: { struct pfioc_rule *pr = (struct pfioc_rule *)addr; struct pf_kruleset *ruleset; struct pf_krule *tail; int rs_num; pr->anchor[sizeof(pr->anchor) - 1] = 0; PF_RULES_WLOCK(); ruleset = pf_find_kruleset(pr->anchor); if (ruleset == NULL) { PF_RULES_WUNLOCK(); error = EINVAL; break; } rs_num = pf_get_ruleset_number(pr->rule.action); if (rs_num >= PF_RULESET_MAX) { PF_RULES_WUNLOCK(); error = EINVAL; break; } tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, pf_krulequeue); if (tail) pr->nr = tail->nr + 1; else pr->nr = 0; pr->ticket = ruleset->rules[rs_num].active.ticket; PF_RULES_WUNLOCK(); break; } case DIOCGETRULENV: { struct pfioc_nv *nv = (struct pfioc_nv *)addr; nvlist_t *nvrule = NULL; nvlist_t *nvl = NULL; struct pf_kruleset *ruleset; struct pf_krule *rule; void *nvlpacked = NULL; int rs_num, nr; bool clear_counter = false; #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); /* Copy the request in */ nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_string(nvl, "anchor")) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ruleset")) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "ticket")) ERROUT(EBADMSG); if (! nvlist_exists_number(nvl, "nr")) ERROUT(EBADMSG); if (nvlist_exists_bool(nvl, "clear_counter")) clear_counter = nvlist_get_bool(nvl, "clear_counter"); if (clear_counter && !(flags & FWRITE)) ERROUT(EACCES); nr = nvlist_get_number(nvl, "nr"); PF_RULES_WLOCK(); ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); if (ruleset == NULL) { PF_RULES_WUNLOCK(); ERROUT(ENOENT); } rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); if (rs_num >= PF_RULESET_MAX) { PF_RULES_WUNLOCK(); ERROUT(EINVAL); } if (nvlist_get_number(nvl, "ticket") != ruleset->rules[rs_num].active.ticket) { PF_RULES_WUNLOCK(); ERROUT(EBUSY); } if ((error = nvlist_error(nvl))) { PF_RULES_WUNLOCK(); ERROUT(error); } rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); while ((rule != NULL) && (rule->nr != nr)) rule = TAILQ_NEXT(rule, entries); if (rule == NULL) { PF_RULES_WUNLOCK(); ERROUT(EBUSY); } nvrule = pf_krule_to_nvrule(rule); nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) { PF_RULES_WUNLOCK(); ERROUT(ENOMEM); } nvlist_add_number(nvl, "nr", nr); nvlist_add_nvlist(nvl, "rule", nvrule); nvlist_destroy(nvrule); nvrule = NULL; if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { PF_RULES_WUNLOCK(); ERROUT(EBUSY); } free(nvlpacked, M_NVLIST); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) { PF_RULES_WUNLOCK(); ERROUT(ENOMEM); } if (nv->size == 0) { PF_RULES_WUNLOCK(); ERROUT(0); } else if (nv->size < nv->len) { PF_RULES_WUNLOCK(); ERROUT(ENOSPC); } if (clear_counter) { pf_counter_u64_zero(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_zero(&rule->packets[i]); pf_counter_u64_zero(&rule->bytes[i]); } counter_u64_zero(rule->states_tot); } PF_RULES_WUNLOCK(); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT DIOCGETRULENV_error: free(nvlpacked, M_NVLIST); nvlist_destroy(nvrule); nvlist_destroy(nvl); break; } case DIOCCHANGERULE: { struct pfioc_rule *pcr = (struct pfioc_rule *)addr; struct pf_kruleset *ruleset; struct pf_krule *oldrule = NULL, *newrule = NULL; struct pfi_kkif *kif = NULL; struct pf_kpooladdr *pa; u_int32_t nr = 
0; int rs_num; pcr->anchor[sizeof(pcr->anchor) - 1] = 0; if (pcr->action < PF_CHANGE_ADD_HEAD || pcr->action > PF_CHANGE_GET_TICKET) { error = EINVAL; break; } if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { error = EINVAL; break; } if (pcr->action != PF_CHANGE_REMOVE) { newrule = pf_krule_alloc(); error = pf_rule_to_krule(&pcr->rule, newrule); if (error != 0) { pf_krule_free(newrule); break; } if (newrule->ifname[0]) kif = pf_kkif_create(M_WAITOK); pf_counter_u64_init(&newrule->evaluations, M_WAITOK); for (int i = 0; i < 2; i++) { pf_counter_u64_init(&newrule->packets[i], M_WAITOK); pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); } newrule->states_cur = counter_u64_alloc(M_WAITOK); newrule->states_tot = counter_u64_alloc(M_WAITOK); newrule->src_nodes = counter_u64_alloc(M_WAITOK); newrule->cuid = td->td_ucred->cr_ruid; newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; TAILQ_INIT(&newrule->rpool.list); } #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) PF_CONFIG_LOCK(); PF_RULES_WLOCK(); #ifdef PF_WANT_32_TO_64_COUNTER if (newrule != NULL) { LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); newrule->allrulelinked = true; V_pf_allrulecount++; } #endif if (!(pcr->action == PF_CHANGE_REMOVE || pcr->action == PF_CHANGE_GET_TICKET) && pcr->pool_ticket != V_ticket_pabuf) ERROUT(EBUSY); ruleset = pf_find_kruleset(pcr->anchor); if (ruleset == NULL) ERROUT(EINVAL); rs_num = pf_get_ruleset_number(pcr->rule.action); if (rs_num >= PF_RULESET_MAX) ERROUT(EINVAL); /* * XXXMJG: there is no guarantee that the ruleset was * created by the usual route of calling DIOCXBEGIN. * As a result it is possible the rule tree will not * be allocated yet. Hack around it by doing it here. * Note it is fine to let the tree persist in case of * error as it will be freed down the road on future * updates (if need be). 
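* The tree only backs duplicate detection on rule insertion (see the
* RB_INSERT/EEXIST check below), so a leftover empty tree is harmless.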
*/ if (ruleset->rules[rs_num].active.tree == NULL) { ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); if (ruleset->rules[rs_num].active.tree == NULL) { ERROUT(ENOMEM); } } if (pcr->action == PF_CHANGE_GET_TICKET) { pcr->ticket = ++ruleset->rules[rs_num].active.ticket; ERROUT(0); } else if (pcr->ticket != ruleset->rules[rs_num].active.ticket) ERROUT(EINVAL); if (pcr->action != PF_CHANGE_REMOVE) { if (newrule->ifname[0]) { newrule->kif = pfi_kkif_attach(kif, newrule->ifname); kif = NULL; pfi_kkif_ref(newrule->kif); } else newrule->kif = NULL; if (newrule->rtableid > 0 && newrule->rtableid >= rt_numfibs) error = EBUSY; #ifdef ALTQ /* set queue IDs */ if (newrule->qname[0] != 0) { if ((newrule->qid = pf_qname2qid(newrule->qname)) == 0) error = EBUSY; else if (newrule->pqname[0] != 0) { if ((newrule->pqid = pf_qname2qid(newrule->pqname)) == 0) error = EBUSY; } else newrule->pqid = newrule->qid; } #endif /* ALTQ */ if (newrule->tagname[0]) if ((newrule->tag = pf_tagname2tag(newrule->tagname)) == 0) error = EBUSY; if (newrule->match_tagname[0]) if ((newrule->match_tag = pf_tagname2tag( newrule->match_tagname)) == 0) error = EBUSY; if (newrule->rt && !newrule->direction) error = EINVAL; if (!newrule->log) newrule->logif = 0; if (newrule->logif >= PFLOGIFS_MAX) error = EINVAL; if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) error = ENOMEM; if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) error = ENOMEM; if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) error = EINVAL; TAILQ_FOREACH(pa, &V_pf_pabuf, entries) if (pa->addr.type == PF_ADDR_TABLE) { pa->addr.p.tbl = pfr_attach_table(ruleset, pa->addr.v.tblname); if (pa->addr.p.tbl == NULL) error = ENOMEM; } newrule->overload_tbl = NULL; if (newrule->overload_tblname[0]) { if ((newrule->overload_tbl = pfr_attach_table( ruleset, newrule->overload_tblname)) == NULL) error = EINVAL; else newrule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; } pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); if (((((newrule->action == PF_NAT) || (newrule->action == PF_RDR) || (newrule->action == PF_BINAT) || (newrule->rt > PF_NOPFROUTE)) && !newrule->anchor)) && (TAILQ_FIRST(&newrule->rpool.list) == NULL)) error = EINVAL; if (error) { pf_free_rule(newrule); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); break; } newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); } pf_empty_kpool(&V_pf_pabuf); if (pcr->action == PF_CHANGE_ADD_HEAD) oldrule = TAILQ_FIRST( ruleset->rules[rs_num].active.ptr); else if (pcr->action == PF_CHANGE_ADD_TAIL) oldrule = TAILQ_LAST( ruleset->rules[rs_num].active.ptr, pf_krulequeue); else { oldrule = TAILQ_FIRST( ruleset->rules[rs_num].active.ptr); while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) oldrule = TAILQ_NEXT(oldrule, entries); if (oldrule == NULL) { if (newrule != NULL) pf_free_rule(newrule); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); error = EINVAL; break; } } if (pcr->action == PF_CHANGE_REMOVE) { pf_unlink_rule(ruleset->rules[rs_num].active.ptr, oldrule); RB_REMOVE(pf_krule_global, ruleset->rules[rs_num].active.tree, oldrule); ruleset->rules[rs_num].active.rcount--; } else { pf_hash_rule(newrule); if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].active.tree, newrule) != NULL) { pf_free_rule(newrule); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); error = EEXIST; break; } if (oldrule == NULL) TAILQ_INSERT_TAIL( ruleset->rules[rs_num].active.ptr, newrule, entries); else if (pcr->action == PF_CHANGE_ADD_HEAD || pcr->action == PF_CHANGE_ADD_BEFORE) TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 
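/* PF_CHANGE_ADD_AFTER, or PF_CHANGE_ADD_TAIL with a non-empty list: insert after the reference rule. */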
else TAILQ_INSERT_AFTER( ruleset->rules[rs_num].active.ptr, oldrule, newrule, entries); ruleset->rules[rs_num].active.rcount++; } nr = 0; TAILQ_FOREACH(oldrule, ruleset->rules[rs_num].active.ptr, entries) oldrule->nr = nr++; ruleset->rules[rs_num].active.ticket++; pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); pf_remove_if_empty_kruleset(ruleset); PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); break; #undef ERROUT DIOCCHANGERULE_error: PF_RULES_WUNLOCK(); PF_CONFIG_UNLOCK(); pf_krule_free(newrule); pf_kkif_free(kif); break; } case DIOCCLRSTATESNV: { error = pf_clearstates_nv((struct pfioc_nv *)addr); break; } case DIOCKILLSTATESNV: { error = pf_killstates_nv((struct pfioc_nv *)addr); break; } case DIOCADDSTATE: { struct pfioc_state *ps = (struct pfioc_state *)addr; struct pfsync_state_1301 *sp = &ps->state; if (sp->timeout >= PFTM_MAX) { error = EINVAL; break; } if (V_pfsync_state_import_ptr != NULL) { PF_RULES_RLOCK(); error = V_pfsync_state_import_ptr( (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, PFSYNC_MSG_VERSION_1301); PF_RULES_RUNLOCK(); } else error = EOPNOTSUPP; break; } case DIOCGETSTATE: { struct pfioc_state *ps = (struct pfioc_state *)addr; struct pf_kstate *s; s = pf_find_state_byid(ps->state.id, ps->state.creatorid); if (s == NULL) { error = ENOENT; break; } pfsync_state_export((union pfsync_state_union*)&ps->state, s, PFSYNC_MSG_VERSION_1301); PF_STATE_UNLOCK(s); break; } case DIOCGETSTATENV: { error = pf_getstate((struct pfioc_nv *)addr); break; } #ifdef COMPAT_FREEBSD14 case DIOCGETSTATES: { struct pfioc_states *ps = (struct pfioc_states *)addr; struct pf_kstate *s; struct pfsync_state_1301 *pstore, *p; int i, nr; size_t slice_count = 16, count; void *out; if (ps->ps_len <= 0) { nr = uma_zone_get_cur(V_pf_state_z); ps->ps_len = sizeof(struct pfsync_state_1301) * nr; break; } out = ps->ps_states; pstore = mallocarray(slice_count, sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); nr = 0; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; DIOCGETSTATES_retry: p = pstore; if (LIST_EMPTY(&ih->states)) continue; PF_HASHROW_LOCK(ih); count = 0; LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; count++; } if (count > slice_count) { PF_HASHROW_UNLOCK(ih); free(pstore, M_TEMP); slice_count = count * 2; pstore = mallocarray(slice_count, sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); goto DIOCGETSTATES_retry; } if ((nr+count) * sizeof(*p) > ps->ps_len) { PF_HASHROW_UNLOCK(ih); goto DIOCGETSTATES_full; } LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; pfsync_state_export((union pfsync_state_union*)p, s, PFSYNC_MSG_VERSION_1301); p++; nr++; } PF_HASHROW_UNLOCK(ih); error = copyout(pstore, out, sizeof(struct pfsync_state_1301) * count); if (error) break; out = ps->ps_states + nr; } DIOCGETSTATES_full: ps->ps_len = sizeof(struct pfsync_state_1301) * nr; free(pstore, M_TEMP); break; } case DIOCGETSTATESV2: { struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; struct pf_kstate *s; struct pf_state_export *pstore, *p; int i, nr; size_t slice_count = 16, count; void *out; if (ps->ps_req_version > PF_STATE_VERSION) { error = ENOTSUP; break; } if (ps->ps_len <= 0) { nr = uma_zone_get_cur(V_pf_state_z); ps->ps_len = sizeof(struct pf_state_export) * nr; break; } out = ps->ps_states; pstore = mallocarray(slice_count, sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); nr = 0; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; 
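/*
 * Same snapshot pattern as DIOCGETSTATES above: count the states in
 * this hash row under the row lock, grow the slice buffer and retry
 * if the row holds more states than currently fit, and copy out only
 * after the row lock has been dropped.
 */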
DIOCGETSTATESV2_retry: p = pstore; if (LIST_EMPTY(&ih->states)) continue; PF_HASHROW_LOCK(ih); count = 0; LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; count++; } if (count > slice_count) { PF_HASHROW_UNLOCK(ih); free(pstore, M_TEMP); slice_count = count * 2; pstore = mallocarray(slice_count, sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); goto DIOCGETSTATESV2_retry; } if ((nr+count) * sizeof(*p) > ps->ps_len) { PF_HASHROW_UNLOCK(ih); goto DIOCGETSTATESV2_full; } LIST_FOREACH(s, &ih->states, entry) { if (s->timeout == PFTM_UNLINKED) continue; pf_state_export(p, s); p++; nr++; } PF_HASHROW_UNLOCK(ih); error = copyout(pstore, out, sizeof(struct pf_state_export) * count); if (error) break; out = ps->ps_states + nr; } DIOCGETSTATESV2_full: ps->ps_len = nr * sizeof(struct pf_state_export); free(pstore, M_TEMP); break; } #endif case DIOCGETSTATUSNV: { error = pf_getstatus((struct pfioc_nv *)addr); break; } case DIOCSETSTATUSIF: { struct pfioc_if *pi = (struct pfioc_if *)addr; if (pi->ifname[0] == 0) { bzero(V_pf_status.ifname, IFNAMSIZ); break; } PF_RULES_WLOCK(); error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); PF_RULES_WUNLOCK(); break; } case DIOCCLRSTATUS: { PF_RULES_WLOCK(); for (int i = 0; i < PFRES_MAX; i++) counter_u64_zero(V_pf_status.counters[i]); for (int i = 0; i < FCNT_MAX; i++) pf_counter_u64_zero(&V_pf_status.fcounters[i]); for (int i = 0; i < SCNT_MAX; i++) counter_u64_zero(V_pf_status.scounters[i]); for (int i = 0; i < KLCNT_MAX; i++) counter_u64_zero(V_pf_status.lcounters[i]); V_pf_status.since = time_second; if (*V_pf_status.ifname) pfi_update_status(V_pf_status.ifname, NULL); PF_RULES_WUNLOCK(); break; } case DIOCNATLOOK: { struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; struct pf_state_key *sk; struct pf_kstate *state; struct pf_state_key_cmp key; int m = 0, direction = pnl->direction; int sidx, didx; /* NATLOOK src and dst are reversed, so reverse sidx/didx */ sidx = (direction == PF_IN) ? 1 : 0; didx = (direction == PF_IN) ? 
0 : 1; if (!pnl->proto || PF_AZERO(&pnl->saddr, pnl->af) || PF_AZERO(&pnl->daddr, pnl->af) || ((pnl->proto == IPPROTO_TCP || pnl->proto == IPPROTO_UDP) && (!pnl->dport || !pnl->sport))) error = EINVAL; else { bzero(&key, sizeof(key)); key.af = pnl->af; key.proto = pnl->proto; PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); key.port[sidx] = pnl->sport; PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); key.port[didx] = pnl->dport; state = pf_find_state_all(&key, direction, &m); if (state == NULL) { error = ENOENT; } else { if (m > 1) { PF_STATE_UNLOCK(state); error = E2BIG; /* more than one state */ } else { sk = state->key[sidx]; PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); pnl->rsport = sk->port[sidx]; PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); pnl->rdport = sk->port[didx]; PF_STATE_UNLOCK(state); } } } break; } case DIOCSETTIMEOUT: { struct pfioc_tm *pt = (struct pfioc_tm *)addr; int old; if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || pt->seconds < 0) { error = EINVAL; break; } PF_RULES_WLOCK(); old = V_pf_default_rule.timeout[pt->timeout]; if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) pt->seconds = 1; V_pf_default_rule.timeout[pt->timeout] = pt->seconds; if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) wakeup(pf_purge_thread); pt->seconds = old; PF_RULES_WUNLOCK(); break; } case DIOCGETTIMEOUT: { struct pfioc_tm *pt = (struct pfioc_tm *)addr; if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { error = EINVAL; break; } PF_RULES_RLOCK(); pt->seconds = V_pf_default_rule.timeout[pt->timeout]; PF_RULES_RUNLOCK(); break; } case DIOCGETLIMIT: { struct pfioc_limit *pl = (struct pfioc_limit *)addr; if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { error = EINVAL; break; } PF_RULES_RLOCK(); pl->limit = V_pf_limits[pl->index].limit; PF_RULES_RUNLOCK(); break; } case DIOCSETLIMIT: { struct pfioc_limit *pl = (struct pfioc_limit *)addr; int old_limit; PF_RULES_WLOCK(); if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || V_pf_limits[pl->index].zone == NULL) { PF_RULES_WUNLOCK(); error = EINVAL; break; } uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); old_limit = V_pf_limits[pl->index].limit; V_pf_limits[pl->index].limit = pl->limit; pl->limit = old_limit; PF_RULES_WUNLOCK(); break; } case DIOCSETDEBUG: { u_int32_t *level = (u_int32_t *)addr; PF_RULES_WLOCK(); V_pf_status.debug = *level; PF_RULES_WUNLOCK(); break; } case DIOCCLRRULECTRS: { /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ struct pf_kruleset *ruleset = &pf_main_ruleset; struct pf_krule *rule; PF_RULES_WLOCK(); TAILQ_FOREACH(rule, ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { pf_counter_u64_zero(&rule->evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_zero(&rule->packets[i]); pf_counter_u64_zero(&rule->bytes[i]); } } PF_RULES_WUNLOCK(); break; } case DIOCGIFSPEEDV0: case DIOCGIFSPEEDV1: { struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr; struct pf_ifspeed_v1 ps; struct ifnet *ifp; if (psp->ifname[0] == '\0') { error = EINVAL; break; } error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); if (error != 0) break; ifp = ifunit(ps.ifname); if (ifp != NULL) { psp->baudrate32 = (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); if (cmd == DIOCGIFSPEEDV1) psp->baudrate = ifp->if_baudrate; } else { error = EINVAL; } break; } #ifdef ALTQ case DIOCSTARTALTQ: { struct pf_altq *altq; PF_RULES_WLOCK(); /* enable all altq interfaces on active list */ TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { error = pf_enable_altq(altq); 
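/* A failed enable aborts the scan; V_pf_altq_running is only set once every eligible interface is up. */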
if (error != 0) break; } } if (error == 0) V_pf_altq_running = 1; PF_RULES_WUNLOCK(); DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); break; } case DIOCSTOPALTQ: { struct pf_altq *altq; PF_RULES_WLOCK(); /* disable all altq interfaces on active list */ TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { error = pf_disable_altq(altq); if (error != 0) break; } } if (error == 0) V_pf_altq_running = 0; PF_RULES_WUNLOCK(); DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); break; } case DIOCADDALTQV0: case DIOCADDALTQV1: { struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; struct pf_altq *altq, *a; struct ifnet *ifp; altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); if (error) break; altq->local_flags = 0; PF_RULES_WLOCK(); if (pa->ticket != V_ticket_altqs_inactive) { PF_RULES_WUNLOCK(); free(altq, M_PFALTQ); error = EBUSY; break; } /* * if this is for a queue, find the discipline and * copy the necessary fields */ if (altq->qname[0] != 0) { if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { PF_RULES_WUNLOCK(); error = EBUSY; free(altq, M_PFALTQ); break; } altq->altq_disc = NULL; TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { if (strncmp(a->ifname, altq->ifname, IFNAMSIZ) == 0) { altq->altq_disc = a->altq_disc; break; } } } if ((ifp = ifunit(altq->ifname)) == NULL) altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; else error = altq_add(ifp, altq); if (error) { PF_RULES_WUNLOCK(); free(altq, M_PFALTQ); break; } if (altq->qname[0] != 0) TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); else TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); /* version error check done on import above */ pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); PF_RULES_WUNLOCK(); break; } case DIOCGETALTQSV0: case DIOCGETALTQSV1: { struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; struct pf_altq *altq; PF_RULES_RLOCK(); pa->nr = 0; TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) pa->nr++; TAILQ_FOREACH(altq, V_pf_altqs_active, entries) pa->nr++; pa->ticket = V_ticket_altqs_active; PF_RULES_RUNLOCK(); break; } case DIOCGETALTQV0: case DIOCGETALTQV1: { struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; struct pf_altq *altq; PF_RULES_RLOCK(); if (pa->ticket != V_ticket_altqs_active) { PF_RULES_RUNLOCK(); error = EBUSY; break; } altq = pf_altq_get_nth_active(pa->nr); if (altq == NULL) { PF_RULES_RUNLOCK(); error = EBUSY; break; } pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); PF_RULES_RUNLOCK(); break; } case DIOCCHANGEALTQV0: case DIOCCHANGEALTQV1: /* CHANGEALTQ not supported yet! 
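Changing a queue in place requires a full ALTQ reload instead.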
*/ error = ENODEV; break; case DIOCGETQSTATSV0: case DIOCGETQSTATSV1: { struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; struct pf_altq *altq; int nbytes; u_int32_t version; PF_RULES_RLOCK(); if (pq->ticket != V_ticket_altqs_active) { PF_RULES_RUNLOCK(); error = EBUSY; break; } nbytes = pq->nbytes; altq = pf_altq_get_nth_active(pq->nr); if (altq == NULL) { PF_RULES_RUNLOCK(); error = EBUSY; break; } if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { PF_RULES_RUNLOCK(); error = ENXIO; break; } PF_RULES_RUNLOCK(); if (cmd == DIOCGETQSTATSV0) version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ else version = pq->version; error = altq_getqstats(altq, pq->buf, &nbytes, version); if (error == 0) { pq->scheduler = altq->scheduler; pq->nbytes = nbytes; } break; } #endif /* ALTQ */ case DIOCBEGINADDRS: { struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; PF_RULES_WLOCK(); pf_empty_kpool(&V_pf_pabuf); pp->ticket = ++V_ticket_pabuf; PF_RULES_WUNLOCK(); break; } case DIOCADDADDR: { struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; struct pf_kpooladdr *pa; struct pfi_kkif *kif = NULL; #ifndef INET if (pp->af == AF_INET) { error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (pp->af == AF_INET6) { error = EAFNOSUPPORT; break; } #endif /* INET6 */ if (pp->addr.addr.type != PF_ADDR_ADDRMASK && pp->addr.addr.type != PF_ADDR_DYNIFTL && pp->addr.addr.type != PF_ADDR_TABLE) { error = EINVAL; break; } if (pp->addr.addr.p.dyn != NULL) { error = EINVAL; break; } pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); if (error != 0) break; if (pa->ifname[0]) kif = pf_kkif_create(M_WAITOK); PF_RULES_WLOCK(); if (pp->ticket != V_ticket_pabuf) { PF_RULES_WUNLOCK(); if (pa->ifname[0]) pf_kkif_free(kif); free(pa, M_PFRULE); error = EBUSY; break; } if (pa->ifname[0]) { pa->kif = pfi_kkif_attach(kif, pa->ifname); kif = NULL; pfi_kkif_ref(pa->kif); } else pa->kif = NULL; if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { if (pa->ifname[0]) pfi_kkif_unref(pa->kif); PF_RULES_WUNLOCK(); free(pa, M_PFRULE); break; } TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); PF_RULES_WUNLOCK(); break; } case DIOCGETADDRS: { struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; struct pf_kpool *pool; struct pf_kpooladdr *pa; pp->anchor[sizeof(pp->anchor) - 1] = 0; pp->nr = 0; PF_RULES_RLOCK(); pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, pp->r_num, 0, 1, 0); if (pool == NULL) { PF_RULES_RUNLOCK(); error = EBUSY; break; } TAILQ_FOREACH(pa, &pool->list, entries) pp->nr++; PF_RULES_RUNLOCK(); break; } case DIOCGETADDR: { struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; struct pf_kpool *pool; struct pf_kpooladdr *pa; u_int32_t nr = 0; pp->anchor[sizeof(pp->anchor) - 1] = 0; PF_RULES_RLOCK(); pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, pp->r_num, 0, 1, 1); if (pool == NULL) { PF_RULES_RUNLOCK(); error = EBUSY; break; } pa = TAILQ_FIRST(&pool->list); while ((pa != NULL) && (nr < pp->nr)) { pa = TAILQ_NEXT(pa, entries); nr++; } if (pa == NULL) { PF_RULES_RUNLOCK(); error = EBUSY; break; } pf_kpooladdr_to_pooladdr(pa, &pp->addr); pf_addr_copyout(&pp->addr.addr); PF_RULES_RUNLOCK(); break; } case DIOCCHANGEADDR: { struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; struct pf_kpool *pool; struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; struct pf_kruleset *ruleset; struct pfi_kkif *kif = NULL; pca->anchor[sizeof(pca->anchor) - 1] = 0; if (pca->action < 
PF_CHANGE_ADD_HEAD || pca->action > PF_CHANGE_REMOVE) { error = EINVAL; break; } if (pca->addr.addr.type != PF_ADDR_ADDRMASK && pca->addr.addr.type != PF_ADDR_DYNIFTL && pca->addr.addr.type != PF_ADDR_TABLE) { error = EINVAL; break; } if (pca->addr.addr.p.dyn != NULL) { error = EINVAL; break; } if (pca->action != PF_CHANGE_REMOVE) { #ifndef INET if (pca->af == AF_INET) { error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (pca->af == AF_INET6) { error = EAFNOSUPPORT; break; } #endif /* INET6 */ newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); if (newpa->ifname[0]) kif = pf_kkif_create(M_WAITOK); newpa->kif = NULL; } #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) PF_RULES_WLOCK(); ruleset = pf_find_kruleset(pca->anchor); if (ruleset == NULL) ERROUT(EBUSY); pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, pca->r_num, pca->r_last, 1, 1); if (pool == NULL) ERROUT(EBUSY); if (pca->action != PF_CHANGE_REMOVE) { if (newpa->ifname[0]) { newpa->kif = pfi_kkif_attach(kif, newpa->ifname); pfi_kkif_ref(newpa->kif); kif = NULL; } switch (newpa->addr.type) { case PF_ADDR_DYNIFTL: error = pfi_dynaddr_setup(&newpa->addr, pca->af); break; case PF_ADDR_TABLE: newpa->addr.p.tbl = pfr_attach_table(ruleset, newpa->addr.v.tblname); if (newpa->addr.p.tbl == NULL) error = ENOMEM; break; } if (error) goto DIOCCHANGEADDR_error; } switch (pca->action) { case PF_CHANGE_ADD_HEAD: oldpa = TAILQ_FIRST(&pool->list); break; case PF_CHANGE_ADD_TAIL: oldpa = TAILQ_LAST(&pool->list, pf_kpalist); break; default: oldpa = TAILQ_FIRST(&pool->list); for (int i = 0; oldpa && i < pca->nr; i++) oldpa = TAILQ_NEXT(oldpa, entries); if (oldpa == NULL) ERROUT(EINVAL); } if (pca->action == PF_CHANGE_REMOVE) { TAILQ_REMOVE(&pool->list, oldpa, entries); switch (oldpa->addr.type) { case PF_ADDR_DYNIFTL: pfi_dynaddr_remove(oldpa->addr.p.dyn); break; case PF_ADDR_TABLE: pfr_detach_table(oldpa->addr.p.tbl); break; } if (oldpa->kif) pfi_kkif_unref(oldpa->kif); free(oldpa, M_PFRULE); } else { if (oldpa == NULL) TAILQ_INSERT_TAIL(&pool->list, newpa, entries); else if (pca->action == PF_CHANGE_ADD_HEAD || pca->action == PF_CHANGE_ADD_BEFORE) TAILQ_INSERT_BEFORE(oldpa, newpa, entries); else TAILQ_INSERT_AFTER(&pool->list, oldpa, newpa, entries); } pool->cur = TAILQ_FIRST(&pool->list); PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); PF_RULES_WUNLOCK(); break; #undef ERROUT DIOCCHANGEADDR_error: if (newpa != NULL) { if (newpa->kif) pfi_kkif_unref(newpa->kif); free(newpa, M_PFRULE); } PF_RULES_WUNLOCK(); pf_kkif_free(kif); break; } case DIOCGETRULESETS: { struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; struct pf_kruleset *ruleset; struct pf_kanchor *anchor; pr->path[sizeof(pr->path) - 1] = 0; PF_RULES_RLOCK(); if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { PF_RULES_RUNLOCK(); error = ENOENT; break; } pr->nr = 0; if (ruleset->anchor == NULL) { /* XXX kludge for pf_main_ruleset */ RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) if (anchor->parent == NULL) pr->nr++; } else { RB_FOREACH(anchor, pf_kanchor_node, &ruleset->anchor->children) pr->nr++; } PF_RULES_RUNLOCK(); break; } case DIOCGETRULESET: { struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; struct pf_kruleset *ruleset; struct pf_kanchor *anchor; u_int32_t nr = 0; pr->path[sizeof(pr->path) - 1] = 0; PF_RULES_RLOCK(); if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { PF_RULES_RUNLOCK(); error = ENOENT; break; } pr->name[0] = 0; if (ruleset->anchor == NULL) { /* 
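As in DIOCGETRULESETS: the main ruleset has no anchor object of its own.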
XXX kludge for pf_main_ruleset */ RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) if (anchor->parent == NULL && nr++ == pr->nr) { strlcpy(pr->name, anchor->name, sizeof(pr->name)); break; } } else { RB_FOREACH(anchor, pf_kanchor_node, &ruleset->anchor->children) if (nr++ == pr->nr) { strlcpy(pr->name, anchor->name, sizeof(pr->name)); break; } } if (!pr->name[0]) error = EBUSY; PF_RULES_RUNLOCK(); break; } case DIOCRCLRTABLES: { struct pfioc_table *io = (struct pfioc_table *)addr; if (io->pfrio_esize != 0) { error = ENODEV; break; } PF_RULES_WLOCK(); error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); break; } case DIOCRADDTABLES: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_table *pfrts; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { error = ENOMEM; break; } totlen = io->pfrio_size * sizeof(struct pfr_table); pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfrts, totlen); if (error) { free(pfrts, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_add_tables(pfrts, io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); free(pfrts, M_TEMP); break; } case DIOCRDELTABLES: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_table *pfrts; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { error = ENOMEM; break; } totlen = io->pfrio_size * sizeof(struct pfr_table); pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfrts, totlen); if (error) { free(pfrts, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_del_tables(pfrts, io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); free(pfrts, M_TEMP); break; } case DIOCRGETTABLES: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_table *pfrts; size_t totlen; int n; if (io->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } PF_RULES_RLOCK(); n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); if (n < 0) { PF_RULES_RUNLOCK(); error = EINVAL; break; } io->pfrio_size = min(io->pfrio_size, n); totlen = io->pfrio_size * sizeof(struct pfr_table); pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), M_TEMP, M_NOWAIT | M_ZERO); if (pfrts == NULL) { error = ENOMEM; PF_RULES_RUNLOCK(); break; } error = pfr_get_tables(&io->pfrio_table, pfrts, &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_RUNLOCK(); if (error == 0) error = copyout(pfrts, io->pfrio_buffer, totlen); free(pfrts, M_TEMP); break; } case DIOCRGETTSTATS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_tstats *pfrtstats; size_t totlen; int n; if (io->pfrio_esize != sizeof(struct pfr_tstats)) { error = ENODEV; break; } PF_TABLE_STATS_LOCK(); PF_RULES_RLOCK(); n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); if (n < 0) { PF_RULES_RUNLOCK(); PF_TABLE_STATS_UNLOCK(); error = EINVAL; break; } io->pfrio_size = min(io->pfrio_size, n); totlen = io->pfrio_size * sizeof(struct pfr_tstats); pfrtstats = mallocarray(io->pfrio_size, sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); if (pfrtstats == 
NULL) { error = ENOMEM; PF_RULES_RUNLOCK(); PF_TABLE_STATS_UNLOCK(); break; } error = pfr_get_tstats(&io->pfrio_table, pfrtstats, &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_RUNLOCK(); PF_TABLE_STATS_UNLOCK(); if (error == 0) error = copyout(pfrtstats, io->pfrio_buffer, totlen); free(pfrtstats, M_TEMP); break; } case DIOCRCLRTSTATS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_table *pfrts; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { /* We used to count tables and use the minimum required * size, so we didn't fail on overly large requests. * Keep doing so. */ io->pfrio_size = pf_ioctl_maxcount; break; } totlen = io->pfrio_size * sizeof(struct pfr_table); pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfrts, totlen); if (error) { free(pfrts, M_TEMP); break; } PF_TABLE_STATS_LOCK(); PF_RULES_RLOCK(); error = pfr_clr_tstats(pfrts, io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_RUNLOCK(); PF_TABLE_STATS_UNLOCK(); free(pfrts, M_TEMP); break; } case DIOCRSETTFLAGS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_table *pfrts; size_t totlen; int n; if (io->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } PF_RULES_RLOCK(); n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); if (n < 0) { PF_RULES_RUNLOCK(); error = EINVAL; break; } io->pfrio_size = min(io->pfrio_size, n); PF_RULES_RUNLOCK(); totlen = io->pfrio_size * sizeof(struct pfr_table); pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfrts, totlen); if (error) { free(pfrts, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_set_tflags(pfrts, io->pfrio_size, io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); free(pfrts, M_TEMP); break; } case DIOCRCLRADDRS: { struct pfioc_table *io = (struct pfioc_table *)addr; if (io->pfrio_esize != 0) { error = ENODEV; break; } PF_RULES_WLOCK(); error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); break; } case DIOCRADDADDRS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { error = EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_addr); pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfras, totlen); if (error) { free(pfras, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_add_addrs(&io->pfrio_table, pfras, io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) error = copyout(pfras, io->pfrio_buffer, totlen); free(pfras, M_TEMP); break; } case DIOCRDELADDRS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { error 
= EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_addr); pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfras, totlen); if (error) { free(pfras, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_del_addrs(&io->pfrio_table, pfras, io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) error = copyout(pfras, io->pfrio_buffer, totlen); free(pfras, M_TEMP); break; } case DIOCRSETADDRS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen, count; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { error = EINVAL; break; } count = max(io->pfrio_size, io->pfrio_size2); if (count > pf_ioctl_maxcount || WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { error = EINVAL; break; } totlen = count * sizeof(struct pfr_addr); pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfras, totlen); if (error) { free(pfras, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_set_addrs(&io->pfrio_table, pfras, io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | PFR_FLAG_USERIOCTL, 0); PF_RULES_WUNLOCK(); if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) error = copyout(pfras, io->pfrio_buffer, totlen); free(pfras, M_TEMP); break; } case DIOCRGETADDRS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { error = EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_addr); pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), M_TEMP, M_WAITOK | M_ZERO); PF_RULES_RLOCK(); error = pfr_get_addrs(&io->pfrio_table, pfras, &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_RUNLOCK(); if (error == 0) error = copyout(pfras, io->pfrio_buffer, totlen); free(pfras, M_TEMP); break; } case DIOCRGETASTATS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_astats *pfrastats; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_astats)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { error = EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_astats); pfrastats = mallocarray(io->pfrio_size, sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); PF_RULES_RLOCK(); error = pfr_get_astats(&io->pfrio_table, pfrastats, &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_RUNLOCK(); if (error == 0) error = copyout(pfrastats, io->pfrio_buffer, totlen); free(pfrastats, M_TEMP); break; } case DIOCRCLRASTATS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { error = EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_addr); pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfras, totlen); if (error) { free(pfras, M_TEMP); break; } PF_RULES_WLOCK(); 
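/* Zero the per-address counters; with PFR_FLAG_FEEDBACK the per-address result codes are copied back out below. */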
error = pfr_clr_astats(&io->pfrio_table, pfras, io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) error = copyout(pfras, io->pfrio_buffer, totlen); free(pfras, M_TEMP); break; } case DIOCRTSTADDRS: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { error = EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_addr); pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfras, totlen); if (error) { free(pfras, M_TEMP); break; } PF_RULES_RLOCK(); error = pfr_tst_addrs(&io->pfrio_table, pfras, io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_RUNLOCK(); if (error == 0) error = copyout(pfras, io->pfrio_buffer, totlen); free(pfras, M_TEMP); break; } case DIOCRINADEFINE: { struct pfioc_table *io = (struct pfioc_table *)addr; struct pfr_addr *pfras; size_t totlen; if (io->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { error = EINVAL; break; } totlen = io->pfrio_size * sizeof(struct pfr_addr); pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), M_TEMP, M_WAITOK); error = copyin(io->pfrio_buffer, pfras, totlen); if (error) { free(pfras, M_TEMP); break; } PF_RULES_WLOCK(); error = pfr_ina_define(&io->pfrio_table, pfras, io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); PF_RULES_WUNLOCK(); free(pfras, M_TEMP); break; } case DIOCOSFPADD: { struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; PF_RULES_WLOCK(); error = pf_osfp_add(io); PF_RULES_WUNLOCK(); break; } case DIOCOSFPGET: { struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; PF_RULES_RLOCK(); error = pf_osfp_get(io); PF_RULES_RUNLOCK(); break; } case DIOCXBEGIN: { struct pfioc_trans *io = (struct pfioc_trans *)addr; struct pfioc_trans_e *ioes, *ioe; size_t totlen; int i; if (io->esize != sizeof(*ioe)) { error = ENODEV; break; } if (io->size < 0 || io->size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { error = EINVAL; break; } totlen = sizeof(struct pfioc_trans_e) * io->size; ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), M_TEMP, M_WAITOK); error = copyin(io->array, ioes, totlen); if (error) { free(ioes, M_TEMP); break; } /* Ensure there are no more Ethernet rules to clean up.
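Inactive Ethernet rules are freed via epoch callbacks, so drain those before taking the rules lock.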
*/ NET_EPOCH_DRAIN_CALLBACKS(); PF_RULES_WLOCK(); for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; switch (ioe->rs_num) { case PF_RULESET_ETH: if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; } break; #ifdef ALTQ case PF_RULESET_ALTQ: if (ioe->anchor[0]) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EINVAL; goto fail; } if ((error = pf_begin_altq(&ioe->ticket))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; } break; #endif /* ALTQ */ case PF_RULESET_TABLE: { struct pfr_table table; bzero(&table, sizeof(table)); strlcpy(table.pfrt_anchor, ioe->anchor, sizeof(table.pfrt_anchor)); if ((error = pfr_ina_begin(&table, &ioe->ticket, NULL, 0))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; } break; } default: if ((error = pf_begin_rules(&ioe->ticket, ioe->rs_num, ioe->anchor))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; } break; } } PF_RULES_WUNLOCK(); error = copyout(ioes, io->array, totlen); free(ioes, M_TEMP); break; } case DIOCXROLLBACK: { struct pfioc_trans *io = (struct pfioc_trans *)addr; struct pfioc_trans_e *ioe, *ioes; size_t totlen; int i; if (io->esize != sizeof(*ioe)) { error = ENODEV; break; } if (io->size < 0 || io->size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { error = EINVAL; break; } totlen = sizeof(struct pfioc_trans_e) * io->size; ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), M_TEMP, M_WAITOK); error = copyin(io->array, ioes, totlen); if (error) { free(ioes, M_TEMP); break; } PF_RULES_WLOCK(); for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; switch (ioe->rs_num) { case PF_RULESET_ETH: if ((error = pf_rollback_eth(ioe->ticket, ioe->anchor))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; #ifdef ALTQ case PF_RULESET_ALTQ: if (ioe->anchor[0]) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EINVAL; goto fail; } if ((error = pf_rollback_altq(ioe->ticket))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; #endif /* ALTQ */ case PF_RULESET_TABLE: { struct pfr_table table; bzero(&table, sizeof(table)); strlcpy(table.pfrt_anchor, ioe->anchor, sizeof(table.pfrt_anchor)); if ((error = pfr_ina_rollback(&table, ioe->ticket, NULL, 0))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; } default: if ((error = pf_rollback_rules(ioe->ticket, ioe->rs_num, ioe->anchor))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; } } PF_RULES_WUNLOCK(); free(ioes, M_TEMP); break; } case DIOCXCOMMIT: { struct pfioc_trans *io = (struct pfioc_trans *)addr; struct pfioc_trans_e *ioe, *ioes; struct pf_kruleset *rs; struct pf_keth_ruleset *ers; size_t totlen; int i; if (io->esize != sizeof(*ioe)) { error = ENODEV; break; } if (io->size < 0 || io->size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { error = EINVAL; break; } totlen = sizeof(struct pfioc_trans_e) * io->size; ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), M_TEMP, M_WAITOK); error = copyin(io->array, ioes, totlen); if (error) { free(ioes, M_TEMP); break; } PF_RULES_WLOCK(); /* First make sure everything will succeed.
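All tickets are validated up front, so the commit loop below never has to unwind a partially applied transaction.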
for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { ioe->anchor[sizeof(ioe->anchor) - 1] = 0; switch (ioe->rs_num) { case PF_RULESET_ETH: ers = pf_find_keth_ruleset(ioe->anchor); if (ers == NULL || ioe->ticket == 0 || ioe->ticket != ers->inactive.ticket) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EINVAL; goto fail; } break; #ifdef ALTQ case PF_RULESET_ALTQ: if (ioe->anchor[0]) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EINVAL; goto fail; } if (!V_altqs_inactive_open || ioe->ticket != V_ticket_altqs_inactive) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EBUSY; goto fail; } break; #endif /* ALTQ */ case PF_RULESET_TABLE: rs = pf_find_kruleset(ioe->anchor); if (rs == NULL || !rs->topen || ioe->ticket != rs->tticket) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EBUSY; goto fail; } break; default: if (ioe->rs_num < 0 || ioe->rs_num >= PF_RULESET_MAX) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EINVAL; goto fail; } rs = pf_find_kruleset(ioe->anchor); if (rs == NULL || !rs->rules[ioe->rs_num].inactive.open || rs->rules[ioe->rs_num].inactive.ticket != ioe->ticket) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); error = EBUSY; goto fail; } break; } } /* Now do the commit - no errors should happen here. */ for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { switch (ioe->rs_num) { case PF_RULESET_ETH: if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; #ifdef ALTQ case PF_RULESET_ALTQ: if ((error = pf_commit_altq(ioe->ticket))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; #endif /* ALTQ */ case PF_RULESET_TABLE: { struct pfr_table table; bzero(&table, sizeof(table)); (void)strlcpy(table.pfrt_anchor, ioe->anchor, sizeof(table.pfrt_anchor)); if ((error = pfr_ina_commit(&table, ioe->ticket, NULL, NULL, 0))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; } default: if ((error = pf_commit_rules(ioe->ticket, ioe->rs_num, ioe->anchor))) { PF_RULES_WUNLOCK(); free(ioes, M_TEMP); goto fail; /* really bad */ } break; } } PF_RULES_WUNLOCK(); /* Only hook into Ethernet traffic if we've got rules for it. */ if (!
TAILQ_EMPTY(V_pf_keth->active.rules)) hook_pf_eth(); else dehook_pf_eth(); free(ioes, M_TEMP); break; } case DIOCGETSRCNODES: { struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; struct pf_srchash *sh; struct pf_ksrc_node *n; struct pf_src_node *p, *pstore; uint32_t i, nr = 0; for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) nr++; PF_HASHROW_UNLOCK(sh); } psn->psn_len = min(psn->psn_len, sizeof(struct pf_src_node) * nr); if (psn->psn_len == 0) { psn->psn_len = sizeof(struct pf_src_node) * nr; break; } nr = 0; p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) { if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) break; pf_src_node_copy(n, p); p++; nr++; } PF_HASHROW_UNLOCK(sh); } error = copyout(pstore, psn->psn_src_nodes, sizeof(struct pf_src_node) * nr); if (error) { free(pstore, M_TEMP); break; } psn->psn_len = sizeof(struct pf_src_node) * nr; free(pstore, M_TEMP); break; } case DIOCCLRSRCNODES: { pf_clear_srcnodes(NULL); pf_purge_expired_src_nodes(); break; } case DIOCKILLSRCNODES: pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); break; #ifdef COMPAT_FREEBSD13 case DIOCKEEPCOUNTERS_FREEBSD13: #endif case DIOCKEEPCOUNTERS: error = pf_keepcounters((struct pfioc_nv *)addr); break; case DIOCGETSYNCOOKIES: error = pf_get_syncookies((struct pfioc_nv *)addr); break; case DIOCSETSYNCOOKIES: error = pf_set_syncookies((struct pfioc_nv *)addr); break; case DIOCSETHOSTID: { u_int32_t *hostid = (u_int32_t *)addr; PF_RULES_WLOCK(); if (*hostid == 0) V_pf_status.hostid = arc4random(); else V_pf_status.hostid = *hostid; PF_RULES_WUNLOCK(); break; } case DIOCOSFPFLUSH: PF_RULES_WLOCK(); pf_osfp_flush(); PF_RULES_WUNLOCK(); break; case DIOCIGETIFACES: { struct pfioc_iface *io = (struct pfioc_iface *)addr; struct pfi_kif *ifstore; size_t bufsiz; if (io->pfiio_esize != sizeof(struct pfi_kif)) { error = ENODEV; break; } if (io->pfiio_size < 0 || io->pfiio_size > pf_ioctl_maxcount || WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { error = EINVAL; break; } io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; bufsiz = io->pfiio_size * sizeof(struct pfi_kif); ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), M_TEMP, M_WAITOK | M_ZERO); PF_RULES_RLOCK(); pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); PF_RULES_RUNLOCK(); error = copyout(ifstore, io->pfiio_buffer, bufsiz); free(ifstore, M_TEMP); break; } case DIOCSETIFFLAG: { struct pfioc_iface *io = (struct pfioc_iface *)addr; io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; PF_RULES_WLOCK(); error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); PF_RULES_WUNLOCK(); break; } case DIOCCLRIFFLAG: { struct pfioc_iface *io = (struct pfioc_iface *)addr; io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; PF_RULES_WLOCK(); error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); PF_RULES_WUNLOCK(); break; } case DIOCSETREASS: { u_int32_t *reass = (u_int32_t *)addr; V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF); /* Removal of DF flag without reassembly enabled is not a * valid combination. Disable reassembly in such case. 
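* Clearing PF_REASS_ENABLED therefore drops PF_REASS_NODF as well.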
*/ if (!(V_pf_status.reass & PF_REASS_ENABLED)) V_pf_status.reass = 0; break; } default: error = ENODEV; break; } fail: CURVNET_RESTORE(); #undef ERROUT_IOCTL return (error); } void pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version) { bzero(sp, sizeof(union pfsync_state_union)); /* copy from state key */ sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto; sp->pfs_1301.af = st->key[PF_SK_WIRE]->af; /* copy from state */ strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname)); bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr)); sp->pfs_1301.creation = htonl(time_uptime - st->creation); sp->pfs_1301.expire = pf_state_expires(st); if (sp->pfs_1301.expire <= time_uptime) sp->pfs_1301.expire = htonl(0); else sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime); sp->pfs_1301.direction = st->direction; sp->pfs_1301.log = st->act.log; sp->pfs_1301.timeout = st->timeout; switch (msg_version) { case PFSYNC_MSG_VERSION_1301: sp->pfs_1301.state_flags = st->state_flags; break; case PFSYNC_MSG_VERSION_1400: sp->pfs_1400.state_flags = htons(st->state_flags); sp->pfs_1400.qid = htons(st->act.qid); sp->pfs_1400.pqid = htons(st->act.pqid); sp->pfs_1400.dnpipe = htons(st->act.dnpipe); sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe); sp->pfs_1400.rtableid = htonl(st->act.rtableid); sp->pfs_1400.min_ttl = st->act.min_ttl; sp->pfs_1400.set_tos = st->act.set_tos; sp->pfs_1400.max_mss = htons(st->act.max_mss); sp->pfs_1400.set_prio[0] = st->act.set_prio[0]; sp->pfs_1400.set_prio[1] = st->act.set_prio[1]; sp->pfs_1400.rt = st->rt; if (st->rt_kif) strlcpy(sp->pfs_1400.rt_ifname, st->rt_kif->pfik_name, sizeof(sp->pfs_1400.rt_ifname)); break; default: panic("%s: Unsupported pfsync_msg_version %d", __func__, msg_version); } if (st->src_node) sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE; if (st->nat_src_node) sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE; sp->pfs_1301.id = st->id; sp->pfs_1301.creatorid = st->creatorid; pf_state_peer_hton(&st->src, &sp->pfs_1301.src); pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst); if (st->rule.ptr == NULL) sp->pfs_1301.rule = htonl(-1); else sp->pfs_1301.rule = htonl(st->rule.ptr->nr); if (st->anchor.ptr == NULL) sp->pfs_1301.anchor = htonl(-1); else sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr); if (st->nat_rule.ptr == NULL) sp->pfs_1301.nat_rule = htonl(-1); else sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr); pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]); pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]); pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]); pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]); } void pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) { bzero(sp, sizeof(*sp)); sp->version = PF_STATE_VERSION; /* copy from state key */ sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 
sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; sp->proto = st->key[PF_SK_WIRE]->proto; sp->af = st->key[PF_SK_WIRE]->af; /* copy from state */ strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, sizeof(sp->orig_ifname)); bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); sp->creation = htonl(time_uptime - st->creation); sp->expire = pf_state_expires(st); if (sp->expire <= time_uptime) sp->expire = htonl(0); else sp->expire = htonl(sp->expire - time_uptime); sp->direction = st->direction; sp->log = st->act.log; sp->timeout = st->timeout; /* 8 bits for the old libpfctl, 16 bits for the new libpfctl */ sp->state_flags_compat = st->state_flags; sp->state_flags = htons(st->state_flags); if (st->src_node) sp->sync_flags |= PFSYNC_FLAG_SRCNODE; if (st->nat_src_node) sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; sp->id = st->id; sp->creatorid = st->creatorid; pf_state_peer_hton(&st->src, &sp->src); pf_state_peer_hton(&st->dst, &sp->dst); if (st->rule.ptr == NULL) sp->rule = htonl(-1); else sp->rule = htonl(st->rule.ptr->nr); if (st->anchor.ptr == NULL) sp->anchor = htonl(-1); else sp->anchor = htonl(st->anchor.ptr->nr); if (st->nat_rule.ptr == NULL) sp->nat_rule = htonl(-1); else sp->nat_rule = htonl(st->nat_rule.ptr->nr); sp->packets[0] = st->packets[0]; sp->packets[1] = st->packets[1]; sp->bytes[0] = st->bytes[0]; sp->bytes[1] = st->bytes[1]; sp->qid = htons(st->act.qid); sp->pqid = htons(st->act.pqid); sp->dnpipe = htons(st->act.dnpipe); sp->dnrpipe = htons(st->act.dnrpipe); sp->rtableid = htonl(st->act.rtableid); sp->min_ttl = st->act.min_ttl; sp->set_tos = st->act.set_tos; sp->max_mss = htons(st->act.max_mss); sp->rt = st->rt; if (st->rt_kif) strlcpy(sp->rt_ifname, st->rt_kif->pfik_name, sizeof(sp->rt_ifname)); sp->set_prio[0] = st->act.set_prio[0]; sp->set_prio[1] = st->act.set_prio[1]; } static void pf_tbladdr_copyout(struct pf_addr_wrap *aw) { struct pfr_ktable *kt; KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); kt = aw->p.tbl; if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) kt = kt->pfrkt_root; aw->p.tbl = NULL; aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 
kt->pfrkt_cnt : -1; } static int pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, size_t number, char **names) { nvlist_t *nvc; nvc = nvlist_create(0); if (nvc == NULL) return (ENOMEM); for (int i = 0; i < number; i++) { nvlist_append_number_array(nvc, "counters", counter_u64_fetch(counters[i])); nvlist_append_string_array(nvc, "names", names[i]); nvlist_append_number_array(nvc, "ids", i); } nvlist_add_nvlist(nvl, name, nvc); nvlist_destroy(nvc); return (0); } static int pf_getstatus(struct pfioc_nv *nv) { nvlist_t *nvl = NULL, *nvc = NULL; void *nvlpacked = NULL; int error; struct pf_status s; char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; PF_RULES_RLOCK_TRACKER; #define ERROUT(x) ERROUT_FUNCTION(errout, x) PF_RULES_RLOCK(); nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvlist_add_bool(nvl, "running", V_pf_status.running); nvlist_add_number(nvl, "since", V_pf_status.since); nvlist_add_number(nvl, "debug", V_pf_status.debug); nvlist_add_number(nvl, "hostid", V_pf_status.hostid); nvlist_add_number(nvl, "states", V_pf_status.states); nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); nvlist_add_number(nvl, "reass", V_pf_status.reass); nvlist_add_bool(nvl, "syncookies_active", V_pf_status.syncookies_active); /* counters */ error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, PFRES_MAX, pf_reasons); if (error != 0) ERROUT(error); /* lcounters */ error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, KLCNT_MAX, pf_lcounter); if (error != 0) ERROUT(error); /* fcounters */ nvc = nvlist_create(0); if (nvc == NULL) ERROUT(ENOMEM); for (int i = 0; i < FCNT_MAX; i++) { nvlist_append_number_array(nvc, "counters", pf_counter_u64_fetch(&V_pf_status.fcounters[i])); nvlist_append_string_array(nvc, "names", pf_fcounter[i]); nvlist_append_number_array(nvc, "ids", i); } nvlist_add_nvlist(nvl, "fcounters", nvc); nvlist_destroy(nvc); nvc = NULL; /* scounters */ error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, SCNT_MAX, pf_fcounter); if (error != 0) ERROUT(error); nvlist_add_string(nvl, "ifname", V_pf_status.ifname); nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH); pfi_update_status(V_pf_status.ifname, &s); /* pcounters / bcounters */ for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 3; k++) { nvlist_append_number_array(nvl, "pcounters", s.pcounters[i][j][k]); } nvlist_append_number_array(nvl, "bcounters", s.bcounters[i][j]); } } nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); PF_RULES_RUNLOCK(); error = copyout(nvlpacked, nv->data, nv->len); goto done; #undef ERROUT errout: PF_RULES_RUNLOCK(); done: free(nvlpacked, M_NVLIST); nvlist_destroy(nvc); nvlist_destroy(nvl); return (error); } /* * XXX - Check for version mismatch!!! */ static void pf_clear_all_states(void) { struct pf_kstate *s; u_int i; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { s->timeout = PFTM_PURGE; /* Don't send out individual delete messages.
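Every state is being flushed, so per-state pfsync deletes would only generate churn.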
*/ s->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(s); goto relock; } PF_HASHROW_UNLOCK(ih); } } static int pf_clear_tables(void) { struct pfioc_table io; int error; bzero(&io, sizeof(io)); error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, io.pfrio_flags); return (error); } static void pf_clear_srcnodes(struct pf_ksrc_node *n) { struct pf_kstate *s; int i; for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (n == NULL || n == s->src_node) s->src_node = NULL; if (n == NULL || n == s->nat_src_node) s->nat_src_node = NULL; } PF_HASHROW_UNLOCK(ih); } if (n == NULL) { struct pf_srchash *sh; for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) { n->expire = 1; n->states = 0; } PF_HASHROW_UNLOCK(sh); } } else { /* XXX: hash slot should already be locked here. */ n->expire = 1; n->states = 0; } } static void pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) { struct pf_ksrc_node_list kill; LIST_INIT(&kill); for (int i = 0; i <= pf_srchashmask; i++) { struct pf_srchash *sh = &V_pf_srchash[i]; struct pf_ksrc_node *sn, *tmp; PF_HASHROW_LOCK(sh); LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) if (PF_MATCHA(psnk->psnk_src.neg, &psnk->psnk_src.addr.v.a.addr, &psnk->psnk_src.addr.v.a.mask, &sn->addr, sn->af) && PF_MATCHA(psnk->psnk_dst.neg, &psnk->psnk_dst.addr.v.a.addr, &psnk->psnk_dst.addr.v.a.mask, &sn->raddr, sn->af)) { pf_unlink_src_node(sn); LIST_INSERT_HEAD(&kill, sn, entry); sn->expire = 1; } PF_HASHROW_UNLOCK(sh); } for (int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_kstate *s; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { if (s->src_node && s->src_node->expire == 1) s->src_node = NULL; if (s->nat_src_node && s->nat_src_node->expire == 1) s->nat_src_node = NULL; } PF_HASHROW_UNLOCK(ih); } psnk->psnk_killed = pf_free_src_nodes(&kill); } static int pf_keepcounters(struct pfioc_nv *nv) { nvlist_t *nvl = NULL; void *nvlpacked = NULL; int error = 0; #define ERROUT(x) ERROUT_FUNCTION(on_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); if (! nvlist_exists_bool(nvl, "keep_counters")) ERROUT(EBADMSG); V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); on_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); return (error); } static unsigned int pf_clear_states(const struct pf_kstate_kill *kill) { struct pf_state_key_cmp match_key; struct pf_kstate *s; struct pfi_kkif *kif; int idx; unsigned int killed = 0, dir; for (unsigned int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock_DIOCCLRSTATES: PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) { /* For floating states look at the original kif. */ kif = s->kif == V_pfi_all ? 
s->orig_kif : s->kif; if (kill->psk_ifname[0] && strcmp(kill->psk_ifname, kif->pfik_name)) continue; if (kill->psk_kill_match) { bzero(&match_key, sizeof(match_key)); if (s->direction == PF_OUT) { dir = PF_IN; idx = PF_SK_STACK; } else { dir = PF_OUT; idx = PF_SK_WIRE; } match_key.af = s->key[idx]->af; match_key.proto = s->key[idx]->proto; PF_ACPY(&match_key.addr[0], &s->key[idx]->addr[1], match_key.af); match_key.port[0] = s->key[idx]->port[1]; PF_ACPY(&match_key.addr[1], &s->key[idx]->addr[0], match_key.af); match_key.port[1] = s->key[idx]->port[0]; } /* * Don't send out individual * delete messages. */ s->state_flags |= PFSTATE_NOSYNC; pf_unlink_state(s); killed++; if (kill->psk_kill_match) killed += pf_kill_matching_state(&match_key, dir); goto relock_DIOCCLRSTATES; } PF_HASHROW_UNLOCK(ih); } if (V_pfsync_clear_states_ptr != NULL) V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); return (killed); } static void pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) { struct pf_kstate *s; if (kill->psk_pfcmp.id) { if (kill->psk_pfcmp.creatorid == 0) kill->psk_pfcmp.creatorid = V_pf_status.hostid; if ((s = pf_find_state_byid(kill->psk_pfcmp.id, kill->psk_pfcmp.creatorid))) { pf_unlink_state(s); *killed = 1; } return; } for (unsigned int i = 0; i <= pf_hashmask; i++) *killed += pf_killstates_row(kill, &V_pf_idhash[i]); return; } static int pf_killstates_nv(struct pfioc_nv *nv) { struct pf_kstate_kill kill; nvlist_t *nvl = NULL; void *nvlpacked = NULL; int error = 0; unsigned int killed = 0; #define ERROUT(x) ERROUT_FUNCTION(on_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); if (error) ERROUT(error); pf_killstates(&kill, &killed); free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvlist_add_number(nvl, "killed", killed); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); on_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); return (error); } static int pf_clearstates_nv(struct pfioc_nv *nv) { struct pf_kstate_kill kill; nvlist_t *nvl = NULL; void *nvlpacked = NULL; int error = 0; unsigned int killed; #define ERROUT(x) ERROUT_FUNCTION(on_error, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); if (error) ERROUT(error); killed = pf_clear_states(&kill); free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvlist_add_number(nvl, "killed", killed); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT on_error: nvlist_destroy(nvl); free(nvlpacked, M_NVLIST); return (error); } static int pf_getstate(struct pfioc_nv *nv) { nvlist_t *nvl = 
NULL, *nvls; void *nvlpacked = NULL; struct pf_kstate *s = NULL; int error = 0; uint64_t id, creatorid; #define ERROUT(x) ERROUT_FUNCTION(errout, x) if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); if (nvlpacked == NULL) ERROUT(ENOMEM); error = copyin(nv->data, nvlpacked, nv->len); if (error) ERROUT(error); nvl = nvlist_unpack(nvlpacked, nv->len, 0); if (nvl == NULL) ERROUT(EBADMSG); PFNV_CHK(pf_nvuint64(nvl, "id", &id)); PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); s = pf_find_state_byid(id, creatorid); if (s == NULL) ERROUT(ENOENT); free(nvlpacked, M_NVLIST); nvlpacked = NULL; nvlist_destroy(nvl); nvl = nvlist_create(0); if (nvl == NULL) ERROUT(ENOMEM); nvls = pf_state_to_nvstate(s); if (nvls == NULL) ERROUT(ENOMEM); nvlist_add_nvlist(nvl, "state", nvls); nvlist_destroy(nvls); nvlpacked = nvlist_pack(nvl, &nv->len); if (nvlpacked == NULL) ERROUT(ENOMEM); if (nv->size == 0) ERROUT(0); else if (nv->size < nv->len) ERROUT(ENOSPC); error = copyout(nvlpacked, nv->data, nv->len); #undef ERROUT errout: if (s != NULL) PF_STATE_UNLOCK(s); free(nvlpacked, M_NVLIST); nvlist_destroy(nvl); return (error); } /* * XXX - Check for version mismatch!!! */ /* * Duplicate pfctl -Fa operation to get rid of as much as we can. */ static int shutdown_pf(void) { int error = 0; u_int32_t t[5]; char nn = '\0'; do { if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); break; } if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); break; /* XXX: rollback? */ } if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); break; /* XXX: rollback? */ } if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); break; /* XXX: rollback? */ } if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); break; /* XXX: rollback? 
*/ } /* XXX: these should always succeed here */ pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); pf_commit_rules(t[2], PF_RULESET_NAT, &nn); pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); pf_commit_rules(t[4], PF_RULESET_RDR, &nn); if ((error = pf_clear_tables()) != 0) break; if ((error = pf_begin_eth(&t[0], &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); break; } pf_commit_eth(t[0], &nn); #ifdef ALTQ if ((error = pf_begin_altq(&t[0])) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); break; } pf_commit_altq(t[0]); #endif pf_clear_all_states(); pf_clear_srcnodes(NULL); /* status does not use malloc'ed memory, so there is no need to clean it up */ /* fingerprints and interfaces have their own cleanup code */ } while(0); return (error); } static pfil_return_t pf_check_return(int chk, struct mbuf **m) { switch (chk) { case PF_PASS: if (*m == NULL) return (PFIL_CONSUMED); else return (PFIL_PASS); break; default: if (*m != NULL) { m_freem(*m); *m = NULL; } return (PFIL_DROPPED); } } static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp) { int chk; chk = pf_test_eth(PF_IN, flags, ifp, m, inp); return (pf_check_return(chk, m)); } static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp) { int chk; chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); return (pf_check_return(chk, m)); } #ifdef INET static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp) { int chk; chk = pf_test(PF_IN, flags, ifp, m, inp, NULL); return (pf_check_return(chk, m)); } static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp) { int chk; chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL); return (pf_check_return(chk, m)); } #endif #ifdef INET6 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp) { int chk; /* * In case of loopback traffic IPv6 uses the real interface in * order to support scoped addresses. In order to support stateful * filtering we have changed this to lo0, as is the case in IPv4. */ CURVNET_SET(ifp->if_vnet); chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ?
V_loif : ifp, m, inp, NULL); CURVNET_RESTORE(); return (pf_check_return(chk, m)); } static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, void *ruleset __unused, struct inpcb *inp) { int chk; CURVNET_SET(ifp->if_vnet); chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL); CURVNET_RESTORE(); return (pf_check_return(chk, m)); } #endif /* INET6 */ VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook); VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook); #define V_pf_eth_in_hook VNET(pf_eth_in_hook) #define V_pf_eth_out_hook VNET(pf_eth_out_hook) #ifdef INET VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook); VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook); #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook) #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook) #endif #ifdef INET6 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook); VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook); #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook) #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook) #endif static void hook_pf_eth(void) { struct pfil_hook_args pha = { .pa_version = PFIL_VERSION, .pa_modname = "pf", .pa_type = PFIL_TYPE_ETHERNET, }; struct pfil_link_args pla = { .pa_version = PFIL_VERSION, }; int ret __diagused; if (atomic_load_bool(&V_pf_pfil_eth_hooked)) return; pha.pa_mbuf_chk = pf_eth_check_in; pha.pa_flags = PFIL_IN; pha.pa_rulname = "eth-in"; V_pf_eth_in_hook = pfil_add_hook(&pha); pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_link_pfil_head; pla.pa_hook = V_pf_eth_in_hook; ret = pfil_link(&pla); MPASS(ret == 0); pha.pa_mbuf_chk = pf_eth_check_out; pha.pa_flags = PFIL_OUT; pha.pa_rulname = "eth-out"; V_pf_eth_out_hook = pfil_add_hook(&pha); pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_link_pfil_head; pla.pa_hook = V_pf_eth_out_hook; ret = pfil_link(&pla); MPASS(ret == 0); atomic_store_bool(&V_pf_pfil_eth_hooked, true); } static void hook_pf(void) { struct pfil_hook_args pha = { .pa_version = PFIL_VERSION, .pa_modname = "pf", }; struct pfil_link_args pla = { .pa_version = PFIL_VERSION, }; int ret __diagused; if (atomic_load_bool(&V_pf_pfil_hooked)) return; #ifdef INET pha.pa_type = PFIL_TYPE_IP4; pha.pa_mbuf_chk = pf_check_in; pha.pa_flags = PFIL_IN; pha.pa_rulname = "default-in"; V_pf_ip4_in_hook = pfil_add_hook(&pha); pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_inet_pfil_head; pla.pa_hook = V_pf_ip4_in_hook; ret = pfil_link(&pla); MPASS(ret == 0); pha.pa_mbuf_chk = pf_check_out; pha.pa_flags = PFIL_OUT; pha.pa_rulname = "default-out"; V_pf_ip4_out_hook = pfil_add_hook(&pha); pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_inet_pfil_head; pla.pa_hook = V_pf_ip4_out_hook; ret = pfil_link(&pla); MPASS(ret == 0); if (V_pf_filter_local) { pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_inet_local_pfil_head; pla.pa_hook = V_pf_ip4_out_hook; ret = pfil_link(&pla); MPASS(ret == 0); } #endif #ifdef INET6 pha.pa_type = PFIL_TYPE_IP6; pha.pa_mbuf_chk = pf_check6_in; pha.pa_flags = PFIL_IN; pha.pa_rulname = "default-in6"; V_pf_ip6_in_hook = pfil_add_hook(&pha); pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_inet6_pfil_head; pla.pa_hook = V_pf_ip6_in_hook; ret = pfil_link(&pla); MPASS(ret == 0); pha.pa_mbuf_chk = pf_check6_out; pha.pa_rulname = "default-out6"; pha.pa_flags = PFIL_OUT; V_pf_ip6_out_hook = pfil_add_hook(&pha); pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_inet6_pfil_head; pla.pa_hook = V_pf_ip6_out_hook; ret = pfil_link(&pla); 
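	/*
	 * pfil_add_hook() only registers a hook; pfil_link() is what
	 * attaches it to a packet head.  PFIL_HEADPTR and PFIL_HOOKPTR
	 * mean pa_head and pa_hook are passed as pointers rather than
	 * resolved by name, so each link is expected to succeed and is
	 * asserted below.
	 */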
MPASS(ret == 0); if (V_pf_filter_local) { pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; pla.pa_head = V_inet6_local_pfil_head; pla.pa_hook = V_pf_ip6_out_hook; ret = pfil_link(&pla); MPASS(ret == 0); } #endif atomic_store_bool(&V_pf_pfil_hooked, true); } static void dehook_pf_eth(void) { if (!atomic_load_bool(&V_pf_pfil_eth_hooked)) return; pfil_remove_hook(V_pf_eth_in_hook); pfil_remove_hook(V_pf_eth_out_hook); atomic_store_bool(&V_pf_pfil_eth_hooked, false); } static void dehook_pf(void) { if (!atomic_load_bool(&V_pf_pfil_hooked)) return; #ifdef INET pfil_remove_hook(V_pf_ip4_in_hook); pfil_remove_hook(V_pf_ip4_out_hook); #endif #ifdef INET6 pfil_remove_hook(V_pf_ip6_in_hook); pfil_remove_hook(V_pf_ip6_out_hook); #endif atomic_store_bool(&V_pf_pfil_hooked, false); } static void pf_load_vnet(void) { V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE); sx_init(&V_pf_ioctl_lock, "pf ioctl"); pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT); #ifdef ALTQ pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT); #endif V_pf_keth = &V_pf_main_keth_anchor.ruleset; pfattach_vnet(); V_pf_vnet_active = 1; } static int pf_load(void) { int error; sx_init(&pf_end_lock, "pf end thread"); pf_mtag_initialize(); pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME); if (pf_dev == NULL) return (ENOMEM); pf_end_threads = 0; error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge"); if (error != 0) return (error); pfi_initialize(); return (0); } static void pf_unload_vnet(void) { int ret __diagused; V_pf_vnet_active = 0; V_pf_status.running = 0; dehook_pf(); dehook_pf_eth(); PF_RULES_WLOCK(); pf_syncookies_cleanup(); shutdown_pf(); PF_RULES_WUNLOCK(); /* Make sure we've cleaned up ethernet rules before we continue. */ NET_EPOCH_DRAIN_CALLBACKS(); ret = swi_remove(V_pf_swi_cookie); MPASS(ret == 0); ret = intr_event_destroy(V_pf_swi_ie); MPASS(ret == 0); pf_unload_vnet_purge(); pf_normalize_cleanup(); PF_RULES_WLOCK(); pfi_cleanup_vnet(); PF_RULES_WUNLOCK(); pfr_cleanup(); pf_osfp_flush(); pf_cleanup(); if (IS_DEFAULT_VNET(curvnet)) pf_mtag_cleanup(); pf_cleanup_tagset(&V_pf_tags); #ifdef ALTQ pf_cleanup_tagset(&V_pf_qids); #endif uma_zdestroy(V_pf_tag_z); #ifdef PF_WANT_32_TO_64_COUNTER PF_RULES_WLOCK(); LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); MPASS(LIST_EMPTY(&V_pf_allkiflist)); MPASS(V_pf_allkifcount == 0); LIST_REMOVE(&V_pf_default_rule, allrulelist); V_pf_allrulecount--; LIST_REMOVE(V_pf_rulemarker, allrulelist); /* * There are known pf rule leaks when running the test suite. */ #ifdef notyet MPASS(LIST_EMPTY(&V_pf_allrulelist)); MPASS(V_pf_allrulecount == 0); #endif PF_RULES_WUNLOCK(); free(V_pf_kifmarker, PFI_MTYPE); free(V_pf_rulemarker, M_PFRULE); #endif /* Free counters last as we updated them during shutdown. 
*/ pf_counter_u64_deinit(&V_pf_default_rule.evaluations); for (int i = 0; i < 2; i++) { pf_counter_u64_deinit(&V_pf_default_rule.packets[i]); pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]); } counter_u64_free(V_pf_default_rule.states_cur); counter_u64_free(V_pf_default_rule.states_tot); counter_u64_free(V_pf_default_rule.src_nodes); uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp); for (int i = 0; i < PFRES_MAX; i++) counter_u64_free(V_pf_status.counters[i]); for (int i = 0; i < KLCNT_MAX; i++) counter_u64_free(V_pf_status.lcounters[i]); for (int i = 0; i < FCNT_MAX; i++) pf_counter_u64_deinit(&V_pf_status.fcounters[i]); for (int i = 0; i < SCNT_MAX; i++) counter_u64_free(V_pf_status.scounters[i]); rm_destroy(&V_pf_rules_lock); sx_destroy(&V_pf_ioctl_lock); } static void pf_unload(void) { sx_xlock(&pf_end_lock); pf_end_threads = 1; while (pf_end_threads < 2) { wakeup_one(pf_purge_thread); sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0); } sx_xunlock(&pf_end_lock); pf_nl_unregister(); if (pf_dev != NULL) destroy_dev(pf_dev); pfi_cleanup(); sx_destroy(&pf_end_lock); } static void vnet_pf_init(void *unused __unused) { pf_load_vnet(); } VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, vnet_pf_init, NULL); static void vnet_pf_uninit(const void *unused __unused) { pf_unload_vnet(); } SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL); VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, vnet_pf_uninit, NULL); static int pf_modevent(module_t mod, int type, void *data) { int error = 0; switch(type) { case MOD_LOAD: error = pf_load(); pf_nl_register(); break; case MOD_UNLOAD: /* Handled in SYSUNINIT(pf_unload) to ensure it's done after * the vnet_pf_uninit()s */ break; default: error = EINVAL; break; } return (error); } static moduledata_t pf_mod = { "pf", pf_modevent, 0 }; DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND); MODULE_DEPEND(pf, netlink, 1, 1, 1); MODULE_VERSION(pf, PF_MODVER); diff --git a/sys/netpfil/pf/pf_nv.c b/sys/netpfil/pf/pf_nv.c index c4fa276da8fe..721d35be8916 100644 --- a/sys/netpfil/pf/pf_nv.c +++ b/sys/netpfil/pf/pf_nv.c @@ -1,1222 +1,1225 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #define PF_NV_IMPL_UINT(fnname, type, max) \ int \ pf_nv ## fnname ## _opt(const nvlist_t *nvl, const char *name, \ type *val, type dflt) \ { \ uint64_t raw; \ if (! nvlist_exists_number(nvl, name)) { \ *val = dflt; \ return (0); \ } \ raw = nvlist_get_number(nvl, name); \ if (raw > max) \ return (ERANGE); \ *val = (type)raw; \ return (0); \ } \ int \ pf_nv ## fnname(const nvlist_t *nvl, const char *name, type *val) \ { \ uint64_t raw; \ if (! nvlist_exists_number(nvl, name)) \ return (EINVAL); \ raw = nvlist_get_number(nvl, name); \ if (raw > max) \ return (ERANGE); \ *val = (type)raw; \ return (0); \ } \ int \ pf_nv ## fnname ## _array(const nvlist_t *nvl, const char *name, \ type *array, size_t maxelems, size_t *nelems) \ { \ const uint64_t *n; \ size_t nitems; \ bzero(array, sizeof(type) * maxelems); \ if (! nvlist_exists_number_array(nvl, name)) \ return (EINVAL); \ n = nvlist_get_number_array(nvl, name, &nitems); \ if (nitems > maxelems) \ return (E2BIG); \ if (nelems != NULL) \ *nelems = nitems; \ for (size_t i = 0; i < nitems; i++) { \ if (n[i] > max) \ return (ERANGE); \ array[i] = (type)n[i]; \ } \ return (0); \ } \ void \ pf_ ## fnname ## _array_nv(nvlist_t *nvl, const char *name, \ const type *numbers, size_t count) \ { \ uint64_t tmp; \ for (size_t i = 0; i < count; i++) { \ tmp = numbers[i]; \ nvlist_append_number_array(nvl, name, tmp); \ } \ } int pf_nvbool(const nvlist_t *nvl, const char *name, bool *val) { if (! nvlist_exists_bool(nvl, name)) return (EINVAL); *val = nvlist_get_bool(nvl, name); return (0); } int pf_nvbinary(const nvlist_t *nvl, const char *name, void *data, size_t expected_size) { const uint8_t *nvdata; size_t len; bzero(data, expected_size); if (! nvlist_exists_binary(nvl, name)) return (EINVAL); nvdata = (const uint8_t *)nvlist_get_binary(nvl, name, &len); if (len > expected_size) return (EINVAL); memcpy(data, nvdata, len); return (0); } PF_NV_IMPL_UINT(uint8, uint8_t, UINT8_MAX); PF_NV_IMPL_UINT(uint16, uint16_t, UINT16_MAX); PF_NV_IMPL_UINT(uint32, uint32_t, UINT32_MAX); PF_NV_IMPL_UINT(uint64, uint64_t, UINT64_MAX); int pf_nvint(const nvlist_t *nvl, const char *name, int *val) { int64_t raw; if (! nvlist_exists_number(nvl, name)) return (EINVAL); raw = nvlist_get_number(nvl, name); if (raw > INT_MAX || raw < INT_MIN) return (ERANGE); *val = (int)raw; return (0); } int pf_nvstring(const nvlist_t *nvl, const char *name, char *str, size_t maxlen) { int ret; if (! 
nvlist_exists_string(nvl, name)) return (EINVAL); ret = strlcpy(str, nvlist_get_string(nvl, name), maxlen); if (ret >= maxlen) return (EINVAL); return (0); } static int pf_nvaddr_to_addr(const nvlist_t *nvl, struct pf_addr *paddr) { return (pf_nvbinary(nvl, "addr", paddr, sizeof(*paddr))); } static nvlist_t * pf_addr_to_nvaddr(const struct pf_addr *paddr) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_binary(nvl, "addr", paddr, sizeof(*paddr)); return (nvl); } static int pf_nvmape_to_mape(const nvlist_t *nvl, struct pf_mape_portset *mape) { int error = 0; bzero(mape, sizeof(*mape)); PFNV_CHK(pf_nvuint8(nvl, "offset", &mape->offset)); PFNV_CHK(pf_nvuint8(nvl, "psidlen", &mape->psidlen)); PFNV_CHK(pf_nvuint16(nvl, "psid", &mape->psid)); errout: return (error); } static nvlist_t * pf_mape_to_nvmape(const struct pf_mape_portset *mape) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_number(nvl, "offset", mape->offset); nvlist_add_number(nvl, "psidlen", mape->psidlen); nvlist_add_number(nvl, "psid", mape->psid); return (nvl); } static int pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool) { int error = 0; PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key))); if (nvlist_exists_nvlist(nvl, "counter")) { PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "counter"), &kpool->counter)); } PFNV_CHK(pf_nvint(nvl, "tblidx", &kpool->tblidx)); PFNV_CHK(pf_nvuint16_array(nvl, "proxy_port", kpool->proxy_port, 2, NULL)); PFNV_CHK(pf_nvuint8(nvl, "opts", &kpool->opts)); if (nvlist_exists_nvlist(nvl, "mape")) { PFNV_CHK(pf_nvmape_to_mape(nvlist_get_nvlist(nvl, "mape"), &kpool->mape)); } errout: return (error); } static nvlist_t * pf_pool_to_nvpool(const struct pf_kpool *pool) { nvlist_t *nvl; nvlist_t *tmp; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_binary(nvl, "key", &pool->key, sizeof(pool->key)); tmp = pf_addr_to_nvaddr(&pool->counter); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "counter", tmp); nvlist_destroy(tmp); nvlist_add_number(nvl, "tblidx", pool->tblidx); pf_uint16_array_nv(nvl, "proxy_port", pool->proxy_port, 2); nvlist_add_number(nvl, "opts", pool->opts); tmp = pf_mape_to_nvmape(&pool->mape); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "mape", tmp); nvlist_destroy(tmp); return (nvl); error: nvlist_destroy(nvl); return (NULL); } static int pf_nvaddr_wrap_to_addr_wrap(const nvlist_t *nvl, struct pf_addr_wrap *addr) { int error = 0; bzero(addr, sizeof(*addr)); PFNV_CHK(pf_nvuint8(nvl, "type", &addr->type)); PFNV_CHK(pf_nvuint8(nvl, "iflags", &addr->iflags)); if (addr->type == PF_ADDR_DYNIFTL) PFNV_CHK(pf_nvstring(nvl, "ifname", addr->v.ifname, sizeof(addr->v.ifname))); if (addr->type == PF_ADDR_TABLE) PFNV_CHK(pf_nvstring(nvl, "tblname", addr->v.tblname, sizeof(addr->v.tblname))); if (! nvlist_exists_nvlist(nvl, "addr")) return (EINVAL); PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "addr"), &addr->v.a.addr)); if (! 
nvlist_exists_nvlist(nvl, "mask")) return (EINVAL); PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvl, "mask"), &addr->v.a.mask)); switch (addr->type) { case PF_ADDR_DYNIFTL: case PF_ADDR_TABLE: case PF_ADDR_RANGE: case PF_ADDR_ADDRMASK: case PF_ADDR_NOROUTE: case PF_ADDR_URPFFAILED: break; default: return (EINVAL); } errout: return (error); } static nvlist_t * pf_addr_wrap_to_nvaddr_wrap(const struct pf_addr_wrap *addr) { nvlist_t *nvl; nvlist_t *tmp; uint64_t num; struct pfr_ktable *kt; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_number(nvl, "type", addr->type); nvlist_add_number(nvl, "iflags", addr->iflags); if (addr->type == PF_ADDR_DYNIFTL) { nvlist_add_string(nvl, "ifname", addr->v.ifname); num = 0; if (addr->p.dyn != NULL) num = addr->p.dyn->pfid_acnt4 + addr->p.dyn->pfid_acnt6; nvlist_add_number(nvl, "dyncnt", num); } if (addr->type == PF_ADDR_TABLE) { nvlist_add_string(nvl, "tblname", addr->v.tblname); num = -1; kt = addr->p.tbl; if ((kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) kt = kt->pfrkt_root; if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) num = kt->pfrkt_cnt; nvlist_add_number(nvl, "tblcnt", num); } tmp = pf_addr_to_nvaddr(&addr->v.a.addr); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "addr", tmp); nvlist_destroy(tmp); tmp = pf_addr_to_nvaddr(&addr->v.a.mask); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "mask", tmp); nvlist_destroy(tmp); return (nvl); error: nvlist_destroy(nvl); return (NULL); } static int pf_validate_op(uint8_t op) { switch (op) { case PF_OP_NONE: case PF_OP_IRG: case PF_OP_EQ: case PF_OP_NE: case PF_OP_LT: case PF_OP_LE: case PF_OP_GT: case PF_OP_GE: case PF_OP_XRG: case PF_OP_RRG: break; default: return (EINVAL); } return (0); } static int pf_nvrule_addr_to_rule_addr(const nvlist_t *nvl, struct pf_rule_addr *addr) { int error = 0; if (! nvlist_exists_nvlist(nvl, "addr")) return (EINVAL); PFNV_CHK(pf_nvaddr_wrap_to_addr_wrap(nvlist_get_nvlist(nvl, "addr"), &addr->addr)); PFNV_CHK(pf_nvuint16_array(nvl, "port", addr->port, 2, NULL)); PFNV_CHK(pf_nvuint8(nvl, "neg", &addr->neg)); PFNV_CHK(pf_nvuint8(nvl, "port_op", &addr->port_op)); PFNV_CHK(pf_validate_op(addr->port_op)); errout: return (error); } static nvlist_t * pf_rule_addr_to_nvrule_addr(const struct pf_rule_addr *addr) { nvlist_t *nvl; nvlist_t *tmp; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); tmp = pf_addr_wrap_to_nvaddr_wrap(&addr->addr); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "addr", tmp); nvlist_destroy(tmp); pf_uint16_array_nv(nvl, "port", addr->port, 2); nvlist_add_number(nvl, "neg", addr->neg); nvlist_add_number(nvl, "port_op", addr->port_op); return (nvl); error: nvlist_destroy(nvl); return (NULL); } static int pf_nvrule_uid_to_rule_uid(const nvlist_t *nvl, struct pf_rule_uid *uid) { int error = 0; bzero(uid, sizeof(*uid)); PFNV_CHK(pf_nvuint32_array(nvl, "uid", uid->uid, 2, NULL)); PFNV_CHK(pf_nvuint8(nvl, "op", &uid->op)); PFNV_CHK(pf_validate_op(uid->op)); errout: return (error); } static nvlist_t * pf_rule_uid_to_nvrule_uid(const struct pf_rule_uid *uid) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); pf_uint32_array_nv(nvl, "uid", uid->uid, 2); nvlist_add_number(nvl, "op", uid->op); return (nvl); } static int pf_nvrule_gid_to_rule_gid(const nvlist_t *nvl, struct pf_rule_gid *gid) { /* Cheat a little. These stucts are the same, other than the name of * the first field. 
*/ return (pf_nvrule_uid_to_rule_uid(nvl, (struct pf_rule_uid *)gid)); } int pf_check_rule_addr(const struct pf_rule_addr *addr) { switch (addr->addr.type) { case PF_ADDR_ADDRMASK: case PF_ADDR_NOROUTE: case PF_ADDR_DYNIFTL: case PF_ADDR_TABLE: case PF_ADDR_URPFFAILED: case PF_ADDR_RANGE: break; default: return (EINVAL); } if (addr->addr.p.dyn != NULL) { return (EINVAL); } return (0); } int pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule *rule) { int error = 0; #define ERROUT(x) ERROUT_FUNCTION(errout, x) PFNV_CHK(pf_nvuint32(nvl, "nr", &rule->nr)); if (! nvlist_exists_nvlist(nvl, "src")) ERROUT(EINVAL); error = pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"), &rule->src); if (error != 0) ERROUT(error); if (! nvlist_exists_nvlist(nvl, "dst")) ERROUT(EINVAL); PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"), &rule->dst)); if (nvlist_exists_string(nvl, "label")) { PFNV_CHK(pf_nvstring(nvl, "label", rule->label[0], sizeof(rule->label[0]))); } else if (nvlist_exists_string_array(nvl, "labels")) { const char *const *strs; size_t items; int ret; strs = nvlist_get_string_array(nvl, "labels", &items); if (items > PF_RULE_MAX_LABEL_COUNT) ERROUT(E2BIG); for (size_t i = 0; i < items; i++) { ret = strlcpy(rule->label[i], strs[i], sizeof(rule->label[0])); if (ret >= sizeof(rule->label[0])) ERROUT(E2BIG); } } PFNV_CHK(pf_nvuint32_opt(nvl, "ridentifier", &rule->ridentifier, 0)); PFNV_CHK(pf_nvstring(nvl, "ifname", rule->ifname, sizeof(rule->ifname))); PFNV_CHK(pf_nvstring(nvl, "qname", rule->qname, sizeof(rule->qname))); PFNV_CHK(pf_nvstring(nvl, "pqname", rule->pqname, sizeof(rule->pqname))); PFNV_CHK(pf_nvstring(nvl, "tagname", rule->tagname, sizeof(rule->tagname))); PFNV_CHK(pf_nvuint16_opt(nvl, "dnpipe", &rule->dnpipe, 0)); PFNV_CHK(pf_nvuint16_opt(nvl, "dnrpipe", &rule->dnrpipe, 0)); PFNV_CHK(pf_nvuint32_opt(nvl, "dnflags", &rule->free_flags, 0)); PFNV_CHK(pf_nvstring(nvl, "match_tagname", rule->match_tagname, sizeof(rule->match_tagname))); PFNV_CHK(pf_nvstring(nvl, "overload_tblname", rule->overload_tblname, sizeof(rule->overload_tblname))); if (! nvlist_exists_nvlist(nvl, "rpool")) ERROUT(EINVAL); PFNV_CHK(pf_nvpool_to_pool(nvlist_get_nvlist(nvl, "rpool"), &rule->rpool)); PFNV_CHK(pf_nvuint32(nvl, "os_fingerprint", &rule->os_fingerprint)); PFNV_CHK(pf_nvint(nvl, "rtableid", &rule->rtableid)); PFNV_CHK(pf_nvuint32_array(nvl, "timeout", rule->timeout, PFTM_MAX, NULL)); PFNV_CHK(pf_nvuint32(nvl, "max_states", &rule->max_states)); PFNV_CHK(pf_nvuint32(nvl, "max_src_nodes", &rule->max_src_nodes)); PFNV_CHK(pf_nvuint32(nvl, "max_src_states", &rule->max_src_states)); PFNV_CHK(pf_nvuint32(nvl, "max_src_conn", &rule->max_src_conn)); PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.limit", &rule->max_src_conn_rate.limit)); PFNV_CHK(pf_nvuint32(nvl, "max_src_conn_rate.seconds", &rule->max_src_conn_rate.seconds)); PFNV_CHK(pf_nvuint32(nvl, "prob", &rule->prob)); PFNV_CHK(pf_nvuint32(nvl, "cuid", &rule->cuid)); PFNV_CHK(pf_nvuint32(nvl, "cpid", &rule->cpid)); PFNV_CHK(pf_nvuint16(nvl, "return_icmp", &rule->return_icmp)); PFNV_CHK(pf_nvuint16(nvl, "return_icmp6", &rule->return_icmp6)); PFNV_CHK(pf_nvuint16(nvl, "max_mss", &rule->max_mss)); PFNV_CHK(pf_nvuint16(nvl, "scrub_flags", &rule->scrub_flags)); if (! nvlist_exists_nvlist(nvl, "uid")) ERROUT(EINVAL); PFNV_CHK(pf_nvrule_uid_to_rule_uid(nvlist_get_nvlist(nvl, "uid"), &rule->uid)); if (! 
nvlist_exists_nvlist(nvl, "gid")) ERROUT(EINVAL); PFNV_CHK(pf_nvrule_gid_to_rule_gid(nvlist_get_nvlist(nvl, "gid"), &rule->gid)); PFNV_CHK(pf_nvuint32(nvl, "rule_flag", &rule->rule_flag)); PFNV_CHK(pf_nvuint8(nvl, "action", &rule->action)); PFNV_CHK(pf_nvuint8(nvl, "direction", &rule->direction)); PFNV_CHK(pf_nvuint8(nvl, "log", &rule->log)); PFNV_CHK(pf_nvuint8(nvl, "logif", &rule->logif)); PFNV_CHK(pf_nvuint8(nvl, "quick", &rule->quick)); PFNV_CHK(pf_nvuint8(nvl, "ifnot", &rule->ifnot)); PFNV_CHK(pf_nvuint8(nvl, "match_tag_not", &rule->match_tag_not)); PFNV_CHK(pf_nvuint8(nvl, "natpass", &rule->natpass)); PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state)); PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af)); PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto)); PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type)); PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code)); PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags)); PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset)); PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl)); PFNV_CHK(pf_nvuint8(nvl, "allow_opts", &rule->allow_opts)); PFNV_CHK(pf_nvuint8(nvl, "rt", &rule->rt)); PFNV_CHK(pf_nvuint8(nvl, "return_ttl", &rule->return_ttl)); PFNV_CHK(pf_nvuint8(nvl, "tos", &rule->tos)); PFNV_CHK(pf_nvuint8(nvl, "set_tos", &rule->set_tos)); PFNV_CHK(pf_nvuint8(nvl, "flush", &rule->flush)); PFNV_CHK(pf_nvuint8(nvl, "prio", &rule->prio)); PFNV_CHK(pf_nvuint8_array(nvl, "set_prio", rule->set_prio, 2, NULL)); if (nvlist_exists_nvlist(nvl, "divert")) { const nvlist_t *nvldivert = nvlist_get_nvlist(nvl, "divert"); if (! nvlist_exists_nvlist(nvldivert, "addr")) ERROUT(EINVAL); PFNV_CHK(pf_nvaddr_to_addr(nvlist_get_nvlist(nvldivert, "addr"), &rule->divert.addr)); PFNV_CHK(pf_nvuint16(nvldivert, "port", &rule->divert.port)); } /* Validation */ #ifndef INET if (rule->af == AF_INET) ERROUT(EAFNOSUPPORT); #endif /* INET */ #ifndef INET6 if (rule->af == AF_INET6) ERROUT(EAFNOSUPPORT); #endif /* INET6 */ PFNV_CHK(pf_check_rule_addr(&rule->src)); PFNV_CHK(pf_check_rule_addr(&rule->dst)); return (0); #undef ERROUT errout: return (error); } static nvlist_t * pf_divert_to_nvdivert(const struct pf_krule *rule) { nvlist_t *nvl; nvlist_t *tmp; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); tmp = pf_addr_to_nvaddr(&rule->divert.addr); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "addr", tmp); nvlist_destroy(tmp); nvlist_add_number(nvl, "port", rule->divert.port); return (nvl); error: nvlist_destroy(nvl); return (NULL); } nvlist_t * pf_krule_to_nvrule(struct pf_krule *rule) { nvlist_t *nvl, *tmp; nvl = nvlist_create(0); if (nvl == NULL) return (nvl); nvlist_add_number(nvl, "nr", rule->nr); tmp = pf_rule_addr_to_nvrule_addr(&rule->src); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "src", tmp); nvlist_destroy(tmp); tmp = pf_rule_addr_to_nvrule_addr(&rule->dst); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "dst", tmp); nvlist_destroy(tmp); for (int i = 0; i < PF_SKIP_COUNT; i++) { nvlist_append_number_array(nvl, "skip", rule->skip[i].ptr ? 
rule->skip[i].ptr->nr : -1); } for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) { nvlist_append_string_array(nvl, "labels", rule->label[i]); } nvlist_add_string(nvl, "label", rule->label[0]); nvlist_add_number(nvl, "ridentifier", rule->ridentifier); nvlist_add_string(nvl, "ifname", rule->ifname); nvlist_add_string(nvl, "qname", rule->qname); nvlist_add_string(nvl, "pqname", rule->pqname); nvlist_add_number(nvl, "dnpipe", rule->dnpipe); nvlist_add_number(nvl, "dnrpipe", rule->dnrpipe); nvlist_add_number(nvl, "dnflags", rule->free_flags); nvlist_add_string(nvl, "tagname", rule->tagname); nvlist_add_string(nvl, "match_tagname", rule->match_tagname); nvlist_add_string(nvl, "overload_tblname", rule->overload_tblname); tmp = pf_pool_to_nvpool(&rule->rpool); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "rpool", tmp); nvlist_destroy(tmp); nvlist_add_number(nvl, "evaluations", pf_counter_u64_fetch(&rule->evaluations)); for (int i = 0; i < 2; i++) { nvlist_append_number_array(nvl, "packets", pf_counter_u64_fetch(&rule->packets[i])); nvlist_append_number_array(nvl, "bytes", pf_counter_u64_fetch(&rule->bytes[i])); } nvlist_add_number(nvl, "timestamp", pf_get_timestamp(rule)); nvlist_add_number(nvl, "os_fingerprint", rule->os_fingerprint); nvlist_add_number(nvl, "rtableid", rule->rtableid); pf_uint32_array_nv(nvl, "timeout", rule->timeout, PFTM_MAX); nvlist_add_number(nvl, "max_states", rule->max_states); nvlist_add_number(nvl, "max_src_nodes", rule->max_src_nodes); nvlist_add_number(nvl, "max_src_states", rule->max_src_states); nvlist_add_number(nvl, "max_src_conn", rule->max_src_conn); nvlist_add_number(nvl, "max_src_conn_rate.limit", rule->max_src_conn_rate.limit); nvlist_add_number(nvl, "max_src_conn_rate.seconds", rule->max_src_conn_rate.seconds); nvlist_add_number(nvl, "qid", rule->qid); nvlist_add_number(nvl, "pqid", rule->pqid); nvlist_add_number(nvl, "prob", rule->prob); nvlist_add_number(nvl, "cuid", rule->cuid); nvlist_add_number(nvl, "cpid", rule->cpid); nvlist_add_number(nvl, "states_cur", counter_u64_fetch(rule->states_cur)); nvlist_add_number(nvl, "states_tot", counter_u64_fetch(rule->states_tot)); nvlist_add_number(nvl, "src_nodes", counter_u64_fetch(rule->src_nodes)); nvlist_add_number(nvl, "return_icmp", rule->return_icmp); nvlist_add_number(nvl, "return_icmp6", rule->return_icmp6); nvlist_add_number(nvl, "max_mss", rule->max_mss); nvlist_add_number(nvl, "scrub_flags", rule->scrub_flags); tmp = pf_rule_uid_to_nvrule_uid(&rule->uid); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "uid", tmp); nvlist_destroy(tmp); tmp = pf_rule_uid_to_nvrule_uid((const struct pf_rule_uid *)&rule->gid); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "gid", tmp); nvlist_destroy(tmp); nvlist_add_number(nvl, "rule_flag", rule->rule_flag); nvlist_add_number(nvl, "action", rule->action); nvlist_add_number(nvl, "direction", rule->direction); nvlist_add_number(nvl, "log", rule->log); nvlist_add_number(nvl, "logif", rule->logif); nvlist_add_number(nvl, "quick", rule->quick); nvlist_add_number(nvl, "ifnot", rule->ifnot); nvlist_add_number(nvl, "match_tag_not", rule->match_tag_not); nvlist_add_number(nvl, "natpass", rule->natpass); nvlist_add_number(nvl, "keep_state", rule->keep_state); nvlist_add_number(nvl, "af", rule->af); nvlist_add_number(nvl, "proto", rule->proto); nvlist_add_number(nvl, "type", rule->type); nvlist_add_number(nvl, "code", rule->code); nvlist_add_number(nvl, "flags", rule->flags); nvlist_add_number(nvl, "flagset", rule->flagset); nvlist_add_number(nvl, "min_ttl", 
rule->min_ttl); nvlist_add_number(nvl, "allow_opts", rule->allow_opts); nvlist_add_number(nvl, "rt", rule->rt); nvlist_add_number(nvl, "return_ttl", rule->return_ttl); nvlist_add_number(nvl, "tos", rule->tos); nvlist_add_number(nvl, "set_tos", rule->set_tos); nvlist_add_number(nvl, "anchor_relative", rule->anchor_relative); nvlist_add_number(nvl, "anchor_wildcard", rule->anchor_wildcard); nvlist_add_number(nvl, "flush", rule->flush); nvlist_add_number(nvl, "prio", rule->prio); pf_uint8_array_nv(nvl, "set_prio", rule->set_prio, 2); tmp = pf_divert_to_nvdivert(rule); if (tmp == NULL) goto error; nvlist_add_nvlist(nvl, "divert", tmp); nvlist_destroy(tmp); return (nvl); error: nvlist_destroy(nvl); return (NULL); } static int pf_nvstate_cmp_to_state_cmp(const nvlist_t *nvl, struct pf_state_cmp *cmp) { int error = 0; bzero(cmp, sizeof(*cmp)); PFNV_CHK(pf_nvuint64(nvl, "id", &cmp->id)); PFNV_CHK(pf_nvuint32(nvl, "creatorid", &cmp->creatorid)); PFNV_CHK(pf_nvuint8(nvl, "direction", &cmp->direction)); errout: return (error); } int pf_nvstate_kill_to_kstate_kill(const nvlist_t *nvl, struct pf_kstate_kill *kill) { int error = 0; bzero(kill, sizeof(*kill)); if (! nvlist_exists_nvlist(nvl, "cmp")) return (EINVAL); PFNV_CHK(pf_nvstate_cmp_to_state_cmp(nvlist_get_nvlist(nvl, "cmp"), &kill->psk_pfcmp)); PFNV_CHK(pf_nvuint8(nvl, "af", &kill->psk_af)); PFNV_CHK(pf_nvint(nvl, "proto", &kill->psk_proto)); if (! nvlist_exists_nvlist(nvl, "src")) return (EINVAL); PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "src"), &kill->psk_src)); if (! nvlist_exists_nvlist(nvl, "dst")) return (EINVAL); PFNV_CHK(pf_nvrule_addr_to_rule_addr(nvlist_get_nvlist(nvl, "dst"), &kill->psk_dst)); if (nvlist_exists_nvlist(nvl, "rt_addr")) { PFNV_CHK(pf_nvrule_addr_to_rule_addr( nvlist_get_nvlist(nvl, "rt_addr"), &kill->psk_rt_addr)); } PFNV_CHK(pf_nvstring(nvl, "ifname", kill->psk_ifname, sizeof(kill->psk_ifname))); PFNV_CHK(pf_nvstring(nvl, "label", kill->psk_label, sizeof(kill->psk_label))); PFNV_CHK(pf_nvbool(nvl, "kill_match", &kill->psk_kill_match)); + if (nvlist_exists_bool(nvl, "nat")) + PFNV_CHK(pf_nvbool(nvl, "nat", &kill->psk_nat)); + errout: return (error); } static nvlist_t * pf_state_key_to_nvstate_key(const struct pf_state_key *key) { nvlist_t *nvl, *tmp; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); for (int i = 0; i < 2; i++) { tmp = pf_addr_to_nvaddr(&key->addr[i]); if (tmp == NULL) goto errout; nvlist_append_nvlist_array(nvl, "addr", tmp); nvlist_destroy(tmp); nvlist_append_number_array(nvl, "port", key->port[i]); } nvlist_add_number(nvl, "af", key->af); nvlist_add_number(nvl, "proto", key->proto); return (nvl); errout: nvlist_destroy(nvl); return (NULL); } static nvlist_t * pf_state_peer_to_nvstate_peer(const struct pf_state_peer *peer) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_number(nvl, "seqlo", peer->seqlo); nvlist_add_number(nvl, "seqhi", peer->seqhi); nvlist_add_number(nvl, "seqdiff", peer->seqdiff); nvlist_add_number(nvl, "state", peer->state); nvlist_add_number(nvl, "wscale", peer->wscale); return (nvl); } nvlist_t * pf_state_to_nvstate(const struct pf_kstate *s) { nvlist_t *nvl, *tmp; uint32_t expire, flags = 0; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_number(nvl, "id", s->id); nvlist_add_string(nvl, "ifname", s->kif->pfik_name); nvlist_add_string(nvl, "orig_ifname", s->orig_kif->pfik_name); tmp = pf_state_key_to_nvstate_key(s->key[PF_SK_STACK]); if (tmp == NULL) goto errout; nvlist_add_nvlist(nvl, "stack_key", tmp); 
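	/*
	 * nvlist_add_nvlist() stores a copy of 'tmp' in 'nvl' (unlike
	 * nvlist_move_nvlist(), which would take ownership), so each
	 * temporary list is destroyed right after it is added; the same
	 * pattern repeats for the wire key, the peers and rt_addr.
	 */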
nvlist_destroy(tmp); tmp = pf_state_key_to_nvstate_key(s->key[PF_SK_WIRE]); if (tmp == NULL) goto errout; nvlist_add_nvlist(nvl, "wire_key", tmp); nvlist_destroy(tmp); tmp = pf_state_peer_to_nvstate_peer(&s->src); if (tmp == NULL) goto errout; nvlist_add_nvlist(nvl, "src", tmp); nvlist_destroy(tmp); tmp = pf_state_peer_to_nvstate_peer(&s->dst); if (tmp == NULL) goto errout; nvlist_add_nvlist(nvl, "dst", tmp); nvlist_destroy(tmp); tmp = pf_addr_to_nvaddr(&s->rt_addr); if (tmp == NULL) goto errout; nvlist_add_nvlist(nvl, "rt_addr", tmp); nvlist_destroy(tmp); nvlist_add_number(nvl, "rule", s->rule.ptr ? s->rule.ptr->nr : -1); nvlist_add_number(nvl, "anchor", s->anchor.ptr ? s->anchor.ptr->nr : -1); nvlist_add_number(nvl, "nat_rule", s->nat_rule.ptr ? s->nat_rule.ptr->nr : -1); nvlist_add_number(nvl, "creation", s->creation); expire = pf_state_expires(s); if (expire <= time_uptime) expire = 0; else expire = expire - time_uptime; nvlist_add_number(nvl, "expire", expire); for (int i = 0; i < 2; i++) { nvlist_append_number_array(nvl, "packets", s->packets[i]); nvlist_append_number_array(nvl, "bytes", s->bytes[i]); } nvlist_add_number(nvl, "creatorid", s->creatorid); nvlist_add_number(nvl, "direction", s->direction); nvlist_add_number(nvl, "state_flags", s->state_flags); if (s->src_node) flags |= PFSYNC_FLAG_SRCNODE; if (s->nat_src_node) flags |= PFSYNC_FLAG_NATSRCNODE; nvlist_add_number(nvl, "sync_flags", flags); return (nvl); errout: nvlist_destroy(nvl); return (NULL); } static int pf_nveth_rule_addr_to_keth_rule_addr(const nvlist_t *nvl, struct pf_keth_rule_addr *krule) { static const u_int8_t EMPTY_MAC[ETHER_ADDR_LEN] = { 0 }; int error = 0; PFNV_CHK(pf_nvbinary(nvl, "addr", &krule->addr, sizeof(krule->addr))); PFNV_CHK(pf_nvbool(nvl, "neg", &krule->neg)); if (nvlist_exists_binary(nvl, "mask")) PFNV_CHK(pf_nvbinary(nvl, "mask", &krule->mask, sizeof(krule->mask))); /* To make checks for 'is this address set?' easier. 
*/ if (memcmp(krule->addr, EMPTY_MAC, ETHER_ADDR_LEN) != 0) krule->isset = 1; errout: return (error); } static nvlist_t* pf_keth_rule_addr_to_nveth_rule_addr(const struct pf_keth_rule_addr *krule) { nvlist_t *nvl; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); nvlist_add_binary(nvl, "addr", &krule->addr, sizeof(krule->addr)); nvlist_add_binary(nvl, "mask", &krule->mask, sizeof(krule->mask)); nvlist_add_bool(nvl, "neg", krule->neg); return (nvl); } nvlist_t* pf_keth_rule_to_nveth_rule(const struct pf_keth_rule *krule) { nvlist_t *nvl, *addr; nvl = nvlist_create(0); if (nvl == NULL) return (NULL); for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) { nvlist_append_string_array(nvl, "labels", krule->label[i]); } nvlist_add_number(nvl, "ridentifier", krule->ridentifier); nvlist_add_number(nvl, "nr", krule->nr); nvlist_add_bool(nvl, "quick", krule->quick); nvlist_add_string(nvl, "ifname", krule->ifname); nvlist_add_bool(nvl, "ifnot", krule->ifnot); nvlist_add_number(nvl, "direction", krule->direction); nvlist_add_number(nvl, "proto", krule->proto); nvlist_add_string(nvl, "match_tagname", krule->match_tagname); nvlist_add_number(nvl, "match_tag", krule->match_tag); nvlist_add_bool(nvl, "match_tag_not", krule->match_tag_not); addr = pf_keth_rule_addr_to_nveth_rule_addr(&krule->src); if (addr == NULL) { nvlist_destroy(nvl); return (NULL); } nvlist_add_nvlist(nvl, "src", addr); nvlist_destroy(addr); addr = pf_keth_rule_addr_to_nveth_rule_addr(&krule->dst); if (addr == NULL) { nvlist_destroy(nvl); return (NULL); } nvlist_add_nvlist(nvl, "dst", addr); nvlist_destroy(addr); addr = pf_rule_addr_to_nvrule_addr(&krule->ipsrc); if (addr == NULL) { nvlist_destroy(nvl); return (NULL); } nvlist_add_nvlist(nvl, "ipsrc", addr); nvlist_destroy(addr); addr = pf_rule_addr_to_nvrule_addr(&krule->ipdst); if (addr == NULL) { nvlist_destroy(nvl); return (NULL); } nvlist_add_nvlist(nvl, "ipdst", addr); nvlist_destroy(addr); nvlist_add_number(nvl, "evaluations", counter_u64_fetch(krule->evaluations)); nvlist_add_number(nvl, "packets-in", counter_u64_fetch(krule->packets[0])); nvlist_add_number(nvl, "packets-out", counter_u64_fetch(krule->packets[1])); nvlist_add_number(nvl, "bytes-in", counter_u64_fetch(krule->bytes[0])); nvlist_add_number(nvl, "bytes-out", counter_u64_fetch(krule->bytes[1])); nvlist_add_number(nvl, "timestamp", pf_get_timestamp(krule)); nvlist_add_string(nvl, "qname", krule->qname); nvlist_add_string(nvl, "tagname", krule->tagname); nvlist_add_number(nvl, "dnpipe", krule->dnpipe); nvlist_add_number(nvl, "dnflags", krule->dnflags); nvlist_add_number(nvl, "anchor_relative", krule->anchor_relative); nvlist_add_number(nvl, "anchor_wildcard", krule->anchor_wildcard); nvlist_add_string(nvl, "bridge_to", krule->bridge_to_name); nvlist_add_number(nvl, "action", krule->action); return (nvl); } int pf_nveth_rule_to_keth_rule(const nvlist_t *nvl, struct pf_keth_rule *krule) { int error = 0; #define ERROUT(x) ERROUT_FUNCTION(errout, x) bzero(krule, sizeof(*krule)); if (nvlist_exists_string_array(nvl, "labels")) { const char *const *strs; size_t items; int ret; strs = nvlist_get_string_array(nvl, "labels", &items); if (items > PF_RULE_MAX_LABEL_COUNT) ERROUT(E2BIG); for (size_t i = 0; i < items; i++) { ret = strlcpy(krule->label[i], strs[i], sizeof(krule->label[0])); if (ret >= sizeof(krule->label[0])) ERROUT(E2BIG); } } PFNV_CHK(pf_nvuint32_opt(nvl, "ridentifier", &krule->ridentifier, 0)); PFNV_CHK(pf_nvuint32(nvl, "nr", &krule->nr)); PFNV_CHK(pf_nvbool(nvl, "quick", &krule->quick)); 
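	/*
	 * PFNV_CHK() (defined in pf_nv.h) assigns its argument's return
	 * value to the local 'error' and jumps to the errout label on
	 * failure, which keeps each parse step below to a single line.
	 */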
PFNV_CHK(pf_nvstring(nvl, "ifname", krule->ifname, sizeof(krule->ifname))); PFNV_CHK(pf_nvbool(nvl, "ifnot", &krule->ifnot)); PFNV_CHK(pf_nvuint8(nvl, "direction", &krule->direction)); PFNV_CHK(pf_nvuint16(nvl, "proto", &krule->proto)); if (nvlist_exists_nvlist(nvl, "src")) { error = pf_nveth_rule_addr_to_keth_rule_addr( nvlist_get_nvlist(nvl, "src"), &krule->src); if (error) return (error); } if (nvlist_exists_nvlist(nvl, "dst")) { error = pf_nveth_rule_addr_to_keth_rule_addr( nvlist_get_nvlist(nvl, "dst"), &krule->dst); if (error) return (error); } if (nvlist_exists_nvlist(nvl, "ipsrc")) { error = pf_nvrule_addr_to_rule_addr( nvlist_get_nvlist(nvl, "ipsrc"), &krule->ipsrc); if (error != 0) return (error); if (krule->ipsrc.addr.type != PF_ADDR_ADDRMASK && krule->ipsrc.addr.type != PF_ADDR_TABLE) return (EINVAL); } if (nvlist_exists_nvlist(nvl, "ipdst")) { error = pf_nvrule_addr_to_rule_addr( nvlist_get_nvlist(nvl, "ipdst"), &krule->ipdst); if (error != 0) return (error); if (krule->ipdst.addr.type != PF_ADDR_ADDRMASK && krule->ipdst.addr.type != PF_ADDR_TABLE) return (EINVAL); } if (nvlist_exists_string(nvl, "match_tagname")) { PFNV_CHK(pf_nvstring(nvl, "match_tagname", krule->match_tagname, sizeof(krule->match_tagname))); PFNV_CHK(pf_nvbool(nvl, "match_tag_not", &krule->match_tag_not)); } PFNV_CHK(pf_nvstring(nvl, "qname", krule->qname, sizeof(krule->qname))); PFNV_CHK(pf_nvstring(nvl, "tagname", krule->tagname, sizeof(krule->tagname))); PFNV_CHK(pf_nvuint16_opt(nvl, "dnpipe", &krule->dnpipe, 0)); PFNV_CHK(pf_nvuint32_opt(nvl, "dnflags", &krule->dnflags, 0)); PFNV_CHK(pf_nvstring(nvl, "bridge_to", krule->bridge_to_name, sizeof(krule->bridge_to_name))); PFNV_CHK(pf_nvuint8(nvl, "action", &krule->action)); if (krule->action != PF_PASS && krule->action != PF_DROP && krule->action != PF_MATCH) return (EBADMSG); #undef ERROUT errout: return (error); }
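For context, the optional "nat" key parsed by pf_nvstate_kill_to_kstate_kill() above is the kernel half of this change: userspace packs a kill request into an nvlist and submits it through the DIOCKILLSTATESNV ioctl, which lands in pf_killstates_nv(). Below is a minimal sketch of a caller using libpfctl, assuming the companion libpfctl change exposes the new flag as a `nat` member of struct pfctl_kill; the interface name "em0" is illustrative, and a zeroed cmp/src/dst matches any state.

#include <sys/types.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <libpfctl.h>

int
main(void)
{
	struct pfctl_kill kill;
	unsigned int killed;
	int dev, error;

	if ((dev = open("/dev/pf", O_RDWR)) == -1) {
		perror("open(/dev/pf)");
		return (1);
	}

	memset(&kill, 0, sizeof(kill));	/* zeroed cmp/src/dst: match any state */
	strlcpy(kill.ifname, "em0", sizeof(kill.ifname));	/* illustrative */
	kill.kill_match = true;	/* also remove states matching the reverse key */
	kill.nat = true;	/* assumed userland mirror of the new psk_nat */

	error = pfctl_kill_states(dev, &kill, &killed);
	if (error != 0) {
		fprintf(stderr, "pfctl_kill_states: %s\n", strerror(error));
		return (1);
	}
	printf("killed %u state(s)\n", killed);
	return (0);
}

Compile with -lpfctl. How the flag changes which states are matched is decided kernel-side, presumably in pf_killstates_row(), which this hunk does not show.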