diff --git a/sys/netpfil/ipfw/dn_aqm.h b/sys/netpfil/ipfw/dn_aqm.h index 8bbe9fe69e86..b0eaf2ecfc8a 100644 --- a/sys/netpfil/ipfw/dn_aqm.h +++ b/sys/netpfil/ipfw/dn_aqm.h @@ -1,162 +1,162 @@ /*- * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * API for writing an Active Queue Management algorithm for Dummynet * * $FreeBSD$ */ #ifndef _IP_DN_AQM_H #define _IP_DN_AQM_H /* NOW is the current time in millisecond*/ -#define NOW ((dn_cfg.curr_time * tick) / 1000) +#define NOW ((V_dn_cfg.curr_time * tick) / 1000) -#define AQM_UNOW (dn_cfg.curr_time * tick) +#define AQM_UNOW (V_dn_cfg.curr_time * tick) #define AQM_TIME_1US ((aqm_time_t)(1)) #define AQM_TIME_1MS ((aqm_time_t)(1000)) #define AQM_TIME_1S ((aqm_time_t)(AQM_TIME_1MS * 1000)) /* aqm time allows to store up to 4294 seconds */ typedef uint32_t aqm_time_t; typedef int32_t aqm_stime_t; #define DN_AQM_MTAG_TS 55345 /* Macro for variable bounding */ #define BOUND_VAR(x,l,h) ((x) > (h)? (h) : ((x) > (l)? (x) : (l))) /* * Structure for holding data and function pointers that together represent a * AQM algorithm. */ struct dn_aqm { #define DN_AQM_NAME_MAX 50 char name[DN_AQM_NAME_MAX]; /* name of AQM algorithm */ uint32_t type; /* AQM type number */ /* Methods implemented by AQM algorithm: * * enqueue enqueue packet 'm' on queue 'q'. * Return 0 on success, 1 on drop. * * dequeue dequeue a packet from queue 'q'. * Return a packet, NULL if no packet available. * * config configure AQM algorithm * If required, this function should allocate space to store * the configurations and set 'fs->aqmcfg' to point to this space. * 'dn_extra_parms' includes array of parameters send * from ipfw userland command. * Return 0 on success, non-zero otherwise. * * deconfig deconfigure AQM algorithm. * The allocated configuration memory space should be freed here. * Return 0 on success, non-zero otherwise. * * init initialise AQM status variables of queue 'q' * This function is used to allocate space and init AQM status for a * queue and q->aqm_status to point to this space. * Return 0 on success, non-zero otherwise. 
* * cleanup cleanup AQM status variables of queue 'q' * The allocated memory space for AQM status should be freed here. * Return 0 on success, non-zero otherwise. * * getconfig retrieve AQM configurations * This function is used to return AQM parameters to userland * command. The function should fill 'dn_extra_parms' struct with * the AQM configurations using 'par' array. * */ int (*enqueue)(struct dn_queue *, struct mbuf *); struct mbuf * (*dequeue)(struct dn_queue *); int (*config)(struct dn_fsk *, struct dn_extra_parms *ep, int); int (*deconfig)(struct dn_fsk *); int (*init)(struct dn_queue *); int (*cleanup)(struct dn_queue *); int (*getconfig)(struct dn_fsk *, struct dn_extra_parms *); int ref_count; /*Number of queues instances in the system */ int cfg_ref_count; /*Number of AQM instances in the system */ SLIST_ENTRY (dn_aqm) next; /* Next AQM in the list */ }; /* Helper function to update queue and scheduler statistics. * negative len + drop -> drop * negative len -> dequeue * positive len -> enqueue * positive len + drop -> drop during enqueue */ __inline static void update_stats(struct dn_queue *q, int len, int drop) { int inc = 0; struct dn_flow *sni; struct dn_flow *qni; sni = &q->_si->ni; qni = &q->ni; if (len < 0) inc = -1; else if(len > 0) inc = 1; if (drop) { qni->drops++; sni->drops++; - dn_cfg.io_pkt_drop++; + V_dn_cfg.io_pkt_drop++; } else { /*update queue stats */ qni->length += inc; qni->len_bytes += len; /*update scheduler instance stats */ sni->length += inc; sni->len_bytes += len; } /* tot_pkts is updated in dn_enqueue function */ } /* kernel module related function */ int dn_aqm_modevent(module_t mod, int cmd, void *arg); #define DECLARE_DNAQM_MODULE(name, dnaqm) \ static moduledata_t name##_mod = { \ #name, dn_aqm_modevent, dnaqm \ }; \ DECLARE_MODULE(name, name##_mod, \ SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY); \ MODULE_DEPEND(name, dummynet, 3, 3, 3) #endif diff --git a/sys/netpfil/ipfw/dn_aqm_codel.c b/sys/netpfil/ipfw/dn_aqm_codel.c index a1f90461ecab..79c6afd8b635 100644 --- a/sys/netpfil/ipfw/dn_aqm_codel.c +++ b/sys/netpfil/ipfw/dn_aqm_codel.c @@ -1,443 +1,443 @@ /* * Codel - The Controlled-Delay Active Queue Management algorithm. * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
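The substantive change in this patch is mechanical: every access to the global dummynet configuration dn_cfg becomes V_dn_cfg, the per-VNET accessor, so each network stack instance keeps its own dummynet state and counters. A minimal editor's sketch of the VNET idiom behind the rename, assuming dn_cfg's type is struct dn_parms as in ip_dn_private.h; the patch adds the same idiom for io_pkt_drop in dn_sched_fq_codel.h further down:

#include <net/vnet.h>

/* In exactly one .c file: storage, replicated per network stack. */
VNET_DEFINE(struct dn_parms, dn_cfg);

/* In a shared header: the declaration plus the V_ accessor macro. */
VNET_DECLARE(struct dn_parms, dn_cfg);
#define V_dn_cfg VNET(dn_cfg)

/* Call sites then transparently touch the current vnet's copy, e.g.
 * V_dn_cfg.curr_time in NOW/AQM_UNOW and V_dn_cfg.io_pkt_drop in
 * update_stats() above. */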
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #ifdef NEW_AQM #include #include #include #include #include #define DN_AQM_CODEL 1 static struct dn_aqm codel_desc; /* default codel parameters */ struct dn_aqm_codel_parms codel_sysctl = {5000 * AQM_TIME_1US, 100000 * AQM_TIME_1US, 0}; static int codel_sysctl_interval_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = codel_sysctl.interval; value /= AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 100 * AQM_TIME_1S) return (EINVAL); codel_sysctl.interval = value * AQM_TIME_1US ; return (0); } static int codel_sysctl_target_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = codel_sysctl.target; value /= AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); D("%ld", value); if (value < 1 || value > 5 * AQM_TIME_1S) return (EINVAL); codel_sysctl.target = value * AQM_TIME_1US ; return (0); } /* defining Codel sysctl variables */ SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, codel, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "CODEL"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_codel, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,codel_sysctl_target_handler, "L", "CoDel target in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_codel, OID_AUTO, interval, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, codel_sysctl_interval_handler, "L", "CoDel interval in microsecond"); #endif /* This function computes codel_interval/sqrt(count) * Newton's method of approximation is used to compute 1/sqrt(count). * http://betterexplained.com/articles/ * understanding-quakes-fast-inverse-square-root/ */ aqm_time_t control_law(struct codel_status *cst, struct dn_aqm_codel_parms *cprms, aqm_time_t t) { uint32_t count; uint64_t temp; count = cst->count; /* we don't calculate isqrt(1) to get more accurate result*/ if (count == 1) { /* prepare isqrt (old guess) for the next iteration i.e. 1/sqrt(2)*/ cst->isqrt = (1UL<< FIX_POINT_BITS) * 7/10; /* return time + isqrt(1)*interval */ return t + cprms->interval; } /* newguess = g(1.5 - 0.5*c*g^2) * Multiplying both sides by 2 to make all the constants intergers * newguess * 2 = g(3 - c*g^2) g=old guess, c=count * So, newguess = newguess /2 * Fixed point operations are used here. */ /* Calculate g^2 */ temp = (uint32_t) cst->isqrt * cst->isqrt; /* Calculate (3 - c*g^2) i.e. 
(3 - c * temp) */ temp = (3ULL<< (FIX_POINT_BITS*2)) - (count * temp); /* * Divide by 2 because we multiplied the original equation by two * Also, we shift the result by 8 bits to prevent overflow. * */ temp >>= (1 + 8); /* Now, temp = (1.5 - 0.5*c*g^2) * Calculate g (1.5 - 0.5*c*g^2) i.e. g * temp */ temp = (cst->isqrt * temp) >> (FIX_POINT_BITS + FIX_POINT_BITS - 8); cst->isqrt = temp; /* calculate codel_interval/sqrt(count) */ return t + ((cprms->interval * temp) >> FIX_POINT_BITS); } /* * Extract a packet from the head of queue 'q' * Return a packet or NULL if the queue is empty. * Also extract packet's timestamp from mtag. */ struct mbuf * codel_extract_head(struct dn_queue *q, aqm_time_t *pkt_ts) { struct m_tag *mtag; struct mbuf *m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; /* Update stats */ update_stats(q, -m->m_pkthdr.len, 0); if (q->ni.length == 0) /* queue is now idle */ - q->q_time = dn_cfg.curr_time; + q->q_time = V_dn_cfg.curr_time; /* extract packet TS*/ mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) { D("Codel timestamp mtag not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } return m; } /* * Enqueue a packet 'm' in queue 'q' */ static int aqm_codel_enqueue(struct dn_queue *q, struct mbuf *m) { struct dn_fs *f; uint64_t len; struct codel_status *cst; /*codel status variables */ struct m_tag *mtag; f = &(q->fs->fs); len = m->m_pkthdr.len; cst = q->aqm_status; if(!cst) { D("Codel queue is not initialized\n"); goto drop; } /* Finding maximum packet size */ // XXX we can get MTU from driver instead if (len > cst->maxpkt_size) cst->maxpkt_size = len; /* check for queue size and drop the tail if exceed queue limit*/ if (f->flags & DN_QSIZE_BYTES) { if ( q->ni.len_bytes > f->qsize) goto drop; } else { if ( q->ni.length >= f->qsize) goto drop; } /* Add timestamp as mtag */ mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) goto drop; *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); mq_append(&q->mq, m); update_stats(q, len, 0); return (0); drop: update_stats(q, 0, 1); FREE_PKT(m); return (1); } /* Dequeue a pcaket from queue q */ static struct mbuf * aqm_codel_dequeue(struct dn_queue *q) { return codel_dequeue(q); } /* * initialize Codel for queue 'q' * First allocate memory for codel status. */ static int aqm_codel_init(struct dn_queue *q) { struct codel_status *cst; if (!q->fs->aqmcfg) { D("Codel is not configure!d"); return EINVAL; } q->aqm_status = malloc(sizeof(struct codel_status), M_DUMMYNET, M_NOWAIT | M_ZERO); if (q->aqm_status == NULL) { D("Cannot allocate AQM_codel private data"); return ENOMEM ; } /* init codel status variables */ cst = q->aqm_status; cst->dropping=0; cst->first_above_time=0; cst->drop_next_time=0; cst->count=0; cst->maxpkt_size = 500; /* increase reference counters */ codel_desc.ref_count++; return 0; } /* * Clean up Codel status for queue 'q' * Destroy memory allocated for codel status. 
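control_law() above returns t + interval/sqrt(count), spending one fixed-point Newton step per call and reusing the previous estimate as the seed, which stays accurate because count changes by one between calls. A standalone user-space sketch of the same arithmetic; FIX_POINT_BITS comes from dn_aqm_codel.h and is assumed to be 8 here (build with -lm for the reference value):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define FIX_POINT_BITS 8	/* assumed; the kernel header defines it */

int main(void)
{
	/* seed handed out for count == 1, i.e. ~1/sqrt(2) in Q8 */
	uint64_t isqrt = (1UL << FIX_POINT_BITS) * 7 / 10;
	uint64_t temp;

	for (uint32_t count = 2; count <= 8; count++) {
		/* one Newton step per consecutive drop, as in
		 * control_law(): g' = g * (3 - c * g^2) / 2 */
		temp = isqrt * isqrt;				/* g^2, Q16 */
		temp = (3ULL << (FIX_POINT_BITS * 2)) - count * temp;
		temp >>= (1 + 8);	/* divide by 2, guard overflow */
		isqrt = (isqrt * temp) >>
		    (FIX_POINT_BITS + FIX_POINT_BITS - 8);
		printf("count=%u approx=%.3f exact=%.3f\n", (unsigned)count,
		    isqrt / (double)(1 << FIX_POINT_BITS),
		    1.0 / sqrt((double)count));
	}
	return (0);
}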
*/ static int aqm_codel_cleanup(struct dn_queue *q) { if (q && q->aqm_status) { free(q->aqm_status, M_DUMMYNET); q->aqm_status = NULL; /* decrease reference counters */ codel_desc.ref_count--; } else D("Codel already cleaned up"); return 0; } /* * Config codel parameters * also allocate memory for codel configurations */ static int aqm_codel_config(struct dn_fsk* fs, struct dn_extra_parms *ep, int len) { struct dn_aqm_codel_parms *ccfg; int l = sizeof(struct dn_extra_parms); if (len < l) { D("invalid sched parms length got %d need %d", len, l); return EINVAL; } /* we free the old cfg because maybe the original allocation * not the same size as the new one (different AQM type). */ if (fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; } fs->aqmcfg = malloc(sizeof(struct dn_aqm_codel_parms), M_DUMMYNET, M_NOWAIT | M_ZERO); if (fs->aqmcfg== NULL) { D("cannot allocate AQM_codel configuration parameters"); return ENOMEM; } /* configure codel parameters */ ccfg = fs->aqmcfg; if (ep->par[0] < 0) ccfg->target = codel_sysctl.target; else ccfg->target = ep->par[0] * AQM_TIME_1US; if (ep->par[1] < 0) ccfg->interval = codel_sysctl.interval; else ccfg->interval = ep->par[1] * AQM_TIME_1US; if (ep->par[2] < 0) ccfg->flags = 0; else ccfg->flags = ep->par[2]; /* bound codel configurations */ ccfg->target = BOUND_VAR(ccfg->target,1, 5 * AQM_TIME_1S); ccfg->interval = BOUND_VAR(ccfg->interval,1, 5 * AQM_TIME_1S); /* increase config reference counter */ codel_desc.cfg_ref_count++; return 0; } /* * Deconfigure Codel and free memory allocation */ static int aqm_codel_deconfig(struct dn_fsk* fs) { if (fs && fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; fs->aqmfp = NULL; /* decrease config reference counter */ codel_desc.cfg_ref_count--; } return 0; } /* * Retrieve Codel configuration parameters. */ static int aqm_codel_getconfig(struct dn_fsk *fs, struct dn_extra_parms * ep) { struct dn_aqm_codel_parms *ccfg; if (fs->aqmcfg) { strlcpy(ep->name, codel_desc.name, sizeof(ep->name)); ccfg = fs->aqmcfg; ep->par[0] = ccfg->target / AQM_TIME_1US; ep->par[1] = ccfg->interval / AQM_TIME_1US; ep->par[2] = ccfg->flags; return 0; } return 1; } static struct dn_aqm codel_desc = { _SI( .type = ) DN_AQM_CODEL, _SI( .name = ) "CODEL", _SI( .enqueue = ) aqm_codel_enqueue, _SI( .dequeue = ) aqm_codel_dequeue, _SI( .config = ) aqm_codel_config, _SI( .getconfig = ) aqm_codel_getconfig, _SI( .deconfig = ) aqm_codel_deconfig, _SI( .init = ) aqm_codel_init, _SI( .cleanup = ) aqm_codel_cleanup, }; DECLARE_DNAQM_MODULE(dn_aqm_codel, &codel_desc); #endif diff --git a/sys/netpfil/ipfw/dn_aqm_pie.c b/sys/netpfil/ipfw/dn_aqm_pie.c index 2d5d500e275c..4a55aed662f7 100644 --- a/sys/netpfil/ipfw/dn_aqm_pie.c +++ b/sys/netpfil/ipfw/dn_aqm_pie.c @@ -1,809 +1,809 @@ /* * PIE - Proportional Integral controller Enhanced AQM algorithm. * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #ifdef NEW_AQM #include #include #include #include #include /* for debugging */ #include static struct dn_aqm pie_desc; /* PIE defaults * target=15ms, tupdate=15ms, max_burst=150ms, * max_ecnth=0.1, alpha=0.125, beta=1.25, */ struct dn_aqm_pie_parms pie_sysctl = { 15 * AQM_TIME_1MS, 15 * AQM_TIME_1MS, 150 * AQM_TIME_1MS, PIE_SCALE/10 , PIE_SCALE * 0.125, PIE_SCALE * 1.25 , PIE_CAPDROP_ENABLED | PIE_DEPRATEEST_ENABLED | PIE_DERAND_ENABLED }; static int pie_sysctl_alpha_beta_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"alpha")) value = pie_sysctl.alpha; else value = pie_sysctl.beta; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 7 * PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; if (!strcmp(oidp->oid_name,"alpha")) pie_sysctl.alpha = value; else pie_sysctl.beta = value; return (0); } static int pie_sysctl_target_tupdate_maxb_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"target")) value = pie_sysctl.qdelay_ref; else if (!strcmp(oidp->oid_name,"tupdate")) value = pie_sysctl.tupdate; else value = pie_sysctl.max_burst; value = value / AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 10 * AQM_TIME_1S) return (EINVAL); value = value * AQM_TIME_1US; if (!strcmp(oidp->oid_name,"target")) pie_sysctl.qdelay_ref = value; else if (!strcmp(oidp->oid_name,"tupdate")) pie_sysctl.tupdate = value; else pie_sysctl.max_burst = value; return (0); } static int pie_sysctl_max_ecnth_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = pie_sysctl.max_ecnth; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; pie_sysctl.max_ecnth = value; return (0); } /* define PIE sysctl variables */ SYSBEGIN(f4) SYSCTL_DECL(_net_inet); 
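The handlers above expose alpha, beta and max_ecnth as integers "scaled by 1000" while storing them in PIE_SCALE fixed point. A user-space sketch of the round trip, assuming PIE_FIX_POINT_BITS is 13 as in dn_aqm_pie.h:

#include <stdio.h>

#define PIE_FIX_POINT_BITS 13		/* assumed; see dn_aqm_pie.h */
#define PIE_SCALE (1L << PIE_FIX_POINT_BITS)

int main(void)
{
	/* the defaults from pie_sysctl above: alpha = 0.125, beta = 1.25 */
	long alpha = PIE_SCALE * 0.125;			/* 1024 */
	long beta = PIE_SCALE * 1.25;			/* 10240 */

	/* what the "scaled by 1000" sysctl read path reports */
	printf("alpha: %ld\n", alpha * 1000 / PIE_SCALE);	/* 125 */
	printf("beta:  %ld\n", beta * 1000 / PIE_SCALE);	/* 1250 */

	/* and the write path: a user storing 125 gets alpha back exactly */
	printf("restored: %ld\n", 125L * PIE_SCALE / 1000);	/* 1024 */
	return (0);
}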
SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, pie, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "PIE"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_target_tupdate_maxb_handler, "L", "queue target in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, tupdate, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_target_tupdate_maxb_handler, "L", "the frequency of drop probability calculation in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, max_burst, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_target_tupdate_maxb_handler, "L", "Burst allowance interval in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, max_ecnth, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_max_ecnth_handler, "L", "ECN safeguard threshold scaled by 1000"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, alpha, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_alpha_beta_handler, "L", "PIE alpha scaled by 1000"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, beta, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_alpha_beta_handler, "L", "beta scaled by 1000"); #endif /* * Callout function for drop probability calculation * This function is called every tupdate interval and takes a pointer to * the PIE status variables as an argument */ static void calculate_drop_prob(void *x) { int64_t p, prob, oldprob; struct dn_aqm_pie_parms *pprms; struct pie_status *pst = (struct pie_status *) x; int p_isneg; pprms = pst->parms; prob = pst->drop_prob; /* calculate the current qdelay using the DRE method. * If TS is used and there is no data in the queue, reset current_qdelay * as it stays at the last value during the dequeue process.
*/ if (pprms->flags & PIE_DEPRATEEST_ENABLED) pst->current_qdelay = ((uint64_t)pst->pq->ni.len_bytes * pst->avg_dq_time) >> PIE_DQ_THRESHOLD_BITS; else if (!pst->pq->ni.len_bytes) pst->current_qdelay = 0; /* calculate drop probability */ p = (int64_t)pprms->alpha * ((int64_t)pst->current_qdelay - (int64_t)pprms->qdelay_ref); p +=(int64_t) pprms->beta * ((int64_t)pst->current_qdelay - (int64_t)pst->qdelay_old); /* take absolute value so right shift result is well defined */ p_isneg = p < 0; if (p_isneg) { p = -p; } /* We shift PIE_MAX_PROB by 12 bits to increase the division precision */ p *= (PIE_MAX_PROB << 12) / AQM_TIME_1S; /* auto-tune drop probability */ if (prob < (PIE_MAX_PROB / 1000000)) /* 0.000001 */ p >>= 11 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100000)) /* 0.00001 */ p >>= 9 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10000)) /* 0.0001 */ p >>= 7 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 1000)) /* 0.001 */ p >>= 5 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100)) /* 0.01 */ p >>= 3 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10)) /* 0.1 */ p >>= 1 + PIE_FIX_POINT_BITS + 12; else p >>= PIE_FIX_POINT_BITS + 12; oldprob = prob; if (p_isneg) { prob = prob - p; /* check for multiplication underflow */ if (prob > oldprob) { prob= 0; D("underflow"); } } else { /* Cap Drop adjustment */ if ((pprms->flags & PIE_CAPDROP_ENABLED) && prob >= PIE_MAX_PROB / 10 && p > PIE_MAX_PROB / 50 ) { p = PIE_MAX_PROB / 50; } prob = prob + p; /* check for multiplication overflow */ if (prob < oldprob) { D("overflow"); prob = PIE_MAX_PROB; } /* * decay the drop probability exponentially * and restrict it to the range 0 to PIE_MAX_PROB */ if (pst->current_qdelay == 0 && pst->qdelay_old == 0) { /* 0.98 ~= 1- 1/64 */ prob = prob - (prob >> 6); } if (prob > PIE_MAX_PROB) { prob = PIE_MAX_PROB; } } pst->drop_prob = prob; /* store the current queue delay value in the old queue delay */ pst->qdelay_old = pst->current_qdelay; /* update burst allowance */ if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance>0) { if (pst->burst_allowance > pprms->tupdate ) pst->burst_allowance -= pprms->tupdate; else pst->burst_allowance = 0; } /* reschedule the calculate_drop_prob function */ if (pst->sflags & PIE_ACTIVE) callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, calculate_drop_prob, pst, 0); mtx_unlock(&pst->lock_mtx); } /* * Extract a packet from the head of queue 'q' * Return a packet or NULL if the queue is empty. * If getts is set, also extract the packet's timestamp from its mtag.
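calculate_drop_prob() scales its correction term and then right-shifts it by an amount keyed to the current probability, so small probabilities get proportionally smaller steps; the extra shifts of 11, 9, 7, 5, 3 and 1 bits are the 1/2048 ... 1/2 factors of the RFC 8033 auto-tuning table. A sketch isolating that ladder; PIE_PROB_BITS is assumed to be 31 as in dn_aqm_pie.h, and the kernel additionally shifts by PIE_FIX_POINT_BITS + 12 to undo its earlier scaling:

#include <stdint.h>
#include <stdio.h>

#define PIE_PROB_BITS 31			/* assumed */
#define PIE_MAX_PROB ((1ULL << PIE_PROB_BITS) - 1)

/* extra right-shift applied to the correction, as in calculate_drop_prob() */
static int
autotune_shift(uint64_t prob)
{
	if (prob < PIE_MAX_PROB / 1000000) return (11);	/* step x 1/2048 */
	if (prob < PIE_MAX_PROB / 100000)  return (9);	/* step x 1/512  */
	if (prob < PIE_MAX_PROB / 10000)   return (7);	/* step x 1/128  */
	if (prob < PIE_MAX_PROB / 1000)    return (5);	/* step x 1/32   */
	if (prob < PIE_MAX_PROB / 100)     return (3);	/* step x 1/8    */
	if (prob < PIE_MAX_PROB / 10)      return (1);	/* step x 1/2    */
	return (0);					/* full step     */
}

int main(void)
{
	/* smaller drop probabilities get proportionally smaller corrections */
	uint64_t samples[] = { PIE_MAX_PROB / 10000000, PIE_MAX_PROB / 500,
	    PIE_MAX_PROB / 5 };
	for (int i = 0; i < 3; i++)
		printf("prob=%llu -> extra shift %d\n",
		    (unsigned long long)samples[i], autotune_shift(samples[i]));
	return (0);
}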
*/ static struct mbuf * pie_extract_head(struct dn_queue *q, aqm_time_t *pkt_ts, int getts) { struct m_tag *mtag; struct mbuf *m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; /* Update stats */ update_stats(q, -m->m_pkthdr.len, 0); if (q->ni.length == 0) /* queue is now idle */ - q->q_time = dn_cfg.curr_time; + q->q_time = V_dn_cfg.curr_time; if (getts) { /* extract packet TS*/ mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) { D("PIE timestamp mtag not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } } return m; } /* * Initialize PIE variables and optionally activate it */ __inline static void init_activate_pie(struct pie_status *pst, int resettimer) { struct dn_aqm_pie_parms *pprms; mtx_lock(&pst->lock_mtx); pprms = pst->parms; pst->drop_prob = 0; pst->qdelay_old = 0; pst->burst_allowance = pprms->max_burst; pst->accu_prob = 0; pst->dq_count = 0; pst->avg_dq_time = 0; pst->sflags = PIE_INMEASUREMENT; pst->measurement_start = AQM_UNOW; if (resettimer) { pst->sflags |= PIE_ACTIVE; callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, calculate_drop_prob, pst, 0); } //DX(2, "PIE Activated"); mtx_unlock(&pst->lock_mtx); } /* * Deactivate PIE and stop the drop probability update callout */ __inline static void deactivate_pie(struct pie_status *pst) { mtx_lock(&pst->lock_mtx); pst->sflags &= ~(PIE_ACTIVE | PIE_INMEASUREMENT); callout_stop(&pst->aqm_pie_callout); //D("PIE Deactivated"); mtx_unlock(&pst->lock_mtx); } /* * Dequeue and return a packet from queue 'q', or NULL if 'q' is empty. * Also, calculate the departure time or queue delay using the timestamp */ static struct mbuf * aqm_pie_dequeue(struct dn_queue *q) { struct mbuf *m; struct dn_flow *ni; /* stats for scheduler instance */ struct dn_aqm_pie_parms *pprms; struct pie_status *pst; aqm_time_t now; aqm_time_t pkt_ts, dq_time; int32_t w; pst = q->aqm_status; pprms = pst->parms; ni = &q->_si->ni; /* we extract the packet ts only when Departure Rate Estimation is not used */ m = pie_extract_head(q, &pkt_ts, !(pprms->flags & PIE_DEPRATEEST_ENABLED)); if (!m || !(pst->sflags & PIE_ACTIVE)) return m; now = AQM_UNOW; if (pprms->flags & PIE_DEPRATEEST_ENABLED) { /* calculate average departure time */ if(pst->sflags & PIE_INMEASUREMENT) { pst->dq_count += m->m_pkthdr.len; if (pst->dq_count >= PIE_DQ_THRESHOLD) { dq_time = now - pst->measurement_start; /* * if we don't have an old avg dq_time, i.e. PIE is (re)initialized, * don't use the weight to calculate the new avg_dq_time */ if(pst->avg_dq_time == 0) pst->avg_dq_time = dq_time; else { /* * weight = PIE_DQ_THRESHOLD/2^6, but we scaled * the weight by 2^8. Thus, scaled * weight = PIE_DQ_THRESHOLD /2^8 * */ w = PIE_DQ_THRESHOLD >> 8; pst->avg_dq_time = (dq_time* w + (pst->avg_dq_time * ((1L << 8) - w))) >> 8; pst->sflags &= ~PIE_INMEASUREMENT; } } } /* * Start a new measurement cycle when the queue has * PIE_DQ_THRESHOLD worth of bytes. */ if(!(pst->sflags & PIE_INMEASUREMENT) && q->ni.len_bytes >= PIE_DQ_THRESHOLD) { pst->sflags |= PIE_INMEASUREMENT; pst->measurement_start = now; pst->dq_count = 0; } } /* Optionally, use the packet timestamp to estimate the queue delay */ else pst->current_qdelay = now - pkt_ts; return m; } /* * Enqueue a packet in q, subject to space and PIE queue management policy * (whose parameters are in q->fs). * Update stats for the queue and the scheduler. * Return 0 on success, 1 on drop. The packet is consumed anyway.
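With PIE_DEPRATEEST_ENABLED, aqm_pie_dequeue() above times how long the queue takes to drain PIE_DQ_THRESHOLD bytes and smooths that with a fixed-point EWMA; calculate_drop_prob() then turns backlog into delay by multiplying by the smoothed drain time. A user-space sketch of both steps, assuming PIE_DQ_THRESHOLD_BITS is 14 (a 16 KB measurement window) as in dn_aqm_pie.h:

#include <stdint.h>
#include <stdio.h>

#define PIE_DQ_THRESHOLD_BITS 14		/* assumed: 16 KB window */
#define PIE_DQ_THRESHOLD (1UL << PIE_DQ_THRESHOLD_BITS)

int main(void)
{
	/* Suppose draining PIE_DQ_THRESHOLD bytes takes 2000 us on average.
	 * Queue delay is then estimated from the backlog alone: */
	uint64_t avg_dq_time = 2000;	/* us per PIE_DQ_THRESHOLD bytes */
	uint64_t len_bytes = 49152;	/* backlog: 3 x the threshold */
	uint64_t qdelay = (len_bytes * avg_dq_time) >> PIE_DQ_THRESHOLD_BITS;
	printf("estimated qdelay = %llu us\n",
	    (unsigned long long)qdelay);			/* 6000 */

	/* EWMA update once a measurement cycle completes, with w computed
	 * as in aqm_pie_dequeue(): 16384 >> 8 = 64, i.e. weight 0.25 */
	uint32_t w = PIE_DQ_THRESHOLD >> 8;
	uint64_t dq_time = 2400;	/* newest drain measurement */
	avg_dq_time = (dq_time * w + avg_dq_time * ((1UL << 8) - w)) >> 8;
	printf("new avg_dq_time = %llu us\n",
	    (unsigned long long)avg_dq_time);			/* 2100 */
	return (0);
}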
*/ static int aqm_pie_enqueue(struct dn_queue *q, struct mbuf* m) { struct dn_fs *f; uint64_t len; uint32_t qlen; struct pie_status *pst; struct dn_aqm_pie_parms *pprms; int t; len = m->m_pkthdr.len; pst = q->aqm_status; if(!pst) { DX(2, "PIE queue is not initialized\n"); update_stats(q, 0, 1); FREE_PKT(m); return 1; } f = &(q->fs->fs); pprms = pst->parms; t = ENQUE; /* get current queue length in bytes or packets*/ qlen = (f->flags & DN_QSIZE_BYTES) ? q->ni.len_bytes : q->ni.length; /* check for queue size and drop the tail if exceed queue limit*/ if (qlen >= f->qsize) t = DROP; /* drop/mark the packet when PIE is active and burst time elapsed */ else if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance==0 && drop_early(pst, q->ni.len_bytes) == DROP) { /* * if drop_prob over ECN threshold, drop the packet * otherwise mark and enqueue it. */ if ((pprms->flags & PIE_ECN_ENABLED) && pst->drop_prob < (pprms->max_ecnth << (PIE_PROB_BITS - PIE_FIX_POINT_BITS)) && ecn_mark(m)) t = ENQUE; else t = DROP; } /* Turn PIE on when 1/3 of the queue is full */ if (!(pst->sflags & PIE_ACTIVE) && qlen >= pst->one_third_q_size) { init_activate_pie(pst, 1); } /* Reset burst tolerance and optinally turn PIE off*/ if ((pst->sflags & PIE_ACTIVE) && pst->drop_prob == 0 && pst->current_qdelay < (pprms->qdelay_ref >> 1) && pst->qdelay_old < (pprms->qdelay_ref >> 1)) { pst->burst_allowance = pprms->max_burst; if ((pprms->flags & PIE_ON_OFF_MODE_ENABLED) && qlen<=0) deactivate_pie(pst); } /* Timestamp the packet if Departure Rate Estimation is disabled */ if (t != DROP && !(pprms->flags & PIE_DEPRATEEST_ENABLED)) { /* Add TS to mbuf as a TAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) { t = DROP; } else { *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); } } if (t != DROP) { mq_append(&q->mq, m); update_stats(q, len, 0); return (0); } else { update_stats(q, 0, 1); /* reset accu_prob after packet drop */ pst->accu_prob = 0; FREE_PKT(m); return 1; } return 0; } /* * initialize PIE for queue 'q' * First allocate memory for PIE status. 
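In aqm_pie_enqueue(), a packet that drop_early() condemns can still be ECN-marked instead, provided drop_prob is below the max_ecnth safeguard once max_ecnth is shifted from PIE_SCALE fixed point up to probability scale. A sketch of just that comparison; PIE_FIX_POINT_BITS and PIE_PROB_BITS are assumed to be 13 and 31, and ecn_capable stands in for the real ecn_mark():

#include <stdint.h>
#include <stdio.h>

#define PIE_FIX_POINT_BITS 13			/* assumed */
#define PIE_PROB_BITS 31			/* assumed */
#define PIE_SCALE (1L << PIE_FIX_POINT_BITS)

/* Decide mark-vs-drop the way aqm_pie_enqueue() does once drop_early()
 * has already said DROP. */
static const char *
mark_or_drop(uint64_t drop_prob, long max_ecnth, int ecn_capable)
{
	uint64_t th = (uint64_t)max_ecnth <<
	    (PIE_PROB_BITS - PIE_FIX_POINT_BITS);

	return ((drop_prob < th && ecn_capable) ? "ECN-mark + enqueue" : "drop");
}

int main(void)
{
	long max_ecnth = PIE_SCALE / 10;	/* default safeguard: 0.1 */
	uint64_t low  = 1ULL << 26;		/* prob ~ 0.03 */
	uint64_t high = 1ULL << 29;		/* prob ~ 0.25 */

	printf("low prob : %s\n", mark_or_drop(low, max_ecnth, 1));
	printf("high prob: %s\n", mark_or_drop(high, max_ecnth, 1));
	return (0);
}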
*/ static int aqm_pie_init(struct dn_queue *q) { struct pie_status *pst; struct dn_aqm_pie_parms *pprms; int err = 0; pprms = q->fs->aqmcfg; do { /* exit with break when error occurs*/ if (!pprms){ DX(2, "AQM_PIE is not configured"); err = EINVAL; break; } q->aqm_status = malloc(sizeof(struct pie_status), M_DUMMYNET, M_NOWAIT | M_ZERO); if (q->aqm_status == NULL) { D("cannot allocate PIE private data"); err = ENOMEM ; break; } pst = q->aqm_status; /* increase reference count for PIE module */ pie_desc.ref_count++; pst->pq = q; pst->parms = pprms; /* For speed optimization, we caculate 1/3 queue size once here */ // we can use x/3 = (x >>2) + (x >>4) + (x >>7) pst->one_third_q_size = q->fs->fs.qsize/3; mtx_init(&pst->lock_mtx, "mtx_pie", NULL, MTX_DEF); callout_init_mtx(&pst->aqm_pie_callout, &pst->lock_mtx, CALLOUT_RETURNUNLOCKED); pst->current_qdelay = 0; init_activate_pie(pst, !(pprms->flags & PIE_ON_OFF_MODE_ENABLED)); //DX(2, "aqm_PIE_init"); } while(0); return err; } /* * Callout function to destroy pie mtx and free PIE status memory */ static void pie_callout_cleanup(void *x) { struct pie_status *pst = (struct pie_status *) x; mtx_unlock(&pst->lock_mtx); mtx_destroy(&pst->lock_mtx); free(x, M_DUMMYNET); DN_BH_WLOCK(); pie_desc.ref_count--; DN_BH_WUNLOCK(); } /* * Clean up PIE status for queue 'q' * Destroy memory allocated for PIE status. */ static int aqm_pie_cleanup(struct dn_queue *q) { if(!q) { D("q is null"); return 0; } struct pie_status *pst = q->aqm_status; if(!pst) { //D("queue is already cleaned up"); return 0; } if(!q->fs || !q->fs->aqmcfg) { D("fs is null or no cfg"); return 1; } if (q->fs->aqmfp && q->fs->aqmfp->type !=DN_AQM_PIE) { D("Not PIE fs (%d)", q->fs->fs.fs_nr); return 1; } /* * Free PIE status allocated memory using pie_callout_cleanup() callout * function to avoid any potential race. * We reset aqm_pie_callout to call pie_callout_cleanup() in next 1um. This * stops the scheduled calculate_drop_prob() callout and call pie_callout_cleanup() * which does memory freeing. */ mtx_lock(&pst->lock_mtx); callout_reset_sbt(&pst->aqm_pie_callout, SBT_1US, 0, pie_callout_cleanup, pst, 0); q->aqm_status = NULL; mtx_unlock(&pst->lock_mtx); return 0; } /* * Config PIE parameters * also allocate memory for PIE configurations */ static int aqm_pie_config(struct dn_fsk* fs, struct dn_extra_parms *ep, int len) { struct dn_aqm_pie_parms *pcfg; int l = sizeof(struct dn_extra_parms); if (len < l) { D("invalid sched parms length got %d need %d", len, l); return EINVAL; } /* we free the old cfg because maybe the orignal allocation * was used for diffirent AQM type. 
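One subtlety above: aqm_pie_cleanup() cannot free pie_status in place, because a calculate_drop_prob() callout may still be pending on the same mutex. Instead it re-targets the callout at a destructor and lets the callout subsystem serialize the free. The hand-off, condensed from aqm_pie_cleanup() and pie_callout_cleanup() above (no new logic, just the two sides shown together):

/* In aqm_pie_cleanup(): retire the state through the callout. */
mtx_lock(&pst->lock_mtx);
callout_reset_sbt(&pst->aqm_pie_callout, SBT_1US, 0,
    pie_callout_cleanup, pst, 0);	/* destructor fires in ~1 us */
q->aqm_status = NULL;			/* the queue no longer owns pst */
mtx_unlock(&pst->lock_mtx);

/* In pie_callout_cleanup(), entered with lock_mtx held because the
 * callout was initialized with CALLOUT_RETURNUNLOCKED: */
mtx_unlock(&pst->lock_mtx);
mtx_destroy(&pst->lock_mtx);
free(pst, M_DUMMYNET);
DN_BH_WLOCK();
pie_desc.ref_count--;
DN_BH_WUNLOCK();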
*/ if (fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; } fs->aqmcfg = malloc(sizeof(struct dn_aqm_pie_parms), M_DUMMYNET, M_NOWAIT | M_ZERO); if (fs->aqmcfg== NULL) { D("cannot allocate PIE configuration parameters"); return ENOMEM; } /* par array contains pie configuration as follow * 0- qdelay_ref,1- tupdate, 2- max_burst * 3- max_ecnth, 4- alpha, 5- beta, 6- flags */ /* configure PIE parameters */ pcfg = fs->aqmcfg; if (ep->par[0] < 0) pcfg->qdelay_ref = pie_sysctl.qdelay_ref * AQM_TIME_1US; else pcfg->qdelay_ref = ep->par[0]; if (ep->par[1] < 0) pcfg->tupdate = pie_sysctl.tupdate * AQM_TIME_1US; else pcfg->tupdate = ep->par[1]; if (ep->par[2] < 0) pcfg->max_burst = pie_sysctl.max_burst * AQM_TIME_1US; else pcfg->max_burst = ep->par[2]; if (ep->par[3] < 0) pcfg->max_ecnth = pie_sysctl.max_ecnth; else pcfg->max_ecnth = ep->par[3]; if (ep->par[4] < 0) pcfg->alpha = pie_sysctl.alpha; else pcfg->alpha = ep->par[4]; if (ep->par[5] < 0) pcfg->beta = pie_sysctl.beta; else pcfg->beta = ep->par[5]; if (ep->par[6] < 0) pcfg->flags = pie_sysctl.flags; else pcfg->flags = ep->par[6]; /* bound PIE configurations */ pcfg->qdelay_ref = BOUND_VAR(pcfg->qdelay_ref, 1, 10 * AQM_TIME_1S); pcfg->tupdate = BOUND_VAR(pcfg->tupdate, 1, 10 * AQM_TIME_1S); pcfg->max_burst = BOUND_VAR(pcfg->max_burst, 0, 10 * AQM_TIME_1S); pcfg->max_ecnth = BOUND_VAR(pcfg->max_ecnth, 0, PIE_SCALE); pcfg->alpha = BOUND_VAR(pcfg->alpha, 0, 7 * PIE_SCALE); pcfg->beta = BOUND_VAR(pcfg->beta, 0 , 7 * PIE_SCALE); pie_desc.cfg_ref_count++; //D("pie cfg_ref_count=%d", pie_desc.cfg_ref_count); return 0; } /* * Deconfigure PIE and free memory allocation */ static int aqm_pie_deconfig(struct dn_fsk* fs) { if (fs && fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; pie_desc.cfg_ref_count--; } return 0; } /* * Retrieve PIE configuration parameters. */ static int aqm_pie_getconfig (struct dn_fsk *fs, struct dn_extra_parms * ep) { struct dn_aqm_pie_parms *pcfg; if (fs->aqmcfg) { strlcpy(ep->name, pie_desc.name, sizeof(ep->name)); pcfg = fs->aqmcfg; ep->par[0] = pcfg->qdelay_ref / AQM_TIME_1US; ep->par[1] = pcfg->tupdate / AQM_TIME_1US; ep->par[2] = pcfg->max_burst / AQM_TIME_1US; ep->par[3] = pcfg->max_ecnth; ep->par[4] = pcfg->alpha; ep->par[5] = pcfg->beta; ep->par[6] = pcfg->flags; return 0; } return 1; } static struct dn_aqm pie_desc = { _SI( .type = ) DN_AQM_PIE, _SI( .name = ) "PIE", _SI( .ref_count = ) 0, _SI( .cfg_ref_count = ) 0, _SI( .enqueue = ) aqm_pie_enqueue, _SI( .dequeue = ) aqm_pie_dequeue, _SI( .config = ) aqm_pie_config, _SI( .deconfig = ) aqm_pie_deconfig, _SI( .getconfig = ) aqm_pie_getconfig, _SI( .init = ) aqm_pie_init, _SI( .cleanup = ) aqm_pie_cleanup, }; DECLARE_DNAQM_MODULE(dn_aqm_pie, &pie_desc); #endif diff --git a/sys/netpfil/ipfw/dn_sched.h b/sys/netpfil/ipfw/dn_sched.h index 9bbd9019d623..1aa885ce3ccf 100644 --- a/sys/netpfil/ipfw/dn_sched.h +++ b/sys/netpfil/ipfw/dn_sched.h @@ -1,203 +1,203 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Riccardo Panicucci, Luigi Rizzo, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * The API to write a packet scheduling algorithm for dummynet. * * $FreeBSD$ */ #ifndef _DN_SCHED_H #define _DN_SCHED_H #define DN_MULTIQUEUE 0x01 /* * Descriptor for a scheduling algorithm. * Contains all function pointers for a given scheduler * This is typically created when a module is loaded, and stored * in a global list of schedulers. */ struct dn_alg { uint32_t type; /* the scheduler type */ const char *name; /* scheduler name */ uint32_t flags; /* DN_MULTIQUEUE if supports multiple queues */ /* * The following define the size of 3 optional data structures * that may need to be allocated at runtime, and are appended * to each of the base data structures: scheduler, sched.inst, * and queue. We don't have a per-flowset structure. */ /* + parameters attached to the template, e.g. * default queue sizes, weights, quantum size, and so on; */ size_t schk_datalen; /* + per-instance parameters, such as timestamps, * containers for queues, etc; */ size_t si_datalen; size_t q_datalen; /* per-queue parameters (e.g. S,F) */ /* * Methods implemented by the scheduler: * enqueue enqueue packet 'm' on scheduler 's', queue 'q'. * q is NULL for !MULTIQUEUE. * Return 0 on success, 1 on drop (packet consumed anyways). * Note that q should be interpreted only as a hint * on the flow that the mbuf belongs to: while a * scheduler will normally enqueue m into q, it is ok * to leave q alone and put the mbuf elsewhere. * This function is called in two cases: * - when a new packet arrives to the scheduler; * - when a scheduler is reconfigured. In this case the * call is issued by the new_queue callback, with a * non empty queue (q) and m pointing to the first * mbuf in the queue. For this reason, the function * should internally check for (m != q->mq.head) * before calling dn_enqueue(). * * dequeue Called when scheduler instance 's' can * dequeue a packet. Return NULL if none are available. * XXX what about non work-conserving ? * * config called on 'sched X config ...', normally writes * in the area of size sch_arg * * destroy called on 'sched delete', frees everything * in sch_arg (other parts are handled by more specific * functions) * * new_sched called when a new instance is created, e.g. * to create the local queue for !MULTIQUEUE, set V or * copy parameters for WFQ, and so on. * * free_sched called when deleting an instance, cleans * extra data in the per-instance area. * * new_fsk called when a flowset is linked to a scheduler, * e.g. to validate parameters such as weights etc. * free_fsk when a flowset is unlinked from a scheduler. 
* (probably unnecessary) * * new_queue called to set the per-queue parameters, * e.g. S and F, adjust sum of weights in the parent, etc. * * The new_queue callback is normally called from when * creating a new queue. In some cases (such as a * scheduler change or reconfiguration) it can be called * with a non empty queue. In this case, the queue * In case of non empty queue, the new_queue callback could * need to call the enqueue function. In this case, * the callback should eventually call enqueue() passing * as m the first element in the queue. * * free_queue actions related to a queue removal, e.g. undo * all the above. If the queue has data in it, also remove * from the scheduler. This can e.g. happen during a reconfigure. */ int (*enqueue)(struct dn_sch_inst *, struct dn_queue *, struct mbuf *); struct mbuf * (*dequeue)(struct dn_sch_inst *); int (*config)(struct dn_schk *); int (*destroy)(struct dn_schk*); int (*new_sched)(struct dn_sch_inst *); int (*free_sched)(struct dn_sch_inst *); int (*new_fsk)(struct dn_fsk *f); int (*free_fsk)(struct dn_fsk *f); int (*new_queue)(struct dn_queue *q); int (*free_queue)(struct dn_queue *q); #ifdef NEW_AQM /* Getting scheduler extra parameters */ int (*getconfig)(struct dn_schk *, struct dn_extra_parms *); #endif /* run-time fields */ int ref_count; /* XXX number of instances in the system */ SLIST_ENTRY(dn_alg) next; /* Next scheduler in the list */ }; /* MSVC does not support initializers so we need this ugly macro */ #ifdef _WIN32 #define _SI(fld) #else #define _SI(fld) fld #endif /* * Additionally, dummynet exports some functions and macros * to be used by schedulers: */ void dn_free_pkts(struct mbuf *mnext); int dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop); /* bound a variable between min and max */ int ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg); /* * Extract the head of a queue, update stats. Must be the very last * thing done on a dequeue as the queue itself may go away. */ static __inline struct mbuf* dn_dequeue(struct dn_queue *q) { struct mbuf *m = q->mq.head; if (m == NULL) return NULL; #ifdef NEW_AQM /* Call AQM dequeue function */ if (q->fs->aqmfp && q->fs->aqmfp->dequeue ) return q->fs->aqmfp->dequeue(q); #endif q->mq.head = m->m_nextpkt; q->mq.count--; /* Update stats for the queue */ q->ni.length--; q->ni.len_bytes -= m->m_pkthdr.len; if (q->_si) { q->_si->ni.length--; q->_si->ni.len_bytes -= m->m_pkthdr.len; } if (q->ni.length == 0) /* queue is now idle */ - q->q_time = dn_cfg.curr_time; + q->q_time = V_dn_cfg.curr_time; return m; } int dn_sched_modevent(module_t mod, int cmd, void *arg); #define DECLARE_DNSCHED_MODULE(name, dnsched) \ static moduledata_t name##_mod = { \ #name, dn_sched_modevent, dnsched \ }; \ DECLARE_MODULE(name, name##_mod, \ SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); \ MODULE_DEPEND(name, dummynet, 3, 3, 3) #endif /* _DN_SCHED_H */ diff --git a/sys/netpfil/ipfw/dn_sched_fq_codel.c b/sys/netpfil/ipfw/dn_sched_fq_codel.c index bc61be867d36..97341f5a9a60 100644 --- a/sys/netpfil/ipfw/dn_sched_fq_codel.c +++ b/sys/netpfil/ipfw/dn_sched_fq_codel.c @@ -1,614 +1,614 @@ /* * FQ_Codel - The FlowQueue-Codel scheduler/AQM * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. 
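Before the FQ schedulers that follow, it may help to see roughly the smallest module the dn_alg API admits: a single FIFO embedded after the scheduler instance, in the style of the in-tree dn_sched_fifo.c. This is an editor's sketch: the name EXAMPLE and type number 99 are hypothetical, and the callbacks the API allows to be absent are left NULL:

static int
example_enqueue(struct dn_sch_inst *si, struct dn_queue *q, struct mbuf *m)
{
	/* the single queue lives in the si extension area (si_datalen) */
	return (dn_enqueue((struct dn_queue *)(si + 1), m, 0));
}

static struct mbuf *
example_dequeue(struct dn_sch_inst *si)
{
	return (dn_dequeue((struct dn_queue *)(si + 1)));
}

static int
example_new_sched(struct dn_sch_inst *si)
{
	/* wire up the embedded queue, as fq_codel_new_sched() does */
	struct dn_queue *q = (struct dn_queue *)(si + 1);

	set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q));
	q->_si = si;
	q->fs = si->sched->fs;
	return (0);
}

static struct dn_alg example_desc = {
	_SI( .type = ) 99,		/* hypothetical scheduler number */
	_SI( .name = ) "EXAMPLE",
	_SI( .flags = ) 0,		/* one queue: no DN_MULTIQUEUE */
	_SI( .schk_datalen = ) 0,
	_SI( .si_datalen = ) sizeof(struct dn_queue),
	_SI( .q_datalen = ) 0,
	_SI( .enqueue = ) example_enqueue,
	_SI( .dequeue = ) example_dequeue,
	_SI( .config = ) NULL,
	_SI( .destroy = ) NULL,
	_SI( .new_sched = ) example_new_sched,
	_SI( .free_sched = ) NULL,
	_SI( .new_fsk = ) NULL,
	_SI( .free_fsk = ) NULL,
	_SI( .new_queue = ) NULL,
	_SI( .free_queue = ) NULL,
};

DECLARE_DNSCHED_MODULE(dn_sched_example, &example_desc);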
* Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef _KERNEL #include #include //#include #include #include #include #include /* IFNAMSIZ */ #include #include /* ipfw_rule_ref */ #include /* flow_id */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #else #include #endif /* NOTE: In the fq_codel module, we reimplement the CoDel AQM functions * because fq_codel uses a different flow (sub-queue) structure, and * dn_queue includes many variables not needed by a flow (sub-queue), * i.e. we avoid extra overhead (88 bytes vs 208 bytes). * Also, the CoDel functions manage the stats of the sub-queues as well as the main queue.
*/ #define DN_SCHED_FQ_CODEL 6 static struct dn_alg fq_codel_desc; /* fq_codel default parameters including codel */ struct dn_sch_fq_codel_parms fq_codel_sysctl = {{5000 * AQM_TIME_1US, 100000 * AQM_TIME_1US, CODEL_ECN_ENABLED}, 1024, 10240, 1514}; static int fqcodel_sysctl_interval_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = fq_codel_sysctl.ccfg.interval; value /= AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 100 * AQM_TIME_1S) return (EINVAL); fq_codel_sysctl.ccfg.interval = value * AQM_TIME_1US ; return (0); } static int fqcodel_sysctl_target_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = fq_codel_sysctl.ccfg.target; value /= AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 5 * AQM_TIME_1S) return (EINVAL); fq_codel_sysctl.ccfg.target = value * AQM_TIME_1US ; return (0); } SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, fqcodel, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "FQ_CODEL"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_fqcodel, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqcodel_sysctl_target_handler, "L", "FQ_CoDel target in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_fqcodel, OID_AUTO, interval, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqcodel_sysctl_interval_handler, "L", "FQ_CoDel interval in microsecond"); SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, quantum, CTLFLAG_RW, &fq_codel_sysctl.quantum, 1514, "FQ_CoDel quantum"); SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, flows, CTLFLAG_RW, &fq_codel_sysctl.flows_cnt, 1024, "Number of queues for FQ_CoDel"); SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, limit, CTLFLAG_RW, &fq_codel_sysctl.limit, 10240, "FQ_CoDel queues size limit"); #endif /* Drop a packet form the head of codel queue */ static void codel_drop_head(struct fq_codel_flow *q, struct fq_codel_si *si) { struct mbuf *m = q->mq.head; if (m == NULL) return; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 1); if (si->main_q.ni.length == 0) /* queue is now idle */ - si->main_q.q_time = dn_cfg.curr_time; + si->main_q.q_time = V_dn_cfg.curr_time; FREE_PKT(m); } /* Enqueue a packet 'm' to a queue 'q' and add timestamp to that packet. * Return 1 when unable to add timestamp, otherwise return 0 */ static int codel_enqueue(struct fq_codel_flow *q, struct mbuf *m, struct fq_codel_si *si) { uint64_t len; len = m->m_pkthdr.len; /* finding maximum packet size */ if (len > q->cst.maxpkt_size) q->cst.maxpkt_size = len; /* Add timestamp to mbuf as MTAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) goto drop; *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); mq_append(&q->mq, m); fq_update_stats(q, si, len, 0); return 0; drop: fq_update_stats(q, si, len, 1); m_freem(m); return 1; } /* * Classify a packet to queue number using Jenkins hash function. 
* Return: queue number * the input of the hash are protocol no, perturbation, src IP, dst IP, * src port, dst port, */ static inline int fq_codel_classify_flow(struct mbuf *m, uint16_t fcount, struct fq_codel_si *si) { struct ip *ip; struct tcphdr *th; struct udphdr *uh; uint8_t tuple[41]; uint16_t hash=0; ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); //#ifdef INET6 struct ip6_hdr *ip6; int isip6; isip6 = (ip->ip_v == 6); if(isip6) { ip6 = (struct ip6_hdr *)ip; *((uint8_t *) &tuple[0]) = ip6->ip6_nxt; *((uint32_t *) &tuple[1]) = si->perturbation; memcpy(&tuple[5], ip6->ip6_src.s6_addr, 16); memcpy(&tuple[21], ip6->ip6_dst.s6_addr, 16); switch (ip6->ip6_nxt) { case IPPROTO_TCP: th = (struct tcphdr *)(ip6 + 1); *((uint16_t *) &tuple[37]) = th->th_dport; *((uint16_t *) &tuple[39]) = th->th_sport; break; case IPPROTO_UDP: uh = (struct udphdr *)(ip6 + 1); *((uint16_t *) &tuple[37]) = uh->uh_dport; *((uint16_t *) &tuple[39]) = uh->uh_sport; break; default: memset(&tuple[37], 0, 4); } hash = jenkins_hash(tuple, 41, HASHINIT) % fcount; return hash; } //#endif /* IPv4 */ *((uint8_t *) &tuple[0]) = ip->ip_p; *((uint32_t *) &tuple[1]) = si->perturbation; *((uint32_t *) &tuple[5]) = ip->ip_src.s_addr; *((uint32_t *) &tuple[9]) = ip->ip_dst.s_addr; switch (ip->ip_p) { case IPPROTO_TCP: th = (struct tcphdr *)(ip + 1); *((uint16_t *) &tuple[13]) = th->th_dport; *((uint16_t *) &tuple[15]) = th->th_sport; break; case IPPROTO_UDP: uh = (struct udphdr *)(ip + 1); *((uint16_t *) &tuple[13]) = uh->uh_dport; *((uint16_t *) &tuple[15]) = uh->uh_sport; break; default: memset(&tuple[13], 0, 4); } hash = jenkins_hash(tuple, 17, HASHINIT) % fcount; return hash; } /* * Enqueue a packet into an appropriate queue according to * FQ_CODEL algorithm. */ static int fq_codel_enqueue(struct dn_sch_inst *_si, struct dn_queue *_q, struct mbuf *m) { struct fq_codel_si *si; struct fq_codel_schk *schk; struct dn_sch_fq_codel_parms *param; struct dn_queue *mainq; int idx, drop, i, maxidx; mainq = (struct dn_queue *)(_si + 1); si = (struct fq_codel_si *)_si; schk = (struct fq_codel_schk *)(si->_si.sched+1); param = &schk->cfg; /* classify a packet to queue number*/ idx = fq_codel_classify_flow(m, param->flows_cnt, si); /* enqueue packet into appropriate queue using CoDel AQM. * Note: 'codel_enqueue' function returns 1 only when it unable to * add timestamp to packet (no limit check)*/ drop = codel_enqueue(&si->flows[idx], m, si); /* codel unable to timestamp a packet */ if (drop) return 1; /* If the flow (sub-queue) is not active ,then add it to the tail of * new flows list, initialize and activate it. */ if (!si->flows[idx].active ) { STAILQ_INSERT_TAIL(&si->newflows, &si->flows[idx], flowchain); si->flows[idx].deficit = param->quantum; si->flows[idx].cst.dropping = false; si->flows[idx].cst.first_above_time = 0; si->flows[idx].active = 1; //D("activate %d",idx); } /* check the limit for all queues and remove a packet from the * largest one */ if (mainq->ni.length > schk->cfg.limit) { D("over limit"); /* find first active flow */ for (maxidx = 0; maxidx < schk->cfg.flows_cnt; maxidx++) if (si->flows[maxidx].active) break; if (maxidx < schk->cfg.flows_cnt) { /* find the largest sub- queue */ for (i = maxidx + 1; i < schk->cfg.flows_cnt; i++) if (si->flows[i].active && si->flows[i].stats.length > si->flows[maxidx].stats.length) maxidx = i; codel_drop_head(&si->flows[maxidx], si); D("maxidx = %d",maxidx); drop = 1; } } return drop; } /* * Dequeue a packet from an appropriate queue according to * FQ_CODEL algorithm. 
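fq_codel_classify_flow() above hashes {protocol, perturbation, addresses, ports} and reduces the result modulo flows_cnt. A user-space sketch of the IPv4 branch; FNV-1a stands in for the kernel's jenkins_hash(), and memcpy replaces the kernel's unaligned stores into the tuple:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in mixer for jenkins_hash(); any decent hash illustrates the idea */
static uint32_t
hash32(const uint8_t *p, size_t len)
{
	uint32_t h = 2166136261u;

	while (len--) { h ^= *p++; h *= 16777619u; }
	return (h);
}

/* mirror of the IPv4 branch: a 17-byte tuple of protocol, per-instance
 * perturbation, addresses and ports, hashed into [0, flows_cnt) */
static int
classify_v4(uint8_t proto, uint32_t perturbation, uint32_t src, uint32_t dst,
    uint16_t sport, uint16_t dport, uint16_t flows_cnt)
{
	uint8_t tuple[17];

	tuple[0] = proto;
	memcpy(&tuple[1], &perturbation, 4);
	memcpy(&tuple[5], &src, 4);
	memcpy(&tuple[9], &dst, 4);
	memcpy(&tuple[13], &dport, 2);
	memcpy(&tuple[15], &sport, 2);
	return (hash32(tuple, 17) % flows_cnt);
}

int main(void)
{
	/* the same 5-tuple always lands in the same sub-queue ... */
	printf("%d\n", classify_v4(6, 42, 0x0a000001, 0x0a000002, 12345, 80, 1024));
	printf("%d\n", classify_v4(6, 42, 0x0a000001, 0x0a000002, 12345, 80, 1024));
	/* ... while a different flow usually lands elsewhere */
	printf("%d\n", classify_v4(6, 42, 0x0a000001, 0x0a000002, 12346, 80, 1024));
	return (0);
}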
*/ static struct mbuf * fq_codel_dequeue(struct dn_sch_inst *_si) { struct fq_codel_si *si; struct fq_codel_schk *schk; struct dn_sch_fq_codel_parms *param; struct fq_codel_flow *f; struct mbuf *mbuf; struct fq_codel_list *fq_codel_flowlist; si = (struct fq_codel_si *)_si; schk = (struct fq_codel_schk *)(si->_si.sched+1); param = &schk->cfg; do { /* select a list to start with */ if (STAILQ_EMPTY(&si->newflows)) fq_codel_flowlist = &si->oldflows; else fq_codel_flowlist = &si->newflows; /* Both new and old queue lists are empty, return NULL */ if (STAILQ_EMPTY(fq_codel_flowlist)) return NULL; f = STAILQ_FIRST(fq_codel_flowlist); while (f != NULL) { /* if there is no flow(sub-queue) deficit, increase deficit * by quantum, move the flow to the tail of old flows list * and try another flow. * Otherwise, the flow will be used for dequeue. */ if (f->deficit < 0) { f->deficit += param->quantum; STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain); STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); } else break; f = STAILQ_FIRST(fq_codel_flowlist); } /* the new flows list is empty, try old flows list */ if (STAILQ_EMPTY(fq_codel_flowlist)) continue; /* Dequeue a packet from the selected flow */ mbuf = fqc_codel_dequeue(f, si); /* Codel did not return a packet */ if (!mbuf) { /* If the selected flow belongs to new flows list, then move * it to the tail of old flows list. Otherwise, deactivate it and * remove it from the old list and */ if (fq_codel_flowlist == &si->newflows) { STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain); STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); } else { f->active = 0; STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain); } /* start again */ continue; } /* we have a packet to return, * update flow deficit and return the packet*/ f->deficit -= mbuf->m_pkthdr.len; return mbuf; } while (1); /* unreachable point */ return NULL; } /* * Initialize fq_codel scheduler instance. * also, allocate memory for flows array. */ static int fq_codel_new_sched(struct dn_sch_inst *_si) { struct fq_codel_si *si; struct dn_queue *q; struct fq_codel_schk *schk; int i; si = (struct fq_codel_si *)_si; schk = (struct fq_codel_schk *)(_si->sched+1); if(si->flows) { D("si already configured!"); return 0; } /* init the main queue */ q = &si->main_q; set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q)); q->_si = _si; q->fs = _si->sched->fs; /* allocate memory for flows array */ si->flows = mallocarray(schk->cfg.flows_cnt, sizeof(struct fq_codel_flow), M_DUMMYNET, M_NOWAIT | M_ZERO); if (si->flows == NULL) { D("cannot allocate memory for fq_codel configuration parameters"); return ENOMEM ; } /* init perturbation for this si */ si->perturbation = random(); /* init the old and new flows lists */ STAILQ_INIT(&si->newflows); STAILQ_INIT(&si->oldflows); /* init the flows (sub-queues) */ for (i = 0; i < schk->cfg.flows_cnt; i++) { /* init codel */ si->flows[i].cst.maxpkt_size = 500; } fq_codel_desc.ref_count++; return 0; } /* * Free fq_codel scheduler instance. */ static int fq_codel_free_sched(struct dn_sch_inst *_si) { struct fq_codel_si *si = (struct fq_codel_si *)_si ; /* free the flows array */ free(si->flows , M_DUMMYNET); si->flows = NULL; fq_codel_desc.ref_count--; return 0; } /* * Configure fq_codel scheduler. * the configurations for the scheduler is passed from userland. 
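fq_codel_dequeue() above is deficit round robin over the new/old flow lists: a flow whose deficit has gone negative is recharged with one quantum and rotated to the tail of oldflows before anything more is sent from it. A toy single-flow model of just the deficit accounting, using the default quantum of 1514 bytes:

#include <stdio.h>

int main(void)
{
	int quantum = 1514;
	int deficit = 0;
	int pkts[] = { 1500, 1500, 100, 1500 };	/* one flow's backlog */
	int i = 0, rounds = 0;

	while (i < 4) {
		if (deficit < 0) {	/* out of credit: recharge and */
			deficit += quantum;	/* yield (the flow would move */
			rounds++;		/* to the oldflows tail) */
			continue;
		}
		deficit -= pkts[i];	/* dequeue one packet */
		printf("round %d: sent %d, deficit now %d\n",
		    rounds, pkts[i], deficit);
		i++;
	}
	return (0);
}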
*/ static int fq_codel_config(struct dn_schk *_schk) { struct fq_codel_schk *schk; struct dn_extra_parms *ep; struct dn_sch_fq_codel_parms *fqc_cfg; schk = (struct fq_codel_schk *)(_schk+1); ep = (struct dn_extra_parms *) _schk->cfg; /* par array contains fq_codel configuration as follow * Codel: 0- target,1- interval, 2- flags * FQ_CODEL: 3- quantum, 4- limit, 5- flows */ if (ep && ep->oid.len ==sizeof(*ep) && ep->oid.subtype == DN_SCH_PARAMS) { fqc_cfg = &schk->cfg; if (ep->par[0] < 0) fqc_cfg->ccfg.target = fq_codel_sysctl.ccfg.target; else fqc_cfg->ccfg.target = ep->par[0] * AQM_TIME_1US; if (ep->par[1] < 0) fqc_cfg->ccfg.interval = fq_codel_sysctl.ccfg.interval; else fqc_cfg->ccfg.interval = ep->par[1] * AQM_TIME_1US; if (ep->par[2] < 0) fqc_cfg->ccfg.flags = 0; else fqc_cfg->ccfg.flags = ep->par[2]; /* FQ configurations */ if (ep->par[3] < 0) fqc_cfg->quantum = fq_codel_sysctl.quantum; else fqc_cfg->quantum = ep->par[3]; if (ep->par[4] < 0) fqc_cfg->limit = fq_codel_sysctl.limit; else fqc_cfg->limit = ep->par[4]; if (ep->par[5] < 0) fqc_cfg->flows_cnt = fq_codel_sysctl.flows_cnt; else fqc_cfg->flows_cnt = ep->par[5]; /* Bound the configurations */ fqc_cfg->ccfg.target = BOUND_VAR(fqc_cfg->ccfg.target, 1 , 5 * AQM_TIME_1S); ; fqc_cfg->ccfg.interval = BOUND_VAR(fqc_cfg->ccfg.interval, 1, 100 * AQM_TIME_1S); fqc_cfg->quantum = BOUND_VAR(fqc_cfg->quantum,1, 9000); fqc_cfg->limit= BOUND_VAR(fqc_cfg->limit,1,20480); fqc_cfg->flows_cnt= BOUND_VAR(fqc_cfg->flows_cnt,1,65536); } else return 1; return 0; } /* * Return fq_codel scheduler configurations * the configurations for the scheduler is passed to userland. */ static int fq_codel_getconfig (struct dn_schk *_schk, struct dn_extra_parms *ep) { struct fq_codel_schk *schk = (struct fq_codel_schk *)(_schk+1); struct dn_sch_fq_codel_parms *fqc_cfg; fqc_cfg = &schk->cfg; strcpy(ep->name, fq_codel_desc.name); ep->par[0] = fqc_cfg->ccfg.target / AQM_TIME_1US; ep->par[1] = fqc_cfg->ccfg.interval / AQM_TIME_1US; ep->par[2] = fqc_cfg->ccfg.flags; ep->par[3] = fqc_cfg->quantum; ep->par[4] = fqc_cfg->limit; ep->par[5] = fqc_cfg->flows_cnt; return 0; } /* * fq_codel scheduler descriptor * contains the type of the scheduler, the name, the size of extra * data structures, and function pointers. */ static struct dn_alg fq_codel_desc = { _SI( .type = ) DN_SCHED_FQ_CODEL, _SI( .name = ) "FQ_CODEL", _SI( .flags = ) 0, _SI( .schk_datalen = ) sizeof(struct fq_codel_schk), _SI( .si_datalen = ) sizeof(struct fq_codel_si) - sizeof(struct dn_sch_inst), _SI( .q_datalen = ) 0, _SI( .enqueue = ) fq_codel_enqueue, _SI( .dequeue = ) fq_codel_dequeue, _SI( .config = ) fq_codel_config, /* new sched i.e. sched X config ...*/ _SI( .destroy = ) NULL, /*sched x delete */ _SI( .new_sched = ) fq_codel_new_sched, /* new schd instance */ _SI( .free_sched = ) fq_codel_free_sched, /* delete schd instance */ _SI( .new_fsk = ) NULL, _SI( .free_fsk = ) NULL, _SI( .new_queue = ) NULL, _SI( .free_queue = ) NULL, _SI( .getconfig = ) fq_codel_getconfig, _SI( .ref_count = ) 0 }; DECLARE_DNSCHED_MODULE(dn_fq_codel, &fq_codel_desc); diff --git a/sys/netpfil/ipfw/dn_sched_fq_codel.h b/sys/netpfil/ipfw/dn_sched_fq_codel.h index a8369ac83129..dcdbc6f32e7a 100644 --- a/sys/netpfil/ipfw/dn_sched_fq_codel.h +++ b/sys/netpfil/ipfw/dn_sched_fq_codel.h @@ -1,166 +1,169 @@ /*- * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. 
* Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * FQ_Codel Structures and helper functions * * $FreeBSD$ */ #ifndef _IP_DN_SCHED_FQ_CODEL_H #define _IP_DN_SCHED_FQ_CODEL_H +VNET_DECLARE(unsigned long, io_pkt_drop); +#define V_io_pkt_drop VNET(io_pkt_drop) + /* list of queues */ STAILQ_HEAD(fq_codel_list, fq_codel_flow) ; /* fq_codel parameters including codel */ struct dn_sch_fq_codel_parms { struct dn_aqm_codel_parms ccfg; /* CoDel Parameters */ /* FQ_CODEL Parameters */ uint32_t flows_cnt; /* number of flows */ uint32_t limit; /* hard limit of fq_codel queue size*/ uint32_t quantum; }; /* defaults */ /* flow (sub-queue) stats */ struct flow_stats { uint64_t tot_pkts; /* statistics counters */ uint64_t tot_bytes; uint32_t length; /* Queue length, in packets */ uint32_t len_bytes; /* Queue length, in bytes */ uint32_t drops; }; /* A flow of packets (sub-queue).*/ struct fq_codel_flow { struct mq mq; /* list of packets */ struct flow_stats stats; /* statistics */ int deficit; int active; /* 1: flow is active (in a list) */ struct codel_status cst; STAILQ_ENTRY(fq_codel_flow) flowchain; }; /* extra fq_codel scheduler configurations */ struct fq_codel_schk { struct dn_sch_fq_codel_parms cfg; }; /* fq_codel scheduler instance */ struct fq_codel_si { struct dn_sch_inst _si; /* standard scheduler instance */ struct dn_queue main_q; /* main queue is after si directly */ struct fq_codel_flow *flows; /* array of flows (queues) */ uint32_t perturbation; /* random value */ struct fq_codel_list newflows; /* list of new queues */ struct fq_codel_list oldflows; /* list of old queues */ }; /* Helper function to update queue&main-queue and scheduler statistics. 
* negative len + drop -> drop * negative len -> dequeue * positive len -> enqueue * positive len + drop -> drop during enqueue */ __inline static void fq_update_stats(struct fq_codel_flow *q, struct fq_codel_si *si, int len, int drop) { int inc = 0; if (len < 0) inc = -1; else if (len > 0) inc = 1; if (drop) { si->main_q.ni.drops ++; q->stats.drops ++; si->_si.ni.drops ++; - dn_cfg.io_pkt_drop ++; + V_dn_cfg.io_pkt_drop ++; } if (!drop || (drop && len < 0)) { /* Update stats for the main queue */ si->main_q.ni.length += inc; si->main_q.ni.len_bytes += len; /*update sub-queue stats */ q->stats.length += inc; q->stats.len_bytes += len; /*update scheduler instance stats */ si->_si.ni.length += inc; si->_si.ni.len_bytes += len; } if (inc > 0) { si->main_q.ni.tot_bytes += len; si->main_q.ni.tot_pkts ++; q->stats.tot_bytes +=len; q->stats.tot_pkts++; si->_si.ni.tot_bytes +=len; si->_si.ni.tot_pkts ++; } } /* extract the head of fq_codel sub-queue */ __inline static struct mbuf * fq_codel_extract_head(struct fq_codel_flow *q, aqm_time_t *pkt_ts, struct fq_codel_si *si) { struct mbuf *m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 0); if (si->main_q.ni.length == 0) /* queue is now idle */ - si->main_q.q_time = dn_cfg.curr_time; + si->main_q.q_time = V_dn_cfg.curr_time; /* extract packet timestamp*/ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL){ D("timestamp tag is not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } return m; } #endif diff --git a/sys/netpfil/ipfw/dn_sched_fq_pie.c b/sys/netpfil/ipfw/dn_sched_fq_pie.c index 809ca2b5f4e8..76215aed610a 100644 --- a/sys/netpfil/ipfw/dn_sched_fq_pie.c +++ b/sys/netpfil/ipfw/dn_sched_fq_pie.c @@ -1,1230 +1,1233 @@ /* * FQ_PIE - The FlowQueue-PIE scheduler/AQM * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
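The sign convention spelled out in the fq_update_stats() comment above (negative len for dequeue, positive for enqueue, drop flag for losses) can be condensed into one counter-update routine. A stand-alone sketch, assuming a single stats struct where the real code updates flow, main queue and scheduler instance in parallel:

struct qstats {
	unsigned length, len_bytes, drops;
	unsigned long tot_pkts, tot_bytes;
};

static void update(struct qstats *s, int len, int drop)
{
	int inc = (len > 0) - (len < 0);   /* +1 enqueue, -1 dequeue, 0 no-op */

	if (drop)
		s->drops++;
	if (!drop || len < 0) {            /* enqueue, dequeue, or drop-at-head */
		s->length += inc;
		s->len_bytes += len;
	}
	if (inc > 0) {                     /* totals only grow on enqueue */
		s->tot_pkts++;
		s->tot_bytes += len;
	}
}

Note the asymmetry: a drop during enqueue (positive len) touches only the drop counters, while a drop from the head of the queue (negative len) must also shrink the queue length, which is why the code tests (!drop || len < 0).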
*/ /* Important note: * As there is no official document for the FQ-PIE specification, we used the * FQ-CoDel algorithm with some modifications to implement FQ-PIE. * This FQ-PIE implementation is a beta version and has not been tested * extensively. Our FQ-PIE uses stand-alone PIE AQM per sub-queue. By * default, timestamp is used to calculate queue delay instead of the departure * rate estimation method. Although departure rate estimation is available * as a testing option, the results could be incorrect. Moreover, the option to * turn PIE on and off is available but it does not work properly in this version. */ #ifdef _KERNEL #include #include #include #include #include #include #include #include /* IFNAMSIZ */ #include #include /* ipfw_rule_ref */ #include /* flow_id */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #else #include #endif #define DN_SCHED_FQ_PIE 7 +VNET_DECLARE(unsigned long, io_pkt_drop); +#define V_io_pkt_drop VNET(io_pkt_drop) + /* list of queues */ STAILQ_HEAD(fq_pie_list, fq_pie_flow) ; /* FQ_PIE parameters including PIE */ struct dn_sch_fq_pie_parms { struct dn_aqm_pie_parms pcfg; /* PIE configuration Parameters */ /* FQ_PIE Parameters */ uint32_t flows_cnt; /* number of flows */ uint32_t limit; /* hard limit of FQ_PIE queue size*/ uint32_t quantum; }; /* flow (sub-queue) stats */ struct flow_stats { uint64_t tot_pkts; /* statistics counters */ uint64_t tot_bytes; uint32_t length; /* Queue length, in packets */ uint32_t len_bytes; /* Queue length, in bytes */ uint32_t drops; }; /* A flow of packets (sub-queue)*/ struct fq_pie_flow { struct mq mq; /* list of packets */ struct flow_stats stats; /* statistics */ int deficit; int active; /* 1: flow is active (in a list) */ struct pie_status pst; /* pie status variables */ struct fq_pie_si_extra *psi_extra; STAILQ_ENTRY(fq_pie_flow) flowchain; }; /* extra fq_pie scheduler configurations */ struct fq_pie_schk { struct dn_sch_fq_pie_parms cfg; }; /* fq_pie scheduler instance extra state vars. * The purpose of separating this structure is to preserve the number of active * sub-queues and the flows array pointer even after the scheduler instance * is destroyed. * Preserving these variables allows freeing the allocated memory by * fqpie_callout_cleanup() independently from fq_pie_free_sched(). */ struct fq_pie_si_extra { uint32_t nr_active_q; /* number of active queues */ struct fq_pie_flow *flows; /* array of flows (queues) */ }; /* fq_pie scheduler instance */ struct fq_pie_si { struct dn_sch_inst _si; /* standard scheduler instance.
SHOULD BE FIRST */ struct dn_queue main_q; /* main queue is after si directly */ uint32_t perturbation; /* random value */ struct fq_pie_list newflows; /* list of new queues */ struct fq_pie_list oldflows; /* list of old queues */ struct fq_pie_si_extra *si_extra; /* extra state vars*/ }; static struct dn_alg fq_pie_desc; /* Default FQ-PIE parameters including PIE */ /* PIE defaults * target=15ms, max_burst=150ms, max_ecnth=0.1, * alpha=0.125, beta=1.25, tupdate=15ms * FQ- * flows=1024, limit=10240, quantum =1514 */ struct dn_sch_fq_pie_parms fq_pie_sysctl = {{15000 * AQM_TIME_1US, 15000 * AQM_TIME_1US, 150000 * AQM_TIME_1US, PIE_SCALE * 0.1, PIE_SCALE * 0.125, PIE_SCALE * 1.25, PIE_CAPDROP_ENABLED | PIE_DERAND_ENABLED}, 1024, 10240, 1514}; static int fqpie_sysctl_alpha_beta_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"alpha")) value = fq_pie_sysctl.pcfg.alpha; else value = fq_pie_sysctl.pcfg.beta; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 7 * PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; if (!strcmp(oidp->oid_name,"alpha")) fq_pie_sysctl.pcfg.alpha = value; else fq_pie_sysctl.pcfg.beta = value; return (0); } static int fqpie_sysctl_target_tupdate_maxb_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"target")) value = fq_pie_sysctl.pcfg.qdelay_ref; else if (!strcmp(oidp->oid_name,"tupdate")) value = fq_pie_sysctl.pcfg.tupdate; else value = fq_pie_sysctl.pcfg.max_burst; value = value / AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 10 * AQM_TIME_1S) return (EINVAL); value = value * AQM_TIME_1US; if (!strcmp(oidp->oid_name,"target")) fq_pie_sysctl.pcfg.qdelay_ref = value; else if (!strcmp(oidp->oid_name,"tupdate")) fq_pie_sysctl.pcfg.tupdate = value; else fq_pie_sysctl.pcfg.max_burst = value; return (0); } static int fqpie_sysctl_max_ecnth_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = fq_pie_sysctl.pcfg.max_ecnth; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; fq_pie_sysctl.pcfg.max_ecnth = value; return (0); } /* define FQ- PIE sysctl variables */ SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, fqpie, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "FQ_PIE"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_target_tupdate_maxb_handler, "L", "queue target in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, tupdate, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_target_tupdate_maxb_handler, "L", "the frequency of drop probability calculation in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, max_burst, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_target_tupdate_maxb_handler, "L", "Burst allowance interval in microsecond"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, max_ecnth, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_max_ecnth_handler, "L", "ECN safeguard threshold scaled by 1000"); 
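The sysctl handlers above keep kernel state in PIE_SCALE fixed point while exposing the same quantity to userland scaled by 1000 (alpha = 0.125 reads back as 125). A small userspace sketch of that round trip; the PIE_SCALE value here is an illustrative stand-in for the constant defined in dn_aqm_pie.h:

#include <stdio.h>

#define PIE_SCALE (1L << 16)   /* stand-in for the real fixed-point unit */

static long to_user(long fixed) { return fixed * 1000 / PIE_SCALE; }
static long to_fixed(long user) { return user * PIE_SCALE / 1000; }

int main(void)
{
	long alpha = PIE_SCALE / 8;                 /* 0.125 in fixed point */
	printf("user sees %ld\n", to_user(alpha));  /* prints 125 */
	printf("round trip %ld\n", to_fixed(to_user(alpha)));
	return 0;
}

The handlers also range-check before committing, which is why a write outside [1, 7 * PIE_SCALE] (for alpha/beta) comes back as EINVAL.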
SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, alpha, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_alpha_beta_handler, "L", "PIE alpha scaled by 1000"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, beta, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_alpha_beta_handler, "L", "beta scaled by 1000"); SYSCTL_UINT(_net_inet_ip_dummynet_fqpie, OID_AUTO, quantum, CTLFLAG_RW, &fq_pie_sysctl.quantum, 1514, "quantum for FQ_PIE"); SYSCTL_UINT(_net_inet_ip_dummynet_fqpie, OID_AUTO, flows, CTLFLAG_RW, &fq_pie_sysctl.flows_cnt, 1024, "Number of queues for FQ_PIE"); SYSCTL_UINT(_net_inet_ip_dummynet_fqpie, OID_AUTO, limit, CTLFLAG_RW, &fq_pie_sysctl.limit, 10240, "limit for FQ_PIE"); #endif /* Helper function to update queue&main-queue and scheduler statistics. * negative len & drop -> drop * negative len -> dequeue * positive len -> enqueue * positive len + drop -> drop during enqueue */ __inline static void fq_update_stats(struct fq_pie_flow *q, struct fq_pie_si *si, int len, int drop) { int inc = 0; if (len < 0) inc = -1; else if (len > 0) inc = 1; if (drop) { si->main_q.ni.drops ++; q->stats.drops ++; si->_si.ni.drops ++; - dn_cfg.io_pkt_drop ++; + V_dn_cfg.io_pkt_drop ++; } if (!drop || (drop && len < 0)) { /* Update stats for the main queue */ si->main_q.ni.length += inc; si->main_q.ni.len_bytes += len; /*update sub-queue stats */ q->stats.length += inc; q->stats.len_bytes += len; /*update scheduler instance stats */ si->_si.ni.length += inc; si->_si.ni.len_bytes += len; } if (inc > 0) { si->main_q.ni.tot_bytes += len; si->main_q.ni.tot_pkts ++; q->stats.tot_bytes +=len; q->stats.tot_pkts++; si->_si.ni.tot_bytes +=len; si->_si.ni.tot_pkts ++; } } /* * Extract a packet from the head of sub-queue 'q' * Return a packet or NULL if the queue is empty. * If getts is set, also extract packet's timestamp from mtag. */ __inline static struct mbuf * fq_pie_extract_head(struct fq_pie_flow *q, aqm_time_t *pkt_ts, struct fq_pie_si *si, int getts) { struct mbuf *m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 0); if (si->main_q.ni.length == 0) /* queue is now idle */ - si->main_q.q_time = dn_cfg.curr_time; + si->main_q.q_time = V_dn_cfg.curr_time; if (getts) { /* extract packet timestamp*/ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL){ D("PIE timestamp mtag not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } } return m; } /* * Callout function for drop probability calculation * This function is called over tupdate ms and takes pointer of FQ-PIE * flow as an argument */ static void fq_calculate_drop_prob(void *x) { struct fq_pie_flow *q = (struct fq_pie_flow *) x; struct pie_status *pst = &q->pst; struct dn_aqm_pie_parms *pprms; int64_t p, prob, oldprob; aqm_time_t now; int p_isneg; now = AQM_UNOW; pprms = pst->parms; prob = pst->drop_prob; /* calculate current qdelay using DRE method. * If TS is used and no data in the queue, reset current_qdelay * as it stays at last value during dequeue process. 
*/ if (pprms->flags & PIE_DEPRATEEST_ENABLED) pst->current_qdelay = ((uint64_t)q->stats.len_bytes * pst->avg_dq_time) >> PIE_DQ_THRESHOLD_BITS; else if (!q->stats.len_bytes) pst->current_qdelay = 0; /* calculate drop probability */ p = (int64_t)pprms->alpha * ((int64_t)pst->current_qdelay - (int64_t)pprms->qdelay_ref); p +=(int64_t) pprms->beta * ((int64_t)pst->current_qdelay - (int64_t)pst->qdelay_old); /* take absolute value so right shift result is well defined */ p_isneg = p < 0; if (p_isneg) { p = -p; } /* We shift PIE_MAX_PROB by 12 bits to increase the division precision */ p *= (PIE_MAX_PROB << 12) / AQM_TIME_1S; /* auto-tune drop probability */ if (prob < (PIE_MAX_PROB / 1000000)) /* 0.000001 */ p >>= 11 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100000)) /* 0.00001 */ p >>= 9 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10000)) /* 0.0001 */ p >>= 7 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 1000)) /* 0.001 */ p >>= 5 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100)) /* 0.01 */ p >>= 3 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10)) /* 0.1 */ p >>= 1 + PIE_FIX_POINT_BITS + 12; else p >>= PIE_FIX_POINT_BITS + 12; oldprob = prob; if (p_isneg) { prob = prob - p; /* check for multiplication underflow */ if (prob > oldprob) { prob= 0; D("underflow"); } } else { /* Cap Drop adjustment */ if ((pprms->flags & PIE_CAPDROP_ENABLED) && prob >= PIE_MAX_PROB / 10 && p > PIE_MAX_PROB / 50 ) { p = PIE_MAX_PROB / 50; } prob = prob + p; /* check for multiplication overflow */ if (prob < oldprob) { D("overflow"); prob = PIE_MAX_PROB; } } /* decay the drop probability exponentially and restrict it to the range 0 to PIE_MAX_PROB */ if (prob < 0) { prob = 0; } else { if (pst->current_qdelay == 0 && pst->qdelay_old == 0) { /* 0.98 ~= 1- 1/64 */ prob = prob - (prob >> 6); } if (prob > PIE_MAX_PROB) { prob = PIE_MAX_PROB; } } pst->drop_prob = prob; /* store current delay value */ pst->qdelay_old = pst->current_qdelay; /* update burst allowance */ if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance) { if (pst->burst_allowance > pprms->tupdate) pst->burst_allowance -= pprms->tupdate; else pst->burst_allowance = 0; } if (pst->sflags & PIE_ACTIVE) callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, fq_calculate_drop_prob, q, 0); mtx_unlock(&pst->lock_mtx); } /* * Reset PIE variables & activate the queue */ __inline static void fq_activate_pie(struct fq_pie_flow *q) { struct pie_status *pst = &q->pst; struct dn_aqm_pie_parms *pprms; mtx_lock(&pst->lock_mtx); pprms = pst->parms; pst->drop_prob = 0; pst->qdelay_old = 0; pst->burst_allowance = pprms->max_burst; pst->accu_prob = 0; pst->dq_count = 0; pst->avg_dq_time = 0; pst->sflags = PIE_INMEASUREMENT | PIE_ACTIVE; pst->measurement_start = AQM_UNOW; callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, fq_calculate_drop_prob, q, 0); mtx_unlock(&pst->lock_mtx); } /* * Deactivate PIE and stop the drop probability update callout */ __inline static void fq_deactivate_pie(struct pie_status *pst) { mtx_lock(&pst->lock_mtx); pst->sflags &= ~(PIE_ACTIVE | PIE_INMEASUREMENT); callout_stop(&pst->aqm_pie_callout); //D("PIE Deactivated"); mtx_unlock(&pst->lock_mtx); } /* * Initialize PIE for sub-queue 'q' */ static int pie_init(struct fq_pie_flow *q, struct fq_pie_schk *fqpie_schk) { struct pie_status *pst=&q->pst; struct dn_aqm_pie_parms *pprms = pst->parms; int err = 0; if (!pprms){ D("AQM_PIE is not configured"); err = EINVAL; } else { q->psi_extra->nr_active_q++; /* For speed optimization, we calculate 1/3 queue size once here */ // XXX limit divided by number of queues divided by 3 ???
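Stepping back from pie_init() for a moment: the shift ladder in fq_calculate_drop_prob() above implements PIE's probability auto-tuning, scaling the raw adjustment down by more bits the smaller the current probability, so a nearly idle queue reacts gently. A stand-alone sketch of that selection; BASE_SHIFT stands in for PIE_FIX_POINT_BITS + 12 and the threshold fractions mirror the code:

#define PIE_MAX_PROB 0x7fffffffLL
#define BASE_SHIFT   12            /* illustrative stand-in */

static long long autotune(long long p, long long prob)
{
	int extra;

	if (prob < PIE_MAX_PROB / 1000000)      extra = 11; /* < 0.000001 */
	else if (prob < PIE_MAX_PROB / 100000)  extra = 9;  /* < 0.00001 */
	else if (prob < PIE_MAX_PROB / 10000)   extra = 7;  /* < 0.0001 */
	else if (prob < PIE_MAX_PROB / 1000)    extra = 5;  /* < 0.001 */
	else if (prob < PIE_MAX_PROB / 100)     extra = 3;  /* < 0.01 */
	else if (prob < PIE_MAX_PROB / 10)      extra = 1;  /* < 0.1 */
	else                                    extra = 0;

	return p >> (BASE_SHIFT + extra);       /* smaller prob, smaller step */
}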
pst->one_third_q_size = (fqpie_schk->cfg.limit / fqpie_schk->cfg.flows_cnt) / 3; mtx_init(&pst->lock_mtx, "mtx_pie", NULL, MTX_DEF); callout_init_mtx(&pst->aqm_pie_callout, &pst->lock_mtx, CALLOUT_RETURNUNLOCKED); } return err; } /* * callout function to destroy PIE lock, and free fq_pie flows and fq_pie si * extra memory when the number of active sub-queues reaches zero. * 'x' is a fq_pie_flow to be destroyed */ static void fqpie_callout_cleanup(void *x) { struct fq_pie_flow *q = x; struct pie_status *pst = &q->pst; struct fq_pie_si_extra *psi_extra; mtx_unlock(&pst->lock_mtx); mtx_destroy(&pst->lock_mtx); psi_extra = q->psi_extra; DN_BH_WLOCK(); psi_extra->nr_active_q--; /* when all sub-queues are destroyed, free the flows and fq_pie extra vars memory */ if (!psi_extra->nr_active_q) { free(psi_extra->flows, M_DUMMYNET); free(psi_extra, M_DUMMYNET); fq_pie_desc.ref_count--; } DN_BH_WUNLOCK(); } /* * Clean up PIE status for sub-queue 'q' * Stop callout timer and destroy mtx using fqpie_callout_cleanup() callout. */ static int pie_cleanup(struct fq_pie_flow *q) { struct pie_status *pst = &q->pst; mtx_lock(&pst->lock_mtx); callout_reset_sbt(&pst->aqm_pie_callout, SBT_1US, 0, fqpie_callout_cleanup, q, 0); mtx_unlock(&pst->lock_mtx); return 0; } /* * Dequeue and return a packet from sub-queue 'q' or NULL if 'q' is empty. * Also, calculate departure time or queue delay using timestamp */ static struct mbuf * pie_dequeue(struct fq_pie_flow *q, struct fq_pie_si *si) { struct mbuf *m; struct dn_aqm_pie_parms *pprms; struct pie_status *pst; aqm_time_t now; aqm_time_t pkt_ts, dq_time; int32_t w; pst = &q->pst; pprms = q->pst.parms; /* we extract packet ts only when Departure Rate Estimation is not used */ m = fq_pie_extract_head(q, &pkt_ts, si, !(pprms->flags & PIE_DEPRATEEST_ENABLED)); if (!m || !(pst->sflags & PIE_ACTIVE)) return m; now = AQM_UNOW; if (pprms->flags & PIE_DEPRATEEST_ENABLED) { /* calculate average departure time */ if(pst->sflags & PIE_INMEASUREMENT) { pst->dq_count += m->m_pkthdr.len; if (pst->dq_count >= PIE_DQ_THRESHOLD) { dq_time = now - pst->measurement_start; /* * if we don't have an old avg dq_time, i.e. PIE is (re)initialized, * don't use the weight to calculate the new avg_dq_time */ if(pst->avg_dq_time == 0) pst->avg_dq_time = dq_time; else { /* * weight = PIE_DQ_THRESHOLD/2^6, but we scaled * the weight by 2^8. Thus, scaled * weight = PIE_DQ_THRESHOLD /2^8 * */ w = PIE_DQ_THRESHOLD >> 8; pst->avg_dq_time = (dq_time* w + (pst->avg_dq_time * ((1L << 8) - w))) >> 8; pst->sflags &= ~PIE_INMEASUREMENT; } } } /* * Start a new measurement cycle when the queue has * PIE_DQ_THRESHOLD worth of bytes. */ if(!(pst->sflags & PIE_INMEASUREMENT) && q->stats.len_bytes >= PIE_DQ_THRESHOLD) { pst->sflags |= PIE_INMEASUREMENT; pst->measurement_start = now; pst->dq_count = 0; } } /* Optionally, use packet timestamp to estimate queue delay */ else pst->current_qdelay = now - pkt_ts; return m; } /* * Enqueue a packet in q, subject to space and FQ-PIE queue management policy * (whose parameters are in q->fs). * Update stats for the queue and the scheduler. * Return 0 on success, 1 on drop. The packet is consumed anyway.
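The avg_dq_time update in pie_dequeue() above is a fixed-point exponentially weighted moving average: sample and old average are blended with an integer weight over a 2^8 scale, avoiding floating point in the kernel. A sketch, with W as an illustrative weight where the real code derives it from PIE_DQ_THRESHOLD:

typedef unsigned int aqm_time_t;

#define SCALE_BITS 8
#define W 64                      /* weight, out of 2^SCALE_BITS */

static aqm_time_t ewma(aqm_time_t avg, aqm_time_t sample)
{
	if (avg == 0)                 /* first sample after (re)init: no blend */
		return sample;
	return ((unsigned long long)sample * W +
	    (unsigned long long)avg * ((1 << SCALE_BITS) - W)) >> SCALE_BITS;
}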
*/ static int pie_enqueue(struct fq_pie_flow *q, struct mbuf* m, struct fq_pie_si *si) { uint64_t len; struct pie_status *pst; struct dn_aqm_pie_parms *pprms; int t; len = m->m_pkthdr.len; pst = &q->pst; pprms = pst->parms; t = ENQUE; /* drop/mark the packet when PIE is active and burst time elapsed */ if (pst->sflags & PIE_ACTIVE && pst->burst_allowance == 0 && drop_early(pst, q->stats.len_bytes) == DROP) { /* * if drop_prob is over the ECN threshold, drop the packet; * otherwise mark and enqueue it. */ if (pprms->flags & PIE_ECN_ENABLED && pst->drop_prob < (pprms->max_ecnth << (PIE_PROB_BITS - PIE_FIX_POINT_BITS)) && ecn_mark(m)) t = ENQUE; else t = DROP; } /* Turn PIE on when 1/3 of the queue is full */ if (!(pst->sflags & PIE_ACTIVE) && q->stats.len_bytes >= pst->one_third_q_size) { fq_activate_pie(q); } /* reset burst tolerance and optionally turn PIE off */ if (pst->drop_prob == 0 && pst->current_qdelay < (pprms->qdelay_ref >> 1) && pst->qdelay_old < (pprms->qdelay_ref >> 1)) { pst->burst_allowance = pprms->max_burst; if (pprms->flags & PIE_ON_OFF_MODE_ENABLED && q->stats.len_bytes<=0) fq_deactivate_pie(pst); } /* Use timestamp if Departure Rate Estimation mode is disabled */ if (t != DROP && !(pprms->flags & PIE_DEPRATEEST_ENABLED)) { /* Add TS to mbuf as a TAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) { t = DROP; } else { *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); } } if (t != DROP) { mq_append(&q->mq, m); fq_update_stats(q, si, len, 0); return 0; } else { fq_update_stats(q, si, len, 1); pst->accu_prob = 0; FREE_PKT(m); return 1; } return 0; } /* Drop a packet from the head of FQ-PIE sub-queue */ static void pie_drop_head(struct fq_pie_flow *q, struct fq_pie_si *si) { struct mbuf *m = q->mq.head; if (m == NULL) return; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 1); if (si->main_q.ni.length == 0) /* queue is now idle */ - si->main_q.q_time = dn_cfg.curr_time; + si->main_q.q_time = V_dn_cfg.curr_time; /* reset accu_prob after packet drop */ q->pst.accu_prob = 0; FREE_PKT(m); } /* * Classify a packet to queue number using Jenkins hash function.
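Before the classifier itself, it is worth isolating pie_enqueue()'s admission logic above: early drop only applies when PIE is active, the burst allowance is spent, and drop_early() votes to drop; even then, ECN marking can substitute for the drop while the probability stays under the safeguard threshold. A condensed sketch, where the boolean parameters stand in for the real dummynet state and helpers:

enum verdict { ENQUEUE, MARK, DROP_PKT };

static enum verdict admit(int pie_active, int burst_left, int wants_drop,
    int ecn_enabled, int prob_below_ecn_thresh)
{
	if (!pie_active || burst_left > 0 || !wants_drop)
		return ENQUEUE;                 /* no early drop applies */
	if (ecn_enabled && prob_below_ecn_thresh)
		return MARK;                    /* congestion-mark instead of drop */
	return DROP_PKT;
}

Separately from this verdict, the function flips PIE on once the sub-queue holds a third of its share of the limit, which is why one_third_q_size is precomputed in pie_init().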
* Return: queue number * the inputs of the hash are protocol no, perturbation, src IP, dst IP, * src port, dst port, */ static inline int fq_pie_classify_flow(struct mbuf *m, uint16_t fcount, struct fq_pie_si *si) { struct ip *ip; struct tcphdr *th; struct udphdr *uh; uint8_t tuple[41]; uint16_t hash=0; ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); //#ifdef INET6 struct ip6_hdr *ip6; int isip6; isip6 = (ip->ip_v == 6); if(isip6) { ip6 = (struct ip6_hdr *)ip; *((uint8_t *) &tuple[0]) = ip6->ip6_nxt; *((uint32_t *) &tuple[1]) = si->perturbation; memcpy(&tuple[5], ip6->ip6_src.s6_addr, 16); memcpy(&tuple[21], ip6->ip6_dst.s6_addr, 16); switch (ip6->ip6_nxt) { case IPPROTO_TCP: th = (struct tcphdr *)(ip6 + 1); *((uint16_t *) &tuple[37]) = th->th_dport; *((uint16_t *) &tuple[39]) = th->th_sport; break; case IPPROTO_UDP: uh = (struct udphdr *)(ip6 + 1); *((uint16_t *) &tuple[37]) = uh->uh_dport; *((uint16_t *) &tuple[39]) = uh->uh_sport; break; default: memset(&tuple[37], 0, 4); } hash = jenkins_hash(tuple, 41, HASHINIT) % fcount; return hash; } //#endif /* IPv4 */ *((uint8_t *) &tuple[0]) = ip->ip_p; *((uint32_t *) &tuple[1]) = si->perturbation; *((uint32_t *) &tuple[5]) = ip->ip_src.s_addr; *((uint32_t *) &tuple[9]) = ip->ip_dst.s_addr; switch (ip->ip_p) { case IPPROTO_TCP: th = (struct tcphdr *)(ip + 1); *((uint16_t *) &tuple[13]) = th->th_dport; *((uint16_t *) &tuple[15]) = th->th_sport; break; case IPPROTO_UDP: uh = (struct udphdr *)(ip + 1); *((uint16_t *) &tuple[13]) = uh->uh_dport; *((uint16_t *) &tuple[15]) = uh->uh_sport; break; default: memset(&tuple[13], 0, 4); } hash = jenkins_hash(tuple, 17, HASHINIT) % fcount; return hash; } /* * Enqueue a packet into an appropriate queue according to the * FQ-CoDel algorithm. */ static int fq_pie_enqueue(struct dn_sch_inst *_si, struct dn_queue *_q, struct mbuf *m) { struct fq_pie_si *si; struct fq_pie_schk *schk; struct dn_sch_fq_pie_parms *param; struct dn_queue *mainq; struct fq_pie_flow *flows; int idx, drop, i, maxidx; mainq = (struct dn_queue *)(_si + 1); si = (struct fq_pie_si *)_si; flows = si->si_extra->flows; schk = (struct fq_pie_schk *)(si->_si.sched+1); param = &schk->cfg; /* classify a packet to queue number*/ idx = fq_pie_classify_flow(m, param->flows_cnt, si); /* enqueue packet into appropriate queue using PIE AQM. * Note: the 'pie_enqueue' function returns 1 only when it is unable to * add a timestamp to the packet (no limit check)*/ drop = pie_enqueue(&flows[idx], m, si); /* pie was unable to timestamp a packet */ if (drop) return 1; /* If the flow (sub-queue) is not active, then add it to the tail of * the new flows list, initialize and activate it. */ if (!flows[idx].active) { STAILQ_INSERT_TAIL(&si->newflows, &flows[idx], flowchain); flows[idx].deficit = param->quantum; fq_activate_pie(&flows[idx]); flows[idx].active = 1; } /* check the limit for all queues and remove a packet from the * largest one */ if (mainq->ni.length > schk->cfg.limit) { /* find first active flow */ for (maxidx = 0; maxidx < schk->cfg.flows_cnt; maxidx++) if (flows[maxidx].active) break; if (maxidx < schk->cfg.flows_cnt) { /* find the largest sub-queue */ for (i = maxidx + 1; i < schk->cfg.flows_cnt; i++) if (flows[i].active && flows[i].stats.length > flows[maxidx].stats.length) maxidx = i; pie_drop_head(&flows[maxidx], si); drop = 1; } } return drop; } /* * Dequeue a packet from an appropriate queue according to * FQ-CoDel algorithm.
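For the IPv4 branch of fq_pie_classify_flow() above, the essential idea is packing the protocol, a per-instance perturbation (so hash layouts differ across scheduler instances), addresses and ports into a byte array at fixed offsets, then hashing modulo the number of sub-queues. A userspace sketch with the same 17-byte layout; FNV-1a is an illustrative stand-in for jenkins_hash(), and memcpy replaces the kernel's unaligned pointer casts:

#include <stdint.h>
#include <string.h>

static uint32_t fnv1a(const uint8_t *p, size_t len)
{
	uint32_t h = 2166136261u;
	while (len--) { h ^= *p++; h *= 16777619u; }
	return h;
}

static unsigned classify4(uint8_t proto, uint32_t perturbation,
    uint32_t src, uint32_t dst, uint16_t sport, uint16_t dport,
    unsigned nflows)
{
	uint8_t tuple[17];

	tuple[0] = proto;                     /* offsets mirror the code above */
	memcpy(&tuple[1], &perturbation, 4);
	memcpy(&tuple[5], &src, 4);
	memcpy(&tuple[9], &dst, 4);
	memcpy(&tuple[13], &dport, 2);
	memcpy(&tuple[15], &sport, 2);
	return fnv1a(tuple, sizeof(tuple)) % nflows;
}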
*/ static struct mbuf * fq_pie_dequeue(struct dn_sch_inst *_si) { struct fq_pie_si *si; struct fq_pie_schk *schk; struct dn_sch_fq_pie_parms *param; struct fq_pie_flow *f; struct mbuf *mbuf; struct fq_pie_list *fq_pie_flowlist; si = (struct fq_pie_si *)_si; schk = (struct fq_pie_schk *)(si->_si.sched+1); param = &schk->cfg; do { /* select a list to start with */ if (STAILQ_EMPTY(&si->newflows)) fq_pie_flowlist = &si->oldflows; else fq_pie_flowlist = &si->newflows; /* Both new and old queue lists are empty, return NULL */ if (STAILQ_EMPTY(fq_pie_flowlist)) return NULL; f = STAILQ_FIRST(fq_pie_flowlist); while (f != NULL) { /* if there is no flow(sub-queue) deficit, increase deficit * by quantum, move the flow to the tail of old flows list * and try another flow. * Otherwise, the flow will be used for dequeue. */ if (f->deficit < 0) { f->deficit += param->quantum; STAILQ_REMOVE_HEAD(fq_pie_flowlist, flowchain); STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); } else break; f = STAILQ_FIRST(fq_pie_flowlist); } /* the new flows list is empty, try old flows list */ if (STAILQ_EMPTY(fq_pie_flowlist)) continue; /* Dequeue a packet from the selected flow */ mbuf = pie_dequeue(f, si); /* pie did not return a packet */ if (!mbuf) { /* If the selected flow belongs to new flows list, then move * it to the tail of old flows list. Otherwise, deactivate it and * remove it from the old list and */ if (fq_pie_flowlist == &si->newflows) { STAILQ_REMOVE_HEAD(fq_pie_flowlist, flowchain); STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); } else { f->active = 0; fq_deactivate_pie(&f->pst); STAILQ_REMOVE_HEAD(fq_pie_flowlist, flowchain); } /* start again */ continue; } /* we have a packet to return, * update flow deficit and return the packet*/ f->deficit -= mbuf->m_pkthdr.len; return mbuf; } while (1); /* unreachable point */ return NULL; } /* * Initialize fq_pie scheduler instance. * also, allocate memory for flows array. */ static int fq_pie_new_sched(struct dn_sch_inst *_si) { struct fq_pie_si *si; struct dn_queue *q; struct fq_pie_schk *schk; struct fq_pie_flow *flows; int i; si = (struct fq_pie_si *)_si; schk = (struct fq_pie_schk *)(_si->sched+1); if(si->si_extra) { D("si already configured!"); return 0; } /* init the main queue */ q = &si->main_q; set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q)); q->_si = _si; q->fs = _si->sched->fs; /* allocate memory for scheduler instance extra vars */ si->si_extra = malloc(sizeof(struct fq_pie_si_extra), M_DUMMYNET, M_NOWAIT | M_ZERO); if (si->si_extra == NULL) { D("cannot allocate memory for fq_pie si extra vars"); return ENOMEM ; } /* allocate memory for flows array */ si->si_extra->flows = mallocarray(schk->cfg.flows_cnt, sizeof(struct fq_pie_flow), M_DUMMYNET, M_NOWAIT | M_ZERO); flows = si->si_extra->flows; if (flows == NULL) { free(si->si_extra, M_DUMMYNET); si->si_extra = NULL; D("cannot allocate memory for fq_pie flows"); return ENOMEM ; } /* init perturbation for this si */ si->perturbation = random(); si->si_extra->nr_active_q = 0; /* init the old and new flows lists */ STAILQ_INIT(&si->newflows); STAILQ_INIT(&si->oldflows); /* init the flows (sub-queues) */ for (i = 0; i < schk->cfg.flows_cnt; i++) { flows[i].pst.parms = &schk->cfg.pcfg; flows[i].psi_extra = si->si_extra; pie_init(&flows[i], schk); } fq_pie_desc.ref_count++; return 0; } /* * Free fq_pie scheduler instance. 
*/ static int fq_pie_free_sched(struct dn_sch_inst *_si) { struct fq_pie_si *si; struct fq_pie_schk *schk; struct fq_pie_flow *flows; int i; si = (struct fq_pie_si *)_si; schk = (struct fq_pie_schk *)(_si->sched+1); flows = si->si_extra->flows; for (i = 0; i < schk->cfg.flows_cnt; i++) { pie_cleanup(&flows[i]); } si->si_extra = NULL; return 0; } /* * Configure FQ-PIE scheduler. * the configurations for the scheduler are passed from ipfw userland. */ static int fq_pie_config(struct dn_schk *_schk) { struct fq_pie_schk *schk; struct dn_extra_parms *ep; struct dn_sch_fq_pie_parms *fqp_cfg; schk = (struct fq_pie_schk *)(_schk+1); ep = (struct dn_extra_parms *) _schk->cfg; /* par array contains fq_pie configuration as follows * PIE: 0- qdelay_ref,1- tupdate, 2- max_burst * 3- max_ecnth, 4- alpha, 5- beta, 6- flags * FQ_PIE: 7- quantum, 8- limit, 9- flows */ if (ep && ep->oid.len ==sizeof(*ep) && ep->oid.subtype == DN_SCH_PARAMS) { fqp_cfg = &schk->cfg; if (ep->par[0] < 0) fqp_cfg->pcfg.qdelay_ref = fq_pie_sysctl.pcfg.qdelay_ref; else fqp_cfg->pcfg.qdelay_ref = ep->par[0]; if (ep->par[1] < 0) fqp_cfg->pcfg.tupdate = fq_pie_sysctl.pcfg.tupdate; else fqp_cfg->pcfg.tupdate = ep->par[1]; if (ep->par[2] < 0) fqp_cfg->pcfg.max_burst = fq_pie_sysctl.pcfg.max_burst; else fqp_cfg->pcfg.max_burst = ep->par[2]; if (ep->par[3] < 0) fqp_cfg->pcfg.max_ecnth = fq_pie_sysctl.pcfg.max_ecnth; else fqp_cfg->pcfg.max_ecnth = ep->par[3]; if (ep->par[4] < 0) fqp_cfg->pcfg.alpha = fq_pie_sysctl.pcfg.alpha; else fqp_cfg->pcfg.alpha = ep->par[4]; if (ep->par[5] < 0) fqp_cfg->pcfg.beta = fq_pie_sysctl.pcfg.beta; else fqp_cfg->pcfg.beta = ep->par[5]; if (ep->par[6] < 0) fqp_cfg->pcfg.flags = 0; else fqp_cfg->pcfg.flags = ep->par[6]; /* FQ configurations */ if (ep->par[7] < 0) fqp_cfg->quantum = fq_pie_sysctl.quantum; else fqp_cfg->quantum = ep->par[7]; if (ep->par[8] < 0) fqp_cfg->limit = fq_pie_sysctl.limit; else fqp_cfg->limit = ep->par[8]; if (ep->par[9] < 0) fqp_cfg->flows_cnt = fq_pie_sysctl.flows_cnt; else fqp_cfg->flows_cnt = ep->par[9]; /* Bound the configurations */ fqp_cfg->pcfg.qdelay_ref = BOUND_VAR(fqp_cfg->pcfg.qdelay_ref, 1, 5 * AQM_TIME_1S); fqp_cfg->pcfg.tupdate = BOUND_VAR(fqp_cfg->pcfg.tupdate, 1, 5 * AQM_TIME_1S); fqp_cfg->pcfg.max_burst = BOUND_VAR(fqp_cfg->pcfg.max_burst, 0, 5 * AQM_TIME_1S); fqp_cfg->pcfg.max_ecnth = BOUND_VAR(fqp_cfg->pcfg.max_ecnth, 0, PIE_SCALE); fqp_cfg->pcfg.alpha = BOUND_VAR(fqp_cfg->pcfg.alpha, 0, 7 * PIE_SCALE); fqp_cfg->pcfg.beta = BOUND_VAR(fqp_cfg->pcfg.beta, 0, 7 * PIE_SCALE); fqp_cfg->quantum = BOUND_VAR(fqp_cfg->quantum,1,9000); fqp_cfg->limit= BOUND_VAR(fqp_cfg->limit,1,20480); fqp_cfg->flows_cnt= BOUND_VAR(fqp_cfg->flows_cnt,1,65536); } else { D("Wrong parameters for fq_pie scheduler"); return 1; } return 0; } /* * Return FQ-PIE scheduler configurations * the configurations for the scheduler are passed to userland.
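The ten par[] entries handled in fq_pie_config() above all repeat one pattern (negative selects the sysctl default, then BOUND_VAR clamps). The same logic can be written table-driven; a sketch, with illustrative names and with the defaults and ranges understood to come from fq_pie_sysctl and the BOUND_VAR calls above:

struct pdesc { long def, lo, hi; };   /* default and bounds per parameter */

static void apply_params(const long *par, const struct pdesc *d,
    long *out, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		long v = (par[i] < 0) ? d[i].def : par[i];
		out[i] = v > d[i].hi ? d[i].hi : (v < d[i].lo ? d[i].lo : v);
	}
}

The open-coded version in the source keeps the unit conversions visible per field, which the table form would have to encode as extra descriptor members.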
*/ static int fq_pie_getconfig (struct dn_schk *_schk, struct dn_extra_parms *ep) { struct fq_pie_schk *schk = (struct fq_pie_schk *)(_schk+1); struct dn_sch_fq_pie_parms *fqp_cfg; fqp_cfg = &schk->cfg; strcpy(ep->name, fq_pie_desc.name); ep->par[0] = fqp_cfg->pcfg.qdelay_ref; ep->par[1] = fqp_cfg->pcfg.tupdate; ep->par[2] = fqp_cfg->pcfg.max_burst; ep->par[3] = fqp_cfg->pcfg.max_ecnth; ep->par[4] = fqp_cfg->pcfg.alpha; ep->par[5] = fqp_cfg->pcfg.beta; ep->par[6] = fqp_cfg->pcfg.flags; ep->par[7] = fqp_cfg->quantum; ep->par[8] = fqp_cfg->limit; ep->par[9] = fqp_cfg->flows_cnt; return 0; } /* * FQ-PIE scheduler descriptor * contains the type of the scheduler, the name, the size of extra * data structures, and function pointers. */ static struct dn_alg fq_pie_desc = { _SI( .type = ) DN_SCHED_FQ_PIE, _SI( .name = ) "FQ_PIE", _SI( .flags = ) 0, _SI( .schk_datalen = ) sizeof(struct fq_pie_schk), _SI( .si_datalen = ) sizeof(struct fq_pie_si) - sizeof(struct dn_sch_inst), _SI( .q_datalen = ) 0, _SI( .enqueue = ) fq_pie_enqueue, _SI( .dequeue = ) fq_pie_dequeue, _SI( .config = ) fq_pie_config, /* new sched i.e. sched X config ...*/ _SI( .destroy = ) NULL, /*sched x delete */ _SI( .new_sched = ) fq_pie_new_sched, /* new schd instance */ _SI( .free_sched = ) fq_pie_free_sched, /* delete schd instance */ _SI( .new_fsk = ) NULL, _SI( .free_fsk = ) NULL, _SI( .new_queue = ) NULL, _SI( .free_queue = ) NULL, _SI( .getconfig = ) fq_pie_getconfig, _SI( .ref_count = ) 0 }; DECLARE_DNSCHED_MODULE(dn_fq_pie, &fq_pie_desc); diff --git a/sys/netpfil/ipfw/ip_dn_glue.c b/sys/netpfil/ipfw/ip_dn_glue.c index 5a39a1a47282..83f26cb23680 100644 --- a/sys/netpfil/ipfw/ip_dn_glue.c +++ b/sys/netpfil/ipfw/ip_dn_glue.c @@ -1,846 +1,846 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * $FreeBSD$ * * Binary compatibility support for /sbin/ipfw RELENG_7 and RELENG_8 */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include /* ip_output(), IP_FORWARDING */ #include #include #include #include #include #ifdef NEW_AQM #include #endif #include /* FREEBSD7.2 ip_dummynet.h r191715*/ struct dn_heap_entry7 { int64_t key; /* sorting key. Topmost element is smallest one */ void *object; /* object pointer */ }; struct dn_heap7 { int size; int elements; int offset; /* XXX if > 0 this is the offset of direct ptr to obj */ struct dn_heap_entry7 *p; /* really an array of "size" entries */ }; /* Common to 7.2 and 8 */ struct dn_flow_set { SLIST_ENTRY(dn_flow_set) next; /* linked list in a hash slot */ u_short fs_nr ; /* flow_set number */ u_short flags_fs; #define DNOLD_HAVE_FLOW_MASK 0x0001 #define DNOLD_IS_RED 0x0002 #define DNOLD_IS_GENTLE_RED 0x0004 #define DNOLD_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ #define DNOLD_NOERROR 0x0010 /* do not report ENOBUFS on drops */ #define DNOLD_HAS_PROFILE 0x0020 /* the pipe has a delay profile. */ #define DNOLD_IS_PIPE 0x4000 #define DNOLD_IS_QUEUE 0x8000 struct dn_pipe7 *pipe ; /* pointer to parent pipe */ u_short parent_nr ; /* parent pipe#, 0 if local to a pipe */ int weight ; /* WFQ queue weight */ int qsize ; /* queue size in slots or bytes */ int plr ; /* pkt loss rate (2^31-1 means 100%) */ struct ipfw_flow_id flow_mask ; /* hash table of queues onto this flow_set */ int rq_size ; /* number of slots */ int rq_elements ; /* active elements */ struct dn_flow_queue7 **rq; /* array of rq_size entries */ u_int32_t last_expired ; /* do not expire too frequently */ int backlogged ; /* #active queues for this flowset */ /* RED parameters */ #define SCALE_RED 16 #define SCALE(x) ( (x) << SCALE_RED ) #define SCALE_VAL(x) ( (x) >> SCALE_RED ) #define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED ) int w_q ; /* queue weight (scaled) */ int max_th ; /* maximum threshold for queue (scaled) */ int min_th ; /* minimum threshold for queue (scaled) */ int max_p ; /* maximum value for p_b (scaled) */ u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */ u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */ u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */ u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */ u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */ u_int lookup_depth ; /* depth of lookup table */ int lookup_step ; /* granularity inside the lookup table */ int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ int avg_pkt_size ; /* medium packet size */ int max_pkt_size ; /* max packet size */ }; SLIST_HEAD(dn_flow_set_head, dn_flow_set); #define DN_IS_PIPE 0x4000 #define DN_IS_QUEUE 0x8000 struct dn_flow_queue7 { struct dn_flow_queue7 *next ; struct ipfw_flow_id id ; struct mbuf *head, *tail ; /* queue of packets */ u_int len ; u_int len_bytes ; u_long numbytes; u_int64_t tot_pkts ; /* statistics counters */ u_int64_t tot_bytes ; u_int32_t drops ; int hash_slot ; /* debugging/diagnostic */ /* RED parameters */ int avg ; /* average queue length est. 
(scaled) */ int count ; /* arrivals since last RED drop */ int random ; /* random value (scaled) */ u_int32_t q_time; /* start of queue idle time */ /* WF2Q+ support */ struct dn_flow_set *fs ; /* parent flow set */ int heap_pos ; /* position (index) of struct in heap */ int64_t sched_time ; /* current time when queue enters ready_heap */ int64_t S,F ; /* start time, finish time */ }; struct dn_pipe7 { /* a pipe */ SLIST_ENTRY(dn_pipe7) next; /* linked list in a hash slot */ int pipe_nr ; /* number */ int bandwidth; /* really, bytes/tick. */ int delay ; /* really, ticks */ struct mbuf *head, *tail ; /* packets in delay line */ /* WF2Q+ */ struct dn_heap7 scheduler_heap ; /* top extract - key Finish time*/ struct dn_heap7 not_eligible_heap; /* top extract- key Start time */ struct dn_heap7 idle_heap ; /* random extract - key Start=Finish time */ int64_t V ; /* virtual time */ int sum; /* sum of weights of all active sessions */ int numbytes; int64_t sched_time ; /* time pipe was scheduled in ready_heap */ /* * When the tx clock come from an interface (if_name[0] != '\0'), its name * is stored below, whereas the ifp is filled when the rule is configured. */ char if_name[IFNAMSIZ]; struct ifnet *ifp ; int ready ; /* set if ifp != NULL and we got a signal from it */ struct dn_flow_set fs ; /* used with fixed-rate flows */ }; SLIST_HEAD(dn_pipe_head7, dn_pipe7); /* FREEBSD8 ip_dummynet.h r196045 */ struct dn_flow_queue8 { struct dn_flow_queue8 *next ; struct ipfw_flow_id id ; struct mbuf *head, *tail ; /* queue of packets */ u_int len ; u_int len_bytes ; uint64_t numbytes ; /* credit for transmission (dynamic queues) */ int64_t extra_bits; /* extra bits simulating unavailable channel */ u_int64_t tot_pkts ; /* statistics counters */ u_int64_t tot_bytes ; u_int32_t drops ; int hash_slot ; /* debugging/diagnostic */ /* RED parameters */ int avg ; /* average queue length est. (scaled) */ int count ; /* arrivals since last RED drop */ int random ; /* random value (scaled) */ int64_t idle_time; /* start of queue idle time */ /* WF2Q+ support */ struct dn_flow_set *fs ; /* parent flow set */ int heap_pos ; /* position (index) of struct in heap */ int64_t sched_time ; /* current time when queue enters ready_heap */ int64_t S,F ; /* start time, finish time */ }; struct dn_pipe8 { /* a pipe */ SLIST_ENTRY(dn_pipe8) next; /* linked list in a hash slot */ int pipe_nr ; /* number */ int bandwidth; /* really, bytes/tick. */ int delay ; /* really, ticks */ struct mbuf *head, *tail ; /* packets in delay line */ /* WF2Q+ */ struct dn_heap7 scheduler_heap ; /* top extract - key Finish time*/ struct dn_heap7 not_eligible_heap; /* top extract- key Start time */ struct dn_heap7 idle_heap ; /* random extract - key Start=Finish time */ int64_t V ; /* virtual time */ int sum; /* sum of weights of all active sessions */ /* Same as in dn_flow_queue, numbytes can become large */ int64_t numbytes; /* bits I can transmit (more or less). 
*/ uint64_t burst; /* burst size, scaled: bits * hz */ int64_t sched_time ; /* time pipe was scheduled in ready_heap */ int64_t idle_time; /* start of pipe idle time */ char if_name[IFNAMSIZ]; struct ifnet *ifp ; int ready ; /* set if ifp != NULL and we got a signal from it */ struct dn_flow_set fs ; /* used with fixed-rate flows */ /* fields to simulate a delay profile */ #define ED_MAX_NAME_LEN 32 char name[ED_MAX_NAME_LEN]; int loss_level; int samples_no; int *samples; }; #define ED_MAX_SAMPLES_NO 1024 struct dn_pipe_max8 { struct dn_pipe8 pipe; int samples[ED_MAX_SAMPLES_NO]; }; SLIST_HEAD(dn_pipe_head8, dn_pipe8); /* * Changes from 7.2 to 8: * dn_pipe: * numbytes from int to int64_t * add burst (int64_t) * add idle_time (int64_t) * add profile * add struct dn_pipe_max * add flag DN_HAS_PROFILE * * dn_flow_queue * numbytes from u_long to int64_t * add extra_bits (int64_t) * q_time from u_int32_t to int64_t, renamed idle_time * * dn_flow_set unchanged * */ /* NOTE:XXX copied from dummynet.c */ #define O_NEXT(p, len) ((void *)((char *)p + len)) static void oid_fill(struct dn_id *oid, int len, int type, uintptr_t id) { oid->len = len; oid->type = type; oid->subtype = 0; oid->id = id; } /* make room in the buffer and move the pointer forward */ static void * o_next(struct dn_id **o, int len, int type) { struct dn_id *ret = *o; oid_fill(ret, len, type, 0); *o = O_NEXT(*o, len); return ret; } static size_t pipesize7 = sizeof(struct dn_pipe7); static size_t pipesize8 = sizeof(struct dn_pipe8); static size_t pipesizemax8 = sizeof(struct dn_pipe_max8); /* Indicate 'ipfw' version * 1: from FreeBSD 7.2 * 0: from FreeBSD 8 * -1: unknown (for now unused) * * It is updated when an IP_DUMMYNET_DEL or IP_DUMMYNET_CONFIGURE request arrives * NOTE: if an IP_DUMMYNET_GET arrives and the 'ipfw' version is unknown, * it is assumed to be the FreeBSD 8 version. */ static int is7 = 0; static int convertflags2new(int src) { int dst = 0; if (src & DNOLD_HAVE_FLOW_MASK) dst |= DN_HAVE_MASK; if (src & DNOLD_QSIZE_IS_BYTES) dst |= DN_QSIZE_BYTES; if (src & DNOLD_NOERROR) dst |= DN_NOERROR; if (src & DNOLD_IS_RED) dst |= DN_IS_RED; if (src & DNOLD_IS_GENTLE_RED) dst |= DN_IS_GENTLE_RED; if (src & DNOLD_HAS_PROFILE) dst |= DN_HAS_PROFILE; return dst; } static int convertflags2old(int src) { int dst = 0; if (src & DN_HAVE_MASK) dst |= DNOLD_HAVE_FLOW_MASK; if (src & DN_IS_RED) dst |= DNOLD_IS_RED; if (src & DN_IS_GENTLE_RED) dst |= DNOLD_IS_GENTLE_RED; if (src & DN_NOERROR) dst |= DNOLD_NOERROR; if (src & DN_HAS_PROFILE) dst |= DNOLD_HAS_PROFILE; if (src & DN_QSIZE_BYTES) dst |= DNOLD_QSIZE_IS_BYTES; return dst; } static int dn_compat_del(void *v) { struct dn_pipe7 *p = (struct dn_pipe7 *) v; struct dn_pipe8 *p8 = (struct dn_pipe8 *) v; struct { struct dn_id oid; uintptr_t a[1]; /* add more if we want a list */ } cmd; /* XXX DN_API_VERSION ??? */ oid_fill((void *)&cmd, sizeof(cmd), DN_CMD_DELETE, DN_API_VERSION); if (is7) { if (p->pipe_nr == 0 && p->fs.fs_nr == 0) return EINVAL; if (p->pipe_nr != 0 && p->fs.fs_nr != 0) return EINVAL; } else { if (p8->pipe_nr == 0 && p8->fs.fs_nr == 0) return EINVAL; if (p8->pipe_nr != 0 && p8->fs.fs_nr != 0) return EINVAL; } if (p->pipe_nr != 0) { /* pipe x delete */ cmd.a[0] = p->pipe_nr; cmd.oid.subtype = DN_LINK; } else { /* queue x delete */ cmd.oid.subtype = DN_FS; cmd.a[0] = (is7) ?
p->fs.fs_nr : p8->fs.fs_nr; } return do_config(&cmd, cmd.oid.len); } static int dn_compat_config_queue(struct dn_fs *fs, void* v) { struct dn_pipe7 *p7 = (struct dn_pipe7 *)v; struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; struct dn_flow_set *f; if (is7) f = &p7->fs; else f = &p8->fs; fs->fs_nr = f->fs_nr; fs->sched_nr = f->parent_nr; fs->flow_mask = f->flow_mask; fs->buckets = f->rq_size; fs->qsize = f->qsize; fs->plr = f->plr; fs->par[0] = f->weight; fs->flags = convertflags2new(f->flags_fs); if (fs->flags & DN_IS_GENTLE_RED || fs->flags & DN_IS_RED) { fs->w_q = f->w_q; fs->max_th = f->max_th; fs->min_th = f->min_th; fs->max_p = f->max_p; } return 0; } static int dn_compat_config_pipe(struct dn_sch *sch, struct dn_link *p, struct dn_fs *fs, void* v) { struct dn_pipe7 *p7 = (struct dn_pipe7 *)v; struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; int i = p7->pipe_nr; sch->sched_nr = i; sch->oid.subtype = 0; p->link_nr = i; fs->fs_nr = i + 2*DN_MAX_ID; fs->sched_nr = i + DN_MAX_ID; /* Common to 7 and 8 */ p->bandwidth = p7->bandwidth; p->delay = p7->delay; if (!is7) { /* FreeBSD 8 has burst */ p->burst = p8->burst; } /* fill the fifo flowset */ dn_compat_config_queue(fs, v); fs->fs_nr = i + 2*DN_MAX_ID; fs->sched_nr = i + DN_MAX_ID; /* Move scheduler related parameter from fs to sch */ sch->buckets = fs->buckets; /*XXX*/ fs->buckets = 0; if (fs->flags & DN_HAVE_MASK) { sch->flags |= DN_HAVE_MASK; fs->flags &= ~DN_HAVE_MASK; sch->sched_mask = fs->flow_mask; bzero(&fs->flow_mask, sizeof(struct ipfw_flow_id)); } return 0; } static int dn_compat_config_profile(struct dn_profile *pf, struct dn_link *p, void *v) { struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; p8->samples = &(((struct dn_pipe_max8 *)p8)->samples[0]); pf->link_nr = p->link_nr; pf->loss_level = p8->loss_level; // pf->bandwidth = p->bandwidth; //XXX bandwidth redundant? 
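The o_next()/oid_fill() helpers above, and dn_compat_configure() below, build a configuration message as a contiguous train of typed, length-prefixed objects inside one worst-case-sized buffer. A stand-alone sketch of that append-and-advance pattern; the hdr struct and obj_next name are illustrative, standing in for struct dn_id and o_next():

struct hdr { unsigned short len; unsigned short type; };

static void *obj_next(char **cur, unsigned short len, unsigned short type)
{
	struct hdr *h = (struct hdr *)*cur;

	h->len = len;       /* record this object's size and type */
	h->type = type;
	*cur += len;        /* advance the write pointer past it */
	return h;           /* caller fills in the object body */
}

/* usage: allocate the worst-case lmax up front, append each object
 * with obj_next(), then submit (cur - base) bytes, as
 * dn_compat_configure() does with do_config(). */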
pf->samples_no = p8->samples_no; strncpy(pf->name, p8->name,sizeof(pf->name)); bcopy(p8->samples, pf->samples, sizeof(pf->samples)); return 0; } /* * If p->pipe_nr != 0 the command is 'pipe x config', so need to create * the three main struct, else only a flowset is created */ static int dn_compat_configure(void *v) { struct dn_id *buf = NULL, *base; struct dn_sch *sch = NULL; struct dn_link *p = NULL; struct dn_fs *fs = NULL; struct dn_profile *pf = NULL; int lmax; int error; struct dn_pipe7 *p7 = (struct dn_pipe7 *)v; struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; int i; /* number of object to configure */ lmax = sizeof(struct dn_id); /* command header */ lmax += sizeof(struct dn_sch) + sizeof(struct dn_link) + sizeof(struct dn_fs) + sizeof(struct dn_profile); base = buf = malloc(lmax, M_DUMMYNET, M_WAITOK|M_ZERO); o_next(&buf, sizeof(struct dn_id), DN_CMD_CONFIG); base->id = DN_API_VERSION; /* pipe_nr is the same in p7 and p8 */ i = p7->pipe_nr; if (i != 0) { /* pipe config */ sch = o_next(&buf, sizeof(*sch), DN_SCH); p = o_next(&buf, sizeof(*p), DN_LINK); fs = o_next(&buf, sizeof(*fs), DN_FS); error = dn_compat_config_pipe(sch, p, fs, v); if (error) { free(buf, M_DUMMYNET); return error; } if (!is7 && p8->samples_no > 0) { /* Add profiles*/ pf = o_next(&buf, sizeof(*pf), DN_PROFILE); error = dn_compat_config_profile(pf, p, v); if (error) { free(buf, M_DUMMYNET); return error; } } } else { /* queue config */ fs = o_next(&buf, sizeof(*fs), DN_FS); error = dn_compat_config_queue(fs, v); if (error) { free(buf, M_DUMMYNET); return error; } } error = do_config(base, (char *)buf - (char *)base); if (buf) free(buf, M_DUMMYNET); return error; } int dn_compat_calc_size(void) { int need = 0; /* XXX use FreeBSD 8 struct size */ /* NOTE: * - half scheduler: schk_count/2 * - all flowset: fsk_count * - all flowset queues: queue_count * - all pipe queue: si_count */ - need += dn_cfg.schk_count * sizeof(struct dn_pipe8) / 2; - need += dn_cfg.fsk_count * sizeof(struct dn_flow_set); - need += dn_cfg.si_count * sizeof(struct dn_flow_queue8); - need += dn_cfg.queue_count * sizeof(struct dn_flow_queue8); + need += V_dn_cfg.schk_count * sizeof(struct dn_pipe8) / 2; + need += V_dn_cfg.fsk_count * sizeof(struct dn_flow_set); + need += V_dn_cfg.si_count * sizeof(struct dn_flow_queue8); + need += V_dn_cfg.queue_count * sizeof(struct dn_flow_queue8); return need; } int dn_c_copy_q (void *_ni, void *arg) { struct copy_args *a = arg; struct dn_flow_queue7 *fq7 = (struct dn_flow_queue7 *)*a->start; struct dn_flow_queue8 *fq8 = (struct dn_flow_queue8 *)*a->start; struct dn_flow *ni = (struct dn_flow *)_ni; int size = 0; /* XXX hash slot not set */ /* No difference between 7.2/8 */ fq7->len = ni->length; fq7->len_bytes = ni->len_bytes; fq7->id = ni->fid; if (is7) { size = sizeof(struct dn_flow_queue7); fq7->tot_pkts = ni->tot_pkts; fq7->tot_bytes = ni->tot_bytes; fq7->drops = ni->drops; } else { size = sizeof(struct dn_flow_queue8); fq8->tot_pkts = ni->tot_pkts; fq8->tot_bytes = ni->tot_bytes; fq8->drops = ni->drops; } *a->start += size; return 0; } int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq) { struct dn_link *l = &s->link; struct dn_fsk *f = s->fs; struct dn_pipe7 *pipe7 = (struct dn_pipe7 *)*a->start; struct dn_pipe8 *pipe8 = (struct dn_pipe8 *)*a->start; struct dn_flow_set *fs; int size = 0; if (is7) { fs = &pipe7->fs; size = sizeof(struct dn_pipe7); } else { fs = &pipe8->fs; size = sizeof(struct dn_pipe8); } /* These 4 field are the same in pipe7 and pipe8 */ pipe7->next.sle_next = (struct 
dn_pipe7 *)DN_IS_PIPE; pipe7->bandwidth = l->bandwidth; pipe7->delay = l->delay * 1000 / hz; pipe7->pipe_nr = l->link_nr - DN_MAX_ID; if (!is7) { if (s->profile) { struct dn_profile *pf = s->profile; strncpy(pipe8->name, pf->name, sizeof(pf->name)); pipe8->loss_level = pf->loss_level; pipe8->samples_no = pf->samples_no; } pipe8->burst = div64(l->burst , 8 * hz); } fs->flow_mask = s->sch.sched_mask; fs->rq_size = s->sch.buckets ? s->sch.buckets : 1; fs->parent_nr = l->link_nr - DN_MAX_ID; fs->qsize = f->fs.qsize; fs->plr = f->fs.plr; fs->w_q = f->fs.w_q; fs->max_th = f->max_th; fs->min_th = f->min_th; fs->max_p = f->fs.max_p; fs->rq_elements = nq; fs->flags_fs = convertflags2old(f->fs.flags); *a->start += size; return 0; } int dn_compat_copy_pipe(struct copy_args *a, void *_o) { int have = a->end - *a->start; int need = 0; int pipe_size = sizeof(struct dn_pipe8); int queue_size = sizeof(struct dn_flow_queue8); int n_queue = 0; /* number of queues */ struct dn_schk *s = (struct dn_schk *)_o; /* calculate needed space: * - struct dn_pipe * - if there are instances, dn_queue * n_instances */ n_queue = (s->sch.flags & DN_HAVE_MASK ? dn_ht_entries(s->siht) : (s->siht ? 1 : 0)); need = pipe_size + queue_size * n_queue; if (have < need) { D("have %d < need %d", have, need); return 1; } /* copy pipe */ dn_c_copy_pipe(s, a, n_queue); /* copy queues */ if (s->sch.flags & DN_HAVE_MASK) dn_ht_scan(s->siht, dn_c_copy_q, a); else if (s->siht) dn_c_copy_q(s->siht, a); return 0; } int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq) { struct dn_flow_set *fs = (struct dn_flow_set *)*a->start; fs->next.sle_next = (struct dn_flow_set *)DN_IS_QUEUE; fs->fs_nr = f->fs.fs_nr; fs->qsize = f->fs.qsize; fs->plr = f->fs.plr; fs->w_q = f->fs.w_q; fs->max_th = f->max_th; fs->min_th = f->min_th; fs->max_p = f->fs.max_p; fs->flow_mask = f->fs.flow_mask; fs->rq_elements = nq; fs->rq_size = (f->fs.buckets ? f->fs.buckets : 1); fs->parent_nr = f->fs.sched_nr; fs->weight = f->fs.par[0]; fs->flags_fs = convertflags2old(f->fs.flags); *a->start += sizeof(struct dn_flow_set); return 0; } int dn_compat_copy_queue(struct copy_args *a, void *_o) { int have = a->end - *a->start; int need = 0; int fs_size = sizeof(struct dn_flow_set); int queue_size = sizeof(struct dn_flow_queue8); struct dn_fsk *fs = (struct dn_fsk *)_o; int n_queue = 0; /* number of queues */ n_queue = (fs->fs.flags & DN_HAVE_MASK ? dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0)); need = fs_size + queue_size * n_queue; if (have < need) { D("have < need"); return 1; } /* copy flowset */ dn_c_copy_fs(fs, a, n_queue); /* copy queues */ if (fs->fs.flags & DN_HAVE_MASK) dn_ht_scan(fs->qht, dn_c_copy_q, a); else if (fs->qht) dn_c_copy_q(fs->qht, a); return 0; } int copy_data_helper_compat(void *_o, void *_arg) { struct copy_args *a = _arg; if (a->type == DN_COMPAT_PIPE) { struct dn_schk *s = _o; if (s->sch.oid.subtype != 1 || s->sch.sched_nr <= DN_MAX_ID) { return 0; /* not old type */ } /* copy pipe parameters, and if instance exists, copy * other parameters and eventually queues. */ if(dn_compat_copy_pipe(a, _o)) return DNHT_SCAN_END; } else if (a->type == DN_COMPAT_QUEUE) { struct dn_fsk *fs = _o; if (fs->fs.fs_nr >= DN_MAX_ID) return 0; if (dn_compat_copy_queue(a, _o)) return DNHT_SCAN_END; } return 0; } /* Main function to manage old requests */ int ip_dummynet_compat(struct sockopt *sopt) { int error=0; void *v = NULL; struct dn_id oid; /* Length of data, used to found ipfw version... 
*/ int len = sopt->sopt_valsize; /* len can be 0 if command was dummynet_flush */ if (len == pipesize7) { D("setting compatibility with FreeBSD 7.2"); is7 = 1; } else if (len == pipesize8 || len == pipesizemax8) { D("setting compatibility with FreeBSD 8"); is7 = 0; } switch (sopt->sopt_name) { default: printf("dummynet: -- unknown option %d", sopt->sopt_name); error = EINVAL; break; case IP_DUMMYNET_FLUSH: oid_fill(&oid, sizeof(oid), DN_CMD_FLUSH, DN_API_VERSION); do_config(&oid, oid.len); break; case IP_DUMMYNET_DEL: v = malloc(len, M_TEMP, M_WAITOK); error = sooptcopyin(sopt, v, len, len); if (error) break; error = dn_compat_del(v); free(v, M_TEMP); break; case IP_DUMMYNET_CONFIGURE: v = malloc(len, M_TEMP, M_WAITOK); error = sooptcopyin(sopt, v, len, len); if (error) break; error = dn_compat_configure(v); free(v, M_TEMP); break; case IP_DUMMYNET_GET: { void *buf; int ret; int original_size = sopt->sopt_valsize; int size; ret = dummynet_get(sopt, &buf); if (ret) return 0;//XXX ? size = sopt->sopt_valsize; sopt->sopt_valsize = original_size; D("size=%d, buf=%p", size, buf); ret = sooptcopyout(sopt, buf, size); if (ret) printf(" %s ERROR sooptcopyout\n", __FUNCTION__); if (buf) free(buf, M_DUMMYNET); } } return error; } diff --git a/sys/netpfil/ipfw/ip_dn_io.c b/sys/netpfil/ipfw/ip_dn_io.c index 4a65bd0ef798..39bea3eb99dd 100644 --- a/sys/netpfil/ipfw/ip_dn_io.c +++ b/sys/netpfil/ipfw/ip_dn_io.c @@ -1,957 +1,967 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Dummynet portions related to packet handling. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include /* NET_EPOCH_... 
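One detail of ip_dummynet_compat() above worth spelling out: it infers which userland ipfw generation issued the request purely from the size of the struct it passed in, and otherwise keeps the previous guess. A reduced sketch of that dispatch, with illustrative names standing in for is7 and the pipesize globals:

static int is7_sketch;

static void detect_version(size_t len, size_t size7, size_t size8,
    size_t sizemax8)
{
	if (len == size7)
		is7_sketch = 1;                 /* FreeBSD 7.2 struct layout */
	else if (len == size8 || len == sizemax8)
		is7_sketch = 0;                 /* FreeBSD 8 struct layout */
	/* any other length (e.g. a flush, len == 0) leaves the guess alone */
}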
*/ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #include #include #ifdef NEW_AQM #include #endif #include /* * We keep a private variable for the simulation time, but we could * probably use an existing one ("softticks" in sys/kern/kern_timeout.c) - * instead of dn_cfg.curr_time + * instead of V_dn_cfg.curr_time */ - -struct dn_parms dn_cfg; -//VNET_DEFINE(struct dn_parms, _base_dn_cfg); +VNET_DEFINE(struct dn_parms, dn_cfg); +#define V_dn_cfg VNET(dn_cfg) /* * We use a heap to store entities for which we have pending timer events. * The heap is checked at every tick and all entities with expired events * are extracted. */ MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap"); extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *); #ifdef SYSCTL_NODE /* * Because of the way the SYSBEGIN/SYSEND macros work on other * platforms, there should not be functions between them. * So keep the handlers outside the block. */ static int sysctl_hash_size(SYSCTL_HANDLER_ARGS) { int error, value; - value = dn_cfg.hash_size; + value = V_dn_cfg.hash_size; error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 16 || value > 65536) return (EINVAL); - dn_cfg.hash_size = value; + V_dn_cfg.hash_size = value; return (0); } static int sysctl_limits(SYSCTL_HANDLER_ARGS) { int error; long value; if (arg2 != 0) - value = dn_cfg.slot_limit; + value = V_dn_cfg.slot_limit; else - value = dn_cfg.byte_limit; + value = V_dn_cfg.byte_limit; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (arg2 != 0) { if (value < 1) return (EINVAL); - dn_cfg.slot_limit = value; + V_dn_cfg.slot_limit = value; } else { if (value < 1500) return (EINVAL); - dn_cfg.byte_limit = value; + V_dn_cfg.byte_limit = value; } return (0); } SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); #ifdef NEW_AQM SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Dummynet"); #else static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Dummynet"); #endif -/* wrapper to pass dn_cfg fields to SYSCTL_* */ -//#define DC(x) (&(VNET_NAME(_base_dn_cfg).x)) -#define DC(x) (&(dn_cfg.x)) +/* wrapper to pass V_dn_cfg fields to SYSCTL_* */ +#define DC(x) (&(VNET_NAME(dn_cfg).x)) + /* parameters */ SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_hash_size, "I", "Default hash table size"); SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 1, sysctl_limits, "L", "Upper limit in slots for pipe queue."); SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_limits, "L", "Upper limit in bytes for pipe queue."); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast, CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io."); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, DC(debug), 0, "Dummynet debug level"); /* RED parameters */ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED Medium packet size"); 
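/*
 * The heart of this change: dn_cfg moves from a single global into
 * per-VNET storage, and V_dn_cfg resolves to the copy belonging to
 * whichever vnet is current at the point of access. That is why the
 * sysctl handlers above only needed s/dn_cfg/V_dn_cfg/. A toy model of
 * the mechanism in plain C (not the real VNET machinery):
 */
#include <stdio.h>

struct dn_parms { int hash_size; };

static struct dn_parms vnet_storage[2];	/* one copy per "vnet" */
static int curvnet;			/* what CURVNET_SET() would set */
#define V_dn_cfg (vnet_storage[curvnet])

int
main(void)
{
	curvnet = 0; V_dn_cfg.hash_size = 64;	/* configure vnet 0 */
	curvnet = 1; V_dn_cfg.hash_size = 256;	/* configure vnet 1 */
	curvnet = 0;
	printf("vnet 0 hash_size: %d\n", V_dn_cfg.hash_size);	/* 64 */
	return 0;
}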
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED Max packet size"); /* time adjustment */ SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta, CTLFLAG_RD, DC(tick_delta), 0, "Last vs standard tick difference (usec)."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum, CTLFLAG_RD, DC(tick_delta_sum), 0, "Accumulated tick difference (usec)."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment, CTLFLAG_RD, DC(tick_adjustment), 0, "Tick adjustments done."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff, CTLFLAG_RD, DC(tick_diff), 0, "Adjusted vs non-adjusted curr_time difference (ticks)."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost, CTLFLAG_RD, DC(tick_lost), 0, "Number of ticks coalesced by dummynet taskqueue."); /* Drain parameters */ SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes"); SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle, CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes"); /* statistics */ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count, CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count, CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count, CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count, CTLFLAG_RD, DC(queue_count), 0, "Number of queues"); SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt, CTLFLAG_RD, DC(io_pkt), 0, "Number of packets passed to dummynet."); SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast, CTLFLAG_RD, DC(io_pkt_fast), 0, "Number of packets bypassed dummynet scheduler."); SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop, CTLFLAG_RD, DC(io_pkt_drop), 0, "Number of packets dropped by dummynet."); #undef DC SYSEND #endif static void dummynet_send(struct mbuf *); /* * Return the mbuf tag holding the dummynet state (it should * be the first one on the list). */ struct dn_pkt_tag * dn_tag_get(struct mbuf *m) { struct m_tag *mtag = m_tag_first(m); #ifdef NEW_AQM /* XXX: to skip ts m_tag. For Debugging only*/ if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) { m_tag_delete(m,mtag); mtag = m_tag_first(m); D("skip TS tag"); } #endif KASSERT(mtag != NULL && mtag->m_tag_cookie == MTAG_ABI_COMPAT && mtag->m_tag_id == PACKET_TAG_DUMMYNET, ("packet on dummynet queue w/o dummynet tag!")); return (struct dn_pkt_tag *)(mtag+1); } #ifndef NEW_AQM static inline void mq_append(struct mq *q, struct mbuf *m) { #ifdef USERSPACE // buffers from netmap need to be copied // XXX note that the routine is not expected to fail ND("append %p to %p", m, q); if (m->m_flags & M_STACK) { struct mbuf *m_new; void *p; int l, ofs; ofs = m->m_data - m->__m_extbuf; // XXX allocate MGETHDR(m_new, M_NOWAIT, MT_DATA); ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p", m, m->__m_extbuf, m->__m_extlen, ofs, m_new); p = m_new->__m_extbuf; /* new pointer */ l = m_new->__m_extlen; /* new len */ if (l <= m->__m_extlen) { panic("extlen too large"); } *m_new = *m; // copy m_new->m_flags &= ~M_STACK; m_new->__m_extbuf = p; // point to new buffer _pkt_copy(m->__m_extbuf, p, m->__m_extlen); m_new->m_data = p + ofs; m = m_new; } #endif /* USERSPACE */ if (q->head == NULL) q->head = m; else q->tail->m_nextpkt = m; q->count++; q->tail = m; m->m_nextpkt = NULL; } #endif /* * Dispose a list of packet. 
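/*
 * mq_append() above is a constant-time tail insertion on a singly linked
 * list threaded through m_nextpkt. The same pattern in a standalone
 * sketch with a stand-in packet type:
 */
#include <stdio.h>

struct pkt { struct pkt *next; int id; };
struct mq  { struct pkt *head, *tail; int count; };

static void
mq_append(struct mq *q, struct pkt *p)
{
	if (q->head == NULL)
		q->head = p;		/* first packet becomes the head */
	else
		q->tail->next = p;	/* otherwise chain after the tail */
	q->count++;
	q->tail = p;
	p->next = NULL;
}

int
main(void)
{
	struct mq q = { NULL, NULL, 0 };
	struct pkt a = { NULL, 1 }, b = { NULL, 2 };

	mq_append(&q, &a);
	mq_append(&q, &b);
	for (struct pkt *p = q.head; p != NULL; p = p->next)
		printf("pkt %d\n", p->id);
	return 0;
}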
Use a functions so if we need to do * more work, this is a central point to do it. */ void dn_free_pkts(struct mbuf *mnext) { struct mbuf *m; while ((m = mnext) != NULL) { mnext = m->m_nextpkt; FREE_PKT(m); } } static int red_drops (struct dn_queue *q, int len) { /* * RED algorithm * * RED calculates the average queue size (avg) using a low-pass filter * with an exponential weighted (w_q) moving average: * avg <- (1-w_q) * avg + w_q * q_size * where q_size is the queue length (measured in bytes or * packets). * * If q_size == 0, we compute the idle time for the link, and set * avg = (1 - w_q)^(idle/s) * where s is the time needed for transmitting a medium-sized packet. * * Now, if avg < min_th the packet is enqueued. * If avg > max_th the packet is dropped. Otherwise, the packet is * dropped with probability P function of avg. */ struct dn_fsk *fs = q->fs; int64_t p_b = 0; /* Queue in bytes or packets? */ uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ? q->ni.len_bytes : q->ni.length; /* Average queue size estimation. */ if (q_size != 0) { /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */ int diff = SCALE(q_size) - q->avg; int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q); q->avg += (int)v; } else { /* * Queue is empty, find for how long the queue has been * empty and use a lookup table for computing * (1 - * w_q)^(idle_time/s) where s is the time to send a * (small) packet. * XXX check wraps... */ if (q->avg) { - u_int t = div64((dn_cfg.curr_time - q->q_time), fs->lookup_step); + u_int t = div64((V_dn_cfg.curr_time - q->q_time), fs->lookup_step); q->avg = (t < fs->lookup_depth) ? SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; } } /* Should i drop? */ if (q->avg < fs->min_th) { q->count = -1; return (0); /* accept packet */ } if (q->avg >= fs->max_th) { /* average queue >= max threshold */ if (fs->fs.flags & DN_IS_ECN) return (1); if (fs->fs.flags & DN_IS_GENTLE_RED) { /* * According to Gentle-RED, if avg is greater than * max_th the packet is dropped with a probability * p_b = c_3 * avg - c_4 * where c_3 = (1 - max_p) / max_th * c_4 = 1 - 2 * max_p */ p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4; } else { q->count = -1; return (1); } } else if (q->avg > fs->min_th) { if (fs->fs.flags & DN_IS_ECN) return (1); /* * We compute p_b using the linear dropping function * p_b = c_1 * avg - c_2 * where c_1 = max_p / (max_th - min_th) * c_2 = max_p * min_th / (max_th - min_th) */ p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2; } if (fs->fs.flags & DN_QSIZE_BYTES) p_b = div64((p_b * len) , fs->max_pkt_size); if (++q->count == 0) q->random = random() & 0xffff; else { /* * q->count counts packets arrived since last drop, so a greater * value of q->count means a greater packet drop probability. */ if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) { q->count = 0; /* After a drop we calculate a new random value. */ q->random = random() & 0xffff; return (1); /* drop */ } } /* End of RED algorithm. 
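/*
 * The EWMA step at the top of red_drops(), avg <- avg + w_q*(q_size - avg),
 * runs in fixed point. A standalone numeric sketch of how the scaled
 * average creeps toward the instantaneous queue length; the 16-bit shift
 * here is illustrative, the kernel uses its own SCALE()/SCALE_MUL()
 * macros and shift width.
 */
#include <stdio.h>
#include <stdint.h>

#define SHIFT		16
#define SCALE(x)	((int64_t)(x) << SHIFT)
#define SCALE_MUL(a, b)	(((a) * (b)) >> SHIFT)

int
main(void)
{
	int64_t avg = 0;
	int64_t w_q = SCALE(1) / 512;	/* weight 1/512, scaled */
	int q_size = 40;		/* instantaneous length, packets */

	for (int i = 0; i < 5; i++) {
		int64_t diff = SCALE(q_size) - avg;

		avg += SCALE_MUL(diff, w_q);
		printf("step %d: avg = %.4f pkts\n", i,
		    (double)avg / (double)SCALE(1));
	}
	return 0;
}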
*/ return (0); /* accept */ } /* * ECN/ECT Processing (partially adopted from altq) */ #ifndef NEW_AQM static #endif int ecn_mark(struct mbuf* m) { struct ip *ip; ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); switch (ip->ip_v) { case IPVERSION: { uint16_t old; if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) return (0); /* not-ECT */ if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) return (1); /* already marked */ /* * ecn-capable but not marked, * mark CE and update checksum */ old = *(uint16_t *)ip; ip->ip_tos |= IPTOS_ECN_CE; ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip); return (1); } #ifdef INET6 case (IPV6_VERSION >> 4): { struct ip6_hdr *ip6 = (struct ip6_hdr *)ip; u_int32_t flowlabel; flowlabel = ntohl(ip6->ip6_flow); if ((flowlabel >> 28) != 6) return (0); /* version mismatch! */ if ((flowlabel & (IPTOS_ECN_MASK << 20)) == (IPTOS_ECN_NOTECT << 20)) return (0); /* not-ECT */ if ((flowlabel & (IPTOS_ECN_MASK << 20)) == (IPTOS_ECN_CE << 20)) return (1); /* already marked */ /* * ecn-capable but not marked, mark CE */ flowlabel |= (IPTOS_ECN_CE << 20); ip6->ip6_flow = htonl(flowlabel); return (1); } #endif } return (0); } /* * Enqueue a packet in q, subject to space and queue management policy * (whose parameters are in q->fs). * Update stats for the queue and the scheduler. * Return 0 on success, 1 on drop. The packet is consumed anyways. */ int dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop) { struct dn_fs *f; struct dn_flow *ni; /* stats for scheduler instance */ uint64_t len; if (q->fs == NULL || q->_si == NULL) { printf("%s fs %p si %p, dropping\n", __FUNCTION__, q->fs, q->_si); FREE_PKT(m); return 1; } f = &(q->fs->fs); ni = &q->_si->ni; len = m->m_pkthdr.len; /* Update statistics, then check reasons to drop pkt. */ q->ni.tot_bytes += len; q->ni.tot_pkts++; ni->tot_bytes += len; ni->tot_pkts++; if (drop) goto drop; if (f->plr && random() < f->plr) goto drop; #ifdef NEW_AQM /* Call AQM enqueue function */ if (q->fs->aqmfp) return q->fs->aqmfp->enqueue(q ,m); #endif if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) { if (!(f->flags & DN_IS_ECN) || !ecn_mark(m)) goto drop; } if (f->flags & DN_QSIZE_BYTES) { if (q->ni.len_bytes > f->qsize) goto drop; } else if (q->ni.length >= f->qsize) { goto drop; } mq_append(&q->mq, m); q->ni.length++; q->ni.len_bytes += len; ni->length++; ni->len_bytes += len; return (0); drop: - dn_cfg.io_pkt_drop++; + V_dn_cfg.io_pkt_drop++; q->ni.drops++; ni->drops++; FREE_PKT(m); return (1); } /* * Fetch packets from the delay line which are due now. If there are * leftover packets, reinsert the delay line in the heap. * Runs under scheduler lock. */ static void transmit_event(struct mq *q, struct delay_line *dline, uint64_t now) { struct mbuf *m; struct dn_pkt_tag *pkt = NULL; dline->oid.subtype = 0; /* not in heap */ while ((m = dline->mq.head) != NULL) { pkt = dn_tag_get(m); if (!DN_KEY_LEQ(pkt->output_time, now)) break; dline->mq.head = m->m_nextpkt; dline->mq.count--; mq_append(q, m); } if (m != NULL) { dline->oid.subtype = 1; /* in heap */ - heap_insert(&dn_cfg.evheap, pkt->output_time, dline); + heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline); } } /* * Convert the additional MAC overheads/delays into an equivalent * number of bits for the given data rate. The samples are * in milliseconds so we need to divide by 1000. 
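/*
 * ecn_mark() above rewrites the TOS byte and repairs ip_sum incrementally
 * rather than recomputing the checksum over the whole header. A
 * standalone sketch of the RFC 1624 (eq. 3) update, HC' = ~(~HC + ~m + m'),
 * which is the style of adjustment the kernel's cksum_adjust() helper
 * performs; the input values below are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t
cksum_adjust(uint16_t cksum, uint16_t old_w, uint16_t new_w)
{
	uint32_t sum;

	sum = (uint16_t)~cksum + (uint16_t)~old_w + new_w;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}

int
main(void)
{
	/* flip ECT(0) (0b10) to CE (0b11) in the TOS byte of the first
	 * 16-bit header word; 0xb1e6 is an arbitrary starting checksum */
	uint16_t old_w = 0x4502, new_w = 0x4503;

	printf("adjusted checksum: 0x%04x\n",
	    cksum_adjust(0xb1e6, old_w, new_w));
	return 0;
}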
*/ static uint64_t extra_bits(struct mbuf *m, struct dn_schk *s) { int index; uint64_t bits; struct dn_profile *pf = s->profile; if (!pf || pf->samples_no == 0) return 0; index = random() % pf->samples_no; bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000); if (index >= pf->loss_level) { struct dn_pkt_tag *dt = dn_tag_get(m); if (dt) dt->dn_dir = DIR_DROP; } return bits; } /* * Send traffic from a scheduler instance due by 'now'. * Return a pointer to the head of the queue. */ static struct mbuf * serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now) { struct mq def_q; struct dn_schk *s = si->sched; struct mbuf *m = NULL; int delay_line_idle = (si->dline.mq.head == NULL); int done, bw; if (q == NULL) { q = &def_q; q->head = NULL; } bw = s->link.bandwidth; si->kflags &= ~DN_ACTIVE; if (bw > 0) si->credit += (now - si->sched_time) * bw; else si->credit = 0; si->sched_time = now; done = 0; while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) { uint64_t len_scaled; done++; len_scaled = (bw == 0) ? 0 : hz * (m->m_pkthdr.len * 8 + extra_bits(m, s)); si->credit -= len_scaled; /* Move packet in the delay line */ - dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay ; + dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay ; mq_append(&si->dline.mq, m); } /* * If credit >= 0 the instance is idle, mark time. * Otherwise put back in the heap, and adjust the output * time of the last inserted packet, m, which was too early. */ if (si->credit >= 0) { si->idle_time = now; } else { uint64_t t; KASSERT (bw > 0, ("bw=0 and credit<0 ?")); t = div64(bw - 1 - si->credit, bw); if (m) dn_tag_get(m)->output_time += t; si->kflags |= DN_ACTIVE; - heap_insert(&dn_cfg.evheap, now + t, si); + heap_insert(&V_dn_cfg.evheap, now + t, si); } if (delay_line_idle && done) transmit_event(q, &si->dline, now); return q->head; } /* * The timer handler for dummynet. Time is computed in ticks, but * but the code is tolerant to the actual rate at which this is called. * Once complete, the function reschedules itself for the next tick. */ void dummynet_task(void *context, int pending) { struct timeval t; struct mq q = { NULL, NULL }; /* queue to accumulate results */ + struct epoch_tracker et; - CURVNET_SET((struct vnet *)context); + VNET_ITERATOR_DECL(vnet_iter); + VNET_LIST_RLOCK(); + NET_EPOCH_ENTER(et); - DN_BH_WLOCK(); + VNET_FOREACH(vnet_iter) { + memset(&q, 0, sizeof(struct mq)); + CURVNET_SET(vnet_iter); - /* Update number of lost(coalesced) ticks. */ - dn_cfg.tick_lost += pending - 1; + DN_BH_WLOCK(); - getmicrouptime(&t); - /* Last tick duration (usec). */ - dn_cfg.tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 + - (t.tv_usec - dn_cfg.prev_t.tv_usec); - /* Last tick vs standard tick difference (usec). */ - dn_cfg.tick_delta = (dn_cfg.tick_last * hz - 1000000) / hz; - /* Accumulated tick difference (usec). */ - dn_cfg.tick_delta_sum += dn_cfg.tick_delta; + /* Update number of lost(coalesced) ticks. */ + V_dn_cfg.tick_lost += pending - 1; - dn_cfg.prev_t = t; + getmicrouptime(&t); + /* Last tick duration (usec). */ + V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 + + (t.tv_usec - V_dn_cfg.prev_t.tv_usec); + /* Last tick vs standard tick difference (usec). */ + V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz; + /* Accumulated tick difference (usec). */ + V_dn_cfg.tick_delta_sum += V_dn_cfg.tick_delta; - /* - * Adjust curr_time if the accumulated tick difference is - * greater than the 'standard' tick. 
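/*
 * serve_sched() above keeps credit and packet cost in "bit-ticks": one
 * tick of link time earns 'bandwidth' worth of credit, and a packet
 * costs hz * bits, so the scheduler never divides by hz per packet.
 * A worked example with made-up numbers (1 Mbit/s link, hz = 1000,
 * one 1500-byte packet):
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const int hz = 1000;		/* ticks per second */
	const int64_t bw = 1000000;	/* link bandwidth, bits/s */
	int64_t credit = 0;

	credit += 1 * bw;		/* one tick elapsed: earn bw */

	int len = 1500;					/* bytes */
	int64_t cost = (int64_t)hz * (len * 8);		/* bit-ticks */
	credit -= cost;

	/* negative credit -> ticks to wait before the next dequeue,
	 * rounded up the same way as div64(bw - 1 - credit, bw) */
	if (credit < 0) {
		int64_t t = (bw - 1 - credit) / bw;

		/* 1500 B at 1 Mbit/s is 12 ms; 1 tick already granted */
		printf("sleep %lld ticks\n", (long long)t);	/* 11 */
	}
	return 0;
}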
Since curr_time should - * be monotonically increasing, we do positive adjustments - * as required, and throttle curr_time in case of negative - * adjustment. - */ - dn_cfg.curr_time++; - if (dn_cfg.tick_delta_sum - tick >= 0) { - int diff = dn_cfg.tick_delta_sum / tick; - - dn_cfg.curr_time += diff; - dn_cfg.tick_diff += diff; - dn_cfg.tick_delta_sum %= tick; - dn_cfg.tick_adjustment++; - } else if (dn_cfg.tick_delta_sum + tick <= 0) { - dn_cfg.curr_time--; - dn_cfg.tick_diff--; - dn_cfg.tick_delta_sum += tick; - dn_cfg.tick_adjustment++; - } + V_dn_cfg.prev_t = t; - /* serve pending events, accumulate in q */ - for (;;) { - struct dn_id *p; /* generic parameter to handler */ + /* + * Adjust curr_time if the accumulated tick difference is + * greater than the 'standard' tick. Since curr_time should + * be monotonically increasing, we do positive adjustments + * as required, and throttle curr_time in case of negative + * adjustment. + */ + V_dn_cfg.curr_time++; + if (V_dn_cfg.tick_delta_sum - tick >= 0) { + int diff = V_dn_cfg.tick_delta_sum / tick; + + V_dn_cfg.curr_time += diff; + V_dn_cfg.tick_diff += diff; + V_dn_cfg.tick_delta_sum %= tick; + V_dn_cfg.tick_adjustment++; + } else if (V_dn_cfg.tick_delta_sum + tick <= 0) { + V_dn_cfg.curr_time--; + V_dn_cfg.tick_diff--; + V_dn_cfg.tick_delta_sum += tick; + V_dn_cfg.tick_adjustment++; + } - if (dn_cfg.evheap.elements == 0 || - DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key)) - break; - p = HEAP_TOP(&dn_cfg.evheap)->object; - heap_extract(&dn_cfg.evheap, NULL); + /* serve pending events, accumulate in q */ + for (;;) { + struct dn_id *p; /* generic parameter to handler */ - if (p->type == DN_SCH_I) { - serve_sched(&q, (struct dn_sch_inst *)p, dn_cfg.curr_time); - } else { /* extracted a delay line */ - transmit_event(&q, (struct delay_line *)p, dn_cfg.curr_time); + if (V_dn_cfg.evheap.elements == 0 || + DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key)) + break; + p = HEAP_TOP(&V_dn_cfg.evheap)->object; + heap_extract(&V_dn_cfg.evheap, NULL); + if (p->type == DN_SCH_I) { + serve_sched(&q, (struct dn_sch_inst *)p, V_dn_cfg.curr_time); + } else { /* extracted a delay line */ + transmit_event(&q, (struct delay_line *)p, V_dn_cfg.curr_time); + } } + if (V_dn_cfg.expire && ++V_dn_cfg.expire_cycle >= V_dn_cfg.expire) { + V_dn_cfg.expire_cycle = 0; + dn_drain_scheduler(); + dn_drain_queue(); + } + DN_BH_WUNLOCK(); + if (q.head != NULL) + dummynet_send(q.head); + + CURVNET_RESTORE(); } - if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) { - dn_cfg.expire_cycle = 0; - dn_drain_scheduler(); - dn_drain_queue(); - } + NET_EPOCH_EXIT(et); + VNET_LIST_RUNLOCK(); + /* Schedule our next run. */ dn_reschedule(); - DN_BH_WUNLOCK(); - if (q.head != NULL) - dummynet_send(q.head); - CURVNET_RESTORE(); } /* * forward a chain of packets to the proper destination. * This runs outside the dummynet lock. */ static void dummynet_send(struct mbuf *m) { struct mbuf *n; NET_EPOCH_ASSERT(); for (; m != NULL; m = n) { struct ifnet *ifp = NULL; /* gcc 3.4.6 complains */ struct m_tag *tag; int dst; n = m->m_nextpkt; m->m_nextpkt = NULL; tag = m_tag_first(m); if (tag == NULL) { /* should not happen */ dst = DIR_DROP; } else { struct dn_pkt_tag *pkt = dn_tag_get(m); /* extract the dummynet info, rename the tag * to carry reinject info. 
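/*
 * The drift compensation above, in isolation: each run compares the
 * measured tick length against the nominal one and accumulates the
 * error in tick_delta_sum; once a whole tick of error has built up,
 * curr_time jumps forward (or is throttled) so simulated time tracks
 * wall-clock time. A standalone simulation of ticks that run 10% long:
 */
#include <stdio.h>

int
main(void)
{
	const long tick = 1000;		/* nominal tick, usec (hz = 1000) */
	long delta_sum = 0;
	unsigned long curr_time = 0;

	for (int i = 0; i < 10; i++) {
		long tick_last = 1100;		/* measured: ticks run long */

		delta_sum += tick_last - tick;	/* accumulate the error */
		curr_time++;			/* the regular advance */
		if (delta_sum - tick >= 0) {	/* a full tick of error */
			curr_time += delta_sum / tick;	/* jump forward */
			delta_sum %= tick;
		} else if (delta_sum + tick <= 0) {	/* running fast */
			curr_time--;			/* throttle */
			delta_sum += tick;
		}
	}
	printf("curr_time=%lu (10 wall ticks of 1.1 ms -> 11)\n", curr_time);
	return 0;
}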
*/ if (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2) && pkt->ifp == NULL) { dst = DIR_DROP; } else { dst = pkt->dn_dir; ifp = pkt->ifp; tag->m_tag_cookie = MTAG_IPFW_RULE; tag->m_tag_id = 0; } } switch (dst) { case DIR_OUT: ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL); break ; case DIR_IN : netisr_dispatch(NETISR_IP, m); break; #ifdef INET6 case DIR_IN | PROTO_IPV6: netisr_dispatch(NETISR_IPV6, m); break; case DIR_OUT | PROTO_IPV6: ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL); break; #endif case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */ if (bridge_dn_p != NULL) ((*bridge_dn_p)(m, ifp)); else printf("dummynet: if_bridge not loaded\n"); break; case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */ /* * The Ethernet code assumes the Ethernet header is * contiguous in the first mbuf header. * Insure this is true. */ if (m->m_len < ETHER_HDR_LEN && (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) { printf("dummynet/ether: pullup failed, " "dropping packet\n"); break; } ether_demux(m->m_pkthdr.rcvif, m); break; case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */ ether_output_frame(ifp, m); break; case DIR_DROP: /* drop the packet after some time */ FREE_PKT(m); break; default: printf("dummynet: bad switch %d!\n", dst); FREE_PKT(m); break; } } } static inline int tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa) { struct dn_pkt_tag *dt; struct m_tag *mtag; mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*dt), M_NOWAIT | M_ZERO); if (mtag == NULL) return 1; /* Cannot allocate packet header. */ m_tag_prepend(m, mtag); /* Attach to mbuf chain. */ dt = (struct dn_pkt_tag *)(mtag + 1); dt->rule = fwa->rule; dt->rule.info &= IPFW_ONEPASS; /* only keep this info */ dt->dn_dir = dir; dt->ifp = fwa->flags & IPFW_ARGS_OUT ? fwa->ifp : NULL; /* dt->output tame is updated as we move through */ - dt->output_time = dn_cfg.curr_time; + dt->output_time = V_dn_cfg.curr_time; dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0; return 0; } /* * dummynet hook for packets. * We use the argument to locate the flowset fs and the sched_set sch * associated to it. The we apply flow_mask and sched_mask to * determine the queue and scheduler instances. */ int dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa) { struct mbuf *m = *m0; struct dn_fsk *fs = NULL; struct dn_sch_inst *si; struct dn_queue *q = NULL; /* default */ int fs_id, dir; fs_id = (fwa->rule.info & IPFW_INFO_MASK) + ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0); /* XXXGL: convert args to dir */ if (fwa->flags & IPFW_ARGS_IN) dir = DIR_IN; else dir = DIR_OUT; if (fwa->flags & IPFW_ARGS_ETHER) dir |= PROTO_LAYER2; else if (fwa->flags & IPFW_ARGS_IP6) dir |= PROTO_IPV6; DN_BH_WLOCK(); - dn_cfg.io_pkt++; + V_dn_cfg.io_pkt++; /* we could actually tag outside the lock, but who cares... */ if (tag_mbuf(m, dir, fwa)) goto dropit; /* XXX locate_flowset could be optimised with a direct ref. */ - fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL); + fs = dn_ht_find(V_dn_cfg.fshash, fs_id, 0, NULL); if (fs == NULL) goto dropit; /* This queue/pipe does not exist! */ if (fs->sched == NULL) /* should not happen */ goto dropit; /* find scheduler instance, possibly applying sched_mask */ si = ipdn_si_find(fs->sched, &(fwa->f_id)); if (si == NULL) goto dropit; /* * If the scheduler supports multiple queues, find the right one * (otherwise it will be ignored by enqueue). 
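/*
 * dn_tag_get() and tag_mbuf() rely on the tag payload being laid out
 * immediately after the m_tag header, so '(mtag + 1)' is the
 * struct dn_pkt_tag. The same pointer arithmetic in miniature, with
 * stand-in types:
 */
#include <stdio.h>
#include <stdlib.h>

struct m_tag   { int id; };	/* stand-in for the real m_tag header */
struct pkt_tag { int dn_dir; };	/* stand-in for struct dn_pkt_tag */

int
main(void)
{
	struct m_tag *mtag = malloc(sizeof(struct m_tag) +
	    sizeof(struct pkt_tag));

	if (mtag == NULL)
		return 1;
	/* the payload begins right past the header */
	struct pkt_tag *dt = (struct pkt_tag *)(mtag + 1);

	dt->dn_dir = 3;
	printf("header %p, payload %p\n", (void *)mtag, (void *)dt);
	free(mtag);
	return 0;
}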
*/ if (fs->sched->fp->flags & DN_MULTIQUEUE) { q = ipdn_q_find(fs, si, &(fwa->f_id)); if (q == NULL) goto dropit; } if (fs->sched->fp->enqueue(si, q, m)) { /* packet was dropped by enqueue() */ m = *m0 = NULL; /* dn_enqueue already increases io_pkt_drop */ - dn_cfg.io_pkt_drop--; + V_dn_cfg.io_pkt_drop--; goto dropit; } if (si->kflags & DN_ACTIVE) { m = *m0 = NULL; /* consumed */ goto done; /* already active, nothing to do */ } /* compute the initial allowance */ - if (si->idle_time < dn_cfg.curr_time) { + if (si->idle_time < V_dn_cfg.curr_time) { /* Do this only on the first packet on an idle pipe */ struct dn_link *p = &fs->sched->link; - si->sched_time = dn_cfg.curr_time; - si->credit = dn_cfg.io_fast ? p->bandwidth : 0; + si->sched_time = V_dn_cfg.curr_time; + si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0; if (p->burst) { - uint64_t burst = (dn_cfg.curr_time - si->idle_time) * p->bandwidth; + uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) * p->bandwidth; if (burst > p->burst) burst = p->burst; si->credit += burst; } } /* pass through scheduler and delay line */ - m = serve_sched(NULL, si, dn_cfg.curr_time); + m = serve_sched(NULL, si, V_dn_cfg.curr_time); /* optimization -- pass it back to ipfw for immediate send */ /* XXX Don't call dummynet_send() if scheduler return the packet * just enqueued. This avoid a lock order reversal. * */ - if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0 ) { + if (/*V_dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0 ) { /* fast io, rename the tag * to carry reinject info. */ struct m_tag *tag = m_tag_first(m); tag->m_tag_cookie = MTAG_IPFW_RULE; tag->m_tag_id = 0; - dn_cfg.io_pkt_fast++; + V_dn_cfg.io_pkt_fast++; if (m->m_nextpkt != NULL) { printf("dummynet: fast io: pkt chain detected!\n"); m->m_nextpkt = NULL; } m = NULL; } else { *m0 = NULL; } done: DN_BH_WUNLOCK(); if (m) dummynet_send(m); return 0; dropit: - dn_cfg.io_pkt_drop++; + V_dn_cfg.io_pkt_drop++; DN_BH_WUNLOCK(); if (m) FREE_PKT(m); *m0 = NULL; return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS; } diff --git a/sys/netpfil/ipfw/ip_dn_private.h b/sys/netpfil/ipfw/ip_dn_private.h index e6e699bf35b2..8dedd071bd81 100644 --- a/sys/netpfil/ipfw/ip_dn_private.h +++ b/sys/netpfil/ipfw/ip_dn_private.h @@ -1,503 +1,499 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * internal dummynet APIs. * * $FreeBSD$ */ #ifndef _IP_DN_PRIVATE_H #define _IP_DN_PRIVATE_H /* debugging support * use ND() to remove debugging, D() to print a line, * DX(level, ...) to print above a certain level * If you redefine D() you are expected to redefine all. */ #ifndef D #define ND(fmt, ...) do {} while (0) #define D1(fmt, ...) do {} while (0) #define D(fmt, ...) printf("%-10s " fmt "\n", \ __FUNCTION__, ## __VA_ARGS__) #define DX(lev, fmt, ...) do { \ - if (dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0) + if (V_dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0) #endif MALLOC_DECLARE(M_DUMMYNET); #ifndef __linux__ #define div64(a, b) ((int64_t)(a) / (int64_t)(b)) #endif #define DN_LOCK_INIT() do { \ - mtx_init(&dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF); \ - mtx_init(&dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF); \ + mtx_init(&V_dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF); \ + mtx_init(&V_dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF); \ } while (0) #define DN_LOCK_DESTROY() do { \ - mtx_destroy(&dn_cfg.uh_mtx); \ - mtx_destroy(&dn_cfg.bh_mtx); \ + mtx_destroy(&V_dn_cfg.uh_mtx); \ + mtx_destroy(&V_dn_cfg.bh_mtx); \ } while (0) #if 0 /* not used yet */ -#define DN_UH_RLOCK() mtx_lock(&dn_cfg.uh_mtx) -#define DN_UH_RUNLOCK() mtx_unlock(&dn_cfg.uh_mtx) -#define DN_UH_WLOCK() mtx_lock(&dn_cfg.uh_mtx) -#define DN_UH_WUNLOCK() mtx_unlock(&dn_cfg.uh_mtx) -#define DN_UH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED) +#define DN_UH_RLOCK() mtx_lock(&V_dn_cfg.uh_mtx) +#define DN_UH_RUNLOCK() mtx_unlock(&V_dn_cfg.uh_mtx) +#define DN_UH_WLOCK() mtx_lock(&V_dn_cfg.uh_mtx) +#define DN_UH_WUNLOCK() mtx_unlock(&V_dn_cfg.uh_mtx) +#define DN_UH_LOCK_ASSERT() mtx_assert(&V_dn_cfg.uh_mtx, MA_OWNED) #endif -#define DN_BH_RLOCK() mtx_lock(&dn_cfg.uh_mtx) -#define DN_BH_RUNLOCK() mtx_unlock(&dn_cfg.uh_mtx) -#define DN_BH_WLOCK() mtx_lock(&dn_cfg.uh_mtx) -#define DN_BH_WUNLOCK() mtx_unlock(&dn_cfg.uh_mtx) -#define DN_BH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED) +#define DN_BH_RLOCK() mtx_lock(&V_dn_cfg.uh_mtx) +#define DN_BH_RUNLOCK() mtx_unlock(&V_dn_cfg.uh_mtx) +#define DN_BH_WLOCK() mtx_lock(&V_dn_cfg.uh_mtx) +#define DN_BH_WUNLOCK() mtx_unlock(&V_dn_cfg.uh_mtx) +#define DN_BH_LOCK_ASSERT() mtx_assert(&V_dn_cfg.uh_mtx, MA_OWNED) SLIST_HEAD(dn_schk_head, dn_schk); SLIST_HEAD(dn_sch_inst_head, dn_sch_inst); SLIST_HEAD(dn_fsk_head, dn_fsk); SLIST_HEAD(dn_queue_head, dn_queue); SLIST_HEAD(dn_alg_head, dn_alg); #ifdef NEW_AQM SLIST_HEAD(dn_aqm_head, dn_aqm); /* for new AQMs */ #endif struct mq { /* a basic queue of packets*/ struct mbuf *head, *tail; int count; }; static inline void set_oid(struct dn_id *o, int type, int len) { o->type = type; o->len = len; o->subtype = 0; } /* - * configuration and global data for a dummynet instance + * configuration and data for a dummynet instance * * When a configuration is modified from userland, 'id' is incremented * so we can use the value to check for stale pointers. 
*/ struct dn_parms { uint32_t id; /* configuration version */ /* defaults (sysctl-accessible) */ int red_lookup_depth; int red_avg_pkt_size; int red_max_pkt_size; int hash_size; int max_hash_size; long byte_limit; /* max queue sizes */ long slot_limit; int io_fast; int debug; /* timekeeping */ struct timeval prev_t; /* last time dummynet_tick ran */ struct dn_heap evheap; /* scheduled events */ long tick_last; /* Last tick duration (usec). */ long tick_delta; /* Last vs standard tick diff (usec). */ long tick_delta_sum; /* Accumulated tick difference (usec).*/ long tick_adjustment; /* Tick adjustments done. */ long tick_lost; /* Lost(coalesced) ticks number. */ /* Adjusted vs non-adjusted curr_time difference (ticks). */ long tick_diff; /* counters of objects -- used for reporting space */ int schk_count; int si_count; int fsk_count; int queue_count; /* packet counters */ unsigned long io_pkt; unsigned long io_pkt_fast; unsigned long io_pkt_drop; /* ticks and other stuff */ uint64_t curr_time; /* flowsets and schedulers are in hash tables, with 'hash_size' * buckets. fshash is looked up at every packet arrival * so better be generous if we expect many entries. */ struct dn_ht *fshash; struct dn_ht *schedhash; /* list of flowsets without a scheduler -- use sch_chain */ struct dn_fsk_head fsu; /* list of unlinked flowsets */ - struct dn_alg_head schedlist; /* list of algorithms */ -#ifdef NEW_AQM - struct dn_aqm_head aqmlist; /* list of AQMs */ -#endif /* Store the fs/sch to scan when draining. The value is the * bucket number of the hash table. Expire can be disabled * with net.inet.ip.dummynet.expire=0, or it happens every * expire ticks. **/ int drain_fs; int drain_sch; uint32_t expire; uint32_t expire_cycle; /* tick count */ int init_done; #ifdef _KERNEL /* * This file is normally used in the kernel, unless we do * some userland tests, in which case we do not need a mtx. * uh_mtx arbitrates between system calls and also * protects fshash, schedhash and fsunlinked. * These structures are readonly for the lower half. * bh_mtx protects all other structures which may be * modified upon packet arrivals */ #if defined( __linux__ ) || defined( _WIN32 ) spinlock_t uh_mtx; spinlock_t bh_mtx; #else struct mtx uh_mtx; struct mtx bh_mtx; #endif #endif /* _KERNEL */ }; /* * Delay line, contains all packets on output from a link. * Every scheduler instance has one. */ struct delay_line { struct dn_id oid; struct dn_sch_inst *si; struct mq mq; }; /* * The kernel side of a flowset. It is linked in a hash table * of flowsets, and in a list of children of their parent scheduler. * qht is either the queue or (if HAVE_MASK) a hash table queues. * Note that the mask to use is the (flow_mask|sched_mask), which * changes as we attach/detach schedulers. So we store it here. * * XXX If we want to add scheduler-specific parameters, we need to * put them in external storage because the scheduler may not be * available when the fsk is created. 
*/ struct dn_fsk { /* kernel side of a flowset */ struct dn_fs fs; SLIST_ENTRY(dn_fsk) fsk_next; /* hash chain for fshash */ struct ipfw_flow_id fsk_mask; /* qht is a hash table of queues, or just a single queue * a bit in fs.flags tells us which one */ struct dn_ht *qht; struct dn_schk *sched; /* Sched we are linked to */ SLIST_ENTRY(dn_fsk) sch_chain; /* list of fsk attached to sched */ /* bucket index used by drain routine to drain queues for this * flowset */ int drain_bucket; /* Parameter realted to RED / GRED */ /* original values are in dn_fs*/ int w_q ; /* queue weight (scaled) */ int max_th ; /* maximum threshold for queue (scaled) */ int min_th ; /* minimum threshold for queue (scaled) */ int max_p ; /* maximum value for p_b (scaled) */ u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */ u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */ u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */ u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */ u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */ u_int lookup_depth ; /* depth of lookup table */ int lookup_step ; /* granularity inside the lookup table */ int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ int avg_pkt_size ; /* medium packet size */ int max_pkt_size ; /* max packet size */ #ifdef NEW_AQM struct dn_aqm *aqmfp; /* Pointer to AQM functions */ void *aqmcfg; /* configuration parameters for AQM */ #endif }; /* * A queue is created as a child of a flowset unless it belongs to * a !MULTIQUEUE scheduler. It is normally in a hash table in the * flowset. fs always points to the parent flowset. * si normally points to the sch_inst, unless the flowset has been * detached from the scheduler -- in this case si == NULL and we * should not enqueue. */ struct dn_queue { struct dn_flow ni; /* oid, flow_id, stats */ struct mq mq; /* packets queue */ struct dn_sch_inst *_si; /* owner scheduler instance */ SLIST_ENTRY(dn_queue) q_next; /* hash chain list for qht */ struct dn_fsk *fs; /* parent flowset. */ /* RED parameters */ int avg; /* average queue length est. (scaled) */ int count; /* arrivals since last RED drop */ int random; /* random value (scaled) */ uint64_t q_time; /* start of queue idle time */ #ifdef NEW_AQM void *aqm_status; /* per-queue status variables*/ #endif }; /* * The kernel side of a scheduler. Contains the userland config, * a link, pointer to extra config arguments from command line, * kernel flags, and a pointer to the scheduler methods. * It is stored in a hash table, and holds a list of all * flowsets and scheduler instances. * XXX sch must be at the beginning, see schk_hash(). */ struct dn_schk { struct dn_sch sch; struct dn_alg *fp; /* Pointer to scheduler functions */ struct dn_link link; /* The link, embedded */ struct dn_profile *profile; /* delay profile, if any */ struct dn_id *cfg; /* extra config arguments */ SLIST_ENTRY(dn_schk) schk_next; /* hash chain for schedhash */ struct dn_fsk_head fsk_list; /* all fsk linked to me */ struct dn_fsk *fs; /* Flowset for !MULTIQUEUE */ /* bucket index used by the drain routine to drain the scheduler * instance for this flowset. */ int drain_bucket; /* Hash table of all instances (through sch.sched_mask) * or single instance if no mask. Always valid. */ struct dn_ht *siht; }; /* * Scheduler instance. * Contains variables and all queues relative to a this instance. * This struct is created a runtime. 
*/ struct dn_sch_inst { struct dn_flow ni; /* oid, flowid and stats */ SLIST_ENTRY(dn_sch_inst) si_next; /* hash chain for siht */ struct delay_line dline; struct dn_schk *sched; /* the template */ int kflags; /* DN_ACTIVE */ int64_t credit; /* bits I can transmit (more or less). */ uint64_t sched_time; /* time link was scheduled in ready_heap */ uint64_t idle_time; /* start of scheduler instance idle time */ /* q_count is the number of queues that this instance is using. * The counter is incremented or decremented when * a reference from the queue is created or deleted. * It is used to make sure that a scheduler instance can be safely * deleted by the drain routine. See notes below. */ int q_count; }; /* * NOTE about object drain. * The system will automatically (XXX check when) drain queues and * scheduler instances when they are idle. * A queue is idle when it has no packets; an instance is idle when * it is not in the evheap heap, and the corresponding delay line is empty. * A queue can be safely deleted when it is idle because of the scheduler * function xxx_free_queue() will remove any references to it. * An instance can be only deleted when no queues reference it. To be sure * of that, a counter (q_count) stores the number of queues that are pointing * to the instance. * * XXX * Order of scan: * - take all flowset in a bucket for the flowset hash table * - take all queues in a bucket for the flowset * - increment the queue bucket * - scan next flowset bucket * Nothing is done if a bucket contains no entries. * * The same schema is used for sceduler instances */ /* kernel-side flags. Linux has DN_DELETE in fcntl.h */ enum { /* 1 and 2 are reserved for the SCAN flags */ DN_DESTROY = 0x0004, /* destroy */ DN_DELETE_FS = 0x0008, /* destroy flowset */ DN_DETACH = 0x0010, DN_ACTIVE = 0x0020, /* object is in evheap */ DN_F_DLINE = 0x0040, /* object is a delay line */ DN_DEL_SAFE = 0x0080, /* delete a queue only if no longer needed * by scheduler */ DN_QHT_IS_Q = 0x0100, /* in flowset, qht is a single queue */ }; /* * Packets processed by dummynet have an mbuf tag associated with * them that carries their dummynet state. * Outside dummynet, only the 'rule' field is relevant, and it must * be at the beginning of the structure. */ struct dn_pkt_tag { struct ipfw_rule_ref rule; /* matching rule */ /* second part, dummynet specific */ int dn_dir; /* action when packet comes out.*/ /* see ip_fw_private.h */ uint64_t output_time; /* when the pkt is due for delivery*/ struct ifnet *ifp; /* interface, for ip_output */ struct _ip6dn_args ip6opt; /* XXX ipv6 options */ uint16_t iphdr_off; /* IP header offset for mtodo() */ }; /* * Possible values for dn_dir. XXXGL: this needs to be reviewed * and converted to same values ip_fw_args.flags use. 
*/ enum { DIR_OUT = 0, DIR_IN = 1, DIR_FWD = 2, DIR_DROP = 3, PROTO_LAYER2 = 0x4, /* set for layer 2 */ PROTO_IPV4 = 0x08, PROTO_IPV6 = 0x10, PROTO_IFB = 0x0c, /* layer2 + ifbridge */ }; -extern struct dn_parms dn_cfg; -//VNET_DECLARE(struct dn_parms, _base_dn_cfg); -//#define dn_cfg VNET(_base_dn_cfg) +//extern struct dn_parms V_dn_cfg; +VNET_DECLARE(struct dn_parms, dn_cfg); +#define V_dn_cfg VNET(dn_cfg) int dummynet_io(struct mbuf **, struct ip_fw_args *); void dummynet_task(void *context, int pending); void dn_reschedule(void); struct dn_pkt_tag * dn_tag_get(struct mbuf *m); struct dn_queue *ipdn_q_find(struct dn_fsk *, struct dn_sch_inst *, struct ipfw_flow_id *); struct dn_sch_inst *ipdn_si_find(struct dn_schk *, struct ipfw_flow_id *); /* * copy_range is a template for requests for ranges of pipes/queues/scheds. * The number of ranges is variable and can be derived by o.len. * As a default, we use a small number of entries so that the struct * fits easily on the stack and is sufficient for most common requests. */ #define DEFAULT_RANGES 5 struct copy_range { struct dn_id o; uint32_t r[ 2 * DEFAULT_RANGES ]; }; struct copy_args { char **start; char *end; int flags; int type; struct copy_range *extra; /* extra filtering */ }; struct sockopt; int ip_dummynet_compat(struct sockopt *sopt); int dummynet_get(struct sockopt *sopt, void **compat); int dn_c_copy_q (void *_ni, void *arg); int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq); int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq); int dn_compat_copy_queue(struct copy_args *a, void *_o); int dn_compat_copy_pipe(struct copy_args *a, void *_o); int copy_data_helper_compat(void *_o, void *_arg); int dn_compat_calc_size(void); int do_config(void *p, int l); /* function to drain idle object */ void dn_drain_scheduler(void); void dn_drain_queue(void); #ifdef NEW_AQM int ecn_mark(struct mbuf* m); /* moved from ip_dn_io.c to here to be available for AQMs modules*/ static inline void mq_append(struct mq *q, struct mbuf *m) { #ifdef USERSPACE // buffers from netmap need to be copied // XXX note that the routine is not expected to fail ND("append %p to %p", m, q); if (m->m_flags & M_STACK) { struct mbuf *m_new; void *p; int l, ofs; ofs = m->m_data - m->__m_extbuf; // XXX allocate MGETHDR(m_new, M_NOWAIT, MT_DATA); ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p", m, m->__m_extbuf, m->__m_extlen, ofs, m_new); p = m_new->__m_extbuf; /* new pointer */ l = m_new->__m_extlen; /* new len */ if (l <= m->__m_extlen) { panic("extlen too large"); } *m_new = *m; // copy m_new->m_flags &= ~M_STACK; m_new->__m_extbuf = p; // point to new buffer _pkt_copy(m->__m_extbuf, p, m->__m_extlen); m_new->m_data = p + ofs; m = m_new; } #endif /* USERSPACE */ if (q->head == NULL) q->head = m; else q->tail->m_nextpkt = m; q->count++; q->tail = m; m->m_nextpkt = NULL; } #endif /* NEW_AQM */ #endif /* _IP_DN_PRIVATE_H */ diff --git a/sys/netpfil/ipfw/ip_dummynet.c b/sys/netpfil/ipfw/ip_dummynet.c index 17f3e364756e..3abc78fc1410 100644 --- a/sys/netpfil/ipfw/ip_dummynet.c +++ b/sys/netpfil/ipfw/ip_dummynet.c @@ -1,2794 +1,2813 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Codel/FQ_Codel and PIE/FQ-PIE Code: * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. 
* Implemented by Rasool Al-Saadi * * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa * Portions Copyright (c) 2000 Akamba Corp. * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Configuration and internal object management for dummynet. */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include /* ip_output(), IP_FORWARDING */ #include #include +#include #include #include #include #ifdef NEW_AQM #include #endif #include /* which objects to copy */ #define DN_C_LINK 0x01 #define DN_C_SCH 0x02 #define DN_C_FLOW 0x04 #define DN_C_FS 0x08 #define DN_C_QUEUE 0x10 /* we use this argument in case of a schk_new */ struct schk_new_arg { struct dn_alg *fp; struct dn_sch *sch; }; /*---- callout hooks. ----*/ static struct callout dn_timeout; +static int dn_tasks_started = 0; static int dn_gone; static struct task dn_task; static struct taskqueue *dn_tq = NULL; +/* global scheduler list */ +struct dn_alg_head schedlist; +#ifdef NEW_AQM +struct dn_aqm_head aqmlist; /* list of AQMs */ +#endif + static void dummynet(void *arg) { (void)arg; /* UNUSED */ taskqueue_enqueue(dn_tq, &dn_task); } void dn_reschedule(void) { if (dn_gone != 0) return; callout_reset_sbt(&dn_timeout, tick_sbt, 0, dummynet, NULL, C_HARDCLOCK | C_DIRECT_EXEC); } /*----- end of callout hooks -----*/ #ifdef NEW_AQM /* Return AQM descriptor for given type or name. */ static struct dn_aqm * find_aqm_type(int type, char *name) { struct dn_aqm *d; - SLIST_FOREACH(d, &dn_cfg.aqmlist, next) { + SLIST_FOREACH(d, &aqmlist, next) { if (d->type == type || (name && !strcasecmp(d->name, name))) return d; } return NULL; /* not found */ } #endif /* Return a scheduler descriptor given the type or name. 
*/ static struct dn_alg * find_sched_type(int type, char *name) { struct dn_alg *d; - SLIST_FOREACH(d, &dn_cfg.schedlist, next) { + SLIST_FOREACH(d, &schedlist, next) { if (d->type == type || (name && !strcasecmp(d->name, name))) return d; } return NULL; /* not found */ } int ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg) { int oldv = *v; const char *op = NULL; if (dflt < lo) dflt = lo; if (dflt > hi) dflt = hi; if (oldv < lo) { *v = dflt; op = "Bump"; } else if (oldv > hi) { *v = hi; op = "Clamp"; } else return *v; if (op && msg) printf("%s %s to %d (was %d)\n", op, msg, *v, oldv); return *v; } /*---- flow_id mask, hash and compare functions ---*/ /* * The flow_id includes the 5-tuple, the queue/pipe number * which we store in the extra area in host order, * and for ipv6 also the flow_id6. * XXX see if we want the tos byte (can store in 'flags') */ static struct ipfw_flow_id * flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id) { int is_v6 = IS_IP6_FLOW_ID(id); id->dst_port &= mask->dst_port; id->src_port &= mask->src_port; id->proto &= mask->proto; id->extra &= mask->extra; if (is_v6) { APPLY_MASK(&id->dst_ip6, &mask->dst_ip6); APPLY_MASK(&id->src_ip6, &mask->src_ip6); id->flow_id6 &= mask->flow_id6; } else { id->dst_ip &= mask->dst_ip; id->src_ip &= mask->src_ip; } return id; } /* computes an OR of two masks, result in dst and also returned */ static struct ipfw_flow_id * flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst) { int is_v6 = IS_IP6_FLOW_ID(dst); dst->dst_port |= src->dst_port; dst->src_port |= src->src_port; dst->proto |= src->proto; dst->extra |= src->extra; if (is_v6) { #define OR_MASK(_d, _s) \ (_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \ (_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \ (_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \ (_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3]; OR_MASK(&dst->dst_ip6, &src->dst_ip6); OR_MASK(&dst->src_ip6, &src->src_ip6); #undef OR_MASK dst->flow_id6 |= src->flow_id6; } else { dst->dst_ip |= src->dst_ip; dst->src_ip |= src->src_ip; } return dst; } static int nonzero_mask(struct ipfw_flow_id *m) { if (m->dst_port || m->src_port || m->proto || m->extra) return 1; if (IS_IP6_FLOW_ID(m)) { return m->dst_ip6.__u6_addr.__u6_addr32[0] || m->dst_ip6.__u6_addr.__u6_addr32[1] || m->dst_ip6.__u6_addr.__u6_addr32[2] || m->dst_ip6.__u6_addr.__u6_addr32[3] || m->src_ip6.__u6_addr.__u6_addr32[0] || m->src_ip6.__u6_addr.__u6_addr32[1] || m->src_ip6.__u6_addr.__u6_addr32[2] || m->src_ip6.__u6_addr.__u6_addr32[3] || m->flow_id6; } else { return m->dst_ip || m->src_ip; } } /* XXX we may want a better hash function */ static uint32_t flow_id_hash(struct ipfw_flow_id *id) { uint32_t i; if (IS_IP6_FLOW_ID(id)) { uint32_t *d = (uint32_t *)&id->dst_ip6; uint32_t *s = (uint32_t *)&id->src_ip6; i = (d[0] ) ^ (d[1]) ^ (d[2] ) ^ (d[3]) ^ (d[0] >> 15) ^ (d[1] >> 15) ^ (d[2] >> 15) ^ (d[3] >> 15) ^ (s[0] << 1) ^ (s[1] << 1) ^ (s[2] << 1) ^ (s[3] << 1) ^ (s[0] << 16) ^ (s[1] << 16) ^ (s[2] << 16) ^ (s[3] << 16) ^ (id->dst_port << 1) ^ (id->src_port) ^ (id->extra) ^ (id->proto ) ^ (id->flow_id6); } else { i = (id->dst_ip) ^ (id->dst_ip >> 15) ^ (id->src_ip << 1) ^ (id->src_ip >> 16) ^ (id->extra) ^ (id->dst_port << 1) ^ (id->src_port) ^ (id->proto); } return i; } /* Like bcmp, returns 0 if ids match, 1 otherwise. 
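/*
 * How flow_id_mask() and flow_id_hash() above cooperate: the packet's
 * 5-tuple is first ANDed with the configured mask and only then hashed,
 * so every flow that agrees on the masked-in fields lands on the same
 * queue. A reduced, IPv4-only sketch configured for per-source-IP
 * queues:
 */
#include <stdio.h>
#include <stdint.h>

struct flow_id { uint32_t dst_ip, src_ip; uint16_t dst_port, src_port; };

static void
flow_id_mask(const struct flow_id *mask, struct flow_id *id)
{
	id->dst_ip   &= mask->dst_ip;
	id->src_ip   &= mask->src_ip;
	id->dst_port &= mask->dst_port;
	id->src_port &= mask->src_port;
}

static uint32_t
flow_id_hash(const struct flow_id *id)
{
	return (id->dst_ip) ^ (id->dst_ip >> 15) ^
	    (id->src_ip << 1) ^ (id->src_ip >> 16) ^
	    (id->dst_port << 1) ^ (id->src_port);
}

int
main(void)
{
	/* mask keeps src_ip only: one queue per source address */
	struct flow_id mask = { 0, 0xffffffff, 0, 0 };
	struct flow_id a = { 0x0a000001, 0xc0a80101, 80, 1111 };
	struct flow_id b = { 0x0a000002, 0xc0a80101, 443, 2222 };

	flow_id_mask(&mask, &a);
	flow_id_mask(&mask, &b);
	printf("same bucket: %d\n", flow_id_hash(&a) == flow_id_hash(&b));
	return 0;
}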
*/ static int flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2) { int is_v6 = IS_IP6_FLOW_ID(id1); if (!is_v6) { if (IS_IP6_FLOW_ID(id2)) return 1; /* different address families */ return (id1->dst_ip == id2->dst_ip && id1->src_ip == id2->src_ip && id1->dst_port == id2->dst_port && id1->src_port == id2->src_port && id1->proto == id2->proto && id1->extra == id2->extra) ? 0 : 1; } /* the ipv6 case */ return ( !bcmp(&id1->dst_ip6,&id2->dst_ip6, sizeof(id1->dst_ip6)) && !bcmp(&id1->src_ip6,&id2->src_ip6, sizeof(id1->src_ip6)) && id1->dst_port == id2->dst_port && id1->src_port == id2->src_port && id1->proto == id2->proto && id1->extra == id2->extra && id1->flow_id6 == id2->flow_id6) ? 0 : 1; } /*--------- end of flow-id mask, hash and compare ---------*/ /*--- support functions for the qht hashtable ---- * Entries are hashed by flow-id */ static uint32_t q_hash(uintptr_t key, int flags, void *arg) { /* compute the hash slot from the flow id */ struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ? &((struct dn_queue *)key)->ni.fid : (struct ipfw_flow_id *)key; return flow_id_hash(id); } static int q_match(void *obj, uintptr_t key, int flags, void *arg) { struct dn_queue *o = (struct dn_queue *)obj; struct ipfw_flow_id *id2; if (flags & DNHT_KEY_IS_OBJ) { /* compare pointers */ id2 = &((struct dn_queue *)key)->ni.fid; } else { id2 = (struct ipfw_flow_id *)key; } return (0 == flow_id_cmp(&o->ni.fid, id2)); } /* * create a new queue instance for the given 'key'. */ static void * q_new(uintptr_t key, int flags, void *arg) { struct dn_queue *q, *template = arg; struct dn_fsk *fs = template->fs; int size = sizeof(*q) + fs->sched->fp->q_datalen; q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO); if (q == NULL) { D("no memory for new queue"); return NULL; } set_oid(&q->ni.oid, DN_QUEUE, size); if (fs->fs.flags & DN_QHT_HASH) q->ni.fid = *(struct ipfw_flow_id *)key; q->fs = fs; q->_si = template->_si; q->_si->q_count++; if (fs->sched->fp->new_queue) fs->sched->fp->new_queue(q); #ifdef NEW_AQM /* call AQM init function after creating a queue*/ if (fs->aqmfp && fs->aqmfp->init) if(fs->aqmfp->init(q)) D("unable to init AQM for fs %d", fs->fs.fs_nr); #endif - dn_cfg.queue_count++; + V_dn_cfg.queue_count++; return q; } /* * Notify schedulers that a queue is going away. * If (flags & DN_DESTROY), also free the packets. * The version for callbacks is called q_delete_cb(). */ static void dn_delete_queue(struct dn_queue *q, int flags) { struct dn_fsk *fs = q->fs; #ifdef NEW_AQM /* clean up AQM status for queue 'q' * cleanup here is called just with MULTIQUEUE */ if (fs && fs->aqmfp && fs->aqmfp->cleanup) fs->aqmfp->cleanup(q); #endif // D("fs %p si %p\n", fs, q->_si); /* notify the parent scheduler that the queue is going away */ if (fs && fs->sched->fp->free_queue) fs->sched->fp->free_queue(q); q->_si->q_count--; q->_si = NULL; if (flags & DN_DESTROY) { if (q->mq.head) dn_free_pkts(q->mq.head); bzero(q, sizeof(*q)); // safety free(q, M_DUMMYNET); - dn_cfg.queue_count--; + V_dn_cfg.queue_count--; } } static int q_delete_cb(void *q, void *arg) { int flags = (int)(uintptr_t)arg; dn_delete_queue(q, flags); return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0; } /* * calls dn_delete_queue/q_delete_cb on all queues, * which notifies the parent scheduler and possibly drains packets. 
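/*
 * q_hash/q_match/q_new above form the callback triple that dn_ht_find()
 * uses for find-or-create lookups: hash the key to a bucket, scan the
 * bucket with the match callback, and only when an INSERT flag is passed
 * manufacture the entry with the new callback. A generic sketch of that
 * contract (illustrative names, error handling elided):
 */
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 16

struct entry { struct entry *next; int key; };
static struct entry *buckets[NBUCKETS];

static struct entry *
ht_find(int key, int insert)
{
	unsigned b = (unsigned)key % NBUCKETS;	/* the hash() step */

	for (struct entry *e = buckets[b]; e != NULL; e = e->next)
		if (e->key == key)		/* the match() step */
			return e;
	if (!insert)
		return NULL;
	struct entry *e = calloc(1, sizeof(*e));	/* the new() step */

	e->key = key;
	e->next = buckets[b];
	buckets[b] = e;
	return e;
}

int
main(void)
{
	ht_find(42, 1);				/* creates the entry */
	printf("%p\n", (void *)ht_find(42, 0));	/* finds the same one */
	return 0;
}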
* flags & DN_DESTROY: drains queues and destroy qht; */ static void qht_delete(struct dn_fsk *fs, int flags) { ND("fs %d start flags %d qht %p", fs->fs.fs_nr, flags, fs->qht); if (!fs->qht) return; if (fs->fs.flags & DN_QHT_HASH) { dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags); if (flags & DN_DESTROY) { dn_ht_free(fs->qht, 0); fs->qht = NULL; } } else { dn_delete_queue((struct dn_queue *)(fs->qht), flags); if (flags & DN_DESTROY) fs->qht = NULL; } } /* * Find and possibly create the queue for a MULTIQUEUE scheduler. * We never call it for !MULTIQUEUE (the queue is in the sch_inst). */ struct dn_queue * ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si, struct ipfw_flow_id *id) { struct dn_queue template; template._si = si; template.fs = fs; if (fs->fs.flags & DN_QHT_HASH) { struct ipfw_flow_id masked_id; if (fs->qht == NULL) { fs->qht = dn_ht_init(NULL, fs->fs.buckets, offsetof(struct dn_queue, q_next), q_hash, q_match, q_new); if (fs->qht == NULL) return NULL; } masked_id = *id; flow_id_mask(&fs->fsk_mask, &masked_id); return dn_ht_find(fs->qht, (uintptr_t)&masked_id, DNHT_INSERT, &template); } else { if (fs->qht == NULL) fs->qht = q_new(0, 0, &template); return (struct dn_queue *)fs->qht; } } /*--- end of queue hash table ---*/ /*--- support functions for the sch_inst hashtable ---- * * These are hashed by flow-id */ static uint32_t si_hash(uintptr_t key, int flags, void *arg) { /* compute the hash slot from the flow id */ struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ? &((struct dn_sch_inst *)key)->ni.fid : (struct ipfw_flow_id *)key; return flow_id_hash(id); } static int si_match(void *obj, uintptr_t key, int flags, void *arg) { struct dn_sch_inst *o = obj; struct ipfw_flow_id *id2; id2 = (flags & DNHT_KEY_IS_OBJ) ? &((struct dn_sch_inst *)key)->ni.fid : (struct ipfw_flow_id *)key; return flow_id_cmp(&o->ni.fid, id2) == 0; } /* * create a new instance for the given 'key' * Allocate memory for instance, delay line and scheduler private data. */ static void * si_new(uintptr_t key, int flags, void *arg) { struct dn_schk *s = arg; struct dn_sch_inst *si; int l = sizeof(*si) + s->fp->si_datalen; si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO); if (si == NULL) goto error; /* Set length only for the part passed up to userland. */ set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow)); set_oid(&(si->dline.oid), DN_DELAY_LINE, sizeof(struct delay_line)); /* mark si and dline as outside the event queue */ si->ni.oid.id = si->dline.oid.id = -1; si->sched = s; si->dline.si = si; if (s->fp->new_sched && s->fp->new_sched(si)) { D("new_sched error"); goto error; } if (s->sch.flags & DN_HAVE_MASK) si->ni.fid = *(struct ipfw_flow_id *)key; #ifdef NEW_AQM /* init AQM status for !DN_MULTIQUEUE sched*/ if (!(s->fp->flags & DN_MULTIQUEUE)) if (s->fs->aqmfp && s->fs->aqmfp->init) if(s->fs->aqmfp->init((struct dn_queue *)(si + 1))) { D("unable to init AQM for fs %d", s->fs->fs.fs_nr); goto error; } #endif - dn_cfg.si_count++; + V_dn_cfg.si_count++; return si; error: if (si) { bzero(si, sizeof(*si)); // safety free(si, M_DUMMYNET); } return NULL; } /* * Callback from siht to delete all scheduler instances. Remove * si and delay line from the system heap, destroy all queues. * We assume that all flowset have been notified and do not * point to us anymore. 
*/ static int si_destroy(void *_si, void *arg) { struct dn_sch_inst *si = _si; struct dn_schk *s = si->sched; struct delay_line *dl = &si->dline; if (dl->oid.subtype) /* remove delay line from event heap */ - heap_extract(&dn_cfg.evheap, dl); + heap_extract(&V_dn_cfg.evheap, dl); dn_free_pkts(dl->mq.head); /* drain delay line */ if (si->kflags & DN_ACTIVE) /* remove si from event heap */ - heap_extract(&dn_cfg.evheap, si); + heap_extract(&V_dn_cfg.evheap, si); #ifdef NEW_AQM /* clean up AQM status for !DN_MULTIQUEUE sched * Note that all queues belong to fs were cleaned up in fsk_detach. * When drain_scheduler is called s->fs and q->fs are pointing * to a correct fs, so we can use fs in this case. */ if (!(s->fp->flags & DN_MULTIQUEUE)) { struct dn_queue *q = (struct dn_queue *)(si + 1); if (q->aqm_status && q->fs->aqmfp) if (q->fs->aqmfp->cleanup) q->fs->aqmfp->cleanup(q); } #endif if (s->fp->free_sched) s->fp->free_sched(si); bzero(si, sizeof(*si)); /* safety */ free(si, M_DUMMYNET); - dn_cfg.si_count--; + V_dn_cfg.si_count--; return DNHT_SCAN_DEL; } /* * Find the scheduler instance for this packet. If we need to apply * a mask, do on a local copy of the flow_id to preserve the original. * Assume siht is always initialized if we have a mask. */ struct dn_sch_inst * ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id) { if (s->sch.flags & DN_HAVE_MASK) { struct ipfw_flow_id id_t = *id; flow_id_mask(&s->sch.sched_mask, &id_t); return dn_ht_find(s->siht, (uintptr_t)&id_t, DNHT_INSERT, s); } if (!s->siht) s->siht = si_new(0, 0, s); return (struct dn_sch_inst *)s->siht; } /* callback to flush credit for the scheduler instance */ static int si_reset_credit(void *_si, void *arg) { struct dn_sch_inst *si = _si; struct dn_link *p = &si->sched->link; - si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0); + si->credit = p->burst + (V_dn_cfg.io_fast ? p->bandwidth : 0); return 0; } static void schk_reset_credit(struct dn_schk *s) { if (s->sch.flags & DN_HAVE_MASK) dn_ht_scan(s->siht, si_reset_credit, NULL); else if (s->siht) si_reset_credit(s->siht, NULL); } /*---- end of sch_inst hashtable ---------------------*/ /*------------------------------------------------------- * flowset hash (fshash) support. Entries are hashed by fs_nr. * New allocations are put in the fsunlinked list, from which * they are removed when they point to a specific scheduler. */ static uint32_t fsk_hash(uintptr_t key, int flags, void *arg) { uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_fsk *)key)->fs.fs_nr; return ( (i>>8)^(i>>4)^i ); } static int fsk_match(void *obj, uintptr_t key, int flags, void *arg) { struct dn_fsk *fs = obj; int i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_fsk *)key)->fs.fs_nr; return (fs->fs.fs_nr == i); } static void * fsk_new(uintptr_t key, int flags, void *arg) { struct dn_fsk *fs; fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO); if (fs) { set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs)); - dn_cfg.fsk_count++; + V_dn_cfg.fsk_count++; fs->drain_bucket = 0; - SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain); + SLIST_INSERT_HEAD(&V_dn_cfg.fsu, fs, sch_chain); } return fs; } #ifdef NEW_AQM /* callback function for cleaning up AQM queue status belongs to a flowset * connected to scheduler instance '_si' (for !DN_MULTIQUEUE only). 
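/*
 * fsk_hash() here and schk_hash() below both reduce a small integer
 * identifier with (i>>8)^(i>>4)^i, folding the higher bits down before
 * the hash table takes the value modulo the bucket count:
 */
#include <stdio.h>

static unsigned
fold(unsigned i)
{
	return ((i >> 8) ^ (i >> 4) ^ i);
}

int
main(void)
{
	for (unsigned i = 1; i <= 4; i++)
		printf("fs_nr %u -> bucket %u (mod 16)\n", i, fold(i) % 16);
	return 0;
}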
*/ static int si_cleanup_q(void *_si, void *arg) { struct dn_sch_inst *si = _si; if (!(si->sched->fp->flags & DN_MULTIQUEUE)) { if (si->sched->fs->aqmfp && si->sched->fs->aqmfp->cleanup) si->sched->fs->aqmfp->cleanup((struct dn_queue *) (si+1)); } return 0; } /* callback to clean up queue AQM status. */ static int q_cleanup_q(void *_q, void *arg) { struct dn_queue *q = _q; q->fs->aqmfp->cleanup(q); return 0; } /* Clean up the AQM status of all queues belonging to flowset 'fs', then * deconfigure AQM for flowset 'fs'. */ static void aqm_cleanup_deconfig_fs(struct dn_fsk *fs) { struct dn_sch_inst *si; /* clean up AQM status for all queues for !DN_MULTIQUEUE sched */ if (fs->fs.fs_nr > DN_MAX_ID) { if (fs->sched && !(fs->sched->fp->flags & DN_MULTIQUEUE)) { if (fs->sched->sch.flags & DN_HAVE_MASK) dn_ht_scan(fs->sched->siht, si_cleanup_q, NULL); else { /* single si, i.e. no sched mask */ si = (struct dn_sch_inst *) fs->sched->siht; if (si && fs->aqmfp && fs->aqmfp->cleanup) fs->aqmfp->cleanup((struct dn_queue *) (si+1)); } } } /* clean up AQM status for all queues for DN_MULTIQUEUE sched */ if (fs->sched && fs->sched->fp->flags & DN_MULTIQUEUE && fs->qht) { if (fs->fs.flags & DN_QHT_HASH) dn_ht_scan(fs->qht, q_cleanup_q, NULL); else fs->aqmfp->cleanup((struct dn_queue *)(fs->qht)); } /* deconfig AQM */ if (fs->aqmcfg && fs->aqmfp && fs->aqmfp->deconfig) fs->aqmfp->deconfig(fs); } #endif /* * Detach flowset from its current scheduler. Flags as follows: * DN_DETACH removes from the fsk_list * DN_DESTROY deletes individual queues * DN_DELETE_FS destroys the flowset (otherwise it goes on the unlinked list). */ static void fsk_detach(struct dn_fsk *fs, int flags) { if (flags & DN_DELETE_FS) flags |= DN_DESTROY; ND("fs %d from sched %d flags %s %s %s", fs->fs.fs_nr, fs->fs.sched_nr, (flags & DN_DELETE_FS) ? "DEL_FS":"", (flags & DN_DESTROY) ? "DEL":"", (flags & DN_DETACH) ? "DET":""); if (flags & DN_DETACH) { /* detach from the list */ struct dn_fsk_head *h; - h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu; + h = fs->sched ? &fs->sched->fsk_list : &V_dn_cfg.fsu; SLIST_REMOVE(h, fs, dn_fsk, sch_chain); } /* Free the RED parameters, they will be recomputed on * subsequent attach if needed. */ if (fs->w_q_lookup) free(fs->w_q_lookup, M_DUMMYNET); fs->w_q_lookup = NULL; qht_delete(fs, flags); #ifdef NEW_AQM aqm_cleanup_deconfig_fs(fs); #endif if (fs->sched && fs->sched->fp->free_fsk) fs->sched->fp->free_fsk(fs); fs->sched = NULL; if (flags & DN_DELETE_FS) { bzero(fs, sizeof(*fs)); /* safety */ free(fs, M_DUMMYNET); - dn_cfg.fsk_count--; + V_dn_cfg.fsk_count--; } else { - SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain); + SLIST_INSERT_HEAD(&V_dn_cfg.fsu, fs, sch_chain); } } /* * Detach or destroy all flowsets in a list. * flags specifies what to do: * DN_DESTROY: flush all queues * DN_DELETE_FS: DN_DESTROY + destroy flowset * DN_DELETE_FS implies DN_DESTROY */ static void fsk_detach_list(struct dn_fsk_head *h, int flags) { struct dn_fsk *fs; int n = 0; /* only for stats */ ND("head %p flags %x", h, flags); while ((fs = SLIST_FIRST(h))) { SLIST_REMOVE_HEAD(h, sch_chain); n++; fsk_detach(fs, flags); } ND("done %d flowsets", n); } /* * Called on 'queue X delete' -- removes the flowset from fshash, * deletes all queues for the flowset, and removes the flowset.
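 *
 * E.g. 'ipfw queue 10 delete' lands here as delete_fs(10, 0): the
 * flowset is looked up with DNHT_REMOVE so it is unlinked from fshash
 * in the same step, then torn down via
 * fsk_detach(fs, DN_DETACH | DN_DELETE_FS).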
*/ static int delete_fs(int i, int locked) { struct dn_fsk *fs; int err = 0; if (!locked) DN_BH_WLOCK(); - fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL); + fs = dn_ht_find(V_dn_cfg.fshash, i, DNHT_REMOVE, NULL); ND("fs %d found %p", i, fs); if (fs) { fsk_detach(fs, DN_DETACH | DN_DELETE_FS); err = 0; } else err = EINVAL; if (!locked) DN_BH_WUNLOCK(); return err; } /*----- end of flowset hashtable support -------------*/ /*------------------------------------------------------------ * Scheduler hash. When searching by index we pass sched_nr, * otherwise we pass struct dn_sch * which is the first field in * struct dn_schk so we can cast between the two. We use this trick * in the create phase (but it should be fixed). */ static uint32_t schk_hash(uintptr_t key, int flags, void *_arg) { uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_schk *)key)->sch.sched_nr; return ( (i>>8)^(i>>4)^i ); } static int schk_match(void *obj, uintptr_t key, int flags, void *_arg) { struct dn_schk *s = (struct dn_schk *)obj; int i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_schk *)key)->sch.sched_nr; return (s->sch.sched_nr == i); } /* * Create the entry and initialize it with the sched hash if needed. * Leave s->fp unset so we can tell whether a dn_ht_find() returns * a new object or a previously existing one. */ static void * schk_new(uintptr_t key, int flags, void *arg) { struct schk_new_arg *a = arg; struct dn_schk *s; int l = sizeof(*s) + a->fp->schk_datalen; s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO); if (s == NULL) return NULL; set_oid(&s->link.oid, DN_LINK, sizeof(s->link)); s->sch = *a->sch; // copy initial values s->link.link_nr = s->sch.sched_nr; SLIST_INIT(&s->fsk_list); /* initialize the hash table or create the single instance */ s->fp = a->fp; /* si_new needs this */ s->drain_bucket = 0; if (s->sch.flags & DN_HAVE_MASK) { s->siht = dn_ht_init(NULL, s->sch.buckets, offsetof(struct dn_sch_inst, si_next), si_hash, si_match, si_new); if (s->siht == NULL) { free(s, M_DUMMYNET); return NULL; } } s->fp = NULL; /* mark as a new scheduler */ - dn_cfg.schk_count++; + V_dn_cfg.schk_count++; return s; } /* * Callback for sched delete. Notify all attached flowsets to * detach from the scheduler, destroy the internal flowset and * all instances. The scheduler goes away too. * arg is 0 (only detach flowsets and destroy instances) * DN_DESTROY (detach & delete queues, delete schk) * or DN_DELETE_FS (delete queues and flowsets, delete schk) */ static int schk_delete_cb(void *obj, void *arg) { struct dn_schk *s = obj; #if 0 int a = (int)arg; ND("sched %d arg %s%s", s->sch.sched_nr, a&DN_DESTROY ? "DEL ":"", a&DN_DELETE_FS ? "DEL_FS":""); #endif fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0); /* no more flowsets pointing to us now */ if (s->sch.flags & DN_HAVE_MASK) { dn_ht_scan(s->siht, si_destroy, NULL); dn_ht_free(s->siht, 0); } else if (s->siht) si_destroy(s->siht, NULL); if (s->profile) { free(s->profile, M_DUMMYNET); s->profile = NULL; } s->siht = NULL; if (s->fp->destroy) s->fp->destroy(s); bzero(s, sizeof(*s)); // safety free(obj, M_DUMMYNET); - dn_cfg.schk_count--; + V_dn_cfg.schk_count--; return DNHT_SCAN_DEL; } /* * Called on a 'sched X delete' command. Deletes a single scheduler. * This is done by removing from the schedhash, unlinking all * flowsets and deleting their traffic.
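 *
 * Per the numbering scheme used throughout this file, scheduler i owns
 * an internal flowset numbered i + DN_MAX_ID; it is deleted first
 * (delete_fs(i + DN_MAX_ID, 1) below, with the lock already held)
 * before schk_delete_cb() destroys the scheduler itself.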
*/ static int delete_schk(int i) { struct dn_schk *s; - s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL); + s = dn_ht_find(V_dn_cfg.schedhash, i, DNHT_REMOVE, NULL); ND("%d %p", i, s); if (!s) return EINVAL; delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */ /* then detach flowsets, delete traffic */ schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY); return 0; } /*--- end of schk hashtable support ---*/ static int copy_obj(char **start, char *end, void *_o, const char *msg, int i) { struct dn_id o; union { struct dn_link l; struct dn_schk s; } dn; int have = end - *start; memcpy(&o, _o, sizeof(o)); if (have < o.len || o.len == 0 || o.type == 0) { D("(WARN) type %d %s %d have %d need %d", o.type, msg, i, have, o.len); return 1; } ND("type %d %s %d len %d", o.type, msg, i, o.len); if (o.type == DN_LINK) { memcpy(&dn.l, _o, sizeof(dn.l)); /* Adjust burst parameter for link */ dn.l.burst = div64(dn.l.burst, 8 * hz); dn.l.delay = dn.l.delay * 1000 / hz; memcpy(*start, &dn.l, sizeof(dn.l)); } else if (o.type == DN_SCH) { /* Set dn.s.sch.oid.id to the number of instances */ memcpy(&dn.s, _o, sizeof(dn.s)); dn.s.sch.oid.id = (dn.s.sch.flags & DN_HAVE_MASK) ? dn_ht_entries(dn.s.siht) : (dn.s.siht ? 1 : 0); memcpy(*start, &dn.s, sizeof(dn.s)); } else memcpy(*start, _o, o.len); *start += o.len; return 0; } /* Specific function to copy a queue. * Copies only the user-visible part of a queue (which is in * a struct dn_flow), and sets len accordingly. */ static int copy_obj_q(char **start, char *end, void *_o, const char *msg, int i) { struct dn_id *o = _o; int have = end - *start; int len = sizeof(struct dn_flow); /* see above comment */ if (have < len || o->len == 0 || o->type != DN_QUEUE) { D("ERROR type %d %s %d have %d need %d", o->type, msg, i, have, len); return 1; } ND("type %d %s %d len %d", o->type, msg, i, len); memcpy(*start, _o, len); ((struct dn_id*)(*start))->len = len; *start += len; return 0; } static int copy_q_cb(void *obj, void *arg) { struct dn_queue *q = obj; struct copy_args *a = arg; struct dn_flow *ni = (struct dn_flow *)(*a->start); if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1)) return DNHT_SCAN_END; ni->oid.type = DN_FLOW; /* override the DN_QUEUE */ ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL); return 0; } static int copy_q(struct copy_args *a, struct dn_fsk *fs, int flags) { if (!fs->qht) return 0; if (fs->fs.flags & DN_QHT_HASH) dn_ht_scan(fs->qht, copy_q_cb, a); else copy_q_cb(fs->qht, a); return 0; } /* * This routine only copies the initial part of a profile ? XXX */ static int copy_profile(struct copy_args *a, struct dn_profile *p) { int have = a->end - *a->start; /* XXX here we check for max length */ int profile_len = sizeof(struct dn_profile) - ED_MAX_SAMPLES_NO*sizeof(int); if (p == NULL) return 0; if (have < profile_len) { D("error have %d need %d", have, profile_len); return 1; } memcpy(*a->start, p, profile_len); ((struct dn_id *)(*a->start))->len = profile_len; *a->start += profile_len; return 0; } static int copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags) { struct dn_fs *ufs = (struct dn_fs *)(*a->start); if (!fs) return 0; ND("flowset %d", fs->fs.fs_nr); if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr)) return DNHT_SCAN_END; ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ? dn_ht_entries(fs->qht) : (fs->qht ? 
1 : 0); if (flags) { /* copy queues */ copy_q(a, fs, 0); } return 0; } static int copy_si_cb(void *obj, void *arg) { struct dn_sch_inst *si = obj; struct copy_args *a = arg; struct dn_flow *ni = (struct dn_flow *)(*a->start); if (copy_obj(a->start, a->end, &si->ni, "inst", si->sched->sch.sched_nr)) return DNHT_SCAN_END; ni->oid.type = DN_FLOW; /* override the DN_SCH_I */ ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL); return 0; } static int copy_si(struct copy_args *a, struct dn_schk *s, int flags) { if (s->sch.flags & DN_HAVE_MASK) dn_ht_scan(s->siht, copy_si_cb, a); else if (s->siht) copy_si_cb(s->siht, a); return 0; } /* * compute a list of children of a scheduler and copy up */ static int copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags) { struct dn_fsk *fs; struct dn_id *o; uint32_t *p; int n = 0, space = sizeof(*o); SLIST_FOREACH(fs, &s->fsk_list, sch_chain) { if (fs->fs.fs_nr < DN_MAX_ID) n++; } space += n * sizeof(uint32_t); DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n); if (a->end - *(a->start) < space) return DNHT_SCAN_END; o = (struct dn_id *)(*(a->start)); o->len = space; *a->start += o->len; o->type = DN_TEXT; p = (uint32_t *)(o+1); SLIST_FOREACH(fs, &s->fsk_list, sch_chain) if (fs->fs.fs_nr < DN_MAX_ID) *p++ = fs->fs.fs_nr; return 0; } static int copy_data_helper(void *_o, void *_arg) { struct copy_args *a = _arg; uint32_t *r = a->extra->r; /* start of first range */ uint32_t *lim; /* first invalid pointer */ int n; lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len); if (a->type == DN_LINK || a->type == DN_SCH) { /* pipe|sched show, we receive a dn_schk */ struct dn_schk *s = _o; n = s->sch.sched_nr; if (a->type == DN_SCH && n >= DN_MAX_ID) return 0; /* not a scheduler */ if (a->type == DN_LINK && n <= DN_MAX_ID) return 0; /* not a pipe */ /* see if the object is within one of our ranges */ for (;r < lim; r += 2) { if (n < r[0] || n > r[1]) continue; /* Found a valid entry, copy and we are done */ if (a->flags & DN_C_LINK) { if (copy_obj(a->start, a->end, &s->link, "link", n)) return DNHT_SCAN_END; if (copy_profile(a, s->profile)) return DNHT_SCAN_END; if (copy_flowset(a, s->fs, 0)) return DNHT_SCAN_END; } if (a->flags & DN_C_SCH) { if (copy_obj(a->start, a->end, &s->sch, "sched", n)) return DNHT_SCAN_END; /* list all attached flowsets */ if (copy_fsk_list(a, s, 0)) return DNHT_SCAN_END; } if (a->flags & DN_C_FLOW) copy_si(a, s, 0); break; } } else if (a->type == DN_FS) { /* queue show, skip internal flowsets */ struct dn_fsk *fs = _o; n = fs->fs.fs_nr; if (n >= DN_MAX_ID) return 0; /* see if the object is within one of our ranges */ for (;r < lim; r += 2) { if (n < r[0] || n > r[1]) continue; if (copy_flowset(a, fs, 0)) return DNHT_SCAN_END; copy_q(a, fs, 0); break; /* we are done */ } } return 0; } static inline struct dn_schk * locate_scheduler(int i) { - return dn_ht_find(dn_cfg.schedhash, i, 0, NULL); + return dn_ht_find(V_dn_cfg.schedhash, i, 0, NULL); } /* * red parameters are in fixed point arithmetic. */ static int config_red(struct dn_fsk *fs) { int64_t s, idle, weight, w0; int t, i; fs->w_q = fs->fs.w_q; fs->max_p = fs->fs.max_p; ND("called"); /* Doing stuff that was in userland */ i = fs->sched->link.bandwidth; s = (i <= 0) ? 
0 : - hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i; + hz * V_dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i; idle = div64((s * 3), fs->w_q); /* s, fs->w_q scaled; idle not scaled */ - fs->lookup_step = div64(idle, dn_cfg.red_lookup_depth); + fs->lookup_step = div64(idle, V_dn_cfg.red_lookup_depth); /* fs->lookup_step not scaled */ if (!fs->lookup_step) fs->lookup_step = 1; w0 = weight = SCALE(1) - fs->w_q; // fs->w_q scaled for (t = fs->lookup_step; t > 1; --t) weight = SCALE_MUL(weight, w0); fs->lookup_weight = (int)(weight); // scaled /* Now doing stuff that was in kerneland */ fs->min_th = SCALE(fs->fs.min_th); fs->max_th = SCALE(fs->fs.max_th); if (fs->fs.max_th == fs->fs.min_th) fs->c_1 = fs->max_p; else fs->c_1 = SCALE((int64_t)(fs->max_p)) / (fs->fs.max_th - fs->fs.min_th); fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th)); if (fs->fs.flags & DN_IS_GENTLE_RED) { fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th; fs->c_4 = SCALE(1) - 2 * fs->max_p; } /* If the lookup table already exists, free it and create it again. */ if (fs->w_q_lookup) { free(fs->w_q_lookup, M_DUMMYNET); fs->w_q_lookup = NULL; } - if (dn_cfg.red_lookup_depth == 0) { + if (V_dn_cfg.red_lookup_depth == 0) { printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth" " must be > 0\n"); fs->fs.flags &= ~DN_IS_RED; fs->fs.flags &= ~DN_IS_GENTLE_RED; return (EINVAL); } - fs->lookup_depth = dn_cfg.red_lookup_depth; + fs->lookup_depth = V_dn_cfg.red_lookup_depth; fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int), M_DUMMYNET, M_NOWAIT); if (fs->w_q_lookup == NULL) { printf("dummynet: sorry, cannot allocate red lookup table\n"); fs->fs.flags &= ~DN_IS_RED; fs->fs.flags &= ~DN_IS_GENTLE_RED; return (ENOSPC); } /* Fill the lookup table with (1 - w_q)^x */ fs->w_q_lookup[0] = SCALE(1) - fs->w_q; for (i = 1; i < fs->lookup_depth; i++) fs->w_q_lookup[i] = SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight); - if (dn_cfg.red_avg_pkt_size < 1) - dn_cfg.red_avg_pkt_size = 512; - fs->avg_pkt_size = dn_cfg.red_avg_pkt_size; - if (dn_cfg.red_max_pkt_size < 1) - dn_cfg.red_max_pkt_size = 1500; - fs->max_pkt_size = dn_cfg.red_max_pkt_size; + if (V_dn_cfg.red_avg_pkt_size < 1) + V_dn_cfg.red_avg_pkt_size = 512; + fs->avg_pkt_size = V_dn_cfg.red_avg_pkt_size; + if (V_dn_cfg.red_max_pkt_size < 1) + V_dn_cfg.red_max_pkt_size = 1500; + fs->max_pkt_size = V_dn_cfg.red_max_pkt_size; ND("exit"); return 0; } /* Scan all flowsets attached to this scheduler and update red */ static void update_red(struct dn_schk *s) { struct dn_fsk *fs; SLIST_FOREACH(fs, &s->fsk_list, sch_chain) { if (fs && (fs->fs.flags & DN_IS_RED)) config_red(fs); } } /* attach flowset to scheduler s, possibly requeue */ static void fsk_attach(struct dn_fsk *fs, struct dn_schk *s) { ND("remove fs %d from fsunlinked, link to sched %d", fs->fs.fs_nr, s->sch.sched_nr); - SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain); + SLIST_REMOVE(&V_dn_cfg.fsu, fs, dn_fsk, sch_chain); fs->sched = s; SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain); if (s->fp->new_fsk) s->fp->new_fsk(fs); /* XXX compute fsk_mask */ fs->fsk_mask = fs->fs.flow_mask; if (fs->sched->sch.flags & DN_HAVE_MASK) flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask); if (fs->qht) { /* * we must drain qht according to the old * type, and reinsert according to the new one. * The requeue is complex -- in general we need to * reclassify every single packet. * For the time being, let's hope qht is never set * when we reach this point.
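 *
 * A real requeue would have to drain every queue and re-run the
 * classifier, roughly as in the sketch below (hypothetical pseudo-code,
 * not implemented; 'dequeue_next', 'si_of', 'fid_of' and 'reinsert' are
 * made-up helpers):
 *
 *        struct mbuf *m;
 *        while ((m = dequeue_next(fs)) != NULL)  // drain old layout
 *                // reclassify under the new fsk_mask and reinsert
 *                reinsert(ipdn_q_find(fs, si_of(m), &fid_of(m)), m);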
*/ D("XXX TODO requeue from fs %d to sch %d", fs->fs.fs_nr, s->sch.sched_nr); fs->qht = NULL; } /* set the new type for qht */ if (nonzero_mask(&fs->fsk_mask)) fs->fs.flags |= DN_QHT_HASH; else fs->fs.flags &= ~DN_QHT_HASH; /* XXX config_red() can fail... */ if (fs->fs.flags & DN_IS_RED) config_red(fs); } /* update all flowsets which may refer to this scheduler */ static void update_fs(struct dn_schk *s) { struct dn_fsk *fs, *tmp; - SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) { + SLIST_FOREACH_SAFE(fs, &V_dn_cfg.fsu, sch_chain, tmp) { if (s->sch.sched_nr != fs->fs.sched_nr) { D("fs %d for sch %d not %d still unlinked", fs->fs.fs_nr, fs->fs.sched_nr, s->sch.sched_nr); continue; } fsk_attach(fs, s); } } #ifdef NEW_AQM /* Retrieve AQM configurations to ipfw userland */ static int get_aqm_parms(struct sockopt *sopt) { struct dn_extra_parms *ep; struct dn_fsk *fs; size_t sopt_valsize; int l, err = 0; sopt_valsize = sopt->sopt_valsize; l = sizeof(*ep); if (sopt->sopt_valsize < l) { D("bad len sopt->sopt_valsize %d len %d", (int) sopt->sopt_valsize , l); err = EINVAL; return err; } ep = malloc(l, M_DUMMYNET, M_WAITOK); if(!ep) { err = ENOMEM ; return err; } do { err = sooptcopyin(sopt, ep, l, l); if(err) break; sopt->sopt_valsize = sopt_valsize; if (ep->oid.len < l) { err = EINVAL; break; } - fs = dn_ht_find(dn_cfg.fshash, ep->nr, 0, NULL); + fs = dn_ht_find(V_dn_cfg.fshash, ep->nr, 0, NULL); if (!fs) { D("fs %d not found", ep->nr); err = EINVAL; break; } if (fs->aqmfp && fs->aqmfp->getconfig) { if(fs->aqmfp->getconfig(fs, ep)) { D("Error while trying to get AQM params"); err = EINVAL; break; } ep->oid.len = l; err = sooptcopyout(sopt, ep, l); } }while(0); free(ep, M_DUMMYNET); return err; } /* Retrieve AQM configurations to ipfw userland */ static int get_sched_parms(struct sockopt *sopt) { struct dn_extra_parms *ep; struct dn_schk *schk; size_t sopt_valsize; int l, err = 0; sopt_valsize = sopt->sopt_valsize; l = sizeof(*ep); if (sopt->sopt_valsize < l) { D("bad len sopt->sopt_valsize %d len %d", (int) sopt->sopt_valsize , l); err = EINVAL; return err; } ep = malloc(l, M_DUMMYNET, M_WAITOK); if(!ep) { err = ENOMEM ; return err; } do { err = sooptcopyin(sopt, ep, l, l); if(err) break; sopt->sopt_valsize = sopt_valsize; if (ep->oid.len < l) { err = EINVAL; break; } schk = locate_scheduler(ep->nr); if (!schk) { D("sched %d not found", ep->nr); err = EINVAL; break; } if (schk->fp && schk->fp->getconfig) { if(schk->fp->getconfig(schk, ep)) { D("Error while trying to get sched params"); err = EINVAL; break; } ep->oid.len = l; err = sooptcopyout(sopt, ep, l); } }while(0); free(ep, M_DUMMYNET); return err; } /* Configure AQM for flowset 'fs'. * extra parameters are passed from userland. 
*/ static int config_aqm(struct dn_fsk *fs, struct dn_extra_parms *ep, int busy) { int err = 0; do { /* no configurations */ if (!ep) { err = 0; break; } /* no AQM for this flowset*/ if (!strcmp(ep->name,"")) { err = 0; break; } if (ep->oid.len < sizeof(*ep)) { D("short aqm len %d", ep->oid.len); err = EINVAL; break; } if (busy) { D("Unable to configure flowset, flowset busy!"); err = EINVAL; break; } /* deconfigure old aqm if exist */ if (fs->aqmcfg && fs->aqmfp && fs->aqmfp->deconfig) { aqm_cleanup_deconfig_fs(fs); } if (!(fs->aqmfp = find_aqm_type(0, ep->name))) { D("AQM functions not found for type %s!", ep->name); fs->fs.flags &= ~DN_IS_AQM; err = EINVAL; break; } else fs->fs.flags |= DN_IS_AQM; if (ep->oid.subtype != DN_AQM_PARAMS) { D("Wrong subtype"); err = EINVAL; break; } if (fs->aqmfp->config) { err = fs->aqmfp->config(fs, ep, ep->oid.len); if (err) { D("Unable to configure AQM for FS %d", fs->fs.fs_nr ); fs->fs.flags &= ~DN_IS_AQM; fs->aqmfp = NULL; break; } } } while(0); return err; } #endif /* * Configuration -- to preserve backward compatibility we use * the following scheme (N is 65536) * NUMBER SCHED LINK FLOWSET * 1 .. N-1 (1)WFQ (2)WFQ (3)queue * N+1 .. 2N-1 (4)FIFO (5)FIFO (6)FIFO for sched 1..N-1 * 2N+1 .. 3N-1 -- -- (7)FIFO for sched N+1..2N-1 * * "pipe i config" configures #1, #2 and #3 * "sched i config" configures #1 and possibly #6 * "queue i config" configures #3 * #1 is configured with 'pipe i config' or 'sched i config' * #2 is configured with 'pipe i config', and created if not * existing with 'sched i config' * #3 is configured with 'queue i config' * #4 is automatically configured after #1, can only be FIFO * #5 is automatically configured after #2 * #6 is automatically created when #1 is !MULTIQUEUE, * and can be updated. * #7 is automatically configured after #2 */ /* * configure a link (and its FIFO instance) */ static int config_link(struct dn_link *p, struct dn_id *arg) { int i; if (p->oid.len != sizeof(*p)) { D("invalid pipe len %d", p->oid.len); return EINVAL; } i = p->link_nr; if (i <= 0 || i >= DN_MAX_ID) return EINVAL; /* * The config program passes parameters as follows: * bw = bits/second (0 means no limits), * delay = ms, must be translated into ticks. * qsize = slots/bytes * burst ??? */ p->delay = (p->delay * hz) / 1000; /* Scale burst size: bytes -> bits * hz */ p->burst *= 8 * hz; DN_BH_WLOCK(); /* do it twice, base link and FIFO link */ for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) { struct dn_schk *s = locate_scheduler(i); if (s == NULL) { DN_BH_WUNLOCK(); D("sched %d not found", i); return EINVAL; } /* remove profile if exists */ if (s->profile) { free(s->profile, M_DUMMYNET); s->profile = NULL; } /* copy all parameters */ s->link.oid = p->oid; s->link.link_nr = i; s->link.delay = p->delay; if (s->link.bandwidth != p->bandwidth) { /* XXX bandwidth changes, need to update red params */ s->link.bandwidth = p->bandwidth; update_red(s); } s->link.burst = p->burst; schk_reset_credit(s); } - dn_cfg.id++; + V_dn_cfg.id++; DN_BH_WUNLOCK(); return 0; } /* * configure a flowset. Can be called from inside with locked=1, */ static struct dn_fsk * config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked) { int i; struct dn_fsk *fs; #ifdef NEW_AQM struct dn_extra_parms *ep; #endif if (nfs->oid.len != sizeof(*nfs)) { D("invalid flowset len %d", nfs->oid.len); return NULL; } i = nfs->fs_nr; if (i <= 0 || i >= 3*DN_MAX_ID) return NULL; #ifdef NEW_AQM ep = NULL; if (arg != NULL) { ep = malloc(sizeof(*ep), M_TEMP, locked ? 
M_NOWAIT : M_WAITOK); if (ep == NULL) return (NULL); memcpy(ep, arg, sizeof(*ep)); } #endif ND("flowset %d", i); /* XXX other sanity checks */ if (nfs->flags & DN_QSIZE_BYTES) { ipdn_bound_var(&nfs->qsize, 16384, - 1500, dn_cfg.byte_limit, NULL); // "queue byte size"); + 1500, V_dn_cfg.byte_limit, NULL); // "queue byte size"); } else { ipdn_bound_var(&nfs->qsize, 50, - 1, dn_cfg.slot_limit, NULL); // "queue slot size"); + 1, V_dn_cfg.slot_limit, NULL); // "queue slot size"); } if (nfs->flags & DN_HAVE_MASK) { /* make sure we have some buckets */ - ipdn_bound_var((int *)&nfs->buckets, dn_cfg.hash_size, - 1, dn_cfg.max_hash_size, "flowset buckets"); + ipdn_bound_var((int *)&nfs->buckets, V_dn_cfg.hash_size, + 1, V_dn_cfg.max_hash_size, "flowset buckets"); } else { nfs->buckets = 1; /* we only need 1 */ } if (!locked) DN_BH_WLOCK(); do { /* exit with break when done */ struct dn_schk *s; int flags = nfs->sched_nr ? DNHT_INSERT : 0; int j; - int oldc = dn_cfg.fsk_count; - fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL); + int oldc = V_dn_cfg.fsk_count; + fs = dn_ht_find(V_dn_cfg.fshash, i, flags, NULL); if (fs == NULL) { D("missing sched for flowset %d", i); break; } /* grab some defaults from the existing one */ if (nfs->sched_nr == 0) /* reuse */ nfs->sched_nr = fs->fs.sched_nr; for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) { if (nfs->par[j] == -1) /* reuse */ nfs->par[j] = fs->fs.par[j]; } if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) { ND("flowset %d unchanged", i); #ifdef NEW_AQM if (ep != NULL) { /* * Reconfigure AQM as the parameters can be changed. * We consider the flowset as busy if it has scheduler * instance(s). */ s = locate_scheduler(nfs->sched_nr); config_aqm(fs, ep, s != NULL && s->siht != NULL); } #endif break; /* no change, nothing to do */ } - if (oldc != dn_cfg.fsk_count) /* new item */ - dn_cfg.id++; + if (oldc != V_dn_cfg.fsk_count) /* new item */ + V_dn_cfg.id++; s = locate_scheduler(nfs->sched_nr); /* detach from old scheduler if needed, preserving * queues if we need to reattach. Then update the * configuration, and possibly attach to the new sched. */ DX(2, "fs %d changed sched %d@%p to %d@%p", fs->fs.fs_nr, fs->fs.sched_nr, fs->sched, nfs->sched_nr, s); if (fs->sched) { int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY); flags |= DN_DESTROY; /* XXX temporary */ fsk_detach(fs, flags); } fs->fs = *nfs; /* copy configuration */ #ifdef NEW_AQM fs->aqmfp = NULL; if (ep != NULL) config_aqm(fs, ep, s != NULL && s->siht != NULL); #endif if (s != NULL) fsk_attach(fs, s); } while (0); if (!locked) DN_BH_WUNLOCK(); #ifdef NEW_AQM if (ep != NULL) free(ep, M_TEMP); #endif return fs; } /* * config/reconfig a scheduler and its FIFO variant. * For !MULTIQUEUE schedulers, also set up the flowset. * * On reconfigurations (detected because s->fp is set), * detach existing flowsets preserving traffic, preserve link, * and delete the old scheduler creating a new one. 
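 *
 * Worked example of the numbering scheme above (N = DN_MAX_ID = 65536):
 * 'pipe 5 config' configures scheduler 5, link 5 and flowset 5; the
 * FIFO companions are scheduler and link 65541 (5 + N), and a
 * !MULTIQUEUE scheduler 5 gets internal flowset 65541 as well
 * (flowset numbers and scheduler numbers are separate namespaces).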
*/ static int config_sched(struct dn_sch *_nsch, struct dn_id *arg) { struct dn_schk *s; struct schk_new_arg a; /* argument for schk_new */ int i; struct dn_link p; /* copy of oldlink */ struct dn_profile *pf = NULL; /* copy of old link profile */ /* Used to preserve mask parameter */ struct ipfw_flow_id new_mask; int new_buckets = 0; int new_flags = 0; int pipe_cmd; int err = ENOMEM; a.sch = _nsch; if (a.sch->oid.len != sizeof(*a.sch)) { D("bad sched len %d", a.sch->oid.len); return EINVAL; } i = a.sch->sched_nr; if (i <= 0 || i >= DN_MAX_ID) return EINVAL; /* make sure we have some buckets */ if (a.sch->flags & DN_HAVE_MASK) - ipdn_bound_var((int *)&a.sch->buckets, dn_cfg.hash_size, - 1, dn_cfg.max_hash_size, "sched buckets"); + ipdn_bound_var((int *)&a.sch->buckets, V_dn_cfg.hash_size, + 1, V_dn_cfg.max_hash_size, "sched buckets"); /* XXX other sanity checks */ bzero(&p, sizeof(p)); pipe_cmd = a.sch->flags & DN_PIPE_CMD; a.sch->flags &= ~DN_PIPE_CMD; // XXX do it even if it is not set? if (pipe_cmd) { /* Copy mask parameter */ new_mask = a.sch->sched_mask; new_buckets = a.sch->buckets; new_flags = a.sch->flags; } DN_BH_WLOCK(); again: /* run twice, for wfq and fifo */ /* * lookup the type. If not supplied, use the previous one * or default to WF2Q+. Otherwise, return an error. */ - dn_cfg.id++; + V_dn_cfg.id++; a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name); if (a.fp != NULL) { /* found. Lookup or create entry */ - s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a); + s = dn_ht_find(V_dn_cfg.schedhash, i, DNHT_INSERT, &a); } else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) { /* No type. Search existing s* or retry with WF2Q+ */ - s = dn_ht_find(dn_cfg.schedhash, i, 0, &a); + s = dn_ht_find(V_dn_cfg.schedhash, i, 0, &a); if (s != NULL) { a.fp = s->fp; /* Scheduler exists, skip to FIFO scheduler * if command was pipe config... */ if (pipe_cmd) goto next; } else { /* New scheduler, create a wf2q+ with no mask * if command was pipe config... */ if (pipe_cmd) { /* clear mask parameter */ bzero(&a.sch->sched_mask, sizeof(new_mask)); a.sch->buckets = 0; a.sch->flags &= ~DN_HAVE_MASK; } a.sch->oid.subtype = DN_SCHED_WF2QP; goto again; } } else { D("invalid scheduler type %d %s", a.sch->oid.subtype, a.sch->name); err = EINVAL; goto error; } /* normalize name and subtype */ a.sch->oid.subtype = a.fp->type; bzero(a.sch->name, sizeof(a.sch->name)); strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name)); if (s == NULL) { D("cannot allocate scheduler %d", i); goto error; } /* restore existing link if any */ if (p.link_nr) { s->link = p; if (!pf || pf->link_nr != p.link_nr) { /* no saved value */ s->profile = NULL; /* XXX maybe not needed */ } else { s->profile = malloc(sizeof(struct dn_profile), M_DUMMYNET, M_NOWAIT | M_ZERO); if (s->profile == NULL) { D("cannot allocate profile"); goto error; // XXX } memcpy(s->profile, pf, sizeof(*pf)); } } p.link_nr = 0; if (s->fp == NULL) { DX(2, "sched %d new type %s", i, a.fp->name); } else if (s->fp != a.fp || bcmp(a.sch, &s->sch, sizeof(*a.sch)) ) { /* already existing.
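 * The scheduler already exists but its type or parameters changed:
 * below we save the link (and the profile, if any), remove the schk
 * from the hash, destroy it together with its queues (see the XXX
 * note), and jump back to 'again' so a fresh scheduler is created
 * which then re-inherits the saved link and profile.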
*/ DX(2, "sched %d type changed from %s to %s", i, s->fp->name, a.fp->name); DX(4, " type/sub %d/%d -> %d/%d", s->sch.oid.type, s->sch.oid.subtype, a.sch->oid.type, a.sch->oid.subtype); if (s->link.link_nr == 0) D("XXX WARNING link 0 for sched %d", i); p = s->link; /* preserve link */ if (s->profile) {/* preserve profile */ if (!pf) pf = malloc(sizeof(*pf), M_DUMMYNET, M_NOWAIT | M_ZERO); if (pf) /* XXX should issue a warning otherwise */ memcpy(pf, s->profile, sizeof(*pf)); } /* remove from the hash */ - dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL); + dn_ht_find(V_dn_cfg.schedhash, i, DNHT_REMOVE, NULL); /* Detach flowsets, preserve queues. */ // schk_delete_cb(s, NULL); // XXX temporarily, kill queues schk_delete_cb(s, (void *)DN_DESTROY); goto again; } else { DX(4, "sched %d unchanged type %s", i, a.fp->name); } /* complete initialization */ s->sch = *a.sch; s->fp = a.fp; s->cfg = arg; // XXX schk_reset_credit(s); /* create the internal flowset if needed, * trying to reuse existing ones if available */ if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) { - s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL); + s->fs = dn_ht_find(V_dn_cfg.fshash, i, 0, NULL); if (!s->fs) { struct dn_fs fs; bzero(&fs, sizeof(fs)); set_oid(&fs.oid, DN_FS, sizeof(fs)); fs.fs_nr = i + DN_MAX_ID; fs.sched_nr = i; s->fs = config_fs(&fs, NULL, 1 /* locked */); } if (!s->fs) { schk_delete_cb(s, (void *)DN_DESTROY); D("error creating internal fs for %d", i); goto error; } } /* call init function after the flowset is created */ if (s->fp->config) s->fp->config(s); update_fs(s); next: if (i < DN_MAX_ID) { /* now configure the FIFO instance */ i += DN_MAX_ID; if (pipe_cmd) { /* Restore mask parameter for FIFO */ a.sch->sched_mask = new_mask; a.sch->buckets = new_buckets; a.sch->flags = new_flags; } else { /* sched config shouldn't modify the FIFO scheduler */ - if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) { + if (dn_ht_find(V_dn_cfg.schedhash, i, 0, &a) != NULL) { /* FIFO already exist, don't touch it */ err = 0; /* and this is not an error */ goto error; } } a.sch->sched_nr = i; a.sch->oid.subtype = DN_SCHED_FIFO; bzero(a.sch->name, sizeof(a.sch->name)); goto again; } err = 0; error: DN_BH_WUNLOCK(); if (pf) free(pf, M_DUMMYNET); return err; } /* * attach a profile to a link */ static int config_profile(struct dn_profile *pf, struct dn_id *arg) { struct dn_schk *s; int i, olen, err = 0; if (pf->oid.len < sizeof(*pf)) { D("short profile len %d", pf->oid.len); return EINVAL; } i = pf->link_nr; if (i <= 0 || i >= DN_MAX_ID) return EINVAL; /* XXX other sanity checks */ DN_BH_WLOCK(); for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) { s = locate_scheduler(i); if (s == NULL) { err = EINVAL; break; } - dn_cfg.id++; + V_dn_cfg.id++; /* * If we had a profile and the new one does not fit, * or it is deleted, then we need to free memory. */ if (s->profile && (pf->samples_no == 0 || s->profile->oid.len < pf->oid.len)) { free(s->profile, M_DUMMYNET); s->profile = NULL; } if (pf->samples_no == 0) continue; /* * new profile, possibly allocate memory * and copy data. 
*/ if (s->profile == NULL) s->profile = malloc(pf->oid.len, M_DUMMYNET, M_NOWAIT | M_ZERO); if (s->profile == NULL) { D("no memory for profile %d", i); err = ENOMEM; break; } /* preserve larger length XXX double check */ olen = s->profile->oid.len; if (olen < pf->oid.len) olen = pf->oid.len; memcpy(s->profile, pf, pf->oid.len); s->profile->oid.len = olen; } DN_BH_WUNLOCK(); return err; } /* * Delete all objects: */ static void dummynet_flush(void) { /* delete all schedulers and related links/queues/flowsets */ - dn_ht_scan(dn_cfg.schedhash, schk_delete_cb, + dn_ht_scan(V_dn_cfg.schedhash, schk_delete_cb, (void *)(uintptr_t)DN_DELETE_FS); /* delete all remaining (unlinked) flowsets */ - DX(4, "still %d unlinked fs", dn_cfg.fsk_count); - dn_ht_free(dn_cfg.fshash, DNHT_REMOVE); - fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS); + DX(4, "still %d unlinked fs", V_dn_cfg.fsk_count); + dn_ht_free(V_dn_cfg.fshash, DNHT_REMOVE); + fsk_detach_list(&V_dn_cfg.fsu, DN_DELETE_FS); /* Reinitialize system heap... */ - heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id)); + heap_init(&V_dn_cfg.evheap, 16, offsetof(struct dn_id, id)); } /* * Main handler for configuration. We are guaranteed to be called * with an oid which is at least a dn_id. * - the first object is the command (config, delete, flush, ...) * - config_link must be issued after the corresponding config_sched * - parameters (DN_TXT) for an object must precede the object * processed on a config_sched. */ int do_config(void *p, int l) { struct dn_id o; union { struct dn_profile profile; struct dn_fs fs; struct dn_link link; struct dn_sch sched; } *dn; struct dn_id *arg; uintptr_t a; int err, err2, off; memcpy(&o, p, sizeof(o)); if (o.id != DN_API_VERSION) { D("invalid api version got %d need %d", o.id, DN_API_VERSION); return EINVAL; } arg = NULL; dn = NULL; for (off = 0; l >= sizeof(o); memcpy(&o, (char *)p + off, sizeof(o))) { if (o.len < sizeof(o) || l < o.len) { D("bad len o.len %d len %d", o.len, l); err = EINVAL; break; } l -= o.len; err = 0; switch (o.type) { default: D("cmd %d not implemented", o.type); break; #ifdef EMULATE_SYSCTL /* sysctl emulation. * if we recognize the command, jump to the correct * handler and return */ case DN_SYSCTL_SET: err = kesysctl_emu_set(p, l); return err; #endif case DN_CMD_CONFIG: /* simply a header */ break; case DN_CMD_DELETE: /* the argument is in the first uintptr_t after o */ if (o.len < sizeof(o) + sizeof(a)) { err = EINVAL; break; } memcpy(&a, (char *)p + off + sizeof(o), sizeof(a)); switch (o.subtype) { case DN_LINK: /* delete base and derived schedulers */ DN_BH_WLOCK(); err = delete_schk(a); err2 = delete_schk(a + DN_MAX_ID); DN_BH_WUNLOCK(); if (!err) err = err2; break; default: D("invalid delete type %d", o.subtype); err = EINVAL; break; case DN_FS: err = (a < 1 || a >= DN_MAX_ID) ? 
EINVAL : delete_fs(a, 0) ; break; } break; case DN_CMD_FLUSH: DN_BH_WLOCK(); dummynet_flush(); DN_BH_WUNLOCK(); break; case DN_TEXT: /* store argument of next block */ if (arg != NULL) free(arg, M_TEMP); arg = malloc(o.len, M_TEMP, M_WAITOK); memcpy(arg, (char *)p + off, o.len); break; case DN_LINK: if (dn == NULL) dn = malloc(sizeof(*dn), M_TEMP, M_WAITOK); memcpy(&dn->link, (char *)p + off, sizeof(dn->link)); err = config_link(&dn->link, arg); break; case DN_PROFILE: if (dn == NULL) dn = malloc(sizeof(*dn), M_TEMP, M_WAITOK); memcpy(&dn->profile, (char *)p + off, sizeof(dn->profile)); err = config_profile(&dn->profile, arg); break; case DN_SCH: if (dn == NULL) dn = malloc(sizeof(*dn), M_TEMP, M_WAITOK); memcpy(&dn->sched, (char *)p + off, sizeof(dn->sched)); err = config_sched(&dn->sched, arg); break; case DN_FS: if (dn == NULL) dn = malloc(sizeof(*dn), M_TEMP, M_WAITOK); memcpy(&dn->fs, (char *)p + off, sizeof(dn->fs)); err = (NULL == config_fs(&dn->fs, arg, 0)); break; } if (err != 0) break; off += o.len; } if (arg != NULL) free(arg, M_TEMP); if (dn != NULL) free(dn, M_TEMP); return err; } static int compute_space(struct dn_id *cmd, struct copy_args *a) { int x = 0, need = 0; int profile_size = sizeof(struct dn_profile) - ED_MAX_SAMPLES_NO*sizeof(int); /* NOTE about compute space: - * NP = dn_cfg.schk_count - * NSI = dn_cfg.si_count - * NF = dn_cfg.fsk_count - * NQ = dn_cfg.queue_count + * NP = V_dn_cfg.schk_count + * NSI = V_dn_cfg.si_count + * NF = V_dn_cfg.fsk_count + * NQ = V_dn_cfg.queue_count * - ipfw pipe show * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler * link, scheduler template, flowset * integrated in scheduler and header * for flowset list * (NSI)*(dn_flow) all scheduler instance (includes * the queue instance) * - ipfw sched show * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler * link, scheduler template, flowset * integrated in scheduler and header * for flowset list * (NSI * dn_flow) all scheduler instances * (NF * sizeof(uint_32)) space for flowset list linked to scheduler * (NQ * dn_queue) all queue [XXXfor now not listed] * - ipfw queue show * (NF * dn_fs) all flowset * (NQ * dn_queue) all queues */ switch (cmd->subtype) { default: return -1; /* XXX where do LINK and SCH differ ? */ /* 'ipfw sched show' could list all queues associated to * a scheduler. 
This feature for now is disabled */ case DN_LINK: /* pipe show */ x = DN_C_LINK | DN_C_SCH | DN_C_FLOW; - need += dn_cfg.schk_count * + need += V_dn_cfg.schk_count * (sizeof(struct dn_fs) + profile_size) / 2; - need += dn_cfg.fsk_count * sizeof(uint32_t); + need += V_dn_cfg.fsk_count * sizeof(uint32_t); break; case DN_SCH: /* sched show */ - need += dn_cfg.schk_count * + need += V_dn_cfg.schk_count * (sizeof(struct dn_fs) + profile_size) / 2; - need += dn_cfg.fsk_count * sizeof(uint32_t); + need += V_dn_cfg.fsk_count * sizeof(uint32_t); x = DN_C_SCH | DN_C_LINK | DN_C_FLOW; break; case DN_FS: /* queue show */ x = DN_C_FS | DN_C_QUEUE; break; case DN_GET_COMPAT: /* compatibility mode */ need = dn_compat_calc_size(); break; } a->flags = x; if (x & DN_C_SCH) { - need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2; + need += V_dn_cfg.schk_count * sizeof(struct dn_sch) / 2; /* NOT also, each fs might be attached to a sched */ - need += dn_cfg.schk_count * sizeof(struct dn_id) / 2; + need += V_dn_cfg.schk_count * sizeof(struct dn_id) / 2; } if (x & DN_C_FS) - need += dn_cfg.fsk_count * sizeof(struct dn_fs); + need += V_dn_cfg.fsk_count * sizeof(struct dn_fs); if (x & DN_C_LINK) { - need += dn_cfg.schk_count * sizeof(struct dn_link) / 2; + need += V_dn_cfg.schk_count * sizeof(struct dn_link) / 2; } /* * When exporting a queue to userland, only pass up the * struct dn_flow, which is the only visible part. */ if (x & DN_C_QUEUE) - need += dn_cfg.queue_count * sizeof(struct dn_flow); + need += V_dn_cfg.queue_count * sizeof(struct dn_flow); if (x & DN_C_FLOW) - need += dn_cfg.si_count * (sizeof(struct dn_flow)); + need += V_dn_cfg.si_count * (sizeof(struct dn_flow)); return need; } /* * If compat != NULL dummynet_get is called in compatibility mode. * *compat will be the pointer to the buffer to pass to ipfw */ int dummynet_get(struct sockopt *sopt, void **compat) { int have, i, need, error; char *start = NULL, *buf; size_t sopt_valsize; struct dn_id *cmd; struct copy_args a; struct copy_range r; int l = sizeof(struct dn_id); bzero(&a, sizeof(a)); bzero(&r, sizeof(r)); /* save and restore original sopt_valsize around copyin */ sopt_valsize = sopt->sopt_valsize; cmd = &r.o; if (!compat) { /* copy at least an oid, and possibly a full object */ error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd)); sopt->sopt_valsize = sopt_valsize; if (error) goto done; l = cmd->len; #ifdef EMULATE_SYSCTL /* sysctl emulation. */ if (cmd->type == DN_SYSCTL_GET) return kesysctl_emu_get(sopt); #endif if (l > sizeof(r)) { /* request larger than default, allocate buffer */ cmd = malloc(l, M_DUMMYNET, M_WAITOK); error = sooptcopyin(sopt, cmd, l, l); sopt->sopt_valsize = sopt_valsize; if (error) goto done; } } else { /* compatibility */ error = 0; cmd->type = DN_CMD_GET; cmd->len = sizeof(struct dn_id); cmd->subtype = DN_GET_COMPAT; // cmd->id = sopt_valsize; D("compatibility mode"); } #ifdef NEW_AQM /* get AQM params */ if(cmd->subtype == DN_AQM_PARAMS) { error = get_aqm_parms(sopt); goto done; /* get Scheduler params */ } else if (cmd->subtype == DN_SCH_PARAMS) { error = get_sched_parms(sopt); goto done; } #endif a.extra = (struct copy_range *)cmd; if (cmd->len == sizeof(*cmd)) { /* no range, create a default */ uint32_t *rp = (uint32_t *)(cmd + 1); cmd->len += 2* sizeof(uint32_t); rp[0] = 1; rp[1] = DN_MAX_ID - 1; if (cmd->subtype == DN_LINK) { rp[0] += DN_MAX_ID; rp[1] += DN_MAX_ID; } } /* Count space (under lock) and allocate (outside lock). * Exit with lock held if we manage to get enough buffer. 
* Try a few times then give up. */ for (have = 0, i = 0; i < 10; i++) { DN_BH_WLOCK(); need = compute_space(cmd, &a); /* if there is a range, ignore value from compute_space() */ if (l > sizeof(*cmd)) need = sopt_valsize - sizeof(*cmd); if (need < 0) { DN_BH_WUNLOCK(); error = EINVAL; goto done; } need += sizeof(*cmd); cmd->id = need; if (have >= need) break; DN_BH_WUNLOCK(); if (start) free(start, M_DUMMYNET); start = NULL; if (need > sopt_valsize) break; have = need; start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO); } if (start == NULL) { if (compat) { *compat = NULL; error = 1; // XXX } else { error = sooptcopyout(sopt, cmd, sizeof(*cmd)); } goto done; } ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, " "%d:%d si %d, %d:%d queues %d", - dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH, - dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK, - dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS, - dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I, - dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE); + V_dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH, + V_dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK, + V_dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS, + V_dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I, + V_dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE); sopt->sopt_valsize = sopt_valsize; a.type = cmd->subtype; if (compat == NULL) { memcpy(start, cmd, sizeof(*cmd)); ((struct dn_id*)(start))->len = sizeof(struct dn_id); buf = start + sizeof(*cmd); } else buf = start; a.start = &buf; a.end = start + have; /* start copying other objects */ if (compat) { a.type = DN_COMPAT_PIPE; - dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a); + dn_ht_scan(V_dn_cfg.schedhash, copy_data_helper_compat, &a); a.type = DN_COMPAT_QUEUE; - dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a); + dn_ht_scan(V_dn_cfg.fshash, copy_data_helper_compat, &a); } else if (a.type == DN_FS) { - dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a); + dn_ht_scan(V_dn_cfg.fshash, copy_data_helper, &a); } else { - dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a); + dn_ht_scan(V_dn_cfg.schedhash, copy_data_helper, &a); } DN_BH_WUNLOCK(); if (compat) { *compat = start; sopt->sopt_valsize = buf - start; /* free() is done by ip_dummynet_compat() */ start = NULL; //XXX hack } else { error = sooptcopyout(sopt, start, buf - start); } done: if (cmd && cmd != &r.o) free(cmd, M_DUMMYNET); if (start) free(start, M_DUMMYNET); return error; } /* Callback called on scheduler instance to delete it if idle */ static int drain_scheduler_cb(void *_si, void *arg) { struct dn_sch_inst *si = _si; if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL) return 0; if (si->sched->fp->flags & DN_MULTIQUEUE) { if (si->q_count == 0) return si_destroy(si, NULL); else return 0; } else { /* !DN_MULTIQUEUE */ if ((si+1)->ni.length == 0) return si_destroy(si, NULL); else return 0; } return 0; /* unreachable */ } /* Callback called on scheduler to check if it has instances */ static int drain_scheduler_sch_cb(void *_s, void *arg) { struct dn_schk *s = _s; if (s->sch.flags & DN_HAVE_MASK) { dn_ht_scan_bucket(s->siht, &s->drain_bucket, drain_scheduler_cb, NULL); s->drain_bucket++; } else { if (s->siht) { if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL) s->siht = NULL; } } return 0; } /* Called every tick, try to delete a 'bucket' of scheduler */ void dn_drain_scheduler(void) { - dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch, + dn_ht_scan_bucket(V_dn_cfg.schedhash, &V_dn_cfg.drain_sch, drain_scheduler_sch_cb, 
NULL); - dn_cfg.drain_sch++; + V_dn_cfg.drain_sch++; } /* Callback called on queue to delete if it is idle */ static int drain_queue_cb(void *_q, void *arg) { struct dn_queue *q = _q; if (q->ni.length == 0) { dn_delete_queue(q, DN_DESTROY); return DNHT_SCAN_DEL; /* queue is deleted */ } return 0; /* queue isn't deleted */ } /* Callback called on flowset used to check if it has queues */ static int drain_queue_fs_cb(void *_fs, void *arg) { struct dn_fsk *fs = _fs; if (fs->fs.flags & DN_QHT_HASH) { /* Flowset has a hash table for queues */ dn_ht_scan_bucket(fs->qht, &fs->drain_bucket, drain_queue_cb, NULL); fs->drain_bucket++; } else { /* No hash table for this flowset, null the pointer * if the queue is deleted */ if (fs->qht) { if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL) fs->qht = NULL; } } return 0; } /* Called every tick, try to delete a 'bucket' of queue */ void dn_drain_queue(void) { /* scan a bucket of flowset */ - dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs, + dn_ht_scan_bucket(V_dn_cfg.fshash, &V_dn_cfg.drain_fs, drain_queue_fs_cb, NULL); - dn_cfg.drain_fs++; + V_dn_cfg.drain_fs++; } /* * Handler for the various dummynet socket options */ static int ip_dn_ctl(struct sockopt *sopt) { void *p = NULL; int error, l; error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET); if (error) return (error); /* Disallow sets in really-really secure mode. */ if (sopt->sopt_dir == SOPT_SET) { error = securelevel_ge(sopt->sopt_td->td_ucred, 3); if (error) return (error); } switch (sopt->sopt_name) { default : D("dummynet: unknown option %d", sopt->sopt_name); error = EINVAL; break; case IP_DUMMYNET_FLUSH: case IP_DUMMYNET_CONFIGURE: case IP_DUMMYNET_DEL: /* remove a pipe or queue */ case IP_DUMMYNET_GET: D("dummynet: compat option %d", sopt->sopt_name); error = ip_dummynet_compat(sopt); break; case IP_DUMMYNET3 : if (sopt->sopt_dir == SOPT_GET) { error = dummynet_get(sopt, NULL); break; } l = sopt->sopt_valsize; if (l < sizeof(struct dn_id) || l > 12000) { D("argument len %d invalid", l); break; } p = malloc(l, M_TEMP, M_WAITOK); // XXX can it fail ? error = sooptcopyin(sopt, p, l, l); if (error) break ; error = do_config(p, l); break; } if (p != NULL) free(p, M_TEMP); return error ; } static void -ip_dn_init(void) +ip_dn_vnet_init(void) { - if (dn_cfg.init_done) + if (V_dn_cfg.init_done) return; - dn_cfg.init_done = 1; + V_dn_cfg.init_done = 1; /* Set defaults here. MSVC does not accept initializers, * and this is also useful for vimages */ /* queue limits */ - dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */ - dn_cfg.byte_limit = 1024 * 1024; - dn_cfg.expire = 1; + V_dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */ + V_dn_cfg.byte_limit = 1024 * 1024; + V_dn_cfg.expire = 1; /* RED parameters */ - dn_cfg.red_lookup_depth = 256; /* default lookup table depth */ - dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */ - dn_cfg.red_max_pkt_size = 1500; /* default max packet size */ + V_dn_cfg.red_lookup_depth = 256; /* default lookup table depth */ + V_dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */ + V_dn_cfg.red_max_pkt_size = 1500; /* default max packet size */ /* hash tables */ - dn_cfg.max_hash_size = 65536; /* max in the hash tables */ - dn_cfg.hash_size = 64; /* default hash size */ + V_dn_cfg.max_hash_size = 65536; /* max in the hash tables */ + V_dn_cfg.hash_size = 64; /* default hash size */ /* create hash tables for schedulers and flowsets. * In both we search by key and by pointer. 
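 *
 * "By key and by pointer": the hash/match callbacks honor
 * DNHT_KEY_IS_OBJ, so the same table can be probed with the bare number
 * or with an object that carries its own number. Illustrative lookups
 * only (the second form assumes dn_ht_find() forwards its flags to the
 * callbacks, which is what schk_hash()/schk_match() expect):
 *
 *        s = locate_scheduler(5);                 // probe by number
 *        s = dn_ht_find(V_dn_cfg.schedhash,       // probe by object
 *            (uintptr_t)&s->sch, DNHT_KEY_IS_OBJ, NULL);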
*/ - dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size, + V_dn_cfg.schedhash = dn_ht_init(NULL, V_dn_cfg.hash_size, offsetof(struct dn_schk, schk_next), schk_hash, schk_match, schk_new); - dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size, + V_dn_cfg.fshash = dn_ht_init(NULL, V_dn_cfg.hash_size, offsetof(struct dn_fsk, fsk_next), fsk_hash, fsk_match, fsk_new); /* bucket index to drain object */ - dn_cfg.drain_fs = 0; - dn_cfg.drain_sch = 0; + V_dn_cfg.drain_fs = 0; + V_dn_cfg.drain_sch = 0; - heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id)); - SLIST_INIT(&dn_cfg.fsu); - SLIST_INIT(&dn_cfg.schedlist); + heap_init(&V_dn_cfg.evheap, 16, offsetof(struct dn_id, id)); + SLIST_INIT(&V_dn_cfg.fsu); DN_LOCK_INIT(); - NET_TASK_INIT(&dn_task, 0, dummynet_task, curvnet); + /* Initialize curr_time adjustment mechanics. */ + getmicrouptime(&V_dn_cfg.prev_t); +} + +static void +ip_dn_vnet_destroy(void) +{ + DN_BH_WLOCK(); + dummynet_flush(); + DN_BH_WUNLOCK(); + + dn_ht_free(V_dn_cfg.schedhash, 0); + dn_ht_free(V_dn_cfg.fshash, 0); + heap_free(&V_dn_cfg.evheap); + + DN_LOCK_DESTROY(); +} + +static void +ip_dn_init(void) +{ + if (dn_tasks_started) + return; + dn_tasks_started = 1; + TASK_INIT(&dn_task, 0, dummynet_task, NULL); dn_tq = taskqueue_create_fast("dummynet", M_WAITOK, taskqueue_thread_enqueue, &dn_tq); taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet"); + SLIST_INIT(&schedlist); callout_init(&dn_timeout, 1); dn_reschedule(); - - /* Initialize curr_time adjustment mechanics. */ - getmicrouptime(&dn_cfg.prev_t); } static void ip_dn_destroy(int last) { - DN_BH_WLOCK(); /* ensure no more callouts are started */ dn_gone = 1; /* check for last */ if (last) { ND("removing last instance\n"); ip_dn_ctl_ptr = NULL; ip_dn_io_ptr = NULL; } - dummynet_flush(); - DN_BH_WUNLOCK(); - callout_drain(&dn_timeout); taskqueue_drain(dn_tq, &dn_task); taskqueue_free(dn_tq); - - dn_ht_free(dn_cfg.schedhash, 0); - dn_ht_free(dn_cfg.fshash, 0); - heap_free(&dn_cfg.evheap); - - DN_LOCK_DESTROY(); } static int dummynet_modevent(module_t mod, int type, void *data) { if (type == MOD_LOAD) { if (ip_dn_io_ptr) { printf("DUMMYNET already loaded\n"); return EEXIST ; } ip_dn_init(); ip_dn_ctl_ptr = ip_dn_ctl; ip_dn_io_ptr = dummynet_io; return 0; } else if (type == MOD_UNLOAD) { ip_dn_destroy(1 /* last */); return 0; } else return EOPNOTSUPP; } /* modevent helpers for the modules */ static int load_dn_sched(struct dn_alg *d) { struct dn_alg *s; if (d == NULL) return 1; /* error */ ip_dn_init(); /* just in case, we need the lock */ /* Check that mandatory funcs exists */ if (d->enqueue == NULL || d->dequeue == NULL) { D("missing enqueue or dequeue for %s", d->name); return 1; } /* Search if scheduler already exists */ DN_BH_WLOCK(); - SLIST_FOREACH(s, &dn_cfg.schedlist, next) { + SLIST_FOREACH(s, &schedlist, next) { if (strcmp(s->name, d->name) == 0) { D("%s already loaded", d->name); break; /* scheduler already exists */ } } if (s == NULL) - SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next); + SLIST_INSERT_HEAD(&schedlist, d, next); DN_BH_WUNLOCK(); D("dn_sched %s %sloaded", d->name, s ? "not ":""); return s ? 1 : 0; } static int unload_dn_sched(struct dn_alg *s) { struct dn_alg *tmp, *r; int err = EINVAL; ND("called for %s", s->name); DN_BH_WLOCK(); - SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) { + SLIST_FOREACH_SAFE(r, &schedlist, next, tmp) { if (strcmp(s->name, r->name) != 0) continue; ND("ref_count = %d", r->ref_count); err = (r->ref_count != 0) ? 
EBUSY : 0; if (err == 0) - SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next); + SLIST_REMOVE(&schedlist, r, dn_alg, next); break; } DN_BH_WUNLOCK(); D("dn_sched %s %sunloaded", s->name, err ? "not ":""); return err; } int dn_sched_modevent(module_t mod, int cmd, void *arg) { struct dn_alg *sch = arg; if (cmd == MOD_LOAD) return load_dn_sched(sch); else if (cmd == MOD_UNLOAD) return unload_dn_sched(sch); else return EINVAL; } static moduledata_t dummynet_mod = { "dummynet", dummynet_modevent, NULL }; #define DN_SI_SUB SI_SUB_PROTO_FIREWALL #define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */ DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD); MODULE_DEPEND(dummynet, ipfw, 3, 3, 3); MODULE_VERSION(dummynet, 3); /* * Starting up. Done in order after dummynet_modevent() has been called. * VNET_SYSINIT is also called for each existing vnet and each new vnet. */ -//VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL); +VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_vnet_init, NULL); /* * Shutdown handlers up shop. These are done in REVERSE ORDER, but still * after dummynet_modevent() has been called. Not called on reboot. * VNET_SYSUNINIT is also called for each exiting vnet as it exits. * or when the module is unloaded. */ -//VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL); +VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_vnet_destroy, NULL); #ifdef NEW_AQM /* modevent helpers for the AQM modules */ static int load_dn_aqm(struct dn_aqm *d) { struct dn_aqm *aqm=NULL; if (d == NULL) return 1; /* error */ ip_dn_init(); /* just in case, we need the lock */ /* Check that mandatory funcs exists */ if (d->enqueue == NULL || d->dequeue == NULL) { D("missing enqueue or dequeue for %s", d->name); return 1; } /* Search if AQM already exists */ - DN_BH_WLOCK(); - SLIST_FOREACH(aqm, &dn_cfg.aqmlist, next) { + DN_BH_WLOCK(); /* XXX Global lock? */ + SLIST_FOREACH(aqm, &aqmlist, next) { if (strcmp(aqm->name, d->name) == 0) { D("%s already loaded", d->name); break; /* AQM already exists */ } } if (aqm == NULL) - SLIST_INSERT_HEAD(&dn_cfg.aqmlist, d, next); + SLIST_INSERT_HEAD(&aqmlist, d, next); DN_BH_WUNLOCK(); D("dn_aqm %s %sloaded", d->name, aqm ? "not ":""); return aqm ? 1 : 0; } /* Callback to clean up AQM status for queues connected to a flowset * and then deconfigure the flowset. * This function is called before an AQM module is unloaded */ static int fs_cleanup(void *_fs, void *arg) { struct dn_fsk *fs = _fs; uint32_t type = *(uint32_t *)arg; if (fs->aqmfp && fs->aqmfp->type == type) aqm_cleanup_deconfig_fs(fs); return 0; } static int unload_dn_aqm(struct dn_aqm *aqm) { struct dn_aqm *tmp, *r; int err = EINVAL; err = 0; ND("called for %s", aqm->name); DN_BH_WLOCK(); /* clean up AQM status and deconfig flowset */ - dn_ht_scan(dn_cfg.fshash, fs_cleanup, &aqm->type); + dn_ht_scan(V_dn_cfg.fshash, fs_cleanup, &aqm->type); - SLIST_FOREACH_SAFE(r, &dn_cfg.aqmlist, next, tmp) { + SLIST_FOREACH_SAFE(r, &aqmlist, next, tmp) { if (strcmp(aqm->name, r->name) != 0) continue; ND("ref_count = %d", r->ref_count); err = (r->ref_count != 0 || r->cfg_ref_count != 0) ? EBUSY : 0; if (err == 0) - SLIST_REMOVE(&dn_cfg.aqmlist, r, dn_aqm, next); + SLIST_REMOVE(&aqmlist, r, dn_aqm, next); break; } DN_BH_WUNLOCK(); D("%s %sunloaded", aqm->name, err ? 
"not ":""); if (err) D("ref_count=%d, cfg_ref_count=%d", r->ref_count, r->cfg_ref_count); return err; } int dn_aqm_modevent(module_t mod, int cmd, void *arg) { struct dn_aqm *aqm = arg; if (cmd == MOD_LOAD) return load_dn_aqm(aqm); else if (cmd == MOD_UNLOAD) return unload_dn_aqm(aqm); else return EINVAL; } #endif /* end of file */