diff --git a/sys/netpfil/ipfw/dn_aqm_codel.c b/sys/netpfil/ipfw/dn_aqm_codel.c index 79c6afd8b635..2f6d145485c6 100644 --- a/sys/netpfil/ipfw/dn_aqm_codel.c +++ b/sys/netpfil/ipfw/dn_aqm_codel.c @@ -1,443 +1,449 @@ /* * Codel - The Controlled-Delay Active Queue Management algorithm. * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #ifdef NEW_AQM #include #include #include #include #include #define DN_AQM_CODEL 1 static struct dn_aqm codel_desc; /* default codel parameters */ struct dn_aqm_codel_parms codel_sysctl = {5000 * AQM_TIME_1US, 100000 * AQM_TIME_1US, 0}; static int codel_sysctl_interval_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = codel_sysctl.interval; value /= AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 100 * AQM_TIME_1S) return (EINVAL); codel_sysctl.interval = value * AQM_TIME_1US; return (0); } static int codel_sysctl_target_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = codel_sysctl.target; value /= AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); D("%ld", value); if (value < 1 || value > 5 * AQM_TIME_1S) return (EINVAL); codel_sysctl.target = value * AQM_TIME_1US; return (0); } /* define CoDel sysctl variables */ SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, codel, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "CODEL"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_codel, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, codel_sysctl_target_handler, "L", "CoDel target in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_codel, OID_AUTO, interval, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, codel_sysctl_interval_handler, "L", "CoDel interval in microseconds"); #endif /* This function computes codel_interval/sqrt(count). * Newton's method of approximation is used to compute 1/sqrt(count). * http://betterexplained.com/articles/ * understanding-quakes-fast-inverse-square-root/ */ aqm_time_t control_law(struct codel_status *cst, struct dn_aqm_codel_parms *cprms, aqm_time_t t) { uint32_t count; uint64_t temp; count = cst->count; /* we don't calculate isqrt(1), to get a more accurate result */ if (count == 1) { /* prepare isqrt (old guess) for the next iteration, i.e. 1/sqrt(2) */ cst->isqrt = (1UL << FIX_POINT_BITS) * 7/10; /* return time + isqrt(1)*interval */ return t + cprms->interval; } /* newguess = g(1.5 - 0.5*c*g^2) * Multiplying both sides by 2 makes all the constants integers: * newguess * 2 = g(3 - c*g^2), with g = old guess, c = count, * so the result must be divided by 2 at the end. * Fixed point operations are used here. */ /* Calculate g^2 */ temp = (uint32_t) cst->isqrt * cst->isqrt; /* Calculate (3 - c*g^2) i.e. (3 - c * temp) */ temp = (3ULL << (FIX_POINT_BITS*2)) - (count * temp); /* * Divide by 2 because we multiplied the original equation by two. * Also, we shift the result by 8 bits to prevent overflow. */ temp >>= (1 + 8); /* Now, temp = (1.5 - 0.5*c*g^2); * calculate g * (1.5 - 0.5*c*g^2), i.e. g * temp */ temp = (cst->isqrt * temp) >> (FIX_POINT_BITS + FIX_POINT_BITS - 8); cst->isqrt = temp; /* calculate codel_interval/sqrt(count) */ return t + ((cprms->interval * temp) >> FIX_POINT_BITS); }
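/*
 * A userland sketch (not part of the diff) of the fixed-point Newton step
 * in control_law() above, so the arithmetic can be checked outside the
 * kernel.  FIX_POINT_BITS is assumed to be 16 here (its real value lives
 * in dn_aqm_codel.h); one Newton step is applied per count, exactly as
 * the kernel does one step per invocation of control_law().
 */
#include <stdint.h>
#include <stdio.h>

#define FIX_POINT_BITS 16	/* assumed; see dn_aqm_codel.h */

int main(void)
{
	uint64_t isqrt = (1UL << FIX_POINT_BITS) * 7 / 10; /* seed: ~1/sqrt(2) */
	uint64_t temp;
	uint32_t count;

	for (count = 2; count <= 6; count++) {
		temp = isqrt * isqrt;			/* g^2 */
		temp = (3ULL << (FIX_POINT_BITS * 2)) - (count * temp);
		temp >>= (1 + 8);			/* /2 plus 8-bit guard shift */
		temp = (isqrt * temp) >> (FIX_POINT_BITS + FIX_POINT_BITS - 8);
		isqrt = temp;
		printf("count=%u approx 1/sqrt = %.4f\n", count,
		    (double)isqrt / (1UL << FIX_POINT_BITS));
	}
	return (0);
}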
/* * Extract a packet from the head of queue 'q'. * Return a packet or NULL if the queue is empty. * Also extract the packet's timestamp from its mtag. */ struct mbuf * codel_extract_head(struct dn_queue *q, aqm_time_t *pkt_ts) { struct m_tag *mtag; - struct mbuf *m = q->mq.head; + struct mbuf *m; +next: m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; /* Update stats */ update_stats(q, -m->m_pkthdr.len, 0); if (q->ni.length == 0) /* queue is now idle */ q->q_time = V_dn_cfg.curr_time; /* extract packet TS */ mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) { D("Codel timestamp mtag not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } + if (m->m_pkthdr.rcvif != NULL && + __predict_false(m_rcvif_restore(m) == NULL)) { + m_freem(m); + goto next; + } return m; } /* * Enqueue a packet 'm' in queue 'q' */ static int aqm_codel_enqueue(struct dn_queue *q, struct mbuf *m) { struct dn_fs *f; uint64_t len; struct codel_status *cst; /* codel status variables */ struct m_tag *mtag; f = &(q->fs->fs); len = m->m_pkthdr.len; cst = q->aqm_status; if(!cst) { D("Codel queue is not initialized\n"); goto drop; } /* Track the maximum packet size */ // XXX we can get MTU from driver instead if (len > cst->maxpkt_size) cst->maxpkt_size = len; /* check the queue size and drop the tail if it exceeds the queue limit */ if (f->flags & DN_QSIZE_BYTES) { if ( q->ni.len_bytes > f->qsize) goto drop; } else { if ( q->ni.length >= f->qsize) goto drop; } /* Add timestamp as mtag */ mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) goto drop; *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); mq_append(&q->mq, m); update_stats(q, len, 0); return (0); drop: update_stats(q, 0, 1); FREE_PKT(m); return (1); } /* Dequeue a packet from queue q */ static struct mbuf * aqm_codel_dequeue(struct dn_queue *q) { return codel_dequeue(q); } /* * initialize Codel for queue 'q' * First allocate memory for codel status. */ static int aqm_codel_init(struct dn_queue *q) { struct codel_status *cst; if (!q->fs->aqmcfg) { D("Codel is not configured!"); return EINVAL; } q->aqm_status = malloc(sizeof(struct codel_status), M_DUMMYNET, M_NOWAIT | M_ZERO); if (q->aqm_status == NULL) { D("Cannot allocate AQM_codel private data"); return ENOMEM; } /* init codel status variables */ cst = q->aqm_status; cst->dropping=0; cst->first_above_time=0; cst->drop_next_time=0; cst->count=0; cst->maxpkt_size = 500; /* increase reference counters */ codel_desc.ref_count++; return 0; }
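/*
 * The block added to codel_extract_head() above (and repeated in every
 * dequeue path in this diff) handles packets whose receive interface was
 * serialized while they sat in the queue: m_rcvif_restore() maps the
 * saved ifnet index back to a pointer and returns NULL if the interface
 * has been destroyed in the meantime, in which case the packet must be
 * freed and the scan restarted at the new head.  A minimal sketch of the
 * pattern, with a hypothetical helper name:
 */
struct mbuf *
extract_head_sketch(struct mq *q)
{
	struct mbuf *m;

next:
	m = q->head;
	if (m == NULL)
		return (NULL);
	q->head = m->m_nextpkt;
	/* per-scheduler stats/timestamp handling elided */
	if (m->m_pkthdr.rcvif != NULL &&
	    __predict_false(m_rcvif_restore(m) == NULL)) {
		m_freem(m);	/* stale interface: drop and retry */
		goto next;
	}
	return (m);
}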
/* * Clean up Codel status for queue 'q'. * Destroy memory allocated for codel status. */ static int aqm_codel_cleanup(struct dn_queue *q) { if (q && q->aqm_status) { free(q->aqm_status, M_DUMMYNET); q->aqm_status = NULL; /* decrease reference counters */ codel_desc.ref_count--; } else D("Codel already cleaned up"); return 0; } /* * Configure Codel parameters; * also allocate memory for the Codel configuration. */ static int aqm_codel_config(struct dn_fsk* fs, struct dn_extra_parms *ep, int len) { struct dn_aqm_codel_parms *ccfg; int l = sizeof(struct dn_extra_parms); if (len < l) { D("invalid sched parms length got %d need %d", len, l); return EINVAL; } /* we free the old cfg because the original allocation may not be * the same size as the new one (different AQM type). */ if (fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; } fs->aqmcfg = malloc(sizeof(struct dn_aqm_codel_parms), M_DUMMYNET, M_NOWAIT | M_ZERO); if (fs->aqmcfg == NULL) { D("cannot allocate AQM_codel configuration parameters"); return ENOMEM; } /* configure codel parameters */ ccfg = fs->aqmcfg; if (ep->par[0] < 0) ccfg->target = codel_sysctl.target; else ccfg->target = ep->par[0] * AQM_TIME_1US; if (ep->par[1] < 0) ccfg->interval = codel_sysctl.interval; else ccfg->interval = ep->par[1] * AQM_TIME_1US; if (ep->par[2] < 0) ccfg->flags = 0; else ccfg->flags = ep->par[2]; /* bound codel configurations */ ccfg->target = BOUND_VAR(ccfg->target, 1, 5 * AQM_TIME_1S); ccfg->interval = BOUND_VAR(ccfg->interval, 1, 5 * AQM_TIME_1S); /* increase config reference counter */ codel_desc.cfg_ref_count++; return 0; } /* * Deconfigure Codel and free allocated memory. */ static int aqm_codel_deconfig(struct dn_fsk* fs) { if (fs && fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; fs->aqmfp = NULL; /* decrease config reference counter */ codel_desc.cfg_ref_count--; } return 0; } /* * Retrieve Codel configuration parameters. */ static int aqm_codel_getconfig(struct dn_fsk *fs, struct dn_extra_parms *ep) { struct dn_aqm_codel_parms *ccfg; if (fs->aqmcfg) { strlcpy(ep->name, codel_desc.name, sizeof(ep->name)); ccfg = fs->aqmcfg; ep->par[0] = ccfg->target / AQM_TIME_1US; ep->par[1] = ccfg->interval / AQM_TIME_1US; ep->par[2] = ccfg->flags; return 0; } return 1; } static struct dn_aqm codel_desc = { _SI( .type = ) DN_AQM_CODEL, _SI( .name = ) "CODEL", _SI( .enqueue = ) aqm_codel_enqueue, _SI( .dequeue = ) aqm_codel_dequeue, _SI( .config = ) aqm_codel_config, _SI( .getconfig = ) aqm_codel_getconfig, _SI( .deconfig = ) aqm_codel_deconfig, _SI( .init = ) aqm_codel_init, _SI( .cleanup = ) aqm_codel_cleanup, }; DECLARE_DNAQM_MODULE(dn_aqm_codel, &codel_desc); #endif diff --git a/sys/netpfil/ipfw/dn_aqm_pie.c b/sys/netpfil/ipfw/dn_aqm_pie.c index 746b9f031220..5eadf08a4860 100644 --- a/sys/netpfil/ipfw/dn_aqm_pie.c +++ b/sys/netpfil/ipfw/dn_aqm_pie.c @@ -1,807 +1,813 @@ /* * PIE - Proportional Integral controller Enhanced AQM algorithm. * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #ifdef NEW_AQM #include #include #include #include #include /* for debugging */ #include static struct dn_aqm pie_desc; /* PIE defaults * target=15ms, tupdate=15ms, max_burst=150ms, * max_ecnth=0.1, alpha=0.125, beta=1.25, */ struct dn_aqm_pie_parms pie_sysctl = { 15 * AQM_TIME_1MS, 15 * AQM_TIME_1MS, 150 * AQM_TIME_1MS, PIE_SCALE/10 , PIE_SCALE * 0.125, PIE_SCALE * 1.25 , PIE_CAPDROP_ENABLED | PIE_DEPRATEEST_ENABLED | PIE_DERAND_ENABLED }; static int pie_sysctl_alpha_beta_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"alpha")) value = pie_sysctl.alpha; else value = pie_sysctl.beta; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 7 * PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; if (!strcmp(oidp->oid_name,"alpha")) pie_sysctl.alpha = value; else pie_sysctl.beta = value; return (0); } static int pie_sysctl_target_tupdate_maxb_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"target")) value = pie_sysctl.qdelay_ref; else if (!strcmp(oidp->oid_name,"tupdate")) value = pie_sysctl.tupdate; else value = pie_sysctl.max_burst; value = value / AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 10 * AQM_TIME_1S) return (EINVAL); value = value * AQM_TIME_1US; if (!strcmp(oidp->oid_name,"target")) pie_sysctl.qdelay_ref = value; else if (!strcmp(oidp->oid_name,"tupdate")) pie_sysctl.tupdate = value; else pie_sysctl.max_burst = value; return (0); } static int pie_sysctl_max_ecnth_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = pie_sysctl.max_ecnth; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; pie_sysctl.max_ecnth = value; return (0); } /* define PIE sysctl variables */ SYSBEGIN(f4) SYSCTL_DECL(_net_inet); 
SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, pie, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "PIE"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_target_tupdate_maxb_handler, "L", "queue target in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, tupdate, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_target_tupdate_maxb_handler, "L", "the frequency of drop probability calculation in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, max_burst, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_target_tupdate_maxb_handler, "L", "Burst allowance interval in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, max_ecnth, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_max_ecnth_handler, "L", "ECN safeguard threshold scaled by 1000"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, alpha, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_alpha_beta_handler, "L", "PIE alpha scaled by 1000"); SYSCTL_PROC(_net_inet_ip_dummynet_pie, OID_AUTO, beta, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, pie_sysctl_alpha_beta_handler, "L", "beta scaled by 1000"); #endif
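/*
 * How the alpha/beta handlers above convert between the user-visible
 * milli-units and the kernel's fixed-point representation; a userland
 * sketch, assuming PIE_FIX_POINT_BITS is 13 as defined in dn_aqm_pie.h.
 */
#include <stdio.h>

#define PIE_FIX_POINT_BITS 13	/* assumed; see dn_aqm_pie.h */
#define PIE_SCALE (1L << PIE_FIX_POINT_BITS)

int main(void)
{
	long user = 125;			/* alpha = 0.125, written as 125 */
	long fixp = user * PIE_SCALE / 1000;	/* stored: 1024 */
	long back = fixp * 1000 / PIE_SCALE;	/* read back: 125 */

	printf("user=%ld fixed-point=%ld read-back=%ld\n", user, fixp, back);
	return (0);
}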
/* * Callout function for drop probability calculation. * This function is called every tupdate interval and takes a pointer * to the PIE status variables as its argument. */ static void calculate_drop_prob(void *x) { int64_t p, prob, oldprob; struct dn_aqm_pie_parms *pprms; struct pie_status *pst = (struct pie_status *) x; int p_isneg; pprms = pst->parms; prob = pst->drop_prob; /* calculate the current qdelay using the DRE method. * If TS is used and there is no data in the queue, reset current_qdelay * as it stays at the last value during the dequeue process. */ if (pprms->flags & PIE_DEPRATEEST_ENABLED) pst->current_qdelay = ((uint64_t)pst->pq->ni.len_bytes * pst->avg_dq_time) >> PIE_DQ_THRESHOLD_BITS; else if (!pst->pq->ni.len_bytes) pst->current_qdelay = 0; /* calculate drop probability */ p = (int64_t)pprms->alpha * ((int64_t)pst->current_qdelay - (int64_t)pprms->qdelay_ref); p += (int64_t)pprms->beta * ((int64_t)pst->current_qdelay - (int64_t)pst->qdelay_old); /* take the absolute value so the right shift result is well defined */ p_isneg = p < 0; if (p_isneg) { p = -p; } /* shift PIE_MAX_PROB by 12 bits to increase the division precision */ p *= (PIE_MAX_PROB << 12) / AQM_TIME_1S; /* auto-tune drop probability */ if (prob < (PIE_MAX_PROB / 1000000)) /* 0.000001 */ p >>= 11 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100000)) /* 0.00001 */ p >>= 9 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10000)) /* 0.0001 */ p >>= 7 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 1000)) /* 0.001 */ p >>= 5 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100)) /* 0.01 */ p >>= 3 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10)) /* 0.1 */ p >>= 1 + PIE_FIX_POINT_BITS + 12; else p >>= PIE_FIX_POINT_BITS + 12; oldprob = prob; if (p_isneg) { prob = prob - p; /* check for multiplication underflow */ if (prob > oldprob) { prob = 0; D("underflow"); } } else { /* Cap Drop adjustment */ if ((pprms->flags & PIE_CAPDROP_ENABLED) && prob >= PIE_MAX_PROB / 10 && p > PIE_MAX_PROB / 50 ) { p = PIE_MAX_PROB / 50; } prob = prob + p; /* check for multiplication overflow */ if (prob < oldprob) { D("overflow"); prob = PIE_MAX_PROB; } } /* * decay the drop probability exponentially * and restrict it to the range 0 to PIE_MAX_PROB */ if (prob < 0) { prob = 0; } else { if (pst->current_qdelay == 0 && pst->qdelay_old == 0) { /* 0.98 ~= 1 - 1/64 */ prob = prob - (prob >> 6); } if (prob > PIE_MAX_PROB) { prob = PIE_MAX_PROB; } } pst->drop_prob = prob; /* store the current queue delay value in the old queue delay */ pst->qdelay_old = pst->current_qdelay; /* update burst allowance */ if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance > 0) { if (pst->burst_allowance > pprms->tupdate) pst->burst_allowance -= pprms->tupdate; else pst->burst_allowance = 0; } /* reschedule the calculate_drop_prob function */ if (pst->sflags & PIE_ACTIVE) callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, calculate_drop_prob, pst, 0); mtx_unlock(&pst->lock_mtx); }
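/*
 * The update above is a Proportional Integral controller in fixed point:
 * drop probability grows with the distance of the queue delay from the
 * target (alpha term) and with its rate of change (beta term).  The same
 * rule in plain floating point, using the PIE defaults, purely for
 * illustration:
 */
#include <stdio.h>

int main(void)
{
	double alpha = 0.125, beta = 1.25;	/* PIE defaults */
	double target = 0.015;			/* 15 ms */
	double qdelay_old = 0.015, prob = 0.0;
	double samples[] = { 0.020, 0.030, 0.030, 0.020, 0.010 };

	for (int i = 0; i < 5; i++) {
		double qdelay = samples[i];
		prob += alpha * (qdelay - target) +
		    beta * (qdelay - qdelay_old);
		if (prob < 0)
			prob = 0;
		qdelay_old = qdelay;
		printf("qdelay=%.3f -> drop_prob=%.5f\n", qdelay, prob);
	}
	return (0);
}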
/* * Extract a packet from the head of queue 'q'. * Return a packet or NULL if the queue is empty. * If getts is set, also extract the packet's timestamp from its mtag. */ static struct mbuf * pie_extract_head(struct dn_queue *q, aqm_time_t *pkt_ts, int getts) { struct m_tag *mtag; - struct mbuf *m = q->mq.head; + struct mbuf *m; +next: m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; /* Update stats */ update_stats(q, -m->m_pkthdr.len, 0); if (q->ni.length == 0) /* queue is now idle */ q->q_time = V_dn_cfg.curr_time; if (getts) { /* extract packet TS */ mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) { D("PIE timestamp mtag not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } } + if (m->m_pkthdr.rcvif != NULL && + __predict_false(m_rcvif_restore(m) == NULL)) { + m_freem(m); + goto next; + } return m; } /* * Initialize PIE variables and optionally activate PIE */ __inline static void init_activate_pie(struct pie_status *pst, int resettimer) { struct dn_aqm_pie_parms *pprms; mtx_lock(&pst->lock_mtx); pprms = pst->parms; pst->drop_prob = 0; pst->qdelay_old = 0; pst->burst_allowance = pprms->max_burst; pst->accu_prob = 0; pst->dq_count = 0; pst->avg_dq_time = 0; pst->sflags = PIE_INMEASUREMENT; pst->measurement_start = AQM_UNOW; if (resettimer) { pst->sflags |= PIE_ACTIVE; callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, calculate_drop_prob, pst, 0); } //DX(2, "PIE Activated"); mtx_unlock(&pst->lock_mtx); } /* * Deactivate PIE and stop the probability update callout */ __inline static void deactivate_pie(struct pie_status *pst) { mtx_lock(&pst->lock_mtx); pst->sflags &= ~(PIE_ACTIVE | PIE_INMEASUREMENT); callout_stop(&pst->aqm_pie_callout); //D("PIE Deactivated"); mtx_unlock(&pst->lock_mtx); } /* * Dequeue and return a packet from queue 'q', or NULL if 'q' is empty. * Also calculate the departure time or queue delay using the timestamp. */ static struct mbuf * aqm_pie_dequeue(struct dn_queue *q) { struct mbuf *m; struct dn_aqm_pie_parms *pprms; struct pie_status *pst; aqm_time_t now; aqm_time_t pkt_ts, dq_time; int32_t w; pst = q->aqm_status; pprms = pst->parms; /* we extract the packet ts only when Departure Rate Estimation is not used */ m = pie_extract_head(q, &pkt_ts, !(pprms->flags & PIE_DEPRATEEST_ENABLED)); if (!m || !(pst->sflags & PIE_ACTIVE)) return m; now = AQM_UNOW; if (pprms->flags & PIE_DEPRATEEST_ENABLED) { /* calculate the average departure time */ if(pst->sflags & PIE_INMEASUREMENT) { pst->dq_count += m->m_pkthdr.len; if (pst->dq_count >= PIE_DQ_THRESHOLD) { dq_time = now - pst->measurement_start; /* * if we don't have an old avg dq_time, i.e. PIE has just been * (re)initialized, don't use the weight to calculate the new * avg_dq_time */ if(pst->avg_dq_time == 0) pst->avg_dq_time = dq_time; else { /* * weight = PIE_DQ_THRESHOLD/2^6, but we scale the * weight by 2^8; thus, the scaled * weight = PIE_DQ_THRESHOLD/2^8 */ w = PIE_DQ_THRESHOLD >> 8; pst->avg_dq_time = (dq_time * w + (pst->avg_dq_time * ((1L << 8) - w))) >> 8; pst->sflags &= ~PIE_INMEASUREMENT; } } } /* * Start a new measurement cycle when the queue has * PIE_DQ_THRESHOLD worth of bytes. */ if(!(pst->sflags & PIE_INMEASUREMENT) && q->ni.len_bytes >= PIE_DQ_THRESHOLD) { pst->sflags |= PIE_INMEASUREMENT; pst->measurement_start = now; pst->dq_count = 0; } } /* Otherwise use the packet timestamp to estimate the queue delay */ else pst->current_qdelay = now - pkt_ts; return m; }
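/*
 * The departure rate estimator in aqm_pie_dequeue() above times how long
 * the queue takes to drain PIE_DQ_THRESHOLD bytes and smooths that with a
 * fixed-point EWMA.  A sketch of just the smoothing arithmetic, assuming
 * PIE_DQ_THRESHOLD_BITS is 14 (a 16 KiB threshold, giving a weight of
 * 64/256 = 0.25); the real constants live in dn_aqm_pie.h.
 */
#include <stdint.h>
#include <stdio.h>

#define PIE_DQ_THRESHOLD_BITS 14	/* assumed; see dn_aqm_pie.h */
#define PIE_DQ_THRESHOLD (1L << PIE_DQ_THRESHOLD_BITS)

int main(void)
{
	int64_t avg_dq_time = 0;
	int32_t w = PIE_DQ_THRESHOLD >> 8;	/* weight, scaled by 2^8 */
	int64_t samples[] = { 4000, 5000, 3000, 4500 };	/* us per threshold */

	for (int i = 0; i < 4; i++) {
		int64_t dq_time = samples[i];
		if (avg_dq_time == 0)	/* first measurement: no history yet */
			avg_dq_time = dq_time;
		else
			avg_dq_time = (dq_time * w +
			    avg_dq_time * ((1L << 8) - w)) >> 8;
		printf("sample=%lld avg_dq_time=%lld\n",
		    (long long)dq_time, (long long)avg_dq_time);
	}
	return (0);
}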
/* * Enqueue a packet in q, subject to space and PIE queue management policy * (whose parameters are in q->fs). * Update stats for the queue and the scheduler. * Return 0 on success, 1 on drop. The packet is consumed anyway. */ static int aqm_pie_enqueue(struct dn_queue *q, struct mbuf* m) { struct dn_fs *f; uint64_t len; uint32_t qlen; struct pie_status *pst; struct dn_aqm_pie_parms *pprms; int t; len = m->m_pkthdr.len; pst = q->aqm_status; if(!pst) { DX(2, "PIE queue is not initialized\n"); update_stats(q, 0, 1); FREE_PKT(m); return 1; } f = &(q->fs->fs); pprms = pst->parms; t = ENQUE; /* get the current queue length in bytes or packets */ qlen = (f->flags & DN_QSIZE_BYTES) ? q->ni.len_bytes : q->ni.length; /* check the queue size and drop the tail if it exceeds the queue limit */ if (qlen >= f->qsize) t = DROP; /* drop/mark the packet when PIE is active and the burst time has elapsed */ else if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance == 0 && drop_early(pst, q->ni.len_bytes) == DROP) { /* * if drop_prob is over the ECN threshold, drop the packet; * otherwise mark and enqueue it. */ if ((pprms->flags & PIE_ECN_ENABLED) && pst->drop_prob < (pprms->max_ecnth << (PIE_PROB_BITS - PIE_FIX_POINT_BITS)) && ecn_mark(m)) t = ENQUE; else t = DROP; } /* Turn PIE on when 1/3 of the queue is full */ if (!(pst->sflags & PIE_ACTIVE) && qlen >= pst->one_third_q_size) { init_activate_pie(pst, 1); } /* Reset the burst tolerance and optionally turn PIE off */ if ((pst->sflags & PIE_ACTIVE) && pst->drop_prob == 0 && pst->current_qdelay < (pprms->qdelay_ref >> 1) && pst->qdelay_old < (pprms->qdelay_ref >> 1)) { pst->burst_allowance = pprms->max_burst; if ((pprms->flags & PIE_ON_OFF_MODE_ENABLED) && qlen <= 0) deactivate_pie(pst); } /* Timestamp the packet if Departure Rate Estimation is disabled */ if (t != DROP && !(pprms->flags & PIE_DEPRATEEST_ENABLED)) { /* Add TS to mbuf as a TAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) { t = DROP; } else { *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); } } if (t != DROP) { mq_append(&q->mq, m); update_stats(q, len, 0); return (0); } else { update_stats(q, 0, 1); /* reset accu_prob after a packet drop */ pst->accu_prob = 0; FREE_PKT(m); return 1; } return 0; }
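/*
 * drop_early() is defined in dn_aqm_pie.h and is not part of this diff.
 * A simplified sketch of the derandomized coin flip it performs (the
 * burst and queue-length safeguards are omitted; the 0.85/8.5
 * accumulation bounds follow RFC 8033, and the probability scale here is
 * illustrative):
 */
#include <stdint.h>
#include <stdlib.h>

#define MAX_PROB ((1U << 31) - 1)	/* illustrative scale */
enum { ENQUE = 0, DROP = 1 };

static uint64_t accu_prob;	/* probability accumulated since last drop */

static int
drop_early_sketch(uint32_t drop_prob)
{
	accu_prob += drop_prob;
	if (accu_prob < (uint64_t)(MAX_PROB * 0.85))
		return (ENQUE);		/* too soon to drop again */
	if (accu_prob >= (uint64_t)(MAX_PROB * 8.5))
		return (DROP);		/* overdue for a drop */
	if ((uint32_t)random() < drop_prob) {	/* Bernoulli trial */
		accu_prob = 0;
		return (DROP);
	}
	return (ENQUE);
}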
/* * Initialize PIE for queue 'q'. * First allocate memory for the PIE status. */ static int aqm_pie_init(struct dn_queue *q) { struct pie_status *pst; struct dn_aqm_pie_parms *pprms; int err = 0; pprms = q->fs->aqmcfg; do { /* exit with break when an error occurs */ if (!pprms){ DX(2, "AQM_PIE is not configured"); err = EINVAL; break; } q->aqm_status = malloc(sizeof(struct pie_status), M_DUMMYNET, M_NOWAIT | M_ZERO); if (q->aqm_status == NULL) { D("cannot allocate PIE private data"); err = ENOMEM; break; } pst = q->aqm_status; /* increase the reference count for the PIE module */ pie_desc.ref_count++; pst->pq = q; pst->parms = pprms; /* For speed optimization, we calculate 1/3 of the queue size once here */ // we can use x/3 = (x >>2) + (x >>4) + (x >>7) pst->one_third_q_size = q->fs->fs.qsize/3; mtx_init(&pst->lock_mtx, "mtx_pie", NULL, MTX_DEF); callout_init_mtx(&pst->aqm_pie_callout, &pst->lock_mtx, CALLOUT_RETURNUNLOCKED); pst->current_qdelay = 0; init_activate_pie(pst, !(pprms->flags & PIE_ON_OFF_MODE_ENABLED)); //DX(2, "aqm_PIE_init"); } while(0); return err; } /* * Callout function to destroy the pie mtx and free the PIE status memory */ static void pie_callout_cleanup(void *x) { struct pie_status *pst = (struct pie_status *) x; mtx_unlock(&pst->lock_mtx); mtx_destroy(&pst->lock_mtx); free(x, M_DUMMYNET); DN_BH_WLOCK(); pie_desc.ref_count--; DN_BH_WUNLOCK(); } /* * Clean up the PIE status for queue 'q'. * Destroy memory allocated for the PIE status. */ static int aqm_pie_cleanup(struct dn_queue *q) { if(!q) { D("q is null"); return 0; } struct pie_status *pst = q->aqm_status; if(!pst) { //D("queue is already cleaned up"); return 0; } if(!q->fs || !q->fs->aqmcfg) { D("fs is null or no cfg"); return 1; } if (q->fs->aqmfp && q->fs->aqmfp->type != DN_AQM_PIE) { D("Not PIE fs (%d)", q->fs->fs.fs_nr); return 1; } /* * Free the PIE status allocated memory using the pie_callout_cleanup() * callout function to avoid any potential race. * We reset aqm_pie_callout to call pie_callout_cleanup() in the next 1us. * This stops the scheduled calculate_drop_prob() callout and calls * pie_callout_cleanup(), which frees the memory. */ mtx_lock(&pst->lock_mtx); callout_reset_sbt(&pst->aqm_pie_callout, SBT_1US, 0, pie_callout_cleanup, pst, 0); q->aqm_status = NULL; mtx_unlock(&pst->lock_mtx); return 0; }
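/*
 * The "x/3 = (x >> 2) + (x >> 4) + (x >> 7)" comment in aqm_pie_init()
 * above is only an approximation: the shifts sum to x * 0.3203..., about
 * 4% below x/3.  A quick userland check with a representative queue size:
 */
#include <stdio.h>

int main(void)
{
	unsigned x = 10240;

	printf("x/3 = %u, shift approximation = %u\n",
	    x / 3, (x >> 2) + (x >> 4) + (x >> 7));	/* 3413 vs 3280 */
	return (0);
}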
/* * Configure PIE parameters; * also allocate memory for the PIE configuration. */ static int aqm_pie_config(struct dn_fsk* fs, struct dn_extra_parms *ep, int len) { struct dn_aqm_pie_parms *pcfg; int l = sizeof(struct dn_extra_parms); if (len < l) { D("invalid sched parms length got %d need %d", len, l); return EINVAL; } /* we free the old cfg because the original allocation * may have been used for a different AQM type. */ if (fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; } fs->aqmcfg = malloc(sizeof(struct dn_aqm_pie_parms), M_DUMMYNET, M_NOWAIT | M_ZERO); if (fs->aqmcfg == NULL) { D("cannot allocate PIE configuration parameters"); return ENOMEM; } /* the par array contains the pie configuration as follows: * 0- qdelay_ref, 1- tupdate, 2- max_burst, * 3- max_ecnth, 4- alpha, 5- beta, 6- flags */ /* configure PIE parameters */ pcfg = fs->aqmcfg; if (ep->par[0] < 0) pcfg->qdelay_ref = pie_sysctl.qdelay_ref * AQM_TIME_1US; else pcfg->qdelay_ref = ep->par[0]; if (ep->par[1] < 0) pcfg->tupdate = pie_sysctl.tupdate * AQM_TIME_1US; else pcfg->tupdate = ep->par[1]; if (ep->par[2] < 0) pcfg->max_burst = pie_sysctl.max_burst * AQM_TIME_1US; else pcfg->max_burst = ep->par[2]; if (ep->par[3] < 0) pcfg->max_ecnth = pie_sysctl.max_ecnth; else pcfg->max_ecnth = ep->par[3]; if (ep->par[4] < 0) pcfg->alpha = pie_sysctl.alpha; else pcfg->alpha = ep->par[4]; if (ep->par[5] < 0) pcfg->beta = pie_sysctl.beta; else pcfg->beta = ep->par[5]; if (ep->par[6] < 0) pcfg->flags = pie_sysctl.flags; else pcfg->flags = ep->par[6]; /* bound PIE configurations */ pcfg->qdelay_ref = BOUND_VAR(pcfg->qdelay_ref, 1, 10 * AQM_TIME_1S); pcfg->tupdate = BOUND_VAR(pcfg->tupdate, 1, 10 * AQM_TIME_1S); pcfg->max_burst = BOUND_VAR(pcfg->max_burst, 0, 10 * AQM_TIME_1S); pcfg->max_ecnth = BOUND_VAR(pcfg->max_ecnth, 0, PIE_SCALE); pcfg->alpha = BOUND_VAR(pcfg->alpha, 0, 7 * PIE_SCALE); pcfg->beta = BOUND_VAR(pcfg->beta, 0, 7 * PIE_SCALE); pie_desc.cfg_ref_count++; //D("pie cfg_ref_count=%d", pie_desc.cfg_ref_count); return 0; } /* * Deconfigure PIE and free allocated memory. */ static int aqm_pie_deconfig(struct dn_fsk* fs) { if (fs && fs->aqmcfg) { free(fs->aqmcfg, M_DUMMYNET); fs->aqmcfg = NULL; pie_desc.cfg_ref_count--; } return 0; } /* * Retrieve PIE configuration parameters. */ static int aqm_pie_getconfig(struct dn_fsk *fs, struct dn_extra_parms *ep) { struct dn_aqm_pie_parms *pcfg; if (fs->aqmcfg) { strlcpy(ep->name, pie_desc.name, sizeof(ep->name)); pcfg = fs->aqmcfg; ep->par[0] = pcfg->qdelay_ref / AQM_TIME_1US; ep->par[1] = pcfg->tupdate / AQM_TIME_1US; ep->par[2] = pcfg->max_burst / AQM_TIME_1US; ep->par[3] = pcfg->max_ecnth; ep->par[4] = pcfg->alpha; ep->par[5] = pcfg->beta; ep->par[6] = pcfg->flags; return 0; } return 1; } static struct dn_aqm pie_desc = { _SI( .type = ) DN_AQM_PIE, _SI( .name = ) "PIE", _SI( .ref_count = ) 0, _SI( .cfg_ref_count = ) 0, _SI( .enqueue = ) aqm_pie_enqueue, _SI( .dequeue = ) aqm_pie_dequeue, _SI( .config = ) aqm_pie_config, _SI( .deconfig = ) aqm_pie_deconfig, _SI( .getconfig = ) aqm_pie_getconfig, _SI( .init = ) aqm_pie_init, _SI( .cleanup = ) aqm_pie_cleanup, }; DECLARE_DNAQM_MODULE(dn_aqm_pie, &pie_desc); #endif diff --git a/sys/netpfil/ipfw/dn_sched.h b/sys/netpfil/ipfw/dn_sched.h index 5c506c1d30ac..ef7242cd7355 100644 --- a/sys/netpfil/ipfw/dn_sched.h +++ b/sys/netpfil/ipfw/dn_sched.h @@ -1,205 +1,213 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Riccardo Panicucci, Luigi Rizzo, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * The API to write a packet scheduling algorithm for dummynet. * * $FreeBSD$ */ #ifndef _DN_SCHED_H #define _DN_SCHED_H #include #define DN_MULTIQUEUE 0x01 /* * Descriptor for a scheduling algorithm. * Contains all function pointers for a given scheduler. * This is typically created when a module is loaded, and stored * in a global list of schedulers. */ struct dn_alg { uint32_t type; /* the scheduler type */ const char *name; /* scheduler name */ uint32_t flags; /* DN_MULTIQUEUE if it supports multiple queues */ /* * The following define the size of 3 optional data structures * that may need to be allocated at runtime, and are appended * to each of the base data structures: scheduler, sched.inst, * and queue. We don't have a per-flowset structure. */ /* + parameters attached to the template, e.g. * default queue sizes, weights, quantum size, and so on; */ size_t schk_datalen; /* + per-instance parameters, such as timestamps, * containers for queues, etc; */ size_t si_datalen; size_t q_datalen; /* per-queue parameters (e.g. S,F) */ /* * Methods implemented by the scheduler: * enqueue enqueue packet 'm' on scheduler 's', queue 'q'. * q is NULL for !MULTIQUEUE. * Return 0 on success, 1 on drop (packet consumed anyway). * Note that q should be interpreted only as a hint * on the flow that the mbuf belongs to: while a * scheduler will normally enqueue m into q, it is ok * to leave q alone and put the mbuf elsewhere. * This function is called in two cases: * - when a new packet arrives at the scheduler; * - when a scheduler is reconfigured. In this case the * call is issued by the new_queue callback, with a * non-empty queue (q) and m pointing to the first * mbuf in the queue. For this reason, the function * should internally check for (m != q->mq.head) * before calling dn_enqueue(). * * dequeue Called when scheduler instance 's' can * dequeue a packet. Return NULL if none are available. * XXX what about non work-conserving? * * config called on 'sched X config ...', normally writes * in the area of size sch_arg * * destroy called on 'sched delete', frees everything * in sch_arg (other parts are handled by more specific * functions) * * new_sched called when a new instance is created, e.g. * to create the local queue for !MULTIQUEUE, set V or * copy parameters for WFQ, and so on. * * free_sched called when deleting an instance, cleans * extra data in the per-instance area. * * new_fsk called when a flowset is linked to a scheduler, * e.g. to validate parameters such as weights etc. * free_fsk when a flowset is unlinked from a scheduler. * (probably unnecessary) * * new_queue called to set the per-queue parameters, * e.g. S and F, adjust the sum of weights in the parent, etc. * * The new_queue callback is normally called when creating * a new queue. In some cases (such as a scheduler change or * reconfiguration) it can be called with a non-empty queue; * in that case the callback may need to call the enqueue * function, and should eventually call enqueue() passing as m * the first element in the queue. * * free_queue actions related to a queue removal, e.g. undo * all the above. If the queue has data in it, also remove it * from the scheduler. This can e.g. happen during a reconfigure. */ int (*enqueue)(struct dn_sch_inst *, struct dn_queue *, struct mbuf *); struct mbuf * (*dequeue)(struct dn_sch_inst *); int (*config)(struct dn_schk *); int (*destroy)(struct dn_schk*); int (*new_sched)(struct dn_sch_inst *); int (*free_sched)(struct dn_sch_inst *); int (*new_fsk)(struct dn_fsk *f); int (*free_fsk)(struct dn_fsk *f); int (*new_queue)(struct dn_queue *q); int (*free_queue)(struct dn_queue *q); #ifdef NEW_AQM /* Getting scheduler extra parameters */ int (*getconfig)(struct dn_schk *, struct dn_extra_parms *); #endif /* run-time fields */ int ref_count; /* XXX number of instances in the system */ CK_LIST_ENTRY(dn_alg) next; /* Next scheduler in the list */ };
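/*
 * To make the callback contract above concrete, here is a skeletal
 * single-queue scheduler in the style of dummynet's FIFO module.  This is
 * a sketch only: the type number is hypothetical, the wiring that a real
 * new_sched() must do (see dn_sched_fifo.c) is reduced to a comment, and
 * the _SI() wrapper / DECLARE_DNSCHED_MODULE() defined below this struct
 * would be used in a real module source file.
 */
static int
sketch_enqueue(struct dn_sch_inst *si, struct dn_queue *q, struct mbuf *m)
{
	/* single queue: it lives in the si_datalen area right after si */
	return (dn_enqueue((struct dn_queue *)(si + 1), m, 0));
}

static struct mbuf *
sketch_dequeue(struct dn_sch_inst *si)
{
	return (dn_dequeue((struct dn_queue *)(si + 1)));
}

static int
sketch_new_sched(struct dn_sch_inst *si)
{
	/* initialize the embedded dn_queue (oid, _si and fs back-pointers) */
	return (0);
}

static struct dn_alg sketch_desc = {
	.type = 100,			/* hypothetical scheduler type */
	.name = "SKETCH",
	.flags = 0,			/* one queue, so no DN_MULTIQUEUE */
	.si_datalen = sizeof(struct dn_queue),
	.enqueue = sketch_enqueue,
	.dequeue = sketch_dequeue,
	.new_sched = sketch_new_sched,
};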
/* MSVC does not support initializers so we need this ugly macro */ #ifdef _WIN32 #define _SI(fld) #else #define _SI(fld) fld #endif /* * Additionally, dummynet exports some functions and macros * to be used by schedulers: */ void dn_free_pkts(struct mbuf *mnext); int dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop); /* bound a variable between min and max */ int ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg); /* * Extract the head of a queue, update stats. Must be the very last * thing done on a dequeue as the queue itself may go away. */ static __inline struct mbuf* dn_dequeue(struct dn_queue *q) { - struct mbuf *m = q->mq.head; + struct mbuf *m; + +next: + m = q->mq.head; if (m == NULL) return NULL; #ifdef NEW_AQM /* Call AQM dequeue function */ if (q->fs->aqmfp && q->fs->aqmfp->dequeue ) return q->fs->aqmfp->dequeue(q); #endif q->mq.head = m->m_nextpkt; q->mq.count--; /* Update stats for the queue */ q->ni.length--; q->ni.len_bytes -= m->m_pkthdr.len; if (q->_si) { q->_si->ni.length--; q->_si->ni.len_bytes -= m->m_pkthdr.len; } if (q->ni.length == 0) /* queue is now idle */ q->q_time = V_dn_cfg.curr_time; + if (m->m_pkthdr.rcvif != NULL && + __predict_false(m_rcvif_restore(m) == NULL)) { + m_freem(m); + goto next; + } return m; } int dn_sched_modevent(module_t mod, int cmd, void *arg); #define DECLARE_DNSCHED_MODULE(name, dnsched) \ static moduledata_t name##_mod = { \ #name, dn_sched_modevent, dnsched \ }; \ DECLARE_MODULE(name, name##_mod, \ SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); \ MODULE_DEPEND(name, dummynet, 3, 3, 3) #endif /* _DN_SCHED_H */ diff --git a/sys/netpfil/ipfw/dn_sched_fq_codel.h b/sys/netpfil/ipfw/dn_sched_fq_codel.h index 2f82a63ca093..b84dfe7579fb 100644 --- a/sys/netpfil/ipfw/dn_sched_fq_codel.h +++ b/sys/netpfil/ipfw/dn_sched_fq_codel.h @@ -1,166 +1,171 @@ /*- * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund.
* Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * FQ_Codel Structures and helper functions * * $FreeBSD$ */ #ifndef _IP_DN_SCHED_FQ_CODEL_H #define _IP_DN_SCHED_FQ_CODEL_H /* list of queues */ STAILQ_HEAD(fq_codel_list, fq_codel_flow); /* fq_codel parameters including codel */ struct dn_sch_fq_codel_parms { struct dn_aqm_codel_parms ccfg; /* CoDel Parameters */ /* FQ_CODEL Parameters */ uint32_t flows_cnt; /* number of flows */ uint32_t limit; /* hard limit of fq_codel queue size*/ uint32_t quantum; }; /* defaults */ /* flow (sub-queue) stats */ struct flow_stats { uint64_t tot_pkts; /* statistics counters */ uint64_t tot_bytes; uint32_t length; /* Queue length, in packets */ uint32_t len_bytes; /* Queue length, in bytes */ uint32_t drops; }; /* A flow of packets (sub-queue).*/ struct fq_codel_flow { struct mq mq; /* list of packets */ struct flow_stats stats; /* statistics */ int deficit; int active; /* 1: flow is active (in a list) */ struct codel_status cst; STAILQ_ENTRY(fq_codel_flow) flowchain; }; /* extra fq_codel scheduler configurations */ struct fq_codel_schk { struct dn_sch_fq_codel_parms cfg; }; /* fq_codel scheduler instance */ struct fq_codel_si { struct dn_sch_inst _si; /* standard scheduler instance */ struct dn_queue main_q; /* main queue is after si directly */ struct fq_codel_flow *flows; /* array of flows (queues) */ uint32_t perturbation; /* random value */ struct fq_codel_list newflows; /* list of new queues */ struct fq_codel_list oldflows; /* list of old queues */ }; /* Helper function to update queue&main-queue and scheduler statistics. 
* negative len + drop -> drop * negative len -> dequeue * positive len -> enqueue * positive len + drop -> drop during enqueue */ __inline static void fq_update_stats(struct fq_codel_flow *q, struct fq_codel_si *si, int len, int drop) { int inc = 0; if (len < 0) inc = -1; else if (len > 0) inc = 1; if (drop) { si->main_q.ni.drops ++; q->stats.drops ++; si->_si.ni.drops ++; V_dn_cfg.io_pkt_drop ++; } if (!drop || (drop && len < 0)) { /* Update stats for the main queue */ si->main_q.ni.length += inc; si->main_q.ni.len_bytes += len; /*update sub-queue stats */ q->stats.length += inc; q->stats.len_bytes += len; /*update scheduler instance stats */ si->_si.ni.length += inc; si->_si.ni.len_bytes += len; } if (inc > 0) { si->main_q.ni.tot_bytes += len; si->main_q.ni.tot_pkts ++; q->stats.tot_bytes +=len; q->stats.tot_pkts++; si->_si.ni.tot_bytes +=len; si->_si.ni.tot_pkts ++; } } /* extract the head of fq_codel sub-queue */ __inline static struct mbuf * fq_codel_extract_head(struct fq_codel_flow *q, aqm_time_t *pkt_ts, struct fq_codel_si *si) { - struct mbuf *m = q->mq.head; + struct mbuf *m; +next: m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 0); if (si->main_q.ni.length == 0) /* queue is now idle */ si->main_q.q_time = V_dn_cfg.curr_time; /* extract packet timestamp*/ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL){ D("timestamp tag is not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } - + if (m->m_pkthdr.rcvif != NULL && + __predict_false(m_rcvif_restore(m) == NULL)) { + m_freem(m); + goto next; + } return m; } #endif diff --git a/sys/netpfil/ipfw/dn_sched_fq_pie.c b/sys/netpfil/ipfw/dn_sched_fq_pie.c index c3de665687a3..f589614be436 100644 --- a/sys/netpfil/ipfw/dn_sched_fq_pie.c +++ b/sys/netpfil/ipfw/dn_sched_fq_pie.c @@ -1,1228 +1,1234 @@ /* * FQ_PIE - The FlowQueue-PIE scheduler/AQM * * $FreeBSD$ * * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* Important note: * As there is no official document for the FQ-PIE specification, we used * the FQ-CoDel algorithm with some modifications to implement FQ-PIE. * This FQ-PIE implementation is a beta version and has not been tested * extensively. Our FQ-PIE uses a stand-alone PIE AQM per sub-queue. By * default, a timestamp is used to calculate the queue delay instead of the * departure rate estimation method. Although departure rate estimation is * available as a testing option, the results could be incorrect. Moreover, * an option to turn PIE on and off is available, but it does not work * properly in this version. */ #ifdef _KERNEL #include #include #include #include #include #include #include #include /* IFNAMSIZ */ #include #include /* ipfw_rule_ref */ #include /* flow_id */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #else #include #endif #define DN_SCHED_FQ_PIE 7 /* list of queues */ STAILQ_HEAD(fq_pie_list, fq_pie_flow); /* FQ_PIE parameters including PIE */ struct dn_sch_fq_pie_parms { struct dn_aqm_pie_parms pcfg; /* PIE configuration Parameters */ /* FQ_PIE Parameters */ uint32_t flows_cnt; /* number of flows */ uint32_t limit; /* hard limit of FQ_PIE queue size */ uint32_t quantum; }; /* flow (sub-queue) stats */ struct flow_stats { uint64_t tot_pkts; /* statistics counters */ uint64_t tot_bytes; uint32_t length; /* Queue length, in packets */ uint32_t len_bytes; /* Queue length, in bytes */ uint32_t drops; }; /* A flow of packets (sub-queue) */ struct fq_pie_flow { struct mq mq; /* list of packets */ struct flow_stats stats; /* statistics */ int deficit; int active; /* 1: flow is active (in a list) */ struct pie_status pst; /* pie status variables */ struct fq_pie_si_extra *psi_extra; STAILQ_ENTRY(fq_pie_flow) flowchain; }; /* extra fq_pie scheduler configurations */ struct fq_pie_schk { struct dn_sch_fq_pie_parms cfg; }; /* fq_pie scheduler instance extra state vars. * The purpose of separating this structure is to preserve the number of * active sub-queues and the flows array pointer even after the scheduler * instance is destroyed. * Preserving these variables allows freeing the allocated memory by * fqpie_callout_cleanup() independently from fq_pie_free_sched(). */ struct fq_pie_si_extra { uint32_t nr_active_q; /* number of active queues */ struct fq_pie_flow *flows; /* array of flows (queues) */ }; /* fq_pie scheduler instance */ struct fq_pie_si { struct dn_sch_inst _si; /* standard scheduler instance. SHOULD BE FIRST */ struct dn_queue main_q; /* main queue is after si directly */ uint32_t perturbation; /* random value */ struct fq_pie_list newflows; /* list of new queues */ struct fq_pie_list oldflows; /* list of old queues */ struct fq_pie_si_extra *si_extra; /* extra state vars */ };
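/*
 * Why _si "SHOULD BE FIRST": the dummynet core only ever hands the
 * scheduler a struct dn_sch_inst pointer, yet the instance was allocated
 * with si_datalen extra bytes for the fields above.  Keeping _si as the
 * first member makes the two views of the same allocation interchangeable
 * with a plain cast (hypothetical helper, shown for illustration):
 */
static inline struct fq_pie_si *
fq_pie_si_from_inst(struct dn_sch_inst *_si)
{
	/* valid only because _si is the first member of struct fq_pie_si */
	return ((struct fq_pie_si *)_si);
}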
static struct dn_alg fq_pie_desc; /* Default FQ-PIE parameters including PIE */ /* PIE defaults * target=15ms, max_burst=150ms, max_ecnth=0.1, * alpha=0.125, beta=1.25, tupdate=15ms * FQ: * flows=1024, limit=10240, quantum=1514 */ struct dn_sch_fq_pie_parms fq_pie_sysctl = {{15000 * AQM_TIME_1US, 15000 * AQM_TIME_1US, 150000 * AQM_TIME_1US, PIE_SCALE * 0.1, PIE_SCALE * 0.125, PIE_SCALE * 1.25, PIE_CAPDROP_ENABLED | PIE_DERAND_ENABLED}, 1024, 10240, 1514}; static int fqpie_sysctl_alpha_beta_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"alpha")) value = fq_pie_sysctl.pcfg.alpha; else value = fq_pie_sysctl.pcfg.beta; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 7 * PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; if (!strcmp(oidp->oid_name,"alpha")) fq_pie_sysctl.pcfg.alpha = value; else fq_pie_sysctl.pcfg.beta = value; return (0); } static int fqpie_sysctl_target_tupdate_maxb_handler(SYSCTL_HANDLER_ARGS) { int error; long value; if (!strcmp(oidp->oid_name,"target")) value = fq_pie_sysctl.pcfg.qdelay_ref; else if (!strcmp(oidp->oid_name,"tupdate")) value = fq_pie_sysctl.pcfg.tupdate; else value = fq_pie_sysctl.pcfg.max_burst; value = value / AQM_TIME_1US; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > 10 * AQM_TIME_1S) return (EINVAL); value = value * AQM_TIME_1US; if (!strcmp(oidp->oid_name,"target")) fq_pie_sysctl.pcfg.qdelay_ref = value; else if (!strcmp(oidp->oid_name,"tupdate")) fq_pie_sysctl.pcfg.tupdate = value; else fq_pie_sysctl.pcfg.max_burst = value; return (0); } static int fqpie_sysctl_max_ecnth_handler(SYSCTL_HANDLER_ARGS) { int error; long value; value = fq_pie_sysctl.pcfg.max_ecnth; value = value * 1000 / PIE_SCALE; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 1 || value > PIE_SCALE) return (EINVAL); value = (value * PIE_SCALE) / 1000; fq_pie_sysctl.pcfg.max_ecnth = value; return (0); } /* define FQ-PIE sysctl variables */ SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); SYSCTL_DECL(_net_inet_ip_dummynet); static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, fqpie, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "FQ_PIE"); #ifdef SYSCTL_NODE SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, target, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_target_tupdate_maxb_handler, "L", "queue target in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, tupdate, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_target_tupdate_maxb_handler, "L", "the frequency of drop probability calculation in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, max_burst, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_target_tupdate_maxb_handler, "L", "Burst allowance interval in microseconds"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, max_ecnth, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_max_ecnth_handler, "L", "ECN safeguard threshold scaled by 1000");
SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, alpha, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_alpha_beta_handler, "L", "PIE alpha scaled by 1000"); SYSCTL_PROC(_net_inet_ip_dummynet_fqpie, OID_AUTO, beta, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, fqpie_sysctl_alpha_beta_handler, "L", "beta scaled by 1000"); SYSCTL_UINT(_net_inet_ip_dummynet_fqpie, OID_AUTO, quantum, CTLFLAG_RW, &fq_pie_sysctl.quantum, 1514, "quantum for FQ_PIE"); SYSCTL_UINT(_net_inet_ip_dummynet_fqpie, OID_AUTO, flows, CTLFLAG_RW, &fq_pie_sysctl.flows_cnt, 1024, "Number of queues for FQ_PIE"); SYSCTL_UINT(_net_inet_ip_dummynet_fqpie, OID_AUTO, limit, CTLFLAG_RW, &fq_pie_sysctl.limit, 10240, "limit for FQ_PIE"); #endif /* Helper function to update queue&main-queue and scheduler statistics. * negative len & drop -> drop * negative len -> dequeue * positive len -> enqueue * positive len + drop -> drop during enqueue */ __inline static void fq_update_stats(struct fq_pie_flow *q, struct fq_pie_si *si, int len, int drop) { int inc = 0; if (len < 0) inc = -1; else if (len > 0) inc = 1; if (drop) { si->main_q.ni.drops ++; q->stats.drops ++; si->_si.ni.drops ++; V_dn_cfg.io_pkt_drop ++; } if (!drop || (drop && len < 0)) { /* Update stats for the main queue */ si->main_q.ni.length += inc; si->main_q.ni.len_bytes += len; /*update sub-queue stats */ q->stats.length += inc; q->stats.len_bytes += len; /*update scheduler instance stats */ si->_si.ni.length += inc; si->_si.ni.len_bytes += len; } if (inc > 0) { si->main_q.ni.tot_bytes += len; si->main_q.ni.tot_pkts ++; q->stats.tot_bytes +=len; q->stats.tot_pkts++; si->_si.ni.tot_bytes +=len; si->_si.ni.tot_pkts ++; } } /* * Extract a packet from the head of sub-queue 'q' * Return a packet or NULL if the queue is empty. * If getts is set, also extract packet's timestamp from mtag. */ __inline static struct mbuf * fq_pie_extract_head(struct fq_pie_flow *q, aqm_time_t *pkt_ts, struct fq_pie_si *si, int getts) { - struct mbuf *m = q->mq.head; + struct mbuf *m; +next: m = q->mq.head; if (m == NULL) return m; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 0); if (si->main_q.ni.length == 0) /* queue is now idle */ si->main_q.q_time = V_dn_cfg.curr_time; if (getts) { /* extract packet timestamp*/ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL){ D("PIE timestamp mtag not found!"); *pkt_ts = 0; } else { *pkt_ts = *(aqm_time_t *)(mtag + 1); m_tag_delete(m,mtag); } } + if (m->m_pkthdr.rcvif != NULL && + __predict_false(m_rcvif_restore(m) == NULL)) { + m_freem(m); + goto next; + } return m; } /* * Callout function for drop probability calculation * This function is called over tupdate ms and takes pointer of FQ-PIE * flow as an argument */ static void fq_calculate_drop_prob(void *x) { struct fq_pie_flow *q = (struct fq_pie_flow *) x; struct pie_status *pst = &q->pst; struct dn_aqm_pie_parms *pprms; int64_t p, prob, oldprob; int p_isneg; pprms = pst->parms; prob = pst->drop_prob; /* calculate current qdelay using DRE method. * If TS is used and no data in the queue, reset current_qdelay * as it stays at last value during dequeue process. 
*/ if (pprms->flags & PIE_DEPRATEEST_ENABLED) pst->current_qdelay = ((uint64_t)q->stats.len_bytes * pst->avg_dq_time) >> PIE_DQ_THRESHOLD_BITS; else if (!q->stats.len_bytes) pst->current_qdelay = 0; /* calculate drop probability */ p = (int64_t)pprms->alpha * ((int64_t)pst->current_qdelay - (int64_t)pprms->qdelay_ref); p += (int64_t)pprms->beta * ((int64_t)pst->current_qdelay - (int64_t)pst->qdelay_old); /* take the absolute value so the right shift result is well defined */ p_isneg = p < 0; if (p_isneg) { p = -p; } /* shift PIE_MAX_PROB by 12 bits to increase the division precision */ p *= (PIE_MAX_PROB << 12) / AQM_TIME_1S; /* auto-tune drop probability */ if (prob < (PIE_MAX_PROB / 1000000)) /* 0.000001 */ p >>= 11 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100000)) /* 0.00001 */ p >>= 9 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10000)) /* 0.0001 */ p >>= 7 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 1000)) /* 0.001 */ p >>= 5 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 100)) /* 0.01 */ p >>= 3 + PIE_FIX_POINT_BITS + 12; else if (prob < (PIE_MAX_PROB / 10)) /* 0.1 */ p >>= 1 + PIE_FIX_POINT_BITS + 12; else p >>= PIE_FIX_POINT_BITS + 12; oldprob = prob; if (p_isneg) { prob = prob - p; /* check for multiplication underflow */ if (prob > oldprob) { prob = 0; D("underflow"); } } else { /* Cap Drop adjustment */ if ((pprms->flags & PIE_CAPDROP_ENABLED) && prob >= PIE_MAX_PROB / 10 && p > PIE_MAX_PROB / 50 ) { p = PIE_MAX_PROB / 50; } prob = prob + p; /* check for multiplication overflow */ if (prob < oldprob) { D("overflow"); prob = PIE_MAX_PROB; } } /* * decay the drop probability exponentially * and restrict it to the range 0 to PIE_MAX_PROB */ if (prob < 0) { prob = 0; } else { if (pst->current_qdelay == 0 && pst->qdelay_old == 0) { /* 0.98 ~= 1 - 1/64 */ prob = prob - (prob >> 6); } if (prob > PIE_MAX_PROB) { prob = PIE_MAX_PROB; } } pst->drop_prob = prob; /* store the current delay value */ pst->qdelay_old = pst->current_qdelay; /* update burst allowance */ if ((pst->sflags & PIE_ACTIVE) && pst->burst_allowance) { if (pst->burst_allowance > pprms->tupdate) pst->burst_allowance -= pprms->tupdate; else pst->burst_allowance = 0; } if (pst->sflags & PIE_ACTIVE) callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, fq_calculate_drop_prob, q, 0); mtx_unlock(&pst->lock_mtx); } /* * Reset PIE variables & activate the queue */ __inline static void fq_activate_pie(struct fq_pie_flow *q) { struct pie_status *pst = &q->pst; struct dn_aqm_pie_parms *pprms; mtx_lock(&pst->lock_mtx); pprms = pst->parms; pst->drop_prob = 0; pst->qdelay_old = 0; pst->burst_allowance = pprms->max_burst; pst->accu_prob = 0; pst->dq_count = 0; pst->avg_dq_time = 0; pst->sflags = PIE_INMEASUREMENT | PIE_ACTIVE; pst->measurement_start = AQM_UNOW; callout_reset_sbt(&pst->aqm_pie_callout, (uint64_t)pprms->tupdate * SBT_1US, 0, fq_calculate_drop_prob, q, 0); mtx_unlock(&pst->lock_mtx); } /* * Deactivate PIE and stop the probability update callout */ __inline static void fq_deactivate_pie(struct pie_status *pst) { mtx_lock(&pst->lock_mtx); pst->sflags &= ~(PIE_ACTIVE | PIE_INMEASUREMENT); callout_stop(&pst->aqm_pie_callout); //D("PIE Deactivated"); mtx_unlock(&pst->lock_mtx); } /* * Initialize PIE for sub-queue 'q' */ static int pie_init(struct fq_pie_flow *q, struct fq_pie_schk *fqpie_schk) { struct pie_status *pst = &q->pst; struct dn_aqm_pie_parms *pprms = pst->parms; int err = 0; if (!pprms){ D("AQM_PIE is not configured"); err = EINVAL; } else { q->psi_extra->nr_active_q++; /* For speed optimization, we calculate 1/3 of the queue size once here */ // XXX limit divided by number of queues divided by 3 ???
/* * Initialize PIE for sub-queue 'q' */ static int pie_init(struct fq_pie_flow *q, struct fq_pie_schk *fqpie_schk) { struct pie_status *pst=&q->pst; struct dn_aqm_pie_parms *pprms = pst->parms; int err = 0; if (!pprms){ D("AQM_PIE is not configured"); err = EINVAL; } else { q->psi_extra->nr_active_q++; /* For speed optimization, we calculate 1/3 of the queue size once here */ // XXX limit divided by number of queues divided by 3 ??? pst->one_third_q_size = (fqpie_schk->cfg.limit / fqpie_schk->cfg.flows_cnt) / 3; mtx_init(&pst->lock_mtx, "mtx_pie", NULL, MTX_DEF); callout_init_mtx(&pst->aqm_pie_callout, &pst->lock_mtx, CALLOUT_RETURNUNLOCKED); } return err; } /* * Callout function to destroy the PIE lock, and free fq_pie flows and fq_pie si * extra memory when the number of active sub-queues reaches zero. * 'x' is a fq_pie_flow to be destroyed */ static void fqpie_callout_cleanup(void *x) { struct fq_pie_flow *q = x; struct pie_status *pst = &q->pst; struct fq_pie_si_extra *psi_extra; mtx_unlock(&pst->lock_mtx); mtx_destroy(&pst->lock_mtx); psi_extra = q->psi_extra; DN_BH_WLOCK(); psi_extra->nr_active_q--; /* when all sub-queues are destroyed, free the flows' fq_pie extra vars memory */ if (!psi_extra->nr_active_q) { free(psi_extra->flows, M_DUMMYNET); free(psi_extra, M_DUMMYNET); fq_pie_desc.ref_count--; } DN_BH_WUNLOCK(); } /* * Clean up the PIE status for sub-queue 'q'. * Stop the callout timer and destroy the mtx using the fqpie_callout_cleanup() callout. */ static int pie_cleanup(struct fq_pie_flow *q) { struct pie_status *pst = &q->pst; mtx_lock(&pst->lock_mtx); callout_reset_sbt(&pst->aqm_pie_callout, SBT_1US, 0, fqpie_callout_cleanup, q, 0); mtx_unlock(&pst->lock_mtx); return 0; } /* * Dequeue and return a packet from sub-queue 'q', or NULL if 'q' is empty. * Also, calculate the departure time or queue delay using the timestamp. */ static struct mbuf * pie_dequeue(struct fq_pie_flow *q, struct fq_pie_si *si) { struct mbuf *m; struct dn_aqm_pie_parms *pprms; struct pie_status *pst; aqm_time_t now; aqm_time_t pkt_ts, dq_time; int32_t w; pst = &q->pst; pprms = q->pst.parms; /* we extract the packet ts only when Departure Rate Estimation is not used */ m = fq_pie_extract_head(q, &pkt_ts, si, !(pprms->flags & PIE_DEPRATEEST_ENABLED)); if (!m || !(pst->sflags & PIE_ACTIVE)) return m; now = AQM_UNOW; if (pprms->flags & PIE_DEPRATEEST_ENABLED) { /* calculate average departure time */ if(pst->sflags & PIE_INMEASUREMENT) { pst->dq_count += m->m_pkthdr.len; if (pst->dq_count >= PIE_DQ_THRESHOLD) { dq_time = now - pst->measurement_start; /* * if we don't have an old avg dq_time, i.e. PIE was (re)initialized, * don't use the weight to calculate the new avg_dq_time */ if(pst->avg_dq_time == 0) pst->avg_dq_time = dq_time; else { /* * weight = PIE_DQ_THRESHOLD/2^6, but we scaled * weight by 2^8. Thus, scaled * weight = PIE_DQ_THRESHOLD /2^8 * */ w = PIE_DQ_THRESHOLD >> 8; pst->avg_dq_time = (dq_time* w + (pst->avg_dq_time * ((1L << 8) - w))) >> 8; pst->sflags &= ~PIE_INMEASUREMENT; } } } /* * Start a new measurement cycle when the queue has * PIE_DQ_THRESHOLD worth of bytes. */ if(!(pst->sflags & PIE_INMEASUREMENT) && q->stats.len_bytes >= PIE_DQ_THRESHOLD) { pst->sflags |= PIE_INMEASUREMENT; pst->measurement_start = now; pst->dq_count = 0; } } /* Optionally, use the packet timestamp to estimate queue delay */ else pst->current_qdelay = now - pkt_ts; return m; }
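pie_dequeue()'s departure rate estimator smooths the per-cycle dequeue time with a fixed-point EWMA whose weight is pre-scaled by 2^8. A minimal sketch of just that smoothing step, with an assumed threshold value and invented names:

#include <stdio.h>
#include <stdint.h>

#define TOY_DQ_THRESHOLD (1 << 14)	/* assumed bytes per measurement cycle */

/* EWMA with weight w/2^8: avg <- (sample*w + avg*(2^8 - w)) >> 8.
 * The first sample after (re)initialization is taken verbatim. */
static uint32_t toy_avg_dq(uint32_t avg, uint32_t dq_time)
{
	int32_t w = TOY_DQ_THRESHOLD >> 8;	/* weight, scaled by 2^8 */

	if (avg == 0)
		return dq_time;
	return (dq_time * w + avg * ((1L << 8) - w)) >> 8;
}

int main(void)
{
	uint32_t avg = 0;
	uint32_t samples[] = { 4000, 6000, 5000, 9000 };	/* usec per cycle */

	for (int i = 0; i < 4; i++) {
		avg = toy_avg_dq(avg, samples[i]);
		printf("sample=%u avg=%u\n", samples[i], avg);
	}
	return 0;
}

With these assumed values the weight comes out to 64/256, so each new cycle contributes a quarter of the average; the kernel then converts avg_dq_time into a queue delay by scaling it with the backlog, as seen in fq_calculate_drop_prob() above.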
/* * Enqueue a packet in q, subject to space and FQ-PIE queue management policy * (whose parameters are in q->fs). * Update stats for the queue and the scheduler. * Return 0 on success, 1 on drop. The packet is consumed anyway. */ static int pie_enqueue(struct fq_pie_flow *q, struct mbuf* m, struct fq_pie_si *si) { uint64_t len; struct pie_status *pst; struct dn_aqm_pie_parms *pprms; int t; len = m->m_pkthdr.len; pst = &q->pst; pprms = pst->parms; t = ENQUE; /* drop/mark the packet when PIE is active and the burst time has elapsed */ if (pst->sflags & PIE_ACTIVE && pst->burst_allowance == 0 && drop_early(pst, q->stats.len_bytes) == DROP) { /* * if drop_prob is over the ECN threshold, drop the packet; * otherwise mark and enqueue it. */ if (pprms->flags & PIE_ECN_ENABLED && pst->drop_prob < (pprms->max_ecnth << (PIE_PROB_BITS - PIE_FIX_POINT_BITS)) && ecn_mark(m)) t = ENQUE; else t = DROP; } /* Turn PIE on when 1/3 of the queue is full */ if (!(pst->sflags & PIE_ACTIVE) && q->stats.len_bytes >= pst->one_third_q_size) { fq_activate_pie(q); } /* reset burst tolerance and optionally turn PIE off */ if (pst->drop_prob == 0 && pst->current_qdelay < (pprms->qdelay_ref >> 1) && pst->qdelay_old < (pprms->qdelay_ref >> 1)) { pst->burst_allowance = pprms->max_burst; if (pprms->flags & PIE_ON_OFF_MODE_ENABLED && q->stats.len_bytes<=0) fq_deactivate_pie(pst); } /* Use a timestamp if Departure Rate Estimation mode is disabled */ if (t != DROP && !(pprms->flags & PIE_DEPRATEEST_ENABLED)) { /* Add TS to mbuf as a TAG */ struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL); if (mtag == NULL) mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, sizeof(aqm_time_t), M_NOWAIT); if (mtag == NULL) { t = DROP; } else { *(aqm_time_t *)(mtag + 1) = AQM_UNOW; m_tag_prepend(m, mtag); } } if (t != DROP) { mq_append(&q->mq, m); fq_update_stats(q, si, len, 0); return 0; } else { fq_update_stats(q, si, len, 1); pst->accu_prob = 0; FREE_PKT(m); return 1; } return 0; } /* Drop a packet from the head of the FQ-PIE sub-queue */ static void pie_drop_head(struct fq_pie_flow *q, struct fq_pie_si *si) { struct mbuf *m = q->mq.head; if (m == NULL) return; q->mq.head = m->m_nextpkt; fq_update_stats(q, si, -m->m_pkthdr.len, 1); if (si->main_q.ni.length == 0) /* queue is now idle */ si->main_q.q_time = V_dn_cfg.curr_time; /* reset accu_prob after packet drop */ q->pst.accu_prob = 0; FREE_PKT(m); }
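The decision ladder at the top of pie_enqueue() amounts to: enqueue while PIE is inactive or the burst allowance holds, otherwise drop with probability drop_prob, and mark instead of dropping when ECN is enabled and the probability is still below the ECN threshold. The sketch below is a simplified rendering of that ladder; the kernel's drop_early() additionally derandomizes via accu_prob, which is omitted here, and all names and the plain Bernoulli draw are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TOY_MAX_PROB ((1LL << 31) - 1)	/* assumed probability scale */

enum toy_action { TOY_ENQUE, TOY_MARK, TOY_DROP };

static enum toy_action
toy_enqueue_decision(int active, int64_t burst, int64_t drop_prob,
    int ecn_enabled, int64_t ecn_thresh)
{
	if (!active || burst > 0)
		return TOY_ENQUE;	/* PIE off or inside the burst window */
	if ((int64_t)(random() & TOY_MAX_PROB) >= drop_prob)
		return TOY_ENQUE;	/* survived the coin flip */
	if (ecn_enabled && drop_prob < ecn_thresh)
		return TOY_MARK;	/* mark CE instead of dropping */
	return TOY_DROP;
}

int main(void)
{
	int n[3] = {0, 0, 0};

	srandom(1);
	for (int i = 0; i < 100000; i++)
		n[toy_enqueue_decision(1, 0, TOY_MAX_PROB / 10, 1,
		    TOY_MAX_PROB / 5)]++;
	printf("enque=%d mark=%d drop=%d\n", n[0], n[1], n[2]);
	return 0;
}

With drop_prob below the ECN threshold, roughly ten percent of packets get marked and none are dropped, mirroring how the kernel prefers marking while the probability is still moderate.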
/* * Classify a packet into a queue number using the Jenkins hash function. * Return: queue number. * The inputs to the hash are protocol number, perturbation, src IP, dst IP, * src port and dst port. */ static inline int fq_pie_classify_flow(struct mbuf *m, uint16_t fcount, struct fq_pie_si *si) { struct ip *ip; struct tcphdr *th; struct udphdr *uh; uint8_t tuple[41]; uint16_t hash=0; ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); //#ifdef INET6 struct ip6_hdr *ip6; int isip6; isip6 = (ip->ip_v == 6); if(isip6) { ip6 = (struct ip6_hdr *)ip; *((uint8_t *) &tuple[0]) = ip6->ip6_nxt; *((uint32_t *) &tuple[1]) = si->perturbation; memcpy(&tuple[5], ip6->ip6_src.s6_addr, 16); memcpy(&tuple[21], ip6->ip6_dst.s6_addr, 16); switch (ip6->ip6_nxt) { case IPPROTO_TCP: th = (struct tcphdr *)(ip6 + 1); *((uint16_t *) &tuple[37]) = th->th_dport; *((uint16_t *) &tuple[39]) = th->th_sport; break; case IPPROTO_UDP: uh = (struct udphdr *)(ip6 + 1); *((uint16_t *) &tuple[37]) = uh->uh_dport; *((uint16_t *) &tuple[39]) = uh->uh_sport; break; default: memset(&tuple[37], 0, 4); } hash = jenkins_hash(tuple, 41, HASHINIT) % fcount; return hash; } //#endif /* IPv4 */ *((uint8_t *) &tuple[0]) = ip->ip_p; *((uint32_t *) &tuple[1]) = si->perturbation; *((uint32_t *) &tuple[5]) = ip->ip_src.s_addr; *((uint32_t *) &tuple[9]) = ip->ip_dst.s_addr; switch (ip->ip_p) { case IPPROTO_TCP: th = (struct tcphdr *)(ip + 1); *((uint16_t *) &tuple[13]) = th->th_dport; *((uint16_t *) &tuple[15]) = th->th_sport; break; case IPPROTO_UDP: uh = (struct udphdr *)(ip + 1); *((uint16_t *) &tuple[13]) = uh->uh_dport; *((uint16_t *) &tuple[15]) = uh->uh_sport; break; default: memset(&tuple[13], 0, 4); } hash = jenkins_hash(tuple, 17, HASHINIT) % fcount; return hash; } /* * Enqueue a packet into the appropriate queue according to the * FQ-PIE algorithm. */ static int fq_pie_enqueue(struct dn_sch_inst *_si, struct dn_queue *_q, struct mbuf *m) { struct fq_pie_si *si; struct fq_pie_schk *schk; struct dn_sch_fq_pie_parms *param; struct dn_queue *mainq; struct fq_pie_flow *flows; int idx, drop, i, maxidx; mainq = (struct dn_queue *)(_si + 1); si = (struct fq_pie_si *)_si; flows = si->si_extra->flows; schk = (struct fq_pie_schk *)(si->_si.sched+1); param = &schk->cfg; /* classify the packet into a queue number */ idx = fq_pie_classify_flow(m, param->flows_cnt, si); /* enqueue the packet into the appropriate queue using PIE AQM. * Note: the 'pie_enqueue' function returns 1 only when it is unable to * add a timestamp to the packet (no limit check) */ drop = pie_enqueue(&flows[idx], m, si); /* pie was unable to timestamp the packet */ if (drop) return 1; /* If the flow (sub-queue) is not active, then add it to the tail of * the new flows list, initialize it and activate it. */ if (!flows[idx].active) { STAILQ_INSERT_TAIL(&si->newflows, &flows[idx], flowchain); flows[idx].deficit = param->quantum; fq_activate_pie(&flows[idx]); flows[idx].active = 1; } /* check the limit for all queues and remove a packet from the * largest one */ if (mainq->ni.length > schk->cfg.limit) { /* find the first active flow */ for (maxidx = 0; maxidx < schk->cfg.flows_cnt; maxidx++) if (flows[maxidx].active) break; if (maxidx < schk->cfg.flows_cnt) { /* find the largest sub-queue */ for (i = maxidx + 1; i < schk->cfg.flows_cnt; i++) if (flows[i].active && flows[i].stats.length > flows[maxidx].stats.length) maxidx = i; pie_drop_head(&flows[maxidx], si); drop = 1; } } return drop; }
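fq_pie_classify_flow() packs the protocol, a per-instance perturbation and the address/port pair into a byte tuple before hashing. The userspace sketch below reproduces the IPv4 packing with a trivial FNV-1a stand-in for the kernel's jenkins_hash(); all field values are made up. It uses memcpy() into the tuple, which sidesteps the unaligned stores that the casts in the kernel version perform:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Trivial FNV-1a stand-in for the kernel's jenkins_hash(). */
static uint32_t toy_hash(const uint8_t *buf, size_t len)
{
	uint32_t h = 2166136261u;

	while (len--)
		h = (h ^ *buf++) * 16777619u;
	return h;
}

/* Pack the IPv4 5-tuple plus a perturbation, following the layout above:
 * proto(1) perturbation(4) src(4) dst(4) dport(2) sport(2) = 17 bytes. */
static int toy_classify(uint8_t proto, uint32_t perturb, uint32_t src,
    uint32_t dst, uint16_t sport, uint16_t dport, uint16_t flows)
{
	uint8_t tuple[17];

	tuple[0] = proto;
	memcpy(&tuple[1], &perturb, 4);
	memcpy(&tuple[5], &src, 4);
	memcpy(&tuple[9], &dst, 4);
	memcpy(&tuple[13], &dport, 2);
	memcpy(&tuple[15], &sport, 2);
	return toy_hash(tuple, sizeof(tuple)) % flows;
}

int main(void)
{
	printf("queue=%d\n", toy_classify(6, 0x1234, 0x0a000001,
	    0x0a000002, 12345, 80, 1024));
	return 0;
}

The perturbation randomizes the mapping per scheduler instance, so two instances will not systematically collide the same set of flows into the same sub-queue.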
/* * Dequeue a packet from the appropriate queue according to the * FQ-PIE algorithm. */ static struct mbuf * fq_pie_dequeue(struct dn_sch_inst *_si) { struct fq_pie_si *si; struct fq_pie_schk *schk; struct dn_sch_fq_pie_parms *param; struct fq_pie_flow *f; struct mbuf *mbuf; struct fq_pie_list *fq_pie_flowlist; si = (struct fq_pie_si *)_si; schk = (struct fq_pie_schk *)(si->_si.sched+1); param = &schk->cfg; do { /* select a list to start with */ if (STAILQ_EMPTY(&si->newflows)) fq_pie_flowlist = &si->oldflows; else fq_pie_flowlist = &si->newflows; /* Both the new and old queue lists are empty, return NULL */ if (STAILQ_EMPTY(fq_pie_flowlist)) return NULL; f = STAILQ_FIRST(fq_pie_flowlist); while (f != NULL) { /* if the flow (sub-queue) has no remaining deficit, increase its * deficit by quantum, move the flow to the tail of the old flows list * and try another flow. * Otherwise, the flow will be used for dequeue. */ if (f->deficit < 0) { f->deficit += param->quantum; STAILQ_REMOVE_HEAD(fq_pie_flowlist, flowchain); STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); } else break; f = STAILQ_FIRST(fq_pie_flowlist); } /* the new flows list is empty, try the old flows list */ if (STAILQ_EMPTY(fq_pie_flowlist)) continue; /* Dequeue a packet from the selected flow */ mbuf = pie_dequeue(f, si); /* pie did not return a packet */ if (!mbuf) { /* If the selected flow belongs to the new flows list, then move * it to the tail of the old flows list. Otherwise, deactivate it and * remove it from the old list. */ if (fq_pie_flowlist == &si->newflows) { STAILQ_REMOVE_HEAD(fq_pie_flowlist, flowchain); STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain); } else { f->active = 0; fq_deactivate_pie(&f->pst); STAILQ_REMOVE_HEAD(fq_pie_flowlist, flowchain); } /* start again */ continue; } /* we have a packet to return; * update the flow deficit and return the packet */ f->deficit -= mbuf->m_pkthdr.len; return mbuf; } while (1); /* unreachable point */ return NULL; } /* * Initialize an fq_pie scheduler instance. * Also, allocate memory for the flows array. */ static int fq_pie_new_sched(struct dn_sch_inst *_si) { struct fq_pie_si *si; struct dn_queue *q; struct fq_pie_schk *schk; struct fq_pie_flow *flows; int i; si = (struct fq_pie_si *)_si; schk = (struct fq_pie_schk *)(_si->sched+1); if(si->si_extra) { D("si already configured!"); return 0; } /* init the main queue */ q = &si->main_q; set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q)); q->_si = _si; q->fs = _si->sched->fs; /* allocate memory for the scheduler instance extra vars */ si->si_extra = malloc(sizeof(struct fq_pie_si_extra), M_DUMMYNET, M_NOWAIT | M_ZERO); if (si->si_extra == NULL) { D("cannot allocate memory for fq_pie si extra vars"); return ENOMEM ; } /* allocate memory for the flows array */ si->si_extra->flows = mallocarray(schk->cfg.flows_cnt, sizeof(struct fq_pie_flow), M_DUMMYNET, M_NOWAIT | M_ZERO); flows = si->si_extra->flows; if (flows == NULL) { free(si->si_extra, M_DUMMYNET); si->si_extra = NULL; D("cannot allocate memory for fq_pie flows"); return ENOMEM ; } /* init perturbation for this si */ si->perturbation = random(); si->si_extra->nr_active_q = 0; /* init the old and new flows lists */ STAILQ_INIT(&si->newflows); STAILQ_INIT(&si->oldflows); /* init the flows (sub-queues) */ for (i = 0; i < schk->cfg.flows_cnt; i++) { flows[i].pst.parms = &schk->cfg.pcfg; flows[i].psi_extra = si->si_extra; pie_init(&flows[i], schk); } fq_pie_desc.ref_count++; return 0; }
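The while loop in fq_pie_dequeue() is deficit round robin: a flow spends its deficit as it transmits and earns one quantum each time it is rotated back. Stripped of the two-list bookkeeping, the per-flow arithmetic looks like the following sketch, with invented names and an assumed quantum:

#include <stdio.h>

#define QUANTUM 1514	/* assumed per-round byte allowance */

struct toy_flow { int deficit; };

/* A flow may send only while its deficit is non-negative; when the deficit
 * goes negative the flow defers a round and earns another quantum. */
static int toy_try_dequeue(struct toy_flow *f, int pkt_len)
{
	if (f->deficit < 0) {
		f->deficit += QUANTUM;	/* rotate to the old list, retry later */
		return 0;
	}
	f->deficit -= pkt_len;		/* send this packet */
	return 1;
}

int main(void)
{
	struct toy_flow f = { QUANTUM };
	int lens[] = { 1500, 1500, 1500, 1500 };

	for (int i = 0; i < 4; i++)
		printf("pkt %d: %s (deficit=%d)\n", i,
		    toy_try_dequeue(&f, lens[i]) ? "sent" : "deferred",
		    f.deficit);
	return 0;
}

With 1500-byte packets and a 1514-byte quantum the flow alternates between sending and waiting a round, which is the intended long-run fairness behaviour across competing sub-queues.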
/* * Free an fq_pie scheduler instance. */ static int fq_pie_free_sched(struct dn_sch_inst *_si) { struct fq_pie_si *si; struct fq_pie_schk *schk; struct fq_pie_flow *flows; int i; si = (struct fq_pie_si *)_si; schk = (struct fq_pie_schk *)(_si->sched+1); flows = si->si_extra->flows; for (i = 0; i < schk->cfg.flows_cnt; i++) { pie_cleanup(&flows[i]); } si->si_extra = NULL; return 0; } /* * Configure the FQ-PIE scheduler. * The configuration for the scheduler is passed from the ipfw userland. */ static int fq_pie_config(struct dn_schk *_schk) { struct fq_pie_schk *schk; struct dn_extra_parms *ep; struct dn_sch_fq_pie_parms *fqp_cfg; schk = (struct fq_pie_schk *)(_schk+1); ep = (struct dn_extra_parms *) _schk->cfg; /* the par array contains the fq_pie configuration as follows: * PIE: 0- qdelay_ref, 1- tupdate, 2- max_burst, * 3- max_ecnth, 4- alpha, 5- beta, 6- flags * FQ_PIE: 7- quantum, 8- limit, 9- flows */ if (ep && ep->oid.len ==sizeof(*ep) && ep->oid.subtype == DN_SCH_PARAMS) { fqp_cfg = &schk->cfg; if (ep->par[0] < 0) fqp_cfg->pcfg.qdelay_ref = fq_pie_sysctl.pcfg.qdelay_ref; else fqp_cfg->pcfg.qdelay_ref = ep->par[0]; if (ep->par[1] < 0) fqp_cfg->pcfg.tupdate = fq_pie_sysctl.pcfg.tupdate; else fqp_cfg->pcfg.tupdate = ep->par[1]; if (ep->par[2] < 0) fqp_cfg->pcfg.max_burst = fq_pie_sysctl.pcfg.max_burst; else fqp_cfg->pcfg.max_burst = ep->par[2]; if (ep->par[3] < 0) fqp_cfg->pcfg.max_ecnth = fq_pie_sysctl.pcfg.max_ecnth; else fqp_cfg->pcfg.max_ecnth = ep->par[3]; if (ep->par[4] < 0) fqp_cfg->pcfg.alpha = fq_pie_sysctl.pcfg.alpha; else fqp_cfg->pcfg.alpha = ep->par[4]; if (ep->par[5] < 0) fqp_cfg->pcfg.beta = fq_pie_sysctl.pcfg.beta; else fqp_cfg->pcfg.beta = ep->par[5]; if (ep->par[6] < 0) fqp_cfg->pcfg.flags = 0; else fqp_cfg->pcfg.flags = ep->par[6]; /* FQ configurations */ if (ep->par[7] < 0) fqp_cfg->quantum = fq_pie_sysctl.quantum; else fqp_cfg->quantum = ep->par[7]; if (ep->par[8] < 0) fqp_cfg->limit = fq_pie_sysctl.limit; else fqp_cfg->limit = ep->par[8]; if (ep->par[9] < 0) fqp_cfg->flows_cnt = fq_pie_sysctl.flows_cnt; else fqp_cfg->flows_cnt = ep->par[9]; /* Bound the configurations */ fqp_cfg->pcfg.qdelay_ref = BOUND_VAR(fqp_cfg->pcfg.qdelay_ref, 1, 5 * AQM_TIME_1S); fqp_cfg->pcfg.tupdate = BOUND_VAR(fqp_cfg->pcfg.tupdate, 1, 5 * AQM_TIME_1S); fqp_cfg->pcfg.max_burst = BOUND_VAR(fqp_cfg->pcfg.max_burst, 0, 5 * AQM_TIME_1S); fqp_cfg->pcfg.max_ecnth = BOUND_VAR(fqp_cfg->pcfg.max_ecnth, 0, PIE_SCALE); fqp_cfg->pcfg.alpha = BOUND_VAR(fqp_cfg->pcfg.alpha, 0, 7 * PIE_SCALE); fqp_cfg->pcfg.beta = BOUND_VAR(fqp_cfg->pcfg.beta, 0, 7 * PIE_SCALE); fqp_cfg->quantum = BOUND_VAR(fqp_cfg->quantum,1,9000); fqp_cfg->limit= BOUND_VAR(fqp_cfg->limit,1,20480); fqp_cfg->flows_cnt= BOUND_VAR(fqp_cfg->flows_cnt,1,65536); } else { D("Wrong parameters for fq_pie scheduler"); return 1; } return 0; }
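fq_pie_config() applies the same pattern to every parameter: a negative value from userland selects the sysctl default, then the result is clamped. A compact userspace rendering of that pattern follows; TOY_BOUND is an assumed equivalent of BOUND_VAR, whose exact definition is not shown in this diff:

#include <stdio.h>
#include <stdint.h>

/* Assumed equivalent of the kernel's BOUND_VAR(): clamp x into [lo, hi]. */
#define TOY_BOUND(x, lo, hi) ((x) < (lo) ? (lo) : (x) > (hi) ? (hi) : (x))

/* Negative userland parameter -> fall back to the default, then bound. */
static int64_t toy_cfg(int64_t par, int64_t dflt, int64_t lo, int64_t hi)
{
	int64_t v = (par < 0) ? dflt : par;

	return TOY_BOUND(v, lo, hi);
}

int main(void)
{
	printf("quantum=%lld\n", (long long)toy_cfg(-1, 1514, 1, 9000));
	printf("limit=%lld\n", (long long)toy_cfg(999999, 10240, 1, 20480));
	return 0;
}

The first call takes the default because the parameter is negative; the second shows an out-of-range request being clamped to the upper bound rather than rejected.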
/* * Return the FQ-PIE scheduler configuration. * The configuration for the scheduler is passed to userland. */ static int fq_pie_getconfig (struct dn_schk *_schk, struct dn_extra_parms *ep) { struct fq_pie_schk *schk = (struct fq_pie_schk *)(_schk+1); struct dn_sch_fq_pie_parms *fqp_cfg; fqp_cfg = &schk->cfg; strcpy(ep->name, fq_pie_desc.name); ep->par[0] = fqp_cfg->pcfg.qdelay_ref; ep->par[1] = fqp_cfg->pcfg.tupdate; ep->par[2] = fqp_cfg->pcfg.max_burst; ep->par[3] = fqp_cfg->pcfg.max_ecnth; ep->par[4] = fqp_cfg->pcfg.alpha; ep->par[5] = fqp_cfg->pcfg.beta; ep->par[6] = fqp_cfg->pcfg.flags; ep->par[7] = fqp_cfg->quantum; ep->par[8] = fqp_cfg->limit; ep->par[9] = fqp_cfg->flows_cnt; return 0; } /* * FQ-PIE scheduler descriptor * contains the type of the scheduler, the name, the size of extra * data structures, and function pointers. */ static struct dn_alg fq_pie_desc = { _SI( .type = ) DN_SCHED_FQ_PIE, _SI( .name = ) "FQ_PIE", _SI( .flags = ) 0, _SI( .schk_datalen = ) sizeof(struct fq_pie_schk), _SI( .si_datalen = ) sizeof(struct fq_pie_si) - sizeof(struct dn_sch_inst), _SI( .q_datalen = ) 0, _SI( .enqueue = ) fq_pie_enqueue, _SI( .dequeue = ) fq_pie_dequeue, _SI( .config = ) fq_pie_config, /* new sched i.e. sched X config ...*/ _SI( .destroy = ) NULL, /* sched x delete */ _SI( .new_sched = ) fq_pie_new_sched, /* new sched instance */ _SI( .free_sched = ) fq_pie_free_sched, /* delete sched instance */ _SI( .new_fsk = ) NULL, _SI( .free_fsk = ) NULL, _SI( .new_queue = ) NULL, _SI( .free_queue = ) NULL, _SI( .getconfig = ) fq_pie_getconfig, _SI( .ref_count = ) 0 }; DECLARE_DNSCHED_MODULE(dn_fq_pie, &fq_pie_desc); diff --git a/sys/netpfil/ipfw/ip_dn_io.c b/sys/netpfil/ipfw/ip_dn_io.c index 11ad498505f4..824e7450fb8f 100644 --- a/sys/netpfil/ipfw/ip_dn_io.c +++ b/sys/netpfil/ipfw/ip_dn_io.c @@ -1,969 +1,977 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Dummynet portions related to packet handling. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include /* NET_EPOCH_...
*/ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #include #include #ifdef NEW_AQM #include #endif #include /* * We keep a private variable for the simulation time, but we could * probably use an existing one ("softticks" in sys/kern/kern_timeout.c) * instead of V_dn_cfg.curr_time */ VNET_DEFINE(struct dn_parms, dn_cfg); #define V_dn_cfg VNET(dn_cfg) /* * We use a heap to store entities for which we have pending timer events. * The heap is checked at every tick and all entities with expired events * are extracted. */ MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap"); extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *); #ifdef SYSCTL_NODE /* * Because of the way the SYSBEGIN/SYSEND macros work on other * platforms, there should not be functions between them. * So keep the handlers outside the block. */ static int sysctl_hash_size(SYSCTL_HANDLER_ARGS) { int error, value; value = V_dn_cfg.hash_size; error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 16 || value > 65536) return (EINVAL); V_dn_cfg.hash_size = value; return (0); } static int sysctl_limits(SYSCTL_HANDLER_ARGS) { int error; long value; if (arg2 != 0) value = V_dn_cfg.slot_limit; else value = V_dn_cfg.byte_limit; error = sysctl_handle_long(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (arg2 != 0) { if (value < 1) return (EINVAL); V_dn_cfg.slot_limit = value; } else { if (value < 1500) return (EINVAL); V_dn_cfg.byte_limit = value; } return (0); } SYSBEGIN(f4) SYSCTL_DECL(_net_inet); SYSCTL_DECL(_net_inet_ip); #ifdef NEW_AQM SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Dummynet"); #else static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Dummynet"); #endif /* wrapper to pass V_dn_cfg fields to SYSCTL_* */ #define DC(x) (&(VNET_NAME(dn_cfg).x)) /* parameters */ SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_hash_size, "I", "Default hash table size"); SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 1, sysctl_limits, "L", "Upper limit in slots for pipe queue."); SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit, CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_limits, "L", "Upper limit in bytes for pipe queue."); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast, CTLFLAG_RW | CTLFLAG_VNET, DC(io_fast), 0, "Enable fast dummynet io."); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_VNET, DC(debug), 0, "Dummynet debug level"); /* RED parameters */ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD | CTLFLAG_VNET, DC(red_lookup_depth), 0, "Depth of RED lookup table"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD | CTLFLAG_VNET, DC(red_avg_pkt_size), 0, "RED Medium packet size"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD | CTLFLAG_VNET, DC(red_max_pkt_size), 0, "RED Max packet size"); /* time adjustment */ SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta, CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta), 0, "Last vs standard tick difference (usec)."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum, CTLFLAG_RD | 
CTLFLAG_VNET, DC(tick_delta_sum), 0, "Accumulated tick difference (usec)."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment, CTLFLAG_RD | CTLFLAG_VNET, DC(tick_adjustment), 0, "Tick adjustments done."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff, CTLFLAG_RD | CTLFLAG_VNET, DC(tick_diff), 0, "Adjusted vs non-adjusted curr_time difference (ticks)."); SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost, CTLFLAG_RD | CTLFLAG_VNET, DC(tick_lost), 0, "Number of ticks coalesced by dummynet taskqueue."); /* Drain parameters */ SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW | CTLFLAG_VNET, DC(expire), 0, "Expire empty queues/pipes"); SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle, CTLFLAG_RD | CTLFLAG_VNET, DC(expire_cycle), 0, "Expire cycle for queues/pipes"); /* statistics */ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count, CTLFLAG_RD | CTLFLAG_VNET, DC(schk_count), 0, "Number of schedulers"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count, CTLFLAG_RD | CTLFLAG_VNET, DC(si_count), 0, "Number of scheduler instances"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count, CTLFLAG_RD | CTLFLAG_VNET, DC(fsk_count), 0, "Number of flowsets"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count, CTLFLAG_RD | CTLFLAG_VNET, DC(queue_count), 0, "Number of queues"); SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt, CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt), 0, "Number of packets passed to dummynet."); SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast, CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_fast), 0, "Number of packets that bypassed the dummynet scheduler."); SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop, CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_drop), 0, "Number of packets dropped by dummynet."); #undef DC SYSEND #endif static void dummynet_send(struct mbuf *); /* * Return the mbuf tag holding the dummynet state (it should * be the first one on the list). */ struct dn_pkt_tag * dn_tag_get(struct mbuf *m) { struct m_tag *mtag = m_tag_first(m); #ifdef NEW_AQM /* XXX: skip the ts m_tag. For debugging only */ if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) { m_tag_delete(m,mtag); mtag = m_tag_first(m); D("skip TS tag"); } #endif KASSERT(mtag != NULL && mtag->m_tag_cookie == MTAG_ABI_COMPAT && mtag->m_tag_id == PACKET_TAG_DUMMYNET, ("packet on dummynet queue w/o dummynet tag!")); return (struct dn_pkt_tag *)(mtag+1); } #ifndef NEW_AQM static inline void mq_append(struct mq *q, struct mbuf *m) { #ifdef USERSPACE // buffers from netmap need to be copied // XXX note that the routine is not expected to fail ND("append %p to %p", m, q); if (m->m_flags & M_STACK) { struct mbuf *m_new; void *p; int l, ofs; ofs = m->m_data - m->__m_extbuf; // XXX allocate MGETHDR(m_new, M_NOWAIT, MT_DATA); ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p", m, m->__m_extbuf, m->__m_extlen, ofs, m_new); p = m_new->__m_extbuf; /* new pointer */ l = m_new->__m_extlen; /* new len */ if (l <= m->__m_extlen) { panic("extlen too large"); } *m_new = *m; // copy m_new->m_flags &= ~M_STACK; m_new->__m_extbuf = p; // point to new buffer _pkt_copy(m->__m_extbuf, p, m->__m_extlen); m_new->m_data = p + ofs; m = m_new; } #endif /* USERSPACE */ if (q->head == NULL) q->head = m; else q->tail->m_nextpkt = m; q->count++; q->tail = m; m->m_nextpkt = NULL; } #endif
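mq_append() is a constant-time tail insert on a singly linked packet list threaded through m_nextpkt. The same idea in a self-contained sketch with an invented node type:

#include <stdio.h>

struct toy_pkt { int id; struct toy_pkt *nextpkt; };
struct toy_mq { struct toy_pkt *head, *tail; int count; };

/* O(1) tail append, as in mq_append(): an empty queue sets head,
 * otherwise the old tail is linked to the new packet. */
static void toy_mq_append(struct toy_mq *q, struct toy_pkt *p)
{
	if (q->head == NULL)
		q->head = p;
	else
		q->tail->nextpkt = p;
	q->count++;
	q->tail = p;
	p->nextpkt = NULL;
}

int main(void)
{
	struct toy_mq q = { NULL, NULL, 0 };
	struct toy_pkt pkts[3] = { {0, NULL}, {1, NULL}, {2, NULL} };

	for (int i = 0; i < 3; i++)
		toy_mq_append(&q, &pkts[i]);
	for (struct toy_pkt *p = q.head; p != NULL; p = p->nextpkt)
		printf("pkt %d\n", p->id);
	return 0;
}

Keeping an explicit tail pointer is what makes the delay lines and result queues cheap to build packet by packet in the hot path.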
/* * Dispose of a list of packets. Use a function so that if we need to do * more work, there is a central point to do it. */ void dn_free_pkts(struct mbuf *mnext) { struct mbuf *m; while ((m = mnext) != NULL) { mnext = m->m_nextpkt; FREE_PKT(m); } } static int red_drops (struct dn_queue *q, int len) { /* * RED algorithm * * RED calculates the average queue size (avg) using a low-pass filter * with an exponential weighted (w_q) moving average: * avg <- (1-w_q) * avg + w_q * q_size * where q_size is the queue length (measured in bytes or * packets). * * If q_size == 0, we compute the idle time for the link, and set * avg = (1 - w_q)^(idle/s) * where s is the time needed for transmitting a medium-sized packet. * * Now, if avg < min_th the packet is enqueued. * If avg > max_th the packet is dropped. Otherwise, the packet is * dropped with probability P function of avg. */ struct dn_fsk *fs = q->fs; int64_t p_b = 0; /* Queue in bytes or packets? */ uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ? q->ni.len_bytes : q->ni.length; /* Average queue size estimation. */ if (q_size != 0) { /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */ int diff = SCALE(q_size) - q->avg; int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q); q->avg += (int)v; } else { /* * Queue is empty, find how long the queue has been * empty and use a lookup table for computing * (1 - * w_q)^(idle_time/s) where s is the time to send a * (small) packet. * XXX check wraps... */ if (q->avg) { u_int t = div64((V_dn_cfg.curr_time - q->q_time), fs->lookup_step); q->avg = (t < fs->lookup_depth) ? SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; } } /* Should I drop? */ if (q->avg < fs->min_th) { q->count = -1; return (0); /* accept packet */ } if (q->avg >= fs->max_th) { /* average queue >= max threshold */ if (fs->fs.flags & DN_IS_ECN) return (1); if (fs->fs.flags & DN_IS_GENTLE_RED) { /* * According to Gentle-RED, if avg is greater than * max_th the packet is dropped with a probability * p_b = c_3 * avg - c_4 * where c_3 = (1 - max_p) / max_th * c_4 = 1 - 2 * max_p */ p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4; } else { q->count = -1; return (1); } } else if (q->avg > fs->min_th) { if (fs->fs.flags & DN_IS_ECN) return (1); /* * We compute p_b using the linear dropping function * p_b = c_1 * avg - c_2 * where c_1 = max_p / (max_th - min_th) * c_2 = max_p * min_th / (max_th - min_th) */ p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2; } if (fs->fs.flags & DN_QSIZE_BYTES) p_b = div64((p_b * len) , fs->max_pkt_size); if (++q->count == 0) q->random = random() & 0xffff; else { /* * q->count counts packets arrived since last drop, so a greater * value of q->count means a greater packet drop probability. */ if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) { q->count = 0; /* After a drop we calculate a new random value. */ q->random = random() & 0xffff; return (1); /* drop */ } } /* End of RED algorithm. */ return (0); /* accept */ }
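The fixed-point arithmetic in red_drops() is easier to check with small numbers. The sketch below runs the EWMA and the linear drop function p_b = c_1*avg - c_2 with assumed thresholds; the 16-bit fixed point and the exaggerated w_q are demo choices, not the kernel's values:

#include <stdio.h>
#include <stdint.h>

/* Assumed 16-bit fixed point standing in for SCALE()/SCALE_MUL(). */
#define FP 16
#define SC(x)      ((int64_t)(x) << FP)
#define MUL(a, b)  (((a) * (b)) >> FP)

int main(void)
{
	int64_t avg = 0, w_q = SC(1) / 8;	/* exaggerated so avg moves */
	int64_t min_th = SC(5), max_th = SC(15), max_p = SC(1) / 10;
	/* linear segment p_b = c_1*avg - c_2 between min_th and max_th */
	int64_t c_1 = max_p / 10;	/* max_p over (max_th - min_th) in pkts */
	int64_t c_2 = MUL(c_1, min_th);

	for (int q_size = 4; q_size <= 24; q_size += 4) {
		avg += MUL(SC(q_size) - avg, w_q);	/* avg += w_q*(q - avg) */
		int64_t p_b = MUL(c_1, avg) - c_2;
		if (avg < min_th)
			p_b = 0;
		else if (avg >= max_th)
			p_b = max_p;
		printf("q=%2d avg=%6.2f p_b=%.4f\n", q_size,
		    (double)avg / SC(1), (double)p_b / SC(1));
	}
	return 0;
}

The output shows the average lagging behind the instantaneous queue size, then p_b rising linearly from zero at min_th to max_p at max_th, which is the RED drop curve the comment block above describes.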
/* * ECN/ECT Processing (partially adapted from altq) */ #ifndef NEW_AQM static #endif int ecn_mark(struct mbuf* m) { struct ip *ip; ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off); switch (ip->ip_v) { case IPVERSION: { uint16_t old; if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) return (0); /* not-ECT */ if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) return (1); /* already marked */ /* * ecn-capable but not marked, * mark CE and update checksum */ old = *(uint16_t *)ip; ip->ip_tos |= IPTOS_ECN_CE; ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip); return (1); } #ifdef INET6 case (IPV6_VERSION >> 4): { struct ip6_hdr *ip6 = (struct ip6_hdr *)ip; u_int32_t flowlabel; flowlabel = ntohl(ip6->ip6_flow); if ((flowlabel >> 28) != 6) return (0); /* version mismatch! */ if ((flowlabel & (IPTOS_ECN_MASK << 20)) == (IPTOS_ECN_NOTECT << 20)) return (0); /* not-ECT */ if ((flowlabel & (IPTOS_ECN_MASK << 20)) == (IPTOS_ECN_CE << 20)) return (1); /* already marked */ /* * ecn-capable but not marked, mark CE */ flowlabel |= (IPTOS_ECN_CE << 20); ip6->ip6_flow = htonl(flowlabel); return (1); } #endif } return (0); } /* * Enqueue a packet in q, subject to space and queue management policy * (whose parameters are in q->fs). * Update stats for the queue and the scheduler. * Return 0 on success, 1 on drop. The packet is consumed anyway. */ int dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop) { struct dn_fs *f; struct dn_flow *ni; /* stats for scheduler instance */ uint64_t len; if (q->fs == NULL || q->_si == NULL) { printf("%s fs %p si %p, dropping\n", __FUNCTION__, q->fs, q->_si); FREE_PKT(m); return 1; } f = &(q->fs->fs); ni = &q->_si->ni; len = m->m_pkthdr.len; /* Update statistics, then check reasons to drop pkt. */ q->ni.tot_bytes += len; q->ni.tot_pkts++; ni->tot_bytes += len; ni->tot_pkts++; if (drop) goto drop; if (f->plr && random() < f->plr) goto drop; + if (m->m_pkthdr.rcvif != NULL) + m_rcvif_serialize(m); #ifdef NEW_AQM /* Call AQM enqueue function */ if (q->fs->aqmfp) return q->fs->aqmfp->enqueue(q ,m); #endif if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) { if (!(f->flags & DN_IS_ECN) || !ecn_mark(m)) goto drop; } if (f->flags & DN_QSIZE_BYTES) { if (q->ni.len_bytes > f->qsize) goto drop; } else if (q->ni.length >= f->qsize) { goto drop; } mq_append(&q->mq, m); q->ni.length++; q->ni.len_bytes += len; ni->length++; ni->len_bytes += len; return (0); drop: V_dn_cfg.io_pkt_drop++; q->ni.drops++; ni->drops++; FREE_PKT(m); return (1); } /* * Fetch packets from the delay line which are due now. If there are * leftover packets, reinsert the delay line in the heap. * Runs under scheduler lock. */ static void transmit_event(struct mq *q, struct delay_line *dline, uint64_t now) { struct mbuf *m; struct dn_pkt_tag *pkt = NULL; dline->oid.subtype = 0; /* not in heap */ while ((m = dline->mq.head) != NULL) { pkt = dn_tag_get(m); if (!DN_KEY_LEQ(pkt->output_time, now)) break; dline->mq.head = m->m_nextpkt; dline->mq.count--; - mq_append(q, m); + if (m->m_pkthdr.rcvif != NULL && + __predict_false(m_rcvif_restore(m) == NULL)) + m_freem(m); + else + mq_append(q, m); } if (m != NULL) { dline->oid.subtype = 1; /* in heap */ heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline); } }
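ecn_mark() above adjusts ip_sum incrementally instead of recomputing it over the whole header. The adjustment is the RFC 1624 update HC' = ~(~HC + ~m + m'). A standalone sketch of that arithmetic follows; the kernel's cksum_adjust() is not shown in this diff, and byte-order details are glossed over here:

#include <stdio.h>
#include <stdint.h>

/* RFC 1624 incremental checksum update after a 16-bit header word
 * changes from 'oldw' to 'neww'. */
static uint16_t toy_cksum_adjust(uint16_t cksum, uint16_t oldw, uint16_t neww)
{
	uint32_t sum = (uint16_t)~cksum + (uint16_t)~oldw + neww;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* first 16-bit word of an IPv4 header: version/ihl and TOS */
	uint16_t word = 0x4500 | 0x02;	/* ECT(0): ECN-capable, not marked */
	uint16_t cksum = 0xbeef;	/* assumed original checksum */
	uint16_t oldw = word;

	word |= 0x03;			/* set CE in the two ECN bits */
	cksum = toy_cksum_adjust(cksum, oldw, word);
	printf("word 0x%04x -> 0x%04x, cksum 0x%04x\n", oldw, word, cksum);
	return 0;
}

The incremental form touches only the changed word, which matters on this path because every CE mark would otherwise pay for a full header checksum.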
/* * Convert the additional MAC overheads/delays into an equivalent * number of bits for the given data rate. The samples are * in milliseconds so we need to divide by 1000. */ static uint64_t extra_bits(struct mbuf *m, struct dn_schk *s) { int index; uint64_t bits; struct dn_profile *pf = s->profile; if (!pf || pf->samples_no == 0) return 0; index = random() % pf->samples_no; bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000); if (index >= pf->loss_level) { struct dn_pkt_tag *dt = dn_tag_get(m); if (dt) dt->dn_dir = DIR_DROP; } return bits; } /* * Send traffic from a scheduler instance due by 'now'. * Return a pointer to the head of the queue. */ static struct mbuf * serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now) { struct mq def_q; struct dn_schk *s = si->sched; struct mbuf *m = NULL; int delay_line_idle = (si->dline.mq.head == NULL); int done; uint32_t bw; if (q == NULL) { q = &def_q; q->head = NULL; } bw = s->link.bandwidth; si->kflags &= ~DN_ACTIVE; if (bw > 0) si->credit += (now - si->sched_time) * bw; else si->credit = 0; si->sched_time = now; done = 0; while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) { uint64_t len_scaled; done++; len_scaled = (bw == 0) ? 0 : hz * (m->m_pkthdr.len * 8 + extra_bits(m, s)); si->credit -= len_scaled; /* Move the packet into the delay line */ dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay ; + if (m->m_pkthdr.rcvif != NULL) + m_rcvif_serialize(m); mq_append(&si->dline.mq, m); } /* * If credit >= 0 the instance is idle, mark time. * Otherwise put back in the heap, and adjust the output * time of the last inserted packet, m, which was too early. */ if (si->credit >= 0) { si->idle_time = now; } else { uint64_t t; KASSERT (bw > 0, ("bw=0 and credit<0 ?")); t = div64(bw - 1 - si->credit, bw); if (m) dn_tag_get(m)->output_time += t; si->kflags |= DN_ACTIVE; heap_insert(&V_dn_cfg.evheap, now + t, si); } if (delay_line_idle && done) transmit_event(q, &si->dline, now); return q->head; }
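serve_sched()'s credit arithmetic is a shaper: credit accrues at 'bandwidth' per elapsed tick and each transmitted packet costs hz * (payload bits + MAC overhead). A toy model of just that accounting, with assumed units (bandwidth in bits per second, time in ticks) and the extra_bits term omitted:

#include <stdio.h>
#include <stdint.h>

#define TOY_HZ 1000	/* assumed tick rate */

/* Accrue credit for the elapsed ticks, then try to pay for one packet.
 * Returns 1 if the packet could be sent (credit was non-negative),
 * letting the balance go negative afterwards, as serve_sched() does. */
static int toy_serve(int64_t *credit, uint64_t now, uint64_t *sched_time,
    uint64_t bw, int pkt_len)
{
	*credit += (int64_t)((now - *sched_time) * bw);
	*sched_time = now;
	if (*credit < 0)
		return 0;	/* still paying off the previous packet */
	*credit -= (int64_t)TOY_HZ * pkt_len * 8;	/* hz * packet bits */
	return 1;
}

int main(void)
{
	int64_t credit = 0;
	uint64_t sched_time = 0, bw = 1000000;	/* 1 Mbit/s */

	for (uint64_t now = 1; now <= 36; now++)
		if (toy_serve(&credit, now, &sched_time, bw, 1500))
			printf("tick %llu: sent 1500 bytes, credit now %lld\n",
			    (unsigned long long)now, (long long)credit);
	return 0;
}

At 1 Mbit/s a 1500-byte packet costs twelve ticks of credit, so the toy prints one send every twelve ticks after the first; the kernel uses the same negative balance to compute when to reinsert the instance into the event heap.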
/* * The timer handler for dummynet. Time is computed in ticks, but * the code is tolerant to the actual rate at which this is called. * Once complete, the function reschedules itself for the next tick. */ void dummynet_task(void *context, int pending) { struct timeval t; struct mq q = { NULL, NULL }; /* queue to accumulate results */ struct epoch_tracker et; VNET_ITERATOR_DECL(vnet_iter); VNET_LIST_RLOCK(); NET_EPOCH_ENTER(et); VNET_FOREACH(vnet_iter) { memset(&q, 0, sizeof(struct mq)); CURVNET_SET(vnet_iter); DN_BH_WLOCK(); /* Update the number of lost (coalesced) ticks. */ V_dn_cfg.tick_lost += pending - 1; getmicrouptime(&t); /* Last tick duration (usec). */ V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 + (t.tv_usec - V_dn_cfg.prev_t.tv_usec); /* Last tick vs standard tick difference (usec). */ V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz; /* Accumulated tick difference (usec). */ V_dn_cfg.tick_delta_sum += V_dn_cfg.tick_delta; V_dn_cfg.prev_t = t; /* * Adjust curr_time if the accumulated tick difference is * greater than the 'standard' tick. Since curr_time should * be monotonically increasing, we do positive adjustments * as required, and throttle curr_time in case of negative * adjustment. */ V_dn_cfg.curr_time++; if (V_dn_cfg.tick_delta_sum - tick >= 0) { int diff = V_dn_cfg.tick_delta_sum / tick; V_dn_cfg.curr_time += diff; V_dn_cfg.tick_diff += diff; V_dn_cfg.tick_delta_sum %= tick; V_dn_cfg.tick_adjustment++; } else if (V_dn_cfg.tick_delta_sum + tick <= 0) { V_dn_cfg.curr_time--; V_dn_cfg.tick_diff--; V_dn_cfg.tick_delta_sum += tick; V_dn_cfg.tick_adjustment++; } /* serve pending events, accumulate in q */ for (;;) { struct dn_id *p; /* generic parameter to handler */ if (V_dn_cfg.evheap.elements == 0 || DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key)) break; p = HEAP_TOP(&V_dn_cfg.evheap)->object; heap_extract(&V_dn_cfg.evheap, NULL); if (p->type == DN_SCH_I) { serve_sched(&q, (struct dn_sch_inst *)p, V_dn_cfg.curr_time); } else { /* extracted a delay line */ transmit_event(&q, (struct delay_line *)p, V_dn_cfg.curr_time); } } if (V_dn_cfg.expire && ++V_dn_cfg.expire_cycle >= V_dn_cfg.expire) { V_dn_cfg.expire_cycle = 0; dn_drain_scheduler(); dn_drain_queue(); } DN_BH_WUNLOCK(); if (q.head != NULL) dummynet_send(q.head); CURVNET_RESTORE(); } NET_EPOCH_EXIT(et); VNET_LIST_RUNLOCK(); /* Schedule our next run. */ dn_reschedule(); }
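The drift compensation in dummynet_task() accumulates the difference between measured and nominal tick lengths and only touches curr_time once a whole tick of drift has built up, jumping forward when late and throttling by one when early. The same arithmetic over a synthetic series of tick durations (all values invented):

#include <stdio.h>

#define TICK_US 1000	/* assumed standard tick length, usec */

int main(void)
{
	long delta_sum = 0, curr_time = 0;
	/* synthetic measured tick lengths in usec: some long, some short */
	int last[] = { 1600, 1700, 1000, 300, 200, 1000, 1500, 1000 };

	for (int i = 0; i < 8; i++) {
		curr_time++;			/* nominal advance */
		delta_sum += last[i] - TICK_US;	/* accumulated drift */
		if (delta_sum - TICK_US >= 0) {	/* running late: jump ahead */
			curr_time += delta_sum / TICK_US;
			delta_sum %= TICK_US;
		} else if (delta_sum + TICK_US <= 0) {	/* early: throttle */
			curr_time--;
			delta_sum += TICK_US;
		}
		printf("tick %d: curr_time=%ld drift=%ld\n",
		    i, curr_time, delta_sum);
	}
	return 0;
}

Keeping the correction quantized to whole ticks is what preserves monotonicity: curr_time never moves backwards, it just advances by zero, one, or several ticks per callout.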
/* * Forward a chain of packets to the proper destination. * This runs outside the dummynet lock. */ static void dummynet_send(struct mbuf *m) { struct mbuf *n; NET_EPOCH_ASSERT(); for (; m != NULL; m = n) { struct ifnet *ifp = NULL; /* gcc 3.4.6 complains */ struct m_tag *tag; int dst; n = m->m_nextpkt; m->m_nextpkt = NULL; tag = m_tag_first(m); if (tag == NULL) { /* should not happen */ dst = DIR_DROP; } else { struct dn_pkt_tag *pkt = dn_tag_get(m); /* extract the dummynet info, rename the tag * to carry reinject info. */ if (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2) && pkt->ifp == NULL) { dst = DIR_DROP; } else { dst = pkt->dn_dir; ifp = pkt->ifp; tag->m_tag_cookie = MTAG_IPFW_RULE; tag->m_tag_id = 0; } } switch (dst) { case DIR_OUT: ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL); break ; case DIR_IN : netisr_dispatch(NETISR_IP, m); break; #ifdef INET6 case DIR_IN | PROTO_IPV6: netisr_dispatch(NETISR_IPV6, m); break; case DIR_OUT | PROTO_IPV6: ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL); break; #endif case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */ if (bridge_dn_p != NULL) ((*bridge_dn_p)(m, ifp)); else printf("dummynet: if_bridge not loaded\n"); break; case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */ /* * The Ethernet code assumes the Ethernet header is * contiguous in the first mbuf header. * Ensure this is true. */ if (m->m_len < ETHER_HDR_LEN && (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) { printf("dummynet/ether: pullup failed, " "dropping packet\n"); break; } ether_demux(m->m_pkthdr.rcvif, m); break; case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */ ether_output_frame(ifp, m); break; case DIR_DROP: /* drop the packet after some time */ FREE_PKT(m); break; default: printf("dummynet: bad switch %d!\n", dst); FREE_PKT(m); break; } } } static inline int tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa) { struct dn_pkt_tag *dt; struct m_tag *mtag; mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*dt), M_NOWAIT | M_ZERO); if (mtag == NULL) return 1; /* Cannot allocate packet header. */ m_tag_prepend(m, mtag); /* Attach to mbuf chain. */ dt = (struct dn_pkt_tag *)(mtag + 1); dt->rule = fwa->rule; /* only keep this info */ dt->rule.info &= (IPFW_ONEPASS | IPFW_IS_DUMMYNET); dt->dn_dir = dir; dt->ifp = fwa->flags & IPFW_ARGS_OUT ? fwa->ifp : NULL; /* dt->output_time is updated as we move through */ dt->output_time = V_dn_cfg.curr_time; dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0; return 0; } /* * dummynet hook for packets. * We use the argument to locate the flowset fs and the sched_set sch * associated with it. Then we apply flow_mask and sched_mask to * determine the queue and scheduler instances. */ int dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa) { struct mbuf *m = *m0; struct dn_fsk *fs = NULL; struct dn_sch_inst *si; struct dn_queue *q = NULL; /* default */ int fs_id, dir; fs_id = (fwa->rule.info & IPFW_INFO_MASK) + ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0); /* XXXGL: convert args to dir */ if (fwa->flags & IPFW_ARGS_IN) dir = DIR_IN; else dir = DIR_OUT; if (fwa->flags & IPFW_ARGS_ETHER) dir |= PROTO_LAYER2; else if (fwa->flags & IPFW_ARGS_IP6) dir |= PROTO_IPV6; DN_BH_WLOCK(); V_dn_cfg.io_pkt++; /* we could actually tag outside the lock, but who cares... */ if (tag_mbuf(m, dir, fwa)) goto dropit; /* XXX locate_flowset could be optimised with a direct ref. */ fs = dn_ht_find(V_dn_cfg.fshash, fs_id, 0, NULL); if (fs == NULL) goto dropit; /* This queue/pipe does not exist! */ if (fs->sched == NULL) /* should not happen */ goto dropit; /* find the scheduler instance, possibly applying sched_mask */ si = ipdn_si_find(fs->sched, &(fwa->f_id)); if (si == NULL) goto dropit; /* * If the scheduler supports multiple queues, find the right one * (otherwise it will be ignored by enqueue). */ if (fs->sched->fp->flags & DN_MULTIQUEUE) { q = ipdn_q_find(fs, si, &(fwa->f_id)); if (q == NULL) goto dropit; } if (fs->sched->fp->enqueue(si, q, m)) { /* packet was dropped by enqueue() */ m = *m0 = NULL; /* dn_enqueue already increases io_pkt_drop */ V_dn_cfg.io_pkt_drop--; goto dropit; } if (si->kflags & DN_ACTIVE) { m = *m0 = NULL; /* consumed */ goto done; /* already active, nothing to do */ } /* compute the initial allowance */ if (si->idle_time < V_dn_cfg.curr_time) { /* Do this only on the first packet on an idle pipe */ struct dn_link *p = &fs->sched->link; si->sched_time = V_dn_cfg.curr_time; si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0; if (p->burst) { uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) * p->bandwidth; if (burst > p->burst) burst = p->burst; si->credit += burst; } } /* pass through the scheduler and delay line */ m = serve_sched(NULL, si, V_dn_cfg.curr_time); /* optimization -- pass it back to ipfw for immediate send */ /* XXX Don't call dummynet_send() if the scheduler returns the packet * just enqueued. This avoids a lock order reversal. */ if (/*V_dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0 ) { /* fast io, rename the tag * to carry reinject info. */ struct m_tag *tag = m_tag_first(m); tag->m_tag_cookie = MTAG_IPFW_RULE; tag->m_tag_id = 0; V_dn_cfg.io_pkt_fast++; if (m->m_nextpkt != NULL) { printf("dummynet: fast io: pkt chain detected!\n"); m->m_nextpkt = NULL; } m = NULL; } else { *m0 = NULL; } done: DN_BH_WUNLOCK(); if (m) dummynet_send(m); return 0; dropit: V_dn_cfg.io_pkt_drop++; DN_BH_WUNLOCK(); if (m) FREE_PKT(m); *m0 = NULL; return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS; }
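Finally, the "initial allowance" granted in dummynet_io() to the first packet of an idle pipe caps the accumulated credit at the configured burst. A minimal rendering of that computation with assumed units (bandwidth in bits per tick, time in ticks):

#include <stdio.h>
#include <stdint.h>

/* Credit granted to a pipe that has been idle since 'idle_time':
 * the io_fast bonus plus at most 'burst' bits of accumulated idle credit,
 * mirroring the computation in dummynet_io() above. */
static uint64_t toy_initial_credit(uint64_t curr_time, uint64_t idle_time,
    uint64_t bandwidth, uint64_t burst, int io_fast)
{
	uint64_t credit = io_fast ? bandwidth : 0;
	uint64_t b = (curr_time - idle_time) * bandwidth;

	if (b > burst)
		b = burst;
	return credit + b;
}

int main(void)
{
	/* idle for 50 ticks at 1 Mbit per tick with a 10-Mbit burst cap */
	printf("credit=%llu\n", (unsigned long long)
	    toy_initial_credit(100, 50, 1000000, 10000000, 1));
	return 0;
}

Without the cap, a long-idle pipe would accumulate unbounded credit and release an arbitrarily large line-rate burst on its first packet; the clamp bounds that burst to what the administrator configured.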