diff --git a/sys/net/altq/altq_cbq.c b/sys/net/altq/altq_cbq.c index bcba09267289..07c61fccb19b 100644 --- a/sys/net/altq/altq_cbq.c +++ b/sys/net/altq/altq_cbq.c @@ -1,562 +1,564 @@ /*- * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the SMCC Technology * Development Group at Sun Microsystems, Inc. * * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is * provided "as is" without express or implied warranty of any kind. * * These notices must be retained in any copies of any part of this software. * * $KAME: altq_cbq.c,v 1.19 2003/09/17 14:23:25 kjc Exp $ */ #include "opt_altq.h" #include "opt_inet.h" #include "opt_inet6.h" #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Forward Declarations. 
*/ static int cbq_class_destroy(cbq_state_t *, struct rm_class *); static struct rm_class *clh_to_clp(cbq_state_t *, u_int32_t); static int cbq_clear_interface(cbq_state_t *); static int cbq_request(struct ifaltq *, int, void *); static int cbq_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *); static struct mbuf *cbq_dequeue(struct ifaltq *, int); static void cbqrestart(struct ifaltq *); static void get_class_stats(class_stats_t *, struct rm_class *); static void cbq_purge(cbq_state_t *); /* * int * cbq_class_destroy(cbq_mod_state_t *, struct rm_class *) - This * function destroys a given traffic class. Before destroying * the class, all traffic for that class is released. */ static int cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl) { int i; /* delete the class */ rmc_delete_class(&cbqp->ifnp, cl); /* * free the class handle */ for (i = 0; i < CBQ_MAX_CLASSES; i++) if (cbqp->cbq_class_tbl[i] == cl) cbqp->cbq_class_tbl[i] = NULL; if (cl == cbqp->ifnp.root_) cbqp->ifnp.root_ = NULL; if (cl == cbqp->ifnp.default_) cbqp->ifnp.default_ = NULL; return (0); } /* convert class handle to class pointer */ static struct rm_class * clh_to_clp(cbq_state_t *cbqp, u_int32_t chandle) { int i; struct rm_class *cl; if (chandle == 0) return (NULL); /* * first, try optimistically the slot matching the lower bits of * the handle. if it fails, do the linear table search. 
*/ i = chandle % CBQ_MAX_CLASSES; if ((cl = cbqp->cbq_class_tbl[i]) != NULL && cl->stats_.handle == chandle) return (cl); for (i = 0; i < CBQ_MAX_CLASSES; i++) if ((cl = cbqp->cbq_class_tbl[i]) != NULL && cl->stats_.handle == chandle) return (cl); return (NULL); } static int cbq_clear_interface(cbq_state_t *cbqp) { int again, i; struct rm_class *cl; #ifdef ALTQ3_CLFIER_COMPAT /* free the filters for this interface */ acc_discard_filters(&cbqp->cbq_classifier, NULL, 1); #endif /* clear out the classes now */ do { again = 0; for (i = 0; i < CBQ_MAX_CLASSES; i++) { if ((cl = cbqp->cbq_class_tbl[i]) != NULL) { if (is_a_parent_class(cl)) again++; else { cbq_class_destroy(cbqp, cl); cbqp->cbq_class_tbl[i] = NULL; if (cl == cbqp->ifnp.root_) cbqp->ifnp.root_ = NULL; if (cl == cbqp->ifnp.default_) cbqp->ifnp.default_ = NULL; } } } } while (again); return (0); } static int cbq_request(struct ifaltq *ifq, int req, void *arg) { cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc; IFQ_LOCK_ASSERT(ifq); switch (req) { case ALTRQ_PURGE: cbq_purge(cbqp); break; } return (0); } /* copy the stats info in rm_class to class_states_t */ static void get_class_stats(class_stats_t *statsp, struct rm_class *cl) { + memset(statsp, 0, sizeof(*statsp)); + statsp->xmit_cnt = cl->stats_.xmit_cnt; statsp->drop_cnt = cl->stats_.drop_cnt; statsp->over = cl->stats_.over; statsp->borrows = cl->stats_.borrows; statsp->overactions = cl->stats_.overactions; statsp->delays = cl->stats_.delays; statsp->depth = cl->depth_; statsp->priority = cl->pri_; statsp->maxidle = cl->maxidle_; statsp->minidle = cl->minidle_; statsp->offtime = cl->offtime_; statsp->qmax = qlimit(cl->q_); statsp->ns_per_byte = cl->ns_per_byte_; statsp->wrr_allot = cl->w_allotment_; statsp->qcnt = qlen(cl->q_); statsp->avgidle = cl->avgidle_; statsp->qtype = qtype(cl->q_); #ifdef ALTQ_RED if (q_is_red(cl->q_)) red_getstats(cl->red_, &statsp->red[0]); #endif #ifdef ALTQ_RIO if (q_is_rio(cl->q_)) rio_getstats((rio_t *)cl->red_, 
&statsp->red[0]); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->q_)) codel_getstats(cl->codel_, &statsp->codel); #endif } int cbq_pfattach(struct pf_altq *a) { struct ifnet *ifp; int s, error; if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL) return (EINVAL); s = splnet(); error = altq_attach(&ifp->if_snd, ALTQT_CBQ, a->altq_disc, cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL); splx(s); return (error); } int cbq_add_altq(struct ifnet *ifp, struct pf_altq *a) { cbq_state_t *cbqp; if (ifp == NULL) return (EINVAL); if (!ALTQ_IS_READY(&ifp->if_snd)) return (ENODEV); /* allocate and initialize cbq_state_t */ cbqp = malloc(sizeof(cbq_state_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (cbqp == NULL) return (ENOMEM); CALLOUT_INIT(&cbqp->cbq_callout); cbqp->cbq_qlen = 0; cbqp->ifnp.ifq_ = &ifp->if_snd; /* keep the ifq */ /* keep the state in pf_altq */ a->altq_disc = cbqp; return (0); } int cbq_remove_altq(struct pf_altq *a) { cbq_state_t *cbqp; if ((cbqp = a->altq_disc) == NULL) return (EINVAL); a->altq_disc = NULL; cbq_clear_interface(cbqp); if (cbqp->ifnp.default_) cbq_class_destroy(cbqp, cbqp->ifnp.default_); if (cbqp->ifnp.root_) cbq_class_destroy(cbqp, cbqp->ifnp.root_); /* deallocate cbq_state_t */ free(cbqp, M_DEVBUF); return (0); } int cbq_add_queue(struct pf_altq *a) { struct rm_class *borrow, *parent; cbq_state_t *cbqp; struct rm_class *cl; struct cbq_opts *opts; int i; if ((cbqp = a->altq_disc) == NULL) return (EINVAL); if (a->qid == 0) return (EINVAL); /* * find a free slot in the class table. if the slot matching * the lower bits of qid is free, use this slot. otherwise, * use the first free slot. */ i = a->qid % CBQ_MAX_CLASSES; if (cbqp->cbq_class_tbl[i] != NULL) { for (i = 0; i < CBQ_MAX_CLASSES; i++) if (cbqp->cbq_class_tbl[i] == NULL) break; if (i == CBQ_MAX_CLASSES) return (EINVAL); } opts = &a->pq_u.cbq_opts; /* check parameters */ if (a->priority >= CBQ_MAXPRI) return (EINVAL); /* Get pointers to parent and borrow classes. 
*/ parent = clh_to_clp(cbqp, a->parent_qid); if (opts->flags & CBQCLF_BORROW) borrow = parent; else borrow = NULL; /* * A class must borrow from it's parent or it can not * borrow at all. Hence, borrow can be null. */ if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) { printf("cbq_add_queue: no parent class!\n"); return (EINVAL); } if ((borrow != parent) && (borrow != NULL)) { printf("cbq_add_class: borrow class != parent\n"); return (EINVAL); } /* * check parameters */ switch (opts->flags & CBQCLF_CLASSMASK) { case CBQCLF_ROOTCLASS: if (parent != NULL) return (EINVAL); if (cbqp->ifnp.root_) return (EINVAL); break; case CBQCLF_DEFCLASS: if (cbqp->ifnp.default_) return (EINVAL); break; case 0: if (a->qid == 0) return (EINVAL); break; default: /* more than two flags bits set */ return (EINVAL); } /* * create a class. if this is a root class, initialize the * interface. */ if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) { rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte, cbqrestart, a->qlimit, RM_MAXQUEUED, opts->maxidle, opts->minidle, opts->offtime, opts->flags); cl = cbqp->ifnp.root_; } else { cl = rmc_newclass(a->priority, &cbqp->ifnp, opts->ns_per_byte, rmc_delay_action, a->qlimit, parent, borrow, opts->maxidle, opts->minidle, opts->offtime, opts->pktsize, opts->flags); } if (cl == NULL) return (ENOMEM); /* return handle to user space. */ cl->stats_.handle = a->qid; cl->stats_.depth = cl->depth_; /* save the allocated class */ cbqp->cbq_class_tbl[i] = cl; if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS) cbqp->ifnp.default_ = cl; return (0); } int cbq_remove_queue(struct pf_altq *a) { struct rm_class *cl; cbq_state_t *cbqp; int i; if ((cbqp = a->altq_disc) == NULL) return (EINVAL); if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) return (EINVAL); /* if we are a parent class, then return an error. 
*/ if (is_a_parent_class(cl)) return (EINVAL); /* delete the class */ rmc_delete_class(&cbqp->ifnp, cl); /* * free the class handle */ for (i = 0; i < CBQ_MAX_CLASSES; i++) if (cbqp->cbq_class_tbl[i] == cl) { cbqp->cbq_class_tbl[i] = NULL; if (cl == cbqp->ifnp.root_) cbqp->ifnp.root_ = NULL; if (cl == cbqp->ifnp.default_) cbqp->ifnp.default_ = NULL; break; } return (0); } int cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version) { cbq_state_t *cbqp; struct rm_class *cl; class_stats_t stats; int error = 0; if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL) return (EBADF); if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) return (EINVAL); if (*nbytes < sizeof(stats)) return (EINVAL); get_class_stats(&stats, cl); if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0) return (error); *nbytes = sizeof(stats); return (0); } /* * int * cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pattr) * - Queue data packets. * * cbq_enqueue is set to ifp->if_altqenqueue and called by an upper * layer (e.g. ether_output). cbq_enqueue queues the given packet * to the cbq, then invokes the driver's start routine. * * Assumptions: called in splimp * Returns: 0 if the queueing is successful. * ENOBUFS if a packet dropping occurred as a result of * the queueing. */ static int cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr) { cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc; struct rm_class *cl; struct pf_mtag *t; int len; IFQ_LOCK_ASSERT(ifq); /* grab class set by classifier */ if ((m->m_flags & M_PKTHDR) == 0) { /* should not happen */ printf("altq: packet for %s does not have pkthdr\n", ifq->altq_ifp->if_xname); m_freem(m); return (ENOBUFS); } cl = NULL; if ((t = pf_find_mtag(m)) != NULL) cl = clh_to_clp(cbqp, t->qid); if (cl == NULL) { cl = cbqp->ifnp.default_; if (cl == NULL) { m_freem(m); return (ENOBUFS); } } cl->pktattr_ = NULL; len = m_pktlen(m); if (rmc_queue_packet(cl, m) != 0) { /* drop occurred. 
some mbuf was freed in rmc_queue_packet. */ PKTCNTR_ADD(&cl->stats_.drop_cnt, len); return (ENOBUFS); } /* successfully queued. */ ++cbqp->cbq_qlen; IFQ_INC_LEN(ifq); return (0); } static struct mbuf * cbq_dequeue(struct ifaltq *ifq, int op) { cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc; struct mbuf *m; IFQ_LOCK_ASSERT(ifq); m = rmc_dequeue_next(&cbqp->ifnp, op); if (m && op == ALTDQ_REMOVE) { --cbqp->cbq_qlen; /* decrement # of packets in cbq */ IFQ_DEC_LEN(ifq); /* Update the class. */ rmc_update_class_util(&cbqp->ifnp); } return (m); } /* * void * cbqrestart(queue_t *) - Restart sending of data. * called from rmc_restart in splimp via timeout after waking up * a suspended class. * Returns: NONE */ static void cbqrestart(struct ifaltq *ifq) { cbq_state_t *cbqp; struct ifnet *ifp; IFQ_LOCK_ASSERT(ifq); if (!ALTQ_IS_ENABLED(ifq)) /* cbq must have been detached */ return; if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL) /* should not happen */ return; ifp = ifq->altq_ifp; if (ifp->if_start && cbqp->cbq_qlen > 0 && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { IFQ_UNLOCK(ifq); (*ifp->if_start)(ifp); IFQ_LOCK(ifq); } } static void cbq_purge(cbq_state_t *cbqp) { struct rm_class *cl; int i; for (i = 0; i < CBQ_MAX_CLASSES; i++) if ((cl = cbqp->cbq_class_tbl[i]) != NULL) rmc_dropall(cl); if (ALTQ_IS_ENABLED(cbqp->ifnp.ifq_)) cbqp->ifnp.ifq_->ifq_len = 0; } #endif /* ALTQ_CBQ */ diff --git a/sys/net/altq/altq_fairq.c b/sys/net/altq/altq_fairq.c index e20eea91b1a1..49046da24594 100644 --- a/sys/net/altq/altq_fairq.c +++ b/sys/net/altq/altq_fairq.c @@ -1,906 +1,908 @@ /* * Copyright (c) 2008 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by Matthew Dillon * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $DragonFly: src/sys/net/altq/altq_fairq.c,v 1.1 2008/04/06 18:58:15 dillon Exp $ */ /* * Matt: I gutted altq_priq.c and used it as a skeleton on which to build * fairq. The fairq algorithm is completely different then priq, of course, * but because I used priq's skeleton I believe I should include priq's * copyright. * * Copyright (C) 2000-2003 * Sony Computer Science Laboratories Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * FAIRQ - take traffic classified by keep state (hashed into * mbuf->m_pkthdr.altq_state_hash) and bucketize it. Fairly extract * the first packet from each bucket in a round-robin fashion. * * TODO - better overall qlimit support (right now it is per-bucket). * - NOTE: red etc is per bucket, not overall. * - better service curve support. * * EXAMPLE: * * altq on em0 fairq bandwidth 650Kb queue { std, bulk } * queue std priority 3 bandwidth 400Kb \ * fairq (buckets 64, default, hogs 1Kb) qlimit 50 * queue bulk priority 2 bandwidth 100Kb \ * fairq (buckets 64, hogs 1Kb) qlimit 50 * * pass out on em0 from any to any keep state queue std * pass out on em0 inet proto tcp ..... port ... 
keep state queue bulk */ #include "opt_altq.h" #include "opt_inet.h" #include "opt_inet6.h" #ifdef ALTQ_FAIRQ /* fairq is enabled in the kernel conf */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * function prototypes */ static int fairq_clear_interface(struct fairq_if *); static int fairq_request(struct ifaltq *, int, void *); static void fairq_purge(struct fairq_if *); static struct fairq_class *fairq_class_create(struct fairq_if *, int, int, u_int, struct fairq_opts *, int); static int fairq_class_destroy(struct fairq_class *); static int fairq_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *); static struct mbuf *fairq_dequeue(struct ifaltq *, int); static int fairq_addq(struct fairq_class *, struct mbuf *, u_int32_t); static struct mbuf *fairq_getq(struct fairq_class *, uint64_t); static struct mbuf *fairq_pollq(struct fairq_class *, uint64_t, int *); static fairq_bucket_t *fairq_selectq(struct fairq_class *, int); static void fairq_purgeq(struct fairq_class *); static void get_class_stats(struct fairq_classstats *, struct fairq_class *); static struct fairq_class *clh_to_clp(struct fairq_if *, uint32_t); int fairq_pfattach(struct pf_altq *a) { struct ifnet *ifp; int error; if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL) return (EINVAL); error = altq_attach(&ifp->if_snd, ALTQT_FAIRQ, a->altq_disc, fairq_enqueue, fairq_dequeue, fairq_request, NULL, NULL); return (error); } int fairq_add_altq(struct ifnet *ifp, struct pf_altq *a) { struct fairq_if *pif; if (ifp == NULL) return (EINVAL); if (!ALTQ_IS_READY(&ifp->if_snd)) return (ENODEV); pif = malloc(sizeof(struct fairq_if), M_DEVBUF, M_WAITOK | M_ZERO); pif->pif_bandwidth = a->ifbandwidth; pif->pif_maxpri = -1; pif->pif_ifq = &ifp->if_snd; /* keep the state in pf_altq */ a->altq_disc = pif; return (0); } int fairq_remove_altq(struct pf_altq *a) { struct fairq_if 
*pif; if ((pif = a->altq_disc) == NULL) return (EINVAL); a->altq_disc = NULL; fairq_clear_interface(pif); free(pif, M_DEVBUF); return (0); } int fairq_add_queue(struct pf_altq *a) { struct fairq_if *pif; struct fairq_class *cl; if ((pif = a->altq_disc) == NULL) return (EINVAL); /* check parameters */ if (a->priority >= FAIRQ_MAXPRI) return (EINVAL); if (a->qid == 0) return (EINVAL); if (pif->pif_classes[a->priority] != NULL) return (EBUSY); if (clh_to_clp(pif, a->qid) != NULL) return (EBUSY); cl = fairq_class_create(pif, a->priority, a->qlimit, a->bandwidth, &a->pq_u.fairq_opts, a->qid); if (cl == NULL) return (ENOMEM); return (0); } int fairq_remove_queue(struct pf_altq *a) { struct fairq_if *pif; struct fairq_class *cl; if ((pif = a->altq_disc) == NULL) return (EINVAL); if ((cl = clh_to_clp(pif, a->qid)) == NULL) return (EINVAL); return (fairq_class_destroy(cl)); } int fairq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version) { struct fairq_if *pif; struct fairq_class *cl; struct fairq_classstats stats; int error = 0; if ((pif = altq_lookup(a->ifname, ALTQT_FAIRQ)) == NULL) return (EBADF); if ((cl = clh_to_clp(pif, a->qid)) == NULL) return (EINVAL); if (*nbytes < sizeof(stats)) return (EINVAL); get_class_stats(&stats, cl); if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0) return (error); *nbytes = sizeof(stats); return (0); } /* * bring the interface back to the initial state by discarding * all the filters and classes. 
*/ static int fairq_clear_interface(struct fairq_if *pif) { struct fairq_class *cl; int pri; /* clear out the classes */ for (pri = 0; pri <= pif->pif_maxpri; pri++) { if ((cl = pif->pif_classes[pri]) != NULL) fairq_class_destroy(cl); } return (0); } static int fairq_request(struct ifaltq *ifq, int req, void *arg) { struct fairq_if *pif = (struct fairq_if *)ifq->altq_disc; IFQ_LOCK_ASSERT(ifq); switch (req) { case ALTRQ_PURGE: fairq_purge(pif); break; } return (0); } /* discard all the queued packets on the interface */ static void fairq_purge(struct fairq_if *pif) { struct fairq_class *cl; int pri; for (pri = 0; pri <= pif->pif_maxpri; pri++) { if ((cl = pif->pif_classes[pri]) != NULL && cl->cl_head) fairq_purgeq(cl); } if (ALTQ_IS_ENABLED(pif->pif_ifq)) pif->pif_ifq->ifq_len = 0; } static struct fairq_class * fairq_class_create(struct fairq_if *pif, int pri, int qlimit, u_int bandwidth, struct fairq_opts *opts, int qid) { struct fairq_class *cl; int flags = opts->flags; u_int nbuckets = opts->nbuckets; int i; #ifndef ALTQ_RED if (flags & FARF_RED) { #ifdef ALTQ_DEBUG printf("fairq_class_create: RED not configured for FAIRQ!\n"); #endif return (NULL); } #endif #ifndef ALTQ_CODEL if (flags & FARF_CODEL) { #ifdef ALTQ_DEBUG printf("fairq_class_create: CODEL not configured for FAIRQ!\n"); #endif return (NULL); } #endif if (nbuckets == 0) nbuckets = 256; if (nbuckets > FAIRQ_MAX_BUCKETS) nbuckets = FAIRQ_MAX_BUCKETS; /* enforce power-of-2 size */ while ((nbuckets ^ (nbuckets - 1)) != ((nbuckets << 1) - 1)) ++nbuckets; if ((cl = pif->pif_classes[pri]) != NULL) { /* modify the class instead of creating a new one */ IFQ_LOCK(cl->cl_pif->pif_ifq); if (cl->cl_head) fairq_purgeq(cl); IFQ_UNLOCK(cl->cl_pif->pif_ifq); #ifdef ALTQ_RIO if (cl->cl_qtype == Q_RIO) rio_destroy((rio_t *)cl->cl_red); #endif #ifdef ALTQ_RED if (cl->cl_qtype == Q_RED) red_destroy(cl->cl_red); #endif #ifdef ALTQ_CODEL if (cl->cl_qtype == Q_CODEL) codel_destroy(cl->cl_codel); #endif } else { cl = 
malloc(sizeof(struct fairq_class), M_DEVBUF, M_WAITOK | M_ZERO); cl->cl_nbuckets = nbuckets; cl->cl_nbucket_mask = nbuckets - 1; cl->cl_buckets = malloc( sizeof(struct fairq_bucket) * cl->cl_nbuckets, M_DEVBUF, M_WAITOK | M_ZERO); cl->cl_head = NULL; } pif->pif_classes[pri] = cl; if (flags & FARF_DEFAULTCLASS) pif->pif_default = cl; if (qlimit == 0) qlimit = 50; /* use default */ cl->cl_qlimit = qlimit; for (i = 0; i < cl->cl_nbuckets; ++i) { qlimit(&cl->cl_buckets[i].queue) = qlimit; } cl->cl_bandwidth = bandwidth / 8; cl->cl_qtype = Q_DROPTAIL; cl->cl_flags = flags & FARF_USERFLAGS; cl->cl_pri = pri; if (pri > pif->pif_maxpri) pif->pif_maxpri = pri; cl->cl_pif = pif; cl->cl_handle = qid; cl->cl_hogs_m1 = opts->hogs_m1 / 8; cl->cl_lssc_m1 = opts->lssc_m1 / 8; /* NOT YET USED */ #ifdef ALTQ_RED if (flags & (FARF_RED|FARF_RIO)) { int red_flags, red_pkttime; red_flags = 0; if (flags & FARF_ECN) red_flags |= REDF_ECN; #ifdef ALTQ_RIO if (flags & FARF_CLEARDSCP) red_flags |= RIOF_CLEARDSCP; #endif if (pif->pif_bandwidth < 8) red_pkttime = 1000 * 1000 * 1000; /* 1 sec */ else red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8); #ifdef ALTQ_RIO if (flags & FARF_RIO) { cl->cl_red = (red_t *)rio_alloc(0, NULL, red_flags, red_pkttime); if (cl->cl_red != NULL) cl->cl_qtype = Q_RIO; } else #endif if (flags & FARF_RED) { cl->cl_red = red_alloc(0, 0, cl->cl_qlimit * 10/100, cl->cl_qlimit * 30/100, red_flags, red_pkttime); if (cl->cl_red != NULL) cl->cl_qtype = Q_RED; } } #endif /* ALTQ_RED */ #ifdef ALTQ_CODEL if (flags & FARF_CODEL) { cl->cl_codel = codel_alloc(5, 100, 0); if (cl->cl_codel != NULL) cl->cl_qtype = Q_CODEL; } #endif return (cl); } static int fairq_class_destroy(struct fairq_class *cl) { struct fairq_if *pif; int pri; IFQ_LOCK(cl->cl_pif->pif_ifq); if (cl->cl_head) fairq_purgeq(cl); pif = cl->cl_pif; pif->pif_classes[cl->cl_pri] = NULL; if (pif->pif_poll_cache == cl) pif->pif_poll_cache = NULL; if (pif->pif_maxpri 
== cl->cl_pri) { for (pri = cl->cl_pri; pri >= 0; pri--) if (pif->pif_classes[pri] != NULL) { pif->pif_maxpri = pri; break; } if (pri < 0) pif->pif_maxpri = -1; } IFQ_UNLOCK(cl->cl_pif->pif_ifq); if (cl->cl_red != NULL) { #ifdef ALTQ_RIO if (cl->cl_qtype == Q_RIO) rio_destroy((rio_t *)cl->cl_red); #endif #ifdef ALTQ_RED if (cl->cl_qtype == Q_RED) red_destroy(cl->cl_red); #endif #ifdef ALTQ_CODEL if (cl->cl_qtype == Q_CODEL) codel_destroy(cl->cl_codel); #endif } free(cl->cl_buckets, M_DEVBUF); free(cl, M_DEVBUF); return (0); } /* * fairq_enqueue is an enqueue function to be registered to * (*altq_enqueue) in struct ifaltq. */ static int fairq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr) { struct fairq_if *pif = (struct fairq_if *)ifq->altq_disc; struct fairq_class *cl = NULL; /* Make compiler happy */ struct pf_mtag *t; u_int32_t qid_hash = 0; int len; IFQ_LOCK_ASSERT(ifq); /* grab class set by classifier */ if ((m->m_flags & M_PKTHDR) == 0) { /* should not happen */ printf("altq: packet for %s does not have pkthdr\n", ifq->altq_ifp->if_xname); m_freem(m); return (ENOBUFS); } if ((t = pf_find_mtag(m)) != NULL) { cl = clh_to_clp(pif, t->qid); qid_hash = t->qid_hash; } if (cl == NULL) { cl = pif->pif_default; if (cl == NULL) { m_freem(m); return (ENOBUFS); } } cl->cl_flags |= FARF_HAS_PACKETS; cl->cl_pktattr = NULL; len = m_pktlen(m); if (fairq_addq(cl, m, qid_hash) != 0) { /* drop occurred. mbuf was freed in fairq_addq. */ PKTCNTR_ADD(&cl->cl_dropcnt, len); return (ENOBUFS); } IFQ_INC_LEN(ifq); return (0); } /* * fairq_dequeue is a dequeue function to be registered to * (*altq_dequeue) in struct ifaltq. * * note: ALTDQ_POLL returns the next packet without removing the packet * from the queue. ALTDQ_REMOVE is a normal dequeue operation. * ALTDQ_REMOVE must return the same packet if called immediately * after ALTDQ_POLL. 
*/ static struct mbuf * fairq_dequeue(struct ifaltq *ifq, int op) { struct fairq_if *pif = (struct fairq_if *)ifq->altq_disc; struct fairq_class *cl; struct fairq_class *best_cl; struct mbuf *best_m; struct mbuf *m = NULL; uint64_t cur_time = read_machclk(); int pri; int hit_limit; IFQ_LOCK_ASSERT(ifq); if (IFQ_IS_EMPTY(ifq)) { return (NULL); } if (pif->pif_poll_cache && op == ALTDQ_REMOVE) { best_cl = pif->pif_poll_cache; m = fairq_getq(best_cl, cur_time); pif->pif_poll_cache = NULL; if (m) { IFQ_DEC_LEN(ifq); PKTCNTR_ADD(&best_cl->cl_xmitcnt, m_pktlen(m)); return (m); } } else { best_cl = NULL; best_m = NULL; for (pri = pif->pif_maxpri; pri >= 0; pri--) { if ((cl = pif->pif_classes[pri]) == NULL) continue; if ((cl->cl_flags & FARF_HAS_PACKETS) == 0) continue; m = fairq_pollq(cl, cur_time, &hit_limit); if (m == NULL) { cl->cl_flags &= ~FARF_HAS_PACKETS; continue; } /* * Only override the best choice if we are under * the BW limit. */ if (hit_limit == 0 || best_cl == NULL) { best_cl = cl; best_m = m; } /* * Remember the highest priority mbuf in case we * do not find any lower priority mbufs. */ if (hit_limit) continue; break; } if (op == ALTDQ_POLL) { pif->pif_poll_cache = best_cl; m = best_m; } else if (best_cl) { m = fairq_getq(best_cl, cur_time); if (m != NULL) { IFQ_DEC_LEN(ifq); PKTCNTR_ADD(&best_cl->cl_xmitcnt, m_pktlen(m)); } } return (m); } return (NULL); } static int fairq_addq(struct fairq_class *cl, struct mbuf *m, u_int32_t bucketid) { fairq_bucket_t *b; u_int hindex; uint64_t bw; /* * If the packet doesn't have any keep state put it on the end of * our queue. XXX this can result in out of order delivery. */ if (bucketid == 0) { if (cl->cl_head) b = cl->cl_head->prev; else b = &cl->cl_buckets[0]; } else { hindex = bucketid & cl->cl_nbucket_mask; b = &cl->cl_buckets[hindex]; } /* * Add the bucket to the end of the circular list of active buckets. 
* * As a special case we add the bucket to the beginning of the list * instead of the end if it was not previously on the list and if * its traffic is less then the hog level. */ if (b->in_use == 0) { b->in_use = 1; if (cl->cl_head == NULL) { cl->cl_head = b; b->next = b; b->prev = b; } else { b->next = cl->cl_head; b->prev = cl->cl_head->prev; b->prev->next = b; b->next->prev = b; if (b->bw_delta && cl->cl_hogs_m1) { bw = b->bw_bytes * machclk_freq / b->bw_delta; if (bw < cl->cl_hogs_m1) cl->cl_head = b; } } } #ifdef ALTQ_RIO if (cl->cl_qtype == Q_RIO) return rio_addq((rio_t *)cl->cl_red, &b->queue, m, cl->cl_pktattr); #endif #ifdef ALTQ_RED if (cl->cl_qtype == Q_RED) return red_addq(cl->cl_red, &b->queue, m, cl->cl_pktattr); #endif #ifdef ALTQ_CODEL if (cl->cl_qtype == Q_CODEL) return codel_addq(cl->cl_codel, &b->queue, m); #endif if (qlen(&b->queue) >= qlimit(&b->queue)) { m_freem(m); return (-1); } if (cl->cl_flags & FARF_CLEARDSCP) write_dsfield(m, cl->cl_pktattr, 0); _addq(&b->queue, m); return (0); } static struct mbuf * fairq_getq(struct fairq_class *cl, uint64_t cur_time) { fairq_bucket_t *b; struct mbuf *m; b = fairq_selectq(cl, 0); if (b == NULL) m = NULL; #ifdef ALTQ_RIO else if (cl->cl_qtype == Q_RIO) m = rio_getq((rio_t *)cl->cl_red, &b->queue); #endif #ifdef ALTQ_RED else if (cl->cl_qtype == Q_RED) m = red_getq(cl->cl_red, &b->queue); #endif #ifdef ALTQ_CODEL else if (cl->cl_qtype == Q_CODEL) m = codel_getq(cl->cl_codel, &b->queue); #endif else m = _getq(&b->queue); /* * Calculate the BW change */ if (m != NULL) { uint64_t delta; /* * Per-class bandwidth calculation */ delta = (cur_time - cl->cl_last_time); if (delta > machclk_freq * 8) delta = machclk_freq * 8; cl->cl_bw_delta += delta; cl->cl_bw_bytes += m->m_pkthdr.len; cl->cl_last_time = cur_time; cl->cl_bw_delta -= cl->cl_bw_delta >> 3; cl->cl_bw_bytes -= cl->cl_bw_bytes >> 3; /* * Per-bucket bandwidth calculation */ delta = (cur_time - b->last_time); if (delta > machclk_freq * 8) delta = 
machclk_freq * 8; b->bw_delta += delta; b->bw_bytes += m->m_pkthdr.len; b->last_time = cur_time; b->bw_delta -= b->bw_delta >> 3; b->bw_bytes -= b->bw_bytes >> 3; } return(m); } /* * Figure out what the next packet would be if there were no limits. If * this class hits its bandwidth limit *hit_limit is set to no-zero, otherwise * it is set to 0. A non-NULL mbuf is returned either way. */ static struct mbuf * fairq_pollq(struct fairq_class *cl, uint64_t cur_time, int *hit_limit) { fairq_bucket_t *b; struct mbuf *m; uint64_t delta; uint64_t bw; *hit_limit = 0; b = fairq_selectq(cl, 1); if (b == NULL) return(NULL); m = qhead(&b->queue); /* * Did this packet exceed the class bandwidth? Calculate the * bandwidth component of the packet. * * - Calculate bytes per second */ delta = cur_time - cl->cl_last_time; if (delta > machclk_freq * 8) delta = machclk_freq * 8; cl->cl_bw_delta += delta; cl->cl_last_time = cur_time; if (cl->cl_bw_delta) { bw = cl->cl_bw_bytes * machclk_freq / cl->cl_bw_delta; if (bw > cl->cl_bandwidth) *hit_limit = 1; #ifdef ALTQ_DEBUG printf("BW %6ju relative to %6u %d queue %p\n", (uintmax_t)bw, cl->cl_bandwidth, *hit_limit, b); #endif } return(m); } /* * Locate the next queue we want to pull a packet out of. This code * is also responsible for removing empty buckets from the circular list. */ static fairq_bucket_t * fairq_selectq(struct fairq_class *cl, int ispoll) { fairq_bucket_t *b; uint64_t bw; if (ispoll == 0 && cl->cl_polled) { b = cl->cl_polled; cl->cl_polled = NULL; return(b); } while ((b = cl->cl_head) != NULL) { /* * Remove empty queues from consideration */ if (qempty(&b->queue)) { b->in_use = 0; cl->cl_head = b->next; if (cl->cl_head == b) { cl->cl_head = NULL; } else { b->next->prev = b->prev; b->prev->next = b->next; } continue; } /* * Advance the round robin. Queues with bandwidths less * then the hog bandwidth are allowed to burst. 
*/ if (cl->cl_hogs_m1 == 0) { cl->cl_head = b->next; } else if (b->bw_delta) { bw = b->bw_bytes * machclk_freq / b->bw_delta; if (bw >= cl->cl_hogs_m1) { cl->cl_head = b->next; } /* * XXX TODO - */ } /* * Return bucket b. */ break; } if (ispoll) cl->cl_polled = b; return(b); } static void fairq_purgeq(struct fairq_class *cl) { fairq_bucket_t *b; struct mbuf *m; while ((b = fairq_selectq(cl, 0)) != NULL) { while ((m = _getq(&b->queue)) != NULL) { PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m)); m_freem(m); } ASSERT(qlen(&b->queue) == 0); } } static void get_class_stats(struct fairq_classstats *sp, struct fairq_class *cl) { fairq_bucket_t *b; + memset(sp, 0, sizeof(*sp)); + sp->class_handle = cl->cl_handle; sp->qlimit = cl->cl_qlimit; sp->xmit_cnt = cl->cl_xmitcnt; sp->drop_cnt = cl->cl_dropcnt; sp->qtype = cl->cl_qtype; sp->qlength = 0; if (cl->cl_head) { b = cl->cl_head; do { sp->qlength += qlen(&b->queue); b = b->next; } while (b != cl->cl_head); } #ifdef ALTQ_RED if (cl->cl_qtype == Q_RED) red_getstats(cl->cl_red, &sp->red[0]); #endif #ifdef ALTQ_RIO if (cl->cl_qtype == Q_RIO) rio_getstats((rio_t *)cl->cl_red, &sp->red[0]); #endif #ifdef ALTQ_CODEL if (cl->cl_qtype == Q_CODEL) codel_getstats(cl->cl_codel, &sp->codel); #endif } /* convert a class handle to the corresponding class pointer */ static struct fairq_class * clh_to_clp(struct fairq_if *pif, uint32_t chandle) { struct fairq_class *cl; int idx; if (chandle == 0) return (NULL); for (idx = pif->pif_maxpri; idx >= 0; idx--) if ((cl = pif->pif_classes[idx]) != NULL && cl->cl_handle == chandle) return (cl); return (NULL); } #endif /* ALTQ_FAIRQ */ diff --git a/sys/net/altq/altq_priq.c b/sys/net/altq/altq_priq.c index 32ebfdefbfbe..8023dc12e029 100644 --- a/sys/net/altq/altq_priq.c +++ b/sys/net/altq/altq_priq.c @@ -1,640 +1,642 @@ /*- * Copyright (C) 2000-2003 * Sony Computer Science Laboratories Inc. All rights reserved. 
* NOTE(review): collapsed unified-diff hunk, start of sys/net/altq/altq_priq.c
* (BSD license text continues below).  The bare '#include' directives later in
* this span lost their operands during text extraction - restore the header
* names from the upstream file; do not compile this span as-is.  After the
* includes come the forward prototypes, priq_pfattach() (binds the PRIQ
* enqueue/dequeue/request handlers to the interface's send queue under
* splnet), priq_add_altq() (allocates and zeroes the per-interface priq_if
* state, maxpri starts at -1 meaning "no classes"), and the head of
* priq_remove_altq().  Code kept byte-identical; comments only.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $KAME: altq_priq.c,v 1.11 2003/09/17 14:23:25 kjc Exp $ */ /* * priority queue */ #include "opt_altq.h" #include "opt_inet.h" #include "opt_inet6.h" #ifdef ALTQ_PRIQ /* priq is enabled by ALTQ_PRIQ option in opt_altq.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * function prototypes */ static int priq_clear_interface(struct priq_if *); static int priq_request(struct ifaltq *, int, void *); static void priq_purge(struct priq_if *); static struct priq_class *priq_class_create(struct priq_if *, int, int, int, int); static int priq_class_destroy(struct priq_class *); static int priq_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *); static struct mbuf *priq_dequeue(struct ifaltq *, int); static int priq_addq(struct priq_class *, struct mbuf *); static struct mbuf *priq_getq(struct priq_class *); static struct mbuf *priq_pollq(struct priq_class *); static void priq_purgeq(struct priq_class *); static void get_class_stats(struct priq_classstats *, struct priq_class *); static struct priq_class *clh_to_clp(struct priq_if *, u_int32_t); int priq_pfattach(struct pf_altq *a) { struct ifnet *ifp; int s, error; if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL) return (EINVAL); s = splnet(); error = altq_attach(&ifp->if_snd, ALTQT_PRIQ, a->altq_disc, priq_enqueue, priq_dequeue, priq_request, NULL, NULL); splx(s); return (error); } int priq_add_altq(struct ifnet * ifp, struct pf_altq *a) { struct priq_if *pif; if (ifp == NULL) return (EINVAL); if (!ALTQ_IS_READY(&ifp->if_snd)) return (ENODEV); pif = malloc(sizeof(struct priq_if), M_DEVBUF, M_NOWAIT | M_ZERO); if (pif == NULL) return (ENOMEM); pif->pif_bandwidth = a->ifbandwidth; pif->pif_maxpri = -1; pif->pif_ifq = &ifp->if_snd; /* keep the state in pf_altq */ a->altq_disc = pif; return (0); } int priq_remove_altq(struct pf_altq *a) { struct priq_if *pif; if ((pif =
// NOTE(review): collapsed diff hunk, code kept byte-identical; comments only.
// Tail of priq_remove_altq() (detaches the discipline, drains all classes, frees
// the priq_if), priq_add_queue() (validates priority < PRIQ_MAXPRI, non-zero qid,
// rejects duplicate priority slots and duplicate handles before creating the
// class), priq_remove_queue(), and priq_getqstats() (snapshots class stats under
// the queue lock's caller context and copyout()s them to the user buffer after a
// size check - safe only because get_class_stats() fully initializes the struct).
a->altq_disc) == NULL) return (EINVAL); a->altq_disc = NULL; (void)priq_clear_interface(pif); free(pif, M_DEVBUF); return (0); } int priq_add_queue(struct pf_altq *a) { struct priq_if *pif; struct priq_class *cl; if ((pif = a->altq_disc) == NULL) return (EINVAL); /* check parameters */ if (a->priority >= PRIQ_MAXPRI) return (EINVAL); if (a->qid == 0) return (EINVAL); if (pif->pif_classes[a->priority] != NULL) return (EBUSY); if (clh_to_clp(pif, a->qid) != NULL) return (EBUSY); cl = priq_class_create(pif, a->priority, a->qlimit, a->pq_u.priq_opts.flags, a->qid); if (cl == NULL) return (ENOMEM); return (0); } int priq_remove_queue(struct pf_altq *a) { struct priq_if *pif; struct priq_class *cl; if ((pif = a->altq_disc) == NULL) return (EINVAL); if ((cl = clh_to_clp(pif, a->qid)) == NULL) return (EINVAL); return (priq_class_destroy(cl)); } int priq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version) { struct priq_if *pif; struct priq_class *cl; struct priq_classstats stats; int error = 0; if ((pif = altq_lookup(a->ifname, ALTQT_PRIQ)) == NULL) return (EBADF); if ((cl = clh_to_clp(pif, a->qid)) == NULL) return (EINVAL); if (*nbytes < sizeof(stats)) return (EINVAL); get_class_stats(&stats, cl); if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0) return (error); *nbytes = sizeof(stats); return (0); } /* * bring the interface back to the initial state by discarding * all the filters and classes.
* NOTE(review): collapsed diff hunk, code kept byte-identical; comments only.
* This span holds priq_clear_interface() (destroys every class up to maxpri),
* priq_request() (ALTRQ_PURGE dispatcher, asserts the ifq lock is held),
* priq_purge() (drops all queued packets and zeroes ifq_len when enabled), and
* the head of priq_class_create() - including the compile-time rejection of
* RED/CoDel flags when those options are not built in, and the "modify in
* place" path that purges and tears down the old AQM state of an existing
* class at the same priority before reconfiguring it.
*/ static int priq_clear_interface(struct priq_if *pif) { struct priq_class *cl; int pri; #ifdef ALTQ3_CLFIER_COMPAT /* free the filters for this interface */ acc_discard_filters(&pif->pif_classifier, NULL, 1); #endif /* clear out the classes */ for (pri = 0; pri <= pif->pif_maxpri; pri++) if ((cl = pif->pif_classes[pri]) != NULL) priq_class_destroy(cl); return (0); } static int priq_request(struct ifaltq *ifq, int req, void *arg) { struct priq_if *pif = (struct priq_if *)ifq->altq_disc; IFQ_LOCK_ASSERT(ifq); switch (req) { case ALTRQ_PURGE: priq_purge(pif); break; } return (0); } /* discard all the queued packets on the interface */ static void priq_purge(struct priq_if *pif) { struct priq_class *cl; int pri; for (pri = 0; pri <= pif->pif_maxpri; pri++) { if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q)) priq_purgeq(cl); } if (ALTQ_IS_ENABLED(pif->pif_ifq)) pif->pif_ifq->ifq_len = 0; } static struct priq_class * priq_class_create(struct priq_if *pif, int pri, int qlimit, int flags, int qid) { struct priq_class *cl; int s; #ifndef ALTQ_RED if (flags & PRCF_RED) { #ifdef ALTQ_DEBUG printf("priq_class_create: RED not configured for PRIQ!\n"); #endif return (NULL); } #endif #ifndef ALTQ_CODEL if (flags & PRCF_CODEL) { #ifdef ALTQ_DEBUG printf("priq_class_create: CODEL not configured for PRIQ!\n"); #endif return (NULL); } #endif if ((cl = pif->pif_classes[pri]) != NULL) { /* modify the class instead of creating a new one */ s = splnet(); IFQ_LOCK(cl->cl_pif->pif_ifq); if (!qempty(cl->cl_q)) priq_purgeq(cl); IFQ_UNLOCK(cl->cl_pif->pif_ifq); splx(s); #ifdef ALTQ_RIO if (q_is_rio(cl->cl_q)) rio_destroy((rio_t *)cl->cl_red); #endif #ifdef ALTQ_RED if (q_is_red(cl->cl_q)) red_destroy(cl->cl_red); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->cl_q)) codel_destroy(cl->cl_codel); #endif } else { cl = malloc(sizeof(struct priq_class), M_DEVBUF, M_NOWAIT | M_ZERO); if (cl == NULL) return (NULL); cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO); if
// NOTE(review): collapsed diff hunk, code kept byte-identical; comments only.
// Remainder of priq_class_create(): installs the class in pif_classes[pri],
// applies the default qlimit of 50, raises pif_maxpri if needed, then
// optionally attaches RIO/RED (RED thresholds at 10%/30% of qlimit; pkttime
// derived from MTU and interface bandwidth, with a 1-second fallback when
// bandwidth < 8 bps) or CoDel (5ms target / 100ms interval per the visible
// codel_alloc(5, 100, 0) call).  The err_ret label frees any partially
// constructed AQM state, the class queue, and the class itself - the standard
// goto-cleanup pattern.  Also the head of priq_class_destroy() (splnet +
// ifq lock, optional ALTQ3 filter discard).
(cl->cl_q == NULL) goto err_ret; } pif->pif_classes[pri] = cl; if (flags & PRCF_DEFAULTCLASS) pif->pif_default = cl; if (qlimit == 0) qlimit = 50; /* use default */ qlimit(cl->cl_q) = qlimit; qtype(cl->cl_q) = Q_DROPTAIL; qlen(cl->cl_q) = 0; qsize(cl->cl_q) = 0; cl->cl_flags = flags; cl->cl_pri = pri; if (pri > pif->pif_maxpri) pif->pif_maxpri = pri; cl->cl_pif = pif; cl->cl_handle = qid; #ifdef ALTQ_RED if (flags & (PRCF_RED|PRCF_RIO)) { int red_flags, red_pkttime; red_flags = 0; if (flags & PRCF_ECN) red_flags |= REDF_ECN; #ifdef ALTQ_RIO if (flags & PRCF_CLEARDSCP) red_flags |= RIOF_CLEARDSCP; #endif if (pif->pif_bandwidth < 8) red_pkttime = 1000 * 1000 * 1000; /* 1 sec */ else red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8); #ifdef ALTQ_RIO if (flags & PRCF_RIO) { cl->cl_red = (red_t *)rio_alloc(0, NULL, red_flags, red_pkttime); if (cl->cl_red == NULL) goto err_ret; qtype(cl->cl_q) = Q_RIO; } else #endif if (flags & PRCF_RED) { cl->cl_red = red_alloc(0, 0, qlimit(cl->cl_q) * 10/100, qlimit(cl->cl_q) * 30/100, red_flags, red_pkttime); if (cl->cl_red == NULL) goto err_ret; qtype(cl->cl_q) = Q_RED; } } #endif /* ALTQ_RED */ #ifdef ALTQ_CODEL if (flags & PRCF_CODEL) { cl->cl_codel = codel_alloc(5, 100, 0); if (cl->cl_codel != NULL) qtype(cl->cl_q) = Q_CODEL; } #endif return (cl); err_ret: if (cl->cl_red != NULL) { #ifdef ALTQ_RIO if (q_is_rio(cl->cl_q)) rio_destroy((rio_t *)cl->cl_red); #endif #ifdef ALTQ_RED if (q_is_red(cl->cl_q)) red_destroy(cl->cl_red); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->cl_q)) codel_destroy(cl->cl_codel); #endif } if (cl->cl_q != NULL) free(cl->cl_q, M_DEVBUF); free(cl, M_DEVBUF); return (NULL); } static int priq_class_destroy(struct priq_class *cl) { struct priq_if *pif; int s, pri; s = splnet(); IFQ_LOCK(cl->cl_pif->pif_ifq); #ifdef ALTQ3_CLFIER_COMPAT /* delete filters referencing to this class */ acc_discard_filters(&cl->cl_pif->pif_classifier, cl, 0); #endif if
// NOTE(review): collapsed diff hunk, code kept byte-identical; comments only.
// Tail of priq_class_destroy(): purges the queue, unlinks the class, rescans
// downward to recompute pif_maxpri (falling back to -1 when no classes remain),
// then frees AQM state, the class queue, and the class outside the lock.
// Followed by priq_enqueue(): rejects mbufs without M_PKTHDR, resolves the
// class from the pf mtag qid (falling back to the default class, else drops
// with ENOBUFS), charges drops to cl_dropcnt when priq_addq() fails, and bumps
// the ifq length on success.
(!qempty(cl->cl_q)) priq_purgeq(cl); pif = cl->cl_pif; pif->pif_classes[cl->cl_pri] = NULL; if (pif->pif_maxpri == cl->cl_pri) { for (pri = cl->cl_pri; pri >= 0; pri--) if (pif->pif_classes[pri] != NULL) { pif->pif_maxpri = pri; break; } if (pri < 0) pif->pif_maxpri = -1; } IFQ_UNLOCK(cl->cl_pif->pif_ifq); splx(s); if (cl->cl_red != NULL) { #ifdef ALTQ_RIO if (q_is_rio(cl->cl_q)) rio_destroy((rio_t *)cl->cl_red); #endif #ifdef ALTQ_RED if (q_is_red(cl->cl_q)) red_destroy(cl->cl_red); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->cl_q)) codel_destroy(cl->cl_codel); #endif } free(cl->cl_q, M_DEVBUF); free(cl, M_DEVBUF); return (0); } /* * priq_enqueue is an enqueue function to be registered to * (*altq_enqueue) in struct ifaltq. */ static int priq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr) { struct priq_if *pif = (struct priq_if *)ifq->altq_disc; struct priq_class *cl; struct pf_mtag *t; int len; IFQ_LOCK_ASSERT(ifq); /* grab class set by classifier */ if ((m->m_flags & M_PKTHDR) == 0) { /* should not happen */ printf("altq: packet for %s does not have pkthdr\n", ifq->altq_ifp->if_xname); m_freem(m); return (ENOBUFS); } cl = NULL; if ((t = pf_find_mtag(m)) != NULL) cl = clh_to_clp(pif, t->qid); if (cl == NULL) { cl = pif->pif_default; if (cl == NULL) { m_freem(m); return (ENOBUFS); } } cl->cl_pktattr = NULL; len = m_pktlen(m); if (priq_addq(cl, m) != 0) { /* drop occurred. mbuf was freed in priq_addq. */ PKTCNTR_ADD(&cl->cl_dropcnt, len); return (ENOBUFS); } IFQ_INC_LEN(ifq); /* successfully queued. */ return (0); } /* * priq_dequeue is a dequeue function to be registered to * (*altq_dequeue) in struct ifaltq. * * note: ALTDQ_POLL returns the next packet without removing the packet * from the queue. ALTDQ_REMOVE is a normal dequeue operation. * ALTDQ_REMOVE must return the same packet if called immediately * after ALTDQ_POLL.
* NOTE(review): collapsed diff hunk, code kept byte-identical; comments only.
* This span holds priq_dequeue() (highest-priority non-empty class wins;
* ALTDQ_POLL peeks, ALTDQ_REMOVE pulls and accounts), priq_addq()/priq_getq()
* (dispatch to RIO/RED/CoDel when configured, else tail-drop against qlimit
* and plain FIFO), priq_pollq() - still a pre-ANSI K&R-style definition,
* candidate for modernization upstream - priq_purgeq(), and the head of
* get_class_stats() with the diff's added "+ memset(sp, 0, sizeof(*sp));",
* which zeroes the userland-bound stats struct so fields not explicitly set
* below cannot leak kernel stack contents through priq_getqstats().
*/ static struct mbuf * priq_dequeue(struct ifaltq *ifq, int op) { struct priq_if *pif = (struct priq_if *)ifq->altq_disc; struct priq_class *cl; struct mbuf *m; int pri; IFQ_LOCK_ASSERT(ifq); if (IFQ_IS_EMPTY(ifq)) /* no packet in the queue */ return (NULL); for (pri = pif->pif_maxpri; pri >= 0; pri--) { if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q)) { if (op == ALTDQ_POLL) return (priq_pollq(cl)); m = priq_getq(cl); if (m != NULL) { IFQ_DEC_LEN(ifq); if (qempty(cl->cl_q)) cl->cl_period++; PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m)); } return (m); } } return (NULL); } static int priq_addq(struct priq_class *cl, struct mbuf *m) { #ifdef ALTQ_RIO if (q_is_rio(cl->cl_q)) return rio_addq((rio_t *)cl->cl_red, cl->cl_q, m, cl->cl_pktattr); #endif #ifdef ALTQ_RED if (q_is_red(cl->cl_q)) return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->cl_q)) return codel_addq(cl->cl_codel, cl->cl_q, m); #endif if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) { m_freem(m); return (-1); } if (cl->cl_flags & PRCF_CLEARDSCP) write_dsfield(m, cl->cl_pktattr, 0); _addq(cl->cl_q, m); return (0); } static struct mbuf * priq_getq(struct priq_class *cl) { #ifdef ALTQ_RIO if (q_is_rio(cl->cl_q)) return rio_getq((rio_t *)cl->cl_red, cl->cl_q); #endif #ifdef ALTQ_RED if (q_is_red(cl->cl_q)) return red_getq(cl->cl_red, cl->cl_q); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->cl_q)) return codel_getq(cl->cl_codel, cl->cl_q); #endif return _getq(cl->cl_q); } static struct mbuf * priq_pollq(cl) struct priq_class *cl; { return qhead(cl->cl_q); } static void priq_purgeq(struct priq_class *cl) { struct mbuf *m; if (qempty(cl->cl_q)) return; while ((m = _getq(cl->cl_q)) != NULL) { PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m)); m_freem(m); } ASSERT(qlen(cl->cl_q) == 0); } static void get_class_stats(struct priq_classstats *sp, struct priq_class *cl) { + memset(sp, 0, sizeof(*sp)); + sp->class_handle = cl->cl_handle; sp->qlength = qlen(cl->cl_q); sp->qlimit =
// NOTE(review): collapsed diff hunk, code kept byte-identical; comments only.
// Tail of get_class_stats(): copies the remaining counters and, when the queue
// runs RED/RIO/CoDel, merges in the AQM-specific stats.  Followed by
// clh_to_clp() - linear scan from pif_maxpri down, handle 0 reserved as
// "no class" - and the closing #endif for the ALTQ_PRIQ build option.
qlimit(cl->cl_q); sp->period = cl->cl_period; sp->xmitcnt = cl->cl_xmitcnt; sp->dropcnt = cl->cl_dropcnt; sp->qtype = qtype(cl->cl_q); #ifdef ALTQ_RED if (q_is_red(cl->cl_q)) red_getstats(cl->cl_red, &sp->red[0]); #endif #ifdef ALTQ_RIO if (q_is_rio(cl->cl_q)) rio_getstats((rio_t *)cl->cl_red, &sp->red[0]); #endif #ifdef ALTQ_CODEL if (q_is_codel(cl->cl_q)) codel_getstats(cl->cl_codel, &sp->codel); #endif } /* convert a class handle to the corresponding class pointer */ static struct priq_class * clh_to_clp(struct priq_if *pif, u_int32_t chandle) { struct priq_class *cl; int idx; if (chandle == 0) return (NULL); for (idx = pif->pif_maxpri; idx >= 0; idx--) if ((cl = pif->pif_classes[idx]) != NULL && cl->cl_handle == chandle) return (cl); return (NULL); } #endif /* ALTQ_PRIQ */