Index: sys/dev/cxgb/cxgb_adapter.h =================================================================== --- sys/dev/cxgb/cxgb_adapter.h +++ sys/dev/cxgb/cxgb_adapter.h @@ -59,7 +59,6 @@ #include #include -#include struct adapter; struct sge_qset; @@ -251,7 +250,7 @@ bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; bus_dma_tag_t entry_tag; - struct mbuf_head sendq; + struct mbufq sendq; struct buf_ring *txq_mr; struct ifaltq *txq_ifq; Index: sys/dev/cxgb/cxgb_sge.c =================================================================== --- sys/dev/cxgb/cxgb_sge.c +++ sys/dev/cxgb/cxgb_sge.c @@ -1117,9 +1117,10 @@ qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; - mbufq_init(&qs->txq[TXQ_ETH].sendq); - mbufq_init(&qs->txq[TXQ_OFLD].sendq); - mbufq_init(&qs->txq[TXQ_CTRL].sendq); + /* XXX: a sane limit is needed instead of INT_MAX */ + mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX); + mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX); + mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX); } @@ -1820,8 +1821,8 @@ * the control queue is only used for binding qsets which happens * at init time so we are guaranteed enough descriptors */ - if (__predict_false(!mbufq_empty(&q->sendq))) { -addq_exit: mbufq_tail(&q->sendq, m); + if (__predict_false(mbufq_len(&q->sendq))) { +addq_exit: (void )mbufq_enqueue(&q->sendq, m); return 1; } if (__predict_false(q->size - q->in_use < ndesc)) { @@ -1936,7 +1937,7 @@ } q->in_use++; } - if (!mbufq_empty(&q->sendq)) { + if (mbufq_len(&q->sendq)) { setbit(&qs->txq_stopped, TXQ_CTRL); if (should_restart_tx(q) && @@ -2319,7 +2320,7 @@ TXQ_LOCK(qs); again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD); - while ((m = mbufq_peek(&q->sendq)) != NULL) { + while ((m = mbufq_first(&q->sendq)) != NULL) { unsigned int gen, pidx; struct ofld_hdr *oh = mtod(m, struct ofld_hdr *); unsigned int ndesc = G_HDR_NDESC(oh->flags); @@ -2485,7 +2486,7 @@ printf("error %d from alloc ring tx %i\n", ret, i); goto err; } - mbufq_init(&q->txq[i].sendq); + mbufq_init(&q->txq[i].sendq, INT_MAX); q->txq[i].gen = 1; q->txq[i].size = p->txq_size[i]; } @@ -3521,7 +3522,7 @@ CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops, "#tunneled packets dropped"); SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen", - CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen, + CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len, 0, "#tunneled packets waiting to be sent"); #if 0 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx", Index: sys/dev/cxgb/sys/mbufq.h =================================================================== --- sys/dev/cxgb/sys/mbufq.h +++ /dev/null @@ -1,123 +0,0 @@ -/************************************************************************** - -Copyright (c) 2007-2008, Chelsio Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Neither the name of the Chelsio Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -$FreeBSD$ - -***************************************************************************/ - -#ifndef CXGB_MBUFQ_H_ -#define CXGB_MBUFQ_H_ - -struct mbuf_head { - struct mbuf *head; - struct mbuf *tail; - uint32_t qlen; - uint32_t qsize; - struct mtx lock; -}; - -static __inline void -mbufq_init(struct mbuf_head *l) -{ - l->head = l->tail = NULL; - l->qlen = l->qsize = 0; -} - -static __inline int -mbufq_empty(struct mbuf_head *l) -{ - return (l->head == NULL); -} - -static __inline int -mbufq_len(struct mbuf_head *l) -{ - return (l->qlen); -} - -static __inline int -mbufq_size(struct mbuf_head *l) -{ - return (l->qsize); -} - -static __inline int -mbufq_head_size(struct mbuf_head *l) -{ - return (l->head ? l->head->m_pkthdr.len : 0); -} - -static __inline void -mbufq_tail(struct mbuf_head *l, struct mbuf *m) -{ - l->qlen++; - if (l->head == NULL) - l->head = m; - else - l->tail->m_nextpkt = m; - l->tail = m; - l->qsize += m->m_pkthdr.len; -} - -static __inline struct mbuf * -mbufq_dequeue(struct mbuf_head *l) -{ - struct mbuf *m; - - m = l->head; - if (m) { - if (m == l->tail) - l->head = l->tail = NULL; - else - l->head = m->m_nextpkt; - m->m_nextpkt = NULL; - l->qlen--; - l->qsize -= m->m_pkthdr.len; - } - - return (m); -} - -static __inline struct mbuf * -mbufq_peek(const struct mbuf_head *l) -{ - return (l->head); -} - -static __inline void -mbufq_append(struct mbuf_head *a, struct mbuf_head *b) -{ - if (a->tail) - a->tail->m_nextpkt = b->head; - if (b->tail) - a->tail = b->tail; - a->qlen += b->qlen; - a->qsize += b->qsize; - - -} -#endif /* CXGB_MBUFQ_H_ */ Index: sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c =================================================================== --- sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c +++ sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c @@ -1088,7 +1088,7 @@ req->cmd = CPL_ABORT_SEND_RST; if (tp->t_state == TCPS_SYN_SENT) - mbufq_tail(&toep->out_of_order_queue, m); /* defer */ + (void )mbufq_enqueue(&toep->out_of_order_queue, m); /* defer */ else l2t_send(sc, m, toep->tp_l2t); } Index: sys/dev/cxgb/ulp/tom/cxgb_toepcb.h =================================================================== --- sys/dev/cxgb/ulp/tom/cxgb_toepcb.h +++ sys/dev/cxgb/ulp/tom/cxgb_toepcb.h @@ -30,7 +30,7 @@ #define CXGB_TOEPCB_H_ #include #include -#include +#include #define TP_DATASENT (1 << 0) #define TP_TX_WAIT_IDLE (1 << 1) @@ -64,26 +64,26 @@ struct inpcb *tp_inp; struct mbuf *tp_m_last; - struct mbuf_head wr_list; - struct mbuf_head out_of_order_queue; + struct mbufq wr_list; + struct mbufq out_of_order_queue; }; static inline void reset_wr_list(struct toepcb *toep) { - mbufq_init(&toep->wr_list); + mbufq_init(&toep->wr_list, INT_MAX); /* XXX: sane limit needed */ } static inline void enqueue_wr(struct toepcb *toep, struct mbuf *m) { - mbufq_tail(&toep->wr_list, m); + (void )mbufq_enqueue(&toep->wr_list, m); } static inline struct mbuf * peek_wr(const struct toepcb *toep) { - return (mbufq_peek(&toep->wr_list)); + return (mbufq_first(&toep->wr_list)); } static inline struct mbuf * 
Index: sys/dev/xen/netfront/mbufq.h =================================================================== --- sys/dev/xen/netfront/mbufq.h +++ /dev/null @@ -1,123 +0,0 @@ -/************************************************************************** - -Copyright (c) 2007, Chelsio Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Neither the name of the Chelsio Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -$FreeBSD$ - -***************************************************************************/ - -#ifndef CXGB_MBUFQ_H_ -#define CXGB_MBUFQ_H_ - -struct mbuf_head { - struct mbuf *head; - struct mbuf *tail; - uint32_t qlen; - uint32_t qsize; - struct mtx lock; -}; - -static __inline void -mbufq_init(struct mbuf_head *l) -{ - l->head = l->tail = NULL; - l->qlen = l->qsize = 0; -} - -static __inline int -mbufq_empty(struct mbuf_head *l) -{ - return (l->head == NULL); -} - -static __inline int -mbufq_len(struct mbuf_head *l) -{ - return (l->qlen); -} - -static __inline int -mbufq_size(struct mbuf_head *l) -{ - return (l->qsize); -} - -static __inline int -mbufq_head_size(struct mbuf_head *l) -{ - return (l->head ? l->head->m_pkthdr.len : 0); -} - -static __inline void -mbufq_tail(struct mbuf_head *l, struct mbuf *m) -{ - l->qlen++; - if (l->head == NULL) - l->head = m; - else - l->tail->m_nextpkt = m; - l->tail = m; - l->qsize += m->m_pkthdr.len; -} - -static __inline struct mbuf * -mbufq_dequeue(struct mbuf_head *l) -{ - struct mbuf *m; - - m = l->head; - if (m) { - if (m == l->tail) - l->head = l->tail = NULL; - else - l->head = m->m_nextpkt; - m->m_nextpkt = NULL; - l->qlen--; - l->qsize -= m->m_pkthdr.len; - } - - return (m); -} - -static __inline struct mbuf * -mbufq_peek(struct mbuf_head *l) -{ - return (l->head); -} - -static __inline void -mbufq_append(struct mbuf_head *a, struct mbuf_head *b) -{ - if (a->tail) - a->tail->m_nextpkt = b->head; - if (b->tail) - a->tail = b->tail; - a->qlen += b->qlen; - a->qsize += b->qsize; - - -} -#endif /* CXGB_MBUFQ_H_ */ Index: sys/dev/xen/netfront/netfront.c =================================================================== --- sys/dev/xen/netfront/netfront.c +++ sys/dev/xen/netfront/netfront.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -87,8 +88,6 @@ #include -#include - #include "xenbus_if.h" /* Features supported by all backends. 
TSO and LRO can be negotiated */ @@ -277,7 +276,7 @@ int rx_ring_ref; uint8_t mac[ETHER_ADDR_LEN]; struct xn_chain_data xn_cdata; /* mbufs */ - struct mbuf_head xn_rx_batch; /* head of the batch queue */ + struct mbufq xn_rx_batch; /* batch queue */ int xn_if_flags; struct callout xn_stat_ch; @@ -837,7 +836,7 @@ m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; /* queue the mbufs allocated */ - mbufq_tail(&sc->xn_rx_batch, m_new); + (void )mbufq_enqueue(&sc->xn_rx_batch, m_new); } /* @@ -973,7 +972,7 @@ RING_IDX i, rp; multicall_entry_t *mcl; struct mbuf *m; - struct mbuf_head rxq, errq; + struct mbufq rxq, errq; int err, pages_flipped = 0, work_to_do; do { @@ -981,8 +980,9 @@ if (!netfront_carrier_ok(np)) return; - mbufq_init(&errq); - mbufq_init(&rxq); + /* XXX: there should be some sane limit. */ + mbufq_init(&errq, INT_MAX); + mbufq_init(&rxq, INT_MAX); ifp = np->xn_ifp; @@ -1000,7 +1000,7 @@ if (__predict_false(err)) { if (m) - mbufq_tail(&errq, m); + (void )mbufq_enqueue(&errq, m); np->stats.rx_errors++; continue; } @@ -1022,7 +1022,7 @@ np->stats.rx_packets++; np->stats.rx_bytes += m->m_pkthdr.len; - mbufq_tail(&rxq, m); + (void )mbufq_enqueue(&rxq, m); np->rx.rsp_cons = i; } @@ -1046,8 +1046,7 @@ } } - while ((m = mbufq_dequeue(&errq))) - m_freem(m); + mbufq_drain(&errq); /* * Process all the mbufs after the remapping is complete. Index: sys/netinet/igmp.c =================================================================== --- sys/netinet/igmp.c +++ sys/netinet/igmp.c @@ -87,7 +87,7 @@ static struct igmp_ifinfo * igi_alloc_locked(struct ifnet *); static void igi_delete_locked(const struct ifnet *); -static void igmp_dispatch_queue(struct ifqueue *, int, const int); +static void igmp_dispatch_queue(struct mbufq *, int, const int); static void igmp_fasttimo_vnet(void); static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *); static int igmp_handle_state_change(struct in_multi *, @@ -122,15 +122,15 @@ static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *); static struct mbuf * igmp_v3_encap_report(struct ifnet *, struct mbuf *); -static int igmp_v3_enqueue_group_record(struct ifqueue *, +static int igmp_v3_enqueue_group_record(struct mbufq *, struct in_multi *, const int, const int, const int); -static int igmp_v3_enqueue_filter_change(struct ifqueue *, +static int igmp_v3_enqueue_filter_change(struct mbufq *, struct in_multi *); static void igmp_v3_process_group_timers(struct igmp_ifinfo *, - struct ifqueue *, struct ifqueue *, struct in_multi *, + struct mbufq *, struct mbufq *, struct in_multi *, const int); static int igmp_v3_merge_state_changes(struct in_multi *, - struct ifqueue *); + struct mbufq *); static void igmp_v3_suppress_group_record(struct in_multi *); static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS); static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS); @@ -475,15 +475,12 @@ * VIMAGE: Assumes the vnet pointer has been set. 
*/ static void -igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop) +igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop) { struct mbuf *m; - for (;;) { - _IF_DEQUEUE(ifq, m); - if (m == NULL) - break; - CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m); + while ((m = mbufq_dequeue(mq)) != NULL) { + CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, mq, m); if (loop) m->m_flags |= M_IGMP_LOOP; netisr_dispatch(NETISR_IGMP, m); @@ -579,13 +576,8 @@ igi->igi_qi = IGMP_QI_INIT; igi->igi_qri = IGMP_QRI_INIT; igi->igi_uri = IGMP_URI_INIT; - SLIST_INIT(&igi->igi_relinmhead); - - /* - * Responses to general queries are subject to bounds. - */ - IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS); + mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS); LIST_INSERT_HEAD(&V_igi_head, igi, igi_link); @@ -683,7 +675,7 @@ /* * Free deferred General Query responses. */ - _IF_DRAIN(&igi->igi_gq); + mbufq_drain(&igi->igi_gq); LIST_REMOVE(igi, igi_link); @@ -1643,8 +1635,8 @@ static void igmp_fasttimo_vnet(void) { - struct ifqueue scq; /* State-change packets */ - struct ifqueue qrq; /* Query response packets */ + struct mbufq scq; /* State-change packets */ + struct mbufq qrq; /* Query response packets */ struct ifnet *ifp; struct igmp_ifinfo *igi; struct ifmultiaddr *ifma; @@ -1705,12 +1697,8 @@ loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri * PR_FASTHZ); - - memset(&qrq, 0, sizeof(struct ifqueue)); - IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS); - - memset(&scq, 0, sizeof(struct ifqueue)); - IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS); + mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS); + mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS); } IF_ADDR_RLOCK(ifp); @@ -1809,7 +1797,7 @@ */ static void igmp_v3_process_group_timers(struct igmp_ifinfo *igi, - struct ifqueue *qrq, struct ifqueue *scq, + struct mbufq *qrq, struct mbufq *scq, struct in_multi *inm, const int uri_fasthz) { int query_response_timer_expired; @@ -2071,7 +2059,7 @@ */ inm->inm_sctimer = 0; inm->inm_timer = 0; - _IF_DRAIN(&inm->inm_scq); + mbufq_drain(&inm->inm_scq); } IF_ADDR_RUNLOCK(ifp); SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) { @@ -2344,7 +2332,7 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi) { struct ifnet *ifp; - struct ifqueue *ifq; + struct mbufq *mq; int error, retval, syncstates; CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)", @@ -2418,9 +2406,9 @@ * Don't kick the timers if there is nothing to do, * or if an error occurred. */ - ifq = &inm->inm_scq; - _IF_DRAIN(ifq); - retval = igmp_v3_enqueue_group_record(ifq, inm, 1, + mq = &inm->inm_scq; + mbufq_drain(mq); + retval = igmp_v3_enqueue_group_record(mq, inm, 1, 0, 0); CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval); @@ -2500,7 +2488,7 @@ return (0); } - _IF_DRAIN(&inm->inm_scq); + mbufq_drain(&inm->inm_scq); retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0); CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval); @@ -2569,7 +2557,7 @@ * TO_IN {} to be sent on the next fast timeout, * giving us an opportunity to merge reports. */ - _IF_DRAIN(&inm->inm_scq); + mbufq_drain(&inm->inm_scq); inm->inm_timer = 0; if (igi->igi_flags & IGIF_LOOPBACK) { inm->inm_scrv = 1; @@ -2647,7 +2635,7 @@ * no record(s) were appended. 
*/ static int -igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, +igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm, const int is_state_change, const int is_group_query, const int is_source_query) { @@ -2737,7 +2725,7 @@ * Generate the filter list changes using a separate function. */ if (is_filter_list_change) - return (igmp_v3_enqueue_filter_change(ifq, inm)); + return (igmp_v3_enqueue_filter_change(mq, inm)); if (type == IGMP_DO_NOTHING) { CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s", @@ -2767,7 +2755,7 @@ * Note: Group records for G/GSR query responses MUST be sent * in their own packet. */ - m0 = ifq->ifq_tail; + m0 = mbufq_last(mq); if (!is_group_query && m0 != NULL && (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && @@ -2778,7 +2766,7 @@ m = m0; CTR1(KTR_IGMPV3, "%s: use existing packet", __func__); } else { - if (_IF_QFULL(ifq)) { + if (mbufq_full(mq)) { CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); return (-ENOMEM); } @@ -2891,7 +2879,7 @@ if (m != m0) { CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__); m->m_pkthdr.PH_vt.vt_nrecs = 1; - _IF_ENQUEUE(ifq, m); + mbufq_enqueue(mq, m); } else m->m_pkthdr.PH_vt.vt_nrecs++; @@ -2907,7 +2895,7 @@ * Always try for a cluster first. */ while (nims != NULL) { - if (_IF_QFULL(ifq)) { + if (mbufq_full(mq)) { CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); return (-ENOMEM); } @@ -2970,7 +2958,7 @@ nbytes += (msrcs * sizeof(in_addr_t)); CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__); - _IF_ENQUEUE(ifq, m); + mbufq_enqueue(mq, m); } return (nbytes); @@ -3010,7 +2998,7 @@ * no record(s) were appended. */ static int -igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) +igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm) { static const int MINRECLEN = sizeof(struct igmp_grouprec) + sizeof(in_addr_t); @@ -3054,7 +3042,7 @@ */ while (drt != REC_FULL) { do { - m0 = ifq->ifq_tail; + m0 = mbufq_last(mq); if (m0 != NULL && (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && @@ -3201,7 +3189,7 @@ */ m->m_pkthdr.PH_vt.vt_nrecs++; if (m != m0) - _IF_ENQUEUE(ifq, m); + mbufq_enqueue(mq, m); nbytes += npbytes; } while (nims != NULL); drt |= crt; @@ -3215,9 +3203,9 @@ } static int -igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) +igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq) { - struct ifqueue *gq; + struct mbufq *gq; struct mbuf *m; /* pending state-change */ struct mbuf *m0; /* copy of pending state-change */ struct mbuf *mt; /* last state-change in packet */ @@ -3240,13 +3228,13 @@ gq = &inm->inm_scq; #ifdef KTR - if (gq->ifq_head == NULL) { + if (mbufq_first(gq) == NULL) { CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty", __func__, inm); } #endif - m = gq->ifq_head; + m = mbufq_first(gq); while (m != NULL) { /* * Only merge the report into the current packet if @@ -3257,7 +3245,7 @@ * allocated clusters. 
*/ domerge = 0; - mt = ifscq->ifq_tail; + mt = mbufq_last(scq); if (mt != NULL) { recslen = m_length(m, NULL); @@ -3269,7 +3257,7 @@ domerge = 1; } - if (!domerge && _IF_QFULL(gq)) { + if (!domerge && mbufq_full(gq)) { CTR2(KTR_IGMPV3, "%s: outbound queue full, skipping whole packet %p", __func__, m); @@ -3282,7 +3270,7 @@ if (!docopy) { CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m); - _IF_DEQUEUE(gq, m0); + m0 = mbufq_dequeue(gq); m = m0->m_nextpkt; } else { CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m); @@ -3294,13 +3282,13 @@ } if (!domerge) { - CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)", - __func__, m0, ifscq); - _IF_ENQUEUE(ifscq, m0); + CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p)", + __func__, m0, scq); + mbufq_enqueue(scq, m0); } else { struct mbuf *mtl; /* last mbuf of packet mt */ - CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)", + CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p)", __func__, m0, mt); mtl = m_last(mt); @@ -3374,7 +3362,7 @@ /* * Slew transmission of bursts over 500ms intervals. */ - if (igi->igi_gq.ifq_head != NULL) { + if (mbufq_first(&igi->igi_gq) != NULL) { igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY( IGMP_RESPONSE_BURST_INTERVAL); V_interface_timers_running = 1; Index: sys/netinet/in_mcast.c =================================================================== --- sys/netinet/in_mcast.c +++ sys/netinet/in_mcast.c @@ -523,12 +523,7 @@ inm->inm_ifma = ifma; inm->inm_refcount = 1; inm->inm_state = IGMP_NOT_MEMBER; - - /* - * Pending state-changes per group are subject to a bounds check. - */ - IFQ_SET_MAXLEN(&inm->inm_scq, IGMP_MAX_STATE_CHANGES); - + mbufq_init(&inm->inm_scq, IGMP_MAX_STATE_CHANGES); inm->inm_st[0].iss_fmode = MCAST_UNDEFINED; inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; RB_INIT(&inm->inm_srcs); Index: sys/netinet/in_var.h =================================================================== --- sys/netinet/in_var.h +++ sys/netinet/in_var.h @@ -210,7 +210,7 @@ uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */ uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */ SLIST_HEAD(,in_multi) igi_relinmhead; /* released groups */ - struct ifqueue igi_gq; /* queue of general query responses */ + struct mbufq igi_gq; /* queue of general query responses */ }; #define IGIF_SILENT 0x00000001 /* Do not use IGMP on this ifp */ @@ -299,7 +299,7 @@ struct ip_msource_tree inm_srcs; /* tree of sources */ u_long inm_nsrc; /* # of tree entries */ - struct ifqueue inm_scq; /* queue of pending + struct mbufq inm_scq; /* queue of pending * state-change packets */ struct timeval inm_lastgsrtv; /* Time of last G-S-R query */ uint16_t inm_sctimer; /* state-change timer */ Index: sys/netinet6/in6_mcast.c =================================================================== --- sys/netinet6/in6_mcast.c +++ sys/netinet6/in6_mcast.c @@ -479,7 +479,7 @@ inm->in6m_ifma = ifma; inm->in6m_refcount = 1; inm->in6m_state = MLD_NOT_MEMBER; - IFQ_SET_MAXLEN(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); + mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; @@ -1074,7 +1074,7 @@ inm->in6m_nsrc--; } /* Free state-change requests that might be queued. 
*/ - _IF_DRAIN(&inm->in6m_scq); + mbufq_drain(&inm->in6m_scq); } /* @@ -2804,7 +2804,7 @@ inm->in6m_timer, in6m_state_str(inm->in6m_state), inm->in6m_refcount, - inm->in6m_scq.ifq_len); + mbufq_len(&inm->in6m_scq)); printf("mli %p nsrc %lu sctimer %u scrv %u\n", inm->in6m_mli, inm->in6m_nsrc, Index: sys/netinet6/in6_var.h =================================================================== --- sys/netinet6/in6_var.h +++ sys/netinet6/in6_var.h @@ -652,7 +652,7 @@ struct ip6_msource_tree in6m_srcs; /* tree of sources */ u_long in6m_nsrc; /* # of tree entries */ - struct ifqueue in6m_scq; /* queue of pending + struct mbufq in6m_scq; /* queue of pending * state-change packets */ struct timeval in6m_lastgsrtv; /* last G-S-R query */ uint16_t in6m_sctimer; /* state-change timer */ Index: sys/netinet6/mld6.c =================================================================== --- sys/netinet6/mld6.c +++ sys/netinet6/mld6.c @@ -106,7 +106,7 @@ mli_alloc_locked(struct ifnet *); static void mli_delete_locked(const struct ifnet *); static void mld_dispatch_packet(struct mbuf *); -static void mld_dispatch_queue(struct ifqueue *, int); +static void mld_dispatch_queue(struct mbufq *, int); static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *); static void mld_fasttimo_vnet(void); static int mld_handle_state_change(struct in6_multi *, @@ -131,17 +131,17 @@ static void mld_v2_dispatch_general_query(struct mld_ifinfo *); static struct mbuf * mld_v2_encap_report(struct ifnet *, struct mbuf *); -static int mld_v2_enqueue_filter_change(struct ifqueue *, +static int mld_v2_enqueue_filter_change(struct mbufq *, struct in6_multi *); -static int mld_v2_enqueue_group_record(struct ifqueue *, +static int mld_v2_enqueue_group_record(struct mbufq *, struct in6_multi *, const int, const int, const int, const int); static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, struct mbuf *, const int, const int); static int mld_v2_merge_state_changes(struct in6_multi *, - struct ifqueue *); + struct mbufq *); static void mld_v2_process_group_timers(struct mld_ifinfo *, - struct ifqueue *, struct ifqueue *, + struct mbufq *, struct mbufq *, struct in6_multi *, const int); static int mld_v2_process_group_query(struct in6_multi *, struct mld_ifinfo *mli, int, struct mbuf *, const int); @@ -406,15 +406,12 @@ * VIMAGE: Assumes the vnet pointer has been set. */ static void -mld_dispatch_queue(struct ifqueue *ifq, int limit) +mld_dispatch_queue(struct mbufq *mq, int limit) { struct mbuf *m; - for (;;) { - _IF_DEQUEUE(ifq, m); - if (m == NULL) - break; - CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, ifq, m); + while ((m = mbufq_dequeue(mq)) != NULL) { + CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, mq, m); mld_dispatch_packet(m); if (--limit == 0) break; @@ -499,13 +496,8 @@ mli->mli_qi = MLD_QI_INIT; mli->mli_qri = MLD_QRI_INIT; mli->mli_uri = MLD_URI_INIT; - SLIST_INIT(&mli->mli_relinmhead); - - /* - * Responses to general queries are subject to bounds. - */ - IFQ_SET_MAXLEN(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS); + mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS); LIST_INSERT_HEAD(&V_mli_head, mli, mli_link); @@ -598,7 +590,7 @@ /* * Free deferred General Query responses. 
*/ - _IF_DRAIN(&mli->mli_gq); + mbufq_drain(&mli->mli_gq); LIST_REMOVE(mli, mli_link); @@ -1326,8 +1318,8 @@ static void mld_fasttimo_vnet(void) { - struct ifqueue scq; /* State-change packets */ - struct ifqueue qrq; /* Query response packets */ + struct mbufq scq; /* State-change packets */ + struct mbufq qrq; /* Query response packets */ struct ifnet *ifp; struct mld_ifinfo *mli; struct ifmultiaddr *ifma; @@ -1386,12 +1378,8 @@ if (mli->mli_version == MLD_VERSION_2) { uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * PR_FASTHZ); - - memset(&qrq, 0, sizeof(struct ifqueue)); - IFQ_SET_MAXLEN(&qrq, MLD_MAX_G_GS_PACKETS); - - memset(&scq, 0, sizeof(struct ifqueue)); - IFQ_SET_MAXLEN(&scq, MLD_MAX_STATE_CHANGE_PACKETS); + mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS); + mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS); } IF_ADDR_RLOCK(ifp); @@ -1503,7 +1491,7 @@ */ static void mld_v2_process_group_timers(struct mld_ifinfo *mli, - struct ifqueue *qrq, struct ifqueue *scq, + struct mbufq *qrq, struct mbufq *scq, struct in6_multi *inm, const int uri_fasthz) { int query_response_timer_expired; @@ -1711,7 +1699,7 @@ /* * Free any pending MLDv2 state-change records. */ - _IF_DRAIN(&inm->in6m_scq); + mbufq_drain(&inm->in6m_scq); break; } } @@ -1950,7 +1938,7 @@ const int delay) { struct ifnet *ifp; - struct ifqueue *ifq; + struct mbufq *mq; int error, retval, syncstates; int odelay; #ifdef KTR @@ -2037,9 +2025,9 @@ * Don't kick the timers if there is nothing to do, * or if an error occurred. */ - ifq = &inm->in6m_scq; - _IF_DRAIN(ifq); - retval = mld_v2_enqueue_group_record(ifq, inm, 1, + mq = &inm->in6m_scq; + mbufq_drain(mq); + retval = mld_v2_enqueue_group_record(mq, inm, 1, 0, 0, (mli->mli_flags & MLIF_USEALLOW)); CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval); @@ -2131,7 +2119,7 @@ return (0); } - _IF_DRAIN(&inm->in6m_scq); + mbufq_drain(&inm->in6m_scq); retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0, (mli->mli_flags & MLIF_USEALLOW)); @@ -2204,7 +2192,7 @@ * TO_IN {} to be sent on the next fast timeout, * giving us an opportunity to merge reports. */ - _IF_DRAIN(&inm->in6m_scq); + mbufq_drain(&inm->in6m_scq); inm->in6m_timer = 0; inm->in6m_scrv = mli->mli_rv; CTR4(KTR_MLD, "%s: Leaving %s/%s with %d " @@ -2280,7 +2268,7 @@ * no record(s) were appended. */ static int -mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, +mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm, const int is_state_change, const int is_group_query, const int is_source_query, const int use_block_allow) { @@ -2395,7 +2383,7 @@ * Generate the filter list changes using a separate function. */ if (is_filter_list_change) - return (mld_v2_enqueue_filter_change(ifq, inm)); + return (mld_v2_enqueue_filter_change(mq, inm)); if (type == MLD_DO_NOTHING) { CTR3(KTR_MLD, "%s: nothing to do for %s/%s", @@ -2426,7 +2414,7 @@ * Note: Group records for G/GSR query responses MUST be sent * in their own packet. 
*/ - m0 = ifq->ifq_tail; + m0 = mbufq_last(mq); if (!is_group_query && m0 != NULL && (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && @@ -2438,7 +2426,7 @@ m = m0; CTR1(KTR_MLD, "%s: use existing packet", __func__); } else { - if (_IF_QFULL(ifq)) { + if (mbufq_full(mq)) { CTR1(KTR_MLD, "%s: outbound queue full", __func__); return (-ENOMEM); } @@ -2551,7 +2539,7 @@ if (m != m0) { CTR1(KTR_MLD, "%s: enqueueing first packet", __func__); m->m_pkthdr.PH_vt.vt_nrecs = 1; - _IF_ENQUEUE(ifq, m); + mbufq_enqueue(mq, m); } else m->m_pkthdr.PH_vt.vt_nrecs++; @@ -2567,7 +2555,7 @@ * Always try for a cluster first. */ while (nims != NULL) { - if (_IF_QFULL(ifq)) { + if (mbufq_full(mq)) { CTR1(KTR_MLD, "%s: outbound queue full", __func__); return (-ENOMEM); } @@ -2626,7 +2614,7 @@ nbytes += (msrcs * sizeof(struct in6_addr)); CTR1(KTR_MLD, "%s: enqueueing next packet", __func__); - _IF_ENQUEUE(ifq, m); + mbufq_enqueue(mq, m); } return (nbytes); @@ -2666,7 +2654,7 @@ * no record(s) were appended. */ static int -mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) +mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm) { static const int MINRECLEN = sizeof(struct mldv2_record) + sizeof(struct in6_addr); @@ -2712,7 +2700,7 @@ */ while (drt != REC_FULL) { do { - m0 = ifq->ifq_tail; + m0 = mbufq_last(mq); if (m0 != NULL && (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && @@ -2856,7 +2844,7 @@ */ m->m_pkthdr.PH_vt.vt_nrecs++; if (m != m0) - _IF_ENQUEUE(ifq, m); + mbufq_enqueue(mq, m); nbytes += npbytes; } while (nims != NULL); drt |= crt; @@ -2870,9 +2858,9 @@ } static int -mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) +mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq) { - struct ifqueue *gq; + struct mbufq *gq; struct mbuf *m; /* pending state-change */ struct mbuf *m0; /* copy of pending state-change */ struct mbuf *mt; /* last state-change in packet */ @@ -2895,13 +2883,13 @@ gq = &inm->in6m_scq; #ifdef KTR - if (gq->ifq_head == NULL) { + if (mbufq_first(gq) == NULL) { CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty", __func__, inm); } #endif - m = gq->ifq_head; + m = mbufq_first(gq); while (m != NULL) { /* * Only merge the report into the current packet if @@ -2912,7 +2900,7 @@ * allocated clusters. */ domerge = 0; - mt = ifscq->ifq_tail; + mt = mbufq_last(scq); if (mt != NULL) { recslen = m_length(m, NULL); @@ -2924,7 +2912,7 @@ domerge = 1; } - if (!domerge && _IF_QFULL(gq)) { + if (!domerge && mbufq_full(gq)) { CTR2(KTR_MLD, "%s: outbound queue full, skipping whole packet %p", __func__, m); @@ -2937,7 +2925,7 @@ if (!docopy) { CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m); - _IF_DEQUEUE(gq, m0); + m0 = mbufq_dequeue(gq); m = m0->m_nextpkt; } else { CTR2(KTR_MLD, "%s: copying %p", __func__, m); @@ -2949,9 +2937,9 @@ } if (!domerge) { - CTR3(KTR_MLD, "%s: queueing %p to ifscq %p)", - __func__, m0, ifscq); - _IF_ENQUEUE(ifscq, m0); + CTR3(KTR_MLD, "%s: queueing %p to scq %p)", + __func__, m0, scq); + mbufq_enqueue(scq, m0); } else { struct mbuf *mtl; /* last mbuf of packet mt */ @@ -3028,7 +3016,7 @@ /* * Slew transmission of bursts over 500ms intervals. 
 	 */
-	if (mli->mli_gq.ifq_head != NULL) {
+	if (mbufq_first(&mli->mli_gq) != NULL) {
 		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
 		    MLD_RESPONSE_BURST_INTERVAL);
 		V_interface_timers_running6 = 1;
Index: sys/netinet6/mld6_var.h
===================================================================
--- sys/netinet6/mld6_var.h
+++ sys/netinet6/mld6_var.h
@@ -52,7 +52,7 @@
 	uint32_t mli_qri;	/* MLDv2 Query Response Interval (s) */
 	uint32_t mli_uri;	/* MLDv2 Unsolicited Report Interval (s) */
 	SLIST_HEAD(,in6_multi)	mli_relinmhead; /* released groups */
-	struct ifqueue	mli_gq;		/* queue of general query responses */
+	struct mbufq	mli_gq;		/* queue of general query responses */
 };
 #define	MLIF_SILENT	0x00000001	/* Do not use MLD on this ifp */
 #define	MLIF_USEALLOW	0x00000002	/* Use ALLOW/BLOCK for joins/leaves */
Index: sys/sys/mbuf.h
===================================================================
--- sys/sys/mbuf.h
+++ sys/sys/mbuf.h
@@ -1199,5 +1199,101 @@
 #define	M_PROFILE(m)
 #endif
 
+struct mbufq {
+	STAILQ_HEAD(, mbuf)	mq_head;
+	int			mq_len;
+	int			mq_maxlen;
+};
+
+static inline void
+mbufq_init(struct mbufq *mq, int maxlen)
+{
+
+	STAILQ_INIT(&mq->mq_head);
+	mq->mq_maxlen = maxlen;
+	mq->mq_len = 0;
+}
+
+static inline struct mbuf *
+mbufq_flush(struct mbufq *mq)
+{
+	struct mbuf *m;
+
+	m = STAILQ_FIRST(&mq->mq_head);
+	STAILQ_INIT(&mq->mq_head);
+	mq->mq_len = 0;
+	return (m);
+}
+
+static inline void
+mbufq_drain(struct mbufq *mq)
+{
+	struct mbuf *m, *n;
+
+	n = mbufq_flush(mq);
+	while ((m = n) != NULL) {
+		n = STAILQ_NEXT(m, m_stailqpkt);
+		m_freem(m);
+	}
+}
+
+static inline struct mbuf *
+mbufq_first(const struct mbufq *mq)
+{
+
+	return (STAILQ_FIRST(&mq->mq_head));
+}
+
+static inline struct mbuf *
+mbufq_last(const struct mbufq *mq)
+{
+
+	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
+}
+static inline int
+mbufq_full(const struct mbufq *mq)
+{
+
+	return (mq->mq_len >= mq->mq_maxlen);
+}
+
+static inline int
+mbufq_len(const struct mbufq *mq)
+{
+
+	return (mq->mq_len);
+}
+
+static inline int
+mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
+{
+
+	if (mbufq_full(mq))
+		return (ENOBUFS);
+	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
+	mq->mq_len++;
+	return (0);
+}
+
+static inline struct mbuf *
+mbufq_dequeue(struct mbufq *mq)
+{
+	struct mbuf *m;
+
+	m = STAILQ_FIRST(&mq->mq_head);
+	if (m) {
+		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
+		mq->mq_len--;
+	}
+	return (m);
+}
+
+static inline void
+mbufq_prepend(struct mbufq *mq, struct mbuf *m)
+{
+
+	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
+	mq->mq_len++;
+}
 
 #endif /* !_SYS_MBUF_H_ */
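
The sys/sys/mbuf.h hunk above is the heart of the change: it adds the generic mbufq API that replaces the two private copies of Chelsio's mbufq.h and the struct ifqueue users in igmp(4)/mld(4). What follows is a minimal illustrative sketch of the calling pattern the converted code adopts; it is not part of the patch, and the softc layout and example_* names are hypothetical.

/*
 * Illustrative only: a hypothetical consumer of the new mbufq API,
 * showing the pattern the per-driver conversions in this patch follow.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/mbuf.h>

struct example_softc {
	struct mbufq	sc_sendq;	/* deferred transmit packets */
};

static void
example_attach(struct example_softc *sc)
{

	/* Bounded init replaces the old unbounded mbuf_head/ifqueue setup. */
	mbufq_init(&sc->sc_sendq, 512);
}

static int
example_defer_tx(struct example_softc *sc, struct mbuf *m)
{

	/* Unlike the old mbufq_tail(), enqueue can fail once the limit is hit. */
	if (mbufq_enqueue(&sc->sc_sendq, m) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	return (0);
}

static u_int
example_flush_tx(struct example_softc *sc)
{
	struct mbuf *m;
	u_int sent;

	sent = 0;
	/* FIFO drain; mbufq_dequeue() returns NULL when the queue is empty. */
	while ((m = mbufq_dequeue(&sc->sc_sendq)) != NULL) {
		sent++;
		m_freem(m);	/* stand-in for the real transmit path */
	}
	return (sent);
}

static void
example_detach(struct example_softc *sc)
{

	/* Free anything still queued at teardown. */
	mbufq_drain(&sc->sc_sendq);
}

The notable behavioural difference from the removed helpers is that mbufq_enqueue() now returns ENOBUFS once mq_maxlen is reached, which is why callers in this patch that cannot yet pick a sensible bound (the cxgb send queues, the TOE work-request list, netfront's batch queues) pass INT_MAX, cast the return value to void, and carry XXX comments noting that a real limit is still needed.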