Index: head/sys/net/ifq.h
===================================================================
--- head/sys/net/ifq.h	(revision 296177)
+++ head/sys/net/ifq.h	(revision 296178)
@@ -1,484 +1,484 @@
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)if.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#ifndef	_NET_IFQ_H_
#define	_NET_IFQ_H_

#ifdef _KERNEL
#include <sys/mbuf.h>		/* ifqueue only? */
#include <sys/buf_ring.h>
#include <net/vnet.h>
#endif /* _KERNEL */
#include <sys/lock.h>		/* XXX */
#include <sys/mutex.h>		/* struct ifqueue */

/*
 * Couple of ugly extra definitions that are required since ifq.h
 * is split from if_var.h.
 */
#define	IF_DUNIT_NONE	-1

#include <net/altq/if_altq.h>

/*
 * Structure defining a queue for a network interface.
 */
struct ifqueue {
	struct	mbuf *ifq_head;
	struct	mbuf *ifq_tail;
	int	ifq_len;
	int	ifq_maxlen;
	struct	mtx ifq_mtx;
};

#ifdef _KERNEL
/*
 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
 * are queues of messages stored on ifqueue structures
 * (defined above).  Entries are added to and deleted from these structures
 * by these macros.
 */
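/*
 * Illustrative sketch, not part of ifq.h: a driver that keeps a private
 * ifqueue typically initializes it along these lines before using the
 * macros below.  "sc" and "sc_rxq" are hypothetical driver names.
 */
#if 0	/* example only */
	struct ifqueue *q = &sc->sc_rxq;

	mtx_init(&q->ifq_mtx, "xx rxq", NULL, MTX_DEF);
	q->ifq_head = q->ifq_tail = NULL;
	q->ifq_len = 0;
	q->ifq_maxlen = ifqmaxlen;
#endif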
#define	IF_LOCK(ifq)		mtx_lock(&(ifq)->ifq_mtx)
#define	IF_UNLOCK(ifq)		mtx_unlock(&(ifq)->ifq_mtx)
#define	IF_LOCK_ASSERT(ifq)	mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
#define	_IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_QLEN(ifq)		((ifq)->ifq_len)

#define	_IF_ENQUEUE(ifq, m) do {				\
	(m)->m_nextpkt = NULL;					\
	if ((ifq)->ifq_tail == NULL)				\
		(ifq)->ifq_head = m;				\
	else							\
		(ifq)->ifq_tail->m_nextpkt = m;			\
	(ifq)->ifq_tail = m;					\
	(ifq)->ifq_len++;					\
} while (0)

#define	IF_ENQUEUE(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_ENQUEUE(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_PREPEND(ifq, m) do {				\
	(m)->m_nextpkt = (ifq)->ifq_head;			\
	if ((ifq)->ifq_tail == NULL)				\
		(ifq)->ifq_tail = (m);				\
	(ifq)->ifq_head = (m);					\
	(ifq)->ifq_len++;					\
} while (0)

#define	IF_PREPEND(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_PREPEND(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_DEQUEUE(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	if (m) {						\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_tail = NULL;			\
		(m)->m_nextpkt = NULL;				\
		(ifq)->ifq_len--;				\
	}							\
} while (0)

#define	IF_DEQUEUE(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_DEQUEUE(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_DEQUEUE_ALL(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	(ifq)->ifq_head = (ifq)->ifq_tail = NULL;		\
	(ifq)->ifq_len = 0;					\
} while (0)

#define	IF_DEQUEUE_ALL(ifq, m) do {				\
	IF_LOCK(ifq);						\
	_IF_DEQUEUE_ALL(ifq, m);				\
	IF_UNLOCK(ifq);						\
} while (0)

#define	_IF_POLL(ifq, m)	((m) = (ifq)->ifq_head)
#define	IF_POLL(ifq, m)		_IF_POLL(ifq, m)

#define	_IF_DRAIN(ifq) do {					\
	struct mbuf *m;						\
	for (;;) {						\
		_IF_DEQUEUE(ifq, m);				\
		if (m == NULL)					\
			break;					\
		m_freem(m);					\
	}							\
} while (0)

#define	IF_DRAIN(ifq) do {					\
	IF_LOCK(ifq);						\
	_IF_DRAIN(ifq);						\
	IF_UNLOCK(ifq);						\
} while(0)

int	if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
	    int adjust);
#define	IF_HANDOFF(ifq, m, ifp)			\
	if_handoff((struct ifqueue *)ifq, m, ifp, 0)
#define	IF_HANDOFF_ADJ(ifq, m, ifp, adj)	\
	if_handoff((struct ifqueue *)ifq, m, ifp, adj)

void	if_start(struct ifnet *);

#define	IFQ_ENQUEUE(ifq, m, err)					\
do {									\
	IF_LOCK(ifq);							\
	if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_ENQUEUE(ifq, m, NULL, err);			\
	else {								\
		if (_IF_QFULL(ifq)) {					\
			m_freem(m);					\
			(err) = ENOBUFS;				\
		} else {						\
			_IF_ENQUEUE(ifq, m);				\
			(err) = 0;					\
		}							\
	}								\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_DEQUEUE_NOLOCK(ifq, m)					\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE);		\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_DEQUEUE(ifq, m);					\
	else								\
		_IF_DEQUEUE(ifq, m);					\
} while (0)

#define	IFQ_DEQUEUE(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_DEQUEUE_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_POLL_NOLOCK(ifq, m)						\
do {									\
	if (TBR_IS_ENABLED(ifq))					\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL);			\
	else if (ALTQ_IS_ENABLED(ifq))					\
		ALTQ_POLL(ifq, m);					\
	else								\
		_IF_POLL(ifq, m);					\
} while (0)

#define	IFQ_POLL(ifq, m)						\
do {									\
	IF_LOCK(ifq);							\
	IFQ_POLL_NOLOCK(ifq, m);					\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_PURGE_NOLOCK(ifq)						\
do {									\
	if (ALTQ_IS_ENABLED(ifq)) {					\
		ALTQ_PURGE(ifq);					\
	} else								\
		_IF_DRAIN(ifq);						\
} while (0)

#define	IFQ_PURGE(ifq)							\
do {									\
	IF_LOCK(ifq);							\
	IFQ_PURGE_NOLOCK(ifq);						\
	IF_UNLOCK(ifq);							\
} while (0)

#define	IFQ_SET_READY(ifq)						\
	do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)

#define	IFQ_LOCK(ifq)		IF_LOCK(ifq)
#define	IFQ_UNLOCK(ifq)		IF_UNLOCK(ifq)
#define	IFQ_LOCK_ASSERT(ifq)	IF_LOCK_ASSERT(ifq)
#define	IFQ_IS_EMPTY(ifq)	((ifq)->ifq_len == 0)
#define	IFQ_INC_LEN(ifq)	((ifq)->ifq_len++)
#define	IFQ_DEC_LEN(ifq)	(--(ifq)->ifq_len)
#define	IFQ_SET_MAXLEN(ifq, len)	((ifq)->ifq_maxlen = (len))
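/*
 * Illustrative sketch, not part of ifq.h: the *_NOLOCK variants above
 * exist so a caller can take IFQ_LOCK() once around a whole batch.
 * "sc" and "xx_encap" are hypothetical driver names.
 */
#if 0	/* example only */
	struct mbuf *m;

	IFQ_LOCK(&ifp->if_snd);
	for (;;) {
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);	/* look without taking */
		if (m == NULL)
			break;
		IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);	/* now consume it */
		(void)xx_encap(sc, m);
	}
	IFQ_UNLOCK(&ifp->if_snd);
#endif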
/*
 * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
 * the handoff logic, as that flag is locked by the device driver.
 */
#define	IFQ_HANDOFF_ADJ(ifp, m, adj, err)				\
do {									\
	int len;							\
	short mflags;							\
									\
	len = (m)->m_pkthdr.len;					\
	mflags = (m)->m_flags;						\
	IFQ_ENQUEUE(&(ifp)->if_snd, m, err);				\
	if ((err) == 0) {						\
		if_inc_counter((ifp), IFCOUNTER_OBYTES, len + (adj));	\
		if (mflags & M_MCAST)					\
			if_inc_counter((ifp), IFCOUNTER_OMCASTS, 1);	\
		if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0)	\
			if_start(ifp);					\
	} else								\
		if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);		\
} while (0)

#define	IFQ_HANDOFF(ifp, m, err)					\
	IFQ_HANDOFF_ADJ(ifp, m, 0, err)

#define	IFQ_DRV_DEQUEUE(ifq, m)						\
do {									\
	(m) = (ifq)->ifq_drv_head;					\
	if (m) {							\
		if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_drv_tail = NULL;			\
		(m)->m_nextpkt = NULL;					\
		(ifq)->ifq_drv_len--;					\
	} else {							\
		IFQ_LOCK(ifq);						\
		IFQ_DEQUEUE_NOLOCK(ifq, m);				\
		while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) {	\
			struct mbuf *m0;				\
			IFQ_DEQUEUE_NOLOCK(ifq, m0);			\
			if (m0 == NULL)					\
				break;					\
			m0->m_nextpkt = NULL;				\
			if ((ifq)->ifq_drv_tail == NULL)		\
				(ifq)->ifq_drv_head = m0;		\
			else						\
				(ifq)->ifq_drv_tail->m_nextpkt = m0;	\
			(ifq)->ifq_drv_tail = m0;			\
			(ifq)->ifq_drv_len++;				\
		}							\
		IFQ_UNLOCK(ifq);					\
	}								\
} while (0)

#define	IFQ_DRV_PREPEND(ifq, m)						\
do {									\
	(m)->m_nextpkt = (ifq)->ifq_drv_head;				\
	if ((ifq)->ifq_drv_tail == NULL)				\
		(ifq)->ifq_drv_tail = (m);				\
	(ifq)->ifq_drv_head = (m);					\
	(ifq)->ifq_drv_len++;						\
} while (0)

#define	IFQ_DRV_IS_EMPTY(ifq)						\
	(((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))

#define	IFQ_DRV_PURGE(ifq)						\
do {									\
	struct mbuf *m, *n = (ifq)->ifq_drv_head;			\
	while ((m = n) != NULL) {					\
		n = m->m_nextpkt;					\
		m_freem(m);						\
	}								\
	(ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL;		\
	(ifq)->ifq_drv_len = 0;						\
	IFQ_PURGE(ifq);							\
} while (0)

static __inline int
drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
{
	int error = 0;

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
		if (error)
			if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);
		return (error);
	}
#endif
	error = buf_ring_enqueue(br, m);
	if (error)
		m_freem(m);

	return (error);
}

static __inline void
drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *new)
{
	/*
	 * The top of the list needs to be swapped
	 * for this one.
	 */
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Peek in the ALTQ case dequeued it,
		 * so put it back.
		 */
		IFQ_DRV_PREPEND(&ifp->if_snd, new);
		return;
	}
#endif
	buf_ring_putback_sc(br, new);
}
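/*
 * Illustrative sketch, not part of ifq.h: a multiqueue driver's
 * if_transmit method typically feeds drbr_enqueue() and then kicks a tx
 * task.  "xx_softc", "sc_br", "sc_tq" and "sc_txtask" are hypothetical.
 */
#if 0	/* example only */
static int
xx_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct xx_softc *sc = ifp->if_softc;
	int error;

	/* On failure the mbuf has already been freed for us. */
	error = drbr_enqueue(ifp, sc->sc_br, m);
	if (error == 0)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	return (error);
}
#endif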
static __inline struct mbuf *
drbr_peek(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Pull it off like a dequeue
		 * since drbr_advance() does nothing
		 * for altq and drbr_putback() will
		 * use the old prepend function.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
-	return(buf_ring_peek(br));
+	return(buf_ring_peek_clear_sc(br));
}

static __inline void
drbr_flush(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *m;

#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		IFQ_PURGE(&ifp->if_snd);
#endif
	while ((m = buf_ring_dequeue_sc(br)) != NULL)
		m_freem(m);
}

static __inline void
drbr_free(struct buf_ring *br, struct malloc_type *type)
{

	drbr_flush(NULL, br);
	buf_ring_free(br, type);
}

static __inline struct mbuf *
drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;

	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return (buf_ring_dequeue_sc(br));
}

static __inline void
drbr_advance(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	/* Nothing to do here since peek dequeues in altq case */
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		return;
#endif
	return (buf_ring_advance_sc(br));
}

static __inline struct mbuf *
drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
    int (*func) (struct mbuf *, void *), void *arg)
{
	struct mbuf *m;
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m != NULL && func(m, arg) == 0) {
			IFQ_UNLOCK(&ifp->if_snd);
			return (NULL);
		}
		IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
		IFQ_UNLOCK(&ifp->if_snd);
		return (m);
	}
#endif
	m = buf_ring_peek(br);
	if (m == NULL || func(m, arg) == 0)
		return (NULL);

	return (buf_ring_dequeue_sc(br));
}

static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (IFQ_IS_EMPTY(&ifp->if_snd));
#endif
	return (buf_ring_empty(br));
}

static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}

static __inline int
drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (ifp->if_snd.ifq_len);
#endif
	return (buf_ring_count(br));
}

extern	int ifqmaxlen;

void	if_qflush(struct ifnet *);
void	ifq_init(struct ifaltq *, struct ifnet *ifp);
void	ifq_delete(struct ifaltq *);

#endif /* _KERNEL */
#endif /* !_NET_IFQ_H_ */
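/*
 * Illustrative sketch, not part of the diff: the peek/advance/putback
 * protocol that drbr_peek(), drbr_advance() and drbr_putback() implement
 * for a driver tx handler.  The tx queue lock is assumed held; "xx_encap"
 * and "sc_br" are hypothetical.
 */
#if 0	/* example only */
	struct mbuf *m;

	while ((m = drbr_peek(ifp, sc->sc_br)) != NULL) {
		if (xx_encap(sc, &m) != 0) {
			if (m == NULL) {
				/* encap freed the chain: drop the slot */
				drbr_advance(ifp, sc->sc_br);
				break;
			}
			/* out of descriptors: leave the mbuf queued */
			drbr_putback(ifp, sc->sc_br, m);
			break;
		}
		/* frame handed to hardware: consume the slot */
		drbr_advance(ifp, sc->sc_br);
	}
#endif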
Index: head/sys/sys/buf_ring.h
===================================================================
--- head/sys/sys/buf_ring.h	(revision 296177)
+++ head/sys/sys/buf_ring.h	(revision 296178)
@@ -1,299 +1,330 @@
/*-
 * Copyright (c) 2007-2009 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef	_SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define DEBUG_BUFRING 1
#endif

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

struct buf_ring {
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};

/*
 * multi-producer safe lock-free ring buffer enqueue
 *
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next, cons_tail;
#ifdef DEBUG_BUFRING
	int i;
	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueue at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		prod_next = (prod_head + 1) & br->br_prod_mask;
		cons_tail = br->br_cons_tail;

		if (prod_next == cons_tail) {
			rmb();
			if (prod_head == br->br_prod_head &&
			    cons_tail == br->br_cons_tail) {
				br->br_drops++;
				critical_exit();
				return (ENOBUFS);
			}
			continue;
		}
	} while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif
	br->br_ring[prod_head] = buf;

	/*
	 * If there are other enqueues in progress
	 * that preceded us, we need to wait for them
	 * to complete
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	atomic_store_rel_int(&br->br_prod_tail, prod_next);
	critical_exit();
	return (0);
}

/*
 * multi-consumer safe dequeue
 *
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	void *buf;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == br->br_prod_tail) {
			critical_exit();
			return (NULL);
		}
	} while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	/*
	 * If there are other dequeues in progress
	 * that preceded us, we need to wait for them
	 * to complete
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	atomic_store_rel_int(&br->br_cons_tail, cons_next);
	critical_exit();

	return (buf);
}
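/*
 * Illustrative sketch, not part of buf_ring.h: any number of threads may
 * call buf_ring_enqueue() concurrently (each claims a slot via the cmpset
 * loop above), while consumers either race through the _mc variant or
 * serialize with a lock.  "sc_tx_mtx" and "xx_process" are hypothetical.
 */
#if 0	/* example only */
	struct buf_ring *br;
	struct mbuf *m;

	/* count must be a power of two; br_prod_mask relies on it */
	br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &sc->sc_tx_mtx);

	if (buf_ring_enqueue(br, m) == ENOBUFS)
		m_freem(m);		/* ring full: caller still owns m */

	while ((m = buf_ring_dequeue_mc(br)) != NULL)
		xx_process(m);
#endif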
/*
 * single-consumer dequeue
 * use where dequeue is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
	uint32_t cons_next_next;
#endif
	uint32_t prod_tail;
	void *buf;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
	cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif
	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}

/*
 * single-consumer advance after a peek
 * use where it is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there)
 * to the top of the ring.  The caller should *not*
 * have used any dequeue to pull it out of the ring
 * but instead should have used the peek() function.
 * This is normally used where the transmit queue
 * of a driver is full, and an mbuf must be returned.
 * Most likely what's in the ring-buffer is what
 * is being put back (since it was not removed), but
 * sometimes the lower transmit function may have
 * done a pullup or other function that will have
 * changed it.  As an optimization we always put it
 * back (since jhb says the store is probably cheaper);
 * if we have to do a multi-queue version we will need
 * the compare and an atomic.
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("Buf-Ring has none in putback"));
	br->br_ring[br->br_cons_head] = new;
}

/*
 * return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty
 * race-prone if not protected by a lock
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier
	 * here because we control cons and tail is worst case
	 * a lagging indicator, so worst case we might
	 * return NULL immediately after a buffer has been enqueued
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}

+static __inline void *
+buf_ring_peek_clear_sc(struct buf_ring *br)
+{
+#ifdef DEBUG_BUFRING
+	void *ret;
+
+	if (!mtx_owned(br->br_lock))
+		panic("lock not held on single consumer dequeue");
+#endif
+	/*
+	 * I believe it is safe to not have a memory barrier
+	 * here because we control cons and tail is worst case
+	 * a lagging indicator, so worst case we might
+	 * return NULL immediately after a buffer has been enqueued
+	 */
+	if (br->br_cons_head == br->br_prod_tail)
+		return (NULL);
+
+#ifdef DEBUG_BUFRING
+	/*
+	 * Single consumer, i.e. cons_head will not move while we are
+	 * running, so atomic_swap_ptr() is not necessary here.
+	 */
+	ret = br->br_ring[br->br_cons_head];
+	br->br_ring[br->br_cons_head] = NULL;
+	return (ret);
+#else
+	return (br->br_ring[br->br_cons_head]);
+#endif
+}
+
static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}

struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);

#endif
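/*
 * Illustrative sketch, not part of the diff: the new
 * buf_ring_peek_clear_sc() pairs with buf_ring_advance_sc() and
 * buf_ring_putback_sc() under the consumer lock; under DEBUG_BUFRING it
 * also NULLs the slot so the debug checks do not trip over a stale
 * pointer while the buffer is out on loan.  "sc_tx_mtx", "xx_encap" and
 * "br" are hypothetical.
 */
#if 0	/* example only */
	struct mbuf *m;

	mtx_lock(&sc->sc_tx_mtx);
	while ((m = buf_ring_peek_clear_sc(br)) != NULL) {
		if (xx_encap(sc, &m) != 0) {
			if (m != NULL)
				buf_ring_putback_sc(br, m);	/* retry later */
			else
				buf_ring_advance_sc(br);	/* mbuf was freed */
			break;
		}
		buf_ring_advance_sc(br);
	}
	mtx_unlock(&sc->sc_tx_mtx);
#endif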