Changeset View
Changeset View
Standalone View
Standalone View
sys/sys/buf_ring.h
Show All 35 Lines | |||||
#define DEBUG_BUFRING 1 | #define DEBUG_BUFRING 1 | ||||
#endif | #endif | ||||
#ifdef DEBUG_BUFRING | #ifdef DEBUG_BUFRING | ||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/mutex.h> | #include <sys/mutex.h> | ||||
#endif | #endif | ||||
void if_rexmt_start(int qid, int nqs); | |||||
/* cache line align buf ring entries */ | /* cache line align buf ring entries */ | ||||
#define BR_FLAGS_ALIGNED 0x1 | #define BR_FLAGS_ALIGNED 0x1 | ||||
/*
 * A single ring slot.  Wrapped in a struct (rather than a bare
 * void * array) so that the slot stride/alignment can be selected
 * at runtime when the ring is allocated (see BR_FLAGS_ALIGNED and
 * the br_entry_get/br_entry_set accessors below).
 */
struct br_entry_ {
	volatile void *bre_ptr;	/* enqueued buffer; NULL when the slot is empty */
};
/*
 * Lock-free multi-producer / single-consumer descriptor ring.
 * Producer- and consumer-owned index pairs live on separate cache
 * lines so producers and the consumer do not false-share.
 */
struct buf_ring {
	volatile uint32_t br_prod_head;	/* next slot producers will claim */
	volatile uint32_t br_prod_tail;	/* last slot fully published to consumer */
	int br_prod_size;		/* ring capacity in entries */
	int br_prod_mask;		/* br_prod_size - 1; used for index wrap */
	uint64_t br_drops;		/* enqueue attempts rejected because ring was full */
	/* cache line aligned to avoid cache line invalidate traffic
	 * between consumer and producer (false sharing)
	 */
	volatile uint32_t br_cons_head __aligned(CACHE_LINE_SIZE);	/* next slot consumer will take */
	volatile uint32_t br_cons_tail;	/* last slot fully released back to producers */
	int br_cons_size;		/* consumer's copy of the ring capacity */
	int br_cons_mask;		/* consumer's copy of the index mask */
	int br_id;			/* queue id handed to if_rexmt_start() on reopen — presumably the tx queue index; verify against buf_ring_alloc() callers */
	int br_nqs;			/* queue count handed to if_rexmt_start() on reopen */
	int br_closed;			/* TRUE once an enqueue drops; cleared by the consumer when
					 * occupancy falls below half capacity.
					 * NOTE(review): read and written with plain (non-atomic,
					 * unfenced) accesses from both producer and consumer
					 * paths — confirm the intended memory-ordering contract. */
#ifdef DEBUG_BUFRING
	struct mtx *br_lock;		/* consumer lock, asserted held in debug dequeue paths */
#endif
	/* cache line aligned to avoid false sharing with other data structures
	 */
	int br_flags __aligned(CACHE_LINE_SIZE);	/* BR_FLAGS_ALIGNED etc. */
	struct br_entry_ br_ring[0] __aligned(CACHE_LINE_SIZE);	/* trailing slot array
					 * NOTE(review): [0] is a GNU extension; C99 flexible
					 * array member ([]) is the portable spelling. */
};
static __inline int buf_ring_count(struct buf_ring *br); | |||||
/* | /* | ||||
* ring entry accessors to allow us to make ring entry | * ring entry accessors to allow us to make ring entry | ||||
* alignment determined at runtime | * alignment determined at runtime | ||||
*/ | */ | ||||
static __inline void * | static __inline void * | ||||
br_entry_get(struct buf_ring *br, int i) | br_entry_get(struct buf_ring *br, int i) | ||||
{ | { | ||||
volatile void *ent; | volatile void *ent; | ||||
▲ Show 20 Lines • Show All 105 Lines • ▼ Show 20 Lines | #ifdef DEBUG_BUFRING | ||||
int i; | int i; | ||||
for (i = br->br_cons_head; i != br->br_prod_head; | for (i = br->br_cons_head; i != br->br_prod_head; | ||||
i = ((i + 1) & br->br_cons_mask)) | i = ((i + 1) & br->br_cons_mask)) | ||||
if(br->br_ring[i].bre_ptr == buf) | if(br->br_ring[i].bre_ptr == buf) | ||||
panic("buf=%p already enqueue at %d prod=%d cons=%d", | panic("buf=%p already enqueue at %d prod=%d cons=%d", | ||||
buf, i, br->br_prod_tail, br->br_cons_tail); | buf, i, br->br_prod_tail, br->br_cons_tail); | ||||
#endif | #endif | ||||
critical_enter(); | critical_enter(); | ||||
if (br->br_closed == TRUE) { | |||||
critical_exit(); | |||||
return (ENOBUFS); | |||||
} | |||||
do { | do { | ||||
prod_head = br->br_prod_head; | prod_head = br->br_prod_head; | ||||
prod_next = (prod_head + 1) & br->br_prod_mask; | prod_next = (prod_head + 1) & br->br_prod_mask; | ||||
cons_tail = br->br_cons_tail; | cons_tail = br->br_cons_tail; | ||||
if (prod_next == cons_tail) { | if (prod_next == cons_tail) { | ||||
/* ensure that we only return ENOBUFS | /* ensure that we only return ENOBUFS | ||||
* if the latest value matches what we read | * if the latest value matches what we read | ||||
*/ | */ | ||||
if (prod_head != atomic_load_acq_32(&br->br_prod_head) || | if (prod_head != atomic_load_acq_32(&br->br_prod_head) || | ||||
cons_tail != atomic_load_acq_32(&br->br_cons_tail)) | cons_tail != atomic_load_acq_32(&br->br_cons_tail)) | ||||
continue; | continue; | ||||
br->br_drops++; | br->br_drops++; | ||||
br->br_closed = TRUE; | |||||
critical_exit(); | critical_exit(); | ||||
return (ENOBUFS); | return (ENOBUFS); | ||||
} | } | ||||
} while (!atomic_cmpset_acq_32(&br->br_prod_head, prod_head, prod_next)); | } while (!atomic_cmpset_acq_32(&br->br_prod_head, prod_head, prod_next)); | ||||
#ifdef DEBUG_BUFRING | #ifdef DEBUG_BUFRING | ||||
if (br->br_ring[prod_head].bre_ptr != NULL) | if (br->br_ring[prod_head].bre_ptr != NULL) | ||||
panic("dangling value in enqueue"); | panic("dangling value in enqueue"); | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 111 Lines • ▼ Show 20 Lines | |||||
#ifdef DEBUG_BUFRING | #ifdef DEBUG_BUFRING | ||||
if (!mtx_owned(br->br_lock)) | if (!mtx_owned(br->br_lock)) | ||||
panic("lock not held on single consumer dequeue"); | panic("lock not held on single consumer dequeue"); | ||||
if (br->br_cons_tail != cons_head) | if (br->br_cons_tail != cons_head) | ||||
panic("inconsistent list cons_tail=%d cons_head=%d", | panic("inconsistent list cons_tail=%d cons_head=%d", | ||||
br->br_cons_tail, cons_head); | br->br_cons_tail, cons_head); | ||||
#endif | #endif | ||||
atomic_store_rel_32(&br->br_cons_tail, cons_next); | atomic_store_rel_32(&br->br_cons_tail, cons_next); | ||||
if (br->br_closed == TRUE && buf_ring_count(br) < (br->br_prod_size >> 1)) { | |||||
br->br_closed = FALSE; | |||||
if_rexmt_start(br->br_id, br->br_nqs); | |||||
} | |||||
return ((void *)(uintptr_t)buf); | return ((void *)(uintptr_t)buf); | ||||
} | } | ||||
/* | /* | ||||
* single-consumer advance after a peek | * single-consumer advance after a peek | ||||
* use where it is protected by a lock | * use where it is protected by a lock | ||||
* e.g. a network driver's tx queue lock | * e.g. a network driver's tx queue lock | ||||
*/ | */ | ||||
Show All 16 Lines | buf_ring_advance_sc(struct buf_ring *br) | ||||
* 1) it assures that the load of ring[cons_head] has completed | * 1) it assures that the load of ring[cons_head] has completed | ||||
* (only the most perverted architecture or compiler would | * (only the most perverted architecture or compiler would | ||||
* consider re-ordering a = *x; *x = b) | * consider re-ordering a = *x; *x = b) | ||||
* 2) it allows us to enforce global ordering of the cons_tail | * 2) it allows us to enforce global ordering of the cons_tail | ||||
* update with an atomic_store_rel_32 | * update with an atomic_store_rel_32 | ||||
*/ | */ | ||||
br->br_ring[cons_head].bre_ptr = NULL; | br->br_ring[cons_head].bre_ptr = NULL; | ||||
atomic_store_rel_32(&br->br_cons_tail, cons_next); | atomic_store_rel_32(&br->br_cons_tail, cons_next); | ||||
if (br->br_closed == TRUE && buf_ring_count(br) < (br->br_prod_size >> 1)) { | |||||
br->br_closed = FALSE; | |||||
if_rexmt_start(br->br_id, br->br_nqs); | |||||
} | } | ||||
} | |||||
/* | /* | ||||
* Used to return a buffer (most likely already there) | * Used to return a buffer (most likely already there) | ||||
 * to the top of the ring. The caller should *not*
* have used any dequeue to pull it out of the ring | * have used any dequeue to pull it out of the ring | ||||
* but instead should have used the peek() function. | * but instead should have used the peek() function. | ||||
* This is normally used where the transmit queue | * This is normally used where the transmit queue | ||||
 * of a driver is full, and an mbuf must be returned.
▲ Show 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | buf_ring_count(struct buf_ring *br) | ||||
* understands that this is only a point in time snapshot | * understands that this is only a point in time snapshot | ||||
*/ | */ | ||||
return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail) | return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail) | ||||
& br->br_prod_mask); | & br->br_prod_mask); | ||||
} | } | ||||
struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags, | struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags, | ||||
struct mtx *); | struct mtx *, int id, int nqs); | ||||
struct buf_ring *buf_ring_aligned_alloc(int count, struct malloc_type *type, int flags, | struct buf_ring *buf_ring_aligned_alloc(int count, struct malloc_type *type, int flags, | ||||
struct mtx *); | struct mtx *); | ||||
void buf_ring_free(struct buf_ring *br, struct malloc_type *type); | void buf_ring_free(struct buf_ring *br, struct malloc_type *type); | ||||
#endif | #endif |