Index: cddl/contrib/opensolaris/lib/libzpool/common/taskq.c =================================================================== --- cddl/contrib/opensolaris/lib/libzpool/common/taskq.c +++ cddl/contrib/opensolaris/lib/libzpool/common/taskq.c @@ -174,6 +174,7 @@ t->tqent_prev->tqent_next = t; t->tqent_func = func; t->tqent_arg = arg; + t->tqent_flags = 0; cv_signal(&tq->tq_dispatch_cv); mutex_exit(&tq->tq_lock); } Index: share/man/man9/taskqueue.9 =================================================================== --- share/man/man9/taskqueue.9 +++ share/man/man9/taskqueue.9 @@ -285,10 +285,9 @@ .Fn taskqueue_drain_all function is used to wait for all pending and running tasks that are enqueued on the taskqueue to finish. -The caller must arrange that the tasks are not re-enqueued. -Note that +Tasks posted to the taskqueue after .Fn taskqueue_drain_all -currently does not handle tasks with delayed enqueueing. +begins processing are ignored. .Pp The .Fn taskqueue_block Index: share/mk/bsd.lib.mk =================================================================== --- share/mk/bsd.lib.mk +++ share/mk/bsd.lib.mk @@ -81,12 +81,15 @@ .cc.o .C.o .cpp.o .cxx.o: ${CXX} ${STATIC_CXXFLAGS} ${CXXFLAGS} -c ${.IMPSRC} -o ${.TARGET} + ${CTFCONVERT_CMD} .cc.po .C.po .cpp.po .cxx.po: ${CXX} ${PO_FLAG} ${STATIC_CXXFLAGS} ${PO_CXXFLAGS} -c ${.IMPSRC} -o ${.TARGET} + ${CTFCONVERT_CMD} .cc.So .C.So .cpp.So .cxx.So: ${CXX} ${PICFLAG} -DPIC ${SHARED_CXXFLAGS} ${CXXFLAGS} -c ${.IMPSRC} -o ${.TARGET} + ${CTFCONVERT_CMD} .f.po: ${FC} -pg ${FFLAGS} -o ${.TARGET} -c ${.IMPSRC} Index: share/mk/sys.mk =================================================================== --- share/mk/sys.mk +++ share/mk/sys.mk @@ -242,9 +242,11 @@ .cc .cpp .cxx .C: ${CXX} ${CXXFLAGS} ${LDFLAGS} ${.IMPSRC} ${LDLIBS} -o ${.TARGET} + ${CTFCONVERT_CMD} .cc.o .cpp.o .cxx.o .C.o: ${CXX} ${CXXFLAGS} -c ${.IMPSRC} + ${CTFCONVERT_CMD} .m.o: ${OBJC} ${OBJCFLAGS} -c ${.IMPSRC} Index: 
sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c =================================================================== --- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c +++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c @@ -300,10 +300,9 @@ } /* - * Check the other, lower-index DVAs to see if they're on the same - * vdev as the child we picked. If they are, use them since they - * are likely to have been allocated from the primary metaslab in - * use at the time, and hence are more likely to have locality with + * Return the lowest-indexed, accessible, DVA of the child we picked. + * Lower DVAs are likely to have been allocated from the primary metaslab + * in use at the time, and hence are more likely to have locality with * single-copy data. */ static int @@ -320,7 +319,7 @@ if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred])) preferred = c; } - return (preferred); + return (preferred); } static int @@ -333,7 +332,6 @@ p = spa_get_random(mm->mm_preferred_cnt); return (vdev_mirror_dva_select(zio, p)); } - /* * To ensure we don't always favour the first matching vdev, * which could lead to wear leveling issues on SSD's, we Index: sys/dev/xen/netfront/netfront.c =================================================================== --- sys/dev/xen/netfront/netfront.c +++ sys/dev/xen/netfront/netfront.c @@ -286,6 +286,8 @@ multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; mmu_update_t rx_mmu[NET_RX_RING_SIZE]; struct ifmedia sc_media; + + bool xn_resuming; }; #define rx_mbufs xn_cdata.xn_rx_chain @@ -501,7 +503,8 @@ { struct netfront_info *info = device_get_softc(dev); + info->xn_resuming = true; netif_disconnect_backend(info); return (0); } @@ -2002,13 +2006,28 @@ int err; err = 0; + + if (!np->xn_resuming || + ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities) + != np->xn_ifp->if_capenable)) { + /* + * Check if current enabled capabilities are available, + * if not switch to default 
capabilities. + */ #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) - if ((np->xn_ifp->if_capenable & IFCAP_LRO) != 0) - tcp_lro_free(&np->xn_lro); + if ((np->xn_ifp->if_capenable & IFCAP_LRO) != 0) + tcp_lro_free(&np->xn_lro); #endif - np->xn_ifp->if_capenable = - np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4); - np->xn_ifp->if_hwassist &= ~CSUM_TSO; + np->xn_ifp->if_capenable = + np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4); + np->xn_ifp->if_hwassist &= ~CSUM_TSO; + } else { + /* + * What we have currently enabled is supported by the + * new host, no need to change anything. + */ + return (0); + } #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6)) if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) != 0) { err = tcp_lro_init(&np->xn_lro); @@ -2147,7 +2166,7 @@ netif_free(info); - return 0; + return (0); } static void Index: sys/kern/subr_taskqueue.c =================================================================== --- sys/kern/subr_taskqueue.c +++ sys/kern/subr_taskqueue.c @@ -56,6 +56,8 @@ TAILQ_ENTRY(taskqueue_busy) tb_link; }; +struct task * const TB_DRAIN_WAITER = (struct task *)0x1; + struct taskqueue { STAILQ_HEAD(, task) tq_queue; taskqueue_enqueue_fn tq_enqueue; @@ -241,6 +243,7 @@ /* Return with lock released. */ return (0); } + int taskqueue_enqueue(struct taskqueue *queue, struct task *task) { @@ -302,14 +305,84 @@ } static void -taskqueue_drain_running(struct taskqueue *queue) +taskqueue_task_nop_fn(void *context, int pending) { +} - while (!TAILQ_EMPTY(&queue->tq_active)) - TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex, - PWAIT, "-", 0); +/* + * Block until all currently queued tasks in this taskqueue + * have begun execution. Tasks queued during execution of + * this function are ignored. 
+ */ +static void +taskqueue_drain_tq_queue(struct taskqueue *queue) +{ + struct task t_barrier; + + if (STAILQ_EMPTY(&queue->tq_queue)) + return; + + /* + * Enqueue our barrier with the lowest possible priority + * so we are inserted after all current tasks. + */ + TASK_INIT(&t_barrier, 0, taskqueue_task_nop_fn, &t_barrier); + taskqueue_enqueue_locked(queue, &t_barrier); + + /* + * Raise the barrier's priority so newly queued tasks cannot + * pass it. + */ + t_barrier.ta_priority = USHRT_MAX; + + /* + * Once the barrier has executed, all previously queued tasks + * have completed or are currently executing. + */ + while (t_barrier.ta_pending != 0) + TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0); } +/* + * Block until all currently executing tasks for this taskqueue + * complete. Tasks that begin execution during the execution + * of this function are ignored. + */ +static void +taskqueue_drain_tq_active(struct taskqueue *queue) +{ + struct taskqueue_busy tb_marker, *tb_first; + + if (TAILQ_EMPTY(&queue->tq_active)) + return; + + /* Block taskq_terminate().*/ + queue->tq_callouts++; + + /* + * Wait for all currently executing taskqueue threads + * to go idle. + */ + tb_marker.tb_running = TB_DRAIN_WAITER; + TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link); + while (TAILQ_FIRST(&queue->tq_active) != &tb_marker) + TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0); + TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link); + + /* + * Wakeup any other drain waiter that happend to queue up + * without any interveining active thread. 
+ */ + tb_first = TAILQ_FIRST(&queue->tq_active); + if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER) + wakeup(tb_first); + + /* Release taskqueue_terminate().*/ + queue->tq_callouts--; + if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0) + wakeup_one(queue->tq_threads); +} + void taskqueue_block(struct taskqueue *queue) { @@ -334,14 +407,16 @@ taskqueue_run_locked(struct taskqueue *queue) { struct taskqueue_busy tb; + struct taskqueue_busy *tb_first; struct task *task; int pending; TQ_ASSERT_LOCKED(queue); tb.tb_running = NULL; - TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link); while (STAILQ_FIRST(&queue->tq_queue)) { + TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link); + /* * Carefully remove the first task from the queue and * zero its pending count. @@ -358,10 +433,13 @@ TQ_LOCK(queue); tb.tb_running = NULL; wakeup(task); + + TAILQ_REMOVE(&queue->tq_active, &tb, tb_link); + tb_first = TAILQ_FIRST(&queue->tq_active); + if (tb_first != NULL && + tb_first->tb_running == TB_DRAIN_WAITER) + wakeup(tb_first); } - TAILQ_REMOVE(&queue->tq_active, &tb, tb_link); - if (TAILQ_EMPTY(&queue->tq_active)) - wakeup(&queue->tq_active); } void @@ -448,19 +526,13 @@ void taskqueue_drain_all(struct taskqueue *queue) { - struct task *task; if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); - task = STAILQ_LAST(&queue->tq_queue, task, ta_link); - if (task != NULL) - while (task->ta_pending != 0) - TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0); - taskqueue_drain_running(queue); - KASSERT(STAILQ_EMPTY(&queue->tq_queue), - ("taskqueue queue is not empty after draining")); + taskqueue_drain_tq_queue(queue); + taskqueue_drain_tq_active(queue); TQ_UNLOCK(queue); } Index: sys/sys/bitstring.h =================================================================== --- sys/sys/bitstring.h +++ sys/sys/bitstring.h @@ -29,118 +29,233 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* + * Copyright (c) 2014 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * * $FreeBSD$ */ #ifndef _SYS_BITSTRING_H_ #define _SYS_BITSTRING_H_ -typedef unsigned char bitstr_t; +typedef unsigned long bitstr_t; -/* internal macros */ - /* byte of the bitstring bit is in */ -#define _bit_byte(bit) \ - ((bit) >> 3) +/*---------------------- Private Implementation Details ----------------------*/ +#define _BITSTR_MASK (~0UL) +#define _BITSTR_BITS (sizeof(bitstr_t) * 8) - /* mask for the bit within its byte */ -#define _bit_mask(bit) \ - (1 << ((bit)&0x7)) +/* Index of the bitstr_t in a bit string that contain bit '_bit'. */ +static inline int +_bit_idx(int _bit) +{ + return (_bit / _BITSTR_BITS); +} -/* external macros */ - /* bytes in a bitstring of nbits bits */ -#define bitstr_size(nbits) \ - (((nbits) + 7) >> 3) +/* Bit number within the bitstr_t at '_bit_idx(_bit)'. */ +static inline int +_bit_offset(int _bit) +{ + return (_bit % _BITSTR_BITS); +} - /* allocate a bitstring */ -#define bit_alloc(nbits) \ - (bitstr_t *)calloc((size_t)bitstr_size(nbits), sizeof(bitstr_t)) +/* Mask for the testing/setting '_bit' within its bittstr_t. */ +static inline bitstr_t +_bit_mask(int _bit) +{ + return (1UL << _bit_offset(_bit)); +} - /* allocate a bitstring on the stack */ +/* Construct a bitstr_t with bits '_start' ... '_stop' inclusive set. */ +static inline bitstr_t +_bit_make_mask(int _start, int _stop) +{ + return ((_BITSTR_MASK << _bit_offset(_start)) & + (_BITSTR_MASK >> (_BITSTR_BITS - _bit_offset(_stop) - 1))); +} + +/*----------------------------- Public Interface -----------------------------*/ +/* Number of bytes consumed by a bit string of '_nbits' bits */ +#define bitstr_size(_nbits) (((_nbits) + _BITSTR_BITS - 1) / 8) + +/* Allocate a bit string of size '_nbits', initialized with no bits set. 
*/ +#ifdef _KERNEL +static inline bitstr_t * +bit_alloc(int _nbits, struct malloc_type *type, int flags) +{ + return ((bitstr_t *)malloc(bitstr_size(_nbits), type, flags | M_ZERO)); +} +#else +static inline bitstr_t * +bit_alloc(int _nbits) +{ + return ((bitstr_t *)calloc(bitstr_size(_nbits), 1)); +} +#endif + +/* Declare a bit string, for stack or datastructure use, of size '_nbits'. */ #define bit_decl(name, nbits) \ - ((name)[bitstr_size(nbits)]) + ((name)[bitstr_size(nbits) / sizeof(bitstr_t)]) - /* is bit N of bitstring name set? */ -#define bit_test(name, bit) \ - ((name)[_bit_byte(bit)] & _bit_mask(bit)) +/* Is bit '_bit' of bit string '_bitstr' set? */ +static inline int +bit_test(const bitstr_t *_bitstr, int _bit) +{ + return ((_bitstr[_bit_idx(_bit)] & _bit_mask(_bit)) != 0); +} - /* set bit N of bitstring name */ -#define bit_set(name, bit) \ - ((name)[_bit_byte(bit)] |= _bit_mask(bit)) +/* Set bit '_bit' of bit string '_bitstr'. */ +static inline void +bit_set(bitstr_t *_bitstr, int _bit) +{ + _bitstr[_bit_idx(_bit)] |= _bit_mask(_bit); +} - /* clear bit N of bitstring name */ -#define bit_clear(name, bit) \ - ((name)[_bit_byte(bit)] &= ~_bit_mask(bit)) +/* Clear bit '_bit' of bit string '_bitstr'. */ +static inline void +bit_clear(bitstr_t *_bitstr, int _bit) +{ + _bitstr[_bit_idx(_bit)] &= ~_bit_mask(_bit); +} - /* clear bits start ... stop in bitstring */ -#define bit_nclear(name, start, stop) do { \ - register bitstr_t *_name = (name); \ - register int _start = (start), _stop = (stop); \ - register int _startbyte = _bit_byte(_start); \ - register int _stopbyte = _bit_byte(_stop); \ - if (_startbyte == _stopbyte) { \ - _name[_startbyte] &= ((0xff >> (8 - (_start&0x7))) | \ - (0xff << ((_stop&0x7) + 1))); \ - } else { \ - _name[_startbyte] &= 0xff >> (8 - (_start&0x7)); \ - while (++_startbyte < _stopbyte) \ - _name[_startbyte] = 0; \ - _name[_stopbyte] &= 0xff << ((_stop&0x7) + 1); \ - } \ -} while (0) +/* Set bits '_start' ... 
'_stop' inclusive in bit string '_bitstr'. */ +static inline void +bit_nset(bitstr_t *_bitstr, int _start, int _stop) +{ + bitstr_t *_stopbitstr; - /* set bits start ... stop in bitstring */ -#define bit_nset(name, start, stop) do { \ - register bitstr_t *_name = (name); \ - register int _start = (start), _stop = (stop); \ - register int _startbyte = _bit_byte(_start); \ - register int _stopbyte = _bit_byte(_stop); \ - if (_startbyte == _stopbyte) { \ - _name[_startbyte] |= ((0xff << (_start&0x7)) & \ - (0xff >> (7 - (_stop&0x7)))); \ - } else { \ - _name[_startbyte] |= 0xff << ((_start)&0x7); \ - while (++_startbyte < _stopbyte) \ - _name[_startbyte] = 0xff; \ - _name[_stopbyte] |= 0xff >> (7 - (_stop&0x7)); \ - } \ -} while (0) + _stopbitstr = _bitstr + _bit_idx(_stop); + _bitstr += _bit_idx(_start); - /* find first bit clear in name */ -#define bit_ffc(name, nbits, value) do { \ - register bitstr_t *_name = (name); \ - register int _byte, _nbits = (nbits); \ - register int _stopbyte = _bit_byte(_nbits - 1), _value = -1; \ - if (_nbits > 0) \ - for (_byte = 0; _byte <= _stopbyte; ++_byte) \ - if (_name[_byte] != 0xff) { \ - bitstr_t _lb; \ - _value = _byte << 3; \ - for (_lb = _name[_byte]; (_lb&0x1); \ - ++_value, _lb >>= 1); \ - break; \ - } \ - if (_value >= nbits) \ - _value = -1; \ - *(value) = _value; \ -} while (0) + if (_bitstr == _stopbitstr) { + *_bitstr |= _bit_make_mask(_start, _stop); + } else { + *_bitstr |= _bit_make_mask(_start, _BITSTR_BITS - 1); + while (++_bitstr < _stopbitstr) + *_bitstr = _BITSTR_MASK; + *_stopbitstr |= _bit_make_mask(0, _stop); + } +} - /* find first bit set in name */ -#define bit_ffs(name, nbits, value) do { \ - register bitstr_t *_name = (name); \ - register int _byte, _nbits = (nbits); \ - register int _stopbyte = _bit_byte(_nbits - 1), _value = -1; \ - if (_nbits > 0) \ - for (_byte = 0; _byte <= _stopbyte; ++_byte) \ - if (_name[_byte]) { \ - bitstr_t _lb; \ - _value = _byte << 3; \ - for (_lb = _name[_byte]; 
!(_lb&0x1); \ - ++_value, _lb >>= 1); \ - break; \ - } \ - if (_value >= nbits) \ - _value = -1; \ - *(value) = _value; \ -} while (0) +/* Clear bits '_start' ... '_stop' inclusive in bit string '_bitstr'. */ +static inline void +bit_nclear(bitstr_t *_bitstr, int _start, int _stop) +{ + bitstr_t *_stopbitstr; -#endif /* !_SYS_BITSTRING_H_ */ + _stopbitstr = _bitstr + _bit_idx(_stop); + _bitstr += _bit_idx(_start); + + if (_bitstr == _stopbitstr) { + *_bitstr &= ~_bit_make_mask(_start, _stop); + } else { + *_bitstr &= ~_bit_make_mask(_start, _BITSTR_BITS - 1); + while (++_bitstr < _stopbitstr) + *_bitstr = 0; + *_stopbitstr &= ~_bit_make_mask(0, _stop); + } +} + +/* + * Find the first bit set in bit string '_bitstr', at or after + * bit '_start'. + */ +static inline void +bit_ffs_at(bitstr_t *_bitstr, int _start, int _nbits, int *_result) +{ + bitstr_t *_curbitstr; + bitstr_t *_stopbitstr; + bitstr_t _test; + int _value, _offset; + + if (_nbits > 0) { + _curbitstr = _bitstr + _bit_idx(_start); + _stopbitstr = _bitstr + _bit_idx(_nbits - 1); + + _test = *_curbitstr; + if (_bit_offset(_start) != 0) + _test &= _bit_make_mask(_start, _BITSTR_BITS - 1); + while (_test == 0 && _curbitstr < _stopbitstr) + _test = *(++_curbitstr); + + _offset = ffsl(_test); + _value = ((_curbitstr - _bitstr) * _BITSTR_BITS) + _offset - 1; + if (_offset == 0 || _value >= _nbits) + _value = -1; + } else { + _value = -1; + } + *_result = _value; +} + +/* + * Find the first bit clear in bit string '_bitstr', at or after + * bit '_start'. 
+ */ +static inline void +bit_ffc_at(bitstr_t *_bitstr, int _start, int _nbits, int *_result) +{ + bitstr_t *_curbitstr; + bitstr_t *_stopbitstr; + bitstr_t _test; + int _value, _offset; + + if (_nbits > 0) { + _curbitstr = _bitstr + _bit_idx(_start); + _stopbitstr = _bitstr + _bit_idx(_nbits - 1); + + _test = *_curbitstr; + if (_bit_offset(_start) != 0) + _test |= _bit_make_mask(0, _start - 1); + while (_test == _BITSTR_MASK && _curbitstr < _stopbitstr) + _test = *(++_curbitstr); + + _offset = ffsl(~_test); + _value = ((_curbitstr - _bitstr) * _BITSTR_BITS) + _offset - 1; + if (_offset == 0 || _value >= _nbits) + _value = -1; + } else { + _value = -1; + } + *_result = _value; +} + +/* Find the first bit set in bit string '_bitstr'. */ +static inline void +bit_ffs(bitstr_t *_bitstr, int _nbits, int *_result) +{ + bit_ffs_at(_bitstr, /*start*/0, _nbits, _result); +} + +/* Find the first bit clear in bit string '_bitstr'. */ +static inline void +bit_ffc(bitstr_t *_bitstr, int _nbits, int *_result) +{ + bit_ffc_at(_bitstr, /*start*/0, _nbits, _result); +} + +#endif /* _SYS_BITSTRING_H_ */ Index: sys/xen/evtchn/evtchn_dev.c =================================================================== --- sys/xen/evtchn/evtchn_dev.c +++ sys/xen/evtchn/evtchn_dev.c @@ -35,7 +35,6 @@ #include typedef struct evtchn_sotfc { - struct selinfo ev_rsel; } evtchn_softc_t; @@ -58,14 +57,6 @@ static struct mtx lock, upcall_lock; -static d_read_t evtchn_read; -static d_write_t evtchn_write; -static d_ioctl_t evtchn_ioctl; -static d_poll_t evtchn_poll; -static d_open_t evtchn_open; -static d_close_t evtchn_close; - - void evtchn_device_upcall(evtchn_port_t port) { @@ -74,14 +65,13 @@ evtchn_mask_port(port); evtchn_clear_port(port); - if ( ring != NULL ) { - if ( (ring_prod - ring_cons) < EVTCHN_RING_SIZE ) { + if (ring != NULL) { + if ((ring_prod - ring_cons) < EVTCHN_RING_SIZE) { ring[EVTCHN_RING_MASK(ring_prod)] = (uint16_t)port; - if ( ring_cons == ring_prod++ ) { + if (ring_cons == 
ring_prod++) { wakeup(evtchn_waddr); } - } - else { + } else { ring_overflow = 1; } } @@ -101,24 +91,23 @@ { int rc; unsigned int count, c, p, sst = 0, bytes1 = 0, bytes2 = 0; + count = uio->uio_resid; - count &= ~1; /* even number of bytes */ - if ( count == 0 ) - { + if (count == 0) { rc = 0; goto out; } - if ( count > PAGE_SIZE ) + if (count > PAGE_SIZE) count = PAGE_SIZE; - for ( ; ; ) { - if ( (c = ring_cons) != (p = ring_prod) ) + for (;;) { + if ((c = ring_cons) != (p = ring_prod)) break; - if ( ring_overflow ) { + if (ring_overflow) { rc = EFBIG; goto out; } @@ -135,30 +124,30 @@ } /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ - if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 ) { - bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(uint16_t); + if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { + bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * + sizeof(uint16_t); bytes2 = EVTCHN_RING_MASK(p) * sizeof(uint16_t); - } - else { + } else { bytes1 = (p - c) * sizeof(uint16_t); bytes2 = 0; } /* Truncate chunks according to caller's maximum byte count. 
*/ - if ( bytes1 > count ) { + if (bytes1 > count) { bytes1 = count; bytes2 = 0; } - else if ( (bytes1 + bytes2) > count ) { + else if ((bytes1 + bytes2) > count) { bytes2 = count - bytes1; } - if ( uiomove(&ring[EVTCHN_RING_MASK(c)], bytes1, uio) || - ((bytes2 != 0) && uiomove(&ring[0], bytes2, uio))) - /* keeping this around as its replacement is not equivalent + if (uiomove(&ring[EVTCHN_RING_MASK(c)], bytes1, uio) || + ((bytes2 != 0) && uiomove(&ring[0], bytes2, uio))) { + /* + * keeping this around as its replacement is not equivalent * copyout(&ring[0], &buf[bytes1], bytes2) */ - { rc = EFAULT; goto out; } @@ -169,40 +158,39 @@ out: - return rc; + return (rc); } static int evtchn_write(struct cdev *dev, struct uio *uio, int ioflag) { int rc, i, count; + uint16_t *kbuf; count = uio->uio_resid; - - uint16_t *kbuf = (uint16_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK); + kbuf = (uint16_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK); + if (kbuf == NULL) + return (ENOMEM); - if ( kbuf == NULL ) - return ENOMEM; - count &= ~1; /* even number of bytes */ - if ( count == 0 ) { + if (count == 0) { rc = 0; goto out; } - if ( count > PAGE_SIZE ) + if (count > PAGE_SIZE) count = PAGE_SIZE; - if ( uiomove(kbuf, count, uio) != 0 ) { + if (uiomove(kbuf, count, uio) != 0) { rc = EFAULT; goto out; } mtx_lock_spin(&lock); - for ( i = 0; i < (count/2); i++ ) - if ( test_bit(kbuf[i], &bound_ports[0]) ) + for (i = 0; i < (count/2); i++) + if (test_bit(kbuf[i], &bound_ports[0])) evtchn_unmask_port(kbuf[i]); mtx_unlock_spin(&lock); @@ -210,7 +198,7 @@ out: free(kbuf, M_DEVBUF); - return rc; + return (rc); } static int @@ -219,22 +207,21 @@ { int rc = 0; -#ifdef NOTYET mtx_lock_spin(&lock); - switch ( cmd ) + switch (cmd) { case EVTCHN_RESET: __evtchn_reset_buffer_ring(); break; case EVTCHN_BIND: - if ( !synch_test_and_set_bit((uintptr_t)arg, &bound_ports[0]) ) + if (!synch_test_and_set_bit((uintptr_t)arg, &bound_ports[0])) unmask_evtchn((uintptr_t)arg); else rc = EINVAL; break; case 
EVTCHN_UNBIND: - if ( synch_test_and_clear_bit((uintptr_t)arg, &bound_ports[0]) ) + if (synch_test_and_clear_bit((uintptr_t)arg, &bound_ports[0])) mask_evtchn((uintptr_t)arg); else rc = EINVAL; @@ -244,30 +231,27 @@ break; } - mtx_unlock_spin(&lock); -#endif + mtx_unlock_spin(&lock); - return rc; + return (rc); } static int evtchn_poll(struct cdev *dev, int poll_events, struct thread *td) { - evtchn_softc_t *sc; unsigned int mask = POLLOUT | POLLWRNORM; sc = dev->si_drv1; - if ( ring_cons != ring_prod ) + if (ring_cons != ring_prod) mask |= POLLIN | POLLRDNORM; - else if ( ring_overflow ) + else if (ring_overflow) mask = POLLERR; else selrecord(td, &sc->ev_rsel); - - return mask; + return (mask); } @@ -277,21 +261,20 @@ uint16_t *_ring; if (flag & O_NONBLOCK) - return EBUSY; + return (EBUSY); - if ( synch_test_and_set_bit(0, &evtchn_dev_inuse) ) - return EBUSY; + if (synch_test_and_set_bit(0, &evtchn_dev_inuse)) + return (EBUSY); - if ( (_ring = (uint16_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK)) == NULL ) - return ENOMEM; + if ((_ring = (uint16_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK)) == NULL) + return (ENOMEM); mtx_lock_spin(&lock); ring = _ring; __evtchn_reset_buffer_ring(); mtx_unlock_spin(&lock); - - return 0; + return (0); } static int @@ -304,55 +287,109 @@ ring = NULL; } mtx_lock_spin(&lock); - for ( i = 0; i < NR_EVENT_CHANNELS; i++ ) - if ( synch_test_and_clear_bit(i, &bound_ports[0]) ) + for (i = 0; i < NR_EVENT_CHANNELS; i++) { + if (synch_test_and_clear_bit(i, &bound_ports[0])) evtchn_mask_port(i); + } mtx_unlock_spin(&lock); evtchn_dev_inuse = 0; - return 0; + return (0); } static struct cdevsw evtchn_devsw = { - .d_version = D_VERSION, - .d_open = evtchn_open, - .d_close = evtchn_close, - .d_read = evtchn_read, - .d_write = evtchn_write, - .d_ioctl = evtchn_ioctl, - .d_poll = evtchn_poll, - .d_name = "evtchn", + .d_version = D_VERSION, + .d_open = evtchn_open, + .d_close = evtchn_close, + .d_read = evtchn_read, + .d_write = evtchn_write, + .d_ioctl 
= evtchn_ioctl, + .d_poll = evtchn_poll, + .d_name = "evtchn", }; +/*------------------ Private Device Attachment Functions --------------------*/ +/** + * \brief Identify device instances of our type for the parent bus. + * + * \param driver Pointer to the driver type of the instances to identify. + * \param parent NewBus device_t for our parent bus. + */ +void +evtchn_identify(driver_t *driver, device_t parent) +{ + /* + * A single instance of the event channel driver is + * always present in a system operating under Xen. + */ + BUS_ADD_CHILD(parent, 0, driver->name, 0); +} -/* XXX - if this device is ever supposed to support use by more than one process - * this global static will have to go away +/** + * \brief Probe for the existence of the event channel driver. + * + * \param dev NewBus device_t for this event channel driver instance. + * + * \return Always returns 0 indicating success. */ -static struct cdev *evtchn_dev; +static int +evtchn_probe(device_t dev) +{ + device_set_desc(dev, "Userspace Event Channel Driver"); + return (0); +} - -static int -evtchn_dev_init(void *dummy __unused) +/** + * \brief Attach the userspace event channel driver. + * + * \param dev NewBus device_t for an event channel driver instance. + * + * \return On success, 0. Otherwise an errno value indicating the + * type of failure. + */ +static int +evtchn_attach(device_t dev) { - /* XXX I believe we don't need these leaving them here for now until we - * have some semblance of it working + /* + * XXX - I believe we don't need these leaving them here for + * now until we have some semblance of it working */ mtx_init(&upcall_lock, "evtchup", NULL, MTX_DEF); /* (DEVFS) create '/dev/misc/evtchn'. 
*/ - evtchn_dev = make_dev(&evtchn_devsw, 0, UID_ROOT, GID_WHEEL, 0600, "xen/evtchn"); + make_dev(&evtchn_devsw, 0, UID_ROOT, GID_WHEEL, 0600, "xen/evtchn"); mtx_init(&lock, "evch", NULL, MTX_SPIN | MTX_NOWITNESS); - evtchn_dev->si_drv1 = malloc(sizeof(evtchn_softc_t), M_DEVBUF, M_WAITOK); - bzero(evtchn_dev->si_drv1, sizeof(evtchn_softc_t)); +#if 0 + evtchn_dev->si_drv1 = + malloc(sizeof(evtchn_softc_t), M_DEVBUF, M_ZERO|M_WAITOK); +#endif - if (bootverbose) - printf("Event-channel device installed.\n"); - - return 0; + return (0); } -SYSINIT(evtchn_dev_init, SI_SUB_DRIVERS, SI_ORDER_FIRST, evtchn_dev_init, NULL); +/*-------------------- Private Device Attachment Data -----------------------*/ +static device_method_t evtchn_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, evtchn_identify), + DEVMETHOD(device_probe, evtchn_probe), + DEVMETHOD(device_attach, evtchn_attach), + DEVMETHOD(device_detach, evtchn_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, evtchn_suspend), + DEVMETHOD(device_resume, evtchn_resume), + + DEVMETHOD_END +}; + +static driver_t evtchn_driver = { + "evtchn", + evtchn_methods, + sizeof(evtchn_softc_t), +}; +devclass_t evtchn_devclass; + +DRIVER_MODULE(evtchn, xenstore, evtchn_driver, evtchn_devclass, 0, 0);