Index: stable/8/sys/amd64/include/xen/xenfunc.h
===================================================================
--- stable/8/sys/amd64/include/xen/xenfunc.h	(revision 209060)
+++ stable/8/sys/amd64/include/xen/xenfunc.h	(revision 209061)
@@ -1,83 +1,82 @@
-/*
- *
- * Copyright (c) 2004,2005 Kip Macy
+/*-
+ * Copyright (c) 2004, 2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 4. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
  *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * + * $FreeBSD$ */ - #ifndef _XEN_XENFUNC_H_ #define _XEN_XENFUNC_H_ #ifdef XENHVM #include #else #include #include #endif #define BKPT __asm__("int3"); #define XPQ_CALL_DEPTH 5 #define XPQ_CALL_COUNT 2 #define PG_PRIV PG_AVAIL3 typedef struct { unsigned long pt_ref; unsigned long pt_eip[XPQ_CALL_COUNT][XPQ_CALL_DEPTH]; } pteinfo_t; extern pteinfo_t *pteinfo_list; #ifdef XENDEBUG_LOW #define __PRINTK(x) printk x #else #define __PRINTK(x) #endif char *xen_setbootenv(char *cmd_line); int xen_boothowto(char *envp); void _xen_machphys_update(vm_paddr_t, vm_paddr_t, char *file, int line); #ifdef INVARIANTS #define xen_machphys_update(a, b) _xen_machphys_update((a), (b), __FILE__, __LINE__) #else #define xen_machphys_update(a, b) _xen_machphys_update((a), (b), NULL, 0) #endif #ifndef XENHVM void xen_update_descriptor(union descriptor *, union descriptor *); #endif extern struct mtx balloon_lock; #if 0 #define balloon_lock(__flags) mtx_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) mtx_unlock_irqrestore(&balloon_lock, __flags) #else #define balloon_lock(__flags) __flags = 1 #define balloon_unlock(__flags) __flags = 0 #endif #endif /* _XEN_XENFUNC_H_ */ Index: stable/8/sys/amd64/include/xen/xenvar.h =================================================================== --- stable/8/sys/amd64/include/xen/xenvar.h (revision 209060) +++ stable/8/sys/amd64/include/xen/xenvar.h (revision 209061) @@ -1,122 +1,120 @@ -/* +/*- * Copyright (c) 2008 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef XENVAR_H_ #define XENVAR_H_ #define XBOOTUP 0x1 #define XPMAP 0x2 extern int xendebug_flags; #ifndef NOXENDEBUG #define XENPRINTF printk #else #define XENPRINTF printf #endif #include #if 0 #define TRACE_ENTER XENPRINTF("(file=%s, line=%d) entered %s\n", __FILE__, __LINE__, __FUNCTION__) #define TRACE_EXIT XENPRINTF("(file=%s, line=%d) exiting %s\n", __FILE__, __LINE__, __FUNCTION__) #define TRACE_DEBUG(argflags, _f, _a...) \ if (xendebug_flags & argflags) XENPRINTF("(file=%s, line=%d) " _f "\n", __FILE__, __LINE__, ## _a); #else #define TRACE_ENTER #define TRACE_EXIT #define TRACE_DEBUG(argflags, _f, _a...) #endif #ifdef XENHVM static inline vm_paddr_t phystomach(vm_paddr_t pa) { return (pa); } static inline vm_paddr_t machtophys(vm_paddr_t ma) { return (ma); } #define vtomach(va) pmap_kextract((vm_offset_t) (va)) #define PFNTOMFN(pa) (pa) #define MFNTOPFN(ma) (ma) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (TRUE) #define PT_UPDATES_FLUSH() ((void)0) #else extern xen_pfn_t *xen_phys_machine; extern xen_pfn_t *xen_machine_phys; /* Xen starts physical pages after the 4MB ISA hole - * FreeBSD doesn't */ #undef ADD_ISA_HOLE /* XXX */ #ifdef ADD_ISA_HOLE #define ISA_INDEX_OFFSET 1024 #define ISA_PDR_OFFSET 1 #else #define ISA_INDEX_OFFSET 0 #define ISA_PDR_OFFSET 0 #endif #define PFNTOMFN(i) (xen_phys_machine[(i)]) #define MFNTOPFN(i) ((vm_paddr_t)xen_machine_phys[(i)]) #define VTOP(x) ((((uintptr_t)(x))) - KERNBASE) #define PTOV(x) (((uintptr_t)(x)) + KERNBASE) #define VTOPFN(x) (VTOP(x) >> PAGE_SHIFT) #define PFNTOV(x) PTOV((vm_paddr_t)(x) << PAGE_SHIFT) #define VTOMFN(va) (vtomach(va) >> PAGE_SHIFT) #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define phystomach(pa) (((vm_paddr_t)(PFNTOMFN((pa) >> PAGE_SHIFT))) << PAGE_SHIFT) #define machtophys(ma) (((vm_paddr_t)(MFNTOPFN((ma) >> PAGE_SHIFT))) << PAGE_SHIFT) #endif void xpq_init(void); int xen_create_contiguous_region(vm_page_t pages, int npages); void xen_destroy_contiguous_region(void * addr, int npages); #endif Index: stable/8/sys/amd64/include/xen =================================================================== --- stable/8/sys/amd64/include/xen (revision 209060) +++ stable/8/sys/amd64/include/xen (revision 209061) Property changes on: stable/8/sys/amd64/include/xen ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/amd64/include/xen:r199549,199997,204158,207673,208901 Index: stable/8/sys/cddl/contrib/opensolaris =================================================================== --- stable/8/sys/cddl/contrib/opensolaris (revision 209060) +++ stable/8/sys/cddl/contrib/opensolaris (revision 209061) Property changes on: stable/8/sys/cddl/contrib/opensolaris ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/cddl/contrib/opensolaris:r199549,199997,204158,207673,208901 Index: stable/8/sys/contrib/dev/acpica =================================================================== --- stable/8/sys/contrib/dev/acpica (revision 209060) +++ stable/8/sys/contrib/dev/acpica (revision 209061) Property changes on: stable/8/sys/contrib/dev/acpica ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica:r199549,199997,204158,207673,208901 Index: stable/8/sys/contrib/pf =================================================================== --- 
stable/8/sys/contrib/pf (revision 209060) +++ stable/8/sys/contrib/pf (revision 209061) Property changes on: stable/8/sys/contrib/pf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/pf:r199549,199997,204158,207673,208901 Index: stable/8/sys/dev/cxgb/sys/mvec.h =================================================================== --- stable/8/sys/dev/cxgb/sys/mvec.h (revision 209060) +++ stable/8/sys/dev/cxgb/sys/mvec.h (revision 209061) @@ -1,90 +1,88 @@ -/************************************************************************** - * - * Copyright (c) 2007,2009 Kip Macy kmacy@freebsd.org +/*- + * Copyright (c) 2007, 2009 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. The name of Kip Macy nor the names of other - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
* * $FreeBSD$ * - ***************************************************************************/ + */ #ifndef _MVEC_H_ #define _MVEC_H_ #include #define M_DDP 0x200000 /* direct data placement mbuf */ #define EXT_PHYS 10 /* physical/bus address */ #define m_cur_offset m_ext.ext_size /* override to provide ddp offset */ #define m_seq m_pkthdr.csum_data /* stored sequence */ #define m_ddp_gl m_ext.ext_buf /* ddp list */ #define m_ddp_flags m_pkthdr.csum_flags /* ddp flags */ #define m_ulp_mode m_pkthdr.tso_segsz /* upper level protocol */ static __inline void busdma_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *seg) { #if defined(__i386__) || defined(__amd64__) seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t)); seg->ds_len = m->m_len; #else int nsegstmp; bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0); #endif } int busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m, bus_dma_segment_t *segs, int *nsegs); void busdma_map_sg_vec(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs); static __inline int busdma_map_sgl(bus_dma_segment_t *vsegs, bus_dma_segment_t *segs, int count) { while (count--) { segs->ds_addr = pmap_kextract((vm_offset_t)vsegs->ds_addr); segs->ds_len = vsegs->ds_len; segs++; vsegs++; } return (0); } static __inline void m_freem_list(struct mbuf *m) { struct mbuf *n; while (m != NULL) { n = m->m_nextpkt; if (n != NULL) prefetch(n); m_freem(m); m = n; } } #endif /* _MVEC_H_ */ Index: stable/8/sys/dev/cxgb/sys/uipc_mvec.c =================================================================== --- stable/8/sys/dev/cxgb/sys/uipc_mvec.c (revision 209060) +++ stable/8/sys/dev/cxgb/sys/uipc_mvec.c (revision 209061) @@ -1,131 +1,127 @@ -/************************************************************************** - * - * Copyright (c) 2007-2008, Kip Macy kmacy@freebsd.org +/*- + * Copyright (c) 2007-2008 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. The name of Kip Macy nor the names of other - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * - ***************************************************************************/ + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INVARIANTS #define M_SANITY m_sanity #else #define M_SANITY(a, b) #endif int busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m, bus_dma_segment_t *segs, int *nsegs) { struct mbuf *n = *m; int seg_count, defragged = 0, err = 0; bus_dma_segment_t *psegs; KASSERT(n->m_pkthdr.len, ("packet has zero header len")); if (n->m_pkthdr.len <= PIO_LEN) return (0); retry: psegs = segs; seg_count = 0; if (n->m_next == NULL) { busdma_map_mbuf_fast(tag, map, n, segs); *nsegs = 1; return (0); } #if defined(__i386__) || defined(__amd64__) while (n && seg_count < TX_MAX_SEGS) { /* * firmware doesn't like empty segments */ if (__predict_true(n->m_len != 0)) { seg_count++; busdma_map_mbuf_fast(tag, map, n, psegs); psegs++; } n = n->m_next; } #else err = bus_dmamap_load_mbuf_sg(tag, map, *m, segs, &seg_count, 0); #endif if (seg_count == 0) { if (cxgb_debug) printf("empty segment chain\n"); err = EFBIG; goto err_out; } else if (err == EFBIG || seg_count >= TX_MAX_SEGS) { if (cxgb_debug) printf("mbuf chain too long: %d max allowed %d\n", seg_count, TX_MAX_SEGS); if (!defragged) { n = m_defrag(*m, M_DONTWAIT); if (n == NULL) { err = ENOBUFS; goto err_out; } *m = n; defragged = 1; goto retry; } err = EFBIG; goto err_out; } *nsegs = seg_count; err_out: return (err); } void busdma_map_sg_vec(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs) { for (*nsegs = 0; m != NULL ; segs++, *nsegs += 1, m = m->m_nextpkt) busdma_map_mbuf_fast(tag, map, m, segs); } Index: stable/8/sys/dev/xen/netfront/netfront.c =================================================================== --- stable/8/sys/dev/xen/netfront/netfront.c (revision 209060) +++ stable/8/sys/dev/xen/netfront/netfront.c (revision 209061) @@ -1,2078 +1,2186 @@ -/* - * +/*- * Copyright (c) 2004-2006 Kip Macy * All rights reserved. * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 700000 #include #include #endif #include #include #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_if.h" #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP | CSUM_TSO) #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) #if __FreeBSD_version >= 700000 /* * Should the driver do LRO on the RX end * this can be toggled on the fly, but the * interface must be reset (down/up) for it * to take effect. */ static int xn_enable_lro = 1; TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro); #else #define IFCAP_TSO4 0 #define CSUM_TSO 0 #endif #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif -#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) +/** + * \brief The maximum allowed data fragments in a single transmit + * request. + * + * This limit is imposed by the backend driver. 
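(With the usual amd64 PAGE_SIZE of 4096 bytes, the definition below works out to 65536 / 4096 + 2 = 18 fragments per transmit request; the 4 KB page size is assumed here only for the sake of the worked example, and the value simply mirrors the Linux MAX_SKB_FRAGS constant mentioned in the rest of this comment.)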
We assume here that + * we are dealing with a Linux driver domain and have set our limit + * to mirror the Linux MAX_SKB_FRAGS constant. + */ +#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2) + #define RX_COPY_THRESHOLD 256 #define net_ratelimit() 0 struct netfront_info; struct netfront_rx_info; static void xn_txeof(struct netfront_info *); static void xn_rxeof(struct netfront_info *); static void network_alloc_rx_buffers(struct netfront_info *); static void xn_tick_locked(struct netfront_info *); static void xn_tick(void *); static void xn_intr(void *); +static inline int xn_count_frags(struct mbuf *m); +static int xn_assemble_tx_request(struct netfront_info *sc, + struct mbuf *m_head); static void xn_start_locked(struct ifnet *); static void xn_start(struct ifnet *); static int xn_ioctl(struct ifnet *, u_long, caddr_t); static void xn_ifinit_locked(struct netfront_info *); static void xn_ifinit(void *); static void xn_stop(struct netfront_info *); #ifdef notyet static void xn_watchdog(struct ifnet *); #endif static void show_device(struct netfront_info *sc); #ifdef notyet static void netfront_closing(device_t dev); #endif static void netif_free(struct netfront_info *info); static int netfront_detach(device_t dev); static int talk_to_backend(device_t dev, struct netfront_info *info); static int create_netdev(device_t dev); static void netif_disconnect_backend(struct netfront_info *info); static int setup_device(device_t dev, struct netfront_info *info); static void end_access(int ref, void *page); +static int xn_ifmedia_upd(struct ifnet *ifp); +static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); + /* Xenolinux helper functions */ int network_connect(struct netfront_info *); static void xn_free_rx_ring(struct netfront_info *); static void xn_free_tx_ring(struct netfront_info *); static int xennet_get_responses(struct netfront_info *np, - struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list, - int *pages_flipped_p); + struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, + struct mbuf **list, int *pages_flipped_p); #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT) #define INVALID_P2M_ENTRY (~0UL) /* * Mbuf pointers. We need these to keep track of the virtual addresses * of our mbuf chains since we can only convert from virtual to physical, * not the other way around. The size must track the free index arrays. 
*/ struct xn_chain_data { - struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1]; - int xn_tx_chain_cnt; - struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1]; + struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1]; + int xn_tx_chain_cnt; + struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1]; }; +#define NUM_ELEMENTS(x) (sizeof(x)/sizeof(*x)) struct net_device_stats { u_long rx_packets; /* total packets received */ u_long tx_packets; /* total packets transmitted */ u_long rx_bytes; /* total bytes received */ u_long tx_bytes; /* total bytes transmitted */ u_long rx_errors; /* bad packets received */ u_long tx_errors; /* packet transmit problems */ u_long rx_dropped; /* no space in linux buffers */ u_long tx_dropped; /* no space available in linux */ u_long multicast; /* multicast packets received */ u_long collisions; /* detailed rx_errors: */ u_long rx_length_errors; u_long rx_over_errors; /* receiver ring buff overflow */ u_long rx_crc_errors; /* recved pkt with crc error */ u_long rx_frame_errors; /* recv'd frame alignment error */ u_long rx_fifo_errors; /* recv'r fifo overrun */ u_long rx_missed_errors; /* receiver missed packet */ /* detailed tx_errors */ u_long tx_aborted_errors; u_long tx_carrier_errors; u_long tx_fifo_errors; u_long tx_heartbeat_errors; u_long tx_window_errors; /* for cslip etc */ u_long rx_compressed; u_long tx_compressed; }; struct netfront_info { struct ifnet *xn_ifp; #if __FreeBSD_version >= 700000 struct lro_ctrl xn_lro; #endif struct net_device_stats stats; u_int tx_full; netif_tx_front_ring_t tx; netif_rx_front_ring_t rx; struct mtx tx_lock; struct mtx rx_lock; - struct sx sc_lock; + struct mtx sc_lock; u_int handle; u_int irq; u_int copying_receiver; u_int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 32 #define RX_MAX_TARGET NET_RX_RING_SIZE - int rx_min_target, rx_max_target, rx_target; + int rx_min_target; + int rx_max_target; + int rx_target; - /* - * {tx,rx}_skbs store outstanding skbuffs. The first entry in each - * array is an index into a chain of free entries. 
- */ - grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; -#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256) - device_t xbdev; - int tx_ring_ref; - int rx_ring_ref; - uint8_t mac[ETHER_ADDR_LEN]; + device_t xbdev; + int tx_ring_ref; + int rx_ring_ref; + uint8_t mac[ETHER_ADDR_LEN]; struct xn_chain_data xn_cdata; /* mbufs */ - struct mbuf_head xn_rx_batch; /* head of the batch queue */ + struct mbuf_head xn_rx_batch; /* head of the batch queue */ int xn_if_flags; struct callout xn_stat_ch; - u_long rx_pfn_array[NET_RX_RING_SIZE]; - multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; - mmu_update_t rx_mmu[NET_RX_RING_SIZE]; + u_long rx_pfn_array[NET_RX_RING_SIZE]; + multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; + mmu_update_t rx_mmu[NET_RX_RING_SIZE]; + struct ifmedia sc_media; }; #define rx_mbufs xn_cdata.xn_rx_chain #define tx_mbufs xn_cdata.xn_tx_chain #define XN_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \ mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF); \ - sx_init(&(_sc)->sc_lock, #_name"_rx") + mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF) #define XN_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_lock) #define XN_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_lock) #define XN_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_lock) #define XN_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_lock) -#define XN_LOCK(_sc) sx_xlock(&(_sc)->sc_lock); -#define XN_UNLOCK(_sc) sx_xunlock(&(_sc)->sc_lock); +#define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock); +#define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock); -#define XN_LOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_lock, SX_LOCKED); +#define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED); #define XN_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_lock, MA_OWNED); #define XN_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_lock, MA_OWNED); #define XN_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_lock); \ mtx_destroy(&(_sc)->tx_lock); \ - sx_destroy(&(_sc)->sc_lock); + mtx_destroy(&(_sc)->sc_lock); struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* Access macros for acquiring freeing slots in xn_free_{tx,rx}_idxs[]. */ /* * Access macros for acquiring freeing slots in tx_skbs[]. 
*/ static inline void -add_id_to_freelist(struct mbuf **list, unsigned short id) +add_id_to_freelist(struct mbuf **list, uintptr_t id) { - KASSERT(id != 0, ("add_id_to_freelist: the head item (0) must always be free.")); + KASSERT(id != 0, + ("%s: the head item (0) must always be free.", __func__)); list[id] = list[0]; - list[0] = (void *)(u_long)id; + list[0] = (struct mbuf *)id; } static inline unsigned short get_id_from_freelist(struct mbuf **list) { - u_int id = (u_int)(u_long)list[0]; - KASSERT(id != 0, ("get_id_from_freelist: the head item (0) must always remain free.")); + uintptr_t id; + + id = (uintptr_t)list[0]; + KASSERT(id != 0, + ("%s: the head item (0) must always remain free.", __func__)); list[0] = list[id]; return (id); } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct mbuf * -xennet_get_rx_mbuf(struct netfront_info *np, - RING_IDX ri) +xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct mbuf *m; m = np->rx_mbufs[i]; np->rx_mbufs[i] = NULL; return (m); } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define IPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) +#ifdef INVARIANTS #define WPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) -#if 0 +#else +#define WPRINTK(fmt, args...) +#endif +#ifdef DEBUG #define DPRINTK(fmt, args...) \ printf("[XEN] %s: " fmt, __func__, ##args) #else #define DPRINTK(fmt, args...) #endif /** * Read the 'mac' node at the given device's node in the store, and parse that * as colon-separated octets, placing result the given mac array. mac must be * a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h). * Return 0 on success, or errno on error. */ static int xen_net_read_mac(device_t dev, uint8_t mac[]) { int error, i; char *s, *e, *macstr; error = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL, (void **) &macstr); if (error) return (error); s = macstr; for (i = 0; i < ETHER_ADDR_LEN; i++) { mac[i] = strtoul(s, &e, 16); if (s == e || (e[0] != ':' && e[0] != 0)) { free(macstr, M_DEVBUF); return (ENOENT); } s = &e[1]; } free(macstr, M_DEVBUF); return (0); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Connected state. */ static int netfront_probe(device_t dev) { if (!strcmp(xenbus_get_type(dev), "vif")) { device_set_desc(dev, "Virtual Network Interface"); return (0); } return (ENXIO); } static int netfront_attach(device_t dev) { int err; err = create_netdev(dev); if (err) { xenbus_dev_fatal(dev, err, "creating netdev"); return err; } #if __FreeBSD_version >= 700000 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW, &xn_enable_lro, 0, "Large Receive Offload"); #endif return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
*/ static int netfront_resume(device_t dev) { struct netfront_info *info = device_get_softc(dev); netif_disconnect_backend(info); return (0); } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(device_t dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; const char *node = xenbus_get_node(dev); int err; err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", node); goto out; } /* Create shared ring, alloc event channel. */ err = setup_device(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, node, "tx-ring-ref","%u", - info->tx_ring_ref); + info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, node, "rx-ring-ref","%u", - info->rx_ring_ref); + info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, node, - "event-channel", "%u", irq_to_evtchn_port(info->irq)); + "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, node, "request-rx-copy", "%u", - info->copying_receiver); + info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, node, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } #if __FreeBSD_version >= 700000 err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } #endif err = xenbus_transaction_end(xbt, 0); if (err) { if (err == EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netif_free(info); out: return err; } static int setup_device(device_t dev, struct netfront_info *info) { netif_tx_sring_t *txs; netif_rx_sring_t *rxs; int error; struct ifnet *ifp; ifp = info->xn_ifp; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); if (!txs) { error = ENOMEM; xenbus_dev_fatal(dev, error, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref); if (error) goto fail; rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); if (!rxs) { error = ENOMEM; xenbus_dev_fatal(dev, error, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref); if (error) goto fail; error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev), "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq); if (error) { xenbus_dev_fatal(dev, error, "bind_evtchn_to_irqhandler failed"); goto fail; } show_device(info); return (0); fail: netif_free(info); return (error); } /** * If this interface has an ipv4 address, send an arp for it. 
This * helps to get the network going again after migrating hosts. */ static void netfront_send_fake_arp(device_t dev, struct netfront_info *info) { struct ifnet *ifp; struct ifaddr *ifa; ifp = info->xn_ifp; TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == AF_INET) { arp_ifinit(ifp, ifa); } } } /** * Callback received when the backend's state changes. */ static int netfront_backend_changed(device_t dev, XenbusState newstate) { struct netfront_info *sc = device_get_softc(dev); DPRINTK("newstate=%d\n", newstate); switch (newstate) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateConnected: case XenbusStateUnknown: case XenbusStateClosed: case XenbusStateReconfigured: case XenbusStateReconfiguring: break; case XenbusStateInitWait: if (xenbus_get_state(dev) != XenbusStateInitialising) break; if (network_connect(sc) != 0) break; xenbus_set_state(dev, XenbusStateConnected); netfront_send_fake_arp(dev, sc); break; case XenbusStateClosing: xenbus_set_state(dev, XenbusStateClosed); break; } return (0); } static void xn_free_rx_ring(struct netfront_info *sc) { #if 0 int i; for (i = 0; i < NET_RX_RING_SIZE; i++) { - if (sc->xn_cdata.xn_rx_chain[i] != NULL) { - m_freem(sc->xn_cdata.xn_rx_chain[i]); - sc->xn_cdata.xn_rx_chain[i] = NULL; + if (sc->xn_cdata.rx_mbufs[i] != NULL) { + m_freem(sc->rx_mbufs[i]); + sc->rx_mbufs[i] = NULL; } } sc->rx.rsp_cons = 0; sc->xn_rx_if->req_prod = 0; sc->xn_rx_if->event = sc->rx.rsp_cons ; #endif } static void xn_free_tx_ring(struct netfront_info *sc) { #if 0 int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { - if (sc->xn_cdata.xn_tx_chain[i] != NULL) { - m_freem(sc->xn_cdata.xn_tx_chain[i]); + if (sc->tx_mbufs[i] != NULL) { + m_freem(sc->tx_mbufs[i]); sc->xn_cdata.xn_tx_chain[i] = NULL; } } return; #endif } -/* - * Do some brief math on the number of descriptors available to - * determine how many slots are available. +/** + * \brief Verify that there is sufficient space in the Tx ring + * buffer for a maximally sized request to be enqueued. * - * Firstly - wouldn't something with RING_FREE_REQUESTS() be more applicable? - * Secondly - MAX_SKB_FRAGS is a Linux construct which may not apply here. - * Thirdly - it isn't used here anyway; the magic constant '24' is possibly - * wrong? - * The "2" is presumably to ensure there are also enough slots available for - * the ring entries used for "options" (eg, the TSO entry before a packet - * is queued); I'm not sure why its 2 and not 1. Perhaps to make sure there's - * a "free" node in the tx mbuf list (node 0) to represent the freelist? - * - * This only figures out whether any xenbus ring descriptors are available; - * it doesn't at all reflect how many tx mbuf ring descriptors are also - * available. + * A transmit request requires a transmit descriptor for each packet + * fragment, plus up to 2 entries for "options" (e.g. TSO). */ static inline int -netfront_tx_slot_available(struct netfront_info *np) +xn_tx_slot_available(struct netfront_info *np) { - return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < - (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2)); + return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2)); } + static void netif_release_tx_bufs(struct netfront_info *np) { - struct mbuf *m; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { - m = np->xn_cdata.xn_tx_chain[i]; + struct mbuf *m; - if (((u_long)m) < KERNBASE) + m = np->tx_mbufs[i]; + + /* + * We assume that no kernel addresses are + * less than NET_TX_RING_SIZE. 
Any entry + * in the table that is below this number + * must be an index from free-list tracking. + */ + if (((uintptr_t)m) <= NET_TX_RING_SIZE) continue; gnttab_grant_foreign_access_ref(np->grant_tx_ref[i], xenbus_get_otherend_id(np->xbdev), virt_to_mfn(mtod(m, vm_offset_t)), GNTMAP_readonly); gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_mbufs, i); np->xn_cdata.xn_tx_chain_cnt--; if (np->xn_cdata.xn_tx_chain_cnt < 0) { panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0"); } m_freem(m); } } static void network_alloc_rx_buffers(struct netfront_info *sc) { int otherend_id = xenbus_get_otherend_id(sc->xbdev); unsigned short id; struct mbuf *m_new; int i, batch_target, notify; RING_IDX req_prod; struct xen_memory_reservation reservation; grant_ref_t ref; int nr_flips; netif_rx_request_t *req; vm_offset_t vaddr; u_long pfn; req_prod = sc->rx.req_prod_pvt; if (unlikely(sc->carrier == 0)) return; /* - * Allocate skbuffs greedily, even though we batch updates to the + * Allocate mbufs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory - * allocator, so should reduce the chance of failed allocation + * allocator, and so should reduce the chance of failed allocation * requests both for ourself and for other kernel subsystems. + * + * Here we attempt to maintain rx_target buffers in flight, counting + * buffers that we have yet to process in the receive ring. */ batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons); for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); - if (m_new == NULL) + if (m_new == NULL) { + printf("%s: MGETHDR failed\n", __func__); goto no_mbuf; + } m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE); if ((m_new->m_flags & M_EXT) == 0) { + printf("%s: m_cljget failed\n", __func__); m_freem(m_new); no_mbuf: if (i != 0) goto refill; /* * XXX set timer */ break; } m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; /* queue the mbufs allocated */ mbufq_tail(&sc->xn_rx_batch, m_new); } - /* Is the batch large enough to be worthwhile? */ + /* + * If we've allocated at least half of our target number of entries, + * submit them to the backend - we have enough to make the overhead + * of submission worthwhile. Otherwise wait for more mbufs and + * request entries to become available. + */ if (i < (sc->rx_target/2)) { if (req_prod >sc->rx.sring->req_prod) goto push; return; } - /* Adjust floating fill target if we risked running out of buffers. */ - if ( ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) && - ((sc->rx_target *= 2) > sc->rx_max_target) ) - sc->rx_target = sc->rx_max_target; + /* + * Double floating fill target if we risked having the backend + * run out of empty buffers for receive traffic. We define "running + * low" as having less than a fourth of our target buffers free + * at the time we refilled the queue. 
+ */ + if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) { + sc->rx_target *= 2; + if (sc->rx_target > sc->rx_max_target) + sc->rx_target = sc->rx_max_target; + } + refill: for (nr_flips = i = 0; ; i++) { if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL) break; m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)( vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT); id = xennet_rxidx(req_prod + i); - KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL, - ("non-NULL xm_rx_chain")); - sc->xn_cdata.xn_rx_chain[id] = m_new; + KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain")); + sc->rx_mbufs[id] = m_new; ref = gnttab_claim_grant_reference(&sc->gref_rx_head); KASSERT((short)ref >= 0, ("negative ref")); sc->grant_rx_ref[id] = ref; vaddr = mtod(m_new, vm_offset_t); pfn = vtophys(vaddr) >> PAGE_SHIFT; req = RING_GET_REQUEST(&sc->rx, req_prod + i); if (sc->copying_receiver == 0) { gnttab_grant_foreign_transfer_ref(ref, otherend_id, pfn); sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(&sc->rx_mcl[i], vaddr, 0, 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, otherend_id, PFNTOMFN(pfn), 0); } req->id = id; req->gref = ref; sc->rx_pfn_array[i] = vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT; } KASSERT(i, ("no mbufs processed")); /* should have returned earlier */ KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed")); /* * We may have allocated buffers which have entries outstanding * in the page * update queue -- make sure we flush those first! */ PT_UPDATES_FLUSH(); if (nr_flips != 0) { #ifdef notyet /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); #endif set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array); reservation.nr_extents = i; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ sc->rx_mcl[i].op = __HYPERVISOR_memory_op; sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation; sc->rx_mcl[i].args[1] = (u_long)&reservation; /* Zap PTEs and give away pages in one big multicall. */ (void)HYPERVISOR_multicall(sc->rx_mcl, i+1); /* Check return status of HYPERVISOR_dom_mem_op(). */ if (unlikely(sc->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); } else { if (HYPERVISOR_memory_op( XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory " "reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
*/ sc->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify); if (notify) notify_remote_via_irq(sc->irq); } static void xn_rxeof(struct netfront_info *np) { struct ifnet *ifp; #if __FreeBSD_version >= 700000 struct lro_ctrl *lro = &np->xn_lro; struct lro_entry *queued; #endif struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; multicall_entry_t *mcl; struct mbuf *m; struct mbuf_head rxq, errq; int err, pages_flipped = 0, work_to_do; do { XN_RX_LOCK_ASSERT(np); if (!netfront_carrier_ok(np)) return; mbufq_init(&errq); mbufq_init(&rxq); ifp = np->xn_ifp; rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; while ((i != rp)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); m = NULL; - err = xennet_get_responses(np, &rinfo, rp, &m, + err = xennet_get_responses(np, &rinfo, rp, &i, &m, &pages_flipped); if (unlikely(err)) { if (m) mbufq_tail(&errq, m); np->stats.rx_errors++; - i = np->rx.rsp_cons; continue; } m->m_pkthdr.rcvif = ifp; if ( rx->flags & NETRXF_data_validated ) { /* Tell the stack the checksums are okay */ /* * XXX this isn't necessarily the case - need to add * check */ m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } np->stats.rx_packets++; np->stats.rx_bytes += m->m_pkthdr.len; mbufq_tail(&rxq, m); - np->rx.rsp_cons = ++i; + np->rx.rsp_cons = i; } if (pages_flipped) { /* Some pages are no longer absent... */ #ifdef notyet balloon_update_driver_allowance(-pages_flipped); #endif /* Do all the remapping work, and M->P updates, in one big * hypercall. */ if (!!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (u_long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; (void)HYPERVISOR_multicall(np->rx_mcl, pages_flipped + 1); } } while ((m = mbufq_dequeue(&errq))) m_freem(m); /* * Process all the mbufs after the remapping is complete. * Break the mbuf chain first though. */ while ((m = mbufq_dequeue(&rxq)) != NULL) { ifp->if_ipackets++; /* * Do we really need to drop the rx lock? */ XN_RX_UNLOCK(np); #if __FreeBSD_version >= 700000 /* Use LRO if possible */ if ((ifp->if_capenable & IFCAP_LRO) == 0 || lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { /* * If LRO fails, pass up to the stack * directly. */ (*ifp->if_input)(ifp, m); } #else (*ifp->if_input)(ifp, m); #endif XN_RX_LOCK(np); } np->rx.rsp_cons = i; #if __FreeBSD_version >= 700000 /* * Flush any outstanding LRO work */ while (!SLIST_EMPTY(&lro->lro_active)) { queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif #if 0 /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
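Read together with the doubling logic in network_alloc_rx_buffers() above, the intent is a simple hysteresis on the receive fill target: grow it aggressively when the backend nearly drains the posted buffers, and bleed it back down slowly while most of them sit unused. A minimal sketch of that idea follows; it uses the rx_target/rx_min_target/rx_max_target fields from struct netfront_info, but the helper name and its parameter are hypothetical (neither appears in the driver), with 'outstanding' standing for receive buffers posted to the backend that have not yet come back as responses:

static void
xn_adjust_rx_target(struct netfront_info *sc, int outstanding)
{
	/* Exponential increase: double the target when we nearly ran dry. */
	if (outstanding < sc->rx_target / 4) {
		sc->rx_target *= 2;
		if (sc->rx_target > sc->rx_max_target)
			sc->rx_target = sc->rx_max_target;
	}

	/* Linear decrease: step back by one while most buffers sit idle. */
	if (outstanding > (3 * sc->rx_target) / 4 &&
	    --sc->rx_target < sc->rx_min_target)
		sc->rx_target = sc->rx_min_target;
}

In the driver itself the increase is done inline in network_alloc_rx_buffers(), while the decrease corresponds to the disabled (#if 0) block that this comment introduces.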
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; #endif network_alloc_rx_buffers(np); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do); } while (work_to_do); } static void xn_txeof(struct netfront_info *np) { RING_IDX i, prod; unsigned short id; struct ifnet *ifp; netif_tx_response_t *txr; struct mbuf *m; XN_TX_LOCK_ASSERT(np); if (!netfront_carrier_ok(np)) return; ifp = np->xn_ifp; - ifp->if_timer = 0; do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (i = np->tx.rsp_cons; i != prod; i++) { txr = RING_GET_RESPONSE(&np->tx, i); if (txr->status == NETIF_RSP_NULL) continue; + if (txr->status != NETIF_RSP_OKAY) { + printf("%s: WARNING: response is %d!\n", + __func__, txr->status); + } id = txr->id; - m = np->xn_cdata.xn_tx_chain[id]; + m = np->tx_mbufs[id]; KASSERT(m != NULL, ("mbuf not found in xn_tx_chain")); + KASSERT((uintptr_t)m > NET_TX_RING_SIZE, + ("mbuf already on the free list, but we're " + "trying to free it again!")); M_ASSERTVALID(m); /* * Increment packet count if this is the last * mbuf of the chain. */ if (!m->m_next) ifp->if_opackets++; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { - printf("network_tx_buf_gc: warning " - "-- grant still in use by backend " - "domain.\n"); - goto out; + panic("grant id %u still in use by the backend", + id); } gnttab_end_foreign_access_ref( np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; - np->xn_cdata.xn_tx_chain[id] = NULL; - add_id_to_freelist(np->xn_cdata.xn_tx_chain, id); + np->tx_mbufs[id] = NULL; + add_id_to_freelist(np->tx_mbufs, id); np->xn_cdata.xn_tx_chain_cnt--; - if (np->xn_cdata.xn_tx_chain_cnt < 0) { - panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0"); - } m_free(m); /* Only mark the queue active if we've freed up at least one slot to try */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of * tx_cons. Note that it is essential to schedule a * callback, no matter how few buffers are pending. Even if * there is space in the transmit ring, higher layers may * be blocked because too much data is outstanding: in such * cases notification from Xen is likely to be the only kick * that we'll get. 
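As a concrete illustration of the event threshold set just below (the numbers are invented for the example): if prod is 10 and req_prod is 18, rsp_event becomes 10 + ((18 - 10) >> 1) + 1 = 15, so the frontend asks to be notified again once the backend has produced responses roughly halfway through the currently outstanding requests.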
*/ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while (prod != np->tx.sring->rsp_prod); - out: if (np->tx_full && ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) { np->tx_full = 0; #if 0 if (np->user_state == UST_OPEN) netif_wake_queue(dev); #endif } } static void xn_intr(void *xsc) { struct netfront_info *np = xsc; struct ifnet *ifp = np->xn_ifp; #if 0 if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod && likely(netfront_carrier_ok(np)) && ifp->if_drv_flags & IFF_DRV_RUNNING)) return; #endif - if (np->tx.rsp_cons != np->tx.sring->rsp_prod) { + if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) { XN_TX_LOCK(np); xn_txeof(np); XN_TX_UNLOCK(np); } XN_RX_LOCK(np); xn_rxeof(np); XN_RX_UNLOCK(np); if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) xn_start(ifp); } static void xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL")); np->rx_mbufs[new] = m; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, - struct netif_extra_info *extras, RING_IDX rp) + struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons) { struct netif_extra_info *extra; - RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct mbuf *m; grant_ref_t ref; - if (unlikely(cons + 1 == rp)) { + if (unlikely(*cons + 1 == rp)) { #if 0 if (net_ratelimit()) WPRINTK("Missing extra info\n"); #endif - err = -EINVAL; + err = EINVAL; break; } extra = (struct netif_extra_info *) - RING_GET_RESPONSE(&np->rx, ++cons); + RING_GET_RESPONSE(&np->rx, ++(*cons)); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { #if 0 if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); #endif - err = -EINVAL; + err = EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } - m = xennet_get_rx_mbuf(np, cons); - ref = xennet_get_rx_ref(np, cons); + m = xennet_get_rx_mbuf(np, *cons); + ref = xennet_get_rx_ref(np, *cons); xennet_move_rx_slot(np, m, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); - np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, - struct netfront_rx_info *rinfo, RING_IDX rp, + struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, struct mbuf **list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; - RING_IDX cons = np->rx.rsp_cons; struct mbuf *m, *m0, *m_prev; - grant_ref_t ref = xennet_get_rx_ref(np, cons); - int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */; + grant_ref_t ref = xennet_get_rx_ref(np, *cons); + RING_IDX ref_cons = *cons; + int max = 5 /* MAX_TX_REQ_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */; int frags = 1; int err = 0; u_long ret; - m0 = m = m_prev = xennet_get_rx_mbuf(np, cons); + m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons); if (rx->flags & NETRXF_extra_info) { - err = xennet_get_extras(np, extras, rp); - cons = np->rx.rsp_cons; + err = xennet_get_extras(np, extras, rp, cons); } if (m0 != NULL) { - m0->m_pkthdr.len = 0; - m0->m_next = NULL; + m0->m_pkthdr.len = 0; + m0->m_next = NULL; } for (;;) { u_long mfn; #if 0 - printf("rx->status=%hd rx->offset=%hu 
frags=%u\n", + DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n", rx->status, rx->offset, frags); #endif if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { + #if 0 if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); #endif xennet_move_rx_slot(np, m, ref); - err = -EINVAL; - goto next; + if (m0 == m) + m0 = NULL; + m = NULL; + err = EINVAL; + goto next_skip_queue; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { + #if 0 if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); #endif - err = -EINVAL; + err = EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { - if (net_ratelimit()) - WPRINTK("Unfulfilled rx req " - "(id=%d, st=%d).\n", - rx->id, rx->status); + WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n", + rx->id, rx->status); xennet_move_rx_slot(np, m, ref); - err = -ENOMEM; + err = ENOMEM; goto next; } if (!xen_feature( XENFEAT_auto_translated_physmap)) { /* Remap the page. */ void *vaddr = mtod(m, void *); uint32_t pfn; mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (u_long)vaddr, (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW | PG_V | PG_M | PG_A, 0); pfn = (uintptr_t)m->m_ext.ext_arg1; mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); KASSERT(ret, ("ret != 0")); } gnttab_release_grant_reference(&np->gref_rx_head, ref); next: if (m == NULL) break; m->m_len = rx->status; m->m_data += rx->offset; m0->m_pkthdr.len += rx->status; +next_skip_queue: if (!(rx->flags & NETRXF_more_data)) break; - if (cons + frags == rp) { + if (*cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); - err = -ENOENT; + err = ENOENT; + printf("%s: cons %u frags %u rp %u, not enough frags\n", + __func__, *cons, frags, rp); break; } + /* + * Note that m can be NULL, if rx->status < 0 or if + * rx->offset + rx->status > PAGE_SIZE above. + */ m_prev = m; - rx = RING_GET_RESPONSE(&np->rx, cons + frags); - m = xennet_get_rx_mbuf(np, cons + frags); + rx = RING_GET_RESPONSE(&np->rx, *cons + frags); + m = xennet_get_rx_mbuf(np, *cons + frags); - m_prev->m_next = m; + /* + * m_prev == NULL can happen if rx->status < 0 or if + * rx->offset + * rx->status > PAGE_SIZE above. + */ + if (m_prev != NULL) + m_prev->m_next = m; + + /* + * m0 can be NULL if rx->status < 0 or if * rx->offset + + * rx->status > PAGE_SIZE above. 
+ */ + if (m0 == NULL) + m0 = m; m->m_next = NULL; - ref = xennet_get_rx_ref(np, cons + frags); + ref = xennet_get_rx_ref(np, *cons + frags); + ref_cons = *cons + frags; frags++; } *list = m0; if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); - err = -E2BIG; + printf("%s: too many frags %d > max %d\n", __func__, frags, + max); + err = E2BIG; } - if (unlikely(err)) - np->rx.rsp_cons = cons + frags; + *cons += frags; *pages_flipped_p = pages_flipped; return err; } static void xn_tick_locked(struct netfront_info *sc) { XN_RX_LOCK_ASSERT(sc); callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); /* XXX placeholder for printing debug information */ } static void xn_tick(void *xsc) { struct netfront_info *sc; sc = xsc; XN_RX_LOCK(sc); xn_tick_locked(sc); XN_RX_UNLOCK(sc); } -static void -xn_start_locked(struct ifnet *ifp) + +/** + * \brief Count the number of fragments in an mbuf chain. + * + * Surprisingly, there isn't an M* macro for this. + */ +static inline int +xn_count_frags(struct mbuf *m) { - int otherend_id; - unsigned short id; - struct mbuf *m_head, *m; - struct netfront_info *sc; - netif_tx_request_t *tx; + int nfrags; + + for (nfrags = 0; m != NULL; m = m->m_next) + nfrags++; + + return (nfrags); +} + +/** + * Given an mbuf chain, make sure we have enough room and then push + * it onto the transmit ring. + */ +static int +xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head) +{ + struct ifnet *ifp; + struct mbuf *m; + u_int nfrags; netif_extra_info_t *extra; - RING_IDX i; - grant_ref_t ref; - u_long mfn, tx_bytes; - int notify, nfrags; + int otherend_id; - sc = ifp->if_softc; - otherend_id = xenbus_get_otherend_id(sc->xbdev); - tx_bytes = 0; + ifp = sc->xn_ifp; - if (!netfront_carrier_ok(sc)) - return; - - for (i = sc->tx.req_prod_pvt; TRUE; i++) { - IF_DEQUEUE(&ifp->if_snd, m_head); - if (m_head == NULL) - break; - - /* - * netfront_tx_slot_available() tries to do some math to - * ensure that there'll be enough xenbus ring slots available - * for the maximum number of packet fragments (and a couple more - * for what I guess are TSO and other ring entry items.) - */ - if (!netfront_tx_slot_available(sc)) { - IF_PREPEND(&ifp->if_snd, m_head); - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - break; - } + /** + * Defragment the mbuf if necessary. + */ + nfrags = xn_count_frags(m_head); - /* - * Defragment the mbuf if necessary. - */ - for (m = m_head, nfrags = 0; m; m = m->m_next) - nfrags++; - if (nfrags > MAX_SKB_FRAGS) { - m = m_defrag(m_head, M_DONTWAIT); - if (!m) { - m_freem(m_head); - break; - } - m_head = m; + /* + * Check to see whether this request is longer than netback + * can handle, and try to defrag it. + */ + /** + * It is a bit lame, but the netback driver in Linux can't + * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of + * the Linux network stack. + */ + if (nfrags > MAX_TX_REQ_FRAGS) { + m = m_defrag(m_head, M_DONTWAIT); + if (!m) { + /* + * Defrag failed, so free the mbuf and + * therefore drop the packet. + */ + m_freem(m_head); + return (EMSGSIZE); } + m_head = m; + } - /* Determine how many fragments now exist */ - for (m = m_head, nfrags = 0; m; m = m->m_next) - nfrags++; + /* Determine how many fragments now exist */ + nfrags = xn_count_frags(m_head); - /* - * Don't attempt to queue this packet if there aren't - * enough free entries in the chain. - * - * There isn't a 1:1 correspondance between the mbuf TX ring - * and the xenbus TX ring. - * xn_txeof() may need to be called to free up some slots. 
- * - * It is quite possible that this can be later eliminated if - * it turns out that partial * packets can be pushed into - * the ringbuffer, with fragments pushed in when further slots - * free up. - * - * It is also quite possible that the driver will lock up - * if the TX queue fills up with no RX traffic, and - * the mbuf ring is exhausted. The queue may need - * a swift kick to continue. - */ + /* + * Check to see whether the defragmented packet has too many + * segments for the Linux netback driver. + */ + /** + * The FreeBSD TCP stack, with TSO enabled, can produce a chain + * of mbufs longer than Linux can handle. Make sure we don't + * pass a too-long chain over to the other side by dropping the + * packet. It doesn't look like there is currently a way to + * tell the TCP stack to generate a shorter chain of packets. + */ + if (nfrags > MAX_TX_REQ_FRAGS) { + m_freem(m_head); + return (EMSGSIZE); + } - /* - * It is not +1 like the allocation because we need to keep - * slot [0] free for the freelist head - */ - if (sc->xn_cdata.xn_tx_chain_cnt + nfrags >= NET_TX_RING_SIZE) { - printf("xn_start_locked: xn_tx_chain_cnt (%d) + nfrags %d >= NET_TX_RING_SIZE (%d); must be full!\n", - (int) sc->xn_cdata.xn_tx_chain_cnt, - (int) nfrags, (int) NET_TX_RING_SIZE); - IF_PREPEND(&ifp->if_snd, m_head); - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - break; - } + /* + * This check should be redundant. We've already verified that we + * have enough slots in the ring to handle a packet of maximum + * size, and that our packet is less than the maximum size. Keep + * it in here as an assert for now just to make certain that + * xn_tx_chain_cnt is accurate. + */ + KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE, + ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE " + "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt, + (int) nfrags, (int) NET_TX_RING_SIZE)); - /* - * Make sure there's actually space available in the - * Xen TX ring for this. Overcompensate for the possibility - * of having a TCP offload fragment just in case for now - * (the +1) rather than adding logic to accurately calculate - * the required size. - */ - if (RING_FREE_REQUESTS(&sc->tx) < (nfrags + 1)) { - printf("xn_start_locked: free ring slots (%d) < (nfrags + 1) (%d); must be full!\n", - (int) RING_FREE_REQUESTS(&sc->tx), - (int) (nfrags + 1)); - IF_PREPEND(&ifp->if_snd, m_head); - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - break; - } + /* + * Start packing the mbufs in this chain into + * the fragment pointers. Stop when we run out + * of fragments or hit the end of the mbuf chain. + */ + m = m_head; + extra = NULL; + otherend_id = xenbus_get_otherend_id(sc->xbdev); + for (m = m_head; m; m = m->m_next) { + netif_tx_request_t *tx; + uintptr_t id; + grant_ref_t ref; + u_long mfn; /* XXX Wrong type? */ - /* - * Start packing the mbufs in this chain into - * the fragment pointers. Stop when we run out - * of fragments or hit the end of the mbuf chain. 
- */ - m = m_head; - extra = NULL; - for (m = m_head; m; m = m->m_next) { - tx = RING_GET_REQUEST(&sc->tx, i); - id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain); - if (id == 0) - panic("xn_start_locked: was allocated the freelist head!\n"); - sc->xn_cdata.xn_tx_chain_cnt++; - if (sc->xn_cdata.xn_tx_chain_cnt >= NET_TX_RING_SIZE+1) - panic("xn_start_locked: tx_chain_cnt must be < NET_TX_RING_SIZE+1\n"); - sc->xn_cdata.xn_tx_chain[id] = m; - tx->id = id; - ref = gnttab_claim_grant_reference(&sc->gref_tx_head); - KASSERT((short)ref >= 0, ("Negative ref")); - mfn = virt_to_mfn(mtod(m, vm_offset_t)); - gnttab_grant_foreign_access_ref(ref, otherend_id, - mfn, GNTMAP_readonly); - tx->gref = sc->grant_tx_ref[id] = ref; - tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); - tx->flags = 0; - if (m == m_head) { - /* - * The first fragment has the entire packet - * size, subsequent fragments have just the - * fragment size. The backend works out the - * true size of the first fragment by - * subtracting the sizes of the other - * fragments. - */ - tx->size = m->m_pkthdr.len; + tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt); + id = get_id_from_freelist(sc->tx_mbufs); + if (id == 0) + panic("xn_start_locked: was allocated the freelist head!\n"); + sc->xn_cdata.xn_tx_chain_cnt++; + if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE) + panic("xn_start_locked: tx_chain_cnt must be <= NET_TX_RING_SIZE\n"); + sc->tx_mbufs[id] = m; + tx->id = id; + ref = gnttab_claim_grant_reference(&sc->gref_tx_head); + KASSERT((short)ref >= 0, ("Negative ref")); + mfn = virt_to_mfn(mtod(m, vm_offset_t)); + gnttab_grant_foreign_access_ref(ref, otherend_id, + mfn, GNTMAP_readonly); + tx->gref = sc->grant_tx_ref[id] = ref; + tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); + tx->flags = 0; + if (m == m_head) { + /* + * The first fragment has the entire packet + * size, subsequent fragments have just the + * fragment size. The backend works out the + * true size of the first fragment by + * subtracting the sizes of the other + * fragments. + */ + tx->size = m->m_pkthdr.len; - /* - * The first fragment contains the - * checksum flags and is optionally - * followed by extra data for TSO etc. - */ - if (m->m_pkthdr.csum_flags - & CSUM_DELAY_DATA) { - tx->flags |= (NETTXF_csum_blank - | NETTXF_data_validated); - } + /* + * The first fragment contains the checksum flags + * and is optionally followed by extra data for + * TSO etc. + */ + /** + * CSUM_TSO requires checksum offloading. + * Some versions of FreeBSD fail to + * set CSUM_TCP in the CSUM_TSO case, + * so we have to test for CSUM_TSO + * explicitly. 
+ */ + if (m->m_pkthdr.csum_flags + & (CSUM_DELAY_DATA | CSUM_TSO)) { + tx->flags |= (NETTXF_csum_blank + | NETTXF_data_validated); + } #if __FreeBSD_version >= 700000 - if (m->m_pkthdr.csum_flags & CSUM_TSO) { - struct netif_extra_info *gso = - (struct netif_extra_info *) - RING_GET_REQUEST(&sc->tx, ++i); + if (m->m_pkthdr.csum_flags & CSUM_TSO) { + struct netif_extra_info *gso = + (struct netif_extra_info *) + RING_GET_REQUEST(&sc->tx, + ++sc->tx.req_prod_pvt); - if (extra) - extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; - else - tx->flags |= NETTXF_extra_info; + tx->flags |= NETTXF_extra_info; - gso->u.gso.size = m->m_pkthdr.tso_segsz; - gso->u.gso.type = - XEN_NETIF_GSO_TYPE_TCPV4; - gso->u.gso.pad = 0; - gso->u.gso.features = 0; + gso->u.gso.size = m->m_pkthdr.tso_segsz; + gso->u.gso.type = + XEN_NETIF_GSO_TYPE_TCPV4; + gso->u.gso.pad = 0; + gso->u.gso.features = 0; - gso->type = XEN_NETIF_EXTRA_TYPE_GSO; - gso->flags = 0; - extra = gso; - } -#endif - } else { - tx->size = m->m_len; + gso->type = XEN_NETIF_EXTRA_TYPE_GSO; + gso->flags = 0; } - if (m->m_next) { - tx->flags |= NETTXF_more_data; - i++; - } +#endif + } else { + tx->size = m->m_len; } + if (m->m_next) + tx->flags |= NETTXF_more_data; - BPF_MTAP(ifp, m_head); + sc->tx.req_prod_pvt++; + } + BPF_MTAP(ifp, m_head); - sc->stats.tx_bytes += m_head->m_pkthdr.len; - sc->stats.tx_packets++; + sc->stats.tx_bytes += m_head->m_pkthdr.len; + sc->stats.tx_packets++; + + return (0); +} + +static void +xn_start_locked(struct ifnet *ifp) +{ + struct netfront_info *sc; + struct mbuf *m_head; + int notify; + + sc = ifp->if_softc; + + if (!netfront_carrier_ok(sc)) + return; + + /* + * While we have enough transmit slots available for at least one + * maximum-sized packet, pull mbufs off the queue and put them on + * the transmit ring. 
+ */ + while (xn_tx_slot_available(sc)) { + IF_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) + break; + + if (xn_assemble_tx_request(sc, m_head) != 0) + break; } - sc->tx.req_prod_pvt = i; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); if (notify) notify_remote_via_irq(sc->irq); - xn_txeof(sc); - if (RING_FULL(&sc->tx)) { sc->tx_full = 1; #if 0 netif_stop_queue(dev); #endif } +} - return; -} static void xn_start(struct ifnet *ifp) { struct netfront_info *sc; sc = ifp->if_softc; XN_TX_LOCK(sc); xn_start_locked(ifp); XN_TX_UNLOCK(sc); } /* equivalent of network_open() in Linux */ static void xn_ifinit_locked(struct netfront_info *sc) { struct ifnet *ifp; XN_LOCK_ASSERT(sc); ifp = sc->xn_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; xn_stop(sc); network_alloc_rx_buffers(sc); sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if_link_state_change(ifp, LINK_STATE_UP); callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); } static void xn_ifinit(void *xsc) { struct netfront_info *sc = xsc; XN_LOCK(sc); xn_ifinit_locked(sc); XN_UNLOCK(sc); } static int xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct netfront_info *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct ifaddr *ifa = (struct ifaddr *)data; int mask, error = 0; switch(cmd) { case SIOCSIFADDR: case SIOCGIFADDR: XN_LOCK(sc); if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) xn_ifinit_locked(sc); arp_ifinit(ifp, ifa); XN_UNLOCK(sc); } else { XN_UNLOCK(sc); error = ether_ioctl(ifp, cmd, data); } break; case SIOCSIFMTU: /* XXX can we alter the MTU on a VN ?*/ #ifdef notyet if (ifr->ifr_mtu > XN_JUMBO_MTU) error = EINVAL; else #endif { ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xn_ifinit(sc); } break; case SIOCSIFFLAGS: XN_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. 
*/ #ifdef notyet /* No promiscuous mode with Xen */ if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->xn_if_flags & IFF_PROMISC)) { XN_SETBIT(sc, XN_RX_MODE, XN_RXMODE_RX_PROMISC); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->xn_if_flags & IFF_PROMISC) { XN_CLRBIT(sc, XN_RX_MODE, XN_RXMODE_RX_PROMISC); } else #endif xn_ifinit_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { xn_stop(sc); } } sc->xn_if_flags = ifp->if_flags; XN_UNLOCK(sc); error = 0; break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO); } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); } } if (mask & IFCAP_RXCSUM) { ifp->if_capenable ^= IFCAP_RXCSUM; } #if __FreeBSD_version >= 700000 if (mask & IFCAP_TSO4) { if (IFCAP_TSO4 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } else if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } else { IPRINTK("Xen requires tx checksum offload" " be enabled to use TSO\n"); error = EINVAL; } } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; } #endif error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: #ifdef notyet if (ifp->if_drv_flags & IFF_DRV_RUNNING) { XN_LOCK(sc); xn_setmulti(sc); XN_UNLOCK(sc); error = 0; } #endif /* FALLTHROUGH */ case SIOCSIFMEDIA: case SIOCGIFMEDIA: - error = EINVAL; + error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); } return (error); } static void xn_stop(struct netfront_info *sc) { struct ifnet *ifp; XN_LOCK_ASSERT(sc); ifp = sc->xn_ifp; callout_stop(&sc->xn_stat_ch); xn_free_rx_ring(sc); xn_free_tx_ring(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + if_link_state_change(ifp, LINK_STATE_DOWN); } /* START of Xenolinux helper functions adapted to FreeBSD */ int network_connect(struct netfront_info *np) { int i, requeue_idx, error; grant_ref_t ref; netif_rx_request_t *req; u_int feature_rx_copy, feature_rx_flip; error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev), "feature-rx-copy", NULL, "%u", &feature_rx_copy); if (error) feature_rx_copy = 0; error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev), "feature-rx-flip", NULL, "%u", &feature_rx_flip); if (error) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); - XN_LOCK(np); /* Recovery procedure: */ error = talk_to_backend(np->xbdev, np); if (error) return (error); /* Step 1: Reinitialise variables. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. 
*/ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { struct mbuf *m; u_long pfn; if (np->rx_mbufs[i] == NULL) continue; m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); + req = RING_GET_REQUEST(&np->rx, requeue_idx); pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, xenbus_get_otherend_id(np->xbdev), pfn); } else { gnttab_grant_foreign_access_ref(ref, xenbus_get_otherend_id(np->xbdev), PFNTOMFN(pfn), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); notify_remote_via_irq(np->irq); XN_TX_LOCK(np); xn_txeof(np); XN_TX_UNLOCK(np); network_alloc_rx_buffers(np); - XN_UNLOCK(np); return (0); } static void show_device(struct netfront_info *sc) { #ifdef DEBUG if (sc) { IPRINTK("\n", sc->xn_ifno, be_state_name[sc->xn_backend_state], sc->xn_user_state ? "open" : "closed", sc->xn_evtchn, sc->xn_irq, sc->xn_tx_if, sc->xn_rx_if); } else { IPRINTK("\n"); } #endif } /** Create a network device. * @param handle device handle */ int create_netdev(device_t dev) { int i; struct netfront_info *np; int err; struct ifnet *ifp; np = device_get_softc(dev); np->xbdev = dev; XN_LOCK_INIT(np, xennetif); + + ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); + ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); + ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); + np->rx_target = RX_MIN_TARGET; np->rx_min_target = RX_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; /* Initialise {tx,rx}_skbs to be a free chain containing every entry. 
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_mbufs[i] = (void *) ((u_long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } + np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0; + for (i = 0; i <= NET_RX_RING_SIZE; i++) { + np->rx_mbufs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ - if (gnttab_alloc_grant_references(TX_MAX_TARGET, - &np->gref_tx_head) < 0) { - printf("#### netfront can't alloc tx grant refs\n"); + if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, + &np->gref_tx_head) != 0) { + IPRINTK("#### netfront can't alloc tx grant refs\n"); err = ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, - &np->gref_rx_head) < 0) { - printf("#### netfront can't alloc rx grant refs\n"); + &np->gref_rx_head) != 0) { + WPRINTK("#### netfront can't alloc rx grant refs\n"); gnttab_free_grant_references(np->gref_tx_head); err = ENOMEM; goto exit; } err = xen_net_read_mac(dev, np->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", xenbus_get_node(dev)); goto out; } /* Set up ifnet structure */ ifp = np->xn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = np; if_initname(ifp, "xn", device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xn_ioctl; ifp->if_output = ether_output; ifp->if_start = xn_start; #ifdef notyet ifp->if_watchdog = xn_watchdog; #endif ifp->if_init = xn_ifinit; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1; ifp->if_hwassist = XN_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; #if __FreeBSD_version >= 700000 ifp->if_capabilities |= IFCAP_TSO4; if (xn_enable_lro) { int err = tcp_lro_init(&np->xn_lro); if (err) { device_printf(dev, "LRO initialization failed\n"); goto exit; } np->xn_lro.ifp = ifp; ifp->if_capabilities |= IFCAP_LRO; } #endif ifp->if_capenable = ifp->if_capabilities; ether_ifattach(ifp, np->mac); callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE); netfront_carrier_off(np); return (0); exit: gnttab_free_grant_references(np->gref_tx_head); out: panic("do something smart"); } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. 
*/ #if 0 -static void netfront_closing(device_t dev) +static void +netfront_closing(device_t dev) { #if 0 struct netfront_info *info = dev->dev_driver_data; DPRINTK("netfront_closing: %s removed\n", dev->nodename); close_netdev(info); #endif xenbus_switch_state(dev, XenbusStateClosed); } #endif -static int netfront_detach(device_t dev) +static int +netfront_detach(device_t dev) { struct netfront_info *info = device_get_softc(dev); DPRINTK("%s\n", xenbus_get_node(dev)); netif_free(info); return 0; } - -static void netif_free(struct netfront_info *info) +static void +netif_free(struct netfront_info *info) { netif_disconnect_backend(info); #if 0 close_netdev(info); #endif } -static void netif_disconnect_backend(struct netfront_info *info) +static void +netif_disconnect_backend(struct netfront_info *info) { XN_RX_LOCK(info); XN_TX_LOCK(info); netfront_carrier_off(info); XN_TX_UNLOCK(info); XN_RX_UNLOCK(info); end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; if (info->irq) unbind_from_irqhandler(info->irq); info->irq = 0; } -static void end_access(int ref, void *page) +static void +end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, page); +} + +static int +xn_ifmedia_upd(struct ifnet *ifp) +{ + return (0); +} + +static void +xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; + ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* ** Driver registration ** */ static device_method_t netfront_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netfront_probe), DEVMETHOD(device_attach, netfront_attach), DEVMETHOD(device_detach, netfront_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, netfront_resume), /* Xenbus interface */ DEVMETHOD(xenbus_backend_changed, netfront_backend_changed), { 0, 0 } }; static driver_t netfront_driver = { "xn", netfront_methods, sizeof(struct netfront_info), }; devclass_t netfront_devclass; DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0); Index: stable/8/sys/dev/xen/xenpci =================================================================== --- stable/8/sys/dev/xen/xenpci (revision 209060) +++ stable/8/sys/dev/xen/xenpci (revision 209061) Property changes on: stable/8/sys/dev/xen/xenpci ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/dev/xen/xenpci:r199549,199997,204158,207673,208901 Index: stable/8/sys/geom/sched =================================================================== --- stable/8/sys/geom/sched (revision 209060) +++ stable/8/sys/geom/sched (revision 209061) Property changes on: stable/8/sys/geom/sched ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/geom/sched:r199549,199997,204158,207673,208901 Index: stable/8/sys/i386/include/xen/xenfunc.h =================================================================== --- stable/8/sys/i386/include/xen/xenfunc.h (revision 209060) +++ stable/8/sys/i386/include/xen/xenfunc.h (revision 209061) @@ -1,81 +1,78 @@ -/* - * - * Copyright (c) 2004,2005 Kip Macy +/*- + * Copyright (c) 2004, 2005 Kip Macy * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
* * $FreeBSD$ */ - #ifndef _XEN_XENFUNC_H_ #define _XEN_XENFUNC_H_ #include #include #include #include #include #define BKPT __asm__("int3"); #define XPQ_CALL_DEPTH 5 #define XPQ_CALL_COUNT 2 #define PG_PRIV PG_AVAIL3 typedef struct { unsigned long pt_ref; unsigned long pt_eip[XPQ_CALL_COUNT][XPQ_CALL_DEPTH]; } pteinfo_t; extern pteinfo_t *pteinfo_list; #ifdef XENDEBUG_LOW #define __PRINTK(x) printk x #else #define __PRINTK(x) #endif char *xen_setbootenv(char *cmd_line); int xen_boothowto(char *envp); void _xen_machphys_update(vm_paddr_t, vm_paddr_t, char *file, int line); #ifdef INVARIANTS #define xen_machphys_update(a, b) _xen_machphys_update((a), (b), __FILE__, __LINE__) #else #define xen_machphys_update(a, b) _xen_machphys_update((a), (b), NULL, 0) #endif void xen_update_descriptor(union descriptor *, union descriptor *); extern struct mtx balloon_lock; #if 0 #define balloon_lock(__flags) mtx_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) mtx_unlock_irqrestore(&balloon_lock, __flags) #else #define balloon_lock(__flags) __flags = 1 #define balloon_unlock(__flags) __flags = 0 #endif #endif /* _XEN_XENFUNC_H_ */ Index: stable/8/sys/i386/include/xen/xenvar.h =================================================================== --- stable/8/sys/i386/include/xen/xenvar.h (revision 209060) +++ stable/8/sys/i386/include/xen/xenvar.h (revision 209061) @@ -1,105 +1,104 @@ -/* +/*- * Copyright (c) 2008 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
* - * * $FreeBSD$ */ + #ifndef XENVAR_H_ #define XENVAR_H_ #define XBOOTUP 0x1 #define XPMAP 0x2 extern int xendebug_flags; #ifndef NOXENDEBUG #define XENPRINTF printk #else #define XENPRINTF printf #endif #include extern xen_pfn_t *xen_phys_machine; extern xen_pfn_t *xen_pfn_to_mfn_frame_list[16]; extern xen_pfn_t *xen_pfn_to_mfn_frame_list_list; #if 0 #define TRACE_ENTER XENPRINTF("(file=%s, line=%d) entered %s\n", __FILE__, __LINE__, __FUNCTION__) #define TRACE_EXIT XENPRINTF("(file=%s, line=%d) exiting %s\n", __FILE__, __LINE__, __FUNCTION__) #define TRACE_DEBUG(argflags, _f, _a...) \ if (xendebug_flags & argflags) XENPRINTF("(file=%s, line=%d) " _f "\n", __FILE__, __LINE__, ## _a); #else #define TRACE_ENTER #define TRACE_EXIT #define TRACE_DEBUG(argflags, _f, _a...) #endif extern xen_pfn_t *xen_machine_phys; /* Xen starts physical pages after the 4MB ISA hole - * FreeBSD doesn't */ #undef ADD_ISA_HOLE /* XXX */ #ifdef ADD_ISA_HOLE #define ISA_INDEX_OFFSET 1024 #define ISA_PDR_OFFSET 1 #else #define ISA_INDEX_OFFSET 0 #define ISA_PDR_OFFSET 0 #endif #define PFNTOMFN(i) (xen_phys_machine[(i)]) #define MFNTOPFN(i) ((vm_paddr_t)xen_machine_phys[(i)]) #define VTOP(x) ((((uintptr_t)(x))) - KERNBASE) #define PTOV(x) (((uintptr_t)(x)) + KERNBASE) #define VTOPFN(x) (VTOP(x) >> PAGE_SHIFT) #define PFNTOV(x) PTOV((vm_paddr_t)(x) << PAGE_SHIFT) #define VTOMFN(va) (vtomach(va) >> PAGE_SHIFT) #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define phystomach(pa) (((vm_paddr_t)(PFNTOMFN((pa) >> PAGE_SHIFT))) << PAGE_SHIFT) #define machtophys(ma) (((vm_paddr_t)(MFNTOPFN((ma) >> PAGE_SHIFT))) << PAGE_SHIFT) void xpq_init(void); #define BITS_PER_LONG 32 #define NR_CPUS MAX_VIRT_CPUS #define BITS_TO_LONGS(bits) \ (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } xen_cpumask_t; int xen_create_contiguous_region(vm_page_t pages, int npages); void xen_destroy_contiguous_region(void * addr, int npages); #endif Index: stable/8/sys/kern/subr_bufring.c =================================================================== --- stable/8/sys/kern/subr_bufring.c (revision 209060) +++ stable/8/sys/kern/subr_bufring.c (revision 209061) @@ -1,68 +1,65 @@ -/************************************************************************** - * - * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org +/*- + * Copyright (c) 2007, 2008 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. The name of Kip Macy nor the names of other - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * - ***************************************************************************/ + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include struct buf_ring * buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *lock) { struct buf_ring *br; KASSERT(powerof2(count), ("buf ring must be size power of 2")); br = malloc(sizeof(struct buf_ring) + count*sizeof(caddr_t), type, flags|M_ZERO); if (br == NULL) return (NULL); #ifdef DEBUG_BUFRING br->br_lock = lock; #endif br->br_prod_size = br->br_cons_size = count; br->br_prod_mask = br->br_cons_mask = count-1; br->br_prod_head = br->br_cons_head = 0; br->br_prod_tail = br->br_cons_tail = 0; return (br); } void buf_ring_free(struct buf_ring *br, struct malloc_type *type) { free(br, type); } Index: stable/8/sys/sys/buf_ring.h =================================================================== --- stable/8/sys/sys/buf_ring.h (revision 209060) +++ stable/8/sys/sys/buf_ring.h (revision 209061) @@ -1,279 +1,277 @@ -/************************************************************************** - * - * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org +/*- + * Copyright (c) 2007-2009 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. 
The name of Kip Macy nor the names of other - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. * * $FreeBSD$ * - ***************************************************************************/ + */ #ifndef _SYS_BUF_RING_H_ #define _SYS_BUF_RING_H_ #include #if defined(INVARIANTS) && !defined(DEBUG_BUFRING) #define DEBUG_BUFRING 1 #endif #ifdef DEBUG_BUFRING #include #include #endif struct buf_ring { volatile uint32_t br_prod_head; volatile uint32_t br_prod_tail; int br_prod_size; int br_prod_mask; uint64_t br_drops; uint64_t br_prod_bufs; uint64_t br_prod_bytes; /* * Pad out to next L2 cache line */ uint64_t _pad0[11]; volatile uint32_t br_cons_head; volatile uint32_t br_cons_tail; int br_cons_size; int br_cons_mask; /* * Pad out to next L2 cache line */ uint64_t _pad1[14]; #ifdef DEBUG_BUFRING struct mtx *br_lock; #endif void *br_ring[0]; }; /* * multi-producer safe lock-free ring buffer enqueue * */ static __inline int buf_ring_enqueue_bytes(struct buf_ring *br, void *buf, int nbytes) { uint32_t prod_head, prod_next; uint32_t cons_tail; int success; #ifdef DEBUG_BUFRING int i; for (i = br->br_cons_head; i != br->br_prod_head; i = ((i + 1) & br->br_cons_mask)) if(br->br_ring[i] == buf) panic("buf=%p already enqueue at %d prod=%d cons=%d", buf, i, br->br_prod_tail, br->br_cons_tail); #endif critical_enter(); do { prod_head = br->br_prod_head; cons_tail = br->br_cons_tail; prod_next = (prod_head + 1) & br->br_prod_mask; if (prod_next == cons_tail) { critical_exit(); return (ENOBUFS); } success = atomic_cmpset_int(&br->br_prod_head, prod_head, prod_next); } while (success == 0); #ifdef DEBUG_BUFRING if (br->br_ring[prod_head] != NULL) panic("dangling value in enqueue"); #endif br->br_ring[prod_head] = buf; wmb(); /* * If there are other enqueues in progress * that preceeded us, we need to wait for them * to complete */ while (br->br_prod_tail != prod_head) cpu_spinwait(); br->br_prod_bufs++; br->br_prod_bytes += nbytes; br->br_prod_tail = 
prod_next; critical_exit(); return (0); } static __inline int buf_ring_enqueue(struct buf_ring *br, void *buf) { return (buf_ring_enqueue_bytes(br, buf, 0)); } /* * multi-consumer safe dequeue * */ static __inline void * buf_ring_dequeue_mc(struct buf_ring *br) { uint32_t cons_head, cons_next; uint32_t prod_tail; void *buf; int success; critical_enter(); do { cons_head = br->br_cons_head; prod_tail = br->br_prod_tail; cons_next = (cons_head + 1) & br->br_cons_mask; if (cons_head == prod_tail) { critical_exit(); return (NULL); } success = atomic_cmpset_int(&br->br_cons_head, cons_head, cons_next); } while (success == 0); buf = br->br_ring[cons_head]; #ifdef DEBUG_BUFRING br->br_ring[cons_head] = NULL; #endif rmb(); /* * If there are other dequeues in progress * that preceeded us, we need to wait for them * to complete */ while (br->br_cons_tail != cons_head) cpu_spinwait(); br->br_cons_tail = cons_next; critical_exit(); return (buf); } /* * single-consumer dequeue * use where dequeue is protected by a lock * e.g. a network driver's tx queue lock */ static __inline void * buf_ring_dequeue_sc(struct buf_ring *br) { uint32_t cons_head, cons_next, cons_next_next; uint32_t prod_tail; void *buf; cons_head = br->br_cons_head; prod_tail = br->br_prod_tail; cons_next = (cons_head + 1) & br->br_cons_mask; cons_next_next = (cons_head + 2) & br->br_cons_mask; if (cons_head == prod_tail) return (NULL); #ifdef PREFETCH_DEFINED if (cons_next != prod_tail) { prefetch(br->br_ring[cons_next]); if (cons_next_next != prod_tail) prefetch(br->br_ring[cons_next_next]); } #endif br->br_cons_head = cons_next; buf = br->br_ring[cons_head]; #ifdef DEBUG_BUFRING br->br_ring[cons_head] = NULL; if (!mtx_owned(br->br_lock)) panic("lock not held on single consumer dequeue"); if (br->br_cons_tail != cons_head) panic("inconsistent list cons_tail=%d cons_head=%d", br->br_cons_tail, cons_head); #endif br->br_cons_tail = cons_next; return (buf); } /* * return a pointer to the first entry in the ring * without modifying it, or NULL if the ring is empty * race-prone if not protected by a lock */ static __inline void * buf_ring_peek(struct buf_ring *br) { #ifdef DEBUG_BUFRING if ((br->br_lock != NULL) && !mtx_owned(br->br_lock)) panic("lock not held on single consumer dequeue"); #endif /* * I believe it is safe to not have a memory barrier * here because we control cons and tail is worst case * a lagging indicator so we worst case we might * return NULL immediately after a buffer has been enqueued */ if (br->br_cons_head == br->br_prod_tail) return (NULL); return (br->br_ring[br->br_cons_head]); } static __inline int buf_ring_full(struct buf_ring *br) { return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail); } static __inline int buf_ring_empty(struct buf_ring *br) { return (br->br_cons_head == br->br_prod_tail); } static __inline int buf_ring_count(struct buf_ring *br) { return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail) & br->br_prod_mask); } struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *); void buf_ring_free(struct buf_ring *br, struct malloc_type *type); #endif Index: stable/8/sys =================================================================== --- stable/8/sys (revision 209060) +++ stable/8/sys (revision 209061) Property changes on: stable/8/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r199549,199997,204158,207673,208901
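
The netfront changes above convert xennet_get_extras() and xennet_get_responses() to take the RX ring consumer index by reference instead of reading and writing np->rx.rsp_cons inside the helpers, and they switch the error returns from negative Linux-style values (-EINVAL, -ENOENT, -E2BIG) to plain FreeBSD errno values. A minimal sketch of the intended calling convention follows; the surrounding loop is simplified and hypothetical, and only the helper signature and the single final store to rsp_cons are taken from the diff.

	/*
	 * Hedged sketch: thread the consumer index through the helper and
	 * publish it once, after the whole batch of responses is handled.
	 */
	RING_IDX i, rp;
	int err, pages_flipped = 0;

	rp = np->rx.sring->rsp_prod;
	rmb();				/* see all responses up to rp */

	i = np->rx.rsp_cons;
	while (i != rp) {
		struct netfront_rx_info rinfo;
		struct mbuf *m = NULL;

		memcpy(&rinfo.rx, RING_GET_RESPONSE(&np->rx, i),
		    sizeof(rinfo.rx));
		memset(rinfo.extras, 0, sizeof(rinfo.extras));

		/* The helper advances 'i' past the response and any extras. */
		err = xennet_get_responses(np, &rinfo, rp, &i, &m,
		    &pages_flipped);
		if (err != 0) {
			/* Positive errno now: EINVAL, ENOENT, E2BIG, ... */
			continue;
		}
		/* ... hand 'm' up the stack ... */
	}
	np->rx.rsp_cons = i;		/* single, final update */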
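
The buf_ring files merged at the end provide a lock-free multi-producer, single-consumer ring that is typically used as a per-queue staging area on transmit paths. A brief usage sketch follows; "example_softc", its tx_mtx, and hw_transmit() are illustrative assumptions only and are not part of this commit.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/malloc.h>
	#include <sys/mbuf.h>
	#include <sys/buf_ring.h>

	struct example_softc {
		struct mtx	 tx_mtx;
		struct buf_ring	*tx_br;
	};

	/* Hypothetical hardware hand-off, for illustration only. */
	static int hw_transmit(struct example_softc *, struct mbuf *);

	static void
	example_attach(struct example_softc *sc)
	{
		mtx_init(&sc->tx_mtx, "example tx", NULL, MTX_DEF);
		/* The entry count must be a power of two. */
		sc->tx_br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK,
		    &sc->tx_mtx);
	}

	static void
	example_tx(struct example_softc *sc, struct mbuf *m)
	{
		/* Producer side: safe from several threads concurrently. */
		if (buf_ring_enqueue(sc->tx_br, m) == ENOBUFS) {
			m_freem(m);	/* ring full, drop the packet */
			return;
		}

		/* Consumer side: single consumer, serialized by the tx lock. */
		if (mtx_trylock(&sc->tx_mtx)) {
			while ((m = buf_ring_dequeue_sc(sc->tx_br)) != NULL)
				(void)hw_transmit(sc, m);
			mtx_unlock(&sc->tx_mtx);
		}
	}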