Index: sys/dev/xen/netback/netback.c
===================================================================
--- sys/dev/xen/netback/netback.c
+++ sys/dev/xen/netback/netback.c
@@ -164,7 +164,7 @@
 		     netif_tx_back_ring_t *ring, int error);
 static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
 static int	xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
-				 const struct mbuf *mbufc,
+				 struct mbuf *mbufc,
 				 gnttab_copy_table gnttab,
 				 const netif_tx_back_ring_t *txb,
 				 domid_t otherend_id);
@@ -1709,12 +1709,12 @@
  * \return	The number of gnttab entries filled
  */
 static int
-xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
+xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc,
 		 gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
 		 domid_t otherend_id)
 {
-	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
+	struct mbuf *mbuf = mbufc;	/* current mbuf within the chain */
 	int gnt_idx = 0;		/* index into grant table */
 	RING_IDX r_idx = pkt->car;	/* index into tx ring buffer */
 	int r_ofs = 0;	/* offset of next data within tx request's data area */
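The const qualifiers dropped above recur throughout this patch for one reason: once the reference count can live inside the source mbuf itself (EXT_FLAG_EMBREF, introduced below), taking another reference writes to that mbuf, so copy paths can no longer promise not to modify their source. A minimal sketch of the consequence (copy_for_tx() is a hypothetical helper, not part of the patch):

	/*
	 * Sketch only: with an embedded refcount, m_copym() may bump a
	 * counter stored inside '*m', so the source cannot be const.
	 */
	static struct mbuf *
	copy_for_tx(struct mbuf *m, int len)	/* was: const struct mbuf * */
	{

		return (m_copym(m, 0, len, M_NOWAIT));
	}
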
Index: sys/kern/kern_mbuf.c
===================================================================
--- sys/kern/kern_mbuf.c
+++ sys/kern/kern_mbuf.c
@@ -269,7 +269,6 @@
 uma_zone_t	zone_jumbop;
 uma_zone_t	zone_jumbo9;
 uma_zone_t	zone_jumbo16;
-uma_zone_t	zone_ext_refcnt;
 
 /*
  * Local prototypes.
@@ -278,7 +277,6 @@
 static int	mb_ctor_clust(void *, int, void *, int);
 static int	mb_ctor_pack(void *, int, void *, int);
 static void	mb_dtor_mbuf(void *, int, void *);
-static void	mb_dtor_clust(void *, int, void *);
 static void	mb_dtor_pack(void *, int, void *);
 static int	mb_zinit_pack(void *, int, int);
 static void	mb_zfini_pack(void *, int);
@@ -312,13 +310,13 @@
 	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);
 
 	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
-	    mb_ctor_clust, mb_dtor_clust,
+	    mb_ctor_clust,
 #ifdef INVARIANTS
-	    trash_init, trash_fini,
+	    trash_dtor, trash_init, trash_fini,
#else
-	    NULL, NULL,
+	    NULL, NULL, NULL,
 #endif
-	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+	    UMA_ALIGN_PTR, 0);
 	if (nmbclusters > 0)
 		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
 	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
@@ -329,26 +327,26 @@
 	/* Make jumbo frame zone too. Page size, 9k and 16k. */
 	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
-	    mb_ctor_clust, mb_dtor_clust,
+	    mb_ctor_clust,
 #ifdef INVARIANTS
-	    trash_init, trash_fini,
+	    trash_dtor, trash_init, trash_fini,
 #else
-	    NULL, NULL,
+	    NULL, NULL, NULL,
 #endif
-	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+	    UMA_ALIGN_PTR, 0);
 	if (nmbjumbop > 0)
 		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
 	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
 	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);
 
 	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
-	    mb_ctor_clust, mb_dtor_clust,
+	    mb_ctor_clust,
 #ifdef INVARIANTS
-	    trash_init, trash_fini,
+	    trash_dtor, trash_init, trash_fini,
 #else
-	    NULL, NULL,
+	    NULL, NULL, NULL,
 #endif
-	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+	    UMA_ALIGN_PTR, 0);
 	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
 	if (nmbjumbo9 > 0)
 		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
@@ -356,24 +354,19 @@
 	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);
 
 	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
-	    mb_ctor_clust, mb_dtor_clust,
+	    mb_ctor_clust,
 #ifdef INVARIANTS
-	    trash_init, trash_fini,
+	    trash_dtor, trash_init, trash_fini,
 #else
-	    NULL, NULL,
+	    NULL, NULL, NULL,
 #endif
-	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+	    UMA_ALIGN_PTR, 0);
 	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
 	if (nmbjumbo16 > 0)
 		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
 	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
 	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);
 
-	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
-	    NULL, NULL,
-	    NULL, NULL,
-	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
-
 	/*
 	 * Hook event handler for low-memory situation, used to
 	 * drain protocols and push data back to the caches (UMA
@@ -477,7 +470,6 @@
 	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
 	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
 	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
-	KASSERT(*m->m_ext.ext_cnt == 1, ("%s: ext_cnt != 1", __func__));
 #ifdef INVARIANTS
 	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
 #endif
@@ -505,40 +497,11 @@
 mb_ctor_clust(void *mem, int size, void *arg, int how)
 {
 	struct mbuf *m;
-	u_int *refcnt;
-	int type;
-	uma_zone_t zone;
 
 #ifdef INVARIANTS
 	trash_ctor(mem, size, arg, how);
 #endif
-	switch (size) {
-	case MCLBYTES:
-		type = EXT_CLUSTER;
-		zone = zone_clust;
-		break;
-#if MJUMPAGESIZE != MCLBYTES
-	case MJUMPAGESIZE:
-		type = EXT_JUMBOP;
-		zone = zone_jumbop;
-		break;
-#endif
-	case MJUM9BYTES:
-		type = EXT_JUMBO9;
-		zone = zone_jumbo9;
-		break;
-	case MJUM16BYTES:
-		type = EXT_JUMBO16;
-		zone = zone_jumbo16;
-		break;
-	default:
-		panic("unknown cluster size");
-		break;
-	}
-
 	m = (struct mbuf *)arg;
-	refcnt = uma_find_refcnt(zone, mem);
-	*refcnt = 1;
 	if (m != NULL) {
 		m->m_ext.ext_buf = (caddr_t)mem;
 		m->m_data = m->m_ext.ext_buf;
@@ -547,33 +510,15 @@
 		m->m_ext.ext_arg1 = NULL;
 		m->m_ext.ext_arg2 = NULL;
 		m->m_ext.ext_size = size;
-		m->m_ext.ext_type = type;
-		m->m_ext.ext_flags = 0;
-		m->m_ext.ext_cnt = refcnt;
+		m->m_ext.ext_type = m_gettype(size);
+		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
+		m->m_ext.ext_count = 1;
 	}
 
 	return (0);
 }
 
 /*
- * The Mbuf Cluster zone destructor.
- */
-static void
-mb_dtor_clust(void *mem, int size, void *arg)
-{
-#ifdef INVARIANTS
-	uma_zone_t zone;
-
-	zone = m_getzone(size);
-	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
-	    ("%s: refcnt incorrect %u", __func__,
-	    *(uma_find_refcnt(zone, mem))) );
-
-	trash_dtor(mem, size, arg);
-#endif
-}
-
-/*
  * The Packet secondary zone's init routine, executed on the
  * object's transition from mbuf keg slab to zone cache.
  */
@@ -670,58 +615,69 @@
 void
 mb_free_ext(struct mbuf *m)
 {
+	volatile u_int *refcnt;
+	struct mbuf *mref;
 	int freembuf;
 
 	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
 
+	/* See if this is the mbuf that holds the embedded refcount. */
+	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
+		refcnt = &m->m_ext.ext_count;
+		mref = m;
+	} else {
+		KASSERT(m->m_ext.ext_cnt != NULL,
+		    ("%s: no refcounting pointer on %p", __func__, m));
+		refcnt = m->m_ext.ext_cnt;
+		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
+	}
+
 	/*
-	 * Check if the header is embedded in the cluster.
+	 * Check if the header is embedded in the cluster.  It is
+	 * important that we can't touch any of the mbuf fields
+	 * after we have freed the external storage, since mbuf
+	 * could have been embedded in it.
	 */
 	freembuf = (m->m_flags & M_NOFREE) ? 0 : 1;
 
-	switch (m->m_ext.ext_type) {
-	case EXT_SFBUF:
-		sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
-		break;
-	case EXT_SFBUF_NOCACHE:
-		sf_ext_free_nocache(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
-		break;
-	default:
-		KASSERT(m->m_ext.ext_cnt != NULL,
-		    ("%s: no refcounting pointer on %p", __func__, m));
-		/*
-		 * Free attached storage if this mbuf is the only
-		 * reference to it.
-		 */
-		if (*(m->m_ext.ext_cnt) != 1) {
-			if (atomic_fetchadd_int(m->m_ext.ext_cnt, -1) != 1)
-				break;
-		}
-
+	/* Free attached storage if this mbuf is the only reference to it. */
+	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
 		switch (m->m_ext.ext_type) {
-		case EXT_PACKET:	/* The packet zone is special. */
-			if (*(m->m_ext.ext_cnt) == 0)
-				*(m->m_ext.ext_cnt) = 1;
-			uma_zfree(zone_pack, m);
-			return;		/* Job done. */
+		case EXT_PACKET:
+			/* The packet zone is special. */
+			if (*refcnt == 0)
+				*refcnt = 1;
+			uma_zfree(zone_pack, mref);
+			break;
 		case EXT_CLUSTER:
 			uma_zfree(zone_clust, m->m_ext.ext_buf);
+			uma_zfree(zone_mbuf, mref);
 			break;
 		case EXT_JUMBOP:
 			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
+			uma_zfree(zone_mbuf, mref);
 			break;
 		case EXT_JUMBO9:
 			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
+			uma_zfree(zone_mbuf, mref);
 			break;
 		case EXT_JUMBO16:
 			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
+			uma_zfree(zone_mbuf, mref);
+			break;
+		case EXT_SFBUF:
+			sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
+			uma_zfree(zone_mbuf, mref);
+			break;
+		case EXT_SFBUF_NOCACHE:
+			sf_ext_free_nocache(m->m_ext.ext_arg1,
+			    m->m_ext.ext_arg2);
+			uma_zfree(zone_mbuf, mref);
 			break;
 		case EXT_NET_DRV:
 		case EXT_MOD_TYPE:
 		case EXT_DISPOSABLE:
-			*(m->m_ext.ext_cnt) = 0;
-			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
-			    m->m_ext.ext_cnt));
+			uma_zfree(zone_mbuf, mref);
 			/* FALLTHROUGH */
 		case EXT_EXTREF:
 			KASSERT(m->m_ext.ext_free != NULL,
@@ -735,7 +691,7 @@
 		}
 	}
 
-	if (freembuf)
+	if (freembuf && m != mref)
 		uma_zfree(zone_mbuf, m);
 }
 
@@ -925,9 +881,7 @@
 
 /*-
  * Configure a provided mbuf to refer to the provided external storage
- * buffer and setup a reference count for said buffer.  If the setting
- * up of the reference count fails, the M_EXT bit will not be set.  If
- * successfull, the M_EXT bit is set in the mbuf's flags.
+ * buffer and setup a reference count for said buffer.
  *
 * Arguments:
 *    mb      The existing mbuf to which to attach the provided buffer.
 *
@@ -944,20 +898,14 @@
 * Returns:
 *    Nothing.
 */
-int
+void
 m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
     void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
-    int flags, int type, int wait)
+    int flags, int type)
 {
-	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
-	if (type != EXT_EXTREF)
-		mb->m_ext.ext_cnt = uma_zalloc(zone_ext_refcnt, wait);
-
-	if (mb->m_ext.ext_cnt == NULL)
-		return (ENOMEM);
+	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
-	*(mb->m_ext.ext_cnt) = 1;
 	mb->m_flags |= (M_EXT | flags);
 	mb->m_ext.ext_buf = buf;
 	mb->m_data = mb->m_ext.ext_buf;
@@ -966,9 +914,12 @@
 	mb->m_ext.ext_arg1 = arg1;
 	mb->m_ext.ext_arg2 = arg2;
 	mb->m_ext.ext_type = type;
-	mb->m_ext.ext_flags = 0;
-	return (0);
+	if (type != EXT_EXTREF) {
+		mb->m_ext.ext_count = 1;
+		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
+	} else
+		mb->m_ext.ext_flags = 0;
 }
 
 /*
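With the count embedded, m_extadd() no longer allocates a separate refcount and therefore cannot fail, which is why it now returns void. A sketch of a driver attaching its own storage under the new interface (my_free() and my_attach() are hypothetical names; assumes a malloc(9)-backed buffer):

	static void
	my_free(struct mbuf *m, void *arg1, void *arg2)
	{

		/* Last reference is gone; release the attached buffer. */
		free(arg1, M_DEVBUF);
	}

	static struct mbuf *
	my_attach(void *buf, u_int size)
	{
		struct mbuf *m;

		if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
			return (NULL);
		/* Sets M_EXT and EXT_FLAG_EMBREF with ext_count = 1. */
		m_extadd(m, (caddr_t)buf, size, my_free, buf, NULL, 0,
		    EXT_NET_DRV);
		return (m);
	}
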
Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -119,31 +119,6 @@
     NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
 
 /*
- * Add more references to a vm_page + sf_buf + sendfile_sync. Called
- * by mbuf(9) code to add extra references to a page.
- */
-void
-sf_ext_ref(void *arg1, void *arg2)
-{
-	struct sf_buf *sf = arg1;
-	struct sendfile_sync *sfs = arg2;
-	vm_page_t pg = sf_buf_page(sf);
-
-	sf_buf_ref(sf);
-
-	vm_page_lock(pg);
-	vm_page_wire(pg);
-	vm_page_unlock(pg);
-
-	if (sfs != NULL) {
-		mtx_lock(&sfs->mtx);
-		KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
-		sfs->count++;
-		mtx_unlock(&sfs->mtx);
-	}
-}
-
-/*
  * Detach mapped page and release resources back to the system. Called
  * by mbuf(9) code when last reference to a page is freed.
  */
@@ -807,7 +782,8 @@
 				m0->m_ext.ext_type = EXT_SFBUF;
 			else
 				m0->m_ext.ext_type = EXT_SFBUF_NOCACHE;
-			m0->m_ext.ext_flags = 0;
+			m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
+			m0->m_ext.ext_count = 1;
 			m0->m_flags |= (M_EXT | M_RDONLY);
 			if (nios)
 				m0->m_flags |= M_NOTREADY;
Index: sys/kern/uipc_mbuf.c
===================================================================
--- sys/kern/uipc_mbuf.c
+++ sys/kern/uipc_mbuf.c
@@ -138,29 +138,31 @@
  * and bump the refcount of the cluster.
  */
 void
-mb_dupcl(struct mbuf *n, const struct mbuf *m)
+mb_dupcl(struct mbuf *n, struct mbuf *m)
 {
+	volatile u_int *refcnt;
 
 	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
 	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));
 
-	switch (m->m_ext.ext_type) {
-	case EXT_SFBUF:
-	case EXT_SFBUF_NOCACHE:
-		sf_ext_ref(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
-		break;
-	default:
+	n->m_ext = m->m_ext;
+	n->m_flags |= M_EXT;
+	n->m_flags |= m->m_flags & M_RDONLY;
+
+	/* See if this is the mbuf that holds the embedded refcount. */
+	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
+		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
+		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
+	} else {
 		KASSERT(m->m_ext.ext_cnt != NULL,
 		    ("%s: no refcounting pointer on %p", __func__, m));
-		if (*(m->m_ext.ext_cnt) == 1)
-			*(m->m_ext.ext_cnt) += 1;
-		else
-			atomic_add_int(m->m_ext.ext_cnt, 1);
+		refcnt = m->m_ext.ext_cnt;
 	}
 
-	n->m_ext = m->m_ext;
-	n->m_flags |= M_EXT;
-	n->m_flags |= m->m_flags & M_RDONLY;
+	if (*refcnt == 1)
+		*refcnt += 1;
+	else
+		atomic_add_int(refcnt, 1);
 }
 
 void
@@ -394,7 +396,7 @@
  * only their reference counts are incremented.
 */
 struct mbuf *
-m_copym(const struct mbuf *m, int off0, int len, int wait)
+m_copym(struct mbuf *m, int off0, int len, int wait)
 {
 	struct mbuf *n, **np;
 	int off = off0;
Index: sys/netinet6/ip6_output.c
===================================================================
--- sys/netinet6/ip6_output.c
+++ sys/netinet6/ip6_output.c
@@ -2972,7 +2972,7 @@
  * pointer that might NOT be &loif -- easier than replicating that code here.
  */
 void
-ip6_mloopback(struct ifnet *ifp, const struct mbuf *m)
+ip6_mloopback(struct ifnet *ifp, struct mbuf *m)
 {
 	struct mbuf *copym;
 	struct ip6_hdr *ip6;
Index: sys/netinet6/ip6_var.h
===================================================================
--- sys/netinet6/ip6_var.h
+++ sys/netinet6/ip6_var.h
@@ -382,7 +382,7 @@
 
 void	ip6_forward(struct mbuf *, int);
 
-void	ip6_mloopback(struct ifnet *, const struct mbuf *);
+void	ip6_mloopback(struct ifnet *, struct mbuf *);
 int	ip6_output(struct mbuf *, struct ip6_pktopts *,
 	    struct route_in6 *, int,
Index: sys/sys/mbuf.h
===================================================================
--- sys/sys/mbuf.h
+++ sys/sys/mbuf.h
@@ -160,7 +160,10 @@
  * they are correct.
 */
 struct m_ext {
-	volatile u_int *ext_cnt;	/* pointer to ref count info */
+	union {
+		volatile u_int	 ext_count;	/* value of ref count info */
+		volatile u_int	*ext_cnt;	/* pointer to ref count info */
+	};
 	caddr_t		 ext_buf;	/* start of buffer */
 	uint32_t	 ext_size;	/* size of buffer, for ext_free */
 	uint32_t	 ext_type:8,	/* type of external storage */
@@ -370,7 +373,7 @@
 * Flags for external mbuf buffer types.
 * NB: limited to the lower 24 bits.
 */
-#define	EXT_FLAG_EMBREF		0x000001	/* embedded ext_cnt, notyet */
+#define	EXT_FLAG_EMBREF		0x000001	/* embedded ext_count */
 #define	EXT_FLAG_EXTREF		0x000002	/* external ext_cnt, notyet */
 #define	EXT_FLAG_NOFREE		0x000010	/* don't free mbuf to pool, notyet */
@@ -396,7 +399,6 @@
 /*
  * External reference/free functions.
 */
-void sf_ext_ref(void *, void *);
 void sf_ext_free(void *, void *);
 void sf_ext_free_nocache(void *, void *);
@@ -524,9 +526,8 @@
 extern uma_zone_t	zone_jumbop;
 extern uma_zone_t	zone_jumbo9;
 extern uma_zone_t	zone_jumbo16;
-extern uma_zone_t	zone_ext_refcnt;
 
-void		 mb_dupcl(struct mbuf *, const struct mbuf *);
+void		 mb_dupcl(struct mbuf *, struct mbuf *);
 void		 mb_free_ext(struct mbuf *);
 void		 m_adj(struct mbuf *, int);
 int		 m_apply(struct mbuf *, int, int,
@@ -539,7 +540,7 @@
 struct mbuf	*m_collapse(struct mbuf *, int, int);
 void		 m_copyback(struct mbuf *, int, int, c_caddr_t);
 void		 m_copydata(const struct mbuf *, int, int, caddr_t);
-struct mbuf	*m_copym(const struct mbuf *, int, int, int);
+struct mbuf	*m_copym(struct mbuf *, int, int, int);
 struct mbuf	*m_copypacket(struct mbuf *, int);
 void		 m_copy_pkthdr(struct mbuf *, struct mbuf *);
 struct mbuf	*m_copyup(struct mbuf *, int, int);
@@ -550,9 +551,9 @@
 		    void (*)(char *, caddr_t, u_int));
 struct mbuf	*m_dup(const struct mbuf *, int);
 int		 m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
-int		 m_extadd(struct mbuf *, caddr_t, u_int,
+void		 m_extadd(struct mbuf *, caddr_t, u_int,
 		    void (*)(struct mbuf *, void *, void *), void *, void *,
-		    int, int, int);
+		    int, int);
 u_int		 m_fixhdr(struct mbuf *);
 struct mbuf	*m_fragment(struct mbuf *, int, int);
 void		 m_freem(struct mbuf *);
@@ -709,30 +710,30 @@
 	return (uma_zalloc_arg(zone_pack, &args, how));
 }
 
+/*
+ * XXX: m_cljset() is a dangerous API.  One must attach only a new,
+ * unreferenced cluster to an mbuf(9).  It is not possible to assert
+ * that, so care can be taken only by users of the API.
+ */
 static __inline void
 m_cljset(struct mbuf *m, void *cl, int type)
 {
-	uma_zone_t zone;
 	int size;
 
 	switch (type) {
 	case EXT_CLUSTER:
 		size = MCLBYTES;
-		zone = zone_clust;
 		break;
 #if MJUMPAGESIZE != MCLBYTES
 	case EXT_JUMBOP:
 		size = MJUMPAGESIZE;
-		zone = zone_jumbop;
 		break;
 #endif
 	case EXT_JUMBO9:
 		size = MJUM9BYTES;
-		zone = zone_jumbo9;
 		break;
 	case EXT_JUMBO16:
 		size = MJUM16BYTES;
-		zone = zone_jumbo16;
 		break;
 	default:
 		panic("%s: unknown cluster type %d", __func__, type);
@@ -743,10 +744,9 @@
 	m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
 	m->m_ext.ext_size = size;
 	m->m_ext.ext_type = type;
-	m->m_ext.ext_flags = 0;
-	m->m_ext.ext_cnt = uma_find_refcnt(zone, cl);
+	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
+	m->m_ext.ext_count = 1;
 	m->m_flags |= M_EXT;
-
 }
 
 static __inline void
@@ -775,6 +775,16 @@
 	return (m);
 }
 
+static inline u_int
+m_extrefcnt(struct mbuf *m)
+{
+
+	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT missing", __func__));
+
+	return ((m->m_ext.ext_flags & EXT_FLAG_EMBREF) ? m->m_ext.ext_count :
+	    *m->m_ext.ext_cnt);
+}
+
 /*
  * mbuf, cluster, and external object allocation macros (for compatibility
  * purposes).
@@ -784,8 +794,8 @@
 #define	MGETHDR(m, how, type)	((m) = m_gethdr((how), (type)))
 #define	MCLGET(m, how)		m_clget((m), (how))
 #define	MEXTADD(m, buf, size, free, arg1, arg2, flags, type)		\
-    (void )m_extadd((m), (caddr_t)(buf), (size), (free), (arg1), (arg2),\
-    (flags), (type), M_NOWAIT)
+    m_extadd((m), (caddr_t)(buf), (size), (free), (arg1), (arg2),	\
+    (flags), (type))
 #define	m_getm(m, len, how, type)					\
     m_getm2((m), (len), (how), (type), M_PKTHDR)
 
@@ -796,7 +806,7 @@
 */
 #define	M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) &&			\
 			 (!(((m)->m_flags & M_EXT)) ||			\
-			 (*((m)->m_ext.ext_cnt) == 1)) )		\
+			 (m_extrefcnt(m) == 1)))
 
 /*
  * Check if the supplied mbuf has a packet header, or else panic.
 */
 #define	M_ASSERTPKTHDR(m)						\
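M_WRITABLE() now goes through m_extrefcnt(), which hides whether the count is embedded or external. A typical copy-on-write pattern that relies on it (sketch only; frob_header() is a hypothetical name and error handling is reduced to the essentials):

	static int
	frob_header(struct mbuf **mp)
	{
		struct mbuf *n;

		if (!M_WRITABLE(*mp)) {
			/* Shared or read-only: take a private copy. */
			if ((n = m_dup(*mp, M_NOWAIT)) == NULL)
				return (ENOMEM);
			m_freem(*mp);
			*mp = n;
		}
		/* ... safe to modify (*mp)'s data here ... */
		return (0);
	}
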
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h
+++ sys/vm/uma.h
@@ -262,7 +262,7 @@
 *	information in the vm_page.
 */
 #define	UMA_ZONE_SECONDARY	0x0200	/* Zone is a Secondary Zone */
-#define	UMA_ZONE_REFCNT		0x0400	/* Allocate refcnts in slabs */
+/*				0x0400	Unused */
#define	UMA_ZONE_MAXBUCKET	0x0800	/* Use largest buckets */
 #define	UMA_ZONE_CACHESPREAD	0x1000	/*
 					 * Spread memory start locations across
@@ -287,7 +287,7 @@
 */
 #define	UMA_ZONE_INHERIT						\
     (UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE |		\
-    UMA_ZONE_HASH | UMA_ZONE_REFCNT | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU)
+    UMA_ZONE_HASH | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU)
 
 /* Definitions for align */
 #define	UMA_ALIGN_PTR	(sizeof(void *) - 1)	/* Alignment fit for ptr */
@@ -623,21 +623,6 @@
 void uma_prealloc(uma_zone_t zone, int itemcnt);
 
 /*
- * Used to lookup the reference counter allocated for an item
- * from a UMA_ZONE_REFCNT zone.  For UMA_ZONE_REFCNT zones,
- * reference counters are allocated for items and stored in
- * the underlying slab header.
- *
- * Arguments:
- *	zone  The UMA_ZONE_REFCNT zone to which the item belongs.
- *	item  The address of the item for which we want a refcnt.
- *
- * Returns:
- *	A pointer to a uint32_t reference counter.
- */
-uint32_t *uma_find_refcnt(uma_zone_t zone, void *item);
-
-/*
 * Used to determine if a fixed-size zone is exhausted.
 *
 * Arguments:
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -112,7 +112,6 @@
 
 /* This is the zone from which all of uma_slab_t's are allocated. */
 static uma_zone_t	slabzone;
-static uma_zone_t	slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
 
 /*
 * The initial hash tables come out of this zone so they can be allocated
@@ -155,12 +154,6 @@
 #define	UMA_STARTUP2	2
 
 /*
- * Only mbuf clusters use ref zones.  Just provide enough references
- * to support the one user.  New code should not use the ref facility.
- */
-static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
-
-/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
@@ -951,7 +944,6 @@
 static uma_slab_t
 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
 {
-	uma_slabrefcnt_t slabref;
 	uma_alloc allocf;
 	uma_slab_t slab;
 	uint8_t *mem;
@@ -1014,11 +1006,6 @@
 #ifdef INVARIANTS
 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
 #endif
-	if (keg->uk_flags & UMA_ZONE_REFCNT) {
-		slabref = (uma_slabrefcnt_t)slab;
-		for (i = 0; i < keg->uk_ipers; i++)
-			slabref->us_refcnt[i] = 0;
-	}
 
 	if (keg->uk_init != NULL) {
 		for (i = 0; i < keg->uk_ipers; i++)
@@ -1266,9 +1253,6 @@
 	    keg->uk_rsize < sizeof(struct pcpu),
 	    ("%s: size %u too large", __func__, keg->uk_rsize));
 
-	if (keg->uk_flags & UMA_ZONE_REFCNT)
-		rsize += sizeof(uint32_t);
-
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 		shsize = 0;
 	else
@@ -1356,8 +1340,6 @@
 	/* Check whether we have enough space to not do OFFPAGE. */
 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
 		shsize = sizeof(struct uma_slab);
-		if (keg->uk_flags & UMA_ZONE_REFCNT)
-			shsize += keg->uk_ipers * sizeof(uint32_t);
 		if (shsize & UMA_ALIGN_PTR)
 			shsize = (shsize & ~UMA_ALIGN_PTR) +
 			    (UMA_ALIGN_PTR + 1);
@@ -1446,7 +1428,7 @@
 	if (arg->flags & UMA_ZONE_ZINIT)
 		keg->uk_init = zero_init;
 
-	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
+	if (arg->flags & UMA_ZONE_MALLOC)
 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
 
 	if (arg->flags & UMA_ZONE_PCPU)
@@ -1458,13 +1440,6 @@
 
 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
 		keg_cachespread_init(keg);
-	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
-		if (keg->uk_size >
-		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
-		    sizeof(uint32_t)))
-			keg_large_init(keg);
-		else
-			keg_small_init(keg);
 	} else {
 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
 			keg_large_init(keg);
@@ -1472,15 +1447,8 @@
 			keg_small_init(keg);
 	}
 
-	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
-		if (keg->uk_flags & UMA_ZONE_REFCNT) {
-			if (keg->uk_ipers > uma_max_ipers_ref)
-				panic("Too many ref items per zone: %d > %d\n",
-				    keg->uk_ipers, uma_max_ipers_ref);
-			keg->uk_slabzone = slabrefzone;
-		} else
-			keg->uk_slabzone = slabzone;
-	}
+	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+		keg->uk_slabzone = slabzone;
 
 	/*
 	 * If we haven't booted yet we need allocations to go through the
@@ -1517,10 +1485,6 @@
 		/* Size of the slab struct and free list */
 		totsize = sizeof(struct uma_slab);
 
-		/* Size of the reference counts. */
-		if (keg->uk_flags & UMA_ZONE_REFCNT)
-			totsize += keg->uk_ipers * sizeof(uint32_t);
-
 		if (totsize & UMA_ALIGN_PTR)
 			totsize = (totsize & ~UMA_ALIGN_PTR) +
 			    (UMA_ALIGN_PTR + 1);
@@ -1534,8 +1498,6 @@
 	 * sure here anyway.
 	 */
 	totsize = keg->uk_pgoff + sizeof(struct uma_slab);
-	if (keg->uk_flags & UMA_ZONE_REFCNT)
-		totsize += keg->uk_ipers * sizeof(uint32_t);
 	if (totsize > PAGE_SIZE * keg->uk_ppera) {
 		printf("zone %s ipers %d rsize %d size %d\n",
 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
@@ -1797,7 +1759,6 @@
 {
 	struct uma_zctor_args args;
 	uma_slab_t slab;
-	u_int slabsize;
 	int i;
 
 #ifdef UMA_DEBUG
@@ -1856,18 +1817,6 @@
 	    NULL, NULL, NULL, NULL,
 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 
-	/*
-	 * We also create a zone for the bigger slabs with reference
-	 * counts in them, to accomodate UMA_ZONE_REFCNT zones.
-	 */
-	slabsize = sizeof(struct uma_slab_refcnt);
-	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
-	slabrefzone = uma_zcreate("UMA RCntSlabs",
-	    slabsize,
-	    NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR,
-	    UMA_ZFLAG_INTERNAL);
-
 	hashzone = uma_zcreate("UMA Hash",
 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 	    NULL, NULL, NULL, NULL,
@@ -2090,14 +2039,7 @@
 		error = EINVAL;
 		goto out;
 	}
-	/*
-	 * Both must either be refcnt, or not be refcnt.
-	 */
-	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
-	    (master->uz_flags & UMA_ZONE_REFCNT)) {
-		error = EINVAL;
-		goto out;
-	}
+
 	/*
 	 * The underlying object must be the same size.  rsize
 	 * may be different.
@@ -3220,26 +3162,6 @@
 }
 
 /* See uma.h */
-uint32_t *
-uma_find_refcnt(uma_zone_t zone, void *item)
-{
-	uma_slabrefcnt_t slabref;
-	uma_slab_t slab;
-	uma_keg_t keg;
-	uint32_t *refcnt;
-	int idx;
-
-	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
-	slabref = (uma_slabrefcnt_t)slab;
-	keg = slab->us_keg;
-	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
-	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
-	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
-	refcnt = &slabref->us_refcnt[idx];
-	return refcnt;
-}
-
-/* See uma.h */
 static void
 uma_reclaim_locked(bool kmem_danger)
 {
@@ -3260,7 +3182,6 @@
 	 * zones are drained.  We have to do the same for buckets.
 	 */
 	zone_drain(slabzone);
-	zone_drain(slabrefzone);
 	bucket_zone_drain();
 }
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -250,17 +250,7 @@
 #define	us_link	us_type._us_link
 #define	us_size	us_type._us_size
 
-/*
- * The slab structure for UMA_ZONE_REFCNT zones for whose items we
- * maintain reference counters in the slab for.
- */
-struct uma_slab_refcnt {
-	struct uma_slab		us_head;	/* slab header data */
-	uint32_t		us_refcnt[0];	/* Actually larger. */
-};
-
 typedef struct uma_slab * uma_slab_t;
-typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
 typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
 
 struct uma_klink {
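For out-of-tree consumers, the visible fallout of retiring UMA_ZONE_REFCNT is twofold: uma_find_refcnt() is gone, and zones that passed UMA_ZONE_REFCNT to uma_zcreate() should now pass 0. Code that peeked at a cluster's counter through the slab can use the new accessor instead (sketch only; cluster_refs() is a hypothetical name):

	static u_int
	cluster_refs(struct mbuf *m)
	{

		/* Before: *uma_find_refcnt(zone_clust, m->m_ext.ext_buf) */
		return (m_extrefcnt(m));
	}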