Index: sys/dev/cxgb/cxgb_adapter.h
===================================================================
--- sys/dev/cxgb/cxgb_adapter.h
+++ sys/dev/cxgb/cxgb_adapter.h
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
Index: sys/dev/cxgb/cxgb_sge.c
===================================================================
--- sys/dev/cxgb/cxgb_sge.c
+++ sys/dev/cxgb/cxgb_sge.c
@@ -77,6 +77,12 @@
 #include
 #include
+/*
+ * Internal mbuf(9) knowledge.
+ */
+uma_zone_t m_getzone(int);
+void m_cljset(struct mbuf *m, void *cl, int type);
+
 int txq_fills = 0;
 int multiq_tx_enable = 1;
@@ -713,7 +719,7 @@
         * We allocate an uninitialized mbuf + cluster, mbuf is
         * initialized after rx.
         */
-       if (q->zone == zone_pack) {
+       if (q->type == EXT_CLUSTER) {
                if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
                        break;
                cl = m->m_ext.ext_buf;
@@ -738,7 +744,7 @@
                    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
                if (err != 0 || cb_arg.error) {
-                       if (q->zone != zone_pack)
+                       if (q->type != EXT_CLUSTER)
                                uma_zfree(q->zone, cl);
                        m_free(m);
                        goto done;
@@ -794,14 +800,12 @@
                if (d->flags & RX_SW_DESC_INUSE) {
                        bus_dmamap_unload(q->entry_tag, d->map);
                        bus_dmamap_destroy(q->entry_tag, d->map);
-                       if (q->zone == zone_pack) {
-                               m_init(d->m, zone_pack, MCLBYTES,
-                                   M_NOWAIT, MT_DATA, M_EXT);
-                               uma_zfree(zone_pack, d->m);
+                       if (q->type == EXT_CLUSTER) {
+                               m_init(d->m, M_NOWAIT, MT_DATA, M_EXT);
+                               uma_zfree(q->zone, d->m);
                        } else {
-                               m_init(d->m, zone_mbuf, MLEN,
-                                   M_NOWAIT, MT_DATA, 0);
-                               uma_zfree(zone_mbuf, d->m);
+                               m_init(d->m, M_NOWAIT, MT_DATA, 0);
+                               m_free(d->m);
                                uma_zfree(q->zone, d->rxsd_cl);
                        }
                }
@@ -2510,24 +2514,12 @@
            flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
        q->fl[0].buf_size = MCLBYTES;
-       q->fl[0].zone = zone_pack;
-       q->fl[0].type = EXT_PACKET;
+       q->fl[0].zone = m_getzone(q->fl[0].buf_size);
+       q->fl[0].type = m_gettype(q->fl[0].buf_size);
-       if (p->jumbo_buf_size == MJUM16BYTES) {
-               q->fl[1].zone = zone_jumbo16;
-               q->fl[1].type = EXT_JUMBO16;
-       } else if (p->jumbo_buf_size == MJUM9BYTES) {
-               q->fl[1].zone = zone_jumbo9;
-               q->fl[1].type = EXT_JUMBO9;
-       } else if (p->jumbo_buf_size == MJUMPAGESIZE) {
-               q->fl[1].zone = zone_jumbop;
-               q->fl[1].type = EXT_JUMBOP;
-       } else {
-               KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
-               ret = EDOOFUS;
-               goto err;
-       }
        q->fl[1].buf_size = p->jumbo_buf_size;
+       q->fl[1].zone = m_getzone(q->fl[1].buf_size);
+       q->fl[1].type = m_gettype(q->fl[1].buf_size);
        /* Allocate and setup the lro_ctrl structure */
        q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
@@ -2725,8 +2717,8 @@
        if ((sopeop == RSPQ_SOP_EOP) || (sopeop == RSPQ_SOP))
                flags |= M_PKTHDR;
-       m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
-       if (fl->zone == zone_pack) {
+       m_init(m, M_NOWAIT, MT_DATA, flags);
+       if (fl->type == EXT_CLUSTER) {
                /*
                 * restore clobbered data pointer
                 */
Index: sys/dev/cxgbe/t4_netmap.c
===================================================================
--- sys/dev/cxgbe/t4_netmap.c
+++ sys/dev/cxgbe/t4_netmap.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
Index: sys/dev/cxgbe/t4_sge.c
===================================================================
--- sys/dev/cxgbe/t4_sge.c
+++ sys/dev/cxgbe/t4_sge.c
@@ -70,6 +70,12 @@
 #include "common/t4_msg.h"
 #include "t4_mp_ring.h"
+/*
+ * Internal mbuf(9) knowledge.
+ */
+uma_zone_t m_getzone(int);
+void m_cljset(struct mbuf *m, void *cl, int type);
+
 #ifdef T4_PKT_TIMESTAMP
 #define RX_COPY_THRESHOLD (MINCLSIZE - 8)
 #else
@@ -1570,7 +1576,7 @@
                MPASS(clm != NULL);
                m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE);
                /* No bzero required */
-               if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA,
+               if (m_init(m, M_NOWAIT, MT_DATA,
                    fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE))
                        return (NULL);
                fl->mbuf_inlined++;
Index: sys/dev/netmap/netmap_generic.c
===================================================================
--- sys/dev/netmap/netmap_generic.c
+++ sys/dev/netmap/netmap_generic.c
@@ -122,7 +122,7 @@
        m->m_ext.ext_free = NULL;
        if (GET_MBUF_REFCNT(m) == 0)
                SET_MBUF_REFCNT(m, 1);
-       uma_zfree(zone_pack, m);
+       m_free(m);
 }

 static inline struct mbuf *
Index: sys/kern/kern_mbuf.c
===================================================================
--- sys/kern/kern_mbuf.c
+++ sys/kern/kern_mbuf.c
@@ -44,8 +44,6 @@
 #include
 #include
-#include
-
 #include
 #include
 #include
@@ -275,7 +273,10 @@
 /*
  * Local prototypes.
+ * XXX: two are exported for sake of cxgbe(4)
  */
+uma_zone_t m_getzone(int);
+void m_cljset(struct mbuf *, void *, int);
 static int mb_ctor_mbuf(void *, int, void *, int);
 static int mb_ctor_clust(void *, int, void *, int);
 static int mb_ctor_pack(void *, int, void *, int);
@@ -284,7 +285,6 @@
 static void mb_dtor_pack(void *, int, void *);
 static int mb_zinit_pack(void *, int, int);
 static void mb_zfini_pack(void *, int);
-
 static void mb_reclaim(uma_zone_t, int);
 static void *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
@@ -435,7 +435,7 @@
        m = (struct mbuf *)mem;
        flags = args->flags;

-       error = m_init(m, NULL, size, how, type, flags);
+       error = m_init(m, how, type, flags);

        return (error);
 }
@@ -635,7 +635,7 @@
        trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
 #endif
-       error = m_init(m, NULL, size, how, type, flags);
+       error = m_init(m, how, type, flags);

        /* m_ext is already initialized. */
        m->m_data = m->m_ext.ext_buf;
@@ -644,24 +644,6 @@
        return (error);
 }

-int
-m_pkthdr_init(struct mbuf *m, int how)
-{
-#ifdef MAC
-       int error;
-#endif
-       m->m_data = m->m_pktdat;
-       bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
-#ifdef MAC
-       /* If the label init fails, fail the alloc */
-       error = mac_mbuf_init(m, how);
-       if (error)
-               return (error);
-#endif
-
-       return (0);
-}
-
 /*
  * This is the protocol drain routine. Called by UMA whenever any of the
  * mbuf zones is closed to its limit.
@@ -683,3 +665,407 @@
        if (pr->pr_drain != NULL)
                (*pr->pr_drain)();
 }
+
+/*
+ * Select zone by size.
+ */
+uma_zone_t
+m_getzone(int size)
+{
+       uma_zone_t zone;
+
+       switch (size) {
+       case MCLBYTES:
+               zone = zone_clust;
+               break;
+#if MJUMPAGESIZE != MCLBYTES
+       case MJUMPAGESIZE:
+               zone = zone_jumbop;
+               break;
+#endif
+       case MJUM9BYTES:
+               zone = zone_jumbo9;
+               break;
+       case MJUM16BYTES:
+               zone = zone_jumbo16;
+               break;
+       default:
+               panic("%s: invalid cluster size %d", __func__, size);
+       }
+
+       return (zone);
+}
+
+/*
+ * Clean up after mbufs with M_EXT storage attached to them if the
+ * reference count hits 1.
+ */
+static void
+mb_free_ext(struct mbuf *m)
+{
+       int freembuf;
+
+       KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
+
+       /*
+        * Check if the header is embedded in the cluster.
+        */
+       freembuf = (m->m_flags & M_NOFREE) ? 0 : 1;
+
+       switch (m->m_ext.ext_type) {
+       case EXT_SFBUF:
+               sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
+               break;
+       case EXT_SFBUF_NOCACHE:
+               sf_ext_free_nocache(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
+               break;
+       default:
+               KASSERT(m->m_ext.ext_cnt != NULL,
+                   ("%s: no refcounting pointer on %p", __func__, m));
+               /*
+                * Free attached storage if this mbuf is the only
+                * reference to it.
+                */
+               if (*(m->m_ext.ext_cnt) != 1) {
+                       if (atomic_fetchadd_int(m->m_ext.ext_cnt, -1) != 1)
+                               break;
+               }
+
+               switch (m->m_ext.ext_type) {
+               case EXT_PACKET:        /* The packet zone is special. */
+                       if (*(m->m_ext.ext_cnt) == 0)
+                               *(m->m_ext.ext_cnt) = 1;
+                       uma_zfree(zone_pack, m);
+                       return;         /* Job done. */
+               case EXT_CLUSTER:
+                       uma_zfree(zone_clust, m->m_ext.ext_buf);
+                       break;
+               case EXT_JUMBOP:
+                       uma_zfree(zone_jumbop, m->m_ext.ext_buf);
+                       break;
+               case EXT_JUMBO9:
+                       uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
+                       break;
+               case EXT_JUMBO16:
+                       uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
+                       break;
+               case EXT_NET_DRV:
+               case EXT_MOD_TYPE:
+               case EXT_DISPOSABLE:
+                       *(m->m_ext.ext_cnt) = 0;
+                       uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
+                           m->m_ext.ext_cnt));
+                       /* FALLTHROUGH */
+               case EXT_EXTREF:
+                       KASSERT(m->m_ext.ext_free != NULL,
+                           ("%s: ext_free not set", __func__));
+                       (*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
+                           m->m_ext.ext_arg2);
+                       break;
+               default:
+                       KASSERT(m->m_ext.ext_type == 0,
+                           ("%s: unknown ext_type", __func__));
+               }
+       }
+
+       if (freembuf)
+               uma_zfree(zone_mbuf, m);
+}
+
+/*
+ * Official mbuf(9) allocation KPI for stack and drivers:
+ *
+ * m_get()      - a single mbuf without any attachments, sys/mbuf.h.
+ * m_gethdr()   - a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
+ * m_getcl()    - an mbuf + 2k cluster, sys/mbuf.h.
+ * m_clget()    - attach cluster to already allocated mbuf.
+ * m_cljget()   - attach jumbo cluster to already allocated mbuf.
+ * m_get2()     - allocate minimum mbuf that would fit size argument.
+ * m_getm2()    - allocate a chain of mbufs/clusters.
+ * m_extadd()   - attach external cluster to mbuf.
+ *
+ * m_free()     - free single mbuf with its tags and ext, returns next mbuf.
+ * m_freem()    - free chain of mbufs.
+ */
+
+int
+m_clget(struct mbuf *m, int how)
+{
+
+       KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
+           __func__, m));
+       m->m_ext.ext_buf = (char *)NULL;
+       uma_zalloc_arg(zone_clust, m, how);
+       /*
+        * On a cluster allocation failure, drain the packet zone and retry,
+        * we might be able to loosen a few clusters up on the drain.
+        */
+       if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
+               zone_drain(zone_pack);
+               uma_zalloc_arg(zone_clust, m, how);
+       }
+       return (m->m_flags & M_EXT);
+}
+
+/*
+ * m_cljget() is different from m_clget() as it can allocate clusters without
+ * attaching them to an mbuf.  In that case the return value is the pointer
+ * to the cluster of the requested size.  If an mbuf was specified, it gets
+ * the cluster attached to it and the return value can be safely ignored.
+ * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
+ */
+void *
+m_cljget(struct mbuf *m, int how, int size)
+{
+       uma_zone_t zone;
+
+       if (m != NULL) {
+               KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
+                   __func__, m));
+               m->m_ext.ext_buf = NULL;
+       }
+
+       zone = m_getzone(size);
+       return (uma_zalloc_arg(zone, m, how));
+}
+
+void
+m_cljset(struct mbuf *m, void *cl, int type)
+{
+       uma_zone_t zone;
+       int size;
+
+       switch (type) {
+       case EXT_CLUSTER:
+               size = MCLBYTES;
+               zone = zone_clust;
+               break;
+#if MJUMPAGESIZE != MCLBYTES
+       case EXT_JUMBOP:
+               size = MJUMPAGESIZE;
+               zone = zone_jumbop;
+               break;
+#endif
+       case EXT_JUMBO9:
+               size = MJUM9BYTES;
+               zone = zone_jumbo9;
+               break;
+       case EXT_JUMBO16:
+               size = MJUM16BYTES;
+               zone = zone_jumbo16;
+               break;
+       default:
+               panic("%s: unknown cluster type %d", __func__, type);
+               break;
+       }
+
+       m->m_data = m->m_ext.ext_buf = cl;
+       m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
+       m->m_ext.ext_size = size;
+       m->m_ext.ext_type = type;
+       m->m_ext.ext_flags = 0;
+       m->m_ext.ext_cnt = uma_find_refcnt(zone, cl);
+       m->m_flags |= M_EXT;
+
+}
+
+/*
+ * m_get2() allocates minimum mbuf that would fit "size" argument.
+ */
+struct mbuf *
+m_get2(int size, int how, short type, int flags)
+{
+       struct mb_args args;
+       struct mbuf *m, *n;
+
+       args.flags = flags;
+       args.type = type;
+
+       if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
+               return (uma_zalloc_arg(zone_mbuf, &args, how));
+       if (size <= MCLBYTES)
+               return (uma_zalloc_arg(zone_pack, &args, how));
+
+       if (size > MJUMPAGESIZE)
+               return (NULL);
+
+       m = uma_zalloc_arg(zone_mbuf, &args, how);
+       if (m == NULL)
+               return (NULL);
+
+       n = uma_zalloc_arg(zone_jumbop, m, how);
+       if (n == NULL) {
+               uma_zfree(zone_mbuf, m);
+               return (NULL);
+       }
+
+       return (m);
+}
+
+/*
+ * m_getjcl() returns an mbuf with a cluster of the specified size attached.
+ * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
+ */
+struct mbuf *
+m_getjcl(int how, short type, int flags, int size)
+{
+       struct mb_args args;
+       struct mbuf *m, *n;
+       uma_zone_t zone;
+
+       if (size == MCLBYTES)
+               return m_getcl(how, type, flags);
+
+       args.flags = flags;
+       args.type = type;
+
+       m = uma_zalloc_arg(zone_mbuf, &args, how);
+       if (m == NULL)
+               return (NULL);
+
+       zone = m_getzone(size);
+       n = uma_zalloc_arg(zone, m, how);
+       if (n == NULL) {
+               uma_zfree(zone_mbuf, m);
+               return (NULL);
+       }
+       return (m);
+}
+
+/*
+ * Allocate a given length worth of mbufs and/or clusters (whatever fits
+ * best) and return a pointer to the top of the allocated chain.  If an
+ * existing mbuf chain is provided, then we will append the new chain
+ * to the existing one but still return the top of the newly allocated
+ * chain.
+ */
+struct mbuf *
+m_getm2(struct mbuf *m, int len, int how, short type, int flags)
+{
+       struct mbuf *mb, *nm = NULL, *mtail = NULL;
+
+       KASSERT(len >= 0, ("%s: len is < 0", __func__));
+
+       /* Validate flags. */
+       flags &= (M_PKTHDR | M_EOR);
+
+       /* Packet header mbuf must be first in chain. */
+       if ((flags & M_PKTHDR) && m != NULL)
+               flags &= ~M_PKTHDR;
+
+       /* Loop and append maximum sized mbufs to the chain tail. */
+       while (len > 0) {
+               if (len > MCLBYTES)
+                       mb = m_getjcl(how, type, (flags & M_PKTHDR),
+                           MJUMPAGESIZE);
+               else if (len >= MINCLSIZE)
+                       mb = m_getcl(how, type, (flags & M_PKTHDR));
+               else if (flags & M_PKTHDR)
+                       mb = m_gethdr(how, type);
+               else
+                       mb = m_get(how, type);
+
+               /* Fail the whole operation if one mbuf can't be allocated. */
+               if (mb == NULL) {
+                       if (nm != NULL)
+                               m_freem(nm);
+                       return (NULL);
+               }
+
+               /* Book keeping. */
+               len -= M_SIZE(mb);
+               if (mtail != NULL)
+                       mtail->m_next = mb;
+               else
+                       nm = mb;
+               mtail = mb;
+               flags &= ~M_PKTHDR;     /* Only valid on the first mbuf. */
+       }
+       if (flags & M_EOR)
+               mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
+
+       /* If mbuf was supplied, append new chain to the end of it. */
+       if (m != NULL) {
+               for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
+                       ;
+               mtail->m_next = nm;
+               mtail->m_flags &= ~M_EOR;
+       } else
+               m = nm;
+
+       return (m);
+}
+
+/*-
+ * Configure a provided mbuf to refer to the provided external storage
+ * buffer and setup a reference count for said buffer.  If the setting
+ * up of the reference count fails, the M_EXT bit will not be set.  If
+ * successfull, the M_EXT bit is set in the mbuf's flags.
+ *
+ * Arguments:
+ *    mb     The existing mbuf to which to attach the provided buffer.
+ *    buf    The address of the provided external storage buffer.
+ *    size   The size of the provided buffer.
+ *    freef  A pointer to a routine that is responsible for freeing the
+ *           provided external storage buffer.
+ *    args   A pointer to an argument structure (of any type) to be passed
+ *           to the provided freef routine (may be NULL).
+ *    flags  Any other flags to be passed to the provided mbuf.
+ *    type   The type that the external storage buffer should be
+ *           labeled with.
+ *
+ * Returns:
+ *    Nothing.
+ */
+int
+m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
+    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
+    int flags, int type, int wait)
+{
+       KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
+
+       if (type != EXT_EXTREF)
+               mb->m_ext.ext_cnt = uma_zalloc(zone_ext_refcnt, wait);
+
+       if (mb->m_ext.ext_cnt == NULL)
+               return (ENOMEM);
+
+       *(mb->m_ext.ext_cnt) = 1;
+       mb->m_flags |= (M_EXT | flags);
+       mb->m_ext.ext_buf = buf;
+       mb->m_data = mb->m_ext.ext_buf;
+       mb->m_ext.ext_size = size;
+       mb->m_ext.ext_free = freef;
+       mb->m_ext.ext_arg1 = arg1;
+       mb->m_ext.ext_arg2 = arg2;
+       mb->m_ext.ext_type = type;
+       mb->m_ext.ext_flags = 0;
+
+       return (0);
+}
+
+struct mbuf *
+m_free(struct mbuf *m)
+{
+       struct mbuf *n = m->m_next;
+
+       if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE))
+               m_tag_delete_chain(m, NULL);
+       if (m->m_flags & M_EXT)
+               mb_free_ext(m);
+       else if ((m->m_flags & M_NOFREE) == 0)
+               uma_zfree(zone_mbuf, m);
+       return (n);
+}
+
+/*
+ * Free an entire chain of mbufs and associated external buffers, if
+ * applicable.
+ */
+void
+m_freem(struct mbuf *mb)
+{
+
+       while (mb != NULL)
+               mb = m_free(mb);
+}
Index: sys/kern/uipc_mbuf.c
===================================================================
--- sys/kern/uipc_mbuf.c
+++ sys/kern/uipc_mbuf.c
@@ -48,6 +48,8 @@
 #include
 #include
+
+#include
+
 int max_linkhdr;
 int max_protohdr;
 int max_hdr;
@@ -132,269 +134,6 @@
 #endif

 /*
- * m_get2() allocates minimum mbuf that would fit "size" argument.
- */
-struct mbuf *
-m_get2(int size, int how, short type, int flags)
-{
-       struct mb_args args;
-       struct mbuf *m, *n;
-
-       args.flags = flags;
-       args.type = type;
-
-       if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
-               return (uma_zalloc_arg(zone_mbuf, &args, how));
-       if (size <= MCLBYTES)
-               return (uma_zalloc_arg(zone_pack, &args, how));
-
-       if (size > MJUMPAGESIZE)
-               return (NULL);
-
-       m = uma_zalloc_arg(zone_mbuf, &args, how);
-       if (m == NULL)
-               return (NULL);
-
-       n = uma_zalloc_arg(zone_jumbop, m, how);
-       if (n == NULL) {
-               uma_zfree(zone_mbuf, m);
-               return (NULL);
-       }
-
-       return (m);
-}
-
-/*
- * m_getjcl() returns an mbuf with a cluster of the specified size attached.
- * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
- */
-struct mbuf *
-m_getjcl(int how, short type, int flags, int size)
-{
-       struct mb_args args;
-       struct mbuf *m, *n;
-       uma_zone_t zone;
-
-       if (size == MCLBYTES)
-               return m_getcl(how, type, flags);
-
-       args.flags = flags;
-       args.type = type;
-
-       m = uma_zalloc_arg(zone_mbuf, &args, how);
-       if (m == NULL)
-               return (NULL);
-
-       zone = m_getzone(size);
-       n = uma_zalloc_arg(zone, m, how);
-       if (n == NULL) {
-               uma_zfree(zone_mbuf, m);
-               return (NULL);
-       }
-       return (m);
-}
-
-/*
- * Allocate a given length worth of mbufs and/or clusters (whatever fits
- * best) and return a pointer to the top of the allocated chain.  If an
- * existing mbuf chain is provided, then we will append the new chain
- * to the existing one but still return the top of the newly allocated
- * chain.
- */
-struct mbuf *
-m_getm2(struct mbuf *m, int len, int how, short type, int flags)
-{
-       struct mbuf *mb, *nm = NULL, *mtail = NULL;
-
-       KASSERT(len >= 0, ("%s: len is < 0", __func__));
-
-       /* Validate flags. */
-       flags &= (M_PKTHDR | M_EOR);
-
-       /* Packet header mbuf must be first in chain. */
-       if ((flags & M_PKTHDR) && m != NULL)
-               flags &= ~M_PKTHDR;
-
-       /* Loop and append maximum sized mbufs to the chain tail. */
-       while (len > 0) {
-               if (len > MCLBYTES)
-                       mb = m_getjcl(how, type, (flags & M_PKTHDR),
-                           MJUMPAGESIZE);
-               else if (len >= MINCLSIZE)
-                       mb = m_getcl(how, type, (flags & M_PKTHDR));
-               else if (flags & M_PKTHDR)
-                       mb = m_gethdr(how, type);
-               else
-                       mb = m_get(how, type);
-
-               /* Fail the whole operation if one mbuf can't be allocated. */
-               if (mb == NULL) {
-                       if (nm != NULL)
-                               m_freem(nm);
-                       return (NULL);
-               }
-
-               /* Book keeping. */
-               len -= M_SIZE(mb);
-               if (mtail != NULL)
-                       mtail->m_next = mb;
-               else
-                       nm = mb;
-               mtail = mb;
-               flags &= ~M_PKTHDR;     /* Only valid on the first mbuf. */
-       }
-       if (flags & M_EOR)
-               mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
-
-       /* If mbuf was supplied, append new chain to the end of it. */
-       if (m != NULL) {
-               for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
-                       ;
-               mtail->m_next = nm;
-               mtail->m_flags &= ~M_EOR;
-       } else
-               m = nm;
-
-       return (m);
-}
-
-/*
- * Free an entire chain of mbufs and associated external buffers, if
- * applicable.
- */
-void
-m_freem(struct mbuf *mb)
-{
-
-       while (mb != NULL)
-               mb = m_free(mb);
-}
-
-/*-
- * Configure a provided mbuf to refer to the provided external storage
- * buffer and setup a reference count for said buffer.  If the setting
- * up of the reference count fails, the M_EXT bit will not be set.  If
- * successfull, the M_EXT bit is set in the mbuf's flags.
- *
- * Arguments:
- *    mb     The existing mbuf to which to attach the provided buffer.
- *    buf    The address of the provided external storage buffer.
- *    size   The size of the provided buffer.
- *    freef  A pointer to a routine that is responsible for freeing the
- *           provided external storage buffer.
- *    args   A pointer to an argument structure (of any type) to be passed
- *           to the provided freef routine (may be NULL).
- *    flags  Any other flags to be passed to the provided mbuf.
- *    type   The type that the external storage buffer should be
- *           labeled with.
- *
- * Returns:
- *    Nothing.
- */
-int
-m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
-    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
-    int flags, int type, int wait)
-{
-       KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
-
-       if (type != EXT_EXTREF)
-               mb->m_ext.ext_cnt = uma_zalloc(zone_ext_refcnt, wait);
-
-       if (mb->m_ext.ext_cnt == NULL)
-               return (ENOMEM);
-
-       *(mb->m_ext.ext_cnt) = 1;
-       mb->m_flags |= (M_EXT | flags);
-       mb->m_ext.ext_buf = buf;
-       mb->m_data = mb->m_ext.ext_buf;
-       mb->m_ext.ext_size = size;
-       mb->m_ext.ext_free = freef;
-       mb->m_ext.ext_arg1 = arg1;
-       mb->m_ext.ext_arg2 = arg2;
-       mb->m_ext.ext_type = type;
-       mb->m_ext.ext_flags = 0;
-
-       return (0);
-}
-
-/*
- * Non-directly-exported function to clean up after mbufs with M_EXT
- * storage attached to them if the reference count hits 1.
- */
-void
-mb_free_ext(struct mbuf *m)
-{
-       int freembuf;
-
-       KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
-
-       /*
-        * Check if the header is embedded in the cluster.
-        */
-       freembuf = (m->m_flags & M_NOFREE) ? 0 : 1;
-
-       switch (m->m_ext.ext_type) {
-       case EXT_SFBUF:
-               sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
-               break;
-       case EXT_SFBUF_NOCACHE:
-               sf_ext_free_nocache(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
-               break;
-       default:
-               KASSERT(m->m_ext.ext_cnt != NULL,
-                   ("%s: no refcounting pointer on %p", __func__, m));
-               /*
-                * Free attached storage if this mbuf is the only
-                * reference to it.
-                */
-               if (*(m->m_ext.ext_cnt) != 1) {
-                       if (atomic_fetchadd_int(m->m_ext.ext_cnt, -1) != 1)
-                               break;
-               }
-
-               switch (m->m_ext.ext_type) {
-               case EXT_PACKET:        /* The packet zone is special. */
-                       if (*(m->m_ext.ext_cnt) == 0)
-                               *(m->m_ext.ext_cnt) = 1;
-                       uma_zfree(zone_pack, m);
-                       return;         /* Job done. */
-               case EXT_CLUSTER:
-                       uma_zfree(zone_clust, m->m_ext.ext_buf);
-                       break;
-               case EXT_JUMBOP:
-                       uma_zfree(zone_jumbop, m->m_ext.ext_buf);
-                       break;
-               case EXT_JUMBO9:
-                       uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
-                       break;
-               case EXT_JUMBO16:
-                       uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
-                       break;
-               case EXT_NET_DRV:
-               case EXT_MOD_TYPE:
-               case EXT_DISPOSABLE:
-                       *(m->m_ext.ext_cnt) = 0;
-                       uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
-                           m->m_ext.ext_cnt));
-                       /* FALLTHROUGH */
-               case EXT_EXTREF:
-                       KASSERT(m->m_ext.ext_free != NULL,
-                           ("%s: ext_free not set", __func__));
-                       (*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
-                           m->m_ext.ext_arg2);
-                       break;
-               default:
-                       KASSERT(m->m_ext.ext_type == 0,
-                           ("%s: unknown ext_type", __func__));
-               }
-       }
-
-       if (freembuf)
-               uma_zfree(zone_mbuf, m);
-}
-
-/*
  * Attach the cluster from *m to *n, set up m_ext in *n
  * and bump the refcount of the cluster.
  */
@@ -534,6 +273,26 @@
 #undef M_SANITY_ACTION
 }

+/*
+ * Non-inlined part of m_init().
+ */
+int
+m_pkthdr_init(struct mbuf *m, int how)
+{
+#ifdef MAC
+       int error;
+#endif
+       m->m_data = m->m_pktdat;
+       bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
+#ifdef MAC
+       /* If the label init fails, fail the alloc */
+       error = mac_mbuf_init(m, how);
+       if (error)
+               return (error);
+#endif
+
+       return (0);
+}

 /*
  * "Move" mbuf pkthdr from "from" to "to".
Index: sys/netinet/tcp_pcap.c
===================================================================
--- sys/netinet/tcp_pcap.c
+++ sys/netinet/tcp_pcap.c
@@ -341,7 +341,10 @@
                        n = mhead;

                        tcp_pcap_m_freem(n->m_next);
-                       m_init(n, NULL, 0, M_NOWAIT, MT_DATA, 0);
+                       m->m_next = NULL;
+                       m->m_nextpkt = NULL;
+                       m->m_len = 0;
+                       m->m_flags = 0;
                }
        }
Index: sys/sys/mbuf.h
===================================================================
--- sys/sys/mbuf.h
+++ sys/sys/mbuf.h
@@ -527,8 +527,51 @@
 extern uma_zone_t zone_ext_refcnt;

 void mb_dupcl(struct mbuf *, const struct mbuf *);
-void mb_free_ext(struct mbuf *);
+void m_adj(struct mbuf *, int);
+int m_apply(struct mbuf *, int, int,
+    int (*)(void *, void *, u_int), void *);
+int m_append(struct mbuf *, int, c_caddr_t);
+void m_cat(struct mbuf *, struct mbuf *);
+void m_catpkt(struct mbuf *, struct mbuf *);
+int m_clget(struct mbuf *m, int how);
+void *m_cljget(struct mbuf *m, int how, int size);
+struct mbuf *m_collapse(struct mbuf *, int, int);
+void m_copyback(struct mbuf *, int, int, c_caddr_t);
+void m_copydata(const struct mbuf *, int, int, caddr_t);
+struct mbuf *m_copym(const struct mbuf *, int, int, int);
+struct mbuf *m_copypacket(struct mbuf *, int);
+void m_copy_pkthdr(struct mbuf *, struct mbuf *);
+struct mbuf *m_copyup(struct mbuf *, int, int);
+struct mbuf *m_defrag(struct mbuf *, int);
+void m_demote_pkthdr(struct mbuf *);
+void m_demote(struct mbuf *, int, int);
+struct mbuf *m_devget(char *, int, int, struct ifnet *,
+    void (*)(char *, caddr_t, u_int));
+struct mbuf *m_dup(const struct mbuf *, int);
+int m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
+int m_extadd(struct mbuf *, caddr_t, u_int,
+    void (*)(struct mbuf *, void *, void *), void *, void *,
+    int, int, int);
+u_int m_fixhdr(struct mbuf *);
+struct mbuf *m_fragment(struct mbuf *, int, int);
+struct mbuf *m_free(struct mbuf *);
+void m_freem(struct mbuf *);
+struct mbuf *m_get2(int, int, short, int);
+struct mbuf *m_getjcl(int, short, int, int);
+struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
+struct mbuf *m_getptr(struct mbuf *, int, int *);
+u_int m_length(struct mbuf *, struct mbuf **);
+int m_mbuftouio(struct uio *, struct mbuf *, int);
+void m_move_pkthdr(struct mbuf *, struct mbuf *);
 int m_pkthdr_init(struct mbuf *, int);
+struct mbuf *m_prepend(struct mbuf *, int, int);
+void m_print(const struct mbuf *, int);
+struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
+struct mbuf *m_pullup(struct mbuf *, int);
+int m_sanity(struct mbuf *, int);
+struct mbuf *m_split(struct mbuf *, int, int);
+struct mbuf *m_uiotombuf(struct uio *, int, int, int, int);
+struct mbuf *m_unshare(struct mbuf *, int);

 static __inline int
 m_gettype(int size)
@@ -561,65 +604,10 @@
 }

 /*
- * Associated an external reference counted buffer with an mbuf.
- */
-static __inline void
-m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt,
-    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2)
-{
-
-       KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));
-
-       atomic_add_int(ref_cnt, 1);
-       m->m_flags |= M_EXT;
-       m->m_ext.ext_buf = buf;
-       m->m_ext.ext_cnt = ref_cnt;
-       m->m_data = m->m_ext.ext_buf;
-       m->m_ext.ext_size = size;
-       m->m_ext.ext_free = freef;
-       m->m_ext.ext_arg1 = arg1;
-       m->m_ext.ext_arg2 = arg2;
-       m->m_ext.ext_type = EXT_EXTREF;
-       m->m_ext.ext_flags = 0;
-}
-
-static __inline uma_zone_t
-m_getzone(int size)
-{
-       uma_zone_t zone;
-
-       switch (size) {
-       case MCLBYTES:
-               zone = zone_clust;
-               break;
-#if MJUMPAGESIZE != MCLBYTES
-       case MJUMPAGESIZE:
-               zone = zone_jumbop;
-               break;
-#endif
-       case MJUM9BYTES:
-               zone = zone_jumbo9;
-               break;
-       case MJUM16BYTES:
-               zone = zone_jumbo16;
-               break;
-       default:
-               panic("%s: invalid cluster size %d", __func__, size);
-       }
-
-       return (zone);
-}
-
-/*
  * Initialize an mbuf with linear storage.
- *
- * Inline because the consumer text overhead will be roughly the same to
- * initialize or call a function with this many parameters and M_PKTHDR
- * should go away with constant propagation for !MGETHDR.
  */
 static __inline int
-m_init(struct mbuf *m, uma_zone_t zone __unused, int size __unused, int how,
-    short type, int flags)
+m_init(struct mbuf *m, int how, short type, int flags)
 {
        int error;
@@ -648,20 +636,26 @@
 }

 /*
- * XXX This should be deprecated, very little use.
+ * Associated an external reference counted buffer with an mbuf.
  */
-static __inline struct mbuf *
-m_getclr(int how, short type)
+static __inline void
+m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt,
+    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2)
 {
-       struct mbuf *m;
-       struct mb_args args;

-       args.flags = 0;
-       args.type = type;
-       m = uma_zalloc_arg(zone_mbuf, &args, how);
-       if (m != NULL)
-               bzero(m->m_data, MLEN);
-       return (m);
+       KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));
+
+       atomic_add_int(ref_cnt, 1);
+       m->m_flags |= M_EXT;
+       m->m_ext.ext_buf = buf;
+       m->m_ext.ext_cnt = ref_cnt;
+       m->m_data = m->m_ext.ext_buf;
+       m->m_ext.ext_size = size;
+       m->m_ext.ext_free = freef;
+       m->m_ext.ext_arg1 = arg1;
+       m->m_ext.ext_arg2 = arg2;
+       m->m_ext.ext_type = EXT_EXTREF;
+       m->m_ext.ext_flags = 0;
 }

 static __inline struct mbuf *
@@ -684,87 +678,6 @@
        return (uma_zalloc_arg(zone_pack, &args, how));
 }

-static __inline int
-m_clget(struct mbuf *m, int how)
-{
-
-       KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
-           __func__, m));
-       m->m_ext.ext_buf = (char *)NULL;
-       uma_zalloc_arg(zone_clust, m, how);
-       /*
-        * On a cluster allocation failure, drain the packet zone and retry,
-        * we might be able to loosen a few clusters up on the drain.
-        */
-       if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
-               zone_drain(zone_pack);
-               uma_zalloc_arg(zone_clust, m, how);
-       }
-       return (m->m_flags & M_EXT);
-}
-
-/*
- * m_cljget() is different from m_clget() as it can allocate clusters without
- * attaching them to an mbuf.  In that case the return value is the pointer
- * to the cluster of the requested size.  If an mbuf was specified, it gets
- * the cluster attached to it and the return value can be safely ignored.
- * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
- */
-static __inline void *
-m_cljget(struct mbuf *m, int how, int size)
-{
-       uma_zone_t zone;
-
-       if (m != NULL) {
-               KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
-                   __func__, m));
-               m->m_ext.ext_buf = NULL;
-       }
-
-       zone = m_getzone(size);
-       return (uma_zalloc_arg(zone, m, how));
-}
-
-static __inline void
-m_cljset(struct mbuf *m, void *cl, int type)
-{
-       uma_zone_t zone;
-       int size;
-
-       switch (type) {
-       case EXT_CLUSTER:
-               size = MCLBYTES;
-               zone = zone_clust;
-               break;
-#if MJUMPAGESIZE != MCLBYTES
-       case EXT_JUMBOP:
-               size = MJUMPAGESIZE;
-               zone = zone_jumbop;
-               break;
-#endif
-       case EXT_JUMBO9:
-               size = MJUM9BYTES;
-               zone = zone_jumbo9;
-               break;
-       case EXT_JUMBO16:
-               size = MJUM16BYTES;
-               zone = zone_jumbo16;
-               break;
-       default:
-               panic("%s: unknown cluster type %d", __func__, type);
-               break;
-       }
-
-       m->m_data = m->m_ext.ext_buf = cl;
-       m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
-       m->m_ext.ext_size = size;
-       m->m_ext.ext_type = type;
-       m->m_ext.ext_flags = 0;
-       m->m_ext.ext_cnt = uma_find_refcnt(zone, cl);
-       m->m_flags |= M_EXT;
-
-}
-
 static __inline void
 m_chtype(struct mbuf *m, short new_type)
 {
@@ -942,50 +855,6 @@
 extern int max_protohdr;       /* Largest protocol header */
 extern int nmbclusters;        /* Maximum number of clusters */

-struct uio;
-
-void m_adj(struct mbuf *, int);
-int m_apply(struct mbuf *, int, int,
-    int (*)(void *, void *, u_int), void *);
-int m_append(struct mbuf *, int, c_caddr_t);
-void m_cat(struct mbuf *, struct mbuf *);
-void m_catpkt(struct mbuf *, struct mbuf *);
-int m_extadd(struct mbuf *, caddr_t, u_int,
-    void (*)(struct mbuf *, void *, void *), void *, void *,
-    int, int, int);
-struct mbuf *m_collapse(struct mbuf *, int, int);
-void m_copyback(struct mbuf *, int, int, c_caddr_t);
-void m_copydata(const struct mbuf *, int, int, caddr_t);
-struct mbuf *m_copym(const struct mbuf *, int, int, int);
-struct mbuf *m_copypacket(struct mbuf *, int);
-void m_copy_pkthdr(struct mbuf *, struct mbuf *);
-struct mbuf *m_copyup(struct mbuf *, int, int);
-struct mbuf *m_defrag(struct mbuf *, int);
-void m_demote_pkthdr(struct mbuf *);
-void m_demote(struct mbuf *, int, int);
-struct mbuf *m_devget(char *, int, int, struct ifnet *,
-    void (*)(char *, caddr_t, u_int));
-struct mbuf *m_dup(const struct mbuf *, int);
-int m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
-u_int m_fixhdr(struct mbuf *);
-struct mbuf *m_fragment(struct mbuf *, int, int);
-void m_freem(struct mbuf *);
-struct mbuf *m_get2(int, int, short, int);
-struct mbuf *m_getjcl(int, short, int, int);
-struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
-struct mbuf *m_getptr(struct mbuf *, int, int *);
-u_int m_length(struct mbuf *, struct mbuf **);
-int m_mbuftouio(struct uio *, struct mbuf *, int);
-void m_move_pkthdr(struct mbuf *, struct mbuf *);
-struct mbuf *m_prepend(struct mbuf *, int, int);
-void m_print(const struct mbuf *, int);
-struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
-struct mbuf *m_pullup(struct mbuf *, int);
-int m_sanity(struct mbuf *, int);
-struct mbuf *m_split(struct mbuf *, int, int);
-struct mbuf *m_uiotombuf(struct uio *, int, int, int, int);
-struct mbuf *m_unshare(struct mbuf *, int);
-
 /*-
  * Network packets may have annotations attached by affixing a list of
  * "packet tags" to the pkthdr structure.  Packet tags are dynamically
@@ -1167,20 +1036,6 @@
            m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
 }

-static __inline struct mbuf *
-m_free(struct mbuf *m)
-{
-       struct mbuf *n = m->m_next;
-
-       if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE))
-               m_tag_delete_chain(m, NULL);
-       if (m->m_flags & M_EXT)
-               mb_free_ext(m);
-       else if ((m->m_flags & M_NOFREE) == 0)
-               uma_zfree(zone_mbuf, m);
-       return (n);
-}
-
 static __inline int
 rt_m_getfib(struct mbuf *m)
 {
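
For reviewers, a minimal sketch (not part of the patch) of how a driver-side receive-buffer allocation looks once this change is in: the now-out-of-line m_cljget()/m_cljset() attach a bare cluster selected by size, and the two non-public helpers are declared by hand exactly as cxgb(4)/cxgbe(4) do above. The struct example_fl and example_fl_alloc() names are hypothetical, invented only for illustration.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

/*
 * Internal mbuf(9) knowledge, declared locally as the drivers in this
 * patch do; only m_gettype() remains a static inline in sys/mbuf.h.
 */
uma_zone_t m_getzone(int);
void m_cljset(struct mbuf *m, void *cl, int type);

/* Hypothetical free-list bookkeeping, not taken from the patch. */
struct example_fl {
        uma_zone_t zone;        /* cluster zone, selected by buffer size */
        int buf_size;           /* MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES */
        int type;               /* matching EXT_* constant */
};

/*
 * Allocate one receive buffer: a packet-header mbuf plus a bare cluster
 * of fl->buf_size bytes, attached explicitly with m_cljset().
 */
static struct mbuf *
example_fl_alloc(struct example_fl *fl)
{
        struct mbuf *m;
        void *cl;

        /* Both the zone and the EXT_* type follow from the size alone. */
        fl->zone = m_getzone(fl->buf_size);
        fl->type = m_gettype(fl->buf_size);

        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);

        /* With a NULL mbuf argument, m_cljget() hands back an unattached cluster. */
        cl = m_cljget(NULL, M_NOWAIT, fl->buf_size);
        if (cl == NULL) {
                m_free(m);
                return (NULL);
        }
        m_cljset(m, cl, fl->type);
        m->m_len = m->m_pkthdr.len = 0;

        return (m);
}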