Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/kern_mbuf.c
Show First 20 Lines • Show All 404 Lines • ▼ Show 20 Lines | #endif | ||||
*/ | */ | ||||
EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL, | EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL, | ||||
EVENTHANDLER_PRI_FIRST); | EVENTHANDLER_PRI_FIRST); | ||||
snd_tag_count = counter_u64_alloc(M_WAITOK); | snd_tag_count = counter_u64_alloc(M_WAITOK); | ||||
} | } | ||||
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL); | SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL); | ||||
#ifdef NETDUMP | #ifdef PANICNET | ||||
/* | /* | ||||
* netdump makes use of a pre-allocated pool of mbufs and clusters. When | * panicnet makes use of a pre-allocated pool of mbufs and clusters. When | ||||
* netdump is configured, we initialize a set of UMA cache zones which return | * panicnet is configured, we initialize a set of UMA cache zones which return | ||||
* items from this pool. At panic-time, the regular UMA zone pointers are | * items from this pool. At panic-time, the regular UMA zone pointers are | ||||
* overwritten with those of the cache zones so that drivers may allocate and | * overwritten with those of the cache zones so that drivers may allocate and | ||||
* free mbufs and clusters without attempting to allocate physical memory. | * free mbufs and clusters without attempting to allocate physical memory. | ||||
* | * | ||||
* We keep mbufs and clusters in a pair of mbuf queues. In particular, for | * We keep mbufs and clusters in a pair of mbuf queues. In particular, for | ||||
* the purpose of caching clusters, we treat them as mbufs. | * the purpose of caching clusters, we treat them as mbufs. | ||||
*/ | */ | ||||
/*
 * Pre-allocated pools of mbufs and clusters reserved for panicnet.  Both
 * pools are kept in mbuf queues; clusters are treated as mbufs for the
 * purpose of queueing (see the comment above).
 */
static struct mbufq pn_mbufq =
    { STAILQ_HEAD_INITIALIZER(pn_mbufq.mq_head), 0, INT_MAX };
static struct mbufq pn_clustq =
    { STAILQ_HEAD_INITIALIZER(pn_clustq.mq_head), 0, INT_MAX };

/* Size, in bytes, of the clusters in the pre-allocated pool. */
static int pn_clsize;

/* UMA cache zones that return items from the pre-allocated pools above. */
static uma_zone_t pn_zone_mbuf;
static uma_zone_t pn_zone_clust;
static uma_zone_t pn_zone_pack;

/*
 * The regular allocator zone pointers saved by panicnet_mbuf_start() while
 * the panicnet cache zones are installed, so panicnet_mbuf_finish() can
 * restore them when the panicnet connection is closed.
 */
static struct panicnet_saved_zones {
	uma_zone_t psz_mbuf;
	uma_zone_t psz_clust;
	uma_zone_t psz_pack;
	uma_zone_t psz_jumbop;
	uma_zone_t psz_jumbo9;
	uma_zone_t psz_jumbo16;
	bool psz_panicnet_zones_enabled;	/* zones currently swapped in? */
} pn_saved_zones;
emaste: not part of the renaming? | |||||
Done Inline ActionsIs the comment that this change was introduced as part of the rename patch? It wasn't, or shouldn't have been. I thought it was part of the original panicnet_ named version uploaded as v0. Or is the question that these haven't been renamed? They have, I think. Please clarify :-) cem: Is the comment that this change was introduced as part of the rename patch? It wasn't, or… | |||||
static int | static int | ||||
nd_buf_import(void *arg, void **store, int count, int domain __unused, | pn_buf_import(void *arg, void **store, int count, int domain __unused, | ||||
int flags) | int flags) | ||||
{ | { | ||||
struct mbufq *q; | struct mbufq *q; | ||||
struct mbuf *m; | struct mbuf *m; | ||||
int i; | int i; | ||||
q = arg; | q = arg; | ||||
for (i = 0; i < count; i++) { | for (i = 0; i < count; i++) { | ||||
m = mbufq_dequeue(q); | m = mbufq_dequeue(q); | ||||
if (m == NULL) | if (m == NULL) | ||||
break; | break; | ||||
trash_init(m, q == &nd_mbufq ? MSIZE : nd_clsize, flags); | trash_init(m, q == &pn_mbufq ? MSIZE : pn_clsize, flags); | ||||
store[i] = m; | store[i] = m; | ||||
} | } | ||||
KASSERT((flags & M_WAITOK) == 0 || i == count, | KASSERT((flags & M_WAITOK) == 0 || i == count, | ||||
("%s: ran out of pre-allocated mbufs", __func__)); | ("%s: ran out of pre-allocated mbufs", __func__)); | ||||
return (i); | return (i); | ||||
} | } | ||||
/*
 * UMA cache-zone release callback: return items to the pre-allocated
 * queue ("arg") they were originally drawn from, preserving order.
 */
static void
pn_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q = arg;
	int idx;

	/* The queue limit is INT_MAX, so enqueueing cannot fail. */
	for (idx = 0; idx < count; idx++)
		(void)mbufq_enqueue(q, store[idx]);
}
static int | static int | ||||
nd_pack_import(void *arg __unused, void **store, int count, int domain __unused, | pn_pack_import(void *arg __unused, void **store, int count, int domain __unused, | ||||
int flags __unused) | int flags __unused) | ||||
{ | { | ||||
struct mbuf *m; | struct mbuf *m; | ||||
void *clust; | void *clust; | ||||
int i; | int i; | ||||
for (i = 0; i < count; i++) { | for (i = 0; i < count; i++) { | ||||
m = m_get(MT_DATA, M_NOWAIT); | m = m_get(MT_DATA, M_NOWAIT); | ||||
if (m == NULL) | if (m == NULL) | ||||
break; | break; | ||||
clust = uma_zalloc(nd_zone_clust, M_NOWAIT); | clust = uma_zalloc(pn_zone_clust, M_NOWAIT); | ||||
if (clust == NULL) { | if (clust == NULL) { | ||||
m_free(m); | m_free(m); | ||||
break; | break; | ||||
} | } | ||||
mb_ctor_clust(clust, nd_clsize, m, 0); | mb_ctor_clust(clust, pn_clsize, m, 0); | ||||
store[i] = m; | store[i] = m; | ||||
} | } | ||||
KASSERT((flags & M_WAITOK) == 0 || i == count, | KASSERT((flags & M_WAITOK) == 0 || i == count, | ||||
("%s: ran out of pre-allocated mbufs", __func__)); | ("%s: ran out of pre-allocated mbufs", __func__)); | ||||
return (i); | return (i); | ||||
} | } | ||||
static void | static void | ||||
nd_pack_release(void *arg __unused, void **store, int count) | pn_pack_release(void *arg __unused, void **store, int count) | ||||
{ | { | ||||
struct mbuf *m; | struct mbuf *m; | ||||
void *clust; | void *clust; | ||||
int i; | int i; | ||||
for (i = 0; i < count; i++) { | for (i = 0; i < count; i++) { | ||||
m = store[i]; | m = store[i]; | ||||
clust = m->m_ext.ext_buf; | clust = m->m_ext.ext_buf; | ||||
uma_zfree(nd_zone_clust, clust); | uma_zfree(pn_zone_clust, clust); | ||||
uma_zfree(nd_zone_mbuf, m); | uma_zfree(pn_zone_mbuf, m); | ||||
} | } | ||||
} | } | ||||
/*
 * Free the pre-allocated mbufs and clusters reserved for panicnet, and destroy
 * the corresponding UMA cache zones.  Safe to call when the pool was never
 * initialized: the zone pointers are NULL and the queues are empty.
 */
void
panicnet_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	/*
	 * Destroy the cache zones first so no further items can be imported
	 * from or released to the queues while we drain them.
	 */
	if (pn_zone_mbuf != NULL) {
		uma_zdestroy(pn_zone_mbuf);
		pn_zone_mbuf = NULL;
	}
	if (pn_zone_clust != NULL) {
		uma_zdestroy(pn_zone_clust);
		pn_zone_clust = NULL;
	}
	if (pn_zone_pack != NULL) {
		uma_zdestroy(pn_zone_pack);
		pn_zone_pack = NULL;
	}
	/* Return the queued items to the regular allocator. */
	while ((m = mbufq_dequeue(&pn_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&pn_clustq)) != NULL)
		uma_zfree(m_getzone(pn_clsize), item);
}
/*
 * Callback invoked immediately prior to starting a panicnet connection.
 *
 * Saves the regular allocator zone pointers and overwrites them with the
 * pre-allocated panicnet cache zones, so that drivers allocate from the
 * reserved pool instead of attempting physical-memory allocation at
 * panic time.  Must be balanced by a call to panicnet_mbuf_finish().
 */
void
panicnet_mbuf_start(void)
{

	MPASS(!pn_saved_zones.psz_panicnet_zones_enabled);

	/* Save the old zone pointers to restore when panicnet is closed. */
	pn_saved_zones = (struct panicnet_saved_zones) {
		.psz_panicnet_zones_enabled = true,
		.psz_mbuf = zone_mbuf,
		.psz_clust = zone_clust,
		.psz_pack = zone_pack,
		.psz_jumbop = zone_jumbop,
		.psz_jumbo9 = zone_jumbo9,
		.psz_jumbo16 = zone_jumbo16,
	};

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a panicnet-enabled interface changes.
	 */
	printf("panicnet: overwriting mbuf zone pointers\n");
	zone_mbuf = pn_zone_mbuf;
	zone_clust = pn_zone_clust;
	zone_pack = pn_zone_pack;
	/* All jumbo zones are serviced by the single panicnet cluster pool. */
	zone_jumbop = pn_zone_clust;
	zone_jumbo9 = pn_zone_clust;
	zone_jumbo16 = pn_zone_clust;
}
/*
 * Callback invoked when a panicnet connection is closed/finished.
 *
 * Restores the regular allocator zone pointers that were saved by
 * panicnet_mbuf_start(), then clears the saved state.
 */
void
panicnet_mbuf_finish(void)
{

	MPASS(pn_saved_zones.psz_panicnet_zones_enabled);

	printf("panicnet: restoring mbuf zone pointers\n");
	zone_mbuf = pn_saved_zones.psz_mbuf;
	zone_clust = pn_saved_zones.psz_clust;
	zone_pack = pn_saved_zones.psz_pack;
	zone_jumbop = pn_saved_zones.psz_jumbop;
	zone_jumbo9 = pn_saved_zones.psz_jumbo9;
	zone_jumbo16 = pn_saved_zones.psz_jumbo16;
	/* Also clears psz_panicnet_zones_enabled for the next start/finish. */
	memset(&pn_saved_zones, 0, sizeof(pn_saved_zones));
}
/* | |||||
* Reinitialize the panicnet mbuf+cluster pool and cache zones. | |||||
*/ | |||||
void | |||||
panicnet_mbuf_reinit(int nmbuf, int nclust, int clsize) | |||||
{ | |||||
struct mbuf *m; | struct mbuf *m; | ||||
void *item; | void *item; | ||||
netdump_mbuf_drain(); | panicnet_mbuf_drain(); | ||||
nd_clsize = clsize; | pn_clsize = clsize; | ||||
nd_zone_mbuf = uma_zcache_create("netdump_" MBUF_MEM_NAME, | pn_zone_mbuf = uma_zcache_create("panicnet_" MBUF_MEM_NAME, | ||||
MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, | MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
trash_init, trash_fini, | trash_init, trash_fini, | ||||
#else | #else | ||||
NULL, NULL, | NULL, NULL, | ||||
#endif | #endif | ||||
nd_buf_import, nd_buf_release, | pn_buf_import, pn_buf_release, | ||||
&nd_mbufq, UMA_ZONE_NOBUCKET); | &pn_mbufq, UMA_ZONE_NOBUCKET); | ||||
nd_zone_clust = uma_zcache_create("netdump_" MBUF_CLUSTER_MEM_NAME, | pn_zone_clust = uma_zcache_create("panicnet_" MBUF_CLUSTER_MEM_NAME, | ||||
clsize, mb_ctor_clust, | clsize, mb_ctor_clust, | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
trash_dtor, trash_init, trash_fini, | trash_dtor, trash_init, trash_fini, | ||||
#else | #else | ||||
NULL, NULL, NULL, | NULL, NULL, NULL, | ||||
#endif | #endif | ||||
nd_buf_import, nd_buf_release, | pn_buf_import, pn_buf_release, | ||||
&nd_clustq, UMA_ZONE_NOBUCKET); | &pn_clustq, UMA_ZONE_NOBUCKET); | ||||
nd_zone_pack = uma_zcache_create("netdump_" MBUF_PACKET_MEM_NAME, | pn_zone_pack = uma_zcache_create("panicnet_" MBUF_PACKET_MEM_NAME, | ||||
MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL, | MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL, | ||||
nd_pack_import, nd_pack_release, | pn_pack_import, pn_pack_release, | ||||
NULL, UMA_ZONE_NOBUCKET); | NULL, UMA_ZONE_NOBUCKET); | ||||
while (nmbuf-- > 0) { | while (nmbuf-- > 0) { | ||||
m = m_get(MT_DATA, M_WAITOK); | m = m_get(MT_DATA, M_WAITOK); | ||||
uma_zfree(nd_zone_mbuf, m); | uma_zfree(pn_zone_mbuf, m); | ||||
} | } | ||||
while (nclust-- > 0) { | while (nclust-- > 0) { | ||||
item = uma_zalloc(m_getzone(nd_clsize), M_WAITOK); | item = uma_zalloc(m_getzone(pn_clsize), M_WAITOK); | ||||
uma_zfree(nd_zone_clust, item); | uma_zfree(pn_zone_clust, item); | ||||
} | } | ||||
} | } | ||||
#endif /* NETDUMP */ | #endif /* PANICNET */ | ||||
/* | /* | ||||
* UMA backend page allocator for the jumbo frame zones. | * UMA backend page allocator for the jumbo frame zones. | ||||
* | * | ||||
* Allocates kernel virtual memory that is backed by contiguous physical | * Allocates kernel virtual memory that is backed by contiguous physical | ||||
* pages. | * pages. | ||||
*/ | */ | ||||
static void * | static void * | ||||
▲ Show 20 Lines • Show All 966 Lines • Show Last 20 Lines |
not part of the renaming?