D15736.id48391.diff

Index: sys/kern/kern_malloc.c
===================================================================
--- sys/kern/kern_malloc.c
+++ sys/kern/kern_malloc.c
@@ -77,6 +77,7 @@
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
+#include <vm/memguard.h>
#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
@@ -362,13 +363,12 @@
* statistics.
*/
static void
-malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
+malloc_type_zone_allocated_crit(struct malloc_type *mtp, unsigned long size,
int zindx)
{
struct malloc_type_internal *mtip;
struct malloc_type_stats *mtsp;
- critical_enter();
mtip = mtp->ks_handle;
mtsp = zpcpu_get(mtip->mti_stats);
if (size > 0) {
@@ -387,7 +387,15 @@
(uintptr_t) mtsp, size, zindx);
}
#endif
+}
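+
+/*
+ * Wrapper which brackets the statistics update with a critical section;
+ * callers already running in one can use the _crit variant directly.
+ */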
+static void
+malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
+ int zindx)
+{
+
+ critical_enter();
+ malloc_type_zone_allocated_crit(mtp, size, zindx);
critical_exit();
}
@@ -405,13 +413,12 @@
* thread isn't preempted and doesn't migrate while updating per-CPU
* statistics.
*/
-void
-malloc_type_freed(struct malloc_type *mtp, unsigned long size)
+static void
+malloc_type_freed_crit(struct malloc_type *mtp, unsigned long size)
{
struct malloc_type_internal *mtip;
struct malloc_type_stats *mtsp;
- critical_enter();
mtip = mtp->ks_handle;
mtsp = zpcpu_get(mtip->mti_stats);
mtsp->mts_memfreed += size;
@@ -426,7 +433,14 @@
(uintptr_t) mtsp, size, 0);
}
#endif
+}
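+
+/*
+ * As above: enter a critical section around malloc_type_freed_crit().
+ */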
+void
+malloc_type_freed(struct malloc_type *mtp, unsigned long size)
+{
+
+ critical_enter();
+ malloc_type_freed_crit(mtp, size);
critical_exit();
}
@@ -542,6 +556,14 @@
}
#endif
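+/*
+ * DEBUG_REDZONE needs the original request size in the out-of-line
+ * helpers below; these macros add the extra parameter only when the
+ * option is compiled in.
+ */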
+#ifdef DEBUG_REDZONE
+#define OSIZE_ARG_DEF , unsigned long osize
+#define OSIZE_ARG , osize
+#else
+#define OSIZE_ARG_DEF
+#define OSIZE_ARG
+#endif
+
/*
* malloc:
*
@@ -550,11 +572,59 @@
* If M_NOWAIT is set, this routine will not block and return NULL if
* the allocation fails.
*/
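+/*
+ * Out-of-line handler for requests which bypass the UMA zones: sizes
+ * above kmem_zmax and M_EXEC allocations go straight to
+ * uma_large_malloc().
+ */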
+static void * __noinline
+malloc_large(size_t size, struct malloc_type *mtp, int flags OSIZE_ARG_DEF)
+{
+ caddr_t va;
+
+ size = roundup(size, PAGE_SIZE);
+ va = uma_large_malloc(size, flags);
+ malloc_type_allocated(mtp, va == NULL ? 0 : size);
+ if (flags & M_WAITOK)
+ KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
+ else if (__predict_false(va == NULL))
+ t_malloc_fail = time_uptime;
+#ifdef DEBUG_REDZONE
+ if (va != NULL)
+ va = redzone_setup(va, osize);
+#endif
+ return ((void *) va);
+}
+
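+/*
+ * Out-of-line fallback taken when the per-CPU cache cannot satisfy
+ * the request; entered in a critical section, which is exited before
+ * calling into uma_zalloc() proper.
+ */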
+static void * __noinline
+malloc_slowpath(uma_zone_t zone, struct malloc_type *mtp, int flags, int indx
+ OSIZE_ARG_DEF)
+{
+ caddr_t va;
+ size_t size;
+
+ critical_exit();
+
+ va = uma_zalloc(zone, flags);
+ if (va != NULL)
+ size = zone->uz_size;
+ malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
+ if (flags & M_WAITOK)
+ KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
+ else if (__predict_false(va == NULL))
+ t_malloc_fail = time_uptime;
+#ifdef DEBUG_REDZONE
+ if (va != NULL)
+ va = redzone_setup(va, osize);
+#endif
+ return ((void *) va);
+}
+
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
+ uma_bucket_t bucket;
+ uma_cache_t cache;
+ void *item;
int indx;
+#ifdef MALLOC_DEBUG
caddr_t va;
+#endif
uma_zone_t zone;
#if defined(DEBUG_REDZONE)
unsigned long osize = size;
@@ -566,33 +636,48 @@
return (va);
#endif
- if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
- if (size & KMEM_ZMASK)
- size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
- indx = kmemsize[size >> KMEM_ZSHIFT];
- zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
+ if (__predict_false(size > kmem_zmax || (flags & M_EXEC) != 0)) {
+ return (malloc_large(size, mtp, flags OSIZE_ARG));
+ }
+
+ if (size & KMEM_ZMASK)
+ size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
+ indx = kmemsize[size >> KMEM_ZSHIFT];
+ zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
- krequests[size >> KMEM_ZSHIFT]++;
+ krequests[size >> KMEM_ZSHIFT]++;
#endif
- va = uma_zalloc(zone, flags);
- if (va != NULL)
- size = zone->uz_size;
- malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
- } else {
- size = roundup(size, PAGE_SIZE);
- zone = NULL;
- va = uma_large_malloc(size, flags);
- malloc_type_allocated(mtp, va == NULL ? 0 : size);
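+ /*
+ * Inlined uma_zalloc() per-CPU bucket fast path: pop an item off
+ * the current CPU's alloc bucket if one is available.
+ */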
+ critical_enter();
+ cache = &zone->uz_cpu[curcpu];
+ bucket = cache->uc_allocbucket;
+ if (__predict_false(memguard_cmp_zone(zone) ||
+ bucket == NULL || bucket->ub_cnt == 0))
+ return (malloc_slowpath(zone, mtp, flags, indx OSIZE_ARG));
+ bucket->ub_cnt--;
+ item = bucket->ub_bucket[bucket->ub_cnt];
+#ifdef INVARIANTS
+ bucket->ub_bucket[bucket->ub_cnt] = NULL;
+#endif
+ KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
+ cache->uc_allocs++;
+ malloc_type_zone_allocated_crit(mtp, size, indx);
+#ifdef INVARIANTS
+ if (!uma_dbg_zskip(zone, item)) {
+ if (zone->uz_ctor(item, zone->uz_size, NULL, flags))
+ panic("malloc constructor failed");
+ uma_dbg_alloc(zone, NULL, item);
}
- if (flags & M_WAITOK)
- KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
- else if (va == NULL)
- t_malloc_fail = time_uptime;
+#endif
+ critical_exit();
#ifdef DEBUG_REDZONE
- if (va != NULL)
- va = redzone_setup(va, osize);
+ if (__predict_false(flags & M_ZERO))
+ memset(item, 0, size);
+ item = redzone_setup(item, osize);
+#else
+ if (__predict_false(flags & M_ZERO))
+ return (memset(item, 0, size));
#endif
- return ((void *) va);
+ return (item);
}
void *
@@ -709,9 +794,36 @@
*
* This routine may not block.
*/
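+/*
+ * Out-of-line handler for freeing allocations which came from
+ * uma_large_malloc().
+ */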
+static void __noinline
+free_large(uma_slab_t slab, struct malloc_type *mtp)
+{
+ u_long size;
+
+ size = slab->us_size;
+ uma_large_free(slab);
+ malloc_type_freed(mtp, size);
+}
+
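+/*
+ * Out-of-line fallback taken when the item cannot be cached in a
+ * per-CPU bucket; entered in a critical section, which is exited
+ * before handing the item to uma_zfree_arg().
+ */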
+static void __noinline
+free_slowpath(uma_slab_t slab, void *addr, struct malloc_type *mtp)
+{
+ u_long size;
+
+ critical_exit();
+ size = slab->us_keg->uk_size;
+#ifdef INVARIANTS
+ free_save_type(addr, mtp, size);
+#endif
+ uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
+ malloc_type_freed(mtp, size);
+}
+
void
free(void *addr, struct malloc_type *mtp)
{
+ uma_bucket_t bucket;
+ uma_cache_t cache;
+ uma_zone_t zone;
uma_slab_t slab;
u_long size;
@@ -720,25 +832,45 @@
return;
#endif
/* free(NULL, ...) does nothing */
- if (addr == NULL)
+ if (__predict_false(addr == NULL))
return;
slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
- if (slab == NULL)
+ if (__predict_false(slab == NULL))
panic("free: address %p(%p) has not been allocated.\n",
addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
- if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
- size = slab->us_keg->uk_size;
+ if (__predict_false(slab->us_flags & UMA_SLAB_MALLOC)) {
+ free_large(slab, mtp);
+ return;
+ }
+
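+ /*
+ * Inlined uma_zfree() per-CPU bucket fast path: stash the item in
+ * the current CPU's alloc bucket, falling back to the free bucket
+ * if the alloc bucket is full.
+ */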
+ critical_enter();
+ zone = LIST_FIRST(&slab->us_keg->uk_zones);
+ cache = &zone->uz_cpu[curcpu];
+ bucket = cache->uc_allocbucket;
+ if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
+ bucket = cache->uc_freebucket;
+ if (__predict_false(is_memguard_addr(addr) ||
+ bucket == NULL || bucket->ub_cnt == bucket->ub_entries)) {
+ free_slowpath(slab, addr, mtp);
+ return;
+ }
+ size = slab->us_keg->uk_size;
#ifdef INVARIANTS
- free_save_type(addr, mtp, size);
-#endif
- uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
- } else {
- size = slab->us_size;
- uma_large_free(slab);
+ free_save_type(addr, mtp, size);
+ if (!uma_dbg_zskip(zone, addr)) {
+ zone->uz_dtor(addr, zone->uz_size, NULL);
+ uma_dbg_free(zone, NULL, addr);
}
- malloc_type_freed(mtp, size);
+#endif
+ KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
+ ("uma_zfree: Freeing to non free bucket index."));
+ bucket->ub_bucket[bucket->ub_cnt] = addr;
+ bucket->ub_cnt++;
+ cache->uc_frees++;
+ malloc_type_freed_crit(mtp, size);
+ critical_exit();
}
void
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -276,9 +276,6 @@
#ifdef INVARIANTS
static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
-static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
-static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
-static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
"Memory allocation debugging");
@@ -4048,7 +4045,7 @@
return (slab);
}
-static bool
+bool
uma_dbg_zskip(uma_zone_t zone, void *mem)
{
uma_keg_t keg;
@@ -4089,7 +4086,7 @@
* Set up the slab's freei data such that uma_dbg_free can function.
*
*/
-static void
+void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
uma_keg_t keg;
@@ -4117,7 +4114,7 @@
* and duplicate frees.
*
*/
-static void
+void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
uma_keg_t keg;
Index: sys/vm/uma_dbg.h
===================================================================
--- sys/vm/uma_dbg.h
+++ sys/vm/uma_dbg.h
@@ -40,6 +40,14 @@
#ifndef VM_UMA_DBG_H
#define VM_UMA_DBG_H
+#include <vm/uma_int.h>
+
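+/*
+ * Exported so that the inlined malloc()/free() fast paths in
+ * kern_malloc.c can perform the same INVARIANTS checks as
+ * uma_zalloc()/uma_zfree().
+ */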
+#ifdef INVARIANTS
+bool uma_dbg_zskip(uma_zone_t zone, void *mem);
+void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
+void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
+#endif
+
int trash_ctor(void *mem, int size, void *arg, int flags);
void trash_dtor(void *mem, int size, void *arg);
int trash_init(void *mem, int size, int flags);
