diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c index 4864277dd7d7..6b2e73d86fd2 100644 --- a/sys/kern/kern_malloc.c +++ b/sys/kern/kern_malloc.c @@ -1,917 +1,921 @@ /*- * Copyright (c) 1987, 1991, 1993 * The Regents of the University of California. * Copyright (c) 2005 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG_MEMGUARD #include #endif #if defined(INVARIANTS) && defined(__i386__) #include #endif #include /* * When realloc() is called, if the new size is sufficiently smaller than * the old size, realloc() will allocate a new, smaller block to avoid * wasting memory. 'Sufficiently smaller' is defined as: newsize <= * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 
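 *
 * Illustrative example (not part of this change): with the default
 * REALLOC_FRACTION of 1, shrinking a 1024-byte allocation to anything
 * larger than 512 bytes reuses the existing block, while a request of
 * 512 bytes or less makes realloc() allocate a fresh, smaller block
 * and copy the contents over:
 *
 *	p = malloc(1024, M_TEMP, M_WAITOK);
 *	p = realloc(p, 600, M_TEMP, M_WAITOK);	(block reused in place)
 *	p = realloc(p, 400, M_TEMP, M_WAITOK);	(new, smaller block)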
*/ #ifndef REALLOC_FRACTION #define REALLOC_FRACTION 1 /* new block if <= half the size */ #endif MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); static void kmeminit(void *); SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL) static MALLOC_DEFINE(M_FREE, "free", "should be on free list"); static struct malloc_type *kmemstatistics; static char *kmembase; static char *kmemlimit; static int kmemcount; #define KMEM_ZSHIFT 4 #define KMEM_ZBASE 16 #define KMEM_ZMASK (KMEM_ZBASE - 1) #define KMEM_ZMAX PAGE_SIZE #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) static u_int8_t kmemsize[KMEM_ZSIZE + 1]; /* These won't be powers of two for long */ struct { int kz_size; char *kz_name; uma_zone_t kz_zone; } kmemzones[] = { {16, "16", NULL}, {32, "32", NULL}, {64, "64", NULL}, {128, "128", NULL}, {256, "256", NULL}, {512, "512", NULL}, {1024, "1024", NULL}, {2048, "2048", NULL}, {4096, "4096", NULL}, #if PAGE_SIZE > 4096 {8192, "8192", NULL}, #if PAGE_SIZE > 8192 {16384, "16384", NULL}, #if PAGE_SIZE > 16384 {32768, "32768", NULL}, #if PAGE_SIZE > 32768 {65536, "65536", NULL}, #if PAGE_SIZE > 65536 #error "Unsupported PAGE_SIZE" #endif /* 65536 */ #endif /* 32768 */ #endif /* 16384 */ #endif /* 8192 */ #endif /* 4096 */ {0, NULL}, }; static uma_zone_t mt_zone; -#ifdef DEBUG_MEMGUARD -u_int vm_memguard_divisor; -SYSCTL_UINT(_vm, OID_AUTO, memguard_divisor, CTLFLAG_RD, &vm_memguard_divisor, - 0, "(kmem_size/memguard_divisor) == memguard submap size"); -#endif - u_int vm_kmem_size; SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0, "Size of kernel memory"); u_int vm_kmem_size_max; SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0, "Maximum size of kernel memory"); u_int vm_kmem_size_scale; SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0, "Scale factor for kernel memory size"); /* * The malloc_mtx protects the kmemstatistics linked list. */ struct mtx malloc_mtx; #ifdef MALLOC_PROFILE uint64_t krequests[KMEM_ZSIZE + 1]; static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS); #endif static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS); static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS); /* time_uptime of last malloc(9) failure */ static time_t t_malloc_fail; #ifdef MALLOC_MAKE_FAILURES /* * Causes malloc failures every (n) mallocs with M_NOWAIT. If set to 0, * doesn't cause failures. */ SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0, "Kernel malloc debugging options"); static int malloc_failure_rate; static int malloc_nowait_count; static int malloc_failure_count; SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW, &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail"); TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate); SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD, &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures"); #endif int malloc_last_fail(void) { return (time_uptime - t_malloc_fail); } /* * Add this to the informational malloc_type bucket. 
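 *
 * (Illustrative note, not part of this change.)  Each allocation bumps
 * the calling CPU's mts_memalloced/mts_numallocs counters and, when the
 * request was satisfied from one of the kmemzones, records that zone by
 * setting bit 'zindx' in mts_size, which sysctl_kern_malloc() later
 * decodes with a test of the form
 *
 *	if (mts_local.mts_size & (1 << indx))
 *		(this type has allocated from kmemzones[indx])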
*/ static void malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size, int zindx) { struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp; critical_enter(); mtip = mtp->ks_handle; mtsp = &mtip->mti_stats[curcpu]; if (size > 0) { mtsp->mts_memalloced += size; mtsp->mts_numallocs++; } if (zindx != -1) mtsp->mts_size |= 1 << zindx; critical_exit(); } void malloc_type_allocated(struct malloc_type *mtp, unsigned long size) { if (size > 0) malloc_type_zone_allocated(mtp, size, -1); } /* * Remove this allocation from the informational malloc_type bucket. */ void malloc_type_freed(struct malloc_type *mtp, unsigned long size) { struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp; critical_enter(); mtip = mtp->ks_handle; mtsp = &mtip->mti_stats[curcpu]; mtsp->mts_memfreed += size; mtsp->mts_numfrees++; critical_exit(); } /* * malloc: * * Allocate a block of memory. * * If M_NOWAIT is set, this routine will not block and return NULL if * the allocation fails. */ void * malloc(unsigned long size, struct malloc_type *mtp, int flags) { int indx; caddr_t va; uma_zone_t zone; uma_keg_t keg; #ifdef DIAGNOSTIC unsigned long osize = size; #endif #ifdef INVARIANTS /* * Check that exactly one of M_WAITOK or M_NOWAIT is specified. */ indx = flags & (M_WAITOK | M_NOWAIT); if (indx != M_NOWAIT && indx != M_WAITOK) { static struct timeval lasterr; static int curerr, once; if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { printf("Bad malloc flags: %x\n", indx); kdb_backtrace(); flags |= M_WAITOK; once++; } } #endif #if 0 if (size == 0) kdb_enter("zero size malloc"); #endif #ifdef MALLOC_MAKE_FAILURES if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) { atomic_add_int(&malloc_nowait_count, 1); if ((malloc_nowait_count % malloc_failure_rate) == 0) { atomic_add_int(&malloc_failure_count, 1); t_malloc_fail = time_uptime; return (NULL); } } #endif if (flags & M_WAITOK) KASSERT(curthread->td_intr_nesting_level == 0, ("malloc(M_WAITOK) in interrupt context")); #ifdef DEBUG_MEMGUARD - /* XXX CHANGEME! */ - if (mtp == M_SUBPROC) + if (memguard_cmp(mtp)) return memguard_alloc(size, flags); #endif if (size <= KMEM_ZMAX) { if (size & KMEM_ZMASK) size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; indx = kmemsize[size >> KMEM_ZSHIFT]; zone = kmemzones[indx].kz_zone; keg = zone->uz_keg; #ifdef MALLOC_PROFILE krequests[size >> KMEM_ZSHIFT]++; #endif va = uma_zalloc(zone, flags); if (va != NULL) size = keg->uk_size; malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); } else { size = roundup(size, PAGE_SIZE); zone = NULL; keg = NULL; va = uma_large_malloc(size, flags); malloc_type_allocated(mtp, va == NULL ? 0 : size); } if (flags & M_WAITOK) KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); else if (va == NULL) t_malloc_fail = time_uptime; #ifdef DIAGNOSTIC if (va != NULL && !(flags & M_ZERO)) { memset(va, 0x70, osize); } #endif return ((void *) va); } /* * free: * * Free a block of memory allocated by malloc. * * This routine may not block. */ void free(void *addr, struct malloc_type *mtp) { uma_slab_t slab; u_long size; /* free(NULL, ...) does nothing */ if (addr == NULL) return; #ifdef DEBUG_MEMGUARD - /* XXX CHANGEME! 
*/ - if (mtp == M_SUBPROC) { + if (memguard_cmp(mtp)) { memguard_free(addr); return; } #endif size = 0; slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); if (slab == NULL) panic("free: address %p(%p) has not been allocated.\n", addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); if (!(slab->us_flags & UMA_SLAB_MALLOC)) { #ifdef INVARIANTS struct malloc_type **mtpp = addr; #endif size = slab->us_keg->uk_size; #ifdef INVARIANTS /* * Cache a pointer to the malloc_type that most recently freed * this memory here. This way we know who is most likely to * have stepped on it later. * * This code assumes that size is a multiple of 8 bytes for * 64 bit machines */ mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR); mtpp += (size - sizeof(struct malloc_type *)) / sizeof(struct malloc_type *); *mtpp = mtp; #endif uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); } else { size = slab->us_size; uma_large_free(slab); } malloc_type_freed(mtp, size); } /* * realloc: change the size of a memory block */ void * realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags) { uma_slab_t slab; unsigned long alloc; void *newaddr; /* realloc(NULL, ...) is equivalent to malloc(...) */ if (addr == NULL) return (malloc(size, mtp, flags)); /* * XXX: Should report free of old memory and alloc of new memory to * per-CPU stats. */ #ifdef DEBUG_MEMGUARD -/* XXX: CHANGEME! */ -if (mtp == M_SUBPROC) { +if (memguard_cmp(mtp)) { slab = NULL; alloc = size; } else { #endif slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); /* Sanity check */ KASSERT(slab != NULL, ("realloc: address %p out of range", (void *)addr)); /* Get the size of the original block */ if (!(slab->us_flags & UMA_SLAB_MALLOC)) alloc = slab->us_keg->uk_size; else alloc = slab->us_size; /* Reuse the original block if appropriate */ if (size <= alloc && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) return (addr); #ifdef DEBUG_MEMGUARD } #endif /* Allocate a new, bigger (or smaller) block */ if ((newaddr = malloc(size, mtp, flags)) == NULL) return (NULL); /* Copy over original contents */ bcopy(addr, newaddr, min(size, alloc)); free(addr, mtp); return (newaddr); } /* * reallocf: same as realloc() but free memory on failure. */ void * reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags) { void *mem; if ((mem = realloc(addr, size, mtp, flags)) == NULL) free(addr, mtp); return (mem); } /* * Initialize the kernel memory allocator */ /* ARGSUSED*/ static void kmeminit(void *dummy) { u_int8_t indx; u_long mem_size; int i; mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); /* * Try to auto-tune the kernel memory size, so that it is * more applicable for a wider range of machine sizes. * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while * a VM_KMEM_SIZE of 12MB is a fair compromise. The * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space * available, and on an X86 with a total KVA space of 256MB, * try to keep VM_KMEM_SIZE_MAX at 80MB or below. * * Note that the kmem_map is also used by the zone allocator, * so make sure that there is enough space. 
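 *
 * Worked example (illustrative, assuming 4KB pages): on a machine with
 * 65536 physical pages (256MB of RAM) and VM_KMEM_SIZE_SCALE == 4, the
 * scaled size is 65536 / 4 = 16384 pages, i.e. 64MB; this replaces the
 * compiled-in VM_KMEM_SIZE default when it is larger, and the result is
 * then clamped first to VM_KMEM_SIZE_MAX and finally to twice the size
 * of physical memory.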
*/ vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE; mem_size = cnt.v_page_count; #if defined(VM_KMEM_SIZE_SCALE) vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; #endif TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale); if (vm_kmem_size_scale > 0 && (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE)) vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE; #if defined(VM_KMEM_SIZE_MAX) vm_kmem_size_max = VM_KMEM_SIZE_MAX; #endif TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max); if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) vm_kmem_size = vm_kmem_size_max; /* Allow final override from the kernel environment */ #ifndef BURN_BRIDGES if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0) printf("kern.vm.kmem.size is now called vm.kmem_size!\n"); #endif TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size); /* * Limit kmem virtual size to twice the physical memory. * This allows for kmem map sparseness, but limits the size * to something sane. Be careful to not overflow the 32bit * ints while doing the check. */ if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count) vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE; /* * Tune settings based on the kernel map's size at this time. */ init_param3(vm_kmem_size / PAGE_SIZE); kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase, (vm_offset_t *)&kmemlimit, vm_kmem_size); kmem_map->system_map = 1; #ifdef DEBUG_MEMGUARD /* * Initialize MemGuard if support compiled in. MemGuard is a * replacement allocator used for detecting tamper-after-free * scenarios as they occur. It is only used for debugging. */ vm_memguard_divisor = 10; - TUNABLE_INT_FETCH("vm.memguard_divisor", &vm_memguard_divisor); + TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor); /* Pick a conservative value if provided value sucks. 
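 * E.g. (illustrative): with a 320MB kmem_map and the default divisor of
 * 10, MemGuard is given a 32MB submap.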
*/ if ((vm_memguard_divisor <= 0) || ((vm_kmem_size / vm_memguard_divisor) == 0)) vm_memguard_divisor = 10; memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor); #endif uma_startup2(); mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal), #ifdef INVARIANTS mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, #else NULL, NULL, NULL, NULL, #endif UMA_ALIGN_PTR, UMA_ZONE_MALLOC); for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { int size = kmemzones[indx].kz_size; char *name = kmemzones[indx].kz_name; kmemzones[indx].kz_zone = uma_zcreate(name, size, #ifdef INVARIANTS mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, #else NULL, NULL, NULL, NULL, #endif UMA_ALIGN_PTR, UMA_ZONE_MALLOC); for (;i <= size; i+= KMEM_ZBASE) kmemsize[i >> KMEM_ZSHIFT] = indx; } } void malloc_init(void *data) { struct malloc_type_internal *mtip; struct malloc_type *mtp; KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init")); mtp = data; mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); mtp->ks_handle = mtip; mtx_lock(&malloc_mtx); mtp->ks_next = kmemstatistics; kmemstatistics = mtp; kmemcount++; mtx_unlock(&malloc_mtx); } void malloc_uninit(void *data) { struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp; struct malloc_type *mtp, *temp; long temp_allocs, temp_bytes; int i; mtp = data; KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL")); mtx_lock(&malloc_mtx); mtip = mtp->ks_handle; mtp->ks_handle = NULL; if (mtp != kmemstatistics) { for (temp = kmemstatistics; temp != NULL; temp = temp->ks_next) { if (temp->ks_next == mtp) temp->ks_next = mtp->ks_next; } } else kmemstatistics = mtp->ks_next; kmemcount--; mtx_unlock(&malloc_mtx); /* * Look for memory leaks. */ temp_allocs = temp_bytes = 0; for (i = 0; i < MAXCPU; i++) { mtsp = &mtip->mti_stats[i]; temp_allocs += mtsp->mts_numallocs; temp_allocs -= mtsp->mts_numfrees; temp_bytes += mtsp->mts_memalloced; temp_bytes -= mtsp->mts_memfreed; } if (temp_allocs > 0 || temp_bytes > 0) { printf("Warning: memory type %s leaked memory on destroy " "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc, temp_allocs, temp_bytes); } uma_zfree(mt_zone, mtip); } +struct malloc_type * +malloc_desc2type(const char *desc) +{ + struct malloc_type *mtp; + + mtx_assert(&malloc_mtx, MA_OWNED); + for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { + if (strcmp(mtp->ks_shortdesc, desc) == 0) + return (mtp); + } + return (NULL); +} + static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS) { struct malloc_type_stats mts_local, *mtsp; struct malloc_type_internal *mtip; struct malloc_type *mtp; struct sbuf sbuf; long temp_allocs, temp_bytes; int linesize = 128; int bufsize; int first; int error; char *buf; int cnt; int i; cnt = 0; /* Guess at how much room is needed. 
*/ mtx_lock(&malloc_mtx); cnt = kmemcount; mtx_unlock(&malloc_mtx); bufsize = linesize * (cnt + 1); buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO); sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN); mtx_lock(&malloc_mtx); sbuf_printf(&sbuf, "\n Type InUse MemUse HighUse Requests Size(s)\n"); for (mtp = kmemstatistics; cnt != 0 && mtp != NULL; mtp = mtp->ks_next, cnt--) { mtip = mtp->ks_handle; bzero(&mts_local, sizeof(mts_local)); for (i = 0; i < MAXCPU; i++) { mtsp = &mtip->mti_stats[i]; mts_local.mts_memalloced += mtsp->mts_memalloced; mts_local.mts_memfreed += mtsp->mts_memfreed; mts_local.mts_numallocs += mtsp->mts_numallocs; mts_local.mts_numfrees += mtsp->mts_numfrees; mts_local.mts_size |= mtsp->mts_size; } if (mts_local.mts_numallocs == 0) continue; /* * Due to races in per-CPU statistics gather, it's possible to * get a slightly negative number here. If we do, approximate * with 0. */ if (mts_local.mts_numallocs > mts_local.mts_numfrees) temp_allocs = mts_local.mts_numallocs - mts_local.mts_numfrees; else temp_allocs = 0; /* * Ditto for bytes allocated. */ if (mts_local.mts_memalloced > mts_local.mts_memfreed) temp_bytes = mts_local.mts_memalloced - mts_local.mts_memfreed; else temp_bytes = 0; /* * High-waterwark is no longer easily available, so we just * print '-' for that column. */ sbuf_printf(&sbuf, "%13s%6lu%6luK -%9llu", mtp->ks_shortdesc, temp_allocs, (temp_bytes + 1023) / 1024, (unsigned long long)mts_local.mts_numallocs); first = 1; for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1; i++) { if (mts_local.mts_size & (1 << i)) { if (first) sbuf_printf(&sbuf, " "); else sbuf_printf(&sbuf, ","); sbuf_printf(&sbuf, "%s", kmemzones[i].kz_name); first = 0; } } sbuf_printf(&sbuf, "\n"); } sbuf_finish(&sbuf); mtx_unlock(&malloc_mtx); error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf)); sbuf_delete(&sbuf); free(buf, M_TEMP); return (error); } SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD, NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats"); static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS) { struct malloc_type_stream_header mtsh; struct malloc_type_internal *mtip; struct malloc_type_header mth; struct malloc_type *mtp; int buflen, count, error, i; struct sbuf sbuf; char *buffer; mtx_lock(&malloc_mtx); restart: mtx_assert(&malloc_mtx, MA_OWNED); count = kmemcount; mtx_unlock(&malloc_mtx); buflen = sizeof(mtsh) + count * (sizeof(mth) + sizeof(struct malloc_type_stats) * MAXCPU) + 1; buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); mtx_lock(&malloc_mtx); if (count < kmemcount) { free(buffer, M_TEMP); goto restart; } sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN); /* * Insert stream header. */ bzero(&mtsh, sizeof(mtsh)); mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION; mtsh.mtsh_maxcpus = MAXCPU; mtsh.mtsh_count = kmemcount; if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) { mtx_unlock(&malloc_mtx); error = ENOMEM; goto out; } /* * Insert alternating sequence of type headers and type statistics. */ for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { mtip = (struct malloc_type_internal *)mtp->ks_handle; /* * Insert type header. */ bzero(&mth, sizeof(mth)); strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME); if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) { mtx_unlock(&malloc_mtx); error = ENOMEM; goto out; } /* * Insert type statistics for each CPU. 
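 *
 * The complete stream therefore looks like (illustrative layout):
 *
 *	struct malloc_type_stream_header	(once)
 *	struct malloc_type_header		(one per type)
 *	struct malloc_type_stats [MAXCPU]	(one array per type)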
*/ for (i = 0; i < MAXCPU; i++) { if (sbuf_bcat(&sbuf, &mtip->mti_stats[i], sizeof(mtip->mti_stats[i])) < 0) { mtx_unlock(&malloc_mtx); error = ENOMEM; goto out; } } } mtx_unlock(&malloc_mtx); sbuf_finish(&sbuf); error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf)); out: sbuf_delete(&sbuf); free(buffer, M_TEMP); return (error); } SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats", "Return malloc types"); SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0, "Count of kernel malloc types"); #ifdef DDB DB_SHOW_COMMAND(malloc, db_show_malloc) { struct malloc_type_internal *mtip; struct malloc_type *mtp; u_int64_t allocs, frees; int i; db_printf("%18s %12s %12s %12s\n", "Type", "Allocs", "Frees", "Used"); for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { mtip = (struct malloc_type_internal *)mtp->ks_handle; allocs = 0; frees = 0; for (i = 0; i < MAXCPU; i++) { allocs += mtip->mti_stats[i].mts_numallocs; frees += mtip->mti_stats[i].mts_numfrees; } db_printf("%18s %12ju %12ju %12ju\n", mtp->ks_shortdesc, allocs, frees, allocs - frees); } } #endif #ifdef MALLOC_PROFILE static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) { int linesize = 64; struct sbuf sbuf; uint64_t count; uint64_t waste; uint64_t mem; int bufsize; int error; char *buf; int rsize; int size; int i; bufsize = linesize * (KMEM_ZSIZE + 1); bufsize += 128; /* For the stats line */ bufsize += 128; /* For the banner line */ waste = 0; mem = 0; buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO); sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN); sbuf_printf(&sbuf, "\n Size Requests Real Size\n"); for (i = 0; i < KMEM_ZSIZE; i++) { size = i << KMEM_ZSHIFT; rsize = kmemzones[kmemsize[i]].kz_size; count = (long long unsigned)krequests[i]; sbuf_printf(&sbuf, "%6d%28llu%11d\n", size, (unsigned long long)count, rsize); if ((rsize * count) > (size * count)) waste += (rsize * count) - (size * count); mem += (rsize * count); } sbuf_printf(&sbuf, "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", (unsigned long long)mem, (unsigned long long)waste); sbuf_finish(&sbuf); error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf)); sbuf_delete(&sbuf); free(buf, M_TEMP); return (error); } SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling"); #endif /* MALLOC_PROFILE */ diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h index f1bdedb70642..e59b9ac64560 100644 --- a/sys/sys/malloc.h +++ b/sys/sys/malloc.h @@ -1,194 +1,196 @@ /*- * Copyright (c) 1987, 1993 * The Regents of the University of California. * Copyright (c) 2005 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)malloc.h 8.5 (Berkeley) 5/3/95 * $FreeBSD$ */ #ifndef _SYS_MALLOC_H_ #define _SYS_MALLOC_H_ #include #include #include #include #define MINALLOCSIZE UMA_SMALLEST_UNIT /* * flags to malloc. */ #define M_NOWAIT 0x0001 /* do not block */ #define M_WAITOK 0x0002 /* ok to block */ #define M_ZERO 0x0100 /* bzero the allocation */ #define M_NOVM 0x0200 /* don't ask VM for pages */ #define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */ #define M_MAGIC 877983977 /* time when first defined :-) */ /* * Two malloc type structures are present: malloc_type, which is used by a * type owner to declare the type, and malloc_type_internal, which holds * malloc-owned statistics and other ABI-sensitive fields, such as the set of * malloc statistics indexed by the compile-time MAXCPU constant. * Applications should avoid introducing dependence on the allocator private * data layout and size. * * The malloc_type ks_next field is protected by malloc_mtx. Other fields in * malloc_type are static after initialization so unsynchronized. * * Statistics in malloc_type_stats are written only when holding a critical * section and running on the CPU associated with the index into the stat * array, but read lock-free resulting in possible (minor) races, which the * monitoring app should take into account. */ struct malloc_type_stats { uint64_t mts_memalloced; /* Bytes allocated on CPU. */ uint64_t mts_memfreed; /* Bytes freed on CPU. */ uint64_t mts_numallocs; /* Number of allocates on CPU. */ uint64_t mts_numfrees; /* number of frees on CPU. */ uint64_t mts_size; /* Bitmask of sizes allocated on CPU. */ uint64_t _mts_reserved1; /* Reserved field. */ uint64_t _mts_reserved2; /* Reserved field. */ uint64_t _mts_reserved3; /* Reserved field. */ }; struct malloc_type_internal { struct malloc_type_stats mti_stats[MAXCPU]; }; /* * ABI-compatible version of the old 'struct malloc_type', only all stats are * now malloc-managed in malloc-owned memory rather than in caller memory, so * as to avoid ABI issues. The ks_next pointer is reused as a pointer to the * internal data handle. */ struct malloc_type { struct malloc_type *ks_next; /* Next in global chain. */ u_long _ks_memuse; /* No longer used. */ u_long _ks_size; /* No longer used. */ u_long _ks_inuse; /* No longer used. */ uint64_t _ks_calls; /* No longer used. */ u_long _ks_maxused; /* No longer used. */ u_long ks_magic; /* Detect programmer error. */ const char *ks_shortdesc; /* Printable type name. */ /* * struct malloc_type was terminated with a struct mtx, which is no * longer required. For ABI reasons, continue to flesh out the full * size of the old structure, but reuse the _lo_class field for our * internal data handle. */ void *ks_handle; /* Priv. data, was lo_class. 
*/ const char *_lo_name; const char *_lo_type; u_int _lo_flags; void *_lo_list_next; struct witness *_lo_witness; uintptr_t _mtx_lock; u_int _mtx_recurse; }; /* * Statistics structure headers for user space. The kern.malloc sysctl * exposes a structure stream consisting of a stream header, then a series of * malloc type headers and statistics structures (quantity maxcpus). For * convenience, the kernel will provide the current value of maxcpus at the * head of the stream. */ #define MALLOC_TYPE_STREAM_VERSION 0x00000001 struct malloc_type_stream_header { uint32_t mtsh_version; /* Stream format version. */ uint32_t mtsh_maxcpus; /* Value of MAXCPU for stream. */ uint32_t mtsh_count; /* Number of records. */ uint32_t _mtsh_pad; /* Pad/reserved field. */ }; #define MALLOC_MAX_NAME 32 struct malloc_type_header { char mth_name[MALLOC_MAX_NAME]; }; #ifdef _KERNEL #define MALLOC_DEFINE(type, shortdesc, longdesc) \ struct malloc_type type[1] = { \ { NULL, 0, 0, 0, 0, 0, M_MAGIC, shortdesc, NULL, NULL, \ NULL, 0, NULL, NULL, 0, 0 } \ }; \ SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_SECOND, malloc_init, \ type); \ SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY, \ malloc_uninit, type) #define MALLOC_DECLARE(type) \ extern struct malloc_type type[1] MALLOC_DECLARE(M_CACHE); MALLOC_DECLARE(M_DEVBUF); MALLOC_DECLARE(M_TEMP); MALLOC_DECLARE(M_IP6OPT); /* for INET6 */ MALLOC_DECLARE(M_IP6NDP); /* for INET6 */ /* * Deprecated macro versions of not-quite-malloc() and free(). */ #define MALLOC(space, cast, size, type, flags) \ ((space) = (cast)malloc((u_long)(size), (type), (flags))) #define FREE(addr, type) free((addr), (type)) /* * XXX this should be declared in , but that tends to fail * because is included in a header before the source file * has a chance to include to get MALLOC_DECLARE() defined. */ MALLOC_DECLARE(M_IOV); extern struct mtx malloc_mtx; /* XXX struct malloc_type is unused for contig*(). */ void contigfree(void *addr, unsigned long size, struct malloc_type *type); void *contigmalloc(unsigned long size, struct malloc_type *type, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, unsigned long boundary); void free(void *addr, struct malloc_type *type); void *malloc(unsigned long size, struct malloc_type *type, int flags); void malloc_init(void *); int malloc_last_fail(void); void malloc_type_allocated(struct malloc_type *type, unsigned long size); void malloc_type_freed(struct malloc_type *type, unsigned long size); void malloc_uninit(void *); void *realloc(void *addr, unsigned long size, struct malloc_type *type, int flags); void *reallocf(void *addr, unsigned long size, struct malloc_type *type, int flags); + +struct malloc_type *malloc_desc2type(const char *desc); #endif /* _KERNEL */ #endif /* !_SYS_MALLOC_H_ */ diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c index 2140a0bf08b2..9a4f98b17ab7 100644 --- a/sys/vm/memguard.c +++ b/sys/vm/memguard.c @@ -1,315 +1,405 @@ /* * Copyright (c) 2005, * Bosko Milekic . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * MemGuard is a simple replacement allocator for debugging only * which provides ElectricFence-style memory barrier protection on * objects being allocated, and is used to detect tampering-after-free * scenarios. * * See the memguard(9) man page for more information on using MemGuard. */ #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include /* * The maximum number of pages allowed per allocation. If you're using * MemGuard to override very large items (> MAX_PAGES_PER_ITEM in size), * you need to increase MAX_PAGES_PER_ITEM. */ #define MAX_PAGES_PER_ITEM 64 +SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data"); +/* + * The vm_memguard_divisor variable controls how much of kmem_map should be + * reserved for MemGuard. + */ +u_int vm_memguard_divisor; +SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RD, &vm_memguard_divisor, + 0, "(kmem_size/memguard_divisor) == memguard submap size"); + +/* + * Short description (ks_shortdesc) of memory type to monitor. + */ +static char vm_memguard_desc[128] = ""; +static struct malloc_type *vm_memguard_mtype = NULL; +TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc)); +static int +memguard_sysctl_desc(SYSCTL_HANDLER_ARGS) +{ + struct malloc_type_internal *mtip; + struct malloc_type_stats *mtsp; + struct malloc_type *mtp; + char desc[128]; + long bytes; + int error, i; + + strlcpy(desc, vm_memguard_desc, sizeof(desc)); + error = sysctl_handle_string(oidp, desc, sizeof(desc), req); + if (error != 0 || req->newptr == NULL) + return (error); + + /* + * We can change memory type when no memory has been allocated for it + * or when there is no such memory type yet (ie. it will be loaded with + * kernel module). + */ + bytes = 0; + mtx_lock(&malloc_mtx); + mtp = malloc_desc2type(desc); + if (mtp != NULL) { + mtip = mtp->ks_handle; + for (i = 0; i < MAXCPU; i++) { + mtsp = &mtip->mti_stats[i]; + bytes += mtsp->mts_memalloced; + bytes -= mtsp->mts_memfreed; + } + } + if (bytes > 0) + error = EBUSY; + else { + /* + * If mtp is NULL, it will be initialized in memguard_cmp(). + */ + vm_memguard_mtype = mtp; + strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc)); + } + mtx_unlock(&malloc_mtx); + return (error); +} +SYSCTL_PROC(_vm_memguard, OID_AUTO, desc, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, + memguard_sysctl_desc, "A", "Short description of memory type to monitor"); + /* * Global MemGuard data. 
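 *
 * The type being monitored is selected through vm_memguard_desc and
 * vm_memguard_mtype above, e.g. (illustrative) at boot via the
 * vm.memguard.desc loader tunable or at runtime, provided the type has
 * no memory currently outstanding:
 *
 *	sysctl vm.memguard.desc=devbuf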
*/ static vm_map_t memguard_map; static unsigned long memguard_mapsize; static unsigned long memguard_mapused; struct memguard_entry { STAILQ_ENTRY(memguard_entry) entries; void *ptr; }; static struct memguard_fifo { struct memguard_entry *stqh_first; struct memguard_entry **stqh_last; int index; } memguard_fifo_pool[MAX_PAGES_PER_ITEM]; /* * Local prototypes. */ static void memguard_guard(void *addr, int numpgs); static void memguard_unguard(void *addr, int numpgs); static struct memguard_fifo *vtomgfifo(vm_offset_t va); static void vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo); static void vclrmgfifo(vm_offset_t va); /* * Local macros. MemGuard data is global, so replace these with whatever * your system uses to protect global data (if it is kernel-level * parallelized). This is for porting among BSDs. */ #define MEMGUARD_CRIT_SECTION_DECLARE static struct mtx memguard_mtx #define MEMGUARD_CRIT_SECTION_INIT \ mtx_init(&memguard_mtx, "MemGuard mtx", NULL, MTX_DEF) #define MEMGUARD_CRIT_SECTION_ENTER mtx_lock(&memguard_mtx) #define MEMGUARD_CRIT_SECTION_EXIT mtx_unlock(&memguard_mtx) MEMGUARD_CRIT_SECTION_DECLARE; /* * Initialize the MemGuard mock allocator. All objects from MemGuard come * out of a single VM map (contiguous chunk of address space). */ void memguard_init(vm_map_t parent_map, unsigned long size) { char *base, *limit; int i; /* size must be multiple of PAGE_SIZE */ size /= PAGE_SIZE; size++; size *= PAGE_SIZE; memguard_map = kmem_suballoc(parent_map, (vm_offset_t *)&base, (vm_offset_t *)&limit, (vm_size_t)size); memguard_map->system_map = 1; memguard_mapsize = size; memguard_mapused = 0; MEMGUARD_CRIT_SECTION_INIT; MEMGUARD_CRIT_SECTION_ENTER; for (i = 0; i < MAX_PAGES_PER_ITEM; i++) { STAILQ_INIT(&memguard_fifo_pool[i]); memguard_fifo_pool[i].index = i; } MEMGUARD_CRIT_SECTION_EXIT; printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n"); printf("\tMEMGUARD map base: %p\n", base); printf("\tMEMGUARD map limit: %p\n", limit); printf("\tMEMGUARD map size: %ld (Bytes)\n", size); } /* * Allocate a single object of specified size with specified flags (either * M_WAITOK or M_NOWAIT). */ void * memguard_alloc(unsigned long size, int flags) { void *obj; struct memguard_entry *e = NULL; int numpgs; numpgs = size / PAGE_SIZE; if ((size % PAGE_SIZE) != 0) numpgs++; if (numpgs > MAX_PAGES_PER_ITEM) panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM " \ "in memguard.c (requested: %d pages)", numpgs); if (numpgs == 0) return NULL; /* * If we haven't exhausted the memguard_map yet, allocate from * it and grab a new page, even if we have recycled pages in our * FIFO. This is because we wish to allow recycled pages to live * guarded in the FIFO for as long as possible in order to catch * even very late tamper-after-frees, even though it means that * we end up wasting more memory, this is only a DEBUGGING allocator * after all. 
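 *
 * Note (illustrative): requests are rounded up to whole pages, so even
 * a 100-byte allocation consumes a full guarded page of KVA; only once
 * the submap is exhausted are pages recycled from the FIFO of
 * previously freed, still-guarded allocations of the same page count.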
*/ MEMGUARD_CRIT_SECTION_ENTER; if (memguard_mapused >= memguard_mapsize) { e = STAILQ_FIRST(&memguard_fifo_pool[numpgs - 1]); if (e != NULL) { STAILQ_REMOVE(&memguard_fifo_pool[numpgs - 1], e, memguard_entry, entries); MEMGUARD_CRIT_SECTION_EXIT; obj = e->ptr; free(e, M_TEMP); memguard_unguard(obj, numpgs); if (flags & M_ZERO) bzero(obj, PAGE_SIZE * numpgs); return obj; } MEMGUARD_CRIT_SECTION_EXIT; if (flags & M_WAITOK) panic("MEMGUARD: Failed with M_WAITOK: " \ "memguard_map too small"); return NULL; } memguard_mapused += (PAGE_SIZE * numpgs); MEMGUARD_CRIT_SECTION_EXIT; obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * numpgs, flags); if (obj != NULL) { vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[numpgs - 1]); if (flags & M_ZERO) bzero(obj, PAGE_SIZE * numpgs); } else { MEMGUARD_CRIT_SECTION_ENTER; memguard_mapused -= (PAGE_SIZE * numpgs); MEMGUARD_CRIT_SECTION_EXIT; } return obj; } /* * Free specified single object. */ void memguard_free(void *addr) { struct memguard_entry *e; struct memguard_fifo *mgfifo; int idx; int *temp; addr = (void *)trunc_page((unsigned long)addr); /* * Page should not be guarded by now, so force a write. * The purpose of this is to increase the likelihood of catching a * double-free, but not necessarily a tamper-after-free (the second * thread freeing might not write before freeing, so this forces it * to and, subsequently, trigger a fault). */ temp = (int *)((unsigned long)addr + (PAGE_SIZE/2)); /* in page */ *temp = 0xd34dc0d3; mgfifo = vtomgfifo((vm_offset_t)addr); idx = mgfifo->index; memguard_guard(addr, idx + 1); e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT); if (e == NULL) { MEMGUARD_CRIT_SECTION_ENTER; memguard_mapused -= (PAGE_SIZE * (idx + 1)); MEMGUARD_CRIT_SECTION_EXIT; memguard_unguard(addr, idx + 1); /* just in case */ vclrmgfifo((vm_offset_t)addr); kmem_free(memguard_map, (vm_offset_t)addr, PAGE_SIZE * (idx + 1)); return; } e->ptr = addr; MEMGUARD_CRIT_SECTION_ENTER; STAILQ_INSERT_TAIL(mgfifo, e, entries); MEMGUARD_CRIT_SECTION_EXIT; } +int +memguard_cmp(struct malloc_type *mtp) +{ + +#if 1 + /* + * The safest way of comparsion is to always compare short description + * string of memory type, but it is also the slowest way. + */ + return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0); +#else + /* + * If we compare pointers, there are two possible problems: + * 1. Memory type was unloaded and new memory type was allocated at the + * same address. + * 2. Memory type was unloaded and loaded again, but allocated at a + * different address. + */ + if (vm_memguard_mtype != NULL) + return (mtp == vm_memguard_mtype); + if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) { + vm_memguard_mtype = mtp; + return (1); + } + return (0); +#endif +} + /* * Guard a page containing specified object (make it read-only so that * future writes to it fail). */ static void memguard_guard(void *addr, int numpgs) { void *a = (void *)trunc_page((unsigned long)addr); if (vm_map_protect(memguard_map, (vm_offset_t)a, (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)), VM_PROT_READ, FALSE) != KERN_SUCCESS) panic("MEMGUARD: Unable to guard page!"); } /* * Unguard a page containing specified object (make it read-and-write to * allow full data access). 
*/ static void memguard_unguard(void *addr, int numpgs) { void *a = (void *)trunc_page((unsigned long)addr); if (vm_map_protect(memguard_map, (vm_offset_t)a, (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)), VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) panic("MEMGUARD: Unable to unguard page!"); } /* * vtomgfifo() converts a virtual address of the first page allocated for * an item to a memguard_fifo_pool reference for the corresponding item's * size. * * vsetmgfifo() sets a reference in an underlying page for the specified * virtual address to an appropriate memguard_fifo_pool. * * These routines are very similar to those defined by UMA in uma_int.h. * The difference is that these routines store the mgfifo in one of the * page's fields that is unused when the page is wired rather than the * object field, which is used. */ static struct memguard_fifo * vtomgfifo(vm_offset_t va) { vm_page_t p; struct memguard_fifo *mgfifo; p = PHYS_TO_VM_PAGE(pmap_kextract(va)); KASSERT(p->wire_count != 0 && p->queue == PQ_NONE, ("MEMGUARD: Expected wired page in vtomgfifo!")); mgfifo = (struct memguard_fifo *)p->pageq.tqe_next; return mgfifo; } static void vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo) { vm_page_t p; p = PHYS_TO_VM_PAGE(pmap_kextract(va)); KASSERT(p->wire_count != 0 && p->queue == PQ_NONE, ("MEMGUARD: Expected wired page in vsetmgfifo!")); p->pageq.tqe_next = (vm_page_t)mgfifo; } static void vclrmgfifo(vm_offset_t va) { vm_page_t p; p = PHYS_TO_VM_PAGE(pmap_kextract(va)); KASSERT(p->wire_count != 0 && p->queue == PQ_NONE, ("MEMGUARD: Expected wired page in vclrmgfifo!")); p->pageq.tqe_next = NULL; } diff --git a/sys/vm/memguard.h b/sys/vm/memguard.h index 10ca96de26ff..34d79cf0f3cb 100644 --- a/sys/vm/memguard.h +++ b/sys/vm/memguard.h @@ -1,31 +1,34 @@ /* * Copyright (c) 2005, * Bosko Milekic . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ +extern u_int vm_memguard_divisor; + void memguard_init(vm_map_t parent_map, unsigned long size); void *memguard_alloc(unsigned long size, int flags); void memguard_free(void *addr); +int memguard_cmp(struct malloc_type *mtp);
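A minimal usage sketch (not part of the diff; all names are taken from the sources above): with DEBUG_MEMGUARD compiled in and vm.memguard.desc set to "devbuf", allocations of type M_DEVBUF are diverted to memguard_alloc(), so a write to freed memory faults immediately instead of silently corrupting the heap:

	char *p;

	p = malloc(128, M_DEVBUF, M_WAITOK);	/* served by memguard_alloc() */
	p[0] = 'A';				/* page is writable while live */
	free(p, M_DEVBUF);			/* memguard_free() guards the page */
	p[0] = 'B';				/* tamper-after-free: faults here */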