Index: sys/vm/memguard.c
===================================================================
--- sys/vm/memguard.c
+++ sys/vm/memguard.c
@@ -64,6 +64,13 @@
 #include
 #include
 
+#if VM_NRESERVLEVEL > 0
+#define	MEMGUARD_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
+#else
+#define	MEMGUARD_SHIFT	PAGE_SHIFT
+#endif
+#define	MEMGUARD_ALIGN	(1 << MEMGUARD_SHIFT)
+
 static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
 /*
  * The vm_memguard_divisor variable controls how much of kernel_arena should be
@@ -194,6 +201,7 @@
 	memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
 	if (km_size + memguard_mapsize > parent_size)
 		memguard_mapsize = 0;
+	memguard_mapsize = roundup2(memguard_mapsize, MEMGUARD_ALIGN);
 
 	return (km_size + memguard_mapsize);
 }
@@ -206,7 +214,8 @@
 {
 	vm_offset_t base;
 
-	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
+	vmem_xalloc(parent, memguard_mapsize, MEMGUARD_ALIGN, 0, 0,
+	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &base);
 	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
 	    PAGE_SIZE, 0, M_WAITOK);
 	memguard_cursor = base;
@@ -286,9 +295,10 @@
 void *
 memguard_alloc(unsigned long req_size, int flags)
 {
-	vm_offset_t addr, origaddr;
+	vm_offset_t addr, next, origaddr;
 	u_long size_p, size_v;
-	int do_guard, rv;
+	int domain, rv;
+	bool do_guard;
 
 	size_p = round_page(req_size);
 	if (size_p == 0)
@@ -332,6 +342,7 @@
 		    memguard_cursor, VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT,
 		    &origaddr) == 0)
 			break;
+
 		/*
 		 * The map has no space. This may be due to
 		 * fragmentation, or because the cursor is near the
@@ -348,14 +359,38 @@
 	addr = origaddr;
 	if (do_guard)
 		addr += PAGE_SIZE;
-	rv = kmem_back(kernel_object, addr, size_p, flags);
+
+	/*
+	 * The kmem_* API uses per-domain vmem arenas to ensure that pages
+	 * backing a large virtual page all come from the same domain. We must
+	 * provide the same guarantee.
+	 */
+	domain = 0;
+#if VM_NRESERVLEVEL > 0
+	if (vm_ndomains > 1)
+		domain = (addr >> MEMGUARD_SHIFT) % vm_ndomains;
+#endif
+	rv = kmem_back_domain(domain, kernel_object, addr, size_p, flags);
 	if (rv != KERN_SUCCESS) {
 		vmem_xfree(memguard_arena, origaddr, size_v);
 		memguard_fail_pgs++;
 		addr = (vm_offset_t)NULL;
+#if VM_NRESERVLEVEL > 0
+		/*
+		 * Select a different domain for subsequent allocations by
+		 * advancing the cursor to the next superpage boundary. This
+		 * may be undone by the non-atomic cursor update following a
+		 * concurrent successful allocation.
+		 */
+		if (vm_ndomains > 1) {
+			next = roundup2(memguard_cursor, MEMGUARD_ALIGN);
+			(void)atomic_cmpset_long(&memguard_cursor,
+			    memguard_cursor, next);
+		}
+#endif
 		goto out;
 	}
-	memguard_cursor = addr + size_v;
+	memguard_cursor = origaddr + size_v;
 	*v2sizep(trunc_page(addr)) = req_size;
 	*v2sizev(trunc_page(addr)) = size_v;
 	memguard_succ++;
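
For reference, below is a minimal standalone sketch (not part of the patch) of the two pieces of arithmetic the change introduces: mapping a guarded address to a NUMA domain via MEMGUARD_SHIFT, and bumping the cursor to the next superpage boundary on a backing failure. The PAGE_SHIFT and VM_LEVEL_0_ORDER values are the amd64 ones, and the sample address and domain count are assumptions chosen only for illustration; roundup2() is written out with the same arithmetic as the kernel macro.

#include <stdio.h>

#define	PAGE_SHIFT	12			/* assumed: amd64 4 KB base pages */
#define	VM_LEVEL_0_ORDER 9			/* assumed: 512 pages per 2 MB reservation */
#define	MEMGUARD_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
#define	MEMGUARD_ALIGN	(1UL << MEMGUARD_SHIFT)

/* Same arithmetic as the kernel's roundup2() macro (power-of-two alignment). */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	/* Hypothetical kernel virtual address and NUMA domain count. */
	unsigned long addr = 0xfffff80012345000UL;
	int vm_ndomains = 2;

	/* Domain selection, as done before kmem_back_domain() in the patch. */
	int domain = (int)((addr >> MEMGUARD_SHIFT) % vm_ndomains);

	/* Failure path: advance the cursor to the next superpage boundary. */
	unsigned long next = roundup2(addr, MEMGUARD_ALIGN);

	printf("superpage index %lu -> domain %d, next cursor %#lx\n",
	    addr >> MEMGUARD_SHIFT, domain, next);
	return (0);
}

Since consecutive superpage-sized chunks of the arena map to different domains, skipping ahead to the next MEMGUARD_ALIGN boundary after a kmem_back_domain() failure naturally retries the allocation against a different domain.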