sys/vm/vm_kern.c
[91 lines skipped]
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_pagequeue.h>
 #include <vm/vm_radix.h>
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
-vm_map_t kernel_map;
-vm_map_t exec_map;
-vm_map_t pipe_map;
+struct vm_map kernel_map_store;
+struct vm_map exec_map_store;
+struct vm_map pipe_map_store;
 const void *zero_region;
 CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

 /* NB: Used by kernel debuggers. */
 const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

 u_int exec_map_entry_size;
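This hunk is the heart of the change: the kernel's top-level maps become statically allocated struct vm_map objects rather than pointers filled in from vm_map_create() during boot. A minimal sketch of the resulting pattern, assuming the matching header change (not part of this hunk) aliases the old pointer-style names to the new storage:

/*
 * Sketch only.  The storage lives in vm_kern.c; the alias below is an
 * assumption about the accompanying header change, so that existing
 * callers of kernel_map keep compiling unmodified.
 */
struct vm_map kernel_map_store;			/* static backing storage */
#define	kernel_map	(&kernel_map_store)	/* assumed header alias */

Besides removing a boot-time allocation that could never usefully fail, this lets kernel_map be referenced before any allocator is running, which is what allows kmem_init() below to drop its temporary local variable.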
[244 lines skipped]

 	do {
 		if (addr != 0)
 			break;
 	} while (vm_domainset_iter_policy(&di, &domain) == 0);

 	return (addr);
 }
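For orientation, the loop tail above is the usual NUMA domain-iteration wrapper shared by the kmem_alloc_*_domainset() functions. A sketch of the full shape, under the assumption that the skipped lines follow the standard pattern (the per-domain callee here is hypothetical):

	struct vm_domainset_iter di;
	vm_offset_t addr;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		/* Hypothetical per-domain attempt; the real callee is
		 * in the skipped region above. */
		addr = kmem_alloc_domain(domain, size, flags);
		if (addr != 0)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (addr);

A zero return from vm_domainset_iter_policy() means the policy has another domain to offer, so the allocation is retried there; once all permitted domains have been tried, the loop falls through with addr == 0.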
 /*
- * kmem_suballoc:
+ * kmem_subinit:
  *
- *	Allocates a map to manage a subrange
+ *	Initializes a map to manage a subrange
  *	of the kernel virtual address space.
  *
  *	Arguments are as follows:
  *
  *	parent		Map to take range from
  *	min, max	Returned endpoints of map
  *	size		Size of range to find
  *	superpage_align	Request that min is superpage aligned
  */
-vm_map_t
-kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
-    vm_size_t size, boolean_t superpage_align)
+void
+kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+    vm_size_t size, bool superpage_align)
 {
 	int ret;
-	vm_map_t result;

 	size = round_page(size);

 	*min = vm_map_min(parent);
 	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
 	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
 	    MAP_ACC_NO_CHARGE);
 	if (ret != KERN_SUCCESS)
-		panic("kmem_suballoc: bad status return of %d", ret);
+		panic("kmem_subinit: bad status return of %d", ret);
 	*max = *min + size;
-	result = vm_map_create(vm_map_pmap(parent), *min, *max);
-	if (result == NULL)
-		panic("kmem_suballoc: cannot create submap");
-	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
-		panic("kmem_suballoc: unable to change range to submap");
-	return (result);
+	vm_map_init(map, vm_map_pmap(parent), *min, *max);
+	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
+		panic("kmem_subinit: unable to change range to submap");
 }
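For callers the conversion is mechanical: rather than receiving a freshly created map, they pass in the map to initialize, typically one of the new static _store objects. A hypothetical before/after using exec_map, with exec_map_size assumed purely for illustration (note that the final argument also changes from boolean_t to bool):

	/* Before: the submap was allocated and returned. */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    exec_map_size, FALSE);

	/* After: the caller supplies the map storage to initialize. */
	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
	    exec_map_size, false);

The submap is still carved out of the parent with vm_map_find() and spliced in with vm_map_submap(); only the ownership of the struct vm_map itself changes.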
 /*
  * kmem_malloc_domain:
  *
  *	Allocate wired-down pages in the kernel's address space.
  */
 static vm_offset_t
[340 lines skipped]
  * data, bss, and all space allocated thus far (`bootstrap' data).  The
  * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
  * `start' as allocated, and the range between `start' and `end' as free.
  * Create the kernel vmem arena and its per-domain children.
  */
 void
 kmem_init(vm_offset_t start, vm_offset_t end)
 {
-	vm_map_t m;
 	vm_size_t quantum;
 	int domain;

-	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
-	m->system_map = 1;
-	vm_map_lock(m);
+	vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
+	kernel_map->system_map = 1;
+	vm_map_lock(kernel_map);
 	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
-	kernel_map = m;
-	(void)vm_map_insert(m, NULL, 0,
+	(void)vm_map_insert(kernel_map, NULL, 0,
 #ifdef __amd64__
 	    KERNBASE,
 #else
 	    VM_MIN_KERNEL_ADDRESS,
 #endif
 	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 	/* ... and ending with the completion of the above `insert' */

 #ifdef __amd64__
 	/*
 	 * Mark KVA used for the page array as allocated.  Other platforms
 	 * that handle vm_page_array allocation can simply adjust virtual_avail
 	 * instead.
 	 */
-	(void)vm_map_insert(m, NULL, 0, (vm_offset_t)vm_page_array,
+	(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
 	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
 	    sizeof(struct vm_page)),
 	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
 #endif
-	vm_map_unlock(m);
+	vm_map_unlock(kernel_map);
 	/*
 	 * Use a large import quantum on NUMA systems.  This helps minimize
 	 * interleaving of superpages, reducing internal fragmentation within
 	 * the per-domain arenas.
 	 */
 	if (vm_ndomains > 1 && PMAP_HAS_DMAP)
 		quantum = KVA_NUMA_IMPORT_QUANTUM;
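The quantum picked here is consumed just past the truncation point, when the kernel arena is set up. A sketch of what that continuation plausibly looks like, assuming the standard vmem(9) calls (vmem_init() and vmem_set_import() are existing interfaces; kva_import is the arena's KVA import callback):

	else
		quantum = KVA_QUANTUM;

	/*
	 * Grow the kernel arena on demand, importing quantum bytes of
	 * KVA at a time.  A larger quantum keeps each import within a
	 * single per-domain arena, avoiding superpage interleaving.
	 */
	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
	vmem_set_import(kernel_arena, kva_import, NULL, NULL, quantum);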
[115 lines skipped]