Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/vm_kern.c
Show First 20 Lines • Show All 158 Lines • ▼ Show 20 Lines | |||||
* region's starting virtual address. The allocated pages are not | * region's starting virtual address. The allocated pages are not | ||||
* necessarily physically contiguous. If M_ZERO is specified through the | * necessarily physically contiguous. If M_ZERO is specified through the | ||||
* given flags, then the pages are zeroed before they are mapped. | * given flags, then the pages are zeroed before they are mapped. | ||||
*/ | */ | ||||
vm_offset_t | vm_offset_t | ||||
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low, | kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low, | ||||
vm_paddr_t high, vm_memattr_t memattr) | vm_paddr_t high, vm_memattr_t memattr) | ||||
{ | { | ||||
vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object; | vm_object_t object = kernel_object; | ||||
vm_offset_t addr, i, offset; | vm_offset_t addr, i, offset; | ||||
vm_page_t m; | vm_page_t m; | ||||
int pflags, tries; | int pflags, tries; | ||||
KASSERT(vmem == kernel_arena, | |||||
("kmem_alloc_attr: Only kernel_arena is supported.")); | |||||
size = round_page(size); | size = round_page(size); | ||||
if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr)) | if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr)) | ||||
return (0); | return (0); | ||||
offset = addr - VM_MIN_KERNEL_ADDRESS; | offset = addr - VM_MIN_KERNEL_ADDRESS; | ||||
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; | pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; | ||||
pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); | pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); | ||||
pflags |= VM_ALLOC_NOWAIT; | pflags |= VM_ALLOC_NOWAIT; | ||||
VM_OBJECT_WLOCK(object); | VM_OBJECT_WLOCK(object); | ||||
Show All 35 Lines | |||||
* through the given flags, then the pages are zeroed before they are | * through the given flags, then the pages are zeroed before they are | ||||
* mapped. | * mapped. | ||||
*/ | */ | ||||
vm_offset_t | vm_offset_t | ||||
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low, | kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low, | ||||
vm_paddr_t high, u_long alignment, vm_paddr_t boundary, | vm_paddr_t high, u_long alignment, vm_paddr_t boundary, | ||||
vm_memattr_t memattr) | vm_memattr_t memattr) | ||||
{ | { | ||||
vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object; | vm_object_t object = kernel_object; | ||||
vm_offset_t addr, offset, tmp; | vm_offset_t addr, offset, tmp; | ||||
vm_page_t end_m, m; | vm_page_t end_m, m; | ||||
u_long npages; | u_long npages; | ||||
int pflags, tries; | int pflags, tries; | ||||
KASSERT(vmem == kernel_arena, | |||||
("kmem_alloc_contig: Only kernel_arena is supported.")); | |||||
size = round_page(size); | size = round_page(size); | ||||
if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr)) | if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr)) | ||||
return (0); | return (0); | ||||
offset = addr - VM_MIN_KERNEL_ADDRESS; | offset = addr - VM_MIN_KERNEL_ADDRESS; | ||||
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; | pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; | ||||
pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); | pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); | ||||
pflags |= VM_ALLOC_NOWAIT; | pflags |= VM_ALLOC_NOWAIT; | ||||
npages = atop(size); | npages = atop(size); | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | |||||
* Allocate wired-down pages in the kernel's address space. | * Allocate wired-down pages in the kernel's address space. | ||||
*/ | */ | ||||
/*
 *	kmem_malloc:
 *
 *	Allocate "size" bytes of wired-down kernel memory from the given
 *	arena.  Returns the kernel virtual address of the allocation, or 0
 *	on failure.  The allocation is released with kmem_free() using the
 *	same size.  Only kernel_arena is supported; the pages are backed by
 *	kernel_object.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	KASSERT(vmem == kernel_arena,
	    ("kmem_malloc: Only kernel_arena is supported."));

	size = round_page(size);
	/* Reserve a virtual address range from the arena. */
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	/* Back the range with wired physical pages. */
	rv = kmem_back(kernel_object, addr, size, flags);
	if (rv != KERN_SUCCESS) {
		/* Backing failed: return the unused range to the arena. */
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}
/* | /* | ||||
* kmem_back: | * kmem_back: | ||||
* | * | ||||
* Allocate physical pages for the specified virtual address range. | * Allocate physical pages for the specified virtual address range. | ||||
*/ | */ | ||||
int | int | ||||
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags) | kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags) | ||||
{ | { | ||||
vm_offset_t offset, i; | vm_offset_t offset, i; | ||||
vm_page_t m, mpred; | vm_page_t m, mpred; | ||||
int pflags; | int pflags; | ||||
KASSERT(object == kmem_object || object == kernel_object, | KASSERT(object == kernel_object, | ||||
("kmem_back: only supports kernel objects.")); | ("kmem_back: only supports kernel object.")); | ||||
offset = addr - VM_MIN_KERNEL_ADDRESS; | offset = addr - VM_MIN_KERNEL_ADDRESS; | ||||
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; | pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; | ||||
pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); | pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); | ||||
if (flags & M_WAITOK) | if (flags & M_WAITOK) | ||||
pflags |= VM_ALLOC_WAITFAIL; | pflags |= VM_ALLOC_WAITFAIL; | ||||
i = 0; | i = 0; | ||||
Show All 39 Lines | |||||
* that is being unmapped. | * that is being unmapped. | ||||
*/ | */ | ||||
/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.  Only kernel_object is supported.  The range must be
 *	entirely backed by resident pages previously allocated via
 *	kmem_back() (or the kmem_alloc_* functions) — presumably full
 *	coverage is a precondition, since m is dereferenced unconditionally.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m, next;
	vm_offset_t end, offset;

	KASSERT(object == kernel_object,
	    ("kmem_unback: only supports kernel object."));

	/* Remove the kernel mappings before freeing the backing pages. */
	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	/*
	 * Walk the resident pages covering [offset, end), unwiring and
	 * freeing each one.  The successor is fetched with vm_page_next()
	 * before the current page is freed so the iteration survives the
	 * removal.
	 */
	for (m = vm_page_lookup(object, atop(offset)); offset < end;
	    offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}
/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.  Only kernel_arena is supported.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	KASSERT(vmem == kernel_arena,
	    ("kmem_free: Only kernel_arena is supported."));

	size = round_page(size);
	/* Release the backing pages first, then the virtual address range. */
	kmem_unback(kernel_object, addr, size);
	vmem_free(vmem, addr, size);
}
/* | /* | ||||
* kmap_alloc_wait: | * kmap_alloc_wait: | ||||
* | * | ||||
* Allocates pageable memory from a sub-map of the kernel. If the submap | * Allocates pageable memory from a sub-map of the kernel. If the submap | ||||
* has no room, the caller sleeps waiting for more memory in the submap. | * has no room, the caller sleeps waiting for more memory in the submap. | ||||
▲ Show 20 Lines • Show All 130 Lines • Show Last 20 Lines |