Changeset View
Standalone View
sys/vm/vm_mmap.c
Show First 20 Lines • Show All 336 Lines • ▼ Show 20 Lines | if (flags & MAP_FIXED) { | ||||
* as the file offset taken modulo PAGE_SIZE, so it | * as the file offset taken modulo PAGE_SIZE, so it | ||||
* should be aligned after adjustment by pageoff. | * should be aligned after adjustment by pageoff. | ||||
*/ | */ | ||||
addr -= pageoff; | addr -= pageoff; | ||||
if (addr & PAGE_MASK) | if (addr & PAGE_MASK) | ||||
return (EINVAL); | return (EINVAL); | ||||
/* Address range must be all in user VM space. */ | /* Address range must be all in user VM space. */ | ||||
if (addr < vm_map_min(&vms->vm_map) || | if (!vm_map_check_range(&vms->vm_map, addr, addr + size)) | ||||
addr + size > vm_map_max(&vms->vm_map)) | |||||
return (EINVAL); | return (EINVAL); | ||||
if (addr + size < addr) | |||||
return (EINVAL); | |||||
#ifdef MAP_32BIT | #ifdef MAP_32BIT | ||||
if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR) | if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR) | ||||
return (EINVAL); | return (EINVAL); | ||||
} else if (flags & MAP_32BIT) { | } else if (flags & MAP_32BIT) { | ||||
/* | /* | ||||
* For MAP_32BIT, override the hint if it is too high and | * For MAP_32BIT, override the hint if it is too high and | ||||
* do not bother moving the mapping past the heap (since | * do not bother moving the mapping past the heap (since | ||||
* the heap is usually above 2GB). | * the heap is usually above 2GB). | ||||
▲ Show 20 Lines • Show All 214 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
kern_munmap(struct thread *td, uintptr_t addr0, size_t size) | kern_munmap(struct thread *td, uintptr_t addr0, size_t size) | ||||
{ | { | ||||
#ifdef HWPMC_HOOKS | #ifdef HWPMC_HOOKS | ||||
struct pmckern_map_out pkm; | struct pmckern_map_out pkm; | ||||
vm_map_entry_t entry; | vm_map_entry_t entry; | ||||
bool pmc_handled; | bool pmc_handled; | ||||
#endif | #endif | ||||
vm_offset_t addr; | vm_offset_t addr, end; | ||||
vm_size_t pageoff; | vm_size_t pageoff; | ||||
vm_map_t map; | vm_map_t map; | ||||
if (size == 0) | if (size == 0) | ||||
return (EINVAL); | return (EINVAL); | ||||
addr = addr0; | addr = addr0; | ||||
pageoff = (addr & PAGE_MASK); | pageoff = (addr & PAGE_MASK); | ||||
addr -= pageoff; | addr -= pageoff; | ||||
size += pageoff; | size += pageoff; | ||||
size = (vm_size_t) round_page(size); | size = (vm_size_t) round_page(size); | ||||
if (addr + size < addr) | end = addr + size; | ||||
return (EINVAL); | |||||
/* | |||||
* Check for illegal addresses. Watch out for address wrap... | |||||
*/ | |||||
map = &td->td_proc->p_vmspace->vm_map; | map = &td->td_proc->p_vmspace->vm_map; | ||||
if (addr < vm_map_min(map) || addr + size > vm_map_max(map)) | if (!vm_map_check_range(map, addr, end)) | ||||
return (EINVAL); | return (EINVAL); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
#ifdef HWPMC_HOOKS | #ifdef HWPMC_HOOKS | ||||
pmc_handled = false; | pmc_handled = false; | ||||
if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) { | if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) { | ||||
pmc_handled = true; | pmc_handled = true; | ||||
/* | /* | ||||
* Inform hwpmc if the address range being unmapped contains | * Inform hwpmc if the address range being unmapped contains | ||||
* an executable region. | * an executable region. | ||||
*/ | */ | ||||
pkm.pm_address = (uintptr_t) NULL; | pkm.pm_address = (uintptr_t) NULL; | ||||
if (vm_map_lookup_entry(map, addr, &entry)) { | if (vm_map_lookup_entry(map, addr, &entry)) { | ||||
for (; entry->start < addr + size; | for (; entry->start < end; | ||||
entry = vm_map_entry_succ(entry)) { | entry = vm_map_entry_succ(entry)) { | ||||
if (vm_map_check_protection(map, entry->start, | if (vm_map_check_protection(map, entry->start, | ||||
entry->end, VM_PROT_EXECUTE) == TRUE) { | entry->end, VM_PROT_EXECUTE) == TRUE) { | ||||
pkm.pm_address = (uintptr_t) addr; | pkm.pm_address = (uintptr_t) addr; | ||||
pkm.pm_size = (size_t) size; | pkm.pm_size = (size_t) size; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
} | } | ||||
#endif | #endif | ||||
vm_map_delete(map, addr, addr + size); | vm_map_delete(map, addr, end); | ||||
#ifdef HWPMC_HOOKS | #ifdef HWPMC_HOOKS | ||||
if (__predict_false(pmc_handled)) { | if (__predict_false(pmc_handled)) { | ||||
/* downgrade the lock to prevent a LOR with the pmc-sx lock */ | /* downgrade the lock to prevent a LOR with the pmc-sx lock */ | ||||
vm_map_lock_downgrade(map); | vm_map_lock_downgrade(map); | ||||
if (pkm.pm_address != (uintptr_t) NULL) | if (pkm.pm_address != (uintptr_t) NULL) | ||||
PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm); | PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm); | ||||
vm_map_unlock_read(map); | vm_map_unlock_read(map); | ||||
▲ Show 20 Lines • Show All 134 Lines • ▼ Show 20 Lines | kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav) | ||||
} | } | ||||
/* | /* | ||||
* Check for illegal addresses. Watch out for address wrap... Note | * Check for illegal addresses. Watch out for address wrap... Note | ||||
* that VM_*_ADDRESS are not constants due to casts (argh). | * that VM_*_ADDRESS are not constants due to casts (argh). | ||||
*/ | */ | ||||
map = &td->td_proc->p_vmspace->vm_map; | map = &td->td_proc->p_vmspace->vm_map; | ||||
addr = addr0; | addr = addr0; | ||||
if (addr < vm_map_min(map) || addr + len > vm_map_max(map)) | if (!vm_map_check_range(map, addr, addr + len)) | ||||
return (EINVAL); | |||||
if ((addr + len) < addr) | |||||
return (EINVAL); | return (EINVAL); | ||||
/* | /* | ||||
* Since this routine is only advisory, we default to conservative | * Since this routine is only advisory, we default to conservative | ||||
* behavior. | * behavior. | ||||
*/ | */ | ||||
start = trunc_page(addr); | start = trunc_page(addr); | ||||
end = round_page(addr + len); | end = round_page(addr + len); | ||||
▲ Show 20 Lines • Show All 887 Lines • Show Last 20 Lines |