Changeset View
Standalone View
sys/vm/vm_unix.c
Show First 20 Lines • Show All 89 Lines • ▼ Show 20 Lines | |||||
kern_break(struct thread *td, uintptr_t *addr) | kern_break(struct thread *td, uintptr_t *addr) | ||||
{ | { | ||||
struct vmspace *vm = td->td_proc->p_vmspace; | struct vmspace *vm = td->td_proc->p_vmspace; | ||||
vm_map_t map = &vm->vm_map; | vm_map_t map = &vm->vm_map; | ||||
vm_offset_t new, old, base; | vm_offset_t new, old, base; | ||||
rlim_t datalim, lmemlim, vmemlim; | rlim_t datalim, lmemlim, vmemlim; | ||||
int prot, rv; | int prot, rv; | ||||
int error = 0; | int error = 0; | ||||
boolean_t do_map_wirefuture; | |||||
datalim = lim_cur(td, RLIMIT_DATA); | datalim = lim_cur(td, RLIMIT_DATA); | ||||
lmemlim = lim_cur(td, RLIMIT_MEMLOCK); | lmemlim = lim_cur(td, RLIMIT_MEMLOCK); | ||||
vmemlim = lim_cur(td, RLIMIT_VMEM); | vmemlim = lim_cur(td, RLIMIT_VMEM); | ||||
do_map_wirefuture = FALSE; | |||||
new = round_page(*addr); | new = round_page(*addr); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
base = round_page((vm_offset_t) vm->vm_daddr); | base = round_page((vm_offset_t) vm->vm_daddr); | ||||
old = base + ctob(vm->vm_dsize); | old = base + ctob(vm->vm_dsize); | ||||
if (new > base) { | if (new > base) { | ||||
/* | /* | ||||
* Check the resource limit, but allow a process to reduce | * Check the resource limit, but allow a process to reduce | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | if (racct_enable) { | ||||
PROC_UNLOCK(td->td_proc); | PROC_UNLOCK(td->td_proc); | ||||
} | } | ||||
#endif | #endif | ||||
prot = VM_PROT_RW; | prot = VM_PROT_RW; | ||||
#if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__) | #if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__) | ||||
if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32)) | if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32)) | ||||
prot |= VM_PROT_EXECUTE; | prot |= VM_PROT_EXECUTE; | ||||
#endif | #endif | ||||
rv = vm_map_insert(map, NULL, 0, old, new, prot, VM_PROT_ALL, 0); | rv = vm_map_insert(map, NULL, 0, old, new, prot, VM_PROT_ALL, | ||||
0); | |||||
if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { | |||||
rv = vm_map_wire_locked(map, old, new, | |||||
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); | |||||
if (rv != KERN_SUCCESS) | |||||
vm_map_delete(map, old, new); | |||||
} | |||||
if (rv != KERN_SUCCESS) { | if (rv != KERN_SUCCESS) { | ||||
#ifdef RACCT | #ifdef RACCT | ||||
if (racct_enable) { | if (racct_enable) { | ||||
PROC_LOCK(td->td_proc); | PROC_LOCK(td->td_proc); | ||||
racct_set_force(td->td_proc, | racct_set_force(td->td_proc, | ||||
RACCT_DATA, old - base); | RACCT_DATA, old - base); | ||||
racct_set_force(td->td_proc, | racct_set_force(td->td_proc, | ||||
RACCT_VMEM, map->size); | RACCT_VMEM, map->size); | ||||
if (!old_mlock && map->flags & MAP_WIREFUTURE) { | if (!old_mlock && map->flags & MAP_WIREFUTURE) { | ||||
racct_set_force(td->td_proc, | racct_set_force(td->td_proc, | ||||
RACCT_MEMLOCK, | RACCT_MEMLOCK, | ||||
ptoa(pmap_wired_count(map->pmap))); | ptoa(pmap_wired_count(map->pmap))); | ||||
} | } | ||||
PROC_UNLOCK(td->td_proc); | PROC_UNLOCK(td->td_proc); | ||||
} | } | ||||
#endif | #endif | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto done; | goto done; | ||||
} | } | ||||
vm->vm_dsize += btoc(new - old); | vm->vm_dsize += btoc(new - old); | ||||
/* | |||||
* Handle the MAP_WIREFUTURE case for legacy applications, | |||||
* by marking the newly mapped range of pages as wired. | |||||
* We are not required to perform a corresponding | |||||
* vm_map_unwire() before vm_map_delete() below, as | |||||
* it will forcibly unwire the pages in the range. | |||||
* | |||||
* XXX If the pages cannot be wired, no error is returned. | |||||
*/ | |||||
if ((map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) | |||||
do_map_wirefuture = TRUE; | |||||
} else if (new < old) { | } else if (new < old) { | ||||
rv = vm_map_delete(map, new, old); | rv = vm_map_delete(map, new, old); | ||||
if (rv != KERN_SUCCESS) { | if (rv != KERN_SUCCESS) { | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto done; | goto done; | ||||
} | } | ||||
vm->vm_dsize -= btoc(old - new); | vm->vm_dsize -= btoc(old - new); | ||||
#ifdef RACCT | #ifdef RACCT | ||||
if (racct_enable) { | if (racct_enable) { | ||||
PROC_LOCK(td->td_proc); | PROC_LOCK(td->td_proc); | ||||
racct_set_force(td->td_proc, RACCT_DATA, new - base); | racct_set_force(td->td_proc, RACCT_DATA, new - base); | ||||
racct_set_force(td->td_proc, RACCT_VMEM, map->size); | racct_set_force(td->td_proc, RACCT_VMEM, map->size); | ||||
if (!old_mlock && map->flags & MAP_WIREFUTURE) { | if (!old_mlock && map->flags & MAP_WIREFUTURE) { | ||||
racct_set_force(td->td_proc, RACCT_MEMLOCK, | racct_set_force(td->td_proc, RACCT_MEMLOCK, | ||||
ptoa(pmap_wired_count(map->pmap))); | ptoa(pmap_wired_count(map->pmap))); | ||||
} | } | ||||
PROC_UNLOCK(td->td_proc); | PROC_UNLOCK(td->td_proc); | ||||
} | } | ||||
#endif | #endif | ||||
} | } | ||||
done: | done: | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
if (do_map_wirefuture) | |||||
(void) vm_map_wire(map, old, new, | |||||
VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); | |||||
if (error == 0) | if (error == 0) | ||||
*addr = new; | *addr = new; | ||||
return (error); | return (error); | ||||
} | } | ||||
#ifdef COMPAT_FREEBSD11 | #ifdef COMPAT_FREEBSD11 | ||||
/*
 * vadvise(2) compatibility stub for FreeBSD 11 and earlier binaries.
 * The historical vadvise() interface was never meaningfully implemented;
 * reject it outright so old binaries get a well-defined error instead of
 * silently succeeding.  Always returns EINVAL.
 */
int
freebsd11_vadvise(struct thread *td, struct freebsd11_vadvise_args *uap)
{

	return (EINVAL);
}
#endif | #endif |