Changeset View
Standalone View
sys/vm/vm_map.c
Show First 20 Lines • Show All 251 Lines • ▼ Show 20 Lines | vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit) | ||||
vm = uma_zalloc(vmspace_zone, M_WAITOK); | vm = uma_zalloc(vmspace_zone, M_WAITOK); | ||||
KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); | KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); | ||||
if (!pinit(vmspace_pmap(vm))) { | if (!pinit(vmspace_pmap(vm))) { | ||||
uma_zfree(vmspace_zone, vm); | uma_zfree(vmspace_zone, vm); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
CTR1(KTR_VM, "vmspace_alloc: %p", vm); | CTR1(KTR_VM, "vmspace_alloc: %p", vm); | ||||
_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); | _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); | ||||
vm->vm_refcnt = 1; | refcount_init(&vm->vm_refcnt, 1); | ||||
vm->vm_shm = NULL; | vm->vm_shm = NULL; | ||||
vm->vm_swrss = 0; | vm->vm_swrss = 0; | ||||
vm->vm_tsize = 0; | vm->vm_tsize = 0; | ||||
vm->vm_dsize = 0; | vm->vm_dsize = 0; | ||||
vm->vm_ssize = 0; | vm->vm_ssize = 0; | ||||
vm->vm_taddr = 0; | vm->vm_taddr = 0; | ||||
vm->vm_daddr = 0; | vm->vm_daddr = 0; | ||||
vm->vm_maxsaddr = 0; | vm->vm_maxsaddr = 0; | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | |||||
void | void | ||||
vmspace_free(struct vmspace *vm) | vmspace_free(struct vmspace *vm) | ||||
{ | { | ||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | ||||
"vmspace_free() called"); | "vmspace_free() called"); | ||||
if (vm->vm_refcnt == 0) | if (refcount_release(&vm->vm_refcnt)) | ||||
panic("vmspace_free: attempt to free already freed vmspace"); | |||||
if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) | |||||
vmspace_dofree(vm); | vmspace_dofree(vm); | ||||
} | } | ||||
void | void | ||||
vmspace_exitfree(struct proc *p) | vmspace_exitfree(struct proc *p) | ||||
{ | { | ||||
struct vmspace *vm; | struct vmspace *vm; | ||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
vm = p->p_vmspace; | vm = p->p_vmspace; | ||||
p->p_vmspace = NULL; | p->p_vmspace = NULL; | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); | KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); | ||||
vmspace_free(vm); | vmspace_free(vm); | ||||
} | } | ||||
void | void | ||||
vmspace_exit(struct thread *td) | vmspace_exit(struct thread *td) | ||||
{ | { | ||||
int refcnt; | |||||
struct vmspace *vm; | struct vmspace *vm; | ||||
struct proc *p; | struct proc *p; | ||||
bool released; | |||||
/* | |||||
* Release user portion of address space. | |||||
* This releases references to vnodes, | |||||
* which could cause I/O if the file has been unlinked. | |||||
* Need to do this early enough that we can still sleep. | |||||
* | |||||
* The last exiting process to reach this point releases as | |||||
* much of the environment as it can. vmspace_dofree() is the | |||||
* slower fallback in case another process had a temporary | |||||
* reference to the vmspace. | |||||
*/ | |||||
p = td->td_proc; | p = td->td_proc; | ||||
vm = p->p_vmspace; | vm = p->p_vmspace; | ||||
atomic_add_int(&vmspace0.vm_refcnt, 1); | |||||
refcnt = vm->vm_refcnt; | /* | ||||
do { | * Prepare to release the vmspace reference. The thread that releases | ||||
if (refcnt > 1 && p->p_vmspace != &vmspace0) { | * the last reference is responsible for tearing down the vmspace. | ||||
/* Switch now since other proc might free vmspace */ | * However, threads not releasing the final reference must switch to the | ||||
* kernel's vmspace0 before the decrement so that the subsequent pmap | |||||
kib: s/dummy/kernel/ | |||||
* deactivation does not modify a freed vmspace. | |||||
*/ | |||||
refcount_acquire(&vmspace0.vm_refcnt); | |||||
if (!(released = refcount_release_if_last(&vm->vm_refcnt))) { | |||||
if (p->p_vmspace != &vmspace0) { | |||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
p->p_vmspace = &vmspace0; | p->p_vmspace = &vmspace0; | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
pmap_activate(td); | pmap_activate(td); | ||||
} | } | ||||
} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1)); | released = refcount_release(&vm->vm_refcnt); | ||||
if (refcnt == 1) { | } | ||||
if (released) { | |||||
/* | |||||
* pmap_remove_pages() expects the pmap to be active, so switch | |||||
* back first if necessary. | |||||
alcUnsubmitted Not Done Inline ActionsThere is nothing wrong with the patch here. I just want to make an observation. Setting aside the exact KASSERT()s in some of the pmap implementations, we really don't need the pmap to be active on the underlying processor. We really only need that the pmap not be active on any other processor, because some of the pmap implementations intentionally perform non-atomic read-and-clear operations to minimize the cost of checking the dirty bit and clearing the PTE. I wonder how often we pointlessly activate the pmap here. alc: There is nothing wrong with the patch here. I just want to make an observation.
Setting aside… | |||||
markjAuthorUnsubmitted Done Inline ActionsBy "pointlessly activate the pmap" do you mean switching to vmspace0's pmap and back again? I believe it should be fairly rare since it requires a process to exit while using a shared vmspace. I ran a kernel build with the following dtrace script running in the background: dtrace -n 'fbt::vmspace_exit:entry {@[args[0]->td_proc->p_vmspace->vm_refcnt] = count();}' and got: 61 1 1 53328 i.e., in all but one case the vmspace had one reference at the time the function was called. The sole exception was a case where p->p_vm == &vmspace0. I'm not sure how that arises. markj: By "pointlessly activate the pmap" do you mean switching to vmspace0's pmap and back again? I… | |||||
markjAuthorUnsubmitted Done Inline Actions
It is from exiting NFS client async I/O threads. markj: > I'm not sure how that arises.
It is from exiting NFS client async I/O threads. | |||||
*/ | |||||
if (p->p_vmspace != vm) { | if (p->p_vmspace != vm) { | ||||
/* vmspace not yet freed, switch back */ | |||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
p->p_vmspace = vm; | p->p_vmspace = vm; | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
pmap_activate(td); | pmap_activate(td); | ||||
} | } | ||||
pmap_remove_pages(vmspace_pmap(vm)); | pmap_remove_pages(vmspace_pmap(vm)); | ||||
/* Switch now since this proc will free vmspace */ | |||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
p->p_vmspace = &vmspace0; | p->p_vmspace = &vmspace0; | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
pmap_activate(td); | pmap_activate(td); | ||||
vmspace_dofree(vm); | vmspace_dofree(vm); | ||||
} | } | ||||
#ifdef RACCT | #ifdef RACCT | ||||
if (racct_enable) | if (racct_enable) | ||||
vmspace_container_reset(p); | vmspace_container_reset(p); | ||||
#endif | #endif | ||||
} | } | ||||
/* Acquire reference to vmspace owned by another process. */ | /* Acquire reference to vmspace owned by another process. */ | ||||
struct vmspace * | struct vmspace * | ||||
vmspace_acquire_ref(struct proc *p) | vmspace_acquire_ref(struct proc *p) | ||||
{ | { | ||||
struct vmspace *vm; | struct vmspace *vm; | ||||
int refcnt; | |||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
vm = p->p_vmspace; | vm = p->p_vmspace; | ||||
if (vm == NULL) { | if (vm == NULL) { | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
refcnt = vm->vm_refcnt; | if (!refcount_acquire_if_not_zero(&vm->vm_refcnt)) { | ||||
alcUnsubmitted Done Inline ActionsI would suggest merging this "if" statement with the one above it. alc: I would suggest merging this "if" statement with the one above it. | |||||
do { | |||||
if (refcnt <= 0) { /* Avoid 0->1 transition */ | |||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1)); | |||||
if (vm != p->p_vmspace) { | if (vm != p->p_vmspace) { | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
vmspace_free(vm); | vmspace_free(vm); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
return (vm); | return (vm); | ||||
} | } | ||||
Show All 15 Lines | |||||
*/ | */ | ||||
void | void | ||||
vmspace_switch_aio(struct vmspace *newvm) | vmspace_switch_aio(struct vmspace *newvm) | ||||
{ | { | ||||
struct vmspace *oldvm; | struct vmspace *oldvm; | ||||
/* XXX: Need some way to assert that this is an aio daemon. */ | /* XXX: Need some way to assert that this is an aio daemon. */ | ||||
KASSERT(newvm->vm_refcnt > 0, | KASSERT(refcount_load(&newvm->vm_refcnt) > 0, | ||||
("vmspace_switch_aio: newvm unreferenced")); | ("vmspace_switch_aio: newvm unreferenced")); | ||||
oldvm = curproc->p_vmspace; | oldvm = curproc->p_vmspace; | ||||
if (oldvm == newvm) | if (oldvm == newvm) | ||||
return; | return; | ||||
/* | /* | ||||
* Point to the new address space and refer to it. | * Point to the new address space and refer to it. | ||||
*/ | */ | ||||
curproc->p_vmspace = newvm; | curproc->p_vmspace = newvm; | ||||
atomic_add_int(&newvm->vm_refcnt, 1); | refcount_acquire(&newvm->vm_refcnt); | ||||
/* Activate the new mapping. */ | /* Activate the new mapping. */ | ||||
pmap_activate(curthread); | pmap_activate(curthread); | ||||
vmspace_free(oldvm); | vmspace_free(oldvm); | ||||
} | } | ||||
void | void | ||||
▲ Show 20 Lines • Show All 4,307 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
int | int | ||||
vmspace_unshare(struct proc *p) | vmspace_unshare(struct proc *p) | ||||
{ | { | ||||
struct vmspace *oldvmspace = p->p_vmspace; | struct vmspace *oldvmspace = p->p_vmspace; | ||||
struct vmspace *newvmspace; | struct vmspace *newvmspace; | ||||
vm_ooffset_t fork_charge; | vm_ooffset_t fork_charge; | ||||
if (oldvmspace->vm_refcnt == 1) | if (refcount_load(&oldvmspace->vm_refcnt) == 1) | ||||
return (0); | return (0); | ||||
fork_charge = 0; | fork_charge = 0; | ||||
newvmspace = vmspace_fork(oldvmspace, &fork_charge); | newvmspace = vmspace_fork(oldvmspace, &fork_charge); | ||||
if (newvmspace == NULL) | if (newvmspace == NULL) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { | if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { | ||||
vmspace_free(newvmspace); | vmspace_free(newvmspace); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
▲ Show 20 Lines • Show All 489 Lines • Show Last 20 Lines |
Reviewer note (kib): in the comment, replace the word "dummy" with "kernel", i.e., describe vmspace0 as the kernel's vmspace.