Changeset View
Standalone View
sys/vm/vm_map.c
Show First 20 Lines • Show All 383 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | ||||
"vmspace_free() called"); | "vmspace_free() called"); | ||||
if (vm->vm_refcnt == 0) | if (vm->vm_refcnt == 0) | ||||
panic("vmspace_free: attempt to free already freed vmspace"); | panic("vmspace_free: attempt to free already freed vmspace"); | ||||
/* Ensure that pmap updates are visible to vmspace_exit(). */ | |||||
atomic_thread_fence_rel(); | |||||
if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) | if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) | ||||
kib: Don't you need a release there too?
vmspace_dofree(vm); | vmspace_dofree(vm); | ||||
} | } | ||||
void | void | ||||
vmspace_exitfree(struct proc *p) | vmspace_exitfree(struct proc *p) | ||||
{ | { | ||||
struct vmspace *vm; | struct vmspace *vm; | ||||
Show All 10 Lines | |||||
{ | { | ||||
int refcnt; | int refcnt; | ||||
struct vmspace *vm; | struct vmspace *vm; | ||||
struct proc *p; | struct proc *p; | ||||
/* | /* | ||||
* Release user portion of address space. | * Release user portion of address space. | ||||
* This releases references to vnodes, | * This releases references to vnodes, | ||||
* which could cause I/O if the file has been unlinked. | * which could cause I/O if the file has been unlinked. | ||||
Done Inline ActionsIs this comment about vnodes still true somehow? markj: Is this comment about vnodes still true somehow? | |||||
Not Done Inline ActionsI believe comment tries to say that dropping references to vnodes could cause inactivation, that indeed results in non-trivial fs activities if nlink == 0. kib: I believe comment tries to say that dropping references to vnodes could cause inactivation… | |||||
Done Inline ActionsBut does anything in this function drop a vnode reference? I believe it would happen earlier, when the vm_map is destroyed. markj: But does anything in this function drop a vnode reference? I believe it would happen earlier… | |||||
Not Done Inline Actions: It occurs in vmspace_dofree()->vm_map_remove()->vm_map_entry_deallocate(), and then in vm_map_unlock() for deferred processing. By the time the vm_map is destroyed, there is really nothing left to process. kib: It occurs in vmspace_dofree()->vm_map_remove()->vm_map_entry_deallocate(), and then…
* Need to do this early enough that we can still sleep. | * Need to do this early enough that we can still sleep. | ||||
* | * | ||||
* The last exiting process to reach this point releases as | * The last exiting process to reach this point releases as | ||||
* much of the environment as it can. vmspace_dofree() is the | * much of the environment as it can. vmspace_dofree() is the | ||||
* slower fallback in case another process had a temporary | * slower fallback in case another process had a temporary | ||||
* reference to the vmspace. | * reference to the vmspace. | ||||
*/ | */ | ||||
atomic_thread_fence_rel(); | |||||
p = td->td_proc; | p = td->td_proc; | ||||
vm = p->p_vmspace; | vm = p->p_vmspace; | ||||
atomic_add_int(&vmspace0.vm_refcnt, 1); | atomic_add_int(&vmspace0.vm_refcnt, 1); | ||||
refcnt = vm->vm_refcnt; | refcnt = atomic_load_int(&vm->vm_refcnt); | ||||
mmel (Unsubmitted) Not Done Inline Actions: Shouldn't the same loop in vmspace_acquire_ref() also be converted to use atomic_load_int(&vm->vm_refcnt)? The pattern `refcnt = vm->vm_refcnt; do { ... } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));` is undefined behavior: the compiler is free to optimize the plain read of refcnt back into a re-read of vm->vm_refcnt. mmel: Shouldn't the same loop in vmspace_acquire_ref() also be converted to use…
kibUnsubmitted Not Done Inline ActionsWe can put whatever pmap * into curpmap in critical section, if we do full userspace VA TLB invalidation inside the section. Anyway, whatever the method you prefer, I think it is worth doing correct fences in the refcount use. kib: We can put whatever pmap * into curpmap in critical section, if we do full userspace VA TLB… | |||||
do { | do { | ||||
if (refcnt > 1 && p->p_vmspace != &vmspace0) { | if (refcnt > 1 && p->p_vmspace != &vmspace0) { | ||||
/* Switch now since other proc might free vmspace */ | /* Switch now since other proc might free vmspace */ | ||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
p->p_vmspace = &vmspace0; | p->p_vmspace = &vmspace0; | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
pmap_activate(td); | pmap_activate(td); | ||||
} | } | ||||
} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1)); | } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1)); | ||||
if (refcnt == 1) { | if (refcnt == 1) { | ||||
atomic_thread_fence_acq(); | |||||
if (p->p_vmspace != vm) { | if (p->p_vmspace != vm) { | ||||
/* vmspace not yet freed, switch back */ | /* vmspace not yet freed, switch back */ | ||||
PROC_VMSPACE_LOCK(p); | PROC_VMSPACE_LOCK(p); | ||||
p->p_vmspace = vm; | p->p_vmspace = vm; | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
pmap_activate(td); | pmap_activate(td); | ||||
} | } | ||||
pmap_remove_pages(vmspace_pmap(vm)); | pmap_remove_pages(vmspace_pmap(vm)); | ||||
▲ Show 20 Lines • Show All 4,934 Lines • Show Last 20 Lines |
Don't you need a release there too?