Changeset View
Standalone View
sys/vm/vm_map.c
Show First 20 Lines • Show All 447 Lines • ▼ Show 20 Lines | if (vm != p->p_vmspace) { | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
PROC_VMSPACE_UNLOCK(p); | PROC_VMSPACE_UNLOCK(p); | ||||
return (vm); | return (vm); | ||||
} | } | ||||
/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The new vmspace is either the vmspace of a user process obtained
 * from an active AIO request or the initial vmspace of the AIO kernel
 * process (when it is idling).  Because user processes will block to
 * drain any active AIO requests before proceeding in exit() or
 * execve(), the reference count for vmspaces from AIO requests can
 * never be 0.  Similarly, AIO kernel processes hold an extra
 * reference on their initial vmspace for the life of the process.  As
 * a result, the 'newvm' vmspace always has a non-zero reference
 * count.  This permits an additional reference on 'newvm' to be
 * acquired via a simple atomic increment rather than the loop in
 * vmspace_acquire_ref() above.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	/*
	 * NOTE(review): callers are believed to hold no locks here, so
	 * it is assumed safe for vmspace_free() to perform the full
	 * teardown of 'oldvm' (not merely a reference-count decrement)
	 * should this drop the last reference -- confirm at call sites.
	 */
	vmspace_free(oldvm);
}
void | void | ||||
_vm_map_lock(vm_map_t map, const char *file, int line) | _vm_map_lock(vm_map_t map, const char *file, int line) | ||||
{ | { | ||||
if (map->system_map) | if (map->system_map) | ||||
mtx_lock_flags_(&map->system_mtx, 0, file, line); | mtx_lock_flags_(&map->system_mtx, 0, file, line); | ||||
▲ Show 20 Lines • Show All 4,376 Lines • Show Last 20 Lines |
This comment deserves an update.