Changeset View
Standalone View
sys/vm/vm_object.c
Show First 20 Lines • Show All 255 Lines • ▼ Show 20 Lines | _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) | ||||
*/ | */ | ||||
atomic_thread_fence_rel(); | atomic_thread_fence_rel(); | ||||
switch (type) { | switch (type) { | ||||
case OBJT_DEAD: | case OBJT_DEAD: | ||||
panic("_vm_object_allocate: can't create OBJT_DEAD"); | panic("_vm_object_allocate: can't create OBJT_DEAD"); | ||||
case OBJT_DEFAULT: | case OBJT_DEFAULT: | ||||
case OBJT_SWAP: | case OBJT_SWAP: | ||||
object->flags = OBJ_ONEMAPPING; | object->flags = OBJ_COLORED | OBJ_NOSPLIT; | ||||
break; | break; | ||||
case OBJT_DEVICE: | case OBJT_DEVICE: | ||||
case OBJT_SG: | case OBJT_SG: | ||||
object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED; | object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED; | ||||
break; | break; | ||||
case OBJT_MGTDEVICE: | case OBJT_MGTDEVICE: | ||||
object->flags = OBJ_FICTITIOUS; | object->flags = OBJ_FICTITIOUS; | ||||
break; | break; | ||||
case OBJT_PHYS: | case OBJT_PHYS: | ||||
object->flags = OBJ_UNMANAGED; | object->flags = OBJ_UNMANAGED; | ||||
break; | break; | ||||
case OBJT_VNODE: | case OBJT_VNODE: | ||||
object->flags = 0; | object->flags = 0; | ||||
break; | break; | ||||
default: | default: | ||||
panic("_vm_object_allocate: type %d is undefined", type); | panic("_vm_object_allocate: type %d is undefined", type); | ||||
} | } | ||||
object->size = size; | object->size = size; | ||||
object->domain.dr_policy = NULL; | object->domain.dr_policy = NULL; | ||||
object->generation = 1; | object->generation = 1; | ||||
object->cleangeneration = 1; | object->cleangeneration = 1; | ||||
object->pg_color = 0; | |||||
markj: Doesn't this break the colouring mechanism? vm_object_color() does nothing if OBJ_COLORED is… | |||||
alc (unsubmitted): No, because vm_object_anonymous() clears the flag. That said, there is a related problem. To give an extreme example, when tmpfs creates a 4KB file, we are now going to allocate a reservation just for that one 4KB, and pretty soon we will run out of free 2MB chunks.
jeff (author, unsubmitted): We don't consult the object size? Does this mean that every consumer needs to set color only when the object becomes large enough?
alc (unsubmitted): See my followup message. I think that it answers your question.
refcount_init(&object->ref_count, 1); | refcount_init(&object->ref_count, 1); | ||||
object->memattr = VM_MEMATTR_DEFAULT; | object->memattr = VM_MEMATTR_DEFAULT; | ||||
object->cred = NULL; | object->cred = NULL; | ||||
object->charge = 0; | object->charge = 0; | ||||
object->handle = NULL; | object->handle = NULL; | ||||
object->backing_object = NULL; | object->backing_object = NULL; | ||||
object->backing_object_offset = (vm_ooffset_t) 0; | object->backing_object_offset = (vm_ooffset_t) 0; | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
▲ Show 20 Lines • Show All 129 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* Returns a new object with the given size. | * Returns a new object with the given size. | ||||
*/ | */ | ||||
vm_object_t | vm_object_t | ||||
vm_object_allocate(objtype_t type, vm_pindex_t size) | vm_object_allocate(objtype_t type, vm_pindex_t size) | ||||
{ | { | ||||
vm_object_t object; | vm_object_t object; | ||||
object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); | object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); | ||||
Not Done Inline ActionsI think the cast is not needed. markj: I think the cast is not needed. | |||||
_vm_object_allocate(type, size, object); | _vm_object_allocate(type, size, object); | ||||
return (object); | return (object); | ||||
} | } | ||||
/* | |||||
* vm_object_anonymous: | |||||
* | |||||
* Returns a new default object of the given size and marked as | |||||
* anonymous memory for special split/collapse handling. Color | |||||
* to be initialized by the caller. | |||||
*/ | |||||
vm_object_t | |||||
vm_object_anonymous(vm_pindex_t size) | |||||
{ | |||||
vm_object_t object; | |||||
object = vm_object_allocate(OBJT_DEFAULT, size); | |||||
object->flags |= OBJ_ANONYMOUS | OBJ_ONEMAPPING; | |||||
object->flags &= ~(OBJ_COLORED | OBJ_NOSPLIT); | |||||
Not Done Inline ActionsBecause of the global object list and manipulation of the flags by code that iterates over that list, this is not safe. Perhaps, a cleaner and safer approach would be to introduce a vm_object_allocate_flags() and move the switch state that computes the default flags to vm_object_allocate(). alc: Because of the global object list and manipulation of the flags by code that iterates over that… | |||||
return (object); | |||||
} | |||||
markjUnsubmitted Not Done Inline ActionsExtra newline. markj: Extra newline. | |||||
/* | /* | ||||
* vm_object_reference: | * vm_object_reference: | ||||
* | * | ||||
* Gets another reference to the given object. Note: OBJ_DEAD | * Gets another reference to the given object. Note: OBJ_DEAD | ||||
* objects can be referenced during final cleaning. | * objects can be referenced during final cleaning. | ||||
*/ | */ | ||||
void | void | ||||
vm_object_reference(vm_object_t object) | vm_object_reference(vm_object_t object) | ||||
{ | { | ||||
if (object == NULL) | if (object == NULL) | ||||
return; | return; | ||||
VM_OBJECT_RLOCK(object); | VM_OBJECT_RLOCK(object); | ||||
vm_object_reference_locked(object); | vm_object_reference_locked(object); | ||||
VM_OBJECT_RUNLOCK(object); | VM_OBJECT_RUNLOCK(object); | ||||
Not Done Inline ActionsIt might make sense to add charge and ucred args. I might look at this as a followup. kib: It might make sense to add charge and ucred args. I might look at this as a followup. | |||||
Done Inline ActionsI would like to see more encapsulating of that functionality. As is it is handled directly everywhere with near identical copies of code. jeff: I would like to see more encapsulating of that functionality. As is it is handled directly… | |||||
} | } | ||||
/* | /* | ||||
* vm_object_reference_locked: | * vm_object_reference_locked: | ||||
* | * | ||||
* Gets another reference to the given object. | * Gets another reference to the given object. | ||||
* | * | ||||
* The object must be locked. | * The object must be locked. | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | while (object != NULL) { | ||||
/* | /* | ||||
* If the reference count goes to 0 we start calling | * If the reference count goes to 0 we start calling | ||||
* vm_object_terminate() on the object chain. A ref count | * vm_object_terminate() on the object chain. A ref count | ||||
* of 1 may be a special case depending on the shadow count | * of 1 may be a special case depending on the shadow count | ||||
* being 0 or 1. These cases require a write lock on the | * being 0 or 1. These cases require a write lock on the | ||||
* object. | * object. | ||||
*/ | */ | ||||
if ((object->flags & OBJ_ANONYMOUS) == 0) | |||||
released = refcount_release_if_gt(&object->ref_count, 1); | |||||
else | |||||
released = refcount_release_if_gt(&object->ref_count, 2); | released = refcount_release_if_gt(&object->ref_count, 2); | ||||
markjUnsubmitted Not Done Inline ActionsThis looks like it's part of a future diff. markj: This looks like it's part of a future diff. | |||||
jeffAuthorUnsubmitted Done Inline ActionsThe refcount was approved but not yet committed. jeff: The refcount was approved but not yet committed. | |||||
markjUnsubmitted Not Done Inline ActionsI just mean that I haven't seen a patch that changes vm_object so that ref_count is manipulated using atomics. markj: I just mean that I haven't seen a patch that changes vm_object so that ref_count is manipulated… | |||||
VM_OBJECT_RUNLOCK(object); | VM_OBJECT_RUNLOCK(object); | ||||
if (released) | if (released) | ||||
return; | return; | ||||
VM_OBJECT_WLOCK(object); | VM_OBJECT_WLOCK(object); | ||||
KASSERT(object->ref_count != 0, | KASSERT(object->ref_count != 0, | ||||
("vm_object_deallocate: object deallocated too many times: %d", object->type)); | ("vm_object_deallocate: object deallocated too many times: %d", object->type)); | ||||
refcount_release(&object->ref_count); | refcount_release(&object->ref_count); | ||||
if (object->ref_count > 1) { | if (object->ref_count > 1) { | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
return; | return; | ||||
} else if (object->ref_count == 1) { | } else if (object->ref_count == 1) { | ||||
if (object->shadow_count == 0 && | if (object->shadow_count == 0 && | ||||
object->handle == NULL && | object->handle == NULL && | ||||
(object->type == OBJT_DEFAULT || | (object->flags & OBJ_ANONYMOUS) != 0) { | ||||
(object->type == OBJT_SWAP && | |||||
(object->flags & OBJ_TMPFS_NODE) == 0))) { | |||||
vm_object_set_flag(object, OBJ_ONEMAPPING); | vm_object_set_flag(object, OBJ_ONEMAPPING); | ||||
} else if ((object->shadow_count == 1) && | } else if ((object->shadow_count == 1) && | ||||
(object->handle == NULL) && | (object->handle == NULL) && | ||||
(object->type == OBJT_DEFAULT || | (object->flags & OBJ_ANONYMOUS) != 0) { | ||||
object->type == OBJT_SWAP)) { | |||||
vm_object_t robject; | vm_object_t robject; | ||||
robject = LIST_FIRST(&object->shadow_head); | robject = LIST_FIRST(&object->shadow_head); | ||||
KASSERT(robject != NULL, | KASSERT(robject != NULL, | ||||
("vm_object_deallocate: ref_count: %d, shadow_count: %d", | ("vm_object_deallocate: ref_count: %d, shadow_count: %d", | ||||
object->ref_count, | object->ref_count, | ||||
object->shadow_count)); | object->shadow_count)); | ||||
KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0, | KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0, | ||||
▲ Show 20 Lines • Show All 487 Lines • ▼ Show 20 Lines | |||||
static bool | static bool | ||||
vm_object_advice_applies(vm_object_t object, int advice) | vm_object_advice_applies(vm_object_t object, int advice) | ||||
{ | { | ||||
if ((object->flags & OBJ_UNMANAGED) != 0) | if ((object->flags & OBJ_UNMANAGED) != 0) | ||||
return (false); | return (false); | ||||
if (advice != MADV_FREE) | if (advice != MADV_FREE) | ||||
return (true); | return (true); | ||||
return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) && | return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANONYMOUS)) == | ||||
(object->flags & OBJ_ONEMAPPING) != 0); | (OBJ_ONEMAPPING | OBJ_ANONYMOUS)); | ||||
} | } | ||||
static void | static void | ||||
vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, | vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, | ||||
vm_size_t size) | vm_size_t size) | ||||
{ | { | ||||
if (advice == MADV_FREE && object->type == OBJT_SWAP) | if (advice == MADV_FREE && object->type == OBJT_SWAP) | ||||
▲ Show 20 Lines • Show All 160 Lines • ▼ Show 20 Lines | if (source->ref_count == 1 && | ||||
return; | return; | ||||
} | } | ||||
VM_OBJECT_RUNLOCK(source); | VM_OBJECT_RUNLOCK(source); | ||||
} | } | ||||
/* | /* | ||||
* Allocate a new object with the given length. | * Allocate a new object with the given length. | ||||
*/ | */ | ||||
result = vm_object_allocate(OBJT_DEFAULT, atop(length)); | result = vm_object_anonymous(atop(length)); | ||||
/* | /* | ||||
* The new object shadows the source object, adding a reference to it. | * The new object shadows the source object, adding a reference to it. | ||||
* Our caller changes his reference to point to the new object, | * Our caller changes his reference to point to the new object, | ||||
* removing a reference to the source object. Net result: no change | * removing a reference to the source object. Net result: no change | ||||
* of reference count. | * of reference count. | ||||
* | * | ||||
* Try to optimize the result object's page color when shadowing | * Try to optimize the result object's page color when shadowing | ||||
Show All 38 Lines | |||||
vm_object_split(vm_map_entry_t entry) | vm_object_split(vm_map_entry_t entry) | ||||
{ | { | ||||
vm_page_t m, m_next; | vm_page_t m, m_next; | ||||
vm_object_t orig_object, new_object, source; | vm_object_t orig_object, new_object, source; | ||||
vm_pindex_t idx, offidxstart; | vm_pindex_t idx, offidxstart; | ||||
vm_size_t size; | vm_size_t size; | ||||
orig_object = entry->object.vm_object; | orig_object = entry->object.vm_object; | ||||
if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) | if ((orig_object->flags & OBJ_ANONYMOUS) == 0) | ||||
return; | return; | ||||
if (orig_object->ref_count <= 1) | if (orig_object->ref_count <= 1) | ||||
return; | return; | ||||
VM_OBJECT_WUNLOCK(orig_object); | VM_OBJECT_WUNLOCK(orig_object); | ||||
offidxstart = OFF_TO_IDX(entry->offset); | offidxstart = OFF_TO_IDX(entry->offset); | ||||
size = atop(entry->end - entry->start); | size = atop(entry->end - entry->start); | ||||
/* | /* | ||||
* If swap_pager_copy() is later called, it will convert new_object | * If swap_pager_copy() is later called, it will convert new_object | ||||
* into a swap object. | * into a swap object. | ||||
*/ | */ | ||||
new_object = vm_object_allocate(OBJT_DEFAULT, size); | new_object = vm_object_anonymous(size); | ||||
/* | /* | ||||
* At this point, the new object is still private, so the order in | * At this point, the new object is still private, so the order in | ||||
* which the original and new objects are locked does not matter. | * which the original and new objects are locked does not matter. | ||||
*/ | */ | ||||
VM_OBJECT_WLOCK(new_object); | VM_OBJECT_WLOCK(new_object); | ||||
VM_OBJECT_WLOCK(orig_object); | VM_OBJECT_WLOCK(orig_object); | ||||
new_object->domain = orig_object->domain; | new_object->domain = orig_object->domain; | ||||
▲ Show 20 Lines • Show All 356 Lines • ▼ Show 20 Lines | while (TRUE) { | ||||
*/ | */ | ||||
if ((backing_object = object->backing_object) == NULL) | if ((backing_object = object->backing_object) == NULL) | ||||
break; | break; | ||||
/* | /* | ||||
* we check the backing object first, because it is most likely | * we check the backing object first, because it is most likely | ||||
* not collapsable. | * not collapsable. | ||||
*/ | */ | ||||
if ((backing_object->flags & OBJ_ANONYMOUS) == 0) | |||||
break; | |||||
VM_OBJECT_WLOCK(backing_object); | VM_OBJECT_WLOCK(backing_object); | ||||
if (backing_object->handle != NULL || | if (backing_object->handle != NULL || | ||||
(backing_object->type != OBJT_DEFAULT && | |||||
backing_object->type != OBJT_SWAP) || | |||||
(backing_object->flags & (OBJ_DEAD | OBJ_NOSPLIT)) != 0 || | (backing_object->flags & (OBJ_DEAD | OBJ_NOSPLIT)) != 0 || | ||||
object->handle != NULL || | object->handle != NULL || | ||||
(object->type != OBJT_DEFAULT && | (object->type != OBJT_DEFAULT && | ||||
object->type != OBJT_SWAP) || | object->type != OBJT_SWAP) || | ||||
(object->flags & OBJ_DEAD)) { | (object->flags & OBJ_DEAD)) { | ||||
VM_OBJECT_WUNLOCK(backing_object); | VM_OBJECT_WUNLOCK(backing_object); | ||||
break; | break; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 339 Lines • ▼ Show 20 Lines | |||||
boolean_t | boolean_t | ||||
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, | vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, | ||||
vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) | vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) | ||||
{ | { | ||||
vm_pindex_t next_pindex; | vm_pindex_t next_pindex; | ||||
if (prev_object == NULL) | if (prev_object == NULL) | ||||
return (TRUE); | return (TRUE); | ||||
if ((prev_object->flags & (OBJ_ANONYMOUS | OBJ_NOSPLIT)) == 0) | |||||
jeff (author, unsubmitted): This line is wrong. I will fix and update later.
return (FALSE); | |||||
VM_OBJECT_WLOCK(prev_object); | VM_OBJECT_WLOCK(prev_object); | ||||
if ((prev_object->type != OBJT_DEFAULT && | if (prev_object->type != OBJT_DEFAULT && | ||||
prev_object->type != OBJT_SWAP) || | prev_object->type != OBJT_SWAP) { | ||||
(prev_object->flags & OBJ_NOSPLIT) != 0) { | |||||
VM_OBJECT_WUNLOCK(prev_object); | VM_OBJECT_WUNLOCK(prev_object); | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
/* | /* | ||||
* Try to collapse the object first | * Try to collapse the object first | ||||
*/ | */ | ||||
vm_object_collapse(prev_object); | vm_object_collapse(prev_object); | ||||
▲ Show 20 Lines • Show All 606 Lines • Show Last 20 Lines |
Doesn't this break the colouring mechanism? vm_object_color() does nothing if OBJ_COLORED is already set. So, this change makes it impossible to set pg_color to a non-zero value in the fault path.