diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -51,6 +51,9 @@
 	    vm_paddr_t boundary);
 vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
 	    int domain, int req, vm_page_t mpred);
+int vm_reserv_alloc_npages(vm_object_t object, vm_pindex_t pindex,
+	    int domain, vm_page_t mpred, int req, vm_page_t *ma,
+	    u_long npages);
 void vm_reserv_break_all(vm_object_t object);
 boolean_t vm_reserv_free_page(vm_page_t m);
 void vm_reserv_init(void);
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -806,6 +806,133 @@
 	return (m_ret);
 }
 
+/*
+ * Allocates "npages" physical pages from an existing or newly created
+ * reservation, starting at the offset "pindex".  The allocated pages need
+ * not be physically contiguous.  Returns the number of pages actually
+ * allocated, which may be less than "npages".
+ *
+ * The page "mpred" must immediately precede the offset "pindex" within the
+ * specified object.
+ *
+ * The object must be locked.
+ */
+int
+vm_reserv_alloc_npages(vm_object_t object, vm_pindex_t pindex, int domain,
+    vm_page_t mpred, int req, vm_page_t *ma, u_long npages)
+{
+	struct vm_domain *vmd;
+	vm_page_t m, msucc;
+	vm_pindex_t first, leftcap, rightcap;
+	vm_reserv_t rv;
+	int index, got;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(npages != 0, ("%s: npages is 0", __func__));
+	vmd = VM_DOMAIN(domain);
+
+	/*
+	 * Is a reservation fundamentally impossible?
+	 */
+	if (pindex < VM_RESERV_INDEX(object, pindex) || pindex >= object->size)
+		return (0);
+
+	/*
+	 * At most one reservation's worth of pages can be allocated here.
+	 */
+	npages = ulmin(VM_LEVEL_0_NPAGES, npages);
+
+	/*
+	 * Look for an existing reservation.
+	 */
+	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
+	if (rv == NULL) {
+		/*
+		 * Could a reservation fit between the first index to the left
+		 * that can be used and the first index to the right that
+		 * cannot be used?
+		 *
+		 * We must synchronize with the reserv object lock to protect
+		 * the pindex/object of the resulting reservations against
+		 * rename while we are inspecting.
+		 */
+		first = pindex - VM_RESERV_INDEX(object, pindex);
+		vm_reserv_object_lock(object);
+		if (mpred != NULL) {
+			if ((rv = vm_reserv_from_page(mpred))->object != object)
+				leftcap = mpred->pindex + 1;
+			else
+				leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
+			if (leftcap > first) {
+				vm_reserv_object_unlock(object);
+				return (0);
+			}
+		}
+		if (msucc != NULL) {
+			if ((rv = vm_reserv_from_page(msucc))->object != object)
+				rightcap = msucc->pindex;
+			else
+				rightcap = rv->pindex;
+			if (first + VM_LEVEL_0_NPAGES > rightcap) {
+				vm_reserv_object_unlock(object);
+				return (0);
+			}
+		}
+		vm_reserv_object_unlock(object);
+
+		/*
+		 * Would the last new reservation extend past the end of the
+		 * object?
+		 *
+		 * If the object is unlikely to grow, don't allocate a
+		 * reservation for the tail.
+		 */
+		if ((object->flags & OBJ_ANON) == 0 &&
+		    first + VM_LEVEL_0_NPAGES > object->size)
+			return (0);
+
+		/*
+		 * Allocate and populate the new reservation.
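+		 *
+		 * Note that vm_domain_allocate() charges only "npages"
+		 * against the domain's free page count, while
+		 * vm_phys_alloc_pages() removes an entire VM_LEVEL_0_ORDER
+		 * chunk from the free lists; the chunk's unpopulated pages
+		 * remain accounted as free and are charged individually if
+		 * they are populated later.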
+		 */
+		m = NULL;
+		if (vm_domain_allocate(vmd, req, npages)) {
+			vm_domain_free_lock(vmd);
+			m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
+			    VM_LEVEL_0_ORDER);
+			vm_domain_free_unlock(vmd);
+			if (m == NULL) {
+				vm_domain_freecnt_inc(vmd, npages);
+				return (0);
+			}
+		} else
+			return (0);
+
+		rv = vm_reserv_from_page(m);
+		vm_reserv_lock(rv);
+		KASSERT(rv->pages == m,
+		    ("%s: reserv %p's pages is corrupted", __func__, rv));
+		vm_reserv_insert(rv, object, first);
+	} else {
+		if (!vm_domain_allocate(vmd, req, npages))
+			return (0);
+		vm_reserv_lock(rv);
+	}
+
+	KASSERT(object != kernel_object || rv->domain == domain,
+	    ("%s: domain mismatch", __func__));
+
+	/*
+	 * Populate up to "npages" free slots, skipping indices that are
+	 * already populated.
+	 */
+	index = VM_RESERV_INDEX(object, pindex);
+	got = 0;
+	for (; index < VM_LEVEL_0_NPAGES && got < npages; index++) {
+		if (bit_test(rv->popmap, index))
+			continue;
+		vm_reserv_populate(rv, index);
+		ma[got] = &rv->pages[index];
+		got++;
+	}
+	vm_reserv_unlock(rv);
+
+	/*
+	 * Return the unused portion of the up-front charge to the domain's
+	 * free page count.
+	 */
+	if (got < npages)
+		vm_domain_freecnt_inc(vmd, npages - got);
+
+	return (got);
+}
+
 /*
  * Allocate a physical page from an existing or newly created reservation.
  *
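
For orientation, here is a minimal sketch of a batched caller; it is
hypothetical, since this diff adds no consumer of vm_reserv_alloc_npages(),
and the helper name vm_page_grab_run_sketch() is invented for illustration.
As with vm_reserv_alloc_page(), the pages returned in "ma" are raw
reservation pages: the caller must still perform the per-page initialization
and vm_page_insert_after() that the "found" path of
vm_page_alloc_domain_after() performs, which the sketch elides.

static int
vm_page_grab_run_sketch(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t *ma, u_long npages)
{
	vm_page_t mpred;
	int got;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/* Find the page that would immediately precede "pindex". */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);

	/*
	 * Take as many pages as possible from an existing or newly created
	 * reservation; a short count (including 0) means the remainder must
	 * come from the ordinary per-page allocator.
	 */
	got = vm_reserv_alloc_npages(object, pindex, domain, mpred, req, ma,
	    npages);

	/* Per-page initialization and insertion of ma[0 .. got - 1] elided. */
	return (got);
}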