Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_phys.c
Show First 20 Lines • Show All 61 Lines • ▼ Show 20 Lines | |||||
#include <ddb/ddb.h> | #include <ddb/ddb.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
#include <vm/vm_param.h> | #include <vm/vm_param.h> | ||||
#include <vm/vm_kern.h> | #include <vm/vm_kern.h> | ||||
#include <vm/vm_object.h> | #include <vm/vm_object.h> | ||||
#include <vm/vm_page.h> | #include <vm/vm_page.h> | ||||
#include <vm/vm_phys.h> | #include <vm/vm_phys.h> | ||||
#include <vm/vm_pagequeue.h> | |||||
_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX, | _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX, | ||||
"Too many physsegs."); | "Too many physsegs."); | ||||
#ifdef NUMA | #ifdef NUMA | ||||
struct mem_affinity *mem_affinity; | struct mem_affinity *mem_affinity; | ||||
int *mem_locality; | int *mem_locality; | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 570 Lines • ▼ Show 20 Lines | vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order) | ||||
KASSERT(order < VM_NFREEORDER, | KASSERT(order < VM_NFREEORDER, | ||||
("vm_phys_alloc_freelist_pages: order %d is out of range", order)); | ("vm_phys_alloc_freelist_pages: order %d is out of range", order)); | ||||
flind = vm_freelist_to_flind[freelist]; | flind = vm_freelist_to_flind[freelist]; | ||||
/* Check if freelist is present */ | /* Check if freelist is present */ | ||||
if (flind < 0) | if (flind < 0) | ||||
return (NULL); | return (NULL); | ||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); | vm_domain_free_assert_locked(VM_DOMAIN(domain)); | ||||
jeff: If we push a new lock to protect reservations we could split the free lock for synchronization of sleeps and paging from the lock that protects the actual free queues. | |||||
fl = &vm_phys_free_queues[domain][flind][pool][0]; | fl = &vm_phys_free_queues[domain][flind][pool][0]; | ||||
for (oind = order; oind < VM_NFREEORDER; oind++) { | for (oind = order; oind < VM_NFREEORDER; oind++) { | ||||
m = TAILQ_FIRST(&fl[oind].pl); | m = TAILQ_FIRST(&fl[oind].pl); | ||||
if (m != NULL) { | if (m != NULL) { | ||||
vm_freelist_rem(fl, m, oind); | vm_freelist_rem(fl, m, oind); | ||||
vm_phys_split_pages(m, oind, fl, order); | vm_phys_split_pages(m, oind, fl, order); | ||||
return (m); | return (m); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 236 Lines • ▼ Show 20 Lines | vm_phys_free_pages(vm_page_t m, int order) | ||||
KASSERT(m->order == VM_NFREEORDER, | KASSERT(m->order == VM_NFREEORDER, | ||||
("vm_phys_free_pages: page %p has unexpected order %d", | ("vm_phys_free_pages: page %p has unexpected order %d", | ||||
m, m->order)); | m, m->order)); | ||||
KASSERT(m->pool < VM_NFREEPOOL, | KASSERT(m->pool < VM_NFREEPOOL, | ||||
("vm_phys_free_pages: page %p has unexpected pool %d", | ("vm_phys_free_pages: page %p has unexpected pool %d", | ||||
m, m->pool)); | m, m->pool)); | ||||
KASSERT(order < VM_NFREEORDER, | KASSERT(order < VM_NFREEORDER, | ||||
("vm_phys_free_pages: order %d is out of range", order)); | ("vm_phys_free_pages: order %d is out of range", order)); | ||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); | |||||
seg = &vm_phys_segs[m->segind]; | seg = &vm_phys_segs[m->segind]; | ||||
vm_domain_free_assert_locked(VM_DOMAIN(seg->domain)); | |||||
if (order < VM_NFREEORDER - 1) { | if (order < VM_NFREEORDER - 1) { | ||||
pa = VM_PAGE_TO_PHYS(m); | pa = VM_PAGE_TO_PHYS(m); | ||||
do { | do { | ||||
pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order)); | pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order)); | ||||
if (pa < seg->start || pa >= seg->end) | if (pa < seg->start || pa >= seg->end) | ||||
break; | break; | ||||
m_buddy = &seg->first_page[atop(pa - seg->start)]; | m_buddy = &seg->first_page[atop(pa - seg->start)]; | ||||
if (m_buddy->order != order) | if (m_buddy->order != order) | ||||
Show All 21 Lines | |||||
{ | { | ||||
u_int n; | u_int n; | ||||
int order; | int order; | ||||
/* | /* | ||||
* Avoid unnecessary coalescing by freeing the pages in the largest | * Avoid unnecessary coalescing by freeing the pages in the largest | ||||
* possible power-of-two-sized subsets. | * possible power-of-two-sized subsets. | ||||
*/ | */ | ||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); | vm_domain_free_assert_locked(vm_pagequeue_domain(m)); | ||||
for (;; npages -= n) { | for (;; npages -= n) { | ||||
/* | /* | ||||
* Unsigned "min" is used here so that "order" is assigned | * Unsigned "min" is used here so that "order" is assigned | ||||
* "VM_NFREEORDER - 1" when "m"'s physical address is zero | * "VM_NFREEORDER - 1" when "m"'s physical address is zero | ||||
* or the low-order bits of its physical address are zero | * or the low-order bits of its physical address are zero | ||||
* because the size of a physical address exceeds the size of | * because the size of a physical address exceeds the size of | ||||
* a long. | * a long. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 89 Lines • ▼ Show 20 Lines | |||||
vm_phys_unfree_page(vm_page_t m) | vm_phys_unfree_page(vm_page_t m) | ||||
{ | { | ||||
struct vm_freelist *fl; | struct vm_freelist *fl; | ||||
struct vm_phys_seg *seg; | struct vm_phys_seg *seg; | ||||
vm_paddr_t pa, pa_half; | vm_paddr_t pa, pa_half; | ||||
vm_page_t m_set, m_tmp; | vm_page_t m_set, m_tmp; | ||||
int order; | int order; | ||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); | |||||
/* | /* | ||||
* First, find the contiguous, power of two-sized set of free | * First, find the contiguous, power of two-sized set of free | ||||
* physical pages containing the given physical page "m" and | * physical pages containing the given physical page "m" and | ||||
* assign it to "m_set". | * assign it to "m_set". | ||||
*/ | */ | ||||
seg = &vm_phys_segs[m->segind]; | seg = &vm_phys_segs[m->segind]; | ||||
vm_domain_free_assert_locked(VM_DOMAIN(seg->domain)); | |||||
for (m_set = m, order = 0; m_set->order == VM_NFREEORDER && | for (m_set = m, order = 0; m_set->order == VM_NFREEORDER && | ||||
order < VM_NFREEORDER - 1; ) { | order < VM_NFREEORDER - 1; ) { | ||||
order++; | order++; | ||||
pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order)); | pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order)); | ||||
if (pa >= seg->start) | if (pa >= seg->start) | ||||
m_set = &seg->first_page[atop(pa - seg->start)]; | m_set = &seg->first_page[atop(pa - seg->start)]; | ||||
else | else | ||||
return (FALSE); | return (FALSE); | ||||
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, | ||||
vm_paddr_t pa_end, pa_start; | vm_paddr_t pa_end, pa_start; | ||||
vm_page_t m_run; | vm_page_t m_run; | ||||
struct vm_phys_seg *seg; | struct vm_phys_seg *seg; | ||||
int segind; | int segind; | ||||
KASSERT(npages > 0, ("npages is 0")); | KASSERT(npages > 0, ("npages is 0")); | ||||
KASSERT(powerof2(alignment), ("alignment is not a power of 2")); | KASSERT(powerof2(alignment), ("alignment is not a power of 2")); | ||||
KASSERT(powerof2(boundary), ("boundary is not a power of 2")); | KASSERT(powerof2(boundary), ("boundary is not a power of 2")); | ||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); | vm_domain_free_assert_locked(VM_DOMAIN(domain)); | ||||
if (low >= high) | if (low >= high) | ||||
return (NULL); | return (NULL); | ||||
m_run = NULL; | m_run = NULL; | ||||
for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) { | for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) { | ||||
seg = &vm_phys_segs[segind]; | seg = &vm_phys_segs[segind]; | ||||
if (seg->start >= high || seg->domain != domain) | if (seg->start >= high || seg->domain != domain) | ||||
continue; | continue; | ||||
if (low >= seg->end) | if (low >= seg->end) | ||||
Show All 28 Lines | vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages, | ||||
vm_paddr_t pa, pa_end, size; | vm_paddr_t pa, pa_end, size; | ||||
vm_page_t m, m_ret; | vm_page_t m, m_ret; | ||||
u_long npages_end; | u_long npages_end; | ||||
int oind, order, pind; | int oind, order, pind; | ||||
KASSERT(npages > 0, ("npages is 0")); | KASSERT(npages > 0, ("npages is 0")); | ||||
KASSERT(powerof2(alignment), ("alignment is not a power of 2")); | KASSERT(powerof2(alignment), ("alignment is not a power of 2")); | ||||
KASSERT(powerof2(boundary), ("boundary is not a power of 2")); | KASSERT(powerof2(boundary), ("boundary is not a power of 2")); | ||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); | vm_domain_free_assert_locked(VM_DOMAIN(seg->domain)); | ||||
/* Compute the queue that is the best fit for npages. */ | /* Compute the queue that is the best fit for npages. */ | ||||
for (order = 0; (1 << order) < npages; order++); | for (order = 0; (1 << order) < npages; order++); | ||||
/* Search for a run satisfying the specified conditions. */ | /* Search for a run satisfying the specified conditions. */ | ||||
size = npages << PAGE_SHIFT; | size = npages << PAGE_SHIFT; | ||||
for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; | for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; | ||||
oind++) { | oind++) { | ||||
for (pind = 0; pind < VM_NFREEPOOL; pind++) { | for (pind = 0; pind < VM_NFREEPOOL; pind++) { | ||||
fl = (*seg->free_queues)[pind]; | fl = (*seg->free_queues)[pind]; | ||||
▲ Show 20 Lines • Show All 98 Lines • Show Last 20 Lines |
If we push a new lock to protect reservations we could split the free lock for synchronization of sleeps and paging from the lock that protects the actual free queues.