Changeset View
Changeset View
Standalone View
Standalone View
sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 280 Lines • ▼ Show 20 Lines | ||||||||||
static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; | static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; | |||||||||
static struct md_page *pv_table; | static struct md_page *pv_table; | |||||||||
static struct md_page pv_dummy; | static struct md_page pv_dummy; | |||||||||
vm_paddr_t dmap_phys_base; /* The start of the dmap region */ | vm_paddr_t dmap_phys_base; /* The start of the dmap region */ | |||||||||
vm_paddr_t dmap_phys_max; /* The limit of the dmap region */ | vm_paddr_t dmap_phys_max; /* The limit of the dmap region */ | |||||||||
vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */ | vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */ | |||||||||
/* This code assumes all L1 DMAP entries will be used */ | ||||||||||
CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS); | ||||||||||
CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS); | ||||||||||
extern pt_entry_t pagetable_l0_ttbr1[]; | extern pt_entry_t pagetable_l0_ttbr1[]; | |||||||||
#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) | #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) | |||||||||
static vm_paddr_t physmap[PHYSMAP_SIZE]; | static vm_paddr_t physmap[PHYSMAP_SIZE]; | |||||||||
static u_int physmap_idx; | static u_int physmap_idx; | |||||||||
static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, | static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, | |||||||||
"VM/pmap parameters"); | "VM/pmap parameters"); | |||||||||
#if PAGE_SIZE == PAGE_SIZE_4K | ||||||||||
#define L1_BLOCKS_SUPPORTED 1 | ||||||||||
#else | ||||||||||
/* TODO: Make this dynamic when we support FEAT_LPA2 (TCR_EL1.DS == 1) */ | ||||||||||
#define L1_BLOCKS_SUPPORTED 0 | ||||||||||
#endif | ||||||||||
#define PMAP_ASSERT_L1_BLOCKS_SUPPORTED MPASS(L1_BLOCKS_SUPPORTED) | ||||||||||
/* | /* | |||||||||
* This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs | * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs | |||||||||
* that it has currently allocated to a pmap, a cursor ("asid_next") to | * that it has currently allocated to a pmap, a cursor ("asid_next") to | |||||||||
* optimize its search for a free ASID in the bit vector, and an epoch number | * optimize its search for a free ASID in the bit vector, and an epoch number | |||||||||
* ("asid_epoch") to indicate when it has reclaimed all previously allocated | * ("asid_epoch") to indicate when it has reclaimed all previously allocated | |||||||||
* ASIDs that are not currently active on a processor. | * ASIDs that are not currently active on a processor. | |||||||||
* | * | |||||||||
* The current epoch number is always in the range [0, INT_MAX). Negative | * The current epoch number is always in the range [0, INT_MAX). Negative | |||||||||
▲ Show 20 Lines • Show All 256 Lines • ▼ Show 20 Lines | pmap_pte(pmap_t pmap, vm_offset_t va, int *level) | |||||||||
l1 = pmap_l1(pmap, va); | l1 = pmap_l1(pmap, va); | |||||||||
if (l1 == NULL) { | if (l1 == NULL) { | |||||||||
*level = 0; | *level = 0; | |||||||||
return (NULL); | return (NULL); | |||||||||
} | } | |||||||||
desc = pmap_load(l1) & ATTR_DESCR_MASK; | desc = pmap_load(l1) & ATTR_DESCR_MASK; | |||||||||
if (desc == L1_BLOCK) { | if (desc == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
*level = 1; | *level = 1; | |||||||||
return (l1); | return (l1); | |||||||||
} | } | |||||||||
if (desc != L1_TABLE) { | if (desc != L1_TABLE) { | |||||||||
*level = 1; | *level = 1; | |||||||||
return (NULL); | return (NULL); | |||||||||
} | } | |||||||||
Show All 34 Lines | pmap_pte_exists(pmap_t pmap, vm_offset_t va, int level, const char *diag) | |||||||||
KASSERT(level >= 0 && level < 4, | KASSERT(level >= 0 && level < 4, | |||||||||
("%s: %s passed an out-of-range level (%d)", __func__, diag, | ("%s: %s passed an out-of-range level (%d)", __func__, diag, | |||||||||
level)); | level)); | |||||||||
l0p = pmap_l0(pmap, va); | l0p = pmap_l0(pmap, va); | |||||||||
desc = pmap_load(l0p) & ATTR_DESCR_MASK; | desc = pmap_load(l0p) & ATTR_DESCR_MASK; | |||||||||
if (desc == L0_TABLE && level > 0) { | if (desc == L0_TABLE && level > 0) { | |||||||||
l1p = pmap_l0_to_l1(l0p, va); | l1p = pmap_l0_to_l1(l0p, va); | |||||||||
desc = pmap_load(l1p) & ATTR_DESCR_MASK; | desc = pmap_load(l1p) & ATTR_DESCR_MASK; | |||||||||
if (desc == L1_BLOCK && level == 1) | if (desc == L1_BLOCK && level == 1) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
return (l1p); | return (l1p); | |||||||||
else if (desc == L1_TABLE && level > 1) { | } | |||||||||
if (desc == L1_TABLE && level > 1) { | ||||||||||
l2p = pmap_l1_to_l2(l1p, va); | l2p = pmap_l1_to_l2(l1p, va); | |||||||||
desc = pmap_load(l2p) & ATTR_DESCR_MASK; | desc = pmap_load(l2p) & ATTR_DESCR_MASK; | |||||||||
if (desc == L2_BLOCK && level == 2) | if (desc == L2_BLOCK && level == 2) | |||||||||
return (l2p); | return (l2p); | |||||||||
else if (desc == L2_TABLE && level > 2) { | else if (desc == L2_TABLE && level > 2) { | |||||||||
l3p = pmap_l2_to_l3(l2p, va); | l3p = pmap_l2_to_l3(l2p, va); | |||||||||
desc = pmap_load(l3p) & ATTR_DESCR_MASK; | desc = pmap_load(l3p) & ATTR_DESCR_MASK; | |||||||||
if (desc == L3_PAGE && level == 3) | if (desc == L3_PAGE && level == 3) | |||||||||
Show All 33 Lines | pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1, | |||||||||
if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE) | if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE) | |||||||||
return (false); | return (false); | |||||||||
l1p = pmap_l0_to_l1(l0p, va); | l1p = pmap_l0_to_l1(l0p, va); | |||||||||
*l1 = l1p; | *l1 = l1p; | |||||||||
if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
*l2 = NULL; | *l2 = NULL; | |||||||||
*l3 = NULL; | *l3 = NULL; | |||||||||
return (true); | return (true); | |||||||||
} | } | |||||||||
if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE) | if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE) | |||||||||
return (false); | return (false); | |||||||||
▲ Show 20 Lines • Show All 324 Lines • ▼ Show 20 Lines | for (i = 0; i < (physmap_idx * 2); i += 2) { | |||||||||
state.pa = physmap[i] & ~L3_OFFSET; | state.pa = physmap[i] & ~L3_OFFSET; | |||||||||
state.va = state.pa - dmap_phys_base + DMAP_MIN_ADDRESS; | state.va = state.pa - dmap_phys_base + DMAP_MIN_ADDRESS; | |||||||||
/* Create L3 mappings at the start of the region */ | /* Create L3 mappings at the start of the region */ | |||||||||
if ((state.pa & L2_OFFSET) != 0) | if ((state.pa & L2_OFFSET) != 0) | |||||||||
pmap_bootstrap_dmap_l3_page(&state, i); | pmap_bootstrap_dmap_l3_page(&state, i); | |||||||||
MPASS(state.pa <= physmap[i + 1]); | MPASS(state.pa <= physmap[i + 1]); | |||||||||
if (L1_BLOCKS_SUPPORTED) { | ||||||||||
/* Create L2 mappings at the start of the region */ | /* Create L2 mappings at the start of the region */ | |||||||||
if ((state.pa & L1_OFFSET) != 0) | if ((state.pa & L1_OFFSET) != 0) | |||||||||
pmap_bootstrap_dmap_l2_block(&state, i); | pmap_bootstrap_dmap_l2_block(&state, i); | |||||||||
MPASS(state.pa <= physmap[i + 1]); | MPASS(state.pa <= physmap[i + 1]); | |||||||||
/* Create the main L1 block mappings */ | /* Create the main L1 block mappings */ | |||||||||
for (; state.va < DMAP_MAX_ADDRESS && | for (; state.va < DMAP_MAX_ADDRESS && | |||||||||
(physmap[i + 1] - state.pa) >= L1_SIZE; | (physmap[i + 1] - state.pa) >= L1_SIZE; | |||||||||
state.va += L1_SIZE, state.pa += L1_SIZE) { | state.va += L1_SIZE, state.pa += L1_SIZE) { | |||||||||
/* Make sure there is a valid L1 table */ | /* Make sure there is a valid L1 table */ | |||||||||
pmap_bootstrap_dmap_l0_table(&state); | pmap_bootstrap_dmap_l0_table(&state); | |||||||||
MPASS((state.pa & L1_OFFSET) == 0); | MPASS((state.pa & L1_OFFSET) == 0); | |||||||||
pmap_store(&state.l1[pmap_l1_index(state.va)], | pmap_store(&state.l1[pmap_l1_index(state.va)], | |||||||||
state.pa | ATTR_DEFAULT | ATTR_S1_XN | | state.pa | ATTR_DEFAULT | ATTR_S1_XN | | |||||||||
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | | |||||||||
L1_BLOCK); | L1_BLOCK); | |||||||||
} | } | |||||||||
MPASS(state.pa <= physmap[i + 1]); | MPASS(state.pa <= physmap[i + 1]); | |||||||||
/* Create L2 mappings at the end of the region */ | /* Create L2 mappings at the end of the region */ | |||||||||
pmap_bootstrap_dmap_l2_block(&state, i); | pmap_bootstrap_dmap_l2_block(&state, i); | |||||||||
} else { | ||||||||||
for (; state.va < DMAP_MAX_ADDRESS && | ||||||||||
(physmap[i + 1] - state.pa) >= L2_SIZE;) { | ||||||||||
markj: This can be a while loop. | ||||||||||
pmap_bootstrap_dmap_l2_block(&state, i); | ||||||||||
} | ||||||||||
} | ||||||||||
MPASS(state.pa <= physmap[i + 1]); | MPASS(state.pa <= physmap[i + 1]); | |||||||||
/* Create L3 mappings at the end of the region */ | /* Create L3 mappings at the end of the region */ | |||||||||
pmap_bootstrap_dmap_l3_page(&state, i); | pmap_bootstrap_dmap_l3_page(&state, i); | |||||||||
MPASS(state.pa == physmap[i + 1]); | MPASS(state.pa == physmap[i + 1]); | |||||||||
if (state.pa > dmap_phys_max) { | if (state.pa > dmap_phys_max) { | |||||||||
dmap_phys_max = state.pa; | dmap_phys_max = state.pa; | |||||||||
▲ Show 20 Lines • Show All 211 Lines • ▼ Show 20 Lines | pmap_init(void) | |||||||||
/* | /* | |||||||||
* Are large page mappings enabled? | * Are large page mappings enabled? | |||||||||
*/ | */ | |||||||||
TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled); | TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled); | |||||||||
if (superpages_enabled) { | if (superpages_enabled) { | |||||||||
KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, | KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, | |||||||||
("pmap_init: can't assign to pagesizes[1]")); | ("pmap_init: can't assign to pagesizes[1]")); | |||||||||
pagesizes[1] = L2_SIZE; | pagesizes[1] = L2_SIZE; | |||||||||
if (L1_BLOCKS_SUPPORTED) { | ||||||||||
KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0, | KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0, | |||||||||
("pmap_init: can't assign to pagesizes[2]")); | ("pmap_init: can't assign to pagesizes[2]")); | |||||||||
pagesizes[2] = L1_SIZE; | pagesizes[2] = L1_SIZE; | |||||||||
} | } | |||||||||
} | ||||||||||
/* | /* | |||||||||
* Initialize the ASID allocator. | * Initialize the ASID allocator. | |||||||||
*/ | */ | |||||||||
pmap_init_asids(&asids, | pmap_init_asids(&asids, | |||||||||
(READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8); | (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8); | |||||||||
if (has_hyp()) { | if (has_hyp()) { | |||||||||
▲ Show 20 Lines • Show All 202 Lines • ▼ Show 20 Lines | pmap_extract(pmap_t pmap, vm_offset_t va) | |||||||||
* will return either a valid block/page entry, or NULL. | * will return either a valid block/page entry, or NULL. | |||||||||
*/ | */ | |||||||||
pte = pmap_pte(pmap, va, &lvl); | pte = pmap_pte(pmap, va, &lvl); | |||||||||
if (pte != NULL) { | if (pte != NULL) { | |||||||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | |||||||||
pa = tpte & ~ATTR_MASK; | pa = tpte & ~ATTR_MASK; | |||||||||
switch(lvl) { | switch(lvl) { | |||||||||
case 1: | case 1: | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, | KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, | |||||||||
("pmap_extract: Invalid L1 pte found: %lx", | ("pmap_extract: Invalid L1 pte found: %lx", | |||||||||
tpte & ATTR_DESCR_MASK)); | tpte & ATTR_DESCR_MASK)); | |||||||||
pa |= (va & L1_OFFSET); | pa |= (va & L1_OFFSET); | |||||||||
break; | break; | |||||||||
case 2: | case 2: | |||||||||
KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, | KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, | |||||||||
("pmap_extract: Invalid L2 pte found: %lx", | ("pmap_extract: Invalid L2 pte found: %lx", | |||||||||
Show All 31 Lines | pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) | |||||||||
m = NULL; | m = NULL; | |||||||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | |||||||||
pte = pmap_pte(pmap, va, &lvl); | pte = pmap_pte(pmap, va, &lvl); | |||||||||
if (pte != NULL) { | if (pte != NULL) { | |||||||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | |||||||||
KASSERT(lvl > 0 && lvl <= 3, | KASSERT(lvl > 0 && lvl <= 3, | |||||||||
("pmap_extract_and_hold: Invalid level %d", lvl)); | ("pmap_extract_and_hold: Invalid level %d", lvl)); | |||||||||
/* | ||||||||||
* Check the pte is either a L3 page, or L1 or L2 block entry. | ||||||||||
markjUnsubmitted Done Inline Actions
markj: | ||||||||||
* We can assume L1_BLOCK == L2_BLOCK. | ||||||||||
*/ | ||||||||||
KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) || | KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) || | |||||||||
(lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK), | (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK), | |||||||||
("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl, | ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl, | |||||||||
tpte & ATTR_DESCR_MASK)); | tpte & ATTR_DESCR_MASK)); | |||||||||
use = false; | use = false; | |||||||||
if ((prot & VM_PROT_WRITE) == 0) | if ((prot & VM_PROT_WRITE) == 0) | |||||||||
use = true; | use = true; | |||||||||
▲ Show 20 Lines • Show All 880 Lines • ▼ Show 20 Lines | pmap_growkernel(vm_offset_t addr) | |||||||||
} | } | |||||||||
} | } | |||||||||
/*************************************************** | /*************************************************** | |||||||||
* page management routines. | * page management routines. | |||||||||
***************************************************/ | ***************************************************/ | |||||||||
CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); | CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); | |||||||||
#if PAGE_SIZE == PAGE_SIZE_4K | ||||||||||
CTASSERT(_NPCM == 3); | CTASSERT(_NPCM == 3); | |||||||||
CTASSERT(_NPCPV == 168); | CTASSERT(_NPCPV == 168); | |||||||||
#else | ||||||||||
CTASSERT(_NPCM == 11); | ||||||||||
CTASSERT(_NPCPV == 677); | ||||||||||
#endif | ||||||||||
static __inline struct pv_chunk * | static __inline struct pv_chunk * | |||||||||
pv_to_chunk(pv_entry_t pv) | pv_to_chunk(pv_entry_t pv) | |||||||||
{ | { | |||||||||
return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); | return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); | |||||||||
} | } | |||||||||
#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) | #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) | |||||||||
#define PC_FREE0 0xfffffffffffffffful | #define PC_FREE0 0xfffffffffffffffful | |||||||||
#define PC_FREE1 0xfffffffffffffffful | #define PC_FREE1 0xfffffffffffffffful | |||||||||
#if _NPCM == 3 | ||||||||||
#define PC_FREE2 0x000000fffffffffful | #define PC_FREE2 0x000000fffffffffful | |||||||||
#else | ||||||||||
#define PC_FREE2 0xfffffffffffffffful | ||||||||||
#define PC_FREE3 0xfffffffffffffffful | ||||||||||
#define PC_FREE4 0xfffffffffffffffful | ||||||||||
#define PC_FREE5 0xfffffffffffffffful | ||||||||||
#define PC_FREE6 0xfffffffffffffffful | ||||||||||
#define PC_FREE7 0xfffffffffffffffful | ||||||||||
#define PC_FREE8 0xfffffffffffffffful | ||||||||||
#define PC_FREE9 0xfffffffffffffffful | ||||||||||
#define PC_FREE10 0x0000001ffffffffful | ||||||||||
#endif | ||||||||||
static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 }; | #if _NPCM == 3 | |||||||||
#define PC_IS_FREE(pc) ((pc)->pc_map[0] == PC_FREE0 && \ | ||||||||||
(pc)->pc_map[1] == PC_FREE1 && (pc)->pc_map[2] == PC_FREE2) | ||||||||||
#elif _NPCM == 11 | ||||||||||
#define PC_IS_FREE(pc) ((pc)->pc_map[0] == PC_FREE0 && \ | ||||||||||
(pc)->pc_map[1] == PC_FREE1 && (pc)->pc_map[2] == PC_FREE2 && \ | ||||||||||
(pc)->pc_map[1] == PC_FREE3 && (pc)->pc_map[2] == PC_FREE4 && \ | ||||||||||
(pc)->pc_map[1] == PC_FREE5 && (pc)->pc_map[2] == PC_FREE6 && \ | ||||||||||
(pc)->pc_map[1] == PC_FREE7 && (pc)->pc_map[2] == PC_FREE8 && \ | ||||||||||
(pc)->pc_map[1] == PC_FREE9 && (pc)->pc_map[2] == PC_FREE10) | ||||||||||
markjUnsubmitted Done Inline ActionsThe array indices are wrong. I also wonder why this can't be a memcmp() with pc_freemask? memcmp() expands to a compiler builtin these days. Then we don't have to define PC_FREE0, ..., PC_FREE9. markj: The array indices are wrong.
I also wonder why this can't be a memcmp() with `pc_freemask`? | ||||||||||
andrewAuthorUnsubmitted Done Inline ActionsI think for 64k PAGE_SIZE it should be memcmp as I calculated _NPCM to be 43 (and _NPCM to be 2714). andrew: I think for 64k `PAGE_SIZE` it should be memcmp as I calculated `_NPCM` to be 43 (and `_NPCM`… | ||||||||||
andrewAuthorUnsubmitted Done Inline ActionsI tried with memcmp((pc)->pc_map, pc_freemask, sizeof(pc_freemask)) == 0 which clang expands to a call to memcmp (even if I use __builtin_memcmp). In the above case it generates 11 64-bit loads. andrew: I tried with `memcmp((pc)->pc_map, pc_freemask, sizeof(pc_freemask)) == 0` which clang expands… | ||||||||||
markjUnsubmitted Done Inline ActionsWhy is an out-of-line memcmp() call undesirable? Anyway, I think we should at least stop defining PC_FREE0..9. Just have #define PC_FREEN 0xfffffffffffffffful #define PC_FREEL 0x0000001ffffffffful and compare pc->pc_map[0..9] == PC_FREEN && pc->pc_map[10] == PC_FREEL. markj: Why is an out-of-line memcmp() call undesirable?
Anyway, I think we should at least stop… | ||||||||||
#endif | ||||||||||
static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2, | ||||||||||
#ifdef PC_FREE3 | ||||||||||
PC_FREE3, PC_FREE4, PC_FREE5, PC_FREE6, PC_FREE7, PC_FREE8, PC_FREE9, | ||||||||||
PC_FREE10 | ||||||||||
#endif | ||||||||||
}; | ||||||||||
#ifdef PV_STATS | #ifdef PV_STATS | |||||||||
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; | static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; | |||||||||
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, | SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, | |||||||||
"Current number of pv entry chunks"); | "Current number of pv entry chunks"); | |||||||||
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, | SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, | |||||||||
"Current number of pv entry chunks allocated"); | "Current number of pv entry chunks allocated"); | |||||||||
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, | SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, | |||||||||
▲ Show 20 Lines • Show All 148 Lines • ▼ Show 20 Lines | if (freed == 0) { | |||||||||
goto next_chunk; | goto next_chunk; | |||||||||
} | } | |||||||||
/* Every freed mapping is for a 4 KB page. */ | /* Every freed mapping is for a 4 KB page. */ | |||||||||
pmap_resident_count_dec(pmap, freed); | pmap_resident_count_dec(pmap, freed); | |||||||||
PV_STAT(atomic_add_long(&pv_entry_frees, freed)); | PV_STAT(atomic_add_long(&pv_entry_frees, freed)); | |||||||||
PV_STAT(atomic_add_int(&pv_entry_spare, freed)); | PV_STAT(atomic_add_int(&pv_entry_spare, freed)); | |||||||||
PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); | PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); | |||||||||
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | |||||||||
if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 && | if (PC_IS_FREE(pc)) { | |||||||||
pc->pc_map[2] == PC_FREE2) { | ||||||||||
PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); | PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); | |||||||||
PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); | PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); | |||||||||
PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); | PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); | |||||||||
/* Entire chunk is free; return it. */ | /* Entire chunk is free; return it. */ | |||||||||
m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); | m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); | |||||||||
dump_drop_page(m_pc->phys_addr); | dump_drop_page(m_pc->phys_addr); | |||||||||
mtx_lock(&pv_chunks_mutex); | mtx_lock(&pv_chunks_mutex); | |||||||||
TAILQ_REMOVE(&pv_chunks, pc, pc_lru); | TAILQ_REMOVE(&pv_chunks, pc, pc_lru); | |||||||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | free_pv_entry(pmap_t pmap, pv_entry_t pv) | |||||||||
PV_STAT(atomic_add_long(&pv_entry_frees, 1)); | PV_STAT(atomic_add_long(&pv_entry_frees, 1)); | |||||||||
PV_STAT(atomic_add_int(&pv_entry_spare, 1)); | PV_STAT(atomic_add_int(&pv_entry_spare, 1)); | |||||||||
PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); | PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); | |||||||||
pc = pv_to_chunk(pv); | pc = pv_to_chunk(pv); | |||||||||
idx = pv - &pc->pc_pventry[0]; | idx = pv - &pc->pc_pventry[0]; | |||||||||
field = idx / 64; | field = idx / 64; | |||||||||
bit = idx % 64; | bit = idx % 64; | |||||||||
pc->pc_map[field] |= 1ul << bit; | pc->pc_map[field] |= 1ul << bit; | |||||||||
if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 || | if (!PC_IS_FREE(pc)) { | |||||||||
pc->pc_map[2] != PC_FREE2) { | ||||||||||
/* 98% of the time, pc is already at the head of the list. */ | /* 98% of the time, pc is already at the head of the list. */ | |||||||||
if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { | if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { | |||||||||
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | |||||||||
TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); | |||||||||
} | } | |||||||||
return; | return; | |||||||||
} | } | |||||||||
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | |||||||||
▲ Show 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | if (m == NULL) { | |||||||||
if (m == NULL) | if (m == NULL) | |||||||||
goto retry; | goto retry; | |||||||||
} | } | |||||||||
PV_STAT(atomic_add_int(&pc_chunk_count, 1)); | PV_STAT(atomic_add_int(&pc_chunk_count, 1)); | |||||||||
PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); | PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); | |||||||||
dump_add_page(m->phys_addr); | dump_add_page(m->phys_addr); | |||||||||
pc = (void *)PHYS_TO_DMAP(m->phys_addr); | pc = (void *)PHYS_TO_DMAP(m->phys_addr); | |||||||||
pc->pc_pmap = pmap; | pc->pc_pmap = pmap; | |||||||||
pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ | memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask)); | |||||||||
pc->pc_map[1] = PC_FREE1; | pc->pc_map[0] &= ~1ul; /* preallocated bit 0 */ | |||||||||
pc->pc_map[2] = PC_FREE2; | ||||||||||
mtx_lock(&pv_chunks_mutex); | mtx_lock(&pv_chunks_mutex); | |||||||||
TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); | TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); | |||||||||
mtx_unlock(&pv_chunks_mutex); | mtx_unlock(&pv_chunks_mutex); | |||||||||
pv = &pc->pc_pventry[0]; | pv = &pc->pc_pventry[0]; | |||||||||
TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); | |||||||||
PV_STAT(atomic_add_long(&pv_entry_count, 1)); | PV_STAT(atomic_add_long(&pv_entry_count, 1)); | |||||||||
PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); | PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); | |||||||||
return (pv); | return (pv); | |||||||||
▲ Show 20 Lines • Show All 43 Lines • ▼ Show 20 Lines | if (m == NULL) { | |||||||||
goto retry; | goto retry; | |||||||||
reclaimed = true; | reclaimed = true; | |||||||||
} | } | |||||||||
PV_STAT(atomic_add_int(&pc_chunk_count, 1)); | PV_STAT(atomic_add_int(&pc_chunk_count, 1)); | |||||||||
PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); | PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); | |||||||||
dump_add_page(m->phys_addr); | dump_add_page(m->phys_addr); | |||||||||
pc = (void *)PHYS_TO_DMAP(m->phys_addr); | pc = (void *)PHYS_TO_DMAP(m->phys_addr); | |||||||||
pc->pc_pmap = pmap; | pc->pc_pmap = pmap; | |||||||||
pc->pc_map[0] = PC_FREE0; | memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask)); | |||||||||
pc->pc_map[1] = PC_FREE1; | ||||||||||
pc->pc_map[2] = PC_FREE2; | ||||||||||
TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); | |||||||||
TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); | TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); | |||||||||
PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); | PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); | |||||||||
/* | /* | |||||||||
* The reclaim might have freed a chunk from the current pmap. | * The reclaim might have freed a chunk from the current pmap. | |||||||||
* If that chunk contained available entries, we need to | * If that chunk contained available entries, we need to | |||||||||
* re-count the number of available entries. | * re-count the number of available entries. | |||||||||
▲ Show 20 Lines • Show All 417 Lines • ▼ Show 20 Lines | for (; sva < eva; sva = va_next) { | |||||||||
va_next = (sva + L1_SIZE) & ~L1_OFFSET; | va_next = (sva + L1_SIZE) & ~L1_OFFSET; | |||||||||
if (va_next < sva) | if (va_next < sva) | |||||||||
va_next = eva; | va_next = eva; | |||||||||
l1 = pmap_l0_to_l1(l0, sva); | l1 = pmap_l0_to_l1(l0, sva); | |||||||||
if (pmap_load(l1) == 0) | if (pmap_load(l1) == 0) | |||||||||
continue; | continue; | |||||||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT(va_next <= eva, | KASSERT(va_next <= eva, | |||||||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | |||||||||
"l1 %#lx sva %#lx eva %#lx va_next %#lx", | "l1 %#lx sva %#lx eva %#lx va_next %#lx", | |||||||||
pmap_load(l1), sva, eva, va_next)); | pmap_load(l1), sva, eva, va_next)); | |||||||||
MPASS(pmap != kernel_pmap); | MPASS(pmap != kernel_pmap); | |||||||||
MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | |||||||||
pmap_clear(l1); | pmap_clear(l1); | |||||||||
pmap_invalidate_page(pmap, sva, true); | pmap_invalidate_page(pmap, sva, true); | |||||||||
▲ Show 20 Lines • Show All 237 Lines • ▼ Show 20 Lines | for (; sva < eva; sva = va_next) { | |||||||||
va_next = (sva + L1_SIZE) & ~L1_OFFSET; | va_next = (sva + L1_SIZE) & ~L1_OFFSET; | |||||||||
if (va_next < sva) | if (va_next < sva) | |||||||||
va_next = eva; | va_next = eva; | |||||||||
l1 = pmap_l0_to_l1(l0, sva); | l1 = pmap_l0_to_l1(l0, sva); | |||||||||
if (pmap_load(l1) == 0) | if (pmap_load(l1) == 0) | |||||||||
continue; | continue; | |||||||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT(va_next <= eva, | KASSERT(va_next <= eva, | |||||||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | |||||||||
"l1 %#lx sva %#lx eva %#lx va_next %#lx", | "l1 %#lx sva %#lx eva %#lx va_next %#lx", | |||||||||
pmap_load(l1), sva, eva, va_next)); | pmap_load(l1), sva, eva, va_next)); | |||||||||
MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | |||||||||
if ((pmap_load(l1) & mask) != nbits) { | if ((pmap_load(l1) & mask) != nbits) { | |||||||||
pmap_store(l1, (pmap_load(l1) & ~mask) | nbits); | pmap_store(l1, (pmap_load(l1) & ~mask) | nbits); | |||||||||
pmap_invalidate_page(pmap, sva, true); | pmap_invalidate_page(pmap, sva, true); | |||||||||
▲ Show 20 Lines • Show All 314 Lines • ▼ Show 20 Lines | if ((pmap_load(l0p) & ATTR_DESCR_VALID) == 0) { | |||||||||
KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va)); | KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va)); | |||||||||
origpte = pmap_load(l1p); | origpte = pmap_load(l1p); | |||||||||
if ((origpte & ATTR_DESCR_VALID) == 0) { | if ((origpte & ATTR_DESCR_VALID) == 0) { | |||||||||
mp = PHYS_TO_VM_PAGE(pmap_load(l0p) & | mp = PHYS_TO_VM_PAGE(pmap_load(l0p) & | |||||||||
~ATTR_MASK); | ~ATTR_MASK); | |||||||||
mp->ref_count++; | mp->ref_count++; | |||||||||
} | } | |||||||||
} | } | |||||||||
KASSERT((origpte & ATTR_DESCR_VALID) == 0 || | KASSERT((origpte & ~ATTR_MASK) == (newpte & ~ATTR_MASK) || | |||||||||
((origpte & ATTR_DESCR_MASK) == L1_BLOCK && | (L1_BLOCKS_SUPPORTED && | |||||||||
(origpte & ~ATTR_MASK) == (newpte & ~ATTR_MASK)), | (origpte & ATTR_DESCR_MASK) == L1_BLOCK && | |||||||||
(origpte & ATTR_DESCR_VALID) == 0), | ||||||||||
("va %#lx changing 1G phys page l1 %#lx newpte %#lx", | ("va %#lx changing 1G phys page l1 %#lx newpte %#lx", | |||||||||
va, origpte, newpte)); | va, origpte, newpte)); | |||||||||
pmap_store(l1p, newpte); | pmap_store(l1p, newpte); | |||||||||
} else /* (psind == 1) */ { | } else /* (psind == 1) */ { | |||||||||
l2p = pmap_l2(pmap, va); | l2p = pmap_l2(pmap, va); | |||||||||
if (l2p == NULL) { | if (l2p == NULL) { | |||||||||
mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL); | mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL); | |||||||||
if (mp == NULL) { | if (mp == NULL) { | |||||||||
▲ Show 20 Lines • Show All 113 Lines • ▼ Show 20 Lines | pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | |||||||||
CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa); | CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa); | |||||||||
lock = NULL; | lock = NULL; | |||||||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | |||||||||
if ((flags & PMAP_ENTER_LARGEPAGE) != 0) { | if ((flags & PMAP_ENTER_LARGEPAGE) != 0) { | |||||||||
KASSERT((m->oflags & VPO_UNMANAGED) != 0, | KASSERT((m->oflags & VPO_UNMANAGED) != 0, | |||||||||
("managed largepage va %#lx flags %#x", va, flags)); | ("managed largepage va %#lx flags %#x", va, flags)); | |||||||||
new_l3 &= ~L3_PAGE; | new_l3 &= ~L3_PAGE; | |||||||||
if (psind == 2) | if (psind == 2) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
new_l3 |= L1_BLOCK; | new_l3 |= L1_BLOCK; | |||||||||
else /* (psind == 1) */ | } else /* (psind == 1) */ | |||||||||
new_l3 |= L2_BLOCK; | new_l3 |= L2_BLOCK; | |||||||||
rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind); | rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind); | |||||||||
goto out; | goto out; | |||||||||
} | } | |||||||||
if (psind == 1) { | if (psind == 1) { | |||||||||
/* Assert the required virtual and physical alignment. */ | /* Assert the required virtual and physical alignment. */ | |||||||||
KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned")); | KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned")); | |||||||||
KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); | KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); | |||||||||
▲ Show 20 Lines • Show All 661 Lines • ▼ Show 20 Lines | for (; sva < eva; sva = va_next) { | |||||||||
l1 = pmap_l0_to_l1(l0, sva); | l1 = pmap_l0_to_l1(l0, sva); | |||||||||
va_next = (sva + L1_SIZE) & ~L1_OFFSET; | va_next = (sva + L1_SIZE) & ~L1_OFFSET; | |||||||||
if (va_next < sva) | if (va_next < sva) | |||||||||
va_next = eva; | va_next = eva; | |||||||||
if (pmap_load(l1) == 0) | if (pmap_load(l1) == 0) | |||||||||
continue; | continue; | |||||||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT(va_next <= eva, | KASSERT(va_next <= eva, | |||||||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | |||||||||
"l1 %#lx sva %#lx eva %#lx va_next %#lx", | "l1 %#lx sva %#lx eva %#lx va_next %#lx", | |||||||||
pmap_load(l1), sva, eva, va_next)); | pmap_load(l1), sva, eva, va_next)); | |||||||||
MPASS(pmap != kernel_pmap); | MPASS(pmap != kernel_pmap); | |||||||||
MPASS((pmap_load(l1) & (ATTR_SW_MANAGED | | MPASS((pmap_load(l1) & (ATTR_SW_MANAGED | | |||||||||
ATTR_SW_WIRED)) == ATTR_SW_WIRED); | ATTR_SW_WIRED)) == ATTR_SW_WIRED); | |||||||||
pmap_clear_bits(l1, ATTR_SW_WIRED); | pmap_clear_bits(l1, ATTR_SW_WIRED); | |||||||||
▲ Show 20 Lines • Show All 96 Lines • ▼ Show 20 Lines | for (addr = src_addr; addr < end_addr; addr = va_next) { | |||||||||
va_next = (addr + L1_SIZE) & ~L1_OFFSET; | va_next = (addr + L1_SIZE) & ~L1_OFFSET; | |||||||||
if (va_next < addr) | if (va_next < addr) | |||||||||
va_next = end_addr; | va_next = end_addr; | |||||||||
l1 = pmap_l0_to_l1(l0, addr); | l1 = pmap_l0_to_l1(l0, addr); | |||||||||
if (pmap_load(l1) == 0) | if (pmap_load(l1) == 0) | |||||||||
continue; | continue; | |||||||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT(va_next <= end_addr, | KASSERT(va_next <= end_addr, | |||||||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | |||||||||
"l1 %#lx addr %#lx end_addr %#lx va_next %#lx", | "l1 %#lx addr %#lx end_addr %#lx va_next %#lx", | |||||||||
pmap_load(l1), addr, end_addr, va_next)); | pmap_load(l1), addr, end_addr, va_next)); | |||||||||
srcptepaddr = pmap_load(l1); | srcptepaddr = pmap_load(l1); | |||||||||
l1 = pmap_l1(dst_pmap, addr); | l1 = pmap_l1(dst_pmap, addr); | |||||||||
if (l1 == NULL) { | if (l1 == NULL) { | |||||||||
if (_pmap_alloc_l3(dst_pmap, | if (_pmap_alloc_l3(dst_pmap, | |||||||||
▲ Show 20 Lines • Show All 942 Lines • ▼ Show 20 Lines | for (; sva < eva; sva = va_next) { | |||||||||
va_next = (sva + L1_SIZE) & ~L1_OFFSET; | va_next = (sva + L1_SIZE) & ~L1_OFFSET; | |||||||||
if (va_next < sva) | if (va_next < sva) | |||||||||
va_next = eva; | va_next = eva; | |||||||||
l1 = pmap_l0_to_l1(l0, sva); | l1 = pmap_l0_to_l1(l0, sva); | |||||||||
if (pmap_load(l1) == 0) | if (pmap_load(l1) == 0) | |||||||||
continue; | continue; | |||||||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT(va_next <= eva, | KASSERT(va_next <= eva, | |||||||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | |||||||||
"l1 %#lx sva %#lx eva %#lx va_next %#lx", | "l1 %#lx sva %#lx eva %#lx va_next %#lx", | |||||||||
pmap_load(l1), sva, eva, va_next)); | pmap_load(l1), sva, eva, va_next)); | |||||||||
continue; | continue; | |||||||||
} | } | |||||||||
va_next = (sva + L2_SIZE) & ~L2_OFFSET; | va_next = (sva + L2_SIZE) & ~L2_OFFSET; | |||||||||
▲ Show 20 Lines • Show All 497 Lines • ▼ Show 20 Lines | if (ptep == NULL && !skip_unmapped) { | |||||||||
/* | /* | |||||||||
* Split the entry to an level 3 table, then | * Split the entry to an level 3 table, then | |||||||||
* set the new attribute. | * set the new attribute. | |||||||||
*/ | */ | |||||||||
switch (lvl) { | switch (lvl) { | |||||||||
default: | default: | |||||||||
panic("Invalid DMAP table level: %d\n", lvl); | panic("Invalid DMAP table level: %d\n", lvl); | |||||||||
case 1: | case 1: | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
if ((tmpva & L1_OFFSET) == 0 && | if ((tmpva & L1_OFFSET) == 0 && | |||||||||
(base + size - tmpva) >= L1_SIZE) { | (base + size - tmpva) >= L1_SIZE) { | |||||||||
pte_size = L1_SIZE; | pte_size = L1_SIZE; | |||||||||
break; | break; | |||||||||
} | } | |||||||||
newpte = pmap_demote_l1(kernel_pmap, ptep, | newpte = pmap_demote_l1(kernel_pmap, ptep, | |||||||||
tmpva & ~L1_OFFSET); | tmpva & ~L1_OFFSET); | |||||||||
if (newpte == NULL) | if (newpte == NULL) | |||||||||
▲ Show 20 Lines • Show All 59 Lines • ▼ Show 20 Lines | pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va) | |||||||||
pt_entry_t *l2, newl2, oldl1; | pt_entry_t *l2, newl2, oldl1; | |||||||||
vm_offset_t tmpl1; | vm_offset_t tmpl1; | |||||||||
vm_paddr_t l2phys, phys; | vm_paddr_t l2phys, phys; | |||||||||
vm_page_t ml2; | vm_page_t ml2; | |||||||||
int i; | int i; | |||||||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | |||||||||
oldl1 = pmap_load(l1); | oldl1 = pmap_load(l1); | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK, | KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK, | |||||||||
("pmap_demote_l1: Demoting a non-block entry")); | ("pmap_demote_l1: Demoting a non-block entry")); | |||||||||
KASSERT((va & L1_OFFSET) == 0, | KASSERT((va & L1_OFFSET) == 0, | |||||||||
("pmap_demote_l1: Invalid virtual address %#lx", va)); | ("pmap_demote_l1: Invalid virtual address %#lx", va)); | |||||||||
KASSERT((oldl1 & ATTR_SW_MANAGED) == 0, | KASSERT((oldl1 & ATTR_SW_MANAGED) == 0, | |||||||||
("pmap_demote_l1: Level 1 table shouldn't be managed")); | ("pmap_demote_l1: Level 1 table shouldn't be managed")); | |||||||||
tmpl1 = 0; | tmpl1 = 0; | |||||||||
▲ Show 20 Lines • Show All 1,004 Lines • ▼ Show 20 Lines | for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES; | |||||||||
for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) { | for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) { | |||||||||
l1e = l1[j]; | l1e = l1[j]; | |||||||||
if ((l1e & ATTR_DESCR_VALID) == 0) { | if ((l1e & ATTR_DESCR_VALID) == 0) { | |||||||||
sysctl_kmaps_dump(sb, &range, sva); | sysctl_kmaps_dump(sb, &range, sva); | |||||||||
sva += L1_SIZE; | sva += L1_SIZE; | |||||||||
continue; | continue; | |||||||||
} | } | |||||||||
if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { | |||||||||
PMAP_ASSERT_L1_BLOCKS_SUPPORTED; | ||||||||||
sysctl_kmaps_check(sb, &range, sva, l0e, l1e, | sysctl_kmaps_check(sb, &range, sva, l0e, l1e, | |||||||||
0, 0); | 0, 0); | |||||||||
range.l1blocks++; | range.l1blocks++; | |||||||||
sva += L1_SIZE; | sva += L1_SIZE; | |||||||||
continue; | continue; | |||||||||
} | } | |||||||||
pa = l1e & ~ATTR_MASK; | pa = l1e & ~ATTR_MASK; | |||||||||
l2 = (pd_entry_t *)PHYS_TO_DMAP(pa); | l2 = (pd_entry_t *)PHYS_TO_DMAP(pa); | |||||||||
▲ Show 20 Lines • Show All 46 Lines • Show Last 20 Lines |
This `for` loop could be simplified to a `while` loop, since the iteration variable is advanced inside the body rather than in the loop header.