Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -407,190 +407,6 @@
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 
-/*
- * These load the old table data and store the new value.
- * They need to be atomic as the System MMU may write to the table at
- * the same time as the CPU.
- */
-#define	pmap_clear(table)		atomic_store_64(table, 0)
-#define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
-#define	pmap_load(table)		(*table)
-#define	pmap_load_clear(table)		atomic_swap_64(table, 0)
-#define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
-#define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
-#define	pmap_store(table, entry)	atomic_store_64(table, entry)
-
-/********************/
-/* Inline functions */
-/********************/
-
-static __inline void
-pagecopy(void *s, void *d)
-{
-
-	memcpy(d, s, PAGE_SIZE);
-}
-
-static __inline pd_entry_t *
-pmap_l0(pmap_t pmap, vm_offset_t va)
-{
-
-	return (&pmap->pm_l0[pmap_l0_index(va)]);
-}
-
-static __inline pd_entry_t *
-pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
-{
-	pd_entry_t *l1;
-
-	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
-	return (&l1[pmap_l1_index(va)]);
-}
-
-static __inline pd_entry_t *
-pmap_l1(pmap_t pmap, vm_offset_t va)
-{
-	pd_entry_t *l0;
-
-	l0 = pmap_l0(pmap, va);
-	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
-		return (NULL);
-
-	return (pmap_l0_to_l1(l0, va));
-}
-
-static __inline pd_entry_t *
-pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
-{
-	pd_entry_t l1, *l2p;
-
-	l1 = pmap_load(l1p);
-
-	/*
-	 * The valid bit may be clear if pmap_update_entry() is concurrently
-	 * modifying the entry, so for KVA only the entry type may be checked.
-	 */
-	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
-	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
-	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
-	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
-	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
-	return (&l2p[pmap_l2_index(va)]);
-}
-
-static __inline pd_entry_t *
-pmap_l2(pmap_t pmap, vm_offset_t va)
-{
-	pd_entry_t *l1;
-
-	l1 = pmap_l1(pmap, va);
-	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
-		return (NULL);
-
-	return (pmap_l1_to_l2(l1, va));
-}
-
-static __inline pt_entry_t *
-pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
-{
-	pd_entry_t l2;
-	pt_entry_t *l3p;
-
-	l2 = pmap_load(l2p);
-
-	/*
-	 * The valid bit may be clear if pmap_update_entry() is concurrently
-	 * modifying the entry, so for KVA only the entry type may be checked.
-	 */
-	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
-	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
-	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
-	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
-	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
-	return (&l3p[pmap_l3_index(va)]);
-}
-
-/*
- * Returns the lowest valid pde for a given virtual address.
- * The next level may or may not point to a valid page or block.
- */
-static __inline pd_entry_t *
-pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
-{
-	pd_entry_t *l0, *l1, *l2, desc;
-
-	l0 = pmap_l0(pmap, va);
-	desc = pmap_load(l0) & ATTR_DESCR_MASK;
-	if (desc != L0_TABLE) {
-		*level = -1;
-		return (NULL);
-	}
-
-	l1 = pmap_l0_to_l1(l0, va);
-	desc = pmap_load(l1) & ATTR_DESCR_MASK;
-	if (desc != L1_TABLE) {
-		*level = 0;
-		return (l0);
-	}
-
-	l2 = pmap_l1_to_l2(l1, va);
-	desc = pmap_load(l2) & ATTR_DESCR_MASK;
-	if (desc != L2_TABLE) {
-		*level = 1;
-		return (l1);
-	}
-
-	*level = 2;
-	return (l2);
-}
-
-/*
- * Returns the lowest valid pte block or table entry for a given virtual
- * address. If there are no valid entries return NULL and set the level to
- * the first invalid level.
- */
-static __inline pt_entry_t *
-pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
-{
-	pd_entry_t *l1, *l2, desc;
-	pt_entry_t *l3;
-
-	l1 = pmap_l1(pmap, va);
-	if (l1 == NULL) {
-		*level = 0;
-		return (NULL);
-	}
-	desc = pmap_load(l1) & ATTR_DESCR_MASK;
-	if (desc == L1_BLOCK) {
-		*level = 1;
-		return (l1);
-	}
-
-	if (desc != L1_TABLE) {
-		*level = 1;
-		return (NULL);
-	}
-
-	l2 = pmap_l1_to_l2(l1, va);
-	desc = pmap_load(l2) & ATTR_DESCR_MASK;
-	if (desc == L2_BLOCK) {
-		*level = 2;
-		return (l2);
-	}
-
-	if (desc != L2_TABLE) {
-		*level = 2;
-		return (NULL);
-	}
-
-	*level = 3;
-	l3 = pmap_l2_to_l3(l2, va);
-	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
-		return (NULL);
-
-	return (l3);
-}
-
 bool
 pmap_ps_enabled(pmap_t pmap __unused)
 {
@@ -641,13 +457,6 @@
 	return (true);
 }
 
-static __inline int
-pmap_l3_valid(pt_entry_t l3)
-{
-
-	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
-}
-
 CTASSERT(L1_BLOCK == L2_BLOCK);
 
 static pt_entry_t
@@ -723,25 +532,6 @@
 	    ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE));
 }
 
-static __inline void
-pmap_resident_count_inc(pmap_t pmap, int count)
-{
-
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	pmap->pm_stats.resident_count += count;
-}
-
-static __inline void
-pmap_resident_count_dec(pmap_t pmap, int count)
-{
-
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	KASSERT(pmap->pm_stats.resident_count >= count,
-	    ("pmap %p resident count underflow %ld %d", pmap,
-	    pmap->pm_stats.resident_count, count));
-	pmap->pm_stats.resident_count -= count;
-}
-
 static pt_entry_t *
 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
     u_int *l2_slot)
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -40,11 +40,13 @@
 
 #ifndef LOCORE
 
+#include 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
+#include 
 
 #ifdef _KERNEL
@@ -201,6 +203,23 @@
 extern void (*pmap_clean_stage2_tlbi)(void);
 extern void (*pmap_invalidate_vpipt_icache)(void);
 
+/*
+ * These load the old table data and store the new value.
+ * They need to be atomic as the System MMU may write to the table at
+ * the same time as the CPU.
+ */
+#define	pmap_clear(table)		atomic_store_64(table, 0)
+#define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
+#define	pmap_load(table)		(*table)
+#define	pmap_load_clear(table)		atomic_swap_64(table, 0)
+#define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
+#define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
+#define	pmap_store(table, entry)	atomic_store_64(table, entry)
+
+/********************/
+/* Inline functions */
+/********************/
+
 static inline int
 pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
 {
@@ -208,6 +227,199 @@
 	return (0);
 }
 
+static __inline void
+pagecopy(void *s, void *d)
+{
+
+	memcpy(d, s, PAGE_SIZE);
+}
+
+static __inline pd_entry_t *
+pmap_l0(pmap_t pmap, vm_offset_t va)
+{
+
+	return (&pmap->pm_l0[pmap_l0_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
+{
+	pd_entry_t *l1;
+
+	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
+	return (&l1[pmap_l1_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l1(pmap_t pmap, vm_offset_t va)
+{
+	pd_entry_t *l0;
+
+	l0 = pmap_l0(pmap, va);
+	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
+		return (NULL);
+
+	return (pmap_l0_to_l1(l0, va));
+}
+
+static __inline pd_entry_t *
+pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
+{
+	pd_entry_t l1, *l2p;
+
+	l1 = pmap_load(l1p);
+
+	/*
+	 * The valid bit may be clear if pmap_update_entry() is concurrently
+	 * modifying the entry, so for KVA only the entry type may be checked.
+	 */
+	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
+	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
+	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
+	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
+	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
+	return (&l2p[pmap_l2_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l2(pmap_t pmap, vm_offset_t va)
+{
+	pd_entry_t *l1;
+
+	l1 = pmap_l1(pmap, va);
+	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
+		return (NULL);
+
+	return (pmap_l1_to_l2(l1, va));
+}
+
+static __inline pt_entry_t *
+pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
+{
+	pd_entry_t l2;
+	pt_entry_t *l3p;
+
+	l2 = pmap_load(l2p);
+
+	/*
+	 * The valid bit may be clear if pmap_update_entry() is concurrently
+	 * modifying the entry, so for KVA only the entry type may be checked.
+	 */
+	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
+	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
+	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
+	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
+	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
+	return (&l3p[pmap_l3_index(va)]);
+}
+
+/*
+ * Returns the lowest valid pde for a given virtual address.
+ * The next level may or may not point to a valid page or block.
+ */
+static __inline pd_entry_t *
+pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
+{
+	pd_entry_t *l0, *l1, *l2, desc;
+
+	l0 = pmap_l0(pmap, va);
+	desc = pmap_load(l0) & ATTR_DESCR_MASK;
+	if (desc != L0_TABLE) {
+		*level = -1;
+		return (NULL);
+	}
+
+	l1 = pmap_l0_to_l1(l0, va);
+	desc = pmap_load(l1) & ATTR_DESCR_MASK;
+	if (desc != L1_TABLE) {
+		*level = 0;
+		return (l0);
+	}
+
+	l2 = pmap_l1_to_l2(l1, va);
+	desc = pmap_load(l2) & ATTR_DESCR_MASK;
+	if (desc != L2_TABLE) {
+		*level = 1;
+		return (l1);
+	}
+
+	*level = 2;
+	return (l2);
+}
+
+/*
+ * Returns the lowest valid pte block or table entry for a given virtual
+ * address. If there are no valid entries return NULL and set the level to
+ * the first invalid level.
+ */
+static __inline pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
+{
+	pd_entry_t *l1, *l2, desc;
+	pt_entry_t *l3;
+
+	l1 = pmap_l1(pmap, va);
+	if (l1 == NULL) {
+		*level = 0;
+		return (NULL);
+	}
+	desc = pmap_load(l1) & ATTR_DESCR_MASK;
+	if (desc == L1_BLOCK) {
+		*level = 1;
+		return (l1);
+	}
+
+	if (desc != L1_TABLE) {
+		*level = 1;
+		return (NULL);
+	}
+
+	l2 = pmap_l1_to_l2(l1, va);
+	desc = pmap_load(l2) & ATTR_DESCR_MASK;
+	if (desc == L2_BLOCK) {
+		*level = 2;
+		return (l2);
+	}
+
+	if (desc != L2_TABLE) {
+		*level = 2;
+		return (NULL);
+	}
+
+	*level = 3;
+	l3 = pmap_l2_to_l3(l2, va);
+	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
+		return (NULL);
+
+	return (l3);
+}
+
+static __inline int
+pmap_l3_valid(pt_entry_t l3)
+{
+
+	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
+}
+
+static __inline void
+pmap_resident_count_inc(pmap_t pmap, int count)
+{
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	pmap->pm_stats.resident_count += count;
+}
+
+static __inline void
+pmap_resident_count_dec(pmap_t pmap, int count)
+{
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	KASSERT(pmap->pm_stats.resident_count >= count,
+	    ("pmap %p resident count underflow %ld %d", pmap,
+	    pmap->pm_stats.resident_count, count));
+	pmap->pm_stats.resident_count -= count;
+}
+
 #endif /* _KERNEL */
 
 #endif /* !LOCORE */
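Note: with pmap_load() and the table-walk helpers (pmap_pte(), pmap_l3_valid(), and friends) now visible to any consumer of machine/pmap.h, other arm64 kernel code can walk a pmap without duplicating these definitions. The sketch below is illustrative only and is not part of this patch; example_va_to_pa() is a hypothetical caller, and it assumes the usual arm64 pte.h definitions (ATTR_MASK, L3_OFFSET) together with the PMAP_LOCK()/PMAP_UNLOCK() macros from machine/pmap.h.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pte.h>

/*
 * Hypothetical example: translate a virtual address in the given pmap to a
 * physical address using the helpers now exported by machine/pmap.h.
 */
static vm_paddr_t
example_va_to_pa(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte, tpte;
	vm_paddr_t pa;
	int level;

	pa = 0;
	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va, &level);
	if (pte != NULL) {
		tpte = pmap_load(pte);
		/* Only L3 (4 KiB page) mappings are handled in this sketch. */
		if (level == 3 && pmap_l3_valid(tpte))
			pa = (tpte & ~ATTR_MASK) | (va & L3_OFFSET);
	}
	PMAP_UNLOCK(pmap);
	return (pa);
}

A block mapping found at L1 or L2 would need the corresponding L1_OFFSET/L2_OFFSET arithmetic instead; the example ignores that case for brevity.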