Changeset View
Changeset View
Standalone View
Standalone View
sys/arm64/include/pmap.h
Show All 34 Lines | |||||
#ifndef _MACHINE_PMAP_H_ | #ifndef _MACHINE_PMAP_H_ | ||||
#define _MACHINE_PMAP_H_ | #define _MACHINE_PMAP_H_ | ||||
#include <machine/pte.h> | #include <machine/pte.h> | ||||
#ifndef LOCORE | #ifndef LOCORE | ||||
#include <sys/systm.h> | |||||
#include <sys/queue.h> | #include <sys/queue.h> | ||||
#include <sys/_lock.h> | #include <sys/lock.h> | ||||
#include <sys/_mutex.h> | #include <sys/mutex.h> | ||||
#include <vm/_vm_radix.h> | #include <vm/_vm_radix.h> | ||||
#include <vm/vm_param.h> | |||||
#ifdef _KERNEL | #ifdef _KERNEL | ||||
#define vtophys(va) pmap_kextract((vm_offset_t)(va)) | #define vtophys(va) pmap_kextract((vm_offset_t)(va)) | ||||
#endif | #endif | ||||
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) | #define pmap_page_get_memattr(m) ((m)->md.pv_memattr) | ||||
▲ Show 20 Lines • Show All 140 Lines • ▼ Show 20 Lines | |||||
int pmap_sremove(pmap_t pmap, vm_offset_t va); | int pmap_sremove(pmap_t pmap, vm_offset_t va); | ||||
void pmap_sremove_pages(pmap_t pmap); | void pmap_sremove_pages(pmap_t pmap); | ||||
struct pcb *pmap_switch(struct thread *, struct thread *); | struct pcb *pmap_switch(struct thread *, struct thread *); | ||||
extern void (*pmap_clean_stage2_tlbi)(void); | extern void (*pmap_clean_stage2_tlbi)(void); | ||||
extern void (*pmap_invalidate_vpipt_icache)(void); | extern void (*pmap_invalidate_vpipt_icache)(void); | ||||
/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
/* Zero an entry (atomic 64-bit store). */
#define	pmap_clear(table)		atomic_store_64(table, 0)
/* Atomically clear the given bits in an entry. */
#define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
/*
 * Plain dereference, not an atomic_* op; presumably relies on naturally
 * aligned 64-bit loads being single-copy atomic on arm64 — NOTE(review):
 * confirm against the architecture requirements.
 */
#define	pmap_load(table)		(*table)
/* Atomically read an entry and replace it with zero. */
#define	pmap_load_clear(table)		atomic_swap_64(table, 0)
/* Atomically read an entry and replace it with a new value. */
#define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
/* Atomically set the given bits in an entry. */
#define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
/* Atomic 64-bit store of a new entry value. */
#define	pmap_store(table, entry)	atomic_store_64(table, entry)

/********************/
/* Inline functions */
/********************/
static inline int | static inline int | ||||
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused) | pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused) | ||||
{ | { | ||||
return (0); | return (0); | ||||
} | |||||
static __inline void | |||||
pagecopy(void *s, void *d) | |||||
{ | |||||
memcpy(d, s, PAGE_SIZE); | |||||
} | |||||
static __inline pd_entry_t * | |||||
pmap_l0(pmap_t pmap, vm_offset_t va) | |||||
{ | |||||
return (&pmap->pm_l0[pmap_l0_index(va)]); | |||||
} | |||||
static __inline pd_entry_t * | |||||
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va) | |||||
{ | |||||
pd_entry_t *l1; | |||||
l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK); | |||||
return (&l1[pmap_l1_index(va)]); | |||||
} | |||||
static __inline pd_entry_t * | |||||
pmap_l1(pmap_t pmap, vm_offset_t va) | |||||
{ | |||||
pd_entry_t *l0; | |||||
l0 = pmap_l0(pmap, va); | |||||
if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE) | |||||
return (NULL); | |||||
return (pmap_l0_to_l1(l0, va)); | |||||
} | |||||
/*
 * Given a pointer to an L1 table entry, return a pointer to the L2 entry
 * covering va.  Asserts that the L1 entry is a table descriptor; the
 * caller is responsible for having established this.
 */
static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	/* Load the entry once so both assertions see the same snapshot. */
	l1 = pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	/* The entry payload is the physical address of the L2 table. */
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[pmap_l2_index(va)]);
}
static __inline pd_entry_t * | |||||
pmap_l2(pmap_t pmap, vm_offset_t va) | |||||
{ | |||||
pd_entry_t *l1; | |||||
l1 = pmap_l1(pmap, va); | |||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE) | |||||
return (NULL); | |||||
return (pmap_l1_to_l2(l1, va)); | |||||
} | |||||
/*
 * Given a pointer to an L2 table entry, return a pointer to the L3 (leaf
 * page) entry covering va.  Asserts that the L2 entry is a table
 * descriptor; the caller is responsible for having established this.
 */
static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	/* Load the entry once so both assertions see the same snapshot. */
	l2 = pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	/* The entry payload is the physical address of the L3 table. */
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[pmap_l3_index(va)]);
}
/* | |||||
* Returns the lowest valid pde for a given virtual address. | |||||
* The next level may or may not point to a valid page or block. | |||||
*/ | |||||
static __inline pd_entry_t * | |||||
pmap_pde(pmap_t pmap, vm_offset_t va, int *level) | |||||
{ | |||||
pd_entry_t *l0, *l1, *l2, desc; | |||||
l0 = pmap_l0(pmap, va); | |||||
desc = pmap_load(l0) & ATTR_DESCR_MASK; | |||||
if (desc != L0_TABLE) { | |||||
*level = -1; | |||||
return (NULL); | |||||
} | |||||
l1 = pmap_l0_to_l1(l0, va); | |||||
desc = pmap_load(l1) & ATTR_DESCR_MASK; | |||||
if (desc != L1_TABLE) { | |||||
*level = 0; | |||||
return (l0); | |||||
} | |||||
l2 = pmap_l1_to_l2(l1, va); | |||||
desc = pmap_load(l2) & ATTR_DESCR_MASK; | |||||
if (desc != L2_TABLE) { | |||||
*level = 1; | |||||
return (l1); | |||||
} | |||||
*level = 2; | |||||
return (l2); | |||||
} | |||||
/* | |||||
* Returns the lowest valid pte block or table entry for a given virtual | |||||
* address. If there are no valid entries return NULL and set the level to | |||||
* the first invalid level. | |||||
*/ | |||||
static __inline pt_entry_t * | |||||
pmap_pte(pmap_t pmap, vm_offset_t va, int *level) | |||||
{ | |||||
pd_entry_t *l1, *l2, desc; | |||||
pt_entry_t *l3; | |||||
l1 = pmap_l1(pmap, va); | |||||
if (l1 == NULL) { | |||||
*level = 0; | |||||
return (NULL); | |||||
} | |||||
desc = pmap_load(l1) & ATTR_DESCR_MASK; | |||||
if (desc == L1_BLOCK) { | |||||
*level = 1; | |||||
return (l1); | |||||
} | |||||
if (desc != L1_TABLE) { | |||||
*level = 1; | |||||
return (NULL); | |||||
} | |||||
l2 = pmap_l1_to_l2(l1, va); | |||||
desc = pmap_load(l2) & ATTR_DESCR_MASK; | |||||
if (desc == L2_BLOCK) { | |||||
*level = 2; | |||||
return (l2); | |||||
} | |||||
if (desc != L2_TABLE) { | |||||
*level = 2; | |||||
return (NULL); | |||||
} | |||||
*level = 3; | |||||
l3 = pmap_l2_to_l3(l2, va); | |||||
if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE) | |||||
return (NULL); | |||||
return (l3); | |||||
} | |||||
static __inline int | |||||
pmap_l3_valid(pt_entry_t l3) | |||||
{ | |||||
return ((l3 & ATTR_DESCR_MASK) == L3_PAGE); | |||||
} | |||||
static __inline void | |||||
pmap_resident_count_inc(pmap_t pmap, int count) | |||||
{ | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | |||||
pmap->pm_stats.resident_count += count; | |||||
} | |||||
/*
 * Lower pmap's resident page count by count.  The pmap lock must be
 * held; asserts that the count does not underflow.
 */
static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}
#endif /* _KERNEL */ | #endif /* _KERNEL */ | ||||
#endif /* !LOCORE */ | #endif /* !LOCORE */ | ||||
#endif /* !_MACHINE_PMAP_H_ */ | #endif /* !_MACHINE_PMAP_H_ */ |