sys/amd64/amd64/pmap.c
[... 1,094 lines elided ...]
 /*
  * Internal flags for pmap_enter()'s helper functions.
  */
 #define PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
 #define PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */

 /*
  * Internal flags for pmap_mapdev_internal() and
- * pmap_change_attr_locked().
+ * pmap_change_props_locked().
  */
-#define MAPDEV_FLUSHCACHE	0x0000001	/* Flush cache after mapping. */
-#define MAPDEV_SETATTR		0x0000002	/* Modify existing attrs. */
+#define MAPDEV_FLUSHCACHE	0x00000001	/* Flush cache after mapping. */
+#define MAPDEV_SETATTR		0x00000002	/* Modify existing attrs. */
+#define MAPDEV_ASSERTVALID	0x00000004	/* Assert mapping validity. */
 static void free_pv_chunk(struct pv_chunk *pc);
 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
 static int popcnt_pc_map_pq(uint64_t *map);
 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
 static void reserve_pv_entries(pmap_t pmap, int needed,
     struct rwlock **lockp);
 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
     struct rwlock **lockp);
 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
     u_int flags, struct rwlock **lockp);
 #if VM_NRESERVLEVEL > 0
 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
     struct rwlock **lockp);
 #endif
 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
     vm_offset_t va);
-static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode,
-    int flags);
+static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
+    vm_prot_t prot, int mode, int flags);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t va, struct rwlock **lockp);
 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
     vm_offset_t va);
 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, struct rwlock **lockp);
 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
     u_int flags, vm_page_t m, struct rwlock **lockp);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
     vm_offset_t eva);
 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
     vm_offset_t eva);
 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
     pd_entry_t pde);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static vm_page_t pmap_large_map_getptp_unlocked(void);
 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
-static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
+static void pmap_pde_props(pd_entry_t *pde, u_long bits, u_long mask);
 #if VM_NRESERVLEVEL > 0
 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
     struct rwlock **lockp);
 #endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
     vm_prot_t prot);
-static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
+static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
     bool exec);
 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
 static pd_entry_t *pmap_pti_pde(vm_offset_t va);
 static void pmap_pti_wire_pte(void *pte);
 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
     struct spglist *free, struct rwlock **lockp);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
[... 6,588 lines elided; context resumes in the "restart:" block ...]
 	}
 	rw_wunlock(lock);
 }
 /*
  * Miscellaneous support routines follow
  */

-/* Adjust the cache mode for a 4KB page mapped via a PTE. */
+/* Adjust the properties for a 4KB page mapped via a PTE. */
 static __inline void
-pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask)
+pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
 {
-	u_int opte, npte;
+	u_long opte, npte;

-	/*
-	 * The cache mode bits are all in the low 32-bits of the
-	 * PTE, so we can just spin on updating the low 32-bits.
-	 */
+	opte = *(u_long *)pte;
 	do {
-		opte = *(u_int *)pte;
 		npte = opte & ~mask;
-		npte |= cache_bits;
-	} while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
+		npte |= bits;
+	} while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
+	    npte));
 }

-/* Adjust the cache mode for a 2MB page mapped via a PDE. */
+/* Adjust the properties for a 2MB page mapped via a PDE. */
 static __inline void
-pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
+pmap_pde_props(pd_entry_t *pde, u_long bits, u_long mask)
 {
-	u_int opde, npde;
+	u_long opde, npde;

-	/*
-	 * The cache mode bits are all in the low 32-bits of the
-	 * PDE, so we can just spin on updating the low 32-bits.
-	 */
+	opde = *(u_long *)pde;
 	do {
-		opde = *(u_int *)pde;
 		npde = opde & ~mask;
-		npde |= cache_bits;
-	} while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
+		npde |= bits;
+	} while (npde != opde && !atomic_fcmpset_long((u_long *)pde, &opde,
+	    npde));
 }
kib: After you changed types to u_long, it makes no sense to have pmap_pde_props(). Maybe it can exist as a macro, but I do not see much benefit.

markj (author): Hmm, I do not see why it made sense before this diff. The only difference is the use of pd_entry_t vs. pt_entry_t. I will merge them.
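[Editor's note] A minimal sketch of the merge markj proposes, grounded only in the two bodies above. On amd64 both pt_entry_t and pd_entry_t are 64-bit, so one routine can serve every level; the name pmap_pt_props and the KASSERT are hypothetical, and the committed version might simply keep pmap_pte_props() for all levels. Note that atomic_fcmpset_long() reloads opte on failure, which is why it is read only once before the loop:

    /* Adjust the properties of a page-table entry at any level (sketch). */
    static __inline void
    pmap_pt_props(pt_entry_t *pte, u_long bits, u_long mask)
    {
        u_long opte, npte;

        KASSERT((bits & ~mask) == 0, ("bits must be a subset of mask"));
        opte = *(u_long *)pte;
        do {
            npte = (opte & ~mask) | bits;
        } while (npte != opte &&
            !atomic_fcmpset_long((u_long *)pte, &opte, npte));
    }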
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space.  Return a pointer to where it is mapped.  This
  * routine is intended to be used for mapping device memory,
  * NOT real memory.
  */
 static void *
[... 38 lines elided; context resumes in the "if (!pmap_initialized) {" block ...]
 	/*
 	 * If the specified range of physical addresses fits within
 	 * the direct map window, use the direct map.
 	 */
 	if (pa < dmaplimit && pa + size <= dmaplimit) {
 		va = PHYS_TO_DMAP(pa);
 		if ((flags & MAPDEV_SETATTR) != 0) {
 			PMAP_LOCK(kernel_pmap);
-			i = pmap_change_attr_locked(va, size, mode, flags);
+			i = pmap_change_props_locked(va, size,
+			    PROT_NONE, mode, flags);
 			PMAP_UNLOCK(kernel_pmap);
 		} else
 			i = 0;
 		if (!i)
 			return ((void *)(va + offset));
 	}
 	va = kva_alloc(size);
 	if (va == 0)
[... 169 lines elided ...]
  * virtual address range or the direct map.
  */
 int
 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 {
 	int error;

 	PMAP_LOCK(kernel_pmap);
-	error = pmap_change_attr_locked(va, size, mode, MAPDEV_FLUSHCACHE);
+	error = pmap_change_props_locked(va, size, PROT_NONE, mode,
+	    MAPDEV_FLUSHCACHE);
 	PMAP_UNLOCK(kernel_pmap);
 	return (error);
 }
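[Editor's note] For reference, a typical pmap_change_attr() caller looks like the following sketch (not part of this diff; "fb_va"/"fb_size" are placeholders; PAT_WRITE_COMBINING is one of the PAT modes accepted as "mode"):

    /*
     * Hypothetical driver fragment: make an existing framebuffer
     * mapping write-combining after the fact.
     */
    static int
    fb_set_wc(vm_offset_t fb_va, vm_size_t fb_size)
    {
        return (pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING));
    }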
+/*
+ * Changes the specified virtual address range's protections to those
+ * specified by "prot".  Like pmap_change_attr(), protections for aliases
+ * in the direct map are updated as well.  Protections on aliasing mappings may
+ * be a subset of the requested protections; for example, mappings in the direct
+ * map are never executable.
+ */

kib: Why do we need to change protection on the direct map?

markj (author): Mainly to ensure that a read-only mapping is not writeable via the direct map. We do this already for the amd64 kernel image itself, in create_pagetables().

markj (author): Let me try to justify this approach further: initially I implemented a standalone pmap_change_prot() which only operated on the kernel map, i.e., direct map aliases were ignored. Then I realized that it was very similar to pmap_change_attr(), so I merged them. I am not sure that it is really important to modify the direct map for protection changes in the kernel map (this can be bypassed by clearing WP, after all), but conceptually it is weird to ignore the direct map alias if the intent is to restrict protections on kernel mappings, and it may help catch bugs. I also believe it will not cause excessive demotions: preloaded kernel modules are contiguous in physical memory, and modules loaded by link_elf.c will generally be backed by pages allocated from a reservation, provided that they are loaded during system startup.

+int
+pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
+{
+	int error;
+
+	/* Only supported within the kernel map. */
+	if (va < VM_MIN_KERNEL_ADDRESS)
+		return (EINVAL);
+
+	PMAP_LOCK(kernel_pmap);
+	error = pmap_change_props_locked(va, size, prot, -1,
+	    MAPDEV_ASSERTVALID);
+	PMAP_UNLOCK(kernel_pmap);
+	return (error);
+}
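[Editor's note] A usage sketch consistent with the create_pagetables() precedent markj cites (hypothetical caller; "text_base"/"text_size" are placeholders, not names from this diff):

    /*
     * Illustrative only: drop write access to a module's text once
     * relocation is complete.
     */
    static int
    module_protect_text(vm_offset_t text_base, vm_size_t text_size)
    {
        return (pmap_change_prot(text_base, text_size,
            VM_PROT_READ | VM_PROT_EXECUTE));
    }

Since VM_PROT_WRITE is absent, X86_PG_RW is cleared on both the kernel mapping and its direct-map alias; the alias additionally gets pg_nx because it lies below VM_MIN_KERNEL_ADDRESS, matching the "never executable" rule in the comment above.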
 static int
-pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
+pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
+    int mode, int flags)
 {
 	vm_offset_t base, offset, tmpva;
 	vm_paddr_t pa_start, pa_end, pa_end1;
 	pdp_entry_t *pdpe;
-	pd_entry_t *pde;
-	pt_entry_t *pte;
-	int cache_bits_pte, cache_bits_pde, error;
-	boolean_t changed;
+	pd_entry_t *pde, pde_bits, pde_mask;
+	pt_entry_t *pte, pte_bits, pte_mask;
+	int error;
+	bool changed;

 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
 	size = round_page(offset + size);

 	/*
 	 * Only supported on kernel virtual addresses, including the direct
 	 * map but excluding the recursive map.
 	 */
 	if (base < DMAP_MIN_ADDRESS)
 		return (EINVAL);
-	cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
-	cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
-	changed = FALSE;
+	/*
+	 * Construct our flag sets and masks.  "bits" is the subset of
+	 * "mask" that will be set in each modified PTE.
+	 *
+	 * Mappings in the direct map are never allowed to be executable.
+	 */
+	pde_bits = pte_bits = 0;
+	pde_mask = pte_mask = 0;
+	if (mode != -1) {
+		pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
+		pde_mask |= X86_PG_PDE_CACHE;
+		pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
+		pte_mask |= X86_PG_PTE_CACHE;
+	}
+	if (prot != VM_PROT_NONE) {
+		if ((prot & VM_PROT_WRITE) != 0) {
+			pde_bits |= X86_PG_RW;
+			pte_bits |= X86_PG_RW;
+		}
+		if ((prot & VM_PROT_EXECUTE) == 0 ||
+		    va < VM_MIN_KERNEL_ADDRESS) {
+			pde_bits |= pg_nx;
+			pte_bits |= pg_nx;
+		}
+		pde_mask |= X86_PG_RW | pg_nx;
+		pte_mask |= X86_PG_RW | pg_nx;
+	}

kib: Don't we need/want to preset PG_M?

markj (author): I'm not sure. I think that would be incorrect for the pageable submaps. In other cases we always preset PG_M, I believe, so I can't see a case where setting PG_M here would change anything.
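[Editor's note] To make the bits/mask split concrete, here is a self-contained userland sketch. The constants are simplified stand-ins for the pmap definitions (pg_nx in particular is a runtime variable in the kernel, not a macro): requesting VM_PROT_READ yields bits = NX with RW clear, under mask = RW|NX, and an entry is rewritten only when (entry & mask) != bits.

    #include <stdint.h>
    #include <stdio.h>

    #define PG_RW	0x0000000000000002ULL	/* stand-in for X86_PG_RW */
    #define PG_NX	0x8000000000000000ULL	/* stand-in for pg_nx */

    int
    main(void)
    {
        /* Read-only, non-executable request (VM_PROT_READ). */
        uint64_t bits = PG_NX;		/* NX set, RW deliberately clear */
        uint64_t mask = PG_RW | PG_NX;	/* the only bits we may touch */

        uint64_t pte = 0x12345063ULL;	/* a writeable, executable entry */
        if ((pte & mask) != bits)	/* properties differ: rewrite */
            pte = (pte & ~mask) | bits;
        printf("new pte: %#llx\n", (unsigned long long)pte);
        /* Prints 0x8000000012345061: RW cleared, NX set, rest intact. */
        return (0);
    }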
 	/*
 	 * Pages that aren't mapped aren't supported.  Also break down 2MB pages
 	 * into 4KB pages if required.
 	 */
 	for (tmpva = base; tmpva < base + size; ) {
 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
-		if (pdpe == NULL || *pdpe == 0)
+		if (pdpe == NULL || *pdpe == 0) {
+			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
+			    ("%s: addr %#lx is not mapped", __func__, tmpva));
 			return (EINVAL);
+		}
 		if (*pdpe & PG_PS) {
 			/*
 			 * If the current 1GB page already has the required
-			 * memory type, then we need not demote this page. Just
+			 * properties, then we need not demote this page.  Just
 			 * increment tmpva to the next 1GB page frame.
 			 */
-			if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) {
+			if ((*pdpe & pde_mask) == pde_bits) {
 				tmpva = trunc_1gpage(tmpva) + NBPDP;
 				continue;
 			}

 			/*
 			 * If the current offset aligns with a 1GB page frame
 			 * and there is at least 1GB left within the range, then
 			 * we need not break down this page into 2MB pages.
 			 */
 			if ((tmpva & PDPMASK) == 0 &&
 			    tmpva + PDPMASK < base + size) {
 				tmpva += NBPDP;
 				continue;
 			}
 			if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
 				return (ENOMEM);
 		}
 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
-		if (*pde == 0)
+		if (*pde == 0) {
+			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
+			    ("%s: addr %#lx is not mapped", __func__, tmpva));
 			return (EINVAL);
+		}
 		if (*pde & PG_PS) {
 			/*
 			 * If the current 2MB page already has the required
-			 * memory type, then we need not demote this page. Just
+			 * properties, then we need not demote this page.  Just
 			 * increment tmpva to the next 2MB page frame.
 			 */
-			if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) {
+			if ((*pde & pde_mask) == pde_bits) {
 				tmpva = trunc_2mpage(tmpva) + NBPDR;
 				continue;
 			}

 			/*
 			 * If the current offset aligns with a 2MB page frame
 			 * and there is at least 2MB left within the range, then
 			 * we need not break down this page into 4KB pages.
 			 */
 			if ((tmpva & PDRMASK) == 0 &&
 			    tmpva + PDRMASK < base + size) {
 				tmpva += NBPDR;
 				continue;
 			}
 			if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
 				return (ENOMEM);
 		}
 		pte = pmap_pde_to_pte(pde, tmpva);
-		if (*pte == 0)
+		if (*pte == 0) {
+			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
+			    ("%s: addr %#lx is not mapped", __func__, tmpva));
 			return (EINVAL);
+		}
 		tmpva += PAGE_SIZE;
 	}
 	error = 0;

 	/*
 	 * Ok, all the pages exist, so run through them updating their
-	 * cache mode if required.
+	 * properties if required.
 	 */
+	changed = false;
 	pa_start = pa_end = 0;
 	for (tmpva = base; tmpva < base + size; ) {
 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
 		if (*pdpe & PG_PS) {
-			if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) {
-				pmap_pde_attr(pdpe, cache_bits_pde,
-				    X86_PG_PDE_CACHE);
-				changed = TRUE;
+			if ((*pdpe & pde_mask) != pde_bits) {
+				pmap_pde_props(pdpe, pde_bits, pde_mask);
+				changed = true;
 			}
 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
 			    (*pdpe & PG_PS_FRAME) < dmaplimit) {
 				if (pa_start == pa_end) {
 					/* Start physical address run. */
 					pa_start = *pdpe & PG_PS_FRAME;
 					pa_end = pa_start + NBPDP;
 				} else if (pa_end == (*pdpe & PG_PS_FRAME))
 					pa_end += NBPDP;
 				else {
 					/* Run ended, update direct map. */
-					error = pmap_change_attr_locked(
+					error = pmap_change_props_locked(
 					    PHYS_TO_DMAP(pa_start),
-					    pa_end - pa_start, mode, flags);
+					    pa_end - pa_start, prot, mode,
+					    flags);
 					if (error != 0)
 						break;
 					/* Start physical address run. */
 					pa_start = *pdpe & PG_PS_FRAME;
 					pa_end = pa_start + NBPDP;
 				}
 			}
 			tmpva = trunc_1gpage(tmpva) + NBPDP;
 			continue;
 		}
 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
 		if (*pde & PG_PS) {
-			if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) {
-				pmap_pde_attr(pde, cache_bits_pde,
-				    X86_PG_PDE_CACHE);
-				changed = TRUE;
+			if ((*pde & pde_mask) != pde_bits) {
+				pmap_pde_props(pde, pde_bits, pde_mask);
+				changed = true;
 			}
 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
 			    (*pde & PG_PS_FRAME) < dmaplimit) {
 				if (pa_start == pa_end) {
 					/* Start physical address run. */
 					pa_start = *pde & PG_PS_FRAME;
 					pa_end = pa_start + NBPDR;
 				} else if (pa_end == (*pde & PG_PS_FRAME))
 					pa_end += NBPDR;
 				else {
 					/* Run ended, update direct map. */
-					error = pmap_change_attr_locked(
+					error = pmap_change_props_locked(
 					    PHYS_TO_DMAP(pa_start),
-					    pa_end - pa_start, mode, flags);
+					    pa_end - pa_start, prot, mode,
+					    flags);
 					if (error != 0)
 						break;
 					/* Start physical address run. */
 					pa_start = *pde & PG_PS_FRAME;
 					pa_end = pa_start + NBPDR;
 				}
 			}
 			tmpva = trunc_2mpage(tmpva) + NBPDR;
 		} else {
 			pte = pmap_pde_to_pte(pde, tmpva);
-			if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) {
-				pmap_pte_attr(pte, cache_bits_pte,
-				    X86_PG_PTE_CACHE);
-				changed = TRUE;
+			if ((*pte & pte_mask) != pte_bits) {
+				pmap_pte_props(pte, pte_bits, pte_mask);
+				changed = true;
 			}
 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
 			    (*pte & PG_FRAME) < dmaplimit) {
 				if (pa_start == pa_end) {
 					/* Start physical address run. */
 					pa_start = *pte & PG_FRAME;
 					pa_end = pa_start + PAGE_SIZE;
 				} else if (pa_end == (*pte & PG_FRAME))
 					pa_end += PAGE_SIZE;
 				else {
 					/* Run ended, update direct map. */
-					error = pmap_change_attr_locked(
+					error = pmap_change_props_locked(
 					    PHYS_TO_DMAP(pa_start),
-					    pa_end - pa_start, mode, flags);
+					    pa_end - pa_start, prot, mode,
+					    flags);
 					if (error != 0)
 						break;
 					/* Start physical address run. */
 					pa_start = *pte & PG_FRAME;
 					pa_end = pa_start + PAGE_SIZE;
 				}
 			}
 			tmpva += PAGE_SIZE;
 		}
 	}
 	if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
 		pa_end1 = MIN(pa_end, dmaplimit);
 		if (pa_start != pa_end1)
-			error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
-			    pa_end1 - pa_start, mode, flags);
+			error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
+			    pa_end1 - pa_start, prot, mode, flags);
 	}

 	/*
 	 * Flush CPU caches if required to make sure any data isn't cached that
 	 * shouldn't be, etc.
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
[... 2,066 lines elided ...]
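[Editor's note] The direct-map recursion above is batched: contiguous physical ranges are accumulated in pa_start/pa_end, and pmap_change_props_locked() recurses once per run rather than once per page. A self-contained userland sketch of the same coalescing pattern (all names and values illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE	4096

    /* Stand-in for the recursive direct-map update. */
    static void
    update_run(uint64_t start, uint64_t end)
    {
        printf("update direct map: %#lx-%#lx\n",
            (unsigned long)start, (unsigned long)end);
    }

    int
    main(void)
    {
        /* Page frames in mapping order; the first three are adjacent. */
        uint64_t frames[] = { 0x1000, 0x2000, 0x3000, 0x9000 };
        uint64_t pa_start = 0, pa_end = 0;

        for (size_t i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
            if (pa_start == pa_end) {
                /* Start physical address run. */
                pa_start = frames[i];
                pa_end = pa_start + PAGE_SIZE;
            } else if (pa_end == frames[i]) {
                /* Extend the current run. */
                pa_end += PAGE_SIZE;
            } else {
                /* Run ended; issue one batched update. */
                update_run(pa_start, pa_end);
                pa_start = frames[i];
                pa_end = pa_start + PAGE_SIZE;
            }
        }
        if (pa_start != pa_end)
            update_run(pa_start, pa_end);	/* Final run. */
        return (0);
    }

This prints two updates (0x1000-0x4000 and 0x9000-0xa000) instead of four, mirroring how the pmap code trims the last run against dmaplimit before the final recursion.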