head/sys/amd64/amd64/pmap.c
@@ … 642 lines skipped … @@
 static bool	pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		    vm_prot_t prot, struct rwlock **lockp);
 static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
 		    u_int flags, vm_page_t m, struct rwlock **lockp);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 		    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void	pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int	pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void	pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
+		    vm_offset_t eva);
+static void	pmap_invalidate_cache_range_all(vm_offset_t sva,
+		    vm_offset_t eva);
 static void	pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
 		    pd_entry_t pde);
 static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
 #if VM_NRESERVLEVEL > 0
 static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 		    struct rwlock **lockp);
 #endif
@@ … 1,507 lines skipped … @@ pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
 	 * TLB.
 	 */
 	if ((pde & PG_PROMOTED) != 0)
 		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
 	else
 		pmap_invalidate_page(pmap, va);
 }
+
+DEFINE_IFUNC(, void, pmap_invalidate_cache_range,
+    (vm_offset_t sva, vm_offset_t eva), static)
+{
+
+	if ((cpu_feature & CPUID_SS) != 0)
+		return (pmap_invalidate_cache_range_selfsnoop);
+	if ((cpu_feature & CPUID_CLFSH) != 0)
+		return (pmap_force_invalidate_cache_range);
+	return (pmap_invalidate_cache_range_all);
+}
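
Note for readers unfamiliar with DEFINE_IFUNC: the block above is a resolver, not an ordinary function. It runs once, when the kernel linker processes the object, and returns the pointer that every later call to pmap_invalidate_cache_range() binds to directly, so the CPUID feature tests are paid once at boot instead of on every flush. Below is a minimal userland sketch of the underlying GNU ifunc mechanism, not the kernel macro's actual expansion; all names are illustrative. It assumes GCC on an ELF platform, and uses the same feature bits the resolver tests (CPUID.1:EDX bit 27 is SS, bit 19 is CLFSH):

    #include <cpuid.h>
    #include <stdio.h>

    static void flush_selfsnoop(void) { puts("no-op: CPU self-snoops"); }
    static void flush_clflush(void)   { puts("per-line CLFLUSH loop"); }
    static void flush_wbinvd(void)    { puts("global cache invalidate"); }

    /*
     * Resolver: runs once at load time; its return value is patched
     * into the PLT slot, so later calls to cache_flush() are direct.
     */
    static void (*resolve_cache_flush(void))(void)
    {
    	unsigned int a, b, c, d;

    	if (!__get_cpuid(1, &a, &b, &c, &d))
    		return (flush_wbinvd);
    	if (d & (1u << 27))	/* CPUID.1:EDX.SS */
    		return (flush_selfsnoop);
    	if (d & (1u << 19))	/* CPUID.1:EDX.CLFSH */
    		return (flush_clflush);
    	return (flush_wbinvd);
    }

    void cache_flush(void) __attribute__((ifunc("resolve_cache_flush")));

    int
    main(void)
    {
    	cache_flush();
    	return (0);
    }

Resolution order matters here: self-snoop is tested first because it makes the ranged flush a no-op, which is the cheapest outcome.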
 
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
-void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
-{
-	if (force) {
-		sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
-	} else {
-		KASSERT((sva & PAGE_MASK) == 0,
-		    ("pmap_invalidate_cache_range: sva not page-aligned"));
-		KASSERT((eva & PAGE_MASK) == 0,
-		    ("pmap_invalidate_cache_range: eva not page-aligned"));
-	}
-
-	if ((cpu_feature & CPUID_SS) != 0 && !force)
-		; /* If "Self Snoop" is supported and allowed, do nothing. */
-	else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
-	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
-		/*
-		 * XXX: Some CPUs fault, hang, or trash the local APIC
-		 * registers if we use CLFLUSH on the local APIC
-		 * range.  The local APIC is always uncached, so we
-		 * don't need to flush for that range anyway.
-		 */
-		if (pmap_kextract(sva) == lapic_paddr)
-			return;
-		/*
-		 * Otherwise, do per-cache line flush.  Use the sfence
-		 * instruction to insure that previous stores are
-		 * included in the write-back.  The processor
-		 * propagates flush to other processors in the cache
-		 * coherence domain.
-		 */
-		sfence();
-		for (; sva < eva; sva += cpu_clflush_line_size)
-			clflushopt(sva);
-		sfence();
-	} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
-	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
-		if (pmap_kextract(sva) == lapic_paddr)
-			return;
-		/*
-		 * Writes are ordered by CLFLUSH on Intel CPUs.
-		 */
-		if (cpu_vendor_id != CPU_VENDOR_INTEL)
-			mfence();
-		for (; sva < eva; sva += cpu_clflush_line_size)
-			clflush(sva);
-		if (cpu_vendor_id != CPU_VENDOR_INTEL)
-			mfence();
-	} else {
-		/*
-		 * No targeted cache flush methods are supported by CPU,
-		 * or the supplied range is bigger than 2MB.
-		 * Globally invalidate cache.
-		 */
-		pmap_invalidate_cache();
-	}
-}
+static void
+pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
+{
+
+	KASSERT((sva & PAGE_MASK) == 0,
+	    ("pmap_invalidate_cache_range: sva not page-aligned"));
+	KASSERT((eva & PAGE_MASK) == 0,
+	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+}
+
+static void
+pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
+{
+
+	pmap_invalidate_cache_range_check_align(sva, eva);
+}
+
+void
+pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+{
+
+	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
+		/*
+		 * The supplied range is bigger than 2MB.
+		 * Globally invalidate cache.
+		 */
+		pmap_invalidate_cache();
+		return;
+	}
+
+	/*
+	 * XXX: Some CPUs fault, hang, or trash the local APIC
+	 * registers if we use CLFLUSH on the local APIC range.  The
+	 * local APIC is always uncached, so we don't need to flush
+	 * for that range anyway.
+	 */
+	if (pmap_kextract(sva) == lapic_paddr)
+		return;
+
+	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
+		/*
+		 * Do per-cache line flush.  Use the sfence
+		 * instruction to insure that previous stores are
+		 * included in the write-back.  The processor
+		 * propagates flush to other processors in the cache
+		 * coherence domain.
+		 */
+		sfence();
+		for (; sva < eva; sva += cpu_clflush_line_size)
+			clflushopt(sva);
+		sfence();
+	} else {
+		/*
+		 * Writes are ordered by CLFLUSH on Intel CPUs.
+		 */
+		if (cpu_vendor_id != CPU_VENDOR_INTEL)
+			mfence();
+		for (; sva < eva; sva += cpu_clflush_line_size)
+			clflush(sva);
+		if (cpu_vendor_id != CPU_VENDOR_INTEL)
+			mfence();
+	}
+}
+
+static void
+pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
+{
+
+	pmap_invalidate_cache_range_check_align(sva, eva);
+	pmap_invalidate_cache();
+}
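
The ordering requirements are the subtle part of the hunk above: CLFLUSHOPT is weakly ordered, so its loop is bracketed by SFENCE on both sides, while legacy CLFLUSH is already ordered against writes on Intel CPUs (per the source comment) and only needs MFENCE on other vendors. A hypothetical userland analogue of the CLFLUSHOPT loop, assuming 64-byte cache lines and CLFLUSHOPT support; the kernel instead uses cpu_clflush_line_size, and this sketch needs -mclflushopt to build:

    #include <immintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    #define	CACHE_LINE	64	/* assumed line size for this sketch */

    static void
    flush_range(const void *buf, size_t len)
    {
    	uintptr_t p, end;

    	if (len == 0)
    		return;
    	/* Round down to a line boundary, as the kernel code does. */
    	p = (uintptr_t)buf & ~(uintptr_t)(CACHE_LINE - 1);
    	end = (uintptr_t)buf + len;

    	_mm_sfence();			/* order earlier stores before the flushes */
    	for (; p < end; p += CACHE_LINE)
    		_mm_clflushopt((void *)p);
    	_mm_sfence();			/* flushes complete before later stores */
    }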
 
 /*
  * Remove the specified set of pages from the data and instruction caches.
  *
  * In contrast to pmap_invalidate_cache_range(), this function does not
  * rely on the CPU's self-snoop feature, because it is intended for use
  * when moving pages into a different cache domain.
  */
@@ … 4,689 lines skipped … @@ if (!pmap_initialized) {
 		}
 		va = kva_alloc(size);
 		if (va == 0)
 			panic("%s: Couldn't allocate KVA", __func__);
 	}
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
+	pmap_invalidate_cache_range(va, va + tmpsize);
 	return ((void *)(va + offset));
 }
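
For callers, the visible API change is the dropped boolean_t argument: the ifunc now picks the right behavior for the common case, and code that previously passed force = TRUE should call pmap_force_invalidate_cache_range() directly. An illustrative fragment (not taken from this diff; va and size stand for a caller's mapped range):

    /* Old API: the third argument selected forced flushing. */
    pmap_invalidate_cache_range(va, va + size, FALSE);
    pmap_invalidate_cache_range(va, va + size, TRUE);

    /* New API: may be a no-op on self-snooping CPUs ... */
    pmap_invalidate_cache_range(va, va + size);
    /* ... while the forced flush is an explicit entry point. */
    pmap_force_invalidate_cache_range(va, va + size);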
 
 void *
 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
@@ … 342 lines skipped … @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 	}
 
 	/*
 	 * Flush CPU caches if required to make sure any data isn't cached that
 	 * shouldn't be, etc.
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva, FALSE);
+		pmap_invalidate_cache_range(base, tmpva);
 	}
 	return (error);
 }
 
 /*
  * Demotes any mapping within the direct map region that covers more than the
  * specified range of physical addresses.  This range's size must be a power
  * of two and its starting address must be a multiple of its size.  Since the
@@ … 1,009 lines skipped … @@