Changeset View
Changeset View
Standalone View
Standalone View
sys/arm/include/cpu-v6.h
Show First 20 Lines • Show All 338 Lines • ▼ Show 20 Lines | tlb_flush_range_local(vm_offset_t va, vm_size_t size) | ||||
dsb(); | dsb(); | ||||
for (; va < eva; va += PAGE_SIZE) | for (; va < eva; va += PAGE_SIZE) | ||||
_CP15_TLBIMVA(va | CPU_ASID_KERNEL); | _CP15_TLBIMVA(va | CPU_ASID_KERNEL); | ||||
dsb(); | dsb(); | ||||
} | } | ||||
/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined SMP | #if __ARM_ARCH >= 7 && defined SMP | ||||
#ifdef CPU_CORTEXA8 | |||||
#define ARM_HAVE_MP_EXTENSIONS (cpuinfo.mp_ext != 0) | |||||
#else | |||||
#define ARM_HAVE_MP_EXTENSIONS 1 | |||||
#endif | |||||
skra: I think that SMP is not supported for `__ARM_ARCH == 6` as hardware TLB maintenance… | |||||
static __inline void | static __inline void | ||||
tlb_flush_all(void) | tlb_flush_all(void) | ||||
{ | { | ||||
dsb(); | dsb(); | ||||
Not Done Inline ActionsHere and on other places below, test cpuinfo.mp_ext only if ARM_GENERIC_KERNEL is defined. skra: Here and on other places below, test `cpuinfo.mp_ext` only if ARM_GENERIC_KERNEL is defined. | |||||
if (ARM_HAVE_MP_EXTENSIONS) | |||||
_CP15_TLBIALL(); | |||||
#ifdef CPU_CORTEXA8 | |||||
else | |||||
_CP15_TLBIALLIS(); | _CP15_TLBIALLIS(); | ||||
#endif | |||||
dsb(); | dsb(); | ||||
skraUnsubmitted Not Done Inline ActionsHow does it work in GENERIC kernel case? skra: How does it work in GENERIC kernel case? | |||||
} | } | ||||
static __inline void | static __inline void | ||||
tlb_flush_all_ng(void) | tlb_flush_all_ng(void) | ||||
{ | { | ||||
dsb(); | dsb(); | ||||
if (ARM_HAVE_MP_EXTENSIONS) | |||||
_CP15_TLBIASID(CPU_ASID_KERNEL); | |||||
#ifdef CPU_CORTEXA8 | |||||
else | |||||
_CP15_TLBIASIDIS(CPU_ASID_KERNEL); | _CP15_TLBIASIDIS(CPU_ASID_KERNEL); | ||||
#endif | |||||
dsb(); | dsb(); | ||||
} | } | ||||
static __inline void | static __inline void | ||||
tlb_flush(vm_offset_t va) | tlb_flush(vm_offset_t va) | ||||
{ | { | ||||
KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); | KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); | ||||
dsb(); | dsb(); | ||||
if (ARM_HAVE_MP_EXTENSIONS) | |||||
_CP15_TLBIMVA(va | CPU_ASID_KERNEL); | |||||
#ifdef CPU_CORTEXA8 | |||||
else | |||||
_CP15_TLBIMVAAIS(va); | _CP15_TLBIMVAAIS(va); | ||||
#endif | |||||
dsb(); | dsb(); | ||||
} | } | ||||
static __inline void | static __inline void | ||||
tlb_flush_range(vm_offset_t va, vm_size_t size) | tlb_flush_range(vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
vm_offset_t eva = va + size; | vm_offset_t eva = va + size; | ||||
KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); | KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); | ||||
KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, | KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, | ||||
size)); | size)); | ||||
dsb(); | dsb(); | ||||
if (ARM_HAVE_MP_EXTENSIONS) { | |||||
for (; va < eva; va += PAGE_SIZE) | for (; va < eva; va += PAGE_SIZE) | ||||
_CP15_TLBIMVA(va | CPU_ASID_KERNEL); | |||||
} | |||||
#ifdef CPU_CORTEXA8 | |||||
else { | |||||
for (; va < eva; va += PAGE_SIZE) | |||||
_CP15_TLBIMVAAIS(va); | _CP15_TLBIMVAAIS(va); | ||||
} | |||||
#endif | |||||
dsb(); | dsb(); | ||||
} | } | ||||
#else /* SMP */

/* On UP kernels the broadcasting operations reduce to the local ones. */
#define tlb_flush_all() 		tlb_flush_all_local()
#define tlb_flush_all_ng() 		tlb_flush_all_ng_local()
#define tlb_flush(va) 			tlb_flush_local(va)
#define tlb_flush_range(va, size) 	tlb_flush_range_local(va, size)

#endif /* SMP */
/*
 * Cache maintenance operations.
 */
/* Sync I and D caches to PoU */ | /* Sync I and D caches to PoU */ | ||||
static __inline void | static __inline void | ||||
icache_sync(vm_offset_t va, vm_size_t size) | icache_sync(vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
vm_offset_t eva = va + size; | vm_offset_t eva = va + size; | ||||
dsb(); | dsb(); | ||||
va &= ~cpuinfo.dcache_line_mask; | va &= ~cpuinfo.dcache_line_mask; | ||||
for ( ; va < eva; va += cpuinfo.dcache_line_size) { | |||||
#if __ARM_ARCH >= 7 && defined SMP | #if __ARM_ARCH >= 7 && defined SMP | ||||
Not Done Inline ActionsCould same solution be used like in tlb_flush_range() to not test mp_ncpus repeatedly in a loop? skra: Could same solution be used like in tlb_flush_range() to not test mp_ncpus repeatedly in a loop? | |||||
if (ARM_HAVE_MP_EXTENSIONS) { | |||||
for ( ; va < eva; va += cpuinfo.dcache_line_size) | |||||
_CP15_DCCMVAU(va); | _CP15_DCCMVAU(va); | ||||
#else | } else | ||||
_CP15_DCCMVAC(va); | |||||
#endif | #endif | ||||
{ | |||||
for ( ; va < eva; va += cpuinfo.dcache_line_size) | |||||
_CP15_DCCMVAC(va); | |||||
} | } | ||||
dsb(); | dsb(); | ||||
#if __ARM_ARCH >= 7 && defined SMP | #if __ARM_ARCH >= 7 && defined SMP | ||||
if (ARM_HAVE_MP_EXTENSIONS) | |||||
_CP15_ICIALLUIS(); | _CP15_ICIALLUIS(); | ||||
#else | else | ||||
_CP15_ICIALLU(); | |||||
#endif | #endif | ||||
_CP15_ICIALLU(); | |||||
dsb(); | dsb(); | ||||
isb(); | isb(); | ||||
} | } | ||||
/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

	/* Broadcast with ICIALLUIS when the MP extensions are present. */
#if __ARM_ARCH >= 7 && defined SMP
	if (ARM_HAVE_MP_EXTENSIONS)
		_CP15_ICIALLUIS();
	else
#endif
		_CP15_ICIALLU();
	dsb();
	isb();
}
/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

	/* Broadcast with BPIALLIS when the MP extensions are present. */
#if __ARM_ARCH >= 7 && defined SMP
	if (ARM_HAVE_MP_EXTENSIONS)
		_CP15_BPIALLIS();
	else
#endif
		_CP15_BPIALL();
	dsb();
	isb();
}
/* Write back D-cache to PoU */ | /* Write back D-cache to PoU */ | ||||
static __inline void | static __inline void | ||||
dcache_wb_pou(vm_offset_t va, vm_size_t size) | dcache_wb_pou(vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
vm_offset_t eva = va + size; | vm_offset_t eva = va + size; | ||||
dsb(); | dsb(); | ||||
va &= ~cpuinfo.dcache_line_mask; | va &= ~cpuinfo.dcache_line_mask; | ||||
for ( ; va < eva; va += cpuinfo.dcache_line_size) { | |||||
#if __ARM_ARCH >= 7 && defined SMP | #if __ARM_ARCH >= 7 && defined SMP | ||||
Not Done Inline ActionsSame note like in icache_sync(). skra: Same note like in icache_sync(). | |||||
if (ARM_HAVE_MP_EXTENSIONS) { | |||||
for ( ; va < eva; va += cpuinfo.dcache_line_size) | |||||
_CP15_DCCMVAU(va); | _CP15_DCCMVAU(va); | ||||
#else | } else | ||||
_CP15_DCCMVAC(va); | |||||
#endif | #endif | ||||
{ | |||||
for ( ; va < eva; va += cpuinfo.dcache_line_size) | |||||
_CP15_DCCMVAC(va); | |||||
} | } | ||||
dsb(); | dsb(); | ||||
} | } | ||||
/* | /* | ||||
* Invalidate D-cache to PoC | * Invalidate D-cache to PoC | ||||
* | * | ||||
* Caches are invalidated from outermost to innermost as fresh cachelines | * Caches are invalidated from outermost to innermost as fresh cachelines | ||||
▲ Show 20 Lines • Show All 157 Lines • Show Last 20 Lines |
I think SMP is not supported for `__ARM_ARCH == 6`, since hardware TLB-maintenance broadcasting is required. If so, add a `CTASSERT(__ARM_ARCH >= 7, ...)` when SMP is defined — with an explanatory comment — and test only `defined(SMP)` here.
The `__ARM_ARCH >= 7 && defined SMP` test appears in several more places, so if I'm correct it can be replaced by `defined(SMP)` everywhere.