Changeset View
Changeset View
Standalone View
Standalone View
head/sys/powerpc/booke/pmap.c
Show First 20 Lines • Show All 122 Lines • ▼ Show 20 Lines | |||||
#include <machine/pmap.h> | #include <machine/pmap.h> | ||||
#include <machine/pte.h> | #include <machine/pte.h> | ||||
#include <ddb/ddb.h> | #include <ddb/ddb.h> | ||||
#include "mmu_if.h" | #include "mmu_if.h" | ||||
#define SPARSE_MAPDEV | #define SPARSE_MAPDEV | ||||
/* Use power-of-two mappings in mmu_booke_mapdev(), to save entries. */ | |||||
#define POW2_MAPPINGS | |||||
#ifdef DEBUG | #ifdef DEBUG | ||||
#define debugf(fmt, args...) printf(fmt, ##args) | #define debugf(fmt, args...) printf(fmt, ##args) | ||||
#else | #else | ||||
#define debugf(fmt, args...) | #define debugf(fmt, args...) | ||||
#endif | #endif | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
#define PRI0ptrX "016lx" | #define PRI0ptrX "016lx" | ||||
▲ Show 20 Lines • Show All 2,067 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e) | tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e) | ||||
{ | { | ||||
int i; | int i; | ||||
for (i = 0; i < TLB1_ENTRIES; i++) { | for (i = 0; i < TLB1_ENTRIES; i++) { | ||||
tlb1_read_entry(e, i); | tlb1_read_entry(e, i); | ||||
if ((e->mas1 & MAS1_VALID) == 0) | if ((e->mas1 & MAS1_VALID) == 0) | ||||
continue; | |||||
if (e->phys == pa) | |||||
return (i); | return (i); | ||||
} | } | ||||
return (-1); | return (-1); | ||||
} | } | ||||
/*
 * Map a range of device (physical) memory with the given memory attribute
 * 'ma', returning a kernel virtual address for it.  First tries to satisfy
 * the request entirely from existing TLB1 entries with matching WIMGE bits;
 * only if that fails is a new TLB1 mapping created.
 */
static void *
mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	tlb_entry_t e;
	vm_paddr_t tmppa;
#ifndef __powerpc64__
	uintptr_t tmpva;
#endif
	uintptr_t va, retva;
	vm_size_t sz;
	int i;
	int wimge;

	/*
	 * Check if this is premapped in TLB1.
	 */
	sz = size;
	tmppa = pa;
	va = ~0;
	wimge = tlb_calc_wimg(pa, ma);
	for (i = 0; i < TLB1_ENTRIES; i++) {
		tlb1_read_entry(&e, i);
		if (!(e.mas1 & MAS1_VALID))
			continue;
		/* Only reuse entries whose cache/guard attributes match. */
		if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
			continue;
		if (tmppa >= e.phys && tmppa < e.phys + e.size) {
			va = e.virt + (pa - e.phys);
			tmppa = e.phys + e.size;
			/* Account only for the part of this entry past 'pa'. */
			sz -= MIN(sz, e.size - (pa - e.phys));
			/*
			 * Chain through physically-contiguous entries until
			 * the whole request is covered.  NOTE: 'i' is
			 * intentionally reused here by tlb1_find_pa().
			 */
			while (sz > 0 && (i = tlb1_find_pa(tmppa, &e)) != -1) {
				if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
					break;
				sz -= MIN(sz, e.size);
				tmppa = e.phys + e.size;
			}
			if (sz != 0)
				break;
			/* Fully covered by existing mappings. */
			return ((void *)va);
		}
	}

	size = roundup(size, PAGE_SIZE);

#ifdef __powerpc64__
	KASSERT(pa < VM_MAPDEV_PA_MAX,
	    ("Unsupported physical address! %lx", pa));
	/* Direct-style mapping: VA is a fixed offset from PA. */
	va = VM_MAPDEV_BASE + pa;
	retva = va;
#ifdef POW2_MAPPINGS
	/*
	 * Align the mapping to a power of 2 size, taking into account that we
	 * may need to increase the size multiple times to satisfy the size and
	 * alignment requirements.
	 *
	 * This works in the general case because it's very rare (near never?)
	 * to have different access properties (WIMG) within a single
	 * power-of-two region.  If a design does call for that, POW2_MAPPINGS
	 * can be undefined, and exact mappings will be used instead.
	 */
	sz = size;
	size = roundup2(size, 1 << ilog2(size));
	/* Grow the entry until the aligned region covers [va, va + sz). */
	while (rounddown2(va, size) + size < va + sz)
		size <<= 1;
	va = rounddown2(va, size);
	pa = rounddown2(pa, size);
#endif
#else
	/*
	 * The device mapping area is between VM_MAXUSER_ADDRESS and
	 * VM_MIN_KERNEL_ADDRESS.  This gives 1GB of device addressing.
	 */
#ifdef SPARSE_MAPDEV
	/*
	 * With a sparse mapdev, align to the largest starting region.  This
	 * could feasibly be optimized for a 'best-fit' alignment, but that
	 * calculation could be very costly.
	 * Align to the smaller of:
	 * - first set bit in overlap of (pa & size mask)
	 * - largest size envelope
	 *
	 * It's possible the device mapping may start at a PA that's not larger
	 * than the size mask, so we need to offset in to maximize the TLB entry
	 * range and minimize the number of used TLB entries.
	 */
	/* Lockless allocation: retry the CAS if tlb1_map_base moved. */
	do {
		tmpva = tlb1_map_base;
		sz = ffsl((~((1 << flsl(size-1)) - 1)) & pa);
		sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
		va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
	} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
#else
	va = atomic_fetchadd_int(&tlb1_map_base, size);
#endif
	retva = va;
#endif

	if (tlb1_mapin_region(va, pa, size, tlb_calc_wimg(pa, ma)) != size)
		return (NULL);

	/* Return the caller's VA, not the possibly rounded-down mapping VA. */
	return ((void *)retva);
}
/* | /* | ||||
* 'Unmap' a range mapped by mmu_booke_mapdev(). | * 'Unmap' a range mapped by mmu_booke_mapdev(). | ||||
*/ | */ | ||||
static void | static void | ||||
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) | mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 272 Lines • ▼ Show 20 Lines | for (i = 0; i < TLB1_ENTRIES; i++) { | ||||
tlb1_read_entry(&e, i); | tlb1_read_entry(&e, i); | ||||
if ((e.mas1 & MAS1_VALID) == 0) | if ((e.mas1 & MAS1_VALID) == 0) | ||||
return (i); | return (i); | ||||
} | } | ||||
return (-1); | return (-1); | ||||
} | } | ||||
/*
 * Invalidate every valid TLB1 entry whose effective (virtual) base address
 * falls inside [va, va + size).  Used to clear stale/overlapping entries
 * before writing a new mapping over the same virtual range.
 */
static void
tlb1_purge_va_range(vm_offset_t va, vm_size_t size)
{
	tlb_entry_t e;
	int i;

	for (i = 0; i < TLB1_ENTRIES; i++) {
		tlb1_read_entry(&e, i);
		if ((e.mas1 & MAS1_VALID) == 0)
			continue;
		if ((e.mas2 & MAS2_EPN_MASK) >= va &&
		    (e.mas2 & MAS2_EPN_MASK) < va + size) {
			/*
			 * Clear MAS1[VALID] and write the entry back.
			 * NOTE(review): this presumably relies on
			 * tlb1_read_entry() leaving MAS0 selecting entry 'i'
			 * for the tlbwe below — confirm against its
			 * implementation.
			 */
			mtspr(SPR_MAS1, e.mas1 & ~MAS1_VALID);
			__asm __volatile("isync; tlbwe; isync; msync");
		}
	}
}
static void | |||||
tlb1_write_entry_int(void *arg) | tlb1_write_entry_int(void *arg) | ||||
{ | { | ||||
struct tlbwrite_args *args = arg; | struct tlbwrite_args *args = arg; | ||||
uint32_t idx, mas0; | uint32_t idx, mas0; | ||||
idx = args->idx; | idx = args->idx; | ||||
if (idx == -1) { | if (idx == -1) { | ||||
tlb1_purge_va_range(args->e->virt, args->e->size); | |||||
idx = tlb1_find_free(); | idx = tlb1_find_free(); | ||||
if (idx == -1) | if (idx == -1) | ||||
panic("No free TLB1 entries!\n"); | panic("No free TLB1 entries!\n"); | ||||
} | } | ||||
/* Select entry */ | /* Select entry */ | ||||
mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); | mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); | ||||
mtspr(SPR_MAS0, mas0); | mtspr(SPR_MAS0, mas0); | ||||
▲ Show 20 Lines • Show All 492 Lines • Show Last 20 Lines |