Changeset View
Standalone View
sys/arm/arm/pmap-v6.c
Show First 20 Lines • Show All 104 Lines • ▼ Show 20 Lines | |||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/vmmeter.h> | #include <sys/vmmeter.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/mman.h> | #include <sys/mman.h> | ||||
#include <sys/sf_buf.h> | #include <sys/sf_buf.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/sched.h> | #include <sys/sched.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#ifdef SMP | |||||
#include <sys/smp.h> | |||||
#else | |||||
#include <sys/cpuset.h> | |||||
#endif | |||||
#ifdef DDB | #ifdef DDB | ||||
#include <ddb/ddb.h> | #include <ddb/ddb.h> | ||||
#endif | #endif | ||||
#include <machine/physmem.h> | #include <machine/physmem.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
▲ Show 20 Lines • Show All 165 Lines • ▼ Show 20 Lines | |||||
vm_offset_t pv_vafree; /* freelist stored in the PTE */ | vm_offset_t pv_vafree; /* freelist stored in the PTE */ | ||||
vm_paddr_t first_managed_pa; | vm_paddr_t first_managed_pa; | ||||
#define pa_to_pvh(pa) (&pv_table[pte1_index(pa - first_managed_pa)]) | #define pa_to_pvh(pa) (&pv_table[pte1_index(pa - first_managed_pa)]) | ||||
/* | /* | ||||
* All those kernel PT submaps that BSD is so fond of | * All those kernel PT submaps that BSD is so fond of | ||||
*/ | */ | ||||
struct sysmaps { | |||||
struct mtx lock; | |||||
pt2_entry_t *CMAP1; | |||||
pt2_entry_t *CMAP2; | |||||
pt2_entry_t *CMAP3; | |||||
caddr_t CADDR1; | |||||
caddr_t CADDR2; | |||||
caddr_t CADDR3; | |||||
}; | |||||
static struct sysmaps sysmaps_pcpu[MAXCPU]; | |||||
caddr_t _tmppt = 0; | caddr_t _tmppt = 0; | ||||
struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */ | struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */ | ||||
/* | /* | ||||
* Crashdump maps. | * Crashdump maps. | ||||
*/ | */ | ||||
static caddr_t crashdumpmap; | static caddr_t crashdumpmap; | ||||
▲ Show 20 Lines • Show All 812 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Initialize kernel PMAP locks and lists, kernel_pmap itself, and | * Initialize kernel PMAP locks and lists, kernel_pmap itself, and | ||||
* reserve various virtual spaces for temporary mappings. | * reserve various virtual spaces for temporary mappings. | ||||
*/ | */ | ||||
void | void | ||||
pmap_bootstrap(vm_offset_t firstaddr) | pmap_bootstrap(vm_offset_t firstaddr) | ||||
{ | { | ||||
pt2_entry_t *unused __unused; | pt2_entry_t *unused __unused; | ||||
struct sysmaps *sysmaps; | struct pcpu *pc; | ||||
u_int i; | |||||
/* | /* | ||||
* Initialize the kernel pmap (which is statically allocated). | * Initialize the kernel pmap (which is statically allocated). | ||||
*/ | */ | ||||
PMAP_LOCK_INIT(kernel_pmap); | PMAP_LOCK_INIT(kernel_pmap); | ||||
kernel_l1pa = (vm_paddr_t)kern_pt1; /* for libkvm */ | kernel_l1pa = (vm_paddr_t)kern_pt1; /* for libkvm */ | ||||
kernel_pmap->pm_pt1 = kern_pt1; | kernel_pmap->pm_pt1 = kern_pt1; | ||||
kernel_pmap->pm_pt2tab = kern_pt2tab; | kernel_pmap->pm_pt2tab = kern_pt2tab; | ||||
Show All 22 Lines | pmap_bootstrap(vm_offset_t firstaddr) | ||||
*/ | */ | ||||
#define SYSMAP(c, p, v, n) do { \ | #define SYSMAP(c, p, v, n) do { \ | ||||
v = (c)pmap_preboot_reserve_pages(n); \ | v = (c)pmap_preboot_reserve_pages(n); \ | ||||
p = pt2map_entry((vm_offset_t)v); \ | p = pt2map_entry((vm_offset_t)v); \ | ||||
} while (0) | } while (0) | ||||
/* | /* | ||||
* Local CMAP1/CMAP2 are used for zeroing and copying pages. | * Local CMAP1/CMAP2 are used for zeroing and copying pages. | ||||
* Local CMAP3 is used for data cache cleaning. | * Local CMAP2 is also used for data cache cleaning. | ||||
*/ | */ | ||||
for (i = 0; i < MAXCPU; i++) { | pc = pcpu_find(curcpu); | ||||
sysmaps = &sysmaps_pcpu[i]; | mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); | ||||
mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); | SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1); | ||||
SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1); | SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1); | ||||
SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1); | SYSMAP(vm_offset_t, unused, pc->pc_qmap_addr, 1); | ||||
SYSMAP(caddr_t, sysmaps->CMAP3, sysmaps->CADDR3, 1); | |||||
} | |||||
/* | /* | ||||
* Crashdump maps. | * Crashdump maps. | ||||
*/ | */ | ||||
SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS); | SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS); | ||||
/* | /* | ||||
* _tmppt is used for reading arbitrary physical pages via /dev/mem. | * _tmppt is used for reading arbitrary physical pages via /dev/mem. | ||||
Show All 16 Lines | #endif | ||||
* initialize phys_avail[] array and no further page allocation | * initialize phys_avail[] array and no further page allocation | ||||
* can happen after that until vm subsystem will be initialized. | * can happen after that until vm subsystem will be initialized. | ||||
*/ | */ | ||||
kernel_vm_end_new = kernel_vm_end; | kernel_vm_end_new = kernel_vm_end; | ||||
virtual_end = vm_max_kernel_address; | virtual_end = vm_max_kernel_address; | ||||
} | } | ||||
static void | static void | ||||
pmap_init_qpages(void) | pmap_init_reserved_pages(void) | ||||
{ | { | ||||
struct pcpu *pc; | struct pcpu *pc; | ||||
vm_offset_t pages; | |||||
int i; | int i; | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
pc = pcpu_find(i); | pc = pcpu_find(i); | ||||
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE); | /* | ||||
if (pc->pc_qmap_addr == 0) | * Skip if the mapping has already been initialized, | ||||
* i.e. this is the BSP. | |||||
*/ | |||||
if (pc->pc_cmap1_addr != 0) | |||||
continue; | |||||
mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); | |||||
pages = kva_alloc(PAGE_SIZE * 3); | |||||
if (pages == 0) | |||||
panic("%s: unable to allocate KVA", __func__); | panic("%s: unable to allocate KVA", __func__); | ||||
pc->pc_cmap1_pte2p = pt2map_entry(pages); | |||||
pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE); | |||||
pc->pc_cmap1_addr = (caddr_t)pages; | |||||
pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE); | |||||
pc->pc_qmap_addr = pages + (PAGE_SIZE * 2); | |||||
} | } | ||||
} | } | ||||
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL); | SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); | ||||
/* | /* | ||||
* The function can already be used in the second initialization stage. | * The function can already be used in the second initialization stage. | ||||
* As such, the function DOES NOT call pmap_growkernel() where PT2 | * As such, the function DOES NOT call pmap_growkernel() where PT2 | ||||
* allocation can happen. So if used, be sure that PT2 for given | * allocation can happen. So if used, be sure that PT2 for given | ||||
* virtual address is allocated already! | * virtual address is allocated already! | ||||
* | * | ||||
* Add a wired page to the kva. | * Add a wired page to the kva. | ||||
▲ Show 20 Lines • Show All 334 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Zero L2 page table page. | * Zero L2 page table page. | ||||
* Use same KVA as in pmap_zero_page(). | * Use same KVA as in pmap_zero_page(). | ||||
*/ | */ | ||||
static __inline vm_paddr_t | static __inline vm_paddr_t | ||||
pmap_pt2pg_zero(vm_page_t m) | pmap_pt2pg_zero(vm_page_t m) | ||||
{ | { | ||||
pt2_entry_t *cmap2_pte2p; | |||||
skra: For consistency's sake, the variable should be named **cmap_pte2p**. Or better, **cmap2_pte2p**…
Not Done Inline ActionsI should be using it in pte2_clear() below...I'll change that. As for the naming, these names were taken from the i386 implementation where we don't have names like pt1_entry_t and pt2_entry_t to distinguish L1 and L2 PTEs. I'm fine with renaming the armv6 version according to your suggestion. jah: I should be using it in pte2_clear() below...I'll change that.
As for the naming, these names… | |||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
struct sysmaps *sysmaps; | struct pcpu *pc; | ||||
pa = VM_PAGE_TO_PHYS(m); | pa = VM_PAGE_TO_PHYS(m); | ||||
/* | /* | ||||
* XXX: For now, we map whole page even if it's already zero, | * XXX: For now, we map whole page even if it's already zero, | ||||
* to sync it even if the sync is only DSB. | * to sync it even if the sync is only DSB. | ||||
*/ | */ | ||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
if (pte2_load(sysmaps->CMAP2) != 0) | mtx_lock(&pc->pc_cmap_lock); | ||||
if (pte2_load(cmap2_pte2p) != 0) | |||||
panic("%s: CMAP2 busy", __func__); | panic("%s: CMAP2 busy", __func__); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(pa, PTE2_AP_KRW, | pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, | ||||
vm_page_pte2_attr(m))); | vm_page_pte2_attr(m))); | ||||
/* Even VM_ALLOC_ZERO request is only advisory. */ | /* Even VM_ALLOC_ZERO request is only advisory. */ | ||||
if ((m->flags & PG_ZERO) == 0) | if ((m->flags & PG_ZERO) == 0) | ||||
pagezero(sysmaps->CADDR2); | pagezero(pc->pc_cmap2_addr); | ||||
pte2_sync_range((pt2_entry_t *)sysmaps->CADDR2, PAGE_SIZE); | pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
/* | |||||
* Unpin the thread before releasing the lock. Otherwise the thread | |||||
* could be rescheduled while still bound to the current CPU, only | |||||
* to unpin itself immediately upon resuming execution. | |||||
*/ | |||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
Done Inline ActionsPlease, explain why you moved mtx_unlock() above sched_unpin(). IMO, it's not necessary, and if mtx_unlock() causes the current thread to be rescheduled, it remains pinned to some CPU. Thus, it can be scheduled only to that CPU, just to unpin itself. You also made this change in other functions below. skra: Please, explain why you moved mtx_unlock() above sched_unpin(). IMO, it's not necessary and if… | |||||
Not Done Inline ActionsGood catch, this came from an earlier version of the change in which I referenced the lock directly using PCPU_GET. It's no longer necessary and potentially harmful to performance as you point out. jah: Good catch, this came from an earlier version of the change in which I referenced the lock… | |||||
return (pa); | return (pa); | ||||
} | } | ||||
/* | /* | ||||
* Init just allocated page as L2 page table(s) holder | * Init just allocated page as L2 page table(s) holder | ||||
* and return its physical address. | * and return its physical address. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 4,010 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Sets the memory attribute for the specified page. | * Sets the memory attribute for the specified page. | ||||
*/ | */ | ||||
void | void | ||||
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) | pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) | ||||
{ | { | ||||
struct sysmaps *sysmaps; | pt2_entry_t *cmap2_pte2p; | ||||
vm_memattr_t oma; | vm_memattr_t oma; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
struct pcpu *pc; | |||||
oma = m->md.pat_mode; | oma = m->md.pat_mode; | ||||
m->md.pat_mode = ma; | m->md.pat_mode = ma; | ||||
CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, | CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, | ||||
VM_PAGE_TO_PHYS(m), oma, ma); | VM_PAGE_TO_PHYS(m), oma, ma); | ||||
if ((m->flags & PG_FICTITIOUS) != 0) | if ((m->flags & PG_FICTITIOUS) != 0) | ||||
return; | return; | ||||
Show All 10 Lines | |||||
#endif | #endif | ||||
/* | /* | ||||
* If page is not mapped by sf buffer, map the page | * If page is not mapped by sf buffer, map the page | ||||
* transient and do invalidation. | * transient and do invalidation. | ||||
*/ | */ | ||||
if (ma != oma) { | if (ma != oma) { | ||||
pa = VM_PAGE_TO_PHYS(m); | pa = VM_PAGE_TO_PHYS(m); | ||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
if (*sysmaps->CMAP2) | mtx_lock(&pc->pc_cmap_lock); | ||||
if (pte2_load(cmap2_pte2p) != 0) | |||||
panic("%s: CMAP2 busy", __func__); | panic("%s: CMAP2 busy", __func__); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(pa, PTE2_AP_KRW, | pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, | ||||
vm_memattr_to_pte2(ma))); | vm_memattr_to_pte2(ma))); | ||||
dcache_wbinv_poc((vm_offset_t)sysmaps->CADDR2, pa, PAGE_SIZE); | dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Miscellaneous support routines follow | * Miscellaneous support routines follow | ||||
*/ | */ | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 61 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* pmap_zero_page zeros the specified hardware page by mapping | * pmap_zero_page zeros the specified hardware page by mapping | ||||
* the page into KVM and using bzero to clear its contents. | * the page into KVM and using bzero to clear its contents. | ||||
*/ | */ | ||||
void | void | ||||
pmap_zero_page(vm_page_t m) | pmap_zero_page(vm_page_t m) | ||||
{ | { | ||||
struct sysmaps *sysmaps; | pt2_entry_t *cmap2_pte2p; | ||||
struct pcpu *pc; | |||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
if (pte2_load(sysmaps->CMAP2) != 0) | mtx_lock(&pc->pc_cmap_lock); | ||||
if (pte2_load(cmap2_pte2p) != 0) | |||||
panic("%s: CMAP2 busy", __func__); | panic("%s: CMAP2 busy", __func__); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, | pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, | ||||
vm_page_pte2_attr(m))); | vm_page_pte2_attr(m))); | ||||
pagezero(sysmaps->CADDR2); | pagezero(pc->pc_cmap2_addr); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
/* | /* | ||||
* pmap_zero_page_area zeros the specified hardware page by mapping | * pmap_zero_page_area zeros the specified hardware page by mapping | ||||
* the page into KVM and using bzero to clear its contents. | * the page into KVM and using bzero to clear its contents. | ||||
* | * | ||||
* off and size may not cover an area beyond a single hardware page. | * off and size may not cover an area beyond a single hardware page. | ||||
*/ | */ | ||||
void | void | ||||
pmap_zero_page_area(vm_page_t m, int off, int size) | pmap_zero_page_area(vm_page_t m, int off, int size) | ||||
{ | { | ||||
struct sysmaps *sysmaps; | pt2_entry_t *cmap2_pte2p; | ||||
struct pcpu *pc; | |||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
if (pte2_load(sysmaps->CMAP2) != 0) | mtx_lock(&pc->pc_cmap_lock); | ||||
if (pte2_load(cmap2_pte2p) != 0) | |||||
panic("%s: CMAP2 busy", __func__); | panic("%s: CMAP2 busy", __func__); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, | pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, | ||||
vm_page_pte2_attr(m))); | vm_page_pte2_attr(m))); | ||||
if (off == 0 && size == PAGE_SIZE) | if (off == 0 && size == PAGE_SIZE) | ||||
pagezero(sysmaps->CADDR2); | pagezero(pc->pc_cmap2_addr); | ||||
else | else | ||||
bzero(sysmaps->CADDR2 + off, size); | bzero(pc->pc_cmap2_addr + off, size); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
/* | /* | ||||
* pmap_copy_page copies the specified (machine independent) | * pmap_copy_page copies the specified (machine independent) | ||||
* page by mapping the page into virtual memory and using | * page by mapping the page into virtual memory and using | ||||
* bcopy to copy the page, one machine dependent page at a | * bcopy to copy the page, one machine dependent page at a | ||||
* time. | * time. | ||||
*/ | */ | ||||
void | void | ||||
pmap_copy_page(vm_page_t src, vm_page_t dst) | pmap_copy_page(vm_page_t src, vm_page_t dst) | ||||
{ | { | ||||
struct sysmaps *sysmaps; | pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; | ||||
struct pcpu *pc; | |||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap1_pte2p = pc->pc_cmap1_pte2p; | ||||
if (pte2_load(sysmaps->CMAP1) != 0) | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
mtx_lock(&pc->pc_cmap_lock); | |||||
if (pte2_load(cmap1_pte2p) != 0) | |||||
panic("%s: CMAP1 busy", __func__); | panic("%s: CMAP1 busy", __func__); | ||||
if (pte2_load(sysmaps->CMAP2) != 0) | if (pte2_load(cmap2_pte2p) != 0) | ||||
panic("%s: CMAP2 busy", __func__); | panic("%s: CMAP2 busy", __func__); | ||||
pte2_store(sysmaps->CMAP1, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), | pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), | ||||
PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); | PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), | pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), | ||||
PTE2_AP_KRW, vm_page_pte2_attr(dst))); | PTE2_AP_KRW, vm_page_pte2_attr(dst))); | ||||
bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); | bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE); | ||||
pte2_clear(sysmaps->CMAP1); | pte2_clear(cmap1_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR1); | tlb_flush((vm_offset_t)pc->pc_cmap1_addr); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
int unmapped_buf_allowed = 1; | int unmapped_buf_allowed = 1; | ||||
void | void | ||||
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], | pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], | ||||
vm_offset_t b_offset, int xfersize) | vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
struct sysmaps *sysmaps; | pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; | ||||
vm_page_t a_pg, b_pg; | vm_page_t a_pg, b_pg; | ||||
char *a_cp, *b_cp; | char *a_cp, *b_cp; | ||||
vm_offset_t a_pg_offset, b_pg_offset; | vm_offset_t a_pg_offset, b_pg_offset; | ||||
struct pcpu *pc; | |||||
int cnt; | int cnt; | ||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap1_pte2p = pc->pc_cmap1_pte2p; | ||||
if (*sysmaps->CMAP1 != 0) | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
mtx_lock(&pc->pc_cmap_lock); | |||||
if (pte2_load(cmap1_pte2p) != 0) | |||||
panic("pmap_copy_pages: CMAP1 busy"); | panic("pmap_copy_pages: CMAP1 busy"); | ||||
if (*sysmaps->CMAP2 != 0) | if (pte2_load(cmap2_pte2p) != 0) | ||||
panic("pmap_copy_pages: CMAP2 busy"); | panic("pmap_copy_pages: CMAP2 busy"); | ||||
while (xfersize > 0) { | while (xfersize > 0) { | ||||
a_pg = ma[a_offset >> PAGE_SHIFT]; | a_pg = ma[a_offset >> PAGE_SHIFT]; | ||||
a_pg_offset = a_offset & PAGE_MASK; | a_pg_offset = a_offset & PAGE_MASK; | ||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset); | cnt = min(xfersize, PAGE_SIZE - a_pg_offset); | ||||
b_pg = mb[b_offset >> PAGE_SHIFT]; | b_pg = mb[b_offset >> PAGE_SHIFT]; | ||||
b_pg_offset = b_offset & PAGE_MASK; | b_pg_offset = b_offset & PAGE_MASK; | ||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset); | cnt = min(cnt, PAGE_SIZE - b_pg_offset); | ||||
pte2_store(sysmaps->CMAP1, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), | pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), | ||||
PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); | PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); | ||||
tlb_flush_local((vm_offset_t)sysmaps->CADDR1); | tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), | pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), | ||||
PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); | PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); | ||||
tlb_flush_local((vm_offset_t)sysmaps->CADDR2); | tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr); | ||||
a_cp = sysmaps->CADDR1 + a_pg_offset; | a_cp = pc->pc_cmap1_addr + a_pg_offset; | ||||
b_cp = sysmaps->CADDR2 + b_pg_offset; | b_cp = pc->pc_cmap2_addr + b_pg_offset; | ||||
bcopy(a_cp, b_cp, cnt); | bcopy(a_cp, b_cp, cnt); | ||||
a_offset += cnt; | a_offset += cnt; | ||||
b_offset += cnt; | b_offset += cnt; | ||||
xfersize -= cnt; | xfersize -= cnt; | ||||
} | } | ||||
pte2_clear(sysmaps->CMAP1); | pte2_clear(cmap1_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR1); | tlb_flush((vm_offset_t)pc->pc_cmap1_addr); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
vm_offset_t | vm_offset_t | ||||
pmap_quick_enter_page(vm_page_t m) | pmap_quick_enter_page(vm_page_t m) | ||||
{ | { | ||||
pt2_entry_t *pte2p; | pt2_entry_t *pte2p; | ||||
vm_offset_t qmap_addr; | vm_offset_t qmap_addr; | ||||
▲ Show 20 Lines • Show All 309 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Clean L1 data cache range by physical address. | * Clean L1 data cache range by physical address. | ||||
* The range must be within a single page. | * The range must be within a single page. | ||||
*/ | */ | ||||
static void | static void | ||||
pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) | pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) | ||||
{ | { | ||||
struct sysmaps *sysmaps; | pt2_entry_t *cmap2_pte2p; | ||||
struct pcpu *pc; | |||||
KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, | KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, | ||||
("%s: not on single page", __func__)); | ("%s: not on single page", __func__)); | ||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
if (*sysmaps->CMAP3) | mtx_lock(&pc->pc_cmap_lock); | ||||
panic("%s: CMAP3 busy", __func__); | if (pte2_load(cmap2_pte2p) != 0) | ||||
pte2_store(sysmaps->CMAP3, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); | panic("%s: CMAP2 busy", __func__); | ||||
dcache_wb_pou((vm_offset_t)sysmaps->CADDR3 + (pa & PAGE_MASK), size); | pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); | ||||
pte2_clear(sysmaps->CMAP3); | dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR3); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | |||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
/* | /* | ||||
* Sync instruction cache range which is not mapped yet. | * Sync instruction cache range which is not mapped yet. | ||||
*/ | */ | ||||
void | void | ||||
cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) | cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 233 Lines • ▼ Show 20 Lines | |||||
#if defined(PMAP_DEBUG) | #if defined(PMAP_DEBUG) | ||||
/* | /* | ||||
* Reusing of KVA used in pmap_zero_page function !!! | * Reusing of KVA used in pmap_zero_page function !!! | ||||
*/ | */ | ||||
static void | static void | ||||
pmap_zero_page_check(vm_page_t m) | pmap_zero_page_check(vm_page_t m) | ||||
{ | { | ||||
pt2_entry_t *cmap2_pte2p; | |||||
uint32_t *p, *end; | uint32_t *p, *end; | ||||
struct sysmaps *sysmaps; | struct pcpu *pc; | ||||
sched_pin(); | sched_pin(); | ||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; | pc = pcpu_find(curcpu); | ||||
mtx_lock(&sysmaps->lock); | cmap2_pte2p = pc->pc_cmap2_pte2p; | ||||
if (pte2_load(sysmaps->CMAP2) != 0) | mtx_lock(&pc->pc_cmap_lock); | ||||
if (pte2_load(cmap2_pte2p) != 0) | |||||
panic("%s: CMAP2 busy", __func__); | panic("%s: CMAP2 busy", __func__); | ||||
pte2_store(sysmaps->CMAP2, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, | pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, | ||||
vm_page_pte2_attr(m))); | vm_page_pte2_attr(m))); | ||||
end = (uint32_t*)(sysmaps->CADDR2 + PAGE_SIZE); | end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE); | ||||
for (p = (uint32_t*)sysmaps->CADDR2; p < end; p++) | for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++) | ||||
if (*p != 0) | if (*p != 0) | ||||
panic("%s: page %p not zero, va: %p", __func__, m, | panic("%s: page %p not zero, va: %p", __func__, m, | ||||
sysmaps->CADDR2); | pc->pc_cmap2_addr); | ||||
pte2_clear(sysmaps->CMAP2); | pte2_clear(cmap2_pte2p); | ||||
tlb_flush((vm_offset_t)sysmaps->CADDR2); | tlb_flush((vm_offset_t)pc->pc_cmap2_addr); | ||||
sched_unpin(); | sched_unpin(); | ||||
mtx_unlock(&sysmaps->lock); | mtx_unlock(&pc->pc_cmap_lock); | ||||
} | } | ||||
int | int | ||||
pmap_pid_dump(int pid) | pmap_pid_dump(int pid) | ||||
{ | { | ||||
pmap_t pmap; | pmap_t pmap; | ||||
struct proc *p; | struct proc *p; | ||||
int npte2 = 0; | int npte2 = 0; | ||||
▲ Show 20 Lines • Show All 311 Lines • Show Last 20 Lines |
For consistency's sake, the variable should be named cmap_pte2p. Or better, cmap2_pte2p in this case. However, the question is whether the variable is still needed. You have not used it in pte2_clear().
The same comment about consistent naming of variable(s) is valid for other functions too.