Changeset View
Standalone View
head/sys/riscv/riscv/pmap.c
[... 146 lines elided ...]
#include <vm/vm_pager.h> | #include <vm/vm_pager.h> | ||||
#include <vm/vm_radix.h> | #include <vm/vm_radix.h> | ||||
#include <vm/vm_reserv.h> | #include <vm/vm_reserv.h> | ||||
#include <vm/uma.h> | #include <vm/uma.h> | ||||
#include <machine/machdep.h> | #include <machine/machdep.h> | ||||
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <machine/pcb.h> | #include <machine/pcb.h> | ||||
#include <machine/sbi.h> | |||||
#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t))) | #define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t))) | ||||
#define NUPDE (NPDEPG * NPDEPG) | #define NUPDE (NPDEPG * NPDEPG) | ||||
#define NUSERPGTBLS (NUPDE + NPDEPG) | #define NUSERPGTBLS (NUPDE + NPDEPG) | ||||
#if !defined(DIAGNOSTIC) | #if !defined(DIAGNOSTIC) | ||||
#ifdef __GNUC_GNU_INLINE__ | #ifdef __GNUC_GNU_INLINE__ | ||||
#define PMAP_INLINE __attribute__((__gnu_inline__)) inline | #define PMAP_INLINE __attribute__((__gnu_inline__)) inline | ||||
[... 196 lines elided ...]
static __inline int | static __inline int | ||||
pmap_is_write(pt_entry_t entry) | pmap_is_write(pt_entry_t entry) | ||||
{ | { | ||||
return (entry & PTE_W); | return (entry & PTE_W); | ||||
} | } | ||||
static __inline int | static __inline int | ||||
pmap_is_current(pmap_t pmap) | |||||
{ | |||||
return ((pmap == pmap_kernel()) || | |||||
(pmap == curthread->td_proc->p_vmspace->vm_map.pmap)); | |||||
} | |||||
static __inline int | |||||
pmap_l3_valid(pt_entry_t l3) | pmap_l3_valid(pt_entry_t l3) | ||||
{ | { | ||||
return (l3 & PTE_V); | return (l3 & PTE_V); | ||||
} | } | ||||
static __inline int | |||||
pmap_l3_valid_cacheable(pt_entry_t l3) | |||||
{ | |||||
/* TODO */ | |||||
return (0); | |||||
} | |||||
#define PTE_SYNC(pte) cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte)) | |||||
static inline int | static inline int | ||||
pmap_page_accessed(pt_entry_t pte) | pmap_page_accessed(pt_entry_t pte) | ||||
{ | { | ||||
return (pte & PTE_A); | return (pte & PTE_A); | ||||
} | } | ||||
/* Checks if the page is dirty. */ | /* Checks if the page is dirty. */ | ||||
[... 109 lines elided ...] | for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(&l1[l1_slot], entry); | pmap_load_store(&l1[l1_slot], entry); | ||||
} | } | ||||
/* Set the upper limit of the DMAP region */ | /* Set the upper limit of the DMAP region */ | ||||
dmap_phys_max = pa; | dmap_phys_max = pa; | ||||
dmap_max_addr = va; | dmap_max_addr = va; | ||||
cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE); | sfence_vma(); | ||||
cpu_tlb_flushID(); | |||||
} | } | ||||
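
The new right-hand code drops the dcache writeback and cpu_tlb_flushID() in favor of a single sfence.vma, which orders the preceding page-table stores and flushes the local hart's address-translation caches. A minimal sketch of the helpers this diff relies on, assuming they are thin inline-asm wrappers in the riscv machine headers:

static __inline void
fence(void)
{

	/* Full memory fence; orders the preceding page-table stores. */
	__asm __volatile("fence" ::: "memory");
}

static __inline void
sfence_vma(void)
{

	/* Flush all supervisor translations on the local hart. */
	__asm __volatile("sfence.vma" ::: "memory");
}

static __inline void
sfence_vma_page(uintptr_t addr)
{

	/* Flush translations for the page containing addr only. */
	__asm __volatile("sfence.vma %0" :: "r" (addr) : "memory");
}
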
static vm_offset_t | static vm_offset_t | ||||
pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start) | pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start) | ||||
{ | { | ||||
vm_offset_t l2pt, l3pt; | vm_offset_t l3pt; | ||||
pt_entry_t entry; | pt_entry_t entry; | ||||
pd_entry_t *l2; | pd_entry_t *l2; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
u_int l2_slot; | u_int l2_slot; | ||||
pn_t pn; | pn_t pn; | ||||
KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address")); | KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address")); | ||||
l2 = pmap_l2(kernel_pmap, va); | l2 = pmap_l2(kernel_pmap, va); | ||||
l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1)); | l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1)); | ||||
l2pt = (vm_offset_t)l2; | |||||
l2_slot = pmap_l2_index(va); | l2_slot = pmap_l2_index(va); | ||||
l3pt = l3_start; | l3pt = l3_start; | ||||
for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) { | for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) { | ||||
KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); | KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); | ||||
pa = pmap_early_vtophys(l1pt, l3pt); | pa = pmap_early_vtophys(l1pt, l3pt); | ||||
pn = (pa / PAGE_SIZE); | pn = (pa / PAGE_SIZE); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(&l2[l2_slot], entry); | pmap_load_store(&l2[l2_slot], entry); | ||||
l3pt += PAGE_SIZE; | l3pt += PAGE_SIZE; | ||||
} | } | ||||
/* Clean the L2 page table */ | /* Clean the L2 page table */ | ||||
memset((void *)l3_start, 0, l3pt - l3_start); | memset((void *)l3_start, 0, l3pt - l3_start); | ||||
cpu_dcache_wb_range(l3_start, l3pt - l3_start); | |||||
cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE); | |||||
return (l3pt); | return (l3pt); | ||||
} | } | ||||
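
Every mapping in this file is built with the same two-step pattern: start from PTE_V (plus permission bits) and OR in the page number shifted to PTE_PPN0_S. For reference, a sketch of that composition, assuming the Sv39 layout where the PPN field begins at bit 10, above the flag and software bits:

/*
 * Sketch: compose a valid PTE for physical address pa with the given
 * permission bits.  Assumes PTE_PPN0_S == 10 per the Sv39 PTE format.
 */
static __inline pt_entry_t
pmap_pte_compose(vm_paddr_t pa, pt_entry_t perms)
{
	pn_t pn;

	pn = (pa / PAGE_SIZE);
	return ((pn << PTE_PPN0_S) | perms | PTE_V);
}
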
/* | /* | ||||
* Bootstrap the system enough to run with virtual memory. | * Bootstrap the system enough to run with virtual memory. | ||||
*/ | */ | ||||
void | void | ||||
pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) | pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) | ||||
[... 106 lines elided ...] | pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
freemempos = KERNBASE + kernlen; | freemempos = KERNBASE + kernlen; | ||||
freemempos = roundup2(freemempos, PAGE_SIZE); | freemempos = roundup2(freemempos, PAGE_SIZE); | ||||
/* Create the l3 tables for the early devmap */ | /* Create the l3 tables for the early devmap */ | ||||
freemempos = pmap_bootstrap_l3(l1pt, | freemempos = pmap_bootstrap_l3(l1pt, | ||||
VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos); | VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos); | ||||
cpu_tlb_flushID(); | sfence_vma(); | ||||
#define alloc_pages(var, np) \ | #define alloc_pages(var, np) \ | ||||
(var) = freemempos; \ | (var) = freemempos; \ | ||||
freemempos += (np * PAGE_SIZE); \ | freemempos += (np * PAGE_SIZE); \ | ||||
memset((char *)(var), 0, ((np) * PAGE_SIZE)); | memset((char *)(var), 0, ((np) * PAGE_SIZE)); | ||||
/* Allocate dynamic per-cpu area. */ | /* Allocate dynamic per-cpu area. */ | ||||
alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); | alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); | ||||
[... 39 lines elided ...] | #define alloc_pages(var, np) \
phys_avail[avail_slot + 1] = 0; | phys_avail[avail_slot + 1] = 0; | ||||
/* | /* | ||||
* Maxmem isn't the "maximum memory", it's one larger than the | * Maxmem isn't the "maximum memory", it's one larger than the | ||||
* highest page of the physical address space. It should be | * highest page of the physical address space. It should be | ||||
* called something like "Maxphyspage". | * called something like "Maxphyspage". | ||||
*/ | */ | ||||
Maxmem = atop(phys_avail[avail_slot - 1]); | Maxmem = atop(phys_avail[avail_slot - 1]); | ||||
cpu_tlb_flushID(); | |||||
} | } | ||||
/* | /* | ||||
* Initialize a vm_page's machine-dependent fields. | * Initialize a vm_page's machine-dependent fields. | ||||
*/ | */ | ||||
void | void | ||||
pmap_page_init(vm_page_t m) | pmap_page_init(vm_page_t m) | ||||
{ | { | ||||
[... 19 lines elided ...] | pmap_init(void)
/* | /* | ||||
* Initialize the pool of pv list locks. | * Initialize the pool of pv list locks. | ||||
*/ | */ | ||||
for (i = 0; i < NPV_LIST_LOCKS; i++) | for (i = 0; i < NPV_LIST_LOCKS; i++) | ||||
rw_init(&pv_list_locks[i], "pmap pv list"); | rw_init(&pv_list_locks[i], "pmap pv list"); | ||||
} | } | ||||
#ifdef SMP | |||||
/* | /* | ||||
* Normal, non-SMP, invalidation functions. | * For SMP, these functions have to use IPIs for coherence. | ||||
* We inline these within pmap.c for speed. | * | ||||
* In general, the calling thread uses a plain fence to order the | |||||
* writes to the page tables before making an SBI call that executes | |||||
* sfence_vma() on the remote CPUs. | |||||
* | |||||
* Since the riscv pmap does not yet have a pm_active field, IPIs are | |||||
* sent to all CPUs in the system. | |||||
*/ | */ | ||||
PMAP_INLINE void | static void | ||||
pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
cpuset_t mask; | |||||
/* TODO */ | |||||
sched_pin(); | sched_pin(); | ||||
__asm __volatile("sfence.vma %0" :: "r" (va) : "memory"); | mask = all_cpus; | ||||
CPU_CLR(PCPU_GET(cpuid), &mask); | |||||
fence(); | |||||
sbi_remote_sfence_vma(mask.__bits, va, 1); | |||||
sfence_vma_page(va); | |||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
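
sbi_remote_sfence_vma() hands the shootdown to the supervisor binary interface (BBL here), which runs sfence.vma on the harts named in the mask. Under the legacy SBI v0.1 convention this is one ecall with the function ID in a7 and the arguments in a0..a2; a sketch, with the ID value and wrapper shape assumed from that spec rather than taken from this diff:

#define	SBI_REMOTE_SFENCE_VMA	6	/* legacy SBI v0.1 function ID */

static __inline void
sbi_remote_sfence_vma(const u_long *hart_mask, u_long start, u_long size)
{
	register uintptr_t a0 __asm ("a0") = (uintptr_t)hart_mask;
	register uintptr_t a1 __asm ("a1") = start;
	register uintptr_t a2 __asm ("a2") = size;
	register uintptr_t a7 __asm ("a7") = SBI_REMOTE_SFENCE_VMA;

	/* Trap into the SBI firmware (machine mode). */
	__asm __volatile("ecall" : "+r" (a0)
	    : "r" (a1), "r" (a2), "r" (a7) : "memory");
}
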
PMAP_INLINE void | static void | ||||
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
cpuset_t mask; | |||||
/* TODO */ | |||||
sched_pin(); | sched_pin(); | ||||
__asm __volatile("sfence.vma"); | mask = all_cpus; | ||||
CPU_CLR(PCPU_GET(cpuid), &mask); | |||||
fence(); | |||||
sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1); | |||||
/* | |||||
* Might consider a loop of sfence_vma_page() for a small | |||||
* number of pages in the future. | |||||
*/ | |||||
sfence_vma(); | |||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
PMAP_INLINE void | static void | ||||
pmap_invalidate_all(pmap_t pmap) | pmap_invalidate_all(pmap_t pmap) | ||||
{ | { | ||||
cpuset_t mask; | |||||
/* TODO */ | |||||
sched_pin(); | sched_pin(); | ||||
__asm __volatile("sfence.vma"); | mask = all_cpus; | ||||
CPU_CLR(PCPU_GET(cpuid), &mask); | |||||
fence(); | |||||
/* | |||||
* XXX: The SBI doc doesn't detail how to specify x0 as the | |||||
* address to perform a global fence. BBL currently treats | |||||
* all sfence_vma requests as global, however. | |||||
*/ | |||||
sbi_remote_sfence_vma(mask.__bits, 0, 0); | |||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
#else | |||||
/* | |||||
* Normal, non-SMP, invalidation functions. | |||||
* We inline these within pmap.c for speed. | |||||
*/ | |||||
static __inline void | |||||
pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | |||||
{ | |||||
sfence_vma_page(va); | |||||
} | |||||
static __inline void | |||||
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | |||||
{ | |||||
/* | /* | ||||
* Might consider a loop of sfence_vma_page() for a small | |||||
* number of pages in the future. | |||||
*/ | |||||
sfence_vma(); | |||||
} | |||||
static __inline void | |||||
pmap_invalidate_all(pmap_t pmap) | |||||
{ | |||||
sfence_vma(); | |||||
} | |||||
#endif | |||||
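
As the comment at the top of the SMP block notes, the lack of a pm_active cpuset forces every shootdown to target all harts. A hypothetical refinement once such a field exists (pm_active is assumed here; it is not part of this diff) would narrow the mask before the SBI call:

static void
pmap_invalidate_page_narrowed(pmap_t pmap, vm_offset_t va)
{
	cpuset_t mask;

	sched_pin();
	mask = pmap->pm_active;		/* assumed field, not yet present */
	CPU_CLR(PCPU_GET(cpuid), &mask);
	fence();
	if (!CPU_EMPTY(&mask))
		sbi_remote_sfence_vma(mask.__bits, va, PAGE_SIZE);
	sfence_vma_page(va);
	sched_unpin();
}
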
/* | |||||
* Routine: pmap_extract | * Routine: pmap_extract | ||||
* Function: | * Function: | ||||
* Extract the physical page address associated | * Extract the physical page address associated | ||||
* with the given map/virtual_address pair. | * with the given map/virtual_address pair. | ||||
*/ | */ | ||||
vm_paddr_t | vm_paddr_t | ||||
pmap_extract(pmap_t pmap, vm_offset_t va) | pmap_extract(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
[... 114 lines elided ...] | while (size != 0) {
l3 = pmap_l3(kernel_pmap, va); | l3 = pmap_l3(kernel_pmap, va); | ||||
KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va)); | KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va)); | ||||
pn = (pa / PAGE_SIZE); | pn = (pa / PAGE_SIZE); | ||||
entry = (PTE_V | PTE_RWX); | entry = (PTE_V | PTE_RWX); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(l3, entry); | pmap_load_store(l3, entry); | ||||
PTE_SYNC(l3); | |||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va); | ||||
} | } | ||||
/* | /* | ||||
* Remove a page from the kernel pagetables. | * Remove a page from the kernel pagetables. | ||||
* Note: not SMP coherent. | * Note: not SMP coherent. | ||||
*/ | */ | ||||
PMAP_INLINE void | PMAP_INLINE void | ||||
pmap_kremove(vm_offset_t va) | pmap_kremove(vm_offset_t va) | ||||
{ | { | ||||
pt_entry_t *l3; | pt_entry_t *l3; | ||||
l3 = pmap_l3(kernel_pmap, va); | l3 = pmap_l3(kernel_pmap, va); | ||||
KASSERT(l3 != NULL, ("pmap_kremove: Invalid address")); | KASSERT(l3 != NULL, ("pmap_kremove: Invalid address")); | ||||
if (pmap_l3_valid_cacheable(pmap_load(l3))) | |||||
cpu_dcache_wb_range(va, L3_SIZE); | |||||
pmap_load_clear(l3); | pmap_load_clear(l3); | ||||
PTE_SYNC(l3); | |||||
pmap_invalidate_page(kernel_pmap, va); | sfence_vma(); | ||||
} | } | ||||
void | void | ||||
pmap_kremove_device(vm_offset_t sva, vm_size_t size) | pmap_kremove_device(vm_offset_t sva, vm_size_t size) | ||||
{ | { | ||||
pt_entry_t *l3; | pt_entry_t *l3; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
KASSERT((sva & L3_OFFSET) == 0, | KASSERT((sva & L3_OFFSET) == 0, | ||||
("pmap_kremove_device: Invalid virtual address")); | ("pmap_kremove_device: Invalid virtual address")); | ||||
KASSERT((size & PAGE_MASK) == 0, | KASSERT((size & PAGE_MASK) == 0, | ||||
("pmap_kremove_device: Mapping is not page-sized")); | ("pmap_kremove_device: Mapping is not page-sized")); | ||||
va = sva; | va = sva; | ||||
while (size != 0) { | while (size != 0) { | ||||
l3 = pmap_l3(kernel_pmap, va); | l3 = pmap_l3(kernel_pmap, va); | ||||
KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va)); | KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va)); | ||||
pmap_load_clear(l3); | pmap_load_clear(l3); | ||||
PTE_SYNC(l3); | |||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va); | ||||
} | } | ||||
/* | /* | ||||
* Used to map a range of physical addresses into kernel | * Used to map a range of physical addresses into kernel | ||||
* virtual address space. | * virtual address space. | ||||
* | * | ||||
* The value passed in '*virt' is a suggested virtual address for | * The value passed in '*virt' is a suggested virtual address for | ||||
[... 37 lines elided ...] | for (i = 0; i < count; i++) {
pa = VM_PAGE_TO_PHYS(m); | pa = VM_PAGE_TO_PHYS(m); | ||||
pn = (pa / PAGE_SIZE); | pn = (pa / PAGE_SIZE); | ||||
l3 = pmap_l3(kernel_pmap, va); | l3 = pmap_l3(kernel_pmap, va); | ||||
entry = (PTE_V | PTE_RWX); | entry = (PTE_V | PTE_RWX); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(l3, entry); | pmap_load_store(l3, entry); | ||||
PTE_SYNC(l3); | |||||
va += L3_SIZE; | va += L3_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va); | ||||
} | } | ||||
/* | /* | ||||
* This routine tears out page mappings from the | * This routine tears out page mappings from the | ||||
* kernel -- it is meant only for temporary mappings. | * kernel -- it is meant only for temporary mappings. | ||||
* Note: SMP coherent. Uses a ranged shootdown IPI. | * Note: SMP coherent. Uses a ranged shootdown IPI. | ||||
*/ | */ | ||||
void | void | ||||
pmap_qremove(vm_offset_t sva, int count) | pmap_qremove(vm_offset_t sva, int count) | ||||
{ | { | ||||
pt_entry_t *l3; | pt_entry_t *l3; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva)); | KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva)); | ||||
va = sva; | va = sva; | ||||
while (count-- > 0) { | while (count-- > 0) { | ||||
l3 = pmap_l3(kernel_pmap, va); | l3 = pmap_l3(kernel_pmap, va); | ||||
KASSERT(l3 != NULL, ("pmap_kremove: Invalid address")); | KASSERT(l3 != NULL, ("pmap_kremove: Invalid address")); | ||||
if (pmap_l3_valid_cacheable(pmap_load(l3))) | |||||
cpu_dcache_wb_range(va, L3_SIZE); | |||||
pmap_load_clear(l3); | pmap_load_clear(l3); | ||||
PTE_SYNC(l3); | |||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va); | ||||
} | } | ||||
/*************************************************** | /*************************************************** | ||||
* Page table page management routines..... | * Page table page management routines..... | ||||
[... 44 lines elided ...] | _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
* unmap the page table page | * unmap the page table page | ||||
*/ | */ | ||||
if (m->pindex >= NUPDE) { | if (m->pindex >= NUPDE) { | ||||
/* PD page */ | /* PD page */ | ||||
pd_entry_t *l1; | pd_entry_t *l1; | ||||
l1 = pmap_l1(pmap, va); | l1 = pmap_l1(pmap, va); | ||||
pmap_load_clear(l1); | pmap_load_clear(l1); | ||||
pmap_distribute_l1(pmap, pmap_l1_index(va), 0); | pmap_distribute_l1(pmap, pmap_l1_index(va), 0); | ||||
PTE_SYNC(l1); | |||||
} else { | } else { | ||||
/* PTE page */ | /* PTE page */ | ||||
pd_entry_t *l2; | pd_entry_t *l2; | ||||
l2 = pmap_l2(pmap, va); | l2 = pmap_l2(pmap, va); | ||||
pmap_load_clear(l2); | pmap_load_clear(l2); | ||||
PTE_SYNC(l2); | |||||
} | } | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
if (m->pindex < NUPDE) { | if (m->pindex < NUPDE) { | ||||
pd_entry_t *l1; | pd_entry_t *l1; | ||||
/* We just released a PT, unhold the matching PD */ | /* We just released a PT, unhold the matching PD */ | ||||
vm_page_t pdpg; | vm_page_t pdpg; | ||||
l1 = pmap_l1(pmap, va); | l1 = pmap_l1(pmap, va); | ||||
[... 129 lines elided ...] | if (ptepindex >= NUPDE) {
l1index = ptepindex - NUPDE; | l1index = ptepindex - NUPDE; | ||||
l1 = &pmap->pm_l1[l1index]; | l1 = &pmap->pm_l1[l1index]; | ||||
pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE); | pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(l1, entry); | pmap_load_store(l1, entry); | ||||
pmap_distribute_l1(pmap, l1index, entry); | pmap_distribute_l1(pmap, l1index, entry); | ||||
PTE_SYNC(l1); | |||||
} else { | } else { | ||||
vm_pindex_t l1index; | vm_pindex_t l1index; | ||||
pd_entry_t *l1, *l2; | pd_entry_t *l1, *l2; | ||||
l1index = ptepindex >> (L1_SHIFT - L2_SHIFT); | l1index = ptepindex >> (L1_SHIFT - L2_SHIFT); | ||||
l1 = &pmap->pm_l1[l1index]; | l1 = &pmap->pm_l1[l1index]; | ||||
if (pmap_load(l1) == 0) { | if (pmap_load(l1) == 0) { | ||||
/* recurse for allocating page dir */ | /* recurse for allocating page dir */ | ||||
[... 12 lines elided ...] | if (ptepindex >= NUPDE) {
phys = PTE_TO_PHYS(pmap_load(l1)); | phys = PTE_TO_PHYS(pmap_load(l1)); | ||||
l2 = (pd_entry_t *)PHYS_TO_DMAP(phys); | l2 = (pd_entry_t *)PHYS_TO_DMAP(phys); | ||||
l2 = &l2[ptepindex & Ln_ADDR_MASK]; | l2 = &l2[ptepindex & Ln_ADDR_MASK]; | ||||
pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE); | pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(l2, entry); | pmap_load_store(l2, entry); | ||||
PTE_SYNC(l2); | |||||
} | } | ||||
pmap_resident_count_inc(pmap, 1); | pmap_resident_count_inc(pmap, 1); | ||||
return (m); | return (m); | ||||
} | } | ||||
static vm_page_t | static vm_page_t | ||||
[... 117 lines elided ...] | if (pmap_load(l1) == 0) {
paddr = VM_PAGE_TO_PHYS(nkpg); | paddr = VM_PAGE_TO_PHYS(nkpg); | ||||
pn = (paddr / PAGE_SIZE); | pn = (paddr / PAGE_SIZE); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(l1, entry); | pmap_load_store(l1, entry); | ||||
pmap_distribute_l1(kernel_pmap, | pmap_distribute_l1(kernel_pmap, | ||||
pmap_l1_index(kernel_vm_end), entry); | pmap_l1_index(kernel_vm_end), entry); | ||||
PTE_SYNC(l1); | |||||
continue; /* try again */ | continue; /* try again */ | ||||
} | } | ||||
l2 = pmap_l1_to_l2(l1, kernel_vm_end); | l2 = pmap_l1_to_l2(l1, kernel_vm_end); | ||||
if ((pmap_load(l2) & PTE_A) != 0) { | if ((pmap_load(l2) & PTE_A) != 0) { | ||||
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; | kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; | ||||
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | ||||
kernel_vm_end = vm_map_max(kernel_map); | kernel_vm_end = vm_map_max(kernel_map); | ||||
break; | break; | ||||
[... 11 lines elided ...] | while (kernel_vm_end < addr) {
} | } | ||||
paddr = VM_PAGE_TO_PHYS(nkpg); | paddr = VM_PAGE_TO_PHYS(nkpg); | ||||
pn = (paddr / PAGE_SIZE); | pn = (paddr / PAGE_SIZE); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
pmap_load_store(l2, entry); | pmap_load_store(l2, entry); | ||||
PTE_SYNC(l2); | |||||
pmap_invalidate_page(kernel_pmap, kernel_vm_end); | pmap_invalidate_page(kernel_pmap, kernel_vm_end); | ||||
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; | kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; | ||||
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | ||||
kernel_vm_end = vm_map_max(kernel_map); | kernel_vm_end = vm_map_max(kernel_map); | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
[... 263 lines elided ...]
pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, | pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, | ||||
pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) | pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) | ||||
{ | { | ||||
pt_entry_t old_l3; | pt_entry_t old_l3; | ||||
vm_paddr_t phys; | vm_paddr_t phys; | ||||
vm_page_t m; | vm_page_t m; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3))) | |||||
cpu_dcache_wb_range(va, L3_SIZE); | |||||
old_l3 = pmap_load_clear(l3); | old_l3 = pmap_load_clear(l3); | ||||
PTE_SYNC(l3); | |||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
if (old_l3 & PTE_SW_WIRED) | if (old_l3 & PTE_SW_WIRED) | ||||
pmap->pm_stats.wired_count -= 1; | pmap->pm_stats.wired_count -= 1; | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
if (old_l3 & PTE_SW_MANAGED) { | if (old_l3 & PTE_SW_MANAGED) { | ||||
phys = PTE_TO_PHYS(old_l3); | phys = PTE_TO_PHYS(old_l3); | ||||
m = PHYS_TO_VM_PAGE(phys); | m = PHYS_TO_VM_PAGE(phys); | ||||
if (pmap_page_dirty(old_l3)) | if (pmap_page_dirty(old_l3)) | ||||
[... 139 lines elided ...] | while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
KASSERT(l2 != NULL, ("pmap_remove_all: no l2 table found")); | KASSERT(l2 != NULL, ("pmap_remove_all: no l2 table found")); | ||||
tl2 = pmap_load(l2); | tl2 = pmap_load(l2); | ||||
KASSERT((tl2 & PTE_RX) == 0, | KASSERT((tl2 & PTE_RX) == 0, | ||||
("pmap_remove_all: found a table when expecting " | ("pmap_remove_all: found a table when expecting " | ||||
"a block in %p's pv list", m)); | "a block in %p's pv list", m)); | ||||
l3 = pmap_l2_to_l3(l2, pv->pv_va); | l3 = pmap_l2_to_l3(l2, pv->pv_va); | ||||
if (pmap_is_current(pmap) && | |||||
pmap_l3_valid_cacheable(pmap_load(l3))) | |||||
cpu_dcache_wb_range(pv->pv_va, L3_SIZE); | |||||
tl3 = pmap_load_clear(l3); | tl3 = pmap_load_clear(l3); | ||||
PTE_SYNC(l3); | |||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
if (tl3 & PTE_SW_WIRED) | if (tl3 & PTE_SW_WIRED) | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
if ((tl3 & PTE_A) != 0) | if ((tl3 & PTE_A) != 0) | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
/* | /* | ||||
* Update the vm_page_t clean and reference bits. | * Update the vm_page_t clean and reference bits. | ||||
[... 13 lines elided ...]
/* | /* | ||||
* Set the physical protection on the | * Set the physical protection on the | ||||
* specified range of this map as requested. | * specified range of this map as requested. | ||||
*/ | */ | ||||
void | void | ||||
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | ||||
{ | { | ||||
vm_offset_t va, va_next; | vm_offset_t va_next; | ||||
pd_entry_t *l1, *l2; | pd_entry_t *l1, *l2; | ||||
pt_entry_t *l3p, l3; | pt_entry_t *l3p, l3; | ||||
pt_entry_t entry; | pt_entry_t entry; | ||||
if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | ||||
pmap_remove(pmap, sva, eva); | pmap_remove(pmap, sva, eva); | ||||
return; | return; | ||||
} | } | ||||
[... 22 lines elided ...] | for (; sva < eva; sva = va_next) {
if (pmap_load(l2) == 0) | if (pmap_load(l2) == 0) | ||||
continue; | continue; | ||||
if ((pmap_load(l2) & PTE_RX) != 0) | if ((pmap_load(l2) & PTE_RX) != 0) | ||||
continue; | continue; | ||||
if (va_next > eva) | if (va_next > eva) | ||||
va_next = eva; | va_next = eva; | ||||
va = va_next; | |||||
for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, | for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, | ||||
sva += L3_SIZE) { | sva += L3_SIZE) { | ||||
l3 = pmap_load(l3p); | l3 = pmap_load(l3p); | ||||
if (pmap_l3_valid(l3)) { | if (pmap_l3_valid(l3)) { | ||||
entry = pmap_load(l3p); | entry = pmap_load(l3p); | ||||
entry &= ~(PTE_W); | entry &= ~(PTE_W); | ||||
pmap_load_store(l3p, entry); | pmap_load_store(l3p, entry); | ||||
PTE_SYNC(l3p); | |||||
/* XXX: Use pmap_invalidate_range */ | /* XXX: Use pmap_invalidate_range */ | ||||
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
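
The XXX above flags the per-page invalidation as wasteful. A sketch of the batching it suggests for the inner loop, remembering the span of rewritten entries and flushing it once per L2 run (the inval_* locals are illustrative, not from this diff):

		vm_offset_t inval_start, inval_end;

		inval_start = inval_end = 0;
		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
		    sva += L3_SIZE) {
			l3 = pmap_load(l3p);
			if (pmap_l3_valid(l3)) {
				pmap_load_store(l3p, l3 & ~PTE_W);
				if (inval_end == 0)
					inval_start = sva;
				inval_end = sva + L3_SIZE;
			}
		}
		/* One ranged flush instead of one flush per page. */
		if (inval_end != 0)
			pmap_invalidate_range(pmap, inval_start, inval_end);
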
[... 81 lines elided ...] | if (l3 == NULL) {
l2_pa = VM_PAGE_TO_PHYS(l2_m); | l2_pa = VM_PAGE_TO_PHYS(l2_m); | ||||
l2_pn = (l2_pa / PAGE_SIZE); | l2_pn = (l2_pa / PAGE_SIZE); | ||||
l1 = pmap_l1(pmap, va); | l1 = pmap_l1(pmap, va); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (l2_pn << PTE_PPN0_S); | entry |= (l2_pn << PTE_PPN0_S); | ||||
pmap_load_store(l1, entry); | pmap_load_store(l1, entry); | ||||
pmap_distribute_l1(pmap, pmap_l1_index(va), entry); | pmap_distribute_l1(pmap, pmap_l1_index(va), entry); | ||||
PTE_SYNC(l1); | |||||
l2 = pmap_l1_to_l2(l1, va); | l2 = pmap_l1_to_l2(l1, va); | ||||
} | } | ||||
KASSERT(l2 != NULL, | KASSERT(l2 != NULL, | ||||
("No l2 table after allocating one")); | ("No l2 table after allocating one")); | ||||
l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | | l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | | ||||
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); | ||||
if (l3_m == NULL) | if (l3_m == NULL) | ||||
panic("pmap_enter: l3 pte_m == NULL"); | panic("pmap_enter: l3 pte_m == NULL"); | ||||
if ((l3_m->flags & PG_ZERO) == 0) | if ((l3_m->flags & PG_ZERO) == 0) | ||||
pmap_zero_page(l3_m); | pmap_zero_page(l3_m); | ||||
l3_pa = VM_PAGE_TO_PHYS(l3_m); | l3_pa = VM_PAGE_TO_PHYS(l3_m); | ||||
l3_pn = (l3_pa / PAGE_SIZE); | l3_pn = (l3_pa / PAGE_SIZE); | ||||
entry = (PTE_V); | entry = (PTE_V); | ||||
entry |= (l3_pn << PTE_PPN0_S); | entry |= (l3_pn << PTE_PPN0_S); | ||||
pmap_load_store(l2, entry); | pmap_load_store(l2, entry); | ||||
PTE_SYNC(l2); | |||||
l3 = pmap_l2_to_l3(l2, va); | l3 = pmap_l2_to_l3(l2, va); | ||||
} | } | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
} | } | ||||
orig_l3 = pmap_load(l3); | orig_l3 = pmap_load(l3); | ||||
opa = PTE_TO_PHYS(orig_l3); | opa = PTE_TO_PHYS(orig_l3); | ||||
pv = NULL; | pv = NULL; | ||||
[... 34 lines elided ...] | if (opa == pa) {
*/ | */ | ||||
if ((orig_l3 & PTE_SW_MANAGED) != 0) { | if ((orig_l3 & PTE_SW_MANAGED) != 0) { | ||||
if (pmap_is_write(new_l3)) | if (pmap_is_write(new_l3)) | ||||
vm_page_aflag_set(m, PGA_WRITEABLE); | vm_page_aflag_set(m, PGA_WRITEABLE); | ||||
} | } | ||||
goto validate; | goto validate; | ||||
} | } | ||||
/* Flush the cache, there might be uncommitted data in it */ | |||||
if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3)) | |||||
cpu_dcache_wb_range(va, L3_SIZE); | |||||
/* | /* | ||||
* The physical page has changed. Temporarily invalidate | * The physical page has changed. Temporarily invalidate | ||||
* the mapping. This ensures that all threads sharing the | * the mapping. This ensures that all threads sharing the | ||||
* pmap keep a consistent view of the mapping, which is | * pmap keep a consistent view of the mapping, which is | ||||
* necessary for the correct handling of COW faults. It | * necessary for the correct handling of COW faults. It | ||||
* also permits reuse of the old mapping's PV entry, | * also permits reuse of the old mapping's PV entry, | ||||
* avoiding an allocation. | * avoiding an allocation. | ||||
* | * | ||||
[... 42 lines elided ...] | if ((new_l3 & PTE_SW_MANAGED) != 0) {
} | } | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); | CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); | ||||
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
if (pmap_is_write(new_l3)) | if (pmap_is_write(new_l3)) | ||||
vm_page_aflag_set(m, PGA_WRITEABLE); | vm_page_aflag_set(m, PGA_WRITEABLE); | ||||
} | } | ||||
validate: | |||||
/* | /* | ||||
* Sync the i-cache on all harts before updating the PTE | |||||
* if the new PTE is executable. | |||||
*/ | |||||
if (prot & VM_PROT_EXECUTE) | |||||
pmap_sync_icache(pmap, va, PAGE_SIZE); | |||||
/* | |||||
* Update the L3 entry. | * Update the L3 entry. | ||||
*/ | */ | ||||
if (orig_l3 != 0) { | if (orig_l3 != 0) { | ||||
validate: | |||||
orig_l3 = pmap_load_store(l3, new_l3); | orig_l3 = pmap_load_store(l3, new_l3); | ||||
PTE_SYNC(l3); | pmap_invalidate_page(pmap, va); | ||||
KASSERT(PTE_TO_PHYS(orig_l3) == pa, | KASSERT(PTE_TO_PHYS(orig_l3) == pa, | ||||
("pmap_enter: invalid update")); | ("pmap_enter: invalid update")); | ||||
if (pmap_page_dirty(orig_l3) && | if (pmap_page_dirty(orig_l3) && | ||||
(orig_l3 & PTE_SW_MANAGED) != 0) | (orig_l3 & PTE_SW_MANAGED) != 0) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
} else { | } else { | ||||
pmap_load_store(l3, new_l3); | pmap_load_store(l3, new_l3); | ||||
PTE_SYNC(l3); | |||||
} | } | ||||
pmap_invalidate_page(pmap, va); | |||||
if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap)) | |||||
cpu_icache_sync_range(va, PAGE_SIZE); | |||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | rw_runlock(&pvh_global_lock); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
[... 163 lines elided ...] | pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
entry = (PTE_V | PTE_RWX); | entry = (PTE_V | PTE_RWX); | ||||
entry |= (pn << PTE_PPN0_S); | entry |= (pn << PTE_PPN0_S); | ||||
/* | /* | ||||
* Now validate mapping with RO protection | * Now validate mapping with RO protection | ||||
*/ | */ | ||||
if ((m->oflags & VPO_UNMANAGED) == 0) | if ((m->oflags & VPO_UNMANAGED) == 0) | ||||
entry |= PTE_SW_MANAGED; | entry |= PTE_SW_MANAGED; | ||||
/* | |||||
* Sync the i-cache on all harts before updating the PTE | |||||
* if the new PTE is executable. | |||||
*/ | |||||
if (prot & VM_PROT_EXECUTE) | |||||
pmap_sync_icache(pmap, va, PAGE_SIZE); | |||||
pmap_load_store(l3, entry); | pmap_load_store(l3, entry); | ||||
PTE_SYNC(l3); | |||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
return (mpte); | return (mpte); | ||||
} | } | ||||
/* | /* | ||||
* This code maps large physical mmap regions into the | * This code maps large physical mmap regions into the | ||||
* processor address space. Note that some shortcuts | * processor address space. Note that some shortcuts | ||||
* are taken, but the code works. | * are taken, but the code works. | ||||
[... 324 lines elided ...] | */
m, (uintmax_t)m->phys_addr, | m, (uintmax_t)m->phys_addr, | ||||
(uintmax_t)tl3)); | (uintmax_t)tl3)); | ||||
KASSERT((m->flags & PG_FICTITIOUS) != 0 || | KASSERT((m->flags & PG_FICTITIOUS) != 0 || | ||||
m < &vm_page_array[vm_page_array_size], | m < &vm_page_array[vm_page_array_size], | ||||
("pmap_remove_pages: bad l3 %#jx", | ("pmap_remove_pages: bad l3 %#jx", | ||||
(uintmax_t)tl3)); | (uintmax_t)tl3)); | ||||
if (pmap_is_current(pmap) && | |||||
pmap_l3_valid_cacheable(pmap_load(l3))) | |||||
cpu_dcache_wb_range(pv->pv_va, L3_SIZE); | |||||
pmap_load_clear(l3); | pmap_load_clear(l3); | ||||
PTE_SYNC(l3); | |||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
/* | /* | ||||
* Update the vm_page_t clean/reference bits. | * Update the vm_page_t clean/reference bits. | ||||
*/ | */ | ||||
if (pmap_page_dirty(tl3)) | if (pmap_page_dirty(tl3)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
[... 457 lines elided ...] | pmap_activate(struct thread *td)
reg = SATP_MODE_SV39; | reg = SATP_MODE_SV39; | ||||
reg |= (td->td_pcb->pcb_l1addr >> PAGE_SHIFT); | reg |= (td->td_pcb->pcb_l1addr >> PAGE_SHIFT); | ||||
__asm __volatile("csrw sptbr, %0" :: "r"(reg)); | __asm __volatile("csrw sptbr, %0" :: "r"(reg)); | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
critical_exit(); | critical_exit(); | ||||
} | } | ||||
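
pmap_activate() composes the satp value by hand (the diff still uses sptbr, the pre-ratification name of the satp CSR). On RV64 the Sv39 satp layout is MODE in bits 63:60, ASID in bits 59:44, and the root-table PPN in bits 43:0, so assuming SATP_MODE_SV39 is 8UL << 60, the store above amounts to:

	uint64_t reg;

	reg  = 8UL << 60;				/* MODE = Sv39 */
	reg |= td->td_pcb->pcb_l1addr >> PAGE_SHIFT;	/* root PPN, ASID 0 */
	__asm __volatile("csrw sptbr, %0" :: "r" (reg));
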
static void | |||||
pmap_sync_icache_one(void *arg __unused) | |||||
{ | |||||
__asm __volatile("fence.i"); | |||||
} | |||||
void | void | ||||
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) | pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) | ||||
{ | { | ||||
cpuset_t mask; | |||||
/* | /* | ||||
* From the RISC-V User-Level ISA V2.2: | * From the RISC-V User-Level ISA V2.2: | ||||
* | * | ||||
* "To make a store to instruction memory visible to all | * "To make a store to instruction memory visible to all | ||||
* RISC-V harts, the writing hart has to execute a data FENCE | * RISC-V harts, the writing hart has to execute a data FENCE | ||||
* before requesting that all remote RISC-V harts execute a | * before requesting that all remote RISC-V harts execute a | ||||
* FENCE.I." | * FENCE.I." | ||||
*/ | */ | ||||
__asm __volatile("fence"); | sched_pin(); | ||||
smp_rendezvous(NULL, pmap_sync_icache_one, NULL, NULL); | mask = all_cpus; | ||||
CPU_CLR(PCPU_GET(cpuid), &mask); | |||||
fence(); | |||||
sbi_remote_fence_i(mask.__bits); | |||||
sched_unpin(); | |||||
} | } | ||||
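
The rewritten pmap_sync_icache() pairs a local data fence with an SBI request that remote harts execute fence.i, replacing the old smp_rendezvous() IPI. A sketch of that wrapper under the same legacy SBI v0.1 convention assumed earlier (function ID 5 per that spec):

#define	SBI_REMOTE_FENCE_I	5	/* legacy SBI v0.1 function ID */

static __inline void
sbi_remote_fence_i(const u_long *hart_mask)
{
	register uintptr_t a0 __asm ("a0") = (uintptr_t)hart_mask;
	register uintptr_t a7 __asm ("a7") = SBI_REMOTE_FENCE_I;

	/* Ask the SBI to run fence.i on the harts in the mask. */
	__asm __volatile("ecall" : "+r" (a0) : "r" (a7) : "memory");
}
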
/* | /* | ||||
* Increase the starting virtual address of the given mapping if a | * Increase the starting virtual address of the given mapping if a | ||||
* different alignment might result in more superpage mappings. | * different alignment might result in more superpage mappings. | ||||
*/ | */ | ||||
void | void | ||||
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, | pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, | ||||
[... 86 lines elided ...]