sys/i386/i386/pmap.c
[... 41 lines omitted ...]
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
+ * Copyright (c) 2018 The FreeBSD Foundation
+ * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
+ * Portions of this software were developed by
+ * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
+ * the FreeBSD Foundation.
+ *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
[... 51 lines omitted ...]
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
+#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
+#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif
[... 33 lines omitted ...]
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
    atomic_clear_int((u_int *)(pte), PG_W))
#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

struct pmap kernel_pmap_store;

-LIST_HEAD(pmaplist, pmap);
-static struct pmaplist allpmaps;
-static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int pgeflag = 0;		/* PG_G or-in */
int pseflag = 0;		/* PG_PS or-in */

static int nkpt = NKPT;
-vm_offset_t kernel_vm_end = KERNBASE + NKPT * NBPDR;
+vm_offset_t kernel_vm_end = /* 0 + */ NKPT * NBPDR;
-extern u_int32_t KERNend;
-extern u_int32_t KPTphys;

#if defined(PAE) || defined(PAE_TABLES)
pt_entry_t pg_nx;
static uma_zone_t pdptzone;
#endif

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
[... 124 lines omitted ...]
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
#if defined(PAE) || defined(PAE_TABLES)
static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *flags, int wait);
#endif
-static void pmap_set_pg(void);
+static void pmap_init_trm(void);
static __inline void pagezero(void *page);

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
+void pmap_cold(void);
+extern char _end[];
+u_long physfree;	/* phys addr of next free page */
+u_long vm86phystk;	/* PA of vm86/bios stack */
+u_long vm86paddr;	/* address of vm86 region */
+int vm86pa;		/* phys addr of vm86 region */
+u_long KERNend;		/* phys addr end of kernel (just after bss) */
+pd_entry_t *IdlePTD;	/* phys addr of kernel PTD */
+#if defined(PAE) || defined(PAE_TABLES)
+pdpt_entry_t *IdlePDPT;	/* phys addr of kernel PDPT */
+#endif
+pt_entry_t *KPTmap;	/* address of kernel page tables */
+u_long KPTphys;		/* phys addr of kernel page tables */
+
+static u_long
+allocpages(u_int cnt, u_long *physfree)
+{
+	u_long res;
+
+	res = *physfree;
+	*physfree += PAGE_SIZE * cnt;
+	bzero((void *)res, PAGE_SIZE * cnt);
+	return (res);
+}
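Reviewer note: allocpages() is just a bump allocator over the physical pages
following the kernel; each call returns the current free pointer and advances
it by whole zeroed pages. A minimal userspace sketch of the same scheme (the
backing buffer, names, and the 4096-byte page size are stand-ins for
illustration, not kernel API):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Stands in for the physical memory right after the kernel image. */
static unsigned char backing[16 * PAGE_SIZE] __attribute__((aligned(4096)));

static unsigned long
demo_allocpages(unsigned int cnt, unsigned long *physfree)
{
        unsigned long res;

        res = *physfree;                        /* hand out current pointer */
        *physfree += PAGE_SIZE * cnt;           /* bump by whole pages */
        memset((void *)res, 0, PAGE_SIZE * cnt);/* like bzero() in the kernel */
        return (res);
}

int
main(void)
{
        unsigned long physfree = (unsigned long)backing;
        unsigned long kpt = demo_allocpages(4, &physfree); /* like KPTphys */
        unsigned long ptd = demo_allocpages(1, &physfree); /* like IdlePTD */

        printf("kpt at +%lu, ptd at +%lu\n",
            kpt - (unsigned long)backing, ptd - (unsigned long)backing);
        return (0);
}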
+static void
+pmap_cold_map(u_long pa, u_long va, u_long cnt)
+{
+	pt_entry_t *pt;
+
+	for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0;
+	    cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE)
+		*pt = pa | PG_V | PG_RW | PG_A | PG_M;
+}
+
+static void
+pmap_cold_mapident(u_long pa, u_long cnt)
+{
+
+	pmap_cold_map(pa, pa, cnt);
+}
+
+_Static_assert(2 * NBPDR == KERNBASE, "Broken double-map of zero PTD");
-/*
- * If you get an error here, then you set KVA_PAGES wrong!  See the
- * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
- * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE.
- */
-CTASSERT(KERNBASE % (1 << 24) == 0);
+/*
+ * Called from locore.s before paging is enabled.  Sets up the first
+ * kernel page table.  Since kernel is mapped with PA == VA, this code
+ * does not require relocations.
+ */
+void
+pmap_cold(void)
+{
+	pt_entry_t *pt;
+	u_long a;
+	u_int cr3, ncr4;
+
+	physfree = (u_long)&_end;
+	if (bootinfo.bi_esymtab != 0)
+		physfree = bootinfo.bi_esymtab;
+	if (bootinfo.bi_kernend != 0)
+		physfree = bootinfo.bi_kernend;
+	physfree = roundup2(physfree, NBPDR);
+	KERNend = physfree;
+
+	/* Allocate Kernel Page Tables */
+	KPTphys = allocpages(NKPT, &physfree);
+	KPTmap = (pt_entry_t *)KPTphys;
+
+	/* Allocate Page Table Directory */
+#if defined(PAE) || defined(PAE_TABLES)
+	/* XXX only need 32 bytes (easier for now) */
+	IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree);
+#endif
+	IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree);
+
+	/*
+	 * Allocate KSTACK.  Leave a guard page between IdlePTD and
+	 * proc0kstack, to control stack overflow for thread0 and
+	 * prevent corruption of the page table.  We leak the guard
+	 * physical memory due to 1:1 mappings.
+	 */
+	allocpages(1, &physfree);
+	proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree);
+
+	/* vm86/bios stack */
+	vm86phystk = allocpages(1, &physfree);
+
+	/* pgtable + ext + IOPAGES */
+	vm86paddr = vm86pa = allocpages(3, &physfree);
+
+	/* Install page tables into PTD.  Page table page 1 is wasted. */
+	for (a = 0; a < NKPT; a++)
+		IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M;
+
+#if defined(PAE) || defined(PAE_TABLES)
+	/* PAE install PTD pointers into PDPT */
+	for (a = 0; a < NPGPTD; a++)
+		IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V;
+#endif
+
+	/*
+	 * Install recursive mapping for kernel page tables into
+	 * itself.
+	 */
+	for (a = 0; a < NPGPTD; a++)
+		IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V |
+		    PG_RW;
+
+	/*
+	 * Initialize page table pages mapping physical address zero
+	 * through the (physical) end of the kernel.  Many of these
+	 * pages must be reserved, and we reserve them all and map
+	 * them linearly for convenience.  We do this even if we've
+	 * enabled PSE above; we'll just switch the corresponding
+	 * kernel PDEs before we turn on paging.
+	 *
+	 * This and all other page table entries allow read and write
+	 * access for various reasons.  Kernel mappings never have any
+	 * access restrictions.
+	 */
+	pmap_cold_mapident(0, atop(NBPDR));
+	pmap_cold_map(0, NBPDR, atop(NBPDR));
+	pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE));
+
+	/* Map page table directory */
+#if defined(PAE) || defined(PAE_TABLES)
+	pmap_cold_mapident((u_long)IdlePDPT, 1);
+#endif
+	pmap_cold_mapident((u_long)IdlePTD, NPGPTD);
+
+	/* Map proc0kstack */
+	pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES);
+	/* ISA hole already mapped */
+
+	pmap_cold_mapident(vm86phystk, 1);
+	pmap_cold_mapident(vm86pa, 3);
+
+	/* Map page 0 into the vm86 page table */
+	*(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V;
+
+	/* ...likewise for the ISA hole for vm86 */
+	for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0;
+	    a < atop(ISA_HOLE_LENGTH); a++, pt++)
+		*pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A |
+		    PG_M | PG_V;
+
+	/* Enable PSE, PGE, VME, and PAE if configured. */
+	ncr4 = 0;
+	if ((cpu_feature & CPUID_PSE) != 0) {
+		ncr4 |= CR4_PSE;
+		/*
+		 * Superpage mapping of the kernel text.  Existing 4k
+		 * page table pages are wasted.
+		 */
+		for (a = KERNBASE; a < KERNend; a += NBPDR)
+			IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M |
+			    PG_RW | PG_V;
+	}
+	if ((cpu_feature & CPUID_PGE) != 0) {
+		ncr4 |= CR4_PGE;
+		pgeflag = PG_G;
+	}
+	ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0;
+#if defined(PAE) || defined(PAE_TABLES)
+	ncr4 |= CR4_PAE;
+#endif
+	if (ncr4 != 0)
+		load_cr4(rcr4() | ncr4);
+
+	/* Now enable paging */
+#if defined(PAE) || defined(PAE_TABLES)
+	cr3 = (u_int)IdlePDPT;
+#else
+	cr3 = (u_int)IdlePTD;
+#endif
+	load_cr3(cr3);
+	load_cr0(rcr0() | CR0_PG);
+
+	/*
+	 * Now running relocated at KERNBASE where the system is
+	 * linked to run.
+	 */
+
+	/*
+	 * Remove the lowest part of the double mapping of low memory
+	 * to get some null pointer checks.
+	 */
+	IdlePTD[0] = 0;
+	load_cr3(cr3);		/* invalidate TLB */
+}
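Reviewer note: the recursive slot installed above is what makes vtopte() work
later. Because the PTD maps itself at index PTDPTDI, every page-table entry
becomes visible as one flat array of PTEs starting at VA PTDPTDI << PDRSHIFT.
A standalone sketch of that arithmetic with non-PAE i386 constants (4 KB
pages, 4 MB PDEs); the PTDPTDI value here is only an example, the real index
is configuration-dependent:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PDRSHIFT	22
#define EX_PTDPTDI	0x3fdu		/* example recursive slot */

/* Base VA at which all PTEs appear once the PTD maps itself. */
static uint32_t
ptmap_base(uint32_t ptdptdi)
{
        return (ptdptdi << PDRSHIFT);
}

/* vtopte()-style: VA of the PTE that maps `va'. */
static uint32_t
demo_vtopte(uint32_t va)
{
        return (ptmap_base(EX_PTDPTDI) +
            (va >> PAGE_SHIFT) * (uint32_t)sizeof(uint32_t));
}

int
main(void)
{
        uint32_t va = 0x00c12345u;

        printf("PTmap base = 0x%08x\n", (unsigned)ptmap_base(EX_PTDPTDI));
        printf("pte for va 0x%08x sits at 0x%08x\n",
            (unsigned)va, (unsigned)demo_vtopte(va));
        return (0);
}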
/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
+ * in locore.s with the page table created in pmap_cold(),
 * and just syncs the pmap module with what has already been done.
- * [We can't call it easily with mapping off since the kernel is not
- * mapped with PA == VA, hence we would have to relocate every address
- * from the linked base (virtual) address "KERNBASE" to the actual
- * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused;
	struct pcpu *pc;
	int i;
[... 9 lines omitted ...]
	/*
	 * Initialize the first available kernel virtual address.  However,
	 * using "firstaddr" may waste a few pages of the kernel virtual
	 * address space, because locore may not have mapped every physical
	 * page that it allocated.  Preferably, locore would provide a first
	 * unused virtual address in addition to "firstaddr".
	 */
-	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
+	virtual_avail = (vm_offset_t)firstaddr;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
-	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
+	kernel_pmap->pm_pdir = IdlePTD;
#if defined(PAE) || defined(PAE_TABLES)
-	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
+	kernel_pmap->pm_pdpt = IdlePDPT;
#endif
	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");
-	LIST_INIT(&allpmaps);
-	/*
-	 * Request a spin mutex so that changes to allpmaps cannot be
-	 * preempted by smp_rendezvous_cpus().  Otherwise,
-	 * pmap_update_pde_kernel() could access allpmaps while it is
-	 * being changed.
-	 */
-	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
-	mtx_lock_spin(&allpmaps_lock);
-	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
-	mtx_unlock_spin(&allpmaps_lock);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = vtopte(va);
[... 33 lines omitted ...]
	 *
	 * KPTmap is first initialized by locore.  However, that initial
	 * KPTmap can only support NKPT page table pages.  Here, a larger
	 * KPTmap is created that can support KVA_PAGES page table pages.
	 */
	SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)

	for (i = 0; i < NKPT; i++)
-		KPTD[i] = (KPTphys + (i << PAGE_SHIFT)) | pgeflag | PG_RW | PG_V;
+		KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V;

-	/*
-	 * Adjust the start of the KPTD and KPTmap so that the implementation
-	 * of pmap_kextract() and pmap_growkernel() can be made simpler.
-	 */
-	KPTD -= KPTDI;
-	KPTmap -= i386_btop(KPTDI << PDRSHIFT);

	/*
	 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(),
	 * respectively.
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;
-	/*
-	 * Finish removing the identity mapping (virt == phys) of low memory.
-	 * It was only used for 2 instructions in locore.  locore then
-	 * unmapped the first PTD to get some null pointer checks.  ACPI
-	 * wakeup will map the first PTD transiently to use it for 1
-	 * instruction.  The double mapping for low memory is not usable in
-	 * normal operation since it breaks trapping of null pointers and
-	 * causes inconsistencies in page tables when combined with PG_G.
-	 */
-	for (i = 1; i < NKPT; i++)
-		PTD[i] = 0;

	/*
	 * Initialize the PAT MSR if present.
	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
	 * side-effect, invalidates stale PG_G TLB entries that might
	 * have been created in our pre-boot environment.  We assume
	 * that PAT support implies PGE and in reverse, PGE presence
	 * comes with PAT.  Both features were added for Pentium Pro.
	 */
	pmap_init_pat();
-
-	/* Turn on PG_G on kernel page(s) */
-	pmap_set_pg();
}
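Reviewer note: the SYSMAP() macro used throughout pmap_bootstrap() only does
bookkeeping; it carves `n' pages of KVA and remembers both the chosen VA and
the PTE slot that maps it, advancing `va' and `pte' in lockstep. A runnable
mock of exactly that expansion, with uint32_t standing in for pt_entry_t and
arbitrary starting values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Same shape as the kernel macro above. */
#define SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

int
main(void)
{
        uint32_t ptes[8] = { 0 };
        uint32_t va = 0xdead0000u;      /* pretend virtual_avail */
        uint32_t *pte = ptes;           /* pretend vtopte(va) */
        uint32_t *PMAP1, *PMAP2;
        uint32_t PADDR1, PADDR2;

        SYSMAP(uint32_t, PMAP1, PADDR1, 1)
        SYSMAP(uint32_t, PMAP2, PADDR2, 1)

        /* PADDR2 lands one page past PADDR1; PMAP2 one PTE past PMAP1. */
        printf("PADDR1=0x%08x pte slot %td\n", (unsigned)PADDR1, PMAP1 - ptes);
        printf("PADDR2=0x%08x pte slot %td\n", (unsigned)PADDR2, PMAP2 - ptes);
        return (0);
}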
static void
pmap_init_reserved_pages(void)
{
	struct pcpu *pc;
	vm_offset_t pages;
	int i;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
+		mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF |
+		    MTX_NEW);
+		pc->pc_copyout_maddr = kva_alloc(ptoa(2));
+		if (pc->pc_copyout_maddr == 0)
+			panic("unable to allocate non-sleepable copyout KVA");
+		sx_init(&pc->pc_copyout_slock, "cpslk");
+		pc->pc_copyout_saddr = kva_alloc(ptoa(2));
+		if (pc->pc_copyout_saddr == 0)
+			panic("unable to allocate sleepable copyout KVA");
		/*
-		 * Skip if the mapping has already been initialized,
+		 * Skip if the mappings have already been initialized,
		 * i.e. this is the BSP.
		 */
		if (pc->pc_cmap_addr1 != 0)
			continue;
		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
		pages = kva_alloc(PAGE_SIZE * 3);
		if (pages == 0)
-			panic("%s: unable to allocate KVA", __func__);
+			panic("unable to allocate CMAP KVA");
		pc->pc_cmap_pte1 = vtopte(pages);
		pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE);
		pc->pc_cmap_addr1 = (caddr_t)pages;
		pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE);
-		pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
+		pc->pc_qmap_addr = pages + ptoa(2);
	}
}

SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);
/*
 * Setup the PAT MSR.
 */
[... 94 lines omitted ...]
	invltlb();

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);
}
-/*
- * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
- */
-static void
-pmap_set_pg(void)
-{
-	pt_entry_t *pte;
-	vm_offset_t va, endva;
-
-	if (pgeflag == 0)
-		return;
-
-	endva = KERNBASE + KERNend;
-
-	if (pseflag) {
-		va = KERNBASE + roundup2(KERNLOAD, NBPDR);
-		while (va < endva) {
-			pdir_pde(PTD, va) |= pgeflag;
-			invltlb();	/* Flush non-PG_G entries. */
-			va += NBPDR;
-		}
-	} else {
-		va = (vm_offset_t)btext;
-		while (va < endva) {
-			pte = vtopte(va);
-			if (*pte)
-				*pte |= pgeflag;
-			invltlb();	/* Flush non-PG_G entries. */
-			va += PAGE_SIZE;
-		}
-	}
-}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
[... 80 lines omitted ...]
	vm_size_t s;
	int i, pv_npg;

	/*
	 * Initialize the vm page array entries for the kernel pmap's
	 * page table pages.
	 */
	for (i = 0; i < NKPT; i++) {
-		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
+		mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i));
		KASSERT(mpte >= vm_page_array &&
		    mpte < &vm_page_array[vm_page_array_size],
		    ("pmap_init: page table page is out of range"));
		mpte->pindex = i + KPTDI;
-		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
+		mpte->phys_addr = KPTphys + ptoa(i);
	}
	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
[... 54 lines omitted ...]
#if defined(PAE) || defined(PAE_TABLES)
	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif

	pmap_initialized = 1;
+	pmap_init_trm();
+
	if (!bootverbose)
		return;
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
		ppim = pmap_preinit_mapping + i;
		if (ppim->va == 0)
			continue;
		printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
		    (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
	}
}

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
	"Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
	"Page share factor per proc");
[... 51 lines omitted ...]
/*
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
{
	pd_entry_t *pde;
-	pmap_t pmap;
-	boolean_t PTD_updated;

-	PTD_updated = FALSE;
-	mtx_lock_spin(&allpmaps_lock);
-	LIST_FOREACH(pmap, &allpmaps, pm_list) {
-		if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
-		    PG_FRAME))
-			PTD_updated = TRUE;
-		pde = pmap_pde(pmap, va);
-		pde_store(pde, newpde);
-	}
-	mtx_unlock_spin(&allpmaps_lock);
-	KASSERT(PTD_updated,
-	    ("pmap_kenter_pde: current page table is not in allpmaps"));
+	pde = pmap_pde(kernel_pmap, va);
+	pde_store(pde, newpde);
}
/*
 * After changing the page size for the specified virtual address in the page
 * table, flush the corresponding entries from the processor's TLB.  Only the
 * calling processor's TLB is affected.
 *
 * The calling thread must be pinned to a processor.
 */
static void
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
{
-	u_long cr4;

	if ((newpde & PG_PS) == 0)
		/* Demotion: flush a specific 2MB page mapping. */
		invlpg(va);
-	else if ((newpde & PG_G) == 0)
+	else /* if ((newpde & PG_G) == 0) */
		/*
		 * Promotion: flush every 4KB page mapping from the TLB
		 * because there are too many to flush individually.
		 */
		invltlb();
-	else {
-		/*
-		 * Promotion: flush every 4KB page mapping from the TLB,
-		 * including any global (PG_G) mappings.
-		 */
-		cr4 = rcr4();
-		load_cr4(cr4 & ~CR4_PGE);
-		/*
-		 * Although preemption at this point could be detrimental to
-		 * performance, it would not lead to an error.  PG_G is simply
-		 * ignored if CR4.PGE is clear.  Moreover, in case this block
-		 * is re-entered, the load_cr4() either above or below will
-		 * modify CR4.PGE flushing the TLB.
-		 */
-		load_cr4(cr4 | CR4_PGE);
-	}
}
void
invltlb_glob(void)
{
-	uint64_t cr4;

-	if (pgeflag == 0) {
		invltlb();
-	} else {
-		cr4 = rcr4();
-		load_cr4(cr4 & ~CR4_PGE);
-		load_cr4(cr4 | CR4_PGE);
-	}
}
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
[... 13 lines omitted ...]
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t *mask, other_cpus;
	u_int cpuid;

	sched_pin();
-	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
+	if (pmap == kernel_pmap) {
		invlpg(va);
		mask = &all_cpus;
+	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
+		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
-		if (CPU_ISSET(cpuid, &pmap->pm_active))
-			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invlpg(*mask, va, pmap);
	sched_unpin();
}
/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
#define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t *mask, other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
		pmap_invalidate_all(pmap);
		return;
	}

	sched_pin();
-	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
+	if (pmap == kernel_pmap) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		mask = &all_cpus;
+	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
+		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
-		if (CPU_ISSET(cpuid, &pmap->pm_active))
-			for (addr = sva; addr < eva; addr += PAGE_SIZE)
-				invlpg(addr);
		CPU_AND(&other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invlpg_range(*mask, sva, eva, pmap);
	sched_unpin();
}
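Reviewer note: the threshold arithmetic is worth spelling out. 4 * 1024 *
PAGE_SIZE is 4096 pages, i.e. 16 MB with 4 KB pages, so any range that would
take 4096 or more invlpg operations is handed to the full-flush path instead.
A quick standalone check:

#include <stdio.h>

#define PAGE_SIZE		4096ul
#define PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)

int
main(void)
{
        /* Example 20 MB range; addresses are arbitrary. */
        unsigned long sva = 0x1000000ul, eva = 0x2400000ul;

        printf("threshold: %lu bytes (%lu pages)\n",
            PMAP_INVLPG_THRESHOLD, PMAP_INVLPG_THRESHOLD / PAGE_SIZE);
        printf("range of %lu bytes -> %s\n", eva - sva,
            eva - sva >= PMAP_INVLPG_THRESHOLD ?
            "full TLB flush" : "per-page invlpg");
        return (0);
}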
void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t *mask, other_cpus;
	u_int cpuid;

	sched_pin();
	if (pmap == kernel_pmap) {
-		invltlb_glob();
+		invltlb();
		mask = &all_cpus;
	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
-		invltlb();
		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
-		if (CPU_ISSET(cpuid, &pmap->pm_active))
-			invltlb();
		CPU_AND(&other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invltlb(*mask, pmap);
	sched_unpin();
}
void | void | ||||
Show All 14 Lines | struct pde_action { | ||||
u_int store; /* processor that updates the PDE */ | u_int store; /* processor that updates the PDE */ | ||||
}; | }; | ||||
static void | static void | ||||
pmap_update_pde_kernel(void *arg) | pmap_update_pde_kernel(void *arg) | ||||
{ | { | ||||
struct pde_action *act = arg; | struct pde_action *act = arg; | ||||
pd_entry_t *pde; | pd_entry_t *pde; | ||||
pmap_t pmap; | |||||
if (act->store == PCPU_GET(cpuid)) { | if (act->store == PCPU_GET(cpuid)) { | ||||
pde = pmap_pde(kernel_pmap, act->va); | |||||
/* | |||||
* Elsewhere, this operation requires allpmaps_lock for | |||||
* synchronization. Here, it does not because it is being | |||||
* performed in the context of an all_cpus rendezvous. | |||||
*/ | |||||
LIST_FOREACH(pmap, &allpmaps, pm_list) { | |||||
pde = pmap_pde(pmap, act->va); | |||||
pde_store(pde, act->newpde); | pde_store(pde, act->newpde); | ||||
} | } | ||||
} | } | ||||
} | |||||
static void | static void | ||||
pmap_update_pde_user(void *arg) | pmap_update_pde_user(void *arg) | ||||
{ | { | ||||
struct pde_action *act = arg; | struct pde_action *act = arg; | ||||
if (act->store == PCPU_GET(cpuid)) | if (act->store == PCPU_GET(cpuid)) | ||||
pde_store(act->pde, act->newpde); | pde_store(act->pde, act->newpde); | ||||
▲ Show 20 Lines • Show All 56 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Normal, non-SMP, 486+ invalidation functions. | * Normal, non-SMP, 486+ invalidation functions. | ||||
* We inline these within pmap.c for speed. | * We inline these within pmap.c for speed. | ||||
*/ | */ | ||||
PMAP_INLINE void | PMAP_INLINE void | ||||
pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) | if (pmap == kernel_pmap) | ||||
invlpg(va); | invlpg(va); | ||||
} | } | ||||
PMAP_INLINE void | PMAP_INLINE void | ||||
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
vm_offset_t addr; | vm_offset_t addr; | ||||
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) | if (pmap == kernel_pmap) | ||||
for (addr = sva; addr < eva; addr += PAGE_SIZE) | for (addr = sva; addr < eva; addr += PAGE_SIZE) | ||||
invlpg(addr); | invlpg(addr); | ||||
} | } | ||||
PMAP_INLINE void | PMAP_INLINE void | ||||
pmap_invalidate_all(pmap_t pmap) | pmap_invalidate_all(pmap_t pmap) | ||||
{ | { | ||||
if (pmap == kernel_pmap) | if (pmap == kernel_pmap) | ||||
invltlb_glob(); | |||||
else if (!CPU_EMPTY(&pmap->pm_active)) | |||||
invltlb(); | invltlb(); | ||||
} | } | ||||
PMAP_INLINE void | PMAP_INLINE void | ||||
pmap_invalidate_cache(void) | pmap_invalidate_cache(void) | ||||
{ | { | ||||
wbinvd(); | wbinvd(); | ||||
[... 115 lines omitted ...]
/*
 * Are we current address space or kernel?
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

-	return (pmap == kernel_pmap || pmap ==
-	    vmspace_pmap(curthread->td_proc->p_vmspace));
+	return (pmap == kernel_pmap);
}
/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
[... 181 lines omitted ...]
 * This function may be used before pmap_bootstrap() is called.
 */
PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	pt_entry_t *pte;

	pte = vtopte(va);
-	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
+	pte_store(pte, pa | PG_RW | PG_V);
}
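Reviewer note: since a 4 KB-aligned frame address has zero low 12 bits, the
PTE is built by simply OR-ing flag bits into them; PG_V (0x001) and PG_RW
(0x002) are the architectural x86 valid and writable bits. A small standalone
demonstration of the composition pmap_kenter() performs (constants inlined
for illustration):

#include <stdio.h>
#include <stdint.h>

#define PG_V	 0x001u		/* x86: entry is valid */
#define PG_RW	 0x002u		/* x86: entry is writable */
#define PG_FRAME 0xfffff000u	/* frame mask, non-PAE 32-bit PTE */

int
main(void)
{
        uint32_t pa = 0x01234000u;		/* page-aligned frame */
        uint32_t pte = pa | PG_RW | PG_V;	/* as in pmap_kenter() */

        printf("pte = 0x%08x, frame = 0x%08x, valid=%u writable=%u\n",
            (unsigned)pte, (unsigned)(pte & PG_FRAME),
            (unsigned)(pte & PG_V), (unsigned)((pte & PG_RW) >> 1));
        return (0);
}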
static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{
	pt_entry_t *pte;

	pte = vtopte(va);
-	pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
+	pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(mode, 0));
}
/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
[... 42 lines omitted ...]
		va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset;
	}
	sva = va;
	while (start < end) {
		if ((start & PDRMASK) == 0 && end - start >= NBPDR &&
		    pseflag) {
			KASSERT((va & PDRMASK) == 0,
			    ("pmap_map: misaligned va %#x", va));
-			newpde = start | PG_PS | pgeflag | PG_RW | PG_V;
+			newpde = start | PG_PS | PG_RW | PG_V;
			pmap_kenter_pde(va, newpde);
			va += NBPDR;
			start += NBPDR;
		} else {
			pmap_kenter(va, start);
			va += PAGE_SIZE;
			start += PAGE_SIZE;
		}
[... 23 lines omitted ...]
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		m = *ma++;
		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
			oldpte |= *pte;
#if defined(PAE) || defined(PAE_TABLES)
-			pte_store(pte, pa | pgeflag | pg_nx | PG_RW | PG_V);
+			pte_store(pte, pa | pg_nx | PG_RW | PG_V);
#else
-			pte_store(pte, pa | pgeflag | PG_RW | PG_V);
+			pte_store(pte, pa | PG_RW | PG_V);
#endif
		}
		pte++;
	}
	if (__predict_false((oldpte & PG_V) != 0))
		pmap_invalidate_range(kernel_pmap, sva, sva + count *
		    PAGE_SIZE);
}
[... 112 lines omitted ...]
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
	pd_entry_t ptepde;
	vm_page_t mpte;

-	if (va >= VM_MAXUSER_ADDRESS)
+	if (pmap == kernel_pmap)
		return (0);
	ptepde = *pmap_pde(pmap, va);
	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
	return (pmap_unwire_ptp(pmap, mpte, free));
}
/*
 * Initialize the pmap for the swapper process.
 */
void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
-	/*
-	 * Since the page table directory is shared with the kernel pmap,
-	 * which is already included in the list "allpmaps", this pmap does
-	 * not need to be inserted into that list.
-	 */
-	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
+	pmap->pm_pdir = IdlePTD;
#if defined(PAE) || defined(PAE_TABLES)
-	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
+	pmap->pm_pdpt = IdlePDPT;
#endif
	pmap->pm_root.rt_root = 0;
	CPU_ZERO(&pmap->pm_active);
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
-	vm_page_t m, ptdpg[NPGPTD];
-	vm_paddr_t pa;
+	vm_page_t m;
	int i;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
[... 13 lines omitted ...]
("pmap_pinit: pmap has reserved page table page(s)")); | ("pmap_pinit: pmap has reserved page table page(s)")); | ||||
/* | /* | ||||
* allocate the page directory page(s) | * allocate the page directory page(s) | ||||
*/ | */ | ||||
for (i = 0; i < NPGPTD;) { | for (i = 0; i < NPGPTD;) { | ||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO); | VM_ALLOC_WIRED | VM_ALLOC_ZERO); | ||||
if (m == NULL) | if (m == NULL) { | ||||
vm_wait(NULL); | vm_wait(NULL); | ||||
else | } else { | ||||
ptdpg[i++] = m; | pmap->pm_ptdpg[i] = m; | ||||
#if defined(PAE) || defined(PAE_TABLES) | |||||
pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(m) | PG_V; | |||||
#endif | |||||
i++; | |||||
} | } | ||||
} | |||||
pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); | pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); | ||||
for (i = 0; i < NPGPTD; i++) | for (i = 0; i < NPGPTD; i++) | ||||
if ((ptdpg[i]->flags & PG_ZERO) == 0) | if ((pmap->pm_ptdpg[i]->flags & PG_ZERO) == 0) | ||||
pagezero(pmap->pm_pdir + (i * NPDEPG)); | pagezero(pmap->pm_pdir + (i * NPDEPG)); | ||||
mtx_lock_spin(&allpmaps_lock); | /* Install the trampoline mapping. */ | ||||
LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); | pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; | ||||
/* Copy the kernel page table directory entries. */ | |||||
bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); | |||||
mtx_unlock_spin(&allpmaps_lock); | |||||
/* install self-referential address mapping entry(s) */ | |||||
for (i = 0; i < NPGPTD; i++) { | |||||
pa = VM_PAGE_TO_PHYS(ptdpg[i]); | |||||
pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M; | |||||
#if defined(PAE) || defined(PAE_TABLES) | |||||
pmap->pm_pdpt[i] = pa | PG_V; | |||||
#endif | |||||
} | |||||
CPU_ZERO(&pmap->pm_active); | CPU_ZERO(&pmap->pm_active); | ||||
TAILQ_INIT(&pmap->pm_pvchunk); | TAILQ_INIT(&pmap->pm_pvchunk); | ||||
bzero(&pmap->pm_stats, sizeof pmap->pm_stats); | bzero(&pmap->pm_stats, sizeof pmap->pm_stats); | ||||
return (1); | return (1); | ||||
} | } | ||||
/*
[... 95 lines omitted ...]
/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
-	vm_page_t m, ptdpg[NPGPTD];
+	vm_page_t m;
	int i;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(vm_radix_is_empty(&pmap->pm_root),
	    ("pmap_release: pmap has reserved page table page(s)"));
	KASSERT(CPU_EMPTY(&pmap->pm_active),
	    ("releasing active pmap %p", pmap));

-	mtx_lock_spin(&allpmaps_lock);
-	LIST_REMOVE(pmap, pm_list);
-	mtx_unlock_spin(&allpmaps_lock);

-	for (i = 0; i < NPGPTD; i++)
-		ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
-		    PG_FRAME);

-	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
-	    sizeof(*pmap->pm_pdir));

	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);

	for (i = 0; i < NPGPTD; i++) {
-		m = ptdpg[i];
+		m = pmap->pm_ptdpg[i];
#if defined(PAE) || defined(PAE_TABLES)
		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
		    ("pmap_release: got wrong ptd page"));
#endif
		vm_page_unwire_noq(m);
-		vm_page_free_zero(m);
+		vm_page_free(m);
	}
}
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
[... 43 lines omitted ...]
panic("pmap_growkernel: no memory to grow kernel"); | panic("pmap_growkernel: no memory to grow kernel"); | ||||
nkpt++; | nkpt++; | ||||
if ((nkpg->flags & PG_ZERO) == 0) | if ((nkpg->flags & PG_ZERO) == 0) | ||||
pmap_zero_page(nkpg); | pmap_zero_page(nkpg); | ||||
ptppaddr = VM_PAGE_TO_PHYS(nkpg); | ptppaddr = VM_PAGE_TO_PHYS(nkpg); | ||||
newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); | newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); | ||||
pdir_pde(KPTD, kernel_vm_end) = pgeflag | newpdir; | pdir_pde(KPTD, kernel_vm_end) = newpdir; | ||||
pmap_kenter_pde(kernel_vm_end, newpdir); | pmap_kenter_pde(kernel_vm_end, newpdir); | ||||
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; | kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; | ||||
if (kernel_vm_end - 1 >= kernel_map->max_offset) { | if (kernel_vm_end - 1 >= kernel_map->max_offset) { | ||||
kernel_vm_end = kernel_map->max_offset; | kernel_vm_end = kernel_map->max_offset; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
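Reviewer note: the growth step rounds kernel_vm_end up to the next
page-directory boundary with mask arithmetic. A quick standalone check of
that rounding, assuming non-PAE constants (4 MB PDEs):

#include <stdio.h>

#define PDRSHIFT 22
#define NBPDR	(1ul << PDRSHIFT)	/* 4 MB */
#define PDRMASK	(NBPDR - 1)

int
main(void)
{
        unsigned long kve = 0x00d23000ul;	/* arbitrary mid-PDE address */

        /* Same expression as in pmap_growkernel(). */
        kve = (kve + NBPDR) & ~PDRMASK;
        printf("next boundary: 0x%08lx\n", kve);	/* prints 0x01000000 */
        return (0);
}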
[... 541 lines omitted ...]
		pmap_remove_pde(pmap, pde, sva, &free);
		if ((oldpde & PG_G) == 0)
			pmap_invalidate_pde_page(pmap, sva, oldpde);
		vm_page_free_pages_toq(&free, true);
		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
		    " in pmap %p", va, pmap);
		return (FALSE);
		}
-		if (va < VM_MAXUSER_ADDRESS)
+		if (pmap != kernel_pmap)
			pmap->pm_stats.resident_count++;
	}
	mptepa = VM_PAGE_TO_PHYS(mpte);

	/*
	 * If the page mapping is in the kernel's address space, then the
	 * KPTmap can provide access to the page table page.  Otherwise,
	 * temporarily map the page table page (mpte) into the kernel's
	 * address space at either PADDR1 or PADDR2.
	 */
-	if (va >= KERNBASE)
+	if (pmap == kernel_pmap)
		firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
	else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
		if ((*PMAP1 & PG_FRAME) != mptepa) {
			*PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
#ifdef SMP
			PMAP1cpu = PCPU_GET(cpuid);
#endif
			invlcaddr(PADDR1);
[... 778 lines omitted ...]
	vm_paddr_t opa, pa;
	vm_page_t mpte, om;
	boolean_t invlva, wired;

	va = trunc_page(va);
	mpte = NULL;
	wired = (flags & PMAP_ENTER_WIRED) != 0;
-	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
-	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
-	    va));
+	KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) ||
+	    (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS),
+	    ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va));
+	KASSERT(va < PMAP_TRM_MIN_ADDRESS,
+	    ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)",
+	    va));
	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	sched_pin();

	pde = pmap_pde(pmap, va);
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (pmap != kernel_pmap) {
		/*
		 * va is for UVA.
		 * In the case that a page table page is not resident,
		 * we are creating it here.  pmap_allocpte() handles
		 * demotion.
		 */
		mpte = pmap_allocpte(pmap, va, flags);
		if (mpte == NULL) {
[... 82 lines omitted ...]
		}
	} else
		pmap->pm_stats.resident_count++;

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
-		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+		KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
+		    va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		if (pv == NULL)
			pv = get_pv_entry(pmap, FALSE);
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
		pa |= PG_MANAGED;
	} else if (pv != NULL)
		free_pv_entry(pmap, pv);
[... 15 lines not shown; context: if ((newpte & PG_MANAGED) != 0) ...]
			vm_page_aflag_set(m, PGA_WRITEABLE);
	}
#if defined(PAE) || defined(PAE_TABLES)
	if ((prot & VM_PROT_EXECUTE) == 0)
		newpte |= pg_nx;
#endif
	if (wired)
		newpte |= PG_W;
-	if (va < VM_MAXUSER_ADDRESS)
+	if (pmap != kernel_pmap)
		newpte |= PG_U;
-	if (pmap == kernel_pmap)
-		newpte |= pgeflag;
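	/*
	 * Note on the removal above: ordinary kernel mappings are no
	 * longer created global (PG_G) here.  With the 4/4 split, kernel
	 * pages are absent from user page tables, and a global TLB entry
	 * would presumably outlive the %cr3 switch on kernel exit; only
	 * the shared trampoline pages (see pmap_trm_import() below, which
	 * still applies pgeflag) remain global.
	 */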
	/*
	 * If the mapping or permission bits are different, we need
	 * to update the PTE.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		newpte |= PG_A;
		if ((flags & VM_PROT_WRITE) != 0)
[... 168 lines not shown ...]
static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;
	struct spglist free;

-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-	    (m->oflags & VPO_UNMANAGED) != 0,
+	KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
+	    va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (pmap != kernel_pmap) {
		u_int ptepindex;
		pd_entry_t ptepa;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;
		if (mpte && (mpte->pindex == ptepindex)) {
[... 19 lines not shown; context: if (mpte && (mpte->pindex == ptepindex)) { ...]
				if (mpte == NULL)
					return (mpte);
			}
		}
	} else {
		mpte = NULL;
	}
-	/*
-	 * This call to vtopte makes the assumption that we are
-	 * entering the page into the current pmap. In order to support
-	 * quick entry into any pmap, one would likely use pmap_pte_quick.
-	 * But that isn't as quick as vtopte.
-	 */
-	pte = vtopte(va);
+	/* XXXKIB: pmap_pte_quick() instead ? */
+	pte = pmap_pte(pmap, va);
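	/*
	 * Unlike vtopte(), pmap_pte() works for any pmap, not just the
	 * current one, by temporarily mapping the page table page through
	 * the PMAP2/PADDR2 slot.  Each lookup must therefore be paired
	 * with pmap_pte_release(), which the added calls on the return
	 * paths below provide.
	 */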
	if (*pte) {
		if (mpte != NULL) {
			mpte->wire_count--;
			mpte = NULL;
		}
+		pmap_pte_release(pte);
		return (mpte);
	}
	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, va, m)) {
		if (mpte != NULL) {
			SLIST_INIT(&free);
			if (pmap_unwire_ptp(pmap, mpte, &free)) {
				pmap_invalidate_page(pmap, va);
				vm_page_free_pages_toq(&free, true);
			}
			mpte = NULL;
		}
+		pmap_pte_release(pte);
		return (mpte);
	}
	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
#if defined(PAE) || defined(PAE_TABLES)
	if ((prot & VM_PROT_EXECUTE) == 0)
		pa |= pg_nx;
#endif

	/*
	 * Now validate mapping with RO protection
	 */
	if ((m->oflags & VPO_UNMANAGED) != 0)
		pte_store(pte, pa | PG_V | PG_U);
	else
		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+	pmap_pte_release(pte);
	return (mpte);
}
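/*
 * Usage sketch (not part of this change): callers that enter a run of
 * pages, such as pmap_enter_object(), thread the returned page table
 * page through successive calls so that the PTP lookup and wiring are
 * amortized:
 *
 *	mpte = NULL;
 *	for (each page m at va) {
 *		mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
 *		...
 *	}
 */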
/*
 * Make a temporary mapping for a physical address. This is only intended
 * to be used for panic dumps.
 */
void *
[... 175 lines not shown ...]
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
- * This routine is only advisory and need not do anything.
+ * This routine is only advisory and need not do anything.  Since the
+ * current pmap is always the kernel pmap while executing in the kernel,
+ * and since we never copy from the kernel pmap into a user pmap, this
+ * optimization is unusable in the 4/4G full-split i386 world.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{
-	struct spglist free;
-	vm_offset_t addr;
-	vm_offset_t end_addr = src_addr + len;
-	vm_offset_t pdnxt;
-
-	if (dst_addr != src_addr)
-		return;
-
-	if (!pmap_is_current(src_pmap))
-		return;
-
-	rw_wlock(&pvh_global_lock);
-	if (dst_pmap < src_pmap) {
-		PMAP_LOCK(dst_pmap);
-		PMAP_LOCK(src_pmap);
-	} else {
-		PMAP_LOCK(src_pmap);
-		PMAP_LOCK(dst_pmap);
-	}
-	sched_pin();
-	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
-		pt_entry_t *src_pte, *dst_pte;
-		vm_page_t dstmpte, srcmpte;
-		pd_entry_t srcptepaddr;
-		u_int ptepindex;
-
-		KASSERT(addr < UPT_MIN_ADDRESS,
-		    ("pmap_copy: invalid to pmap_copy page tables"));
-
-		pdnxt = (addr + NBPDR) & ~PDRMASK;
-		if (pdnxt < addr)
-			pdnxt = end_addr;
-		ptepindex = addr >> PDRSHIFT;
-
-		srcptepaddr = src_pmap->pm_pdir[ptepindex];
-		if (srcptepaddr == 0)
-			continue;
-
-		if (srcptepaddr & PG_PS) {
-			if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
-				continue;
-			if (dst_pmap->pm_pdir[ptepindex] == 0 &&
-			    ((srcptepaddr & PG_MANAGED) == 0 ||
-			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
-			    PG_PS_FRAME))) {
-				dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
-				    ~PG_W;
-				dst_pmap->pm_stats.resident_count +=
-				    NBPDR / PAGE_SIZE;
-				pmap_pde_mappings++;
-			}
-			continue;
-		}
-
-		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
-		KASSERT(srcmpte->wire_count > 0,
-		    ("pmap_copy: source page table page is unused"));
-
-		if (pdnxt > end_addr)
-			pdnxt = end_addr;
-
-		src_pte = vtopte(addr);
-		while (addr < pdnxt) {
-			pt_entry_t ptetemp;
-
-			ptetemp = *src_pte;
-			/*
-			 * we only virtual copy managed pages
-			 */
-			if ((ptetemp & PG_MANAGED) != 0) {
-				dstmpte = pmap_allocpte(dst_pmap, addr,
-				    PMAP_ENTER_NOSLEEP);
-				if (dstmpte == NULL)
-					goto out;
-				dst_pte = pmap_pte_quick(dst_pmap, addr);
-				if (*dst_pte == 0 &&
-				    pmap_try_insert_pv_entry(dst_pmap, addr,
-				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
-					/*
-					 * Clear the wired, modified, and
-					 * accessed (referenced) bits
-					 * during the copy.
-					 */
-					*dst_pte = ptetemp & ~(PG_W | PG_M |
-					    PG_A);
-					dst_pmap->pm_stats.resident_count++;
-				} else {
-					SLIST_INIT(&free);
-					if (pmap_unwire_ptp(dst_pmap, dstmpte,
-					    &free)) {
-						pmap_invalidate_page(dst_pmap,
-						    addr);
-						vm_page_free_pages_toq(&free,
-						    true);
-					}
-					goto out;
-				}
-				if (dstmpte->wire_count >= srcmpte->wire_count)
-					break;
-			}
-			addr += PAGE_SIZE;
-			src_pte++;
-		}
-	}
-out:
-	sched_unpin();
-	rw_wunlock(&pvh_global_lock);
-	PMAP_UNLOCK(src_pmap);
-	PMAP_UNLOCK(dst_pmap);
}
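/*
 * With its body deleted, pmap_copy() reduces to an empty advisory stub;
 * after this change the function is effectively:
 *
 *	void
 *	pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 *	    vm_size_t len, vm_offset_t src_addr)
 *	{
 *	}
 */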
/*
 * Zero 1 page of virtual memory mapped from a hardware page by the caller.
 */
static __inline void
pagezero(void *page)
{
#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686) {
		if (cpu_feature & CPUID_SSE2)
[... 296 lines not shown; context: for (field = 0; field < _NPCM; field++) { ...]
				bitmask = 1UL << bit;
				idx = field * 32 + bit;
				pv = &pc->pc_pventry[idx];
				inuse &= ~bitmask;

				pte = pmap_pde(pmap, pv->pv_va);
				tpte = *pte;
				if ((tpte & PG_PS) == 0) {
-					pte = vtopte(pv->pv_va);
+					pte = pmap_pte_quick(pmap, pv->pv_va);
					tpte = *pte & ~PG_PTE_PAT;
				}

				if (tpte == 0) {
					printf(
					    "TPTE at %p IS ZERO @ VA %08x\n",
					    pte, pv->pv_va);
					panic("bad pte");
[... 149 lines not shown; context: pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) ...]
	pd_entry_t *pde;
	pt_entry_t *pte;
	boolean_t rv;

	rv = FALSE;
	PMAP_LOCK(pmap);
	pde = pmap_pde(pmap, addr);
	if (*pde != 0 && (*pde & PG_PS) == 0) {
-		pte = vtopte(addr);
+		pte = pmap_pte(pmap, addr);
+		if (pte != NULL)
			rv = *pte == 0;
+		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}
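/*
 * In other words, a prefault is worthwhile only when the 4KB page is
 * not yet mapped but its page table page already exists: a valid,
 * non-PG_PS PDE with an empty PTE beneath it.
 */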
/*
 * pmap_is_referenced:
 *
[... 485 lines not shown; context: pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) ...]
	vm_offset_t va, offset;
	vm_size_t tmpsize;
	int i;

	offset = pa & PAGE_MASK;
	size = round_page(offset + size);
	pa = pa & PG_FRAME;

-	if (pa < KERNLOAD && pa + size <= KERNLOAD)
-		va = KERNBASE + pa;
+	if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW)
+		va = pa + PMAP_MAP_LOW;
	else if (!pmap_initialized) {
		va = 0;
		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
			ppim = pmap_preinit_mapping + i;
			if (ppim->va == 0) {
				ppim->pa = pa;
				ppim->sz = size;
				ppim->mode = mode;
[... 42 lines not shown ...]
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	struct pmap_preinit_mapping *ppim;
	vm_offset_t offset;
	int i;

-	if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
+	if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE)
		return;
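	/*
	 * Addresses in [PMAP_MAP_LOW, KERNBASE) come from the static
	 * low mapping installed by pmap_mapdev_attr() above
	 * (va = pa + PMAP_MAP_LOW); they consumed no kva, so there is
	 * nothing to tear down.
	 */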
	offset = va & PAGE_MASK;
	size = round_page(offset + size);
	va = trunc_page(va);
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
		ppim = pmap_preinit_mapping + i;
		if (ppim->va == va && ppim->sz == size) {
			if (pmap_initialized)
[... 280 lines not shown; context: #if defined(PAE) || defined(PAE_TABLES) ...]
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is for the current thread on the current cpu
	 */
	td->td_pcb->pcb_cr3 = cr3;
-	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}
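/*
 * Note that the direct load_cr3() is gone: pmap_activate() now only
 * records the new %cr3 value in the PCB and updates curpmap.  With the
 * 4/4 split the user page table base is presumably installed later, on
 * the trampoline path that returns to user mode, using pcb_cr3.
 */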
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}
[... 49 lines not shown; context: pmap_quick_remove_page(vm_offset_t addr) ...]
	qaddr = PCPU_GET(qmap_addr);
	pte = vtopte(qaddr);

	KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
	KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));

	*pte = 0;
	critical_exit();
}
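/*
 * Allocator for the trampoline address range, [PMAP_TRM_MIN_ADDRESS,
 * PMAP_TRM_MAX_ADDRESS) (see also the trampoline KASSERT in
 * pmap_enter() above).  The range must be visible in every address
 * space, so its backing pages are managed by a dedicated vmem arena
 * rather than by the regular kva allocator.
 */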
+static vmem_t *pmap_trm_arena;
+static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS;
+static int trm_guard = PAGE_SIZE;
+
+static int
+pmap_trm_import(void *unused __unused, vmem_size_t size, int flags,
+    vmem_addr_t *addrp)
+{
+	vm_page_t m;
+	vmem_addr_t af, addr, prev_addr;
+	pt_entry_t *trm_pte;
+
+	prev_addr = atomic_load_long(&pmap_trm_arena_last);
+	size = round_page(size) + trm_guard;
+	for (;;) {
+		if (prev_addr + size < prev_addr || prev_addr + size < size ||
+		    prev_addr + size > PMAP_TRM_MAX_ADDRESS)
+			return (ENOMEM);
+		addr = prev_addr + size;
+		if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr))
+			break;
+	}
+	prev_addr += trm_guard;
+	trm_pte = PTmap + atop(prev_addr);
+	for (af = prev_addr; af < addr; af += PAGE_SIZE) {
+		m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
+		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
+		pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
+		    PG_M | PG_A | PG_RW | PG_V | pgeflag |
+		    pmap_cache_bits(VM_MEMATTR_DEFAULT, FALSE));
+	}
+	*addrp = prev_addr;
+	return (0);
+}
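/*
 * Design note: the import path above is a lock-free bump allocator.
 * atomic_fcmpset_int() advances pmap_trm_arena_last monotonically, so
 * trampoline address space is never reused.  Because prev_addr is
 * advanced past trm_guard before any PTE is stored, each imported
 * chunk is preceded by an unmapped guard of trm_guard bytes.
 */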
+static void
+pmap_init_trm(void)
+{
+	vm_page_t pd_m;
+
+	TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard);
+	if ((trm_guard & PAGE_MASK) != 0)
+		trm_guard = 0;
+	pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
+	vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
+	pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
+	if ((pd_m->flags & PG_ZERO) == 0)
+		pmap_zero_page(pd_m);
+	PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
+	    pmap_cache_bits(VM_MEMATTR_DEFAULT, TRUE);
+}
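/*
 * The PTD[TRPTDI] store above wires the trampoline page directory page
 * into the kernel page directory; presumably each user pmap receives a
 * copy of this PDE when it is created, so a single page of PTEs covers
 * the trampoline range in all address spaces.
 */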
+void *
+pmap_trm_alloc(size_t size, int flags)
+{
+	vmem_addr_t res;
+	int error;
+
+	MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0);
+	error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int),
+	    0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res);
+	if (error != 0)
+		return (NULL);
+	return ((void *)res);
+}
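/*
 * Example use (hypothetical names): a caller placing a small stub in
 * the trampoline region might do
 *
 *	stub = pmap_trm_alloc(stub_size, M_WAITOK);
 *	bcopy(stub_template, stub, stub_size);
 *
 * and later release it with pmap_trm_free(stub, stub_size).  Sizes are
 * rounded up to 4 bytes and aligned to sizeof(int) by the vmem_xalloc()
 * call above.
 */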
+void
+pmap_trm_free(void *addr, size_t size)
+{
+	vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4));
+}
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
[... 54 lines not shown ...]