Changeset View
Standalone View
sys/amd64/include/pmap.h
Show First 20 Lines • Show All 190 Lines • ▼ Show 20 Lines | |||||
* Number of kernel PML4 slots. Can be anywhere from 1 to 64 or so, | * Number of kernel PML4 slots. Can be anywhere from 1 to 64 or so, | ||||
* but setting it larger than NDMPML4E makes no sense. | * but setting it larger than NDMPML4E makes no sense. | ||||
* | * | ||||
* Each slot provides .5 TB of kernel virtual space. | * Each slot provides .5 TB of kernel virtual space. | ||||
*/ | */ | ||||
#define NKPML4E 4 | #define NKPML4E 4 | ||||
/*
 * Number of PML4 slots for the KASAN shadow map.  It requires 1 byte of memory
 * for every 8 bytes of the kernel address space.
 */
/* (NKPML4E + 7) / 8 rounds up, so at least one slot is always reserved. */
#define	NKASANPML4E	((NKPML4E + 7) / 8)
kib: In fact this is a lot of memory.
I wonder if KASAN should imply some reduction in the sizing of the kernel maps. Or, is the idea that with KASAN we usually do not survive far enough for this to matter? | |||||
markjAuthorUnsubmitted Done Inline ActionsWell, the shadow map is grown lazily based on demand for KVA. NKASANPML4E is just the number of reserved slots. Which sizes are you referring to exactly? I was surprised that Peter did not manage to trigger any panics due to OOM conditions in pmap_growkernel() while testing the patch. Might be it is more of a theoretical concern for now. The only time I see panics in pmap_growkernel() is with kernel memory leaks or some kind of overcommit, e.g., something requests an absurdly large buffer with malloc(9). markj: Well, the shadow map is grown lazily based on demand for KVA. NKASANPML4E is just the number of… | |||||
kibUnsubmitted Not Done Inline ActionsFor instance, clean map, buffer cache (number of buffers) + transient map sizing, kernel map itself. They all are sized based on amount of physical memory. For instance, on mid-range modern machine with 128G, 1/8 is 16G, which is significant. Sure, real population of these maps is dynamic, and we probably do not grow simultaneously in all mappings, also enough memory is consumed by userspace which provides enough safety buffer. But still it is a large error to over-estimate the amount of available memory by 1/8. [I do not suggest that this is a blocker] kib: For instance, clean map, buffer cache (number of buffers) + transient map sizing, kernel map… | |||||
markjAuthorUnsubmitted Done Inline ActionsI made some changes to scale a few constants appropriately. I am not sure if it is enough to fully alleviate the problem. markj: I made some changes to scale a few constants appropriately. I am not sure if it is enough to… | |||||
/* | |||||
* We use the same numbering of the page table pages for 5-level and | * We use the same numbering of the page table pages for 5-level and | ||||
* 4-level paging structures. | * 4-level paging structures. | ||||
*/ | */ | ||||
#define NUPML5E (NPML5EPG / 2) /* number of userland PML5 | #define NUPML5E (NPML5EPG / 2) /* number of userland PML5 | ||||
pages */ | pages */ | ||||
#define NUPML4E (NUPML5E * NPML4EPG) /* number of userland PML4 | #define NUPML4E (NUPML5E * NPML4EPG) /* number of userland PML4 | ||||
pages */ | pages */ | ||||
#define NUPDPE (NUPML4E * NPDPEPG) /* number of userland PDP | #define NUPDPE (NUPML4E * NPDPEPG) /* number of userland PDP | ||||
Show All 31 Lines | |||||
#define	PML5PML5I	(NPML5EPG / 2)	/* Index of recursive pml5 mapping */

#define	KPML4BASE	(NPML4EPG-NKPML4E)	/* KVM at highest addresses */
#define	DMPML4I		rounddown(KPML4BASE-NDMPML4E, NDMPML4E)	/* Below KVM */
#define	KPML4I		(NPML4EPG-1)
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
/* The KASAN shadow map occupies the NKASANPML4E slots just below the DMAP. */
#define	KASANPML4I	(DMPML4I - NKASANPML4E)	/* Below the direct map */

/* Large map: index of the first and max last pml4 entry */
#define	LMSPML4I	(PML4PML4I + 1)
/* Large map now ends below the KASAN shadow map rather than below the DMAP. */
#define	LMEPML4I	(KASANPML4I - 1)
/*
 * XXX doesn't really belong here I guess...
 */
/* Legacy PC memory hole: 0xa0000 (640KB) up to 0x100000 (1MB). */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

/* Sentinel PCID value; NOTE(review): presumably "no PCID allocated" — confirm. */
#define	PMAP_PCID_NONE	0xffffffff
▲ Show 20 Lines • Show All 239 Lines • ▼ Show 20 Lines | |||||
void	pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
	    vm_offset_t eva);
int	pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
int	pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	    u_int keyidx, int flags);
void	pmap_thread_init_invl_gen(struct thread *td);
int	pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap);
void	pmap_page_array_startup(long count);
#ifdef KASAN
/*
 * NOTE(review): presumably ensures shadow-map backing is present for the page
 * containing the given KVA — confirm against the implementation in pmap.c.
 */
void	pmap_kasan_enter(vm_offset_t);
#endif
#endif	/* _KERNEL */
/* Return various clipped indexes for a given VA */ | /* Return various clipped indexes for a given VA */ | ||||
static __inline vm_pindex_t | static __inline vm_pindex_t | ||||
pmap_pte_index(vm_offset_t va) | pmap_pte_index(vm_offset_t va) | ||||
{ | { | ||||
return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); | return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); | ||||
Show All 33 Lines |
In fact this is a lot of memory.
I wonder if KASAN should imply some reduction in the sizing of the kernel maps. Or, is the idea that with KASAN we usually not survive far enough for this to matter?