diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -481,6 +481,8 @@
 
 struct kva_layout_s kva_layout = {
 	.kva_min = KV4ADDR(PML4PML4I, 0, 0, 0),
+	.kva_max = KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
+	    NPDEPG - 1, NPTEPG - 1),
 	.dmap_low = KV4ADDR(DMPML4I, 0, 0, 0),
 	.dmap_high = KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0),
 	.lm_low = KV4ADDR(LMSPML4I, 0, 0, 0),
@@ -489,10 +491,20 @@
 	.km_high = KV4ADDR(KPML4BASE + NKPML4E - 1, NPDPEPG - 1,
 	    NPDEPG - 1, NPTEPG - 1),
 	.rec_pt = KV4ADDR(PML4PML4I, 0, 0, 0),
+	.kasan_shadow_low = KV4ADDR(KASANPML4I, 0, 0, 0),
+	.kasan_shadow_high = KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0),
+	.kmsan_shadow_low = KV4ADDR(KMSANSHADPML4I, 0, 0, 0),
+	.kmsan_shadow_high = KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E,
+	    0, 0, 0),
+	.kmsan_origin_low = KV4ADDR(KMSANORIGPML4I, 0, 0, 0),
+	.kmsan_origin_high = KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E,
+	    0, 0, 0),
 };
 
 struct kva_layout_s kva_layout_la57 = {
 	.kva_min = KV5ADDR(NPML5EPG / 2, 0, 0, 0, 0),	/* == rec_pt */
+	.kva_max = KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
+	    NPDEPG - 1, NPTEPG - 1),
 	.dmap_low = KV5ADDR(DMPML5I, 0, 0, 0, 0),
 	.dmap_high = KV5ADDR(DMPML5I + NDMPML5E, 0, 0, 0, 0),
 	.lm_low = KV5ADDR(LMSPML5I, 0, 0, 0, 0),
@@ -501,6 +513,14 @@
 	.km_high = KV4ADDR(KPML4BASE + NKPML4E - 1, NPDPEPG - 1,
 	    NPDEPG - 1, NPTEPG - 1),
 	.rec_pt = KV5ADDR(PML5PML5I, 0, 0, 0, 0),
+	.kasan_shadow_low = KV4ADDR(KASANPML4I, 0, 0, 0),
+	.kasan_shadow_high = KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0),
+	.kmsan_shadow_low = KV4ADDR(KMSANSHADPML4I, 0, 0, 0),
+	.kmsan_shadow_high = KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E,
+	    0, 0, 0),
+	.kmsan_origin_low = KV4ADDR(KMSANORIGPML4I, 0, 0, 0),
+	.kmsan_origin_high = KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E,
+	    0, 0, 0),
 };
 
 /*
@@ -2003,7 +2023,7 @@
 			 */
 			p5_p[i] = KPML5phys | X86_PG_RW | X86_PG_A |
 			    X86_PG_M | X86_PG_V | pg_nx;
-		} else if (i >= DMPML5I && i < DMPML5I + NDMPML5E) {
+		} else if (i >= DMPML5I && i < DMPML5I + ndmpml4phys) {
 			/* Connect DMAP pml4 pages to PML5. */
 			p5_p[i] = (DMPML4phys + ptoa(i - DMPML5I)) |
 			    X86_PG_RW | X86_PG_V | pg_nx;
@@ -11880,9 +11900,7 @@
 	    mode, range->pdpes, range->pdes, range->ptes);
 
 	/* Reset to sentinel value. */
-	range->sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
-	    NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
-	    NPDEPG - 1, NPTEPG - 1);
+	range->sva = kva_layout.kva_max;
 }
 
 /*
@@ -11923,12 +11941,18 @@
  */
 static void
 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
-    vm_offset_t va, pml4_entry_t pml4e, pdp_entry_t pdpe, pd_entry_t pde,
-    pt_entry_t pte)
+    vm_offset_t va, pml5_entry_t pml5e, pml4_entry_t pml4e, pdp_entry_t pdpe,
+    pd_entry_t pde, pt_entry_t pte)
 {
 	pt_entry_t attrs;
 
-	attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
+	if (la57) {
+		attrs = pml5e & (X86_PG_RW | X86_PG_U | pg_nx);
+		attrs |= pml4e & pg_nx;
+		attrs &= pg_nx | (pml4e & (X86_PG_RW | X86_PG_U));
+	} else {
+		attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
+	}
 
 	attrs |= pdpe & pg_nx;
 	attrs &= pg_nx | (pdpe & (X86_PG_RW | X86_PG_U));
@@ -11961,13 +11985,15 @@
 {
 	struct pmap_kernel_map_range range;
 	struct sbuf sbuf, *sb;
+	pml5_entry_t pml5e;
 	pml4_entry_t pml4e;
 	pdp_entry_t *pdp, pdpe;
 	pd_entry_t *pd, pde;
 	pt_entry_t *pt, pte;
 	vm_offset_t sva;
 	vm_paddr_t pa;
-	int error, i, j, k, l;
+	int error, j, k, l;
+	bool first;
 
 	error = sysctl_wire_old_buffer(req, 0);
 	if (error != 0)
@@ -11976,9 +12002,8 @@
 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
 
 	/* Sentinel value. */
-	range.sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
-	    NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
-	    NPDEPG - 1, NPTEPG - 1);
+	range.sva = kva_layout.kva_max;
+	pml5e = 0;	/* no UB for la48 */
 
 	/*
 	 * Iterate over the kernel page tables without holding the kernel pmap
@@ -11987,44 +12012,50 @@
 	 * Within the large map, ensure that PDP and PD page addresses are
 	 * valid before descending.
 	 */
-	for (sva = 0, i = pmap_pml4e_index(sva); i < NPML4EPG; i++) {
-		switch (i) {
-		case PML4PML4I:
-			if (!la57)
-				sbuf_printf(sb, "\nRecursive map:\n");
-			break;
-		case DMPML4I:
-			if (!la57)
-				sbuf_printf(sb, "\nDirect map:\n");
-			break;
+	for (first = true, sva = 0; sva != 0 || first; first = false) {
+		if (sva == kva_layout.rec_pt)
+			sbuf_printf(sb, "\nRecursive map:\n");
+		else if (sva == kva_layout.dmap_low)
+			sbuf_printf(sb, "\nDirect map:\n");
 #ifdef KASAN
-		case KASANPML4I:
+		else if (sva == kva_layout.kasan_shadow_low)
 			sbuf_printf(sb, "\nKASAN shadow map:\n");
-			break;
 #endif
 #ifdef KMSAN
-		case KMSANSHADPML4I:
+		else if (sva == kva_layout.kmsan_shadow_low)
 			sbuf_printf(sb, "\nKMSAN shadow map:\n");
-			break;
-		case KMSANORIGPML4I:
+		else if (sva == kva_layout.kmsan_origin_low)
 			sbuf_printf(sb, "\nKMSAN origin map:\n");
-			break;
 #endif
-		case KPML4BASE:
+		else if (sva == kva_layout.km_low)
 			sbuf_printf(sb, "\nKernel map:\n");
-			break;
-		case LMSPML4I:
-			if (!la57)
-				sbuf_printf(sb, "\nLarge map:\n");
-			break;
-		}
+		else if (sva == kva_layout.lm_low)
+			sbuf_printf(sb, "\nLarge map:\n");
 
 		/* Convert to canonical form. */
-		if (sva == 1ul << 47)
-			sva |= -1ul << 48;
+		if (la57) {
+			if (sva == 1ul << 56) {
+				sva |= -1ul << 57;
+				continue;
+			}
+		} else {
+			if (sva == 1ul << 47) {
+				sva |= -1ul << 48;
+				continue;
+			}
+		}
 
 restart:
-		pml4e = kernel_pml4[i];
+		if (la57) {
+			pml5e = *pmap_pml5e(kernel_pmap, sva);
+			if ((pml5e & X86_PG_V) == 0) {
+				sva = rounddown2(sva, NBPML5);
+				sysctl_kmaps_dump(sb, &range, sva);
+				sva += NBPML5;
+				continue;
+			}
+		}
+		pml4e = *pmap_pml4e(kernel_pmap, sva);
 		if ((pml4e & X86_PG_V) == 0) {
 			sva = rounddown2(sva, NBPML4);
 			sysctl_kmaps_dump(sb, &range, sva);
@@ -12045,8 +12076,8 @@
 			pa = pdpe & PG_FRAME;
 			if ((pdpe & PG_PS) != 0) {
 				sva = rounddown2(sva, NBPDP);
-				sysctl_kmaps_check(sb, &range, sva, pml4e, pdpe,
-				    0, 0);
+				sysctl_kmaps_check(sb, &range, sva, pml5e,
+				    pml4e, pdpe, 0, 0);
 				range.pdpes++;
 				sva += NBPDP;
 				continue;
@@ -12058,6 +12089,7 @@
 				 * freed.  Validate the next-level address
 				 * before descending.
 				 */
+				sva += NBPDP;
 				goto restart;
 			}
 			pd = (pd_entry_t *)PHYS_TO_DMAP(pa);
@@ -12074,7 +12106,7 @@
 				if ((pde & PG_PS) != 0) {
 					sva = rounddown2(sva, NBPDR);
 					sysctl_kmaps_check(sb, &range, sva,
-					    pml4e, pdpe, pde, 0);
+					    pml5e, pml4e, pdpe, pde, 0);
 					range.pdes++;
 					sva += NBPDR;
 					continue;
@@ -12086,6 +12118,7 @@
 					 * may be freed.  Validate the
 					 * next-level address before descending.
 					 */
+					sva += NBPDR;
 					goto restart;
 				}
 				pt = (pt_entry_t *)PHYS_TO_DMAP(pa);
@@ -12099,7 +12132,7 @@
 					continue;
 				}
 				sysctl_kmaps_check(sb, &range, sva,
-				    pml4e, pdpe, pde, pte);
+				    pml5e, pml4e, pdpe, pde, pte);
 				range.ptes++;
 			}
 		}
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -557,6 +557,7 @@
 
 struct kva_layout_s {
 	vm_offset_t kva_min;
+	vm_offset_t kva_max;
 	vm_offset_t dmap_low;	/* DMAP_MIN_ADDRESS */
 	vm_offset_t dmap_high;	/* DMAP_MAX_ADDRESS */
 	vm_offset_t lm_low;	/* LARGEMAP_MIN_ADDRESS */
@@ -564,6 +565,12 @@
 	vm_offset_t km_low;	/* VM_MIN_KERNEL_ADDRESS */
 	vm_offset_t km_high;	/* VM_MAX_KERNEL_ADDRESS */
 	vm_offset_t rec_pt;
+	vm_offset_t kasan_shadow_low;	/* KASAN_MIN_ADDRESS */
+	vm_offset_t kasan_shadow_high;	/* KASAN_MAX_ADDRESS */
+	vm_offset_t kmsan_shadow_low;	/* KMSAN_SHAD_MIN_ADDRESS */
+	vm_offset_t kmsan_shadow_high;	/* KMSAN_SHAD_MAX_ADDRESS */
+	vm_offset_t kmsan_origin_low;	/* KMSAN_ORIG_MIN_ADDRESS */
+	vm_offset_t kmsan_origin_high;	/* KMSAN_ORIG_MAX_ADDRESS */
 };
 
 extern struct kva_layout_s kva_layout;
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -200,16 +200,14 @@
 #define	VM_MIN_KERNEL_ADDRESS	kva_layout.km_low
 #define	VM_MAX_KERNEL_ADDRESS	kva_layout.km_high
 
-#define	KASAN_MIN_ADDRESS	KV4ADDR(KASANPML4I, 0, 0, 0)
-#define	KASAN_MAX_ADDRESS	KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
+#define	KASAN_MIN_ADDRESS	(kva_layout.kasan_shadow_low)
+#define	KASAN_MAX_ADDRESS	(kva_layout.kasan_shadow_high)
 
-#define	KMSAN_SHAD_MIN_ADDRESS	KV4ADDR(KMSANSHADPML4I, 0, 0, 0)
-#define	KMSAN_SHAD_MAX_ADDRESS	KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E, \
-				    0, 0, 0)
+#define	KMSAN_SHAD_MIN_ADDRESS	(kva_layout.kmsan_shadow_low)
+#define	KMSAN_SHAD_MAX_ADDRESS	(kva_layout.kmsan_shadow_high)
 
-#define	KMSAN_ORIG_MIN_ADDRESS	KV4ADDR(KMSANORIGPML4I, 0, 0, 0)
-#define	KMSAN_ORIG_MAX_ADDRESS	KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
-				    0, 0, 0)
+#define	KMSAN_ORIG_MIN_ADDRESS	(kva_layout.kmsan_origin_low)
+#define	KMSAN_ORIG_MAX_ADDRESS	(kva_layout.kmsan_origin_high)
 
 /*
  * Formally kernel mapping starts at KERNBASE, but kernel linker