diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -137,6 +137,13 @@
 #define	L1_MAPPABLE_P(va, pa, size)					\
 	((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
 
+enum pmap_mode {
+	PMAP_MODE_SV39,
+	PMAP_MODE_SV48,
+};
+
+extern enum pmap_mode pmap_mode;
+
 struct thread;
 
 #define	pmap_vm_page_alloc_check(m)
diff --git a/sys/riscv/include/pte.h b/sys/riscv/include/pte.h
--- a/sys/riscv/include/pte.h
+++ b/sys/riscv/include/pte.h
@@ -44,22 +44,24 @@
 typedef	uint64_t	pn_t;			/* page number */
 #endif
 
-/* Level 0 table, 512GiB per entry */
+/* Level 0 table, 512GiB per entry, SV48 only */
 #define	L0_SHIFT	39
+#define	L0_SIZE		(1UL << L0_SHIFT)
+#define	L0_OFFSET	(L0_SIZE - 1)
 
 /* Level 1 table, 1GiB per entry */
 #define	L1_SHIFT	30
-#define	L1_SIZE		(1 << L1_SHIFT)
+#define	L1_SIZE		(1UL << L1_SHIFT)
 #define	L1_OFFSET	(L1_SIZE - 1)
 
 /* Level 2 table, 2MiB per entry */
 #define	L2_SHIFT	21
-#define	L2_SIZE		(1 << L2_SHIFT)
+#define	L2_SIZE		(1UL << L2_SHIFT)
 #define	L2_OFFSET	(L2_SIZE - 1)
 
 /* Level 3 table, 4KiB per entry */
 #define	L3_SHIFT	12
-#define	L3_SIZE		(1 << L3_SHIFT)
+#define	L3_SIZE		(1UL << L3_SHIFT)
 #define	L3_OFFSET	(L3_SIZE - 1)
 
 #define	Ln_ENTRIES_SHIFT 9
diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h
--- a/sys/riscv/include/vmparam.h
+++ b/sys/riscv/include/vmparam.h
@@ -180,8 +180,10 @@
 	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
 })
 
-#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
-#define	VM_MAX_USER_ADDRESS	(0x0000004000000000UL)
+#define	VM_MIN_USER_ADDRESS_SV39 (0x0000000000000000UL)
+#define	VM_MAX_USER_ADDRESS_SV39 (0x0000004000000000UL)
+#define	VM_MIN_USER_ADDRESS	VM_MIN_USER_ADDRESS_SV39
+#define	VM_MAX_USER_ADDRESS	VM_MAX_USER_ADDRESS_SV39
 
 #define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
 #define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)
@@ -191,8 +193,11 @@
 	(((va) < VM_MAX_USER_ADDRESS) || ((va) >= VM_MIN_KERNEL_ADDRESS))
 
 #define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
-#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
-#define	USRSTACK		SHAREDPAGE
+#define	SHAREDPAGE_SV39		(VM_MAX_USER_ADDRESS_SV39 - PAGE_SIZE)
+#define	SHAREDPAGE		SHAREDPAGE_SV39
+#define	USRSTACK_SV39		SHAREDPAGE_SV39
+#define	USRSTACK		USRSTACK_SV39
+#define	PS_STRINGS_SV39		(USRSTACK_SV39 - sizeof(struct ps_strings))
 
 #define	VM_EARLY_DTB_ADDRESS	(VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))
 
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -160,7 +160,19 @@
 #include
 #include
 
-#define	NUL1E		(Ln_ENTRIES * Ln_ENTRIES)
+/*
+ * Boundary values for the page table page index space:
+ *
+ *	L3 pages: [0, NUL2E)
+ *	L2 pages: [NUL2E, NUL2E + NUL1E)
+ *	L1 pages: [NUL2E + NUL1E, NUL2E + NUL1E + NUL0E)
+ *
+ * Note that these ranges are used in both SV39 and SV48 mode. In SV39 mode the
+ * ranges are not fully populated since there are at most Ln_ENTRIES^2 L3 pages
+ * in a set of page tables.
+ */
+#define	NUL0E		Ln_ENTRIES
+#define	NUL1E		(Ln_ENTRIES * NUL0E)
 #define	NUL2E		(Ln_ENTRIES * NUL1E)
 
 #if !defined(DIAGNOSTIC)
@@ -179,6 +191,7 @@
 #define	PV_STAT(x)	do { } while (0)
 #endif
 
+#define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
 
 #define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
@@ -219,6 +232,8 @@
 LIST_HEAD(pmaplist, pmap);
 static struct pmaplist allpmaps = LIST_HEAD_INITIALIZER();
 
+enum pmap_mode __read_frequently pmap_mode = PMAP_MODE_SV39;
+
 struct pmap kernel_pmap_store;
 
 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
@@ -338,6 +353,7 @@
 	bzero(p, PAGE_SIZE);
 }
 
+#define	pmap_l0_index(va)	(((va) >> L0_SHIFT) & Ln_ADDR_MASK)
 #define	pmap_l1_index(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
 #define	pmap_l2_index(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
 #define	pmap_l3_index(va)	(((va) >> L3_SHIFT) & Ln_ADDR_MASK)
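
For reference, a minimal standalone sketch (not part of the patch) of the page table page index space described in the pmap.c comment above: it copies the NUL0E/NUL1E/NUL2E constants and the pmap_l1_pindex()/pmap_l2_pindex() macros from the hunks and prints the resulting index ranges, assuming 512-entry page table pages.

/* Standalone illustration only; definitions copied from the patch above. */
#include <stdint.h>
#include <stdio.h>

#define	L1_SHIFT	30
#define	L2_SHIFT	21
#define	Ln_ENTRIES	(1 << 9)		/* 512 PTEs per page table page */

#define	NUL0E		Ln_ENTRIES		/* max L1 page table pages (SV48) */
#define	NUL1E		(Ln_ENTRIES * NUL0E)	/* max L2 page table pages */
#define	NUL2E		(Ln_ENTRIES * NUL1E)	/* max L3 page table pages */

#define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
#define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)

int
main(void)
{
	uint64_t va = 0x12345000UL;	/* arbitrary user virtual address */

	printf("L3 pages: [0, %d)\n", NUL2E);
	printf("L2 pages: [%d, %d)\n", NUL2E, NUL2E + NUL1E);
	printf("L1 pages: [%d, %d)\n", NUL2E + NUL1E, NUL2E + NUL1E + NUL0E);
	printf("l2 pindex(va) = %ju, l1 pindex(va) = %ju\n",
	    (uintmax_t)pmap_l2_pindex(va), (uintmax_t)pmap_l1_pindex(va));
	return (0);
}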