diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S --- a/sys/arm64/arm64/locore.S +++ b/sys/arm64/arm64/locore.S @@ -43,6 +43,12 @@ * space, the same as a single level 2 page with 4k pages. */ #define L3_PAGE_COUNT 32 +#elif PAGE_SIZE == PAGE_SIZE_4K +/* + * Space for a level 3 table holding the end of the executable memory and + * the start of the non-executable data. + */ +#define L3_PAGE_COUNT 1 #endif /* @@ -483,22 +489,64 @@ /* Get the number of blocks/pages to allocate, rounded down */ lsr x14, x8, #(PTE_SHIFT) - ldr x25, =etext + ldr x26, =etext +#if PAGE_SIZE != PAGE_SIZE_4K ldr x8, =((1 << PTE_SHIFT) - 1) - add x25, x25, x8 + add x26, x26, x8 +#endif mov x8, #(KERNBASE) - sub x25, x25, x8 + sub x25, x26, x8 lsr x25, x25, #(PTE_SHIFT) +#if PAGE_SIZE == PAGE_SIZE_4K + /* Calculate the number of executable level 3 pages to create */ + lsr x26, x26, #(L3_SHIFT) + bfc x26, #(Ln_ENTRIES_SHIFT), #(64 - Ln_ENTRIES_SHIFT) + + /* Build the L3 table holding the end of the executable code */ + lsl x15, x25, #(PTE_SHIFT) + adrp x6, pagetable_l3_ttbr1 + add x6, x6, :lo12:pagetable_l3_ttbr1 + ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | \ + ATTR_S1_AP(ATTR_S1_AP_RO)) + ldr x8, =(KERNBASE) + add x8, x8, x15 + add x9, x28, x15 + mov x10, x26 + bl build_l3_page_pagetable + + /* Build the remaining level 3 pages */ + ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_XN) + lsl x27, x26, #(L3_SHIFT) + add x8, x8, x27 + add x9, x28, x15 + add x9, x9, x27 + ldr x10, =(Ln_ENTRIES) + sub x10, x10, x26 + bl build_l3_page_pagetable + + /* Link the l2 -> l3 table */ + mov x9, x6 + adrp x6, pagetable_l2_ttbr1 + add x6, x6, :lo12:pagetable_l2_ttbr1 + bl link_l2_pagetable +#endif + /* Create the kernel space PTE table */ adrp x6, LL_PAGE_TABLE add x6, x6, :lo12:LL_PAGE_TABLE - mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) + ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | \ + ATTR_S1_AP(ATTR_S1_AP_RO)) mov x8, #(KERNBASE) mov x9, x28 mov x10, x25 bl BUILD_PTE_FUNC 
+#if PAGE_SIZE == PAGE_SIZE_4K + /* Skip memory mapped through the L2 table */ + add x25, x25, #1 +#endif + /* Create the kernel space XN PTE table */ lsl x10, x25, #(PTE_SHIFT) ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_XN) @@ -718,7 +766,6 @@ ret LEND(build_l2_block_pagetable) -#if PAGE_SIZE != PAGE_SIZE_4K /* * Builds an L2 -> L3 table descriptor * @@ -758,6 +805,7 @@ * x11, x12 and x13 are trashed */ LENTRY(build_l3_page_pagetable) + cbz x10, 2f /* * Build the L3 table entry. */ @@ -786,10 +834,10 @@ add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b +2: ret LEND(build_l3_page_pagetable) -#endif LENTRY(start_mmu) dsb sy @@ -917,10 +965,8 @@ */ .globl pagetable_l0_ttbr1 pagetable: -#if PAGE_SIZE != PAGE_SIZE_4K pagetable_l3_ttbr1: .space (PAGE_SIZE * L3_PAGE_COUNT) -#endif pagetable_l2_ttbr1: .space PAGE_SIZE pagetable_l1_ttbr1: diff --git a/sys/conf/ldscript.arm64 b/sys/conf/ldscript.arm64 --- a/sys/conf/ldscript.arm64 +++ b/sys/conf/ldscript.arm64 @@ -15,6 +15,12 @@ *(.gnu.warning) *(.gnu.linkonce.t*) } =0x9090 + /* + * Align to the largest page size the kernel could be built for. + * If we don't then building page tables in locore.S could fail as it + * assumes the .text section is on a different page to later sections. + */ + . = ALIGN(16 * 1024); _etext = .; PROVIDE (etext = .);