Page Menu · Home · FreeBSD

D3885.id9373.diff
No One · Temporary

D3885.id9373.diff

Index: sys/arm64/arm64/locore.S
===================================================================
--- sys/arm64/arm64/locore.S
+++ sys/arm64/arm64/locore.S
@@ -100,6 +100,16 @@
br x15
virtdone:
+ /*
+ * Now that we are in virtual address space,
+ * we don't need the identity mapping in TTBR0 and
+ * can set the TCR to a more useful value.
+ */
+ ldr x2, tcr1
+ mrs x3, id_aa64mmfr0_el1
+ bfi x2, x3, #32, #3
+ msr tcr_el1, x2
+
/* Set up the stack */
adr x25, initstack_end
mov sp, x25
@@ -167,7 +177,7 @@
/* Load the kernel page table */
adr x26, pagetable_l1_ttbr1
/* Load the identity page table */
- adr x27, pagetable_l1_ttbr0
+ adr x27, pagetable_l0_ttbr0
/* Enable the mmu */
bl start_mmu
@@ -177,6 +187,16 @@
br x15
mp_virtdone:
+ /*
+ * Now that we are in virtual address space,
+ * we don't need the identity mapping in TTBR0 and
+ * can set the TCR to a more useful value.
+ */
+ ldr x2, tcr1
+ mrs x3, id_aa64mmfr0_el1
+ bfi x2, x3, #32, #3
+ msr tcr_el1, x2
+
ldr x4, =secondary_stacks
mov x5, #(PAGE_SIZE * KSTACK_PAGES)
mul x5, x0, x5
@@ -357,7 +377,7 @@
mov x7, #NORMAL_MEM
mov x8, #(KERNBASE & L2_BLOCK_MASK)
mov x9, x28
- bl build_block_pagetable
+ bl build_l2_block_pagetable
/* Move to the l1 table */
add x26, x26, #PAGE_SIZE
@@ -373,21 +393,49 @@
*/
add x27, x26, #PAGE_SIZE
-#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
- /* Create a table for the UART */
- mov x6, x27 /* The initial page table */
+ /* Link the l0 -> l1 table */
+ add x6, x26, #(3 * PAGE_SIZE)
+ mov x8, #0
+ lsl x8, x8, #L0_SHIFT
+ mov x9, x27
+ bl link_l0_pagetable
+
+ /* Create the VA = PA map */
+ mov x6, x27
mov x7, #DEVICE_MEM
- mov x8, #(SOCDEV_VA) /* VA start */
- mov x9, #(SOCDEV_PA) /* PA start */
- bl build_section_pagetable
-#endif
+ mov x9, #0
+ lsl x9, x9, #L0_SHIFT
+ mov x8, #0 /* all pages */
+ mov x10, #512
+ bl build_l1_block_pagetable
+
+ /* Move to second l0 mapping */
+ add x27, x27, #PAGE_SIZE
+
+ /* Do we need a second l0 mapping? */
+ adr x8, pagetable
+ lsr x8, x8, #L0_SHIFT
+ and x8, x8, #0x1FF
+ cbz x8, 1f
+
+ /* Link the l0 -> l1 table */
+ add x6, x26, #(3 * PAGE_SIZE)
+ lsl x8, x8, #L0_SHIFT
+ mov x9, x27
+ bl link_l0_pagetable
/* Create the VA = PA map */
- mov x6, x27 /* The initial page table */
+ mov x6, x27
mov x7, #NORMAL_UNCACHED /* Uncached as it's only needed early on */
- mov x9, x27
- mov x8, x9 /* VA start (== PA start) */
- bl build_section_pagetable
+ mov x9, #1
+ lsl x9, x9, #L0_SHIFT
+ mov x8, #0 /* all pages */
+ mov x10, #512
+ bl build_l1_block_pagetable
+
+1:
+ /* Move to L0 for TTBR0 */
+ add x27, x27, #PAGE_SIZE
/* Restore the Link register */
mov x30, x5
@@ -424,10 +472,41 @@
ret
/*
+ * Builds an L0 -> L1 table descriptor
+ *
+ * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
+ * within it by build_l1_block_pagetable.
+ *
+ * x6 = L0 table
+ * x8 = Virtual Address
+ * x9 = L1 PA (trashed)
+ * x11, x12 and x13 are trashed
+ */
+link_l0_pagetable:
+ /*
+ * Link an L0 -> L1 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L0_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L0 block entry */
+ mov x12, #L0_TABLE
+
+ /* Only use the output address bits */
+ lsr x9, x9, #12
+ orr x12, x12, x9, lsl #12
+
+ /* Store the entry */
+ str x12, [x6, x11, lsl #3]
+
+ ret
+
+/*
* Builds an L1 -> L2 table descriptor
*
* This is a link for a 1GiB block of memory with up to 2MiB regions mapped
- * within it by build_block_pagetable.
+ * within it by build_l2_block_pagetable.
*
* x6 = L1 table
* x8 = Virtual Address
@@ -455,6 +534,47 @@
ret
/*
+ * Builds count 1 GiB page table entry
+ * x6 = L1 table
+ * x7 = Type (0 = Device, 1 = Normal)
+ * x8 = VA start
+ * x9 = PA start (trashed)
+ * x10 = Entry count (TODO)
+ * x11, x12 and x13 are trashed
+ */
+build_l1_block_pagetable:
+ /*
+ * Build the L1 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L1_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L1 block entry */
+ lsl x12, x7, #2
+ orr x12, x12, #L1_BLOCK
+ orr x12, x12, #(ATTR_AF)
+
+ /* Only use the output address bits */
+ lsr x9, x9, #L1_SHIFT
+
+ /* Set the physical address for this virtual address */
+1: orr x12, x12, x9, lsl #L1_SHIFT
+
+ /* Store the entry */
+ str x12, [x6, x11, lsl #3]
+
+ /* Clear the address bits */
+ and x12, x12, #ATTR_MASK_L
+
+ sub x10, x10, #1
+ add x11, x11, #1
+ add x9, x9, #1
+ cbnz x10, 1b
+
+2: ret
+
+/*
* Builds count 2 MiB page table entry
* x6 = L2 table
* x7 = Type (0 = Device, 1 = Normal)
@@ -463,7 +583,7 @@
* x10 = Entry count (TODO)
* x11, x12 and x13 are trashed
*/
-build_block_pagetable:
+build_l2_block_pagetable:
/*
* Build the L2 table entry.
*/
@@ -519,8 +639,13 @@
ldr x2, mair
msr mair_el1, x2
- /* Setup TCR according to PARange bits from ID_AA64MMFR0_EL1 */
- ldr x2, tcr
+ /*
+ * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1.
+ * Some machines have their memory mapped >512GiB, which can not
+ * be identity-mapped using the default 39 VA bits. Thus, use
+ * 48 VA bits for now and switch back to 39 after the VA jump.
+ */
+ ldr x2, tcr0
mrs x3, id_aa64mmfr0_el1
bfi x2, x3, #32, #3
msr tcr_el1, x2
@@ -540,9 +665,12 @@
mair:
/* Device Normal, no cache Normal, write-back */
.quad MAIR_ATTR(0x00, 0) | MAIR_ATTR(0x44, 1) | MAIR_ATTR(0xff, 2)
-tcr:
- .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \
- TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
+tcr0:
+ .quad (TCR_T1SZ(64 - VIRT_BITS) | TCR_T0SZ(64 - 48) | \
+ TCR_ASID_16 | TCR_TG1_4K | TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
+tcr1:
+ .quad (TCR_T1SZ(64 - VIRT_BITS) | TCR_T0SZ(64 - VIRT_BITS) | \
+ TCR_ASID_16 | TCR_TG1_4K | TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
/* Bits to set */
.quad (SCTLR_UCI | SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
@@ -568,7 +696,11 @@
.space PAGE_SIZE
pagetable_l1_ttbr1:
.space PAGE_SIZE
-pagetable_l1_ttbr0:
+pagetable_l1_ttbr0_0:
+ .space PAGE_SIZE
+pagetable_l1_ttbr0_1:
+ .space PAGE_SIZE
+pagetable_l0_ttbr0:
.space PAGE_SIZE
pagetable_end:
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -445,19 +445,22 @@
return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
}
+vm_paddr_t dmap_phys_base = 0;
+
static void
-pmap_bootstrap_dmap(vm_offset_t l1pt)
+pmap_bootstrap_dmap(vm_offset_t l1pt, vm_paddr_t kernstart)
{
vm_offset_t va;
vm_paddr_t pa;
pd_entry_t *l1;
u_int l1_slot;
+ pa = dmap_phys_base = kernstart & ~0xfffffffff;
va = DMAP_MIN_ADDRESS;
l1 = (pd_entry_t *)l1pt;
l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);
- for (pa = 0; va < DMAP_MAX_ADDRESS;
+ for (; va < DMAP_MAX_ADDRESS;
pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
@@ -567,7 +570,7 @@
rw_init(&pvh_global_lock, "pmap pv global");
/* Create a direct map region early so we can use it for pa -> va */
- pmap_bootstrap_dmap(l1pt);
+ pmap_bootstrap_dmap(l1pt, kernstart);
va = KERNBASE;
pa = KERNBASE - kern_delta;
Index: sys/arm64/include/armreg.h
===================================================================
--- sys/arm64/include/armreg.h
+++ sys/arm64/include/armreg.h
@@ -231,7 +231,8 @@
#define TCR_T1SZ_SHIFT 16
#define TCR_T0SZ_SHIFT 0
-#define TCR_TxSZ(x) (((x) << TCR_T1SZ_SHIFT) | ((x) << TCR_T0SZ_SHIFT))
+#define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT)
+#define TCR_T1SZ(x) ((x) << TCR_T1SZ_SHIFT)
/* Saved Program Status Register */
#define DBG_SPSR_SS (0x1 << 21)
Index: sys/arm64/include/vmparam.h
===================================================================
--- sys/arm64/include/vmparam.h
+++ sys/arm64/include/vmparam.h
@@ -160,8 +160,9 @@
#define DMAP_MIN_ADDRESS (0xffffffc000000000UL)
#define DMAP_MAX_ADDRESS (0xffffffdfffffffffUL)
-#define DMAP_MIN_PHYSADDR (0x0000000000000000UL)
-#define DMAP_MAX_PHYSADDR (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS)
+extern vm_paddr_t dmap_phys_base;
+#define DMAP_MIN_PHYSADDR (dmap_phys_base)
+#define DMAP_MAX_PHYSADDR (dmap_phys_base + (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS))
/* True if pa is in the dmap range */
#define PHYS_IN_DMAP(pa) ((pa) <= DMAP_MAX_PHYSADDR)
@@ -174,7 +175,7 @@
KASSERT(PHYS_IN_DMAP(pa), \
("%s: PA out of range, PA: 0x%lx", __func__, \
(vm_paddr_t)(pa))); \
- (pa) | DMAP_MIN_ADDRESS; \
+ (pa - dmap_phys_base) | DMAP_MIN_ADDRESS; \
})
#define DMAP_TO_PHYS(va) \
@@ -182,7 +183,7 @@
KASSERT(VIRT_IN_DMAP(va), \
("%s: VA out of range, VA: 0x%lx", __func__, \
(vm_offset_t)(va))); \
- (va) & ~DMAP_MIN_ADDRESS; \
+ ((va) & ~DMAP_MIN_ADDRESS) + dmap_phys_base; \
})
#define VM_MIN_USER_ADDRESS (0x0000000000000000UL)

File Metadata

Mime Type
text/plain
Expires
Sun, Feb 8, 5:35 PM (5 h, 17 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
28479381
Default Alt Text
D3885.id9373.diff (8 KB)

Event Timeline