D21999.id63888.diff
D21999: [PowerPC] Book-E clang support
Index: head/sys/conf/Makefile.powerpc
===================================================================
--- head/sys/conf/Makefile.powerpc
+++ head/sys/conf/Makefile.powerpc
@@ -42,6 +42,20 @@
CFLAGS+= -msoft-float
CFLAGS.gcc+= -Wa,-many
+# Apply compiler-specific DPAA exceptions.
+.if "${COMPILER_TYPE}" == "clang"
+DPAAWARNFLAGS += \
+ -Wno-error=parentheses-equality \
+ -Wno-error=self-assign \
+ -Wno-error=incompatible-pointer-types-discards-qualifiers \
+ -Wno-error=non-literal-null-conversion \
+ -Wno-error=enum-conversion
+.elif "${COMPILER_TYPE}" == "gcc" && ${COMPILER_VERSION} >= 50200
+DPAAWARNFLAGS += \
+ -Wno-error=redundant-decls \
+ -Wno-error=int-in-bool-context
+.endif
+
# Build position-independent kernel
CFLAGS+= -fPIC
LDFLAGS+= -pie
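
The DPAA driver sources are large third-party code that trips warnings only one compiler implements, so each compiler gets its own exception list rather than the warnings being disabled globally. As a hypothetical illustration of the first clang flag, -Wparentheses-equality fires on an equality comparison wrapped in redundant parentheses, a pattern that often signals an intended assignment:

    /* Hypothetical fragment; clang warns here with -Wparentheses-equality. */
    int
    status_ok(int status)
    {
        if ((status == 0))      /* extraneous parentheses around '==' */
            return (1);
        return (0);
    }
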
Index: head/sys/powerpc/booke/locore.S
===================================================================
--- head/sys/powerpc/booke/locore.S
+++ head/sys/powerpc/booke/locore.S
@@ -39,6 +39,10 @@
#include <machine/vmparam.h>
#include <machine/tlb.h>
+#ifdef _CALL_ELF
+.abiversion _CALL_ELF
+#endif
+
#define TMPSTACKSZ 16384
#ifdef __powerpc64__
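
_CALL_ELF is predefined by the compiler on powerpc64: 1 for the ELFv1 ABI, 2 for ELFv2. Emitting .abiversion from the same macro keeps this hand-written assembly consistent with whichever ABI the compiler-built objects use. A minimal sketch of the same dispatch in C:

    #if defined(_CALL_ELF) && (_CALL_ELF == 2)
    /* ELFv2: no function descriptors; callers set up the TOC via r12. */
    #else
    /* ELFv1: a function symbol names a descriptor {entry, TOC, env}. */
    #endif
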
@@ -76,6 +80,12 @@
#define WORD_SIZE 4
#endif
+#ifdef __powerpc64__
+ /* Placate lld by creating a kboot stub. */
+ .section ".text.kboot", "x", @progbits
+ b __start
+#endif
+
.text
.globl btext
btext:
@@ -309,6 +319,9 @@
1: mflr %r3
ld %r1,0(%r3)
add %r1,%r1,%r3
+/*
+ * Relocate kernel
+ */
bl 1f
.llong _DYNAMIC-.
1: mflr %r3
@@ -379,16 +392,63 @@
.globl __boot_page
.align 12
__boot_page:
- bl 1f
+ /*
+ * The boot page is a special page of memory used during AP bringup.
+ * Before the AP comes out of reset, the physical 4K page holding this
+ * code is arranged to be mapped at 0xfffff000 by use of
+ * platform-dependent registers.
+ *
+ * Alternatively, this page may be executed using an ePAPR-standardized
+ * method -- writing to the address specified in "cpu-release-addr".
+ *
+ * In either case, execution begins at the last instruction of the
+ * page, which is a branch back to the start of the page.
+ *
+ * The code in the page must do initial MMU setup and normalize the
+ * TLBs for regular operation in the correct address space before
+ * reading outside the page.
+ *
+ * This implementation accomplishes this by:
+ * 1) Wiping TLB0 and all TLB1 entries but the one currently in use.
+ * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching
+ * to it with rfi. This entry must NOT be in TLB1 slot 0.
+ * (This is needed to give the code freedom to clean up AS=0.)
+ * 3) Removing the initial TLB1 entry, leaving us with a single valid
+ * TLB1 entry, NOT in slot 0.
+ * 4) Installing an AS0 entry in TLB1 slot 0 mapping the 64MB kernel
+ * segment at its final virtual address. A second rfi is done to
+ * switch to the final address space. At this point we can finally
+ * access the rest of the kernel segment safely.
+ * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in
+ * a consistent (but minimal) state.
+ * 6) Set up TOC, stack, and pcpu registers.
+ * 7) Now that we can finally call C code, call pmap_bootstrap_ap(),
+ * which finishes copying in the shared TLB1 entries.
+ *
+ * At this point, the MMU is fully set up, and we can proceed with
+ * running the actual AP bootstrap code.
+ *
+ * Pieces of this code are also used for the UP kernel, but in this case
+ * the sections specific to boot page functionality are dropped by
+ * the preprocessor.
+ */
+#ifdef __powerpc64__
+ nop /* PPC64 alignment word. 64-bit target. */
+#endif
+ bl 1f /* 32-bit target. */
.globl bp_trace
bp_trace:
- .long 0
+ ADDR(0) /* Trace pointer (%r31). */
.globl bp_kernload
bp_kernload:
- .long 0
+ ADDR(0) /* Kern phys. load address. */
+ .globl bp_virtaddr
+bp_virtaddr:
+ ADDR(0) /* Virt. address of __boot_page. */
+
/*
* Initial configuration
*/
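
The "cpu-release-addr" method referenced in the comment above is the ePAPR spin table: each held-off CPU polls a small structure in memory and is released by filling in its entry address. A sketch of the layout, with field names following the cpu_release structure in platform_mpc85xx.c further down (the authoritative layout is defined by ePAPR):

    #include <stdint.h>

    struct cpu_release {
        uint32_t entry_h;       /* high word of the entry address */
        uint32_t entry_l;       /* low word; the held-off CPU spins on this */
        uint32_t r3_h;          /* high word of the r3 argument */
        uint32_t r3_l;          /* low word of the r3 argument */
        uint32_t reserved;
        uint32_t pir;           /* processor ID handed to the woken CPU */
    };
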
@@ -444,7 +504,7 @@
mfmsr %r3
ori %r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
- oris %r3, %r3, PSL_CM@h
+ oris %r3, %r3, PSL_CM@h /* Ensure we're in 64-bit after RFI */
#endif
bl 3f
3: mflr %r4
@@ -461,7 +521,7 @@
bl tlb1_inval_entry
/*
- * Setup final mapping in TLB1[1] and switch to it
+ * Setup final mapping in TLB1[0] and switch to it
*/
/* Final kernel mapping, map in 64 MB of RAM */
lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
@@ -481,31 +541,19 @@
isync
/* Retrieve kernel load [physical] address from bp_kernload */
+5:
+ mflr %r3
#ifdef __powerpc64__
- b 0f
- .align 3
-0:
- nop
-#endif
- bl 5f
- ADDR(bp_kernload)
- ADDR(__boot_page)
-5: mflr %r3
-#ifdef __powerpc64__
- ld %r4, 0(%r3)
- ld %r5, 8(%r3)
- clrrdi %r3, %r3, 12
+ clrrdi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */
#else
- lwz %r4, 0(%r3)
- lwz %r5, 4(%r3)
- rlwinm %r3, %r3, 0, 0, 19
+ clrrwi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */
#endif
- sub %r4, %r4, %r5 /* offset of bp_kernload within __boot_page */
- lwzx %r3, %r4, %r3
+ LOAD %r4, (bp_kernload - __boot_page)(%r3)
+ LOAD %r5, (bp_virtaddr - __boot_page)(%r3)
/* Set RPN and protection */
- ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
- mtspr SPR_MAS3, %r3
+ ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
+ mtspr SPR_MAS3, %r4
isync
li %r4, 0
mtspr SPR_MAS7, %r4
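
The rework above drops the old literal-pool dance: because this code sits at a known offset inside the boot page, the link-register value can simply be rounded down to the page base, and bp_kernload/bp_virtaddr loaded at fixed offsets from it. clrrdi/clrrwi rX, rX, PAGE_SHIFT is exactly the trunc_page() named in the comments; a minimal C equivalent, assuming the usual 4 KB pages:

    #define PAGE_SHIFT      12

    static inline unsigned long
    trunc_page(unsigned long addr)
    {
        /* Clear the low PAGE_SHIFT bits: round down to a page boundary. */
        return (addr & ~((1UL << PAGE_SHIFT) - 1));
    }
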
@@ -518,8 +566,8 @@
bl 6f
6: mflr %r3
rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
- add %r3, %r3, %r5 /* Make this virtual address */
- addi %r3, %r3, (7f - 6b)
+ add %r3, %r3, %r5 /* Make this a virtual address */
+ addi %r3, %r3, (7f - 6b) /* And figure out return address. */
#ifdef __powerpc64__
lis %r4, PSL_CM@h /* Note AS=0 */
#else
@@ -758,6 +806,11 @@
*/
.space 4092 - (__boot_page_padding - __boot_page)
b __boot_page
+ /*
+ * This is the end of the boot page.
+ * During AP startup, the previous instruction is at 0xfffffffc
+ * virtual (i.e., the reset vector).
+ */
#endif /* SMP */
/************************************************************************/
@@ -881,6 +934,7 @@
* created.
*/
ENTRY(get_spr)
+ /* Note: the SPR number is patched at runtime. */
mfspr %r3, 0
blr
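
get_spr works by rewriting the mfspr instruction word in place before the routine is called. In the mfspr encoding (opcode 31, XO 339), the 10-bit SPR number is stored with its two 5-bit halves swapped, so a patcher has to build the field accordingly. A sketch with a hypothetical helper, not part of this change:

    #include <stdint.h>

    /* Encode mfspr rD,SPR; the SPR field stores its 5-bit halves swapped. */
    static uint32_t
    mfspr_insn(unsigned rd, unsigned spr)
    {
        uint32_t field = ((spr & 0x1f) << 5) | ((spr >> 5) & 0x1f);

        return ((31u << 26) | (rd << 21) | (field << 11) | (339u << 1));
    }

After patching, the instruction cache must be synchronized before the routine is executed.
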
@@ -900,7 +954,9 @@
.space 10240 /* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
+#ifdef SMP
TOC_ENTRY(bp_kernload)
+#endif
#endif
/*
Index: head/sys/powerpc/booke/pmap.c
===================================================================
--- head/sys/powerpc/booke/pmap.c
+++ head/sys/powerpc/booke/pmap.c
@@ -1674,7 +1674,7 @@
/* Calculate corresponding physical addresses for the kernel region. */
phys_kernelend = kernload + kernsize;
debugf("kernel image and allocated data:\n");
- debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
+ debugf(" kernload = 0x%09jx\n", (uintmax_t)kernload);
debugf(" kernstart = 0x%"PRI0ptrX"\n", kernstart);
debugf(" kernsize = 0x%"PRI0ptrX"\n", kernsize);
@@ -1859,9 +1859,9 @@
thread0.td_kstack = kstack0;
thread0.td_kstack_pages = kstack_pages;
- debugf("kstack_sz = 0x%08x\n", kstack0_sz);
- debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
- kstack0_phys, kstack0_phys + kstack0_sz);
+ debugf("kstack_sz = 0x%08jx\n", (uintmax_t)kstack0_sz);
+ debugf("kstack0_phys at 0x%09jx - 0x%09jx\n",
+ (uintmax_t)kstack0_phys, (uintmax_t)kstack0_phys + kstack0_sz);
debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
kstack0, kstack0 + kstack0_sz);
@@ -4003,7 +4003,7 @@
for (idx = 0; idx < nents; idx++) {
pgsz = pgs[idx];
- debugf("%u: %llx -> %jx, size=%jx\n", idx, pa,
+ debugf("%u: %jx -> %jx, size=%jx\n", idx, (uintmax_t)pa,
(uintmax_t)va, (uintmax_t)pgsz);
tlb1_set_entry(va, pa, pgsz,
_TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
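
These debugf() changes fix format/argument mismatches: vm_paddr_t and vm_size_t are not the same width on every powerpc kernel configuration, so hard-coded %llx or %x can be wrong on some targets (and clang's format checking flags it). The portable C99 idiom is to cast to uintmax_t and print with the j length modifier. A standalone sketch:

    #include <inttypes.h>
    #include <stdio.h>

    /* pa stands in for a vm_paddr_t, whose width varies by target. */
    static void
    print_kernload(uint64_t pa)
    {
        printf("kernload = 0x%09jx\n", (uintmax_t)pa);
    }
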
Index: head/sys/powerpc/booke/trap_subr.S
===================================================================
--- head/sys/powerpc/booke/trap_subr.S
+++ head/sys/powerpc/booke/trap_subr.S
@@ -120,8 +120,8 @@
GET_CPUINFO(%r1); /* Per-cpu structure */ \
STORE %r30, (savearea+CPUSAVE_R30)(%r1); \
STORE %r31, (savearea+CPUSAVE_R31)(%r1); \
- mfdear %r30; \
- mfesr %r31; \
+ mfspr %r30, SPR_DEAR; \
+ mfspr %r31, SPR_ESR; \
STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
mfspr %r30, isrr0; \
@@ -143,8 +143,8 @@
GET_CPUINFO(%r1); /* Per-cpu structure */ \
STORE %r30, (savearea+CPUSAVE_R30)(%r1); \
STORE %r31, (savearea+CPUSAVE_R31)(%r1); \
- mfdear %r30; \
- mfesr %r31; \
+ mfspr %r30, SPR_DEAR; \
+ mfspr %r31, SPR_ESR; \
STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
mfspr %r30, isrr0; \
@@ -375,9 +375,9 @@
rlwinm outr, inr, 6, 23, 25; /* 4 x TLBSAVE_LEN */
#endif
#define TLB_PROLOG \
- mtsprg4 %r1; /* Save SP */ \
- mtsprg5 %r28; \
- mtsprg6 %r29; \
+ mtspr SPR_SPRG4, %r1; /* Save SP */ \
+ mtspr SPR_SPRG5, %r28; \
+ mtspr SPR_SPRG6, %r29; \
/* calculate TLB nesting level and TLBSAVE instance address */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \
@@ -388,8 +388,8 @@
add %r1, %r1, %r29; /* current TLBSAVE ptr */ \
\
/* save R20-31 */ \
- mfsprg5 %r28; \
- mfsprg6 %r29; \
+ mfspr %r28, SPR_SPRG5; \
+ mfspr %r29, SPR_SPRG6; \
TLB_SAVE_REGS(%r1); \
/* save LR, CR */ \
mflr %r30; \
@@ -402,7 +402,7 @@
STORE %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \
STORE %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \
isync; \
- mfsprg4 %r1
+ mfspr %r1, SPR_SPRG4
/*
* restores LR, CR, SRR0-1, R20-31 from the TLBSAVE area
@@ -410,7 +410,7 @@
* same notes as for the TLB_PROLOG
*/
#define TLB_RESTORE \
- mtsprg4 %r1; /* Save SP */ \
+ mtspr SPR_SPRG4, %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
/* calculate TLB nesting level and TLBSAVE instance addr */ \
LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \
@@ -432,7 +432,7 @@
mtsrr1 %r31; \
/* restore R20-31 */ \
TLB_RESTORE_REGS(%r1); \
- mfsprg4 %r1
+ mfspr %r1, SPR_SPRG4
#ifdef SMP
#define TLB_LOCK \
@@ -693,7 +693,7 @@
TLB_PROLOG
TLB_LOCK
- mfdear %r31
+ mfspr %r31, SPR_DEAR
/*
* Save MAS0-MAS2 registers. There might be another tlb miss during
@@ -1052,8 +1052,8 @@
mflr %r31
mtsrr0 %r31
- mfdear %r30
- mfesr %r31
+ mfspr %r30, SPR_DEAR
+ mfspr %r31, SPR_ESR
STORE %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3)
STORE %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3)
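
mfdear, mfesr, and the mfsprg4-7/mtsprg4-7 forms are extended mnemonics, which clang's integrated assembler rejected at the time of this change; the generic mfspr/mtspr forms with explicit SPR names are equivalent and assemble identically under both toolchains. A sketch of the same access from C inline assembly (on Book-E, SPR_DEAR is 61; see the spr.h hunk below):

    static inline unsigned long
    read_dear(void)
    {
        unsigned long dear;

        __asm __volatile("mfspr %0, 61" : "=r"(dear));
        return (dear);
    }
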
Index: head/sys/powerpc/conf/dpaa/config.dpaa
===================================================================
--- head/sys/powerpc/conf/dpaa/config.dpaa
+++ head/sys/powerpc/conf/dpaa/config.dpaa
@@ -2,7 +2,7 @@
files "dpaa/files.dpaa"
-makeoptions DPAA_COMPILE_CMD="${LINUXKPI_C} \
+makeoptions DPAA_COMPILE_CMD="${LINUXKPI_C} ${DPAAWARNFLAGS} \
-Wno-cast-qual -Wno-unused-function -Wno-init-self -fms-extensions \
-include $S/contrib/ncsw/build/dflags.h \
-Wno-error=missing-prototypes \
Index: head/sys/powerpc/include/spr.h
===================================================================
--- head/sys/powerpc/include/spr.h
+++ head/sys/powerpc/include/spr.h
@@ -504,7 +504,11 @@
#define SPR_HASH2 0x3d3 /* .68 Secondary Hash Address Register */
#define SPR_IMISS 0x3d4 /* .68 Instruction TLB Miss Address Register */
#define SPR_TLBMISS 0x3d4 /* .6. TLB Miss Address Register */
+#if defined(BOOKE_PPC4XX)
#define SPR_DEAR 0x3d5 /* 4.. Data Error Address Register */
+#else
+#define SPR_DEAR 0x03d /* ..8 Data Exception Address Register */
+#endif
#define SPR_ICMP 0x3d5 /* .68 Instruction TLB Compare Register */
#define SPR_PTEHI 0x3d5 /* .6. Instruction TLB Compare Register */
#define SPR_EVPR 0x3d6 /* 4.. Exception Vector Prefix Register */
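
The dotted columns in these comments mark processor families (a leading "4" for 4xx parts, a trailing "8" for e500 Book-E). DEAR is a register whose number genuinely differs between them: SPR 0x3d5 on 4xx versus SPR 0x03d on Book-E, hence the new conditional. A compile-time sketch of the effect, with values copied from the hunk above:

    #if defined(BOOKE_PPC4XX)
    _Static_assert(SPR_DEAR == 0x3d5, "4xx DEAR is SPR 981");
    #else
    _Static_assert(SPR_DEAR == 0x03d, "Book-E DEAR is SPR 61");
    #endif
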
Index: head/sys/powerpc/mpc85xx/platform_mpc85xx.c
===================================================================
--- head/sys/powerpc/mpc85xx/platform_mpc85xx.c
+++ head/sys/powerpc/mpc85xx/platform_mpc85xx.c
@@ -68,7 +68,8 @@
extern void *ap_pcpu;
extern vm_paddr_t kernload; /* Kernel physical load address */
extern uint8_t __boot_page[]; /* Boot page body */
-extern uint32_t bp_kernload;
+extern vm_paddr_t bp_kernload; /* Boot page copy of kernload */
+extern vm_offset_t bp_virtaddr; /* Virtual address of boot page */
extern vm_offset_t __startkernel;
struct cpu_release {
@@ -354,11 +355,13 @@
pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
rel = (struct cpu_release *)rel_va;
bptr = pmap_kextract((uintptr_t)__boot_page);
+
cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
- rel->pir = pc->pc_cpuid; __asm __volatile("sync");
- rel->entry_h = (bptr >> 32);
- rel->entry_l = bptr; __asm __volatile("sync");
+ rel->pir = pc->pc_cpuid; __asm __volatile("sync" ::: "memory");
+ rel->entry_h = (bptr >> 32); __asm __volatile("sync" ::: "memory");
cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
+ rel->entry_l = bptr & 0xffffffff; __asm __volatile("sync" ::: "memory");
+ cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
if (bootverbose)
printf("Waking up CPU %d via CPU release page %p\n",
pc->pc_cpuid, rel);
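
The reordering matters because of what the held-off CPU does with the release page: per ePAPR it polls only entry_l, so pir and entry_h must already be visible (written, synced, and flushed) before entry_l changes. A sketch of the secondary's side of the protocol, using the cpu_release layout sketched earlier and a hypothetical boot_jump() standing in for the actual transfer of control:

    static void
    secondary_spin(volatile struct cpu_release *rel)
    {
        while (rel->entry_l == 1)
            ;       /* 1 is the "hold" value defined by ePAPR */
        boot_jump(((uint64_t)rel->entry_h << 32) | rel->entry_l);
    }
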
@@ -397,11 +400,13 @@
cpuid = pc->pc_cpuid + 24;
}
bp_kernload = kernload;
+ bp_virtaddr = (vm_offset_t)&__boot_page;
/*
- * bp_kernload is in the boot page. Sync the cache because ePAPR
- * booting has the other core(s) already running.
+ * bp_kernload and bp_virtaddr are in the boot page. Sync the cache
+ * because ePAPR booting has the other core(s) already running.
*/
cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload));
+ cpu_flush_dcache(&bp_virtaddr, sizeof(bp_virtaddr));
ap_pcpu = pc;
__asm __volatile("msync; isync");
Index: head/sys/powerpc/powerpc/pmap_dispatch.c
===================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c
+++ head/sys/powerpc/powerpc/pmap_dispatch.c
@@ -145,7 +145,7 @@
u_int flags, int8_t psind)
{
- CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
+ CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %#x, %d)", pmap, va,
p, prot, flags, psind);
return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}
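
The one-character fix adds the alternate-form flag so the flags argument is traced with a 0x prefix like its neighbors. For illustration:

    #include <stdio.h>

    int
    main(void)
    {
        printf("%x vs %#x\n", 47, 47);  /* prints "2f vs 0x2f" */
        return (0);
    }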