Index: head/sys/powerpc/booke/locore.S
===================================================================
--- head/sys/powerpc/booke/locore.S	(revision 333134)
+++ head/sys/powerpc/booke/locore.S	(revision 333135)
@@ -1,1004 +1,942 @@
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006 Semihalf, Marian Balakowicz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include
#include
#include
#include
#include
#include
#include
#include

#define TMPSTACKSZ	16384

#ifdef __powerpc64__
#define GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE	288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE	0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have HID1 register, so skip HID1 setup on
 * this core.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, KERNBASE)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync
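[Editor's note] The TSIZE field written to MAS1 above encodes the mapping size as a
power of four. A hedged C sketch of that encoding, not from this source
(tlb_size_bytes() is an illustrative helper; the 4^TSIZE KB rule follows the
e500 core reference manual):

#include <stdint.h>

/* BookE e500 TLB entry sizes are 4^TSIZE KB. */
static uint64_t
tlb_size_bytes(unsigned tsize)
{
	return ((uint64_t)1024 << (2 * tsize));
}

/* TLB_SIZE_64M therefore names the TSIZE value t for which 4^t KB == 64 MB. */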
	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf	%r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
	bl	1f
	.llong _DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b
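[Editor's note] In the 32-bit path above, the relocation base handed to
elf_reloc_self() is the difference between where _DYNAMIC sits at run time and
where it was linked (recovered from got[0]). A hedged C restatement with
illustrative names; the actual relocation work happens inside elf_reloc_self():

#include <stdint.h>

/* relocbase = runtime _DYNAMIC - link-time _DYNAMIC (taken from got[0]);
 * elf_reloc_self() then adds relocbase to every R_PPC_RELATIVE target. */
static uintptr_t
compute_relocbase(uintptr_t dynamic_runtime, uintptr_t dynamic_linktime)
{
	return (dynamic_runtime - dynamic_linktime);
}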
#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, KERNBASE)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
#ifdef __powerpc64__
	b	0f
	.align	3
0:
	nop
#endif
	bl	5f
	ADDR(bp_kernload)
	ADDR(__boot_page)
5:	mflr	%r3
#ifdef __powerpc64__
	ld	%r4, 0(%r3)
	ld	%r5, 8(%r3)
	clrrdi	%r3, %r3, 12
#else
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, (7f - 6b)
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses KERNBASE and beyond so
 * it's allowed to directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Get load offset */
	ld	%r31,-0x8000(%r2) /* First TOC entry is TOC base */
	subf	%r31,%r31,%r2	/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	ld	%r1,TOC_REF(tmpstack)(%r2)
	addi	%r1,%r1,TMPSTACKSZ-96
	add	%r1,%r1,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr
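[Editor's note] The PC-relative arithmetic the AP uses above to read bp_kernload
before paging is fully set up, restated as a hedged C sketch (an illustrative
function; the two link-time addresses correspond to the ADDR() words emitted
after the bl):

#include <stdint.h>
#include <stddef.h>

static uint32_t
read_bp_kernload(uintptr_t pc_in_boot_page,
    uintptr_t bp_kernload_link, uintptr_t boot_page_link)
{
	/* clrrdi/rlwinm: round the current PC down to the 4 KB boot page. */
	uintptr_t page = pc_in_boot_page & ~(uintptr_t)0xfff;
	/* Link-time offset of bp_kernload within __boot_page. */
	size_t off = bp_kernload_link - boot_page_link;

	return (*(volatile uint32_t *)(page + off));
}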
/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
/*
 * The __boot_tlb1 table is used to hold BSP TLB1 entries
 * marked with _TLB_ENTRY_SHARED flag during AP bootstrap.
 * The BSP fills in the table in tlb_ap_prep() function. Next,
 * AP loads its contents to TLB1 hardware in pmap_bootstrap_ap().
 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from hold-off, the CPU fetches the last word of the
	 * boot page.
	 */
	.space 4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */
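[Editor's note] The .space expression above sizes the filler so the closing
b __boot_page is the last word of the 4 KB page, i.e. the word fetched as the
reset vector. A hedged C sanity check of that arithmetic (the offset argument
is an illustrative stand-in for the link-time label difference):

#include <assert.h>
#include <stddef.h>

static void
check_boot_page_layout(size_t padding_offset)
{
	size_t pad = 4092 - padding_offset;	/* the .space expression */

	/* padding plus the final 4-byte branch must end the 4 KB page */
	assert(padding_offset + pad + 4 == 4096);
}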
/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */
ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

-ENTRY(dataloss_erratum_access)
-	/* Lock two cache lines into I-Cache */
-	sync
-	mfspr	%r11, SPR_L1CSR1
-	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
-	sync
-	isync
-	mtspr	SPR_L1CSR1, %r11
-	isync
-
-	lis	%r8, 2f@h
-	ori	%r8, %r8, 2f@l
-	icbtls	0, 0, %r8
-	addi	%r9, %r8, 64
-
-	sync
-	mfspr	%r11, SPR_L1CSR1
-3:	andi.	%r11, %r11, L1CSR1_ICUL
-	bne	3b
-
-	icbtls	0, 0, %r9
-
-	sync
-	mfspr	%r11, SPR_L1CSR1
-3:	andi.	%r11, %r11, L1CSR1_ICUL
-	bne	3b
-
-	b	2f
-	.align	6
-	/* Inside a locked cacheline, wait a while, write, then wait a while */
-2:	sync
-
-	mfspr	%r5, TBR_TBL
-4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
-	mfspr	%r5, TBR_TBL
-	subf.	%r5, %r5, %r11
-	bgt	4b
-
-	stw	%r4, 0(%r3)
-
-	mfspr	%r5, TBR_TBL
-4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
-	mfspr	%r5, TBR_TBL
-	subf.	%r5, %r5, %r11
-	bgt	4b
-
-	sync
-
-	/*
-	 * Fill out the rest of this cache line and the next with nops,
-	 * to ensure that nothing outside the locked area will be
-	 * fetched due to a branch.
-	 */
-	.rept	19
-	nop
-	.endr
-
-	icblc	0, 0, %r8
-	icblc	0, 0, %r9
-
-	blr
-
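[Editor's note] The ENTRY(dcache_inval)/ENTRY(icache_inval) sequences above follow a
set-then-poll pattern: write the flash-invalidate bit, then spin until the
hardware clears it. A hedged C rendering; mfspr_l1csr0()/mtspr_l1csr0() are
hypothetical accessors standing in for the mfspr/mtspr opcodes, and the bit
value is only an assumed illustration of the <machine/spr.h> constant:

#include <stdint.h>

#define L1CSR0_DCFI	0x00000002u	/* assumed flash-invalidate bit */

extern uint32_t mfspr_l1csr0(void);	/* hypothetical mfspr wrapper */
extern void mtspr_l1csr0(uint32_t);	/* hypothetical mtspr wrapper */

static void
dcache_flash_inval(void)
{
	mtspr_l1csr0(mfspr_l1csr0() | L1CSR0_DCFI);
	while (mfspr_l1csr0() & L1CSR0_DCFI)
		;	/* DCFI self-clears when the invalidate completes */
}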
/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	mfspr	%r3, 0
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
TOC_ENTRY(bp_kernload)
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include

Index: head/sys/powerpc/mpc85xx/mpc85xx.c
===================================================================
--- head/sys/powerpc/mpc85xx/mpc85xx.c	(revision 333134)
+++ head/sys/powerpc/mpc85xx/mpc85xx.c	(revision 333135)
@@ -1,471 +1,357 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2008 Semihalf, Rafal Jaworowski
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * MPC85xx system specific routines
 */

uint32_t
ccsr_read4(uintptr_t addr)
{
	volatile uint32_t *ptr = (void *)addr;

	return (*ptr);
}

void
ccsr_write4(uintptr_t addr, uint32_t val)
{
	volatile uint32_t *ptr = (void *)addr;

	*ptr = val;
	powerpc_iomb();
}

int
law_getmax(void)
{
	uint32_t ver;
	int law_max;

	ver = SVR_VER(mfspr(SPR_SVR));
	switch (ver) {
	case SVR_MPC8555:
	case SVR_MPC8555E:
		law_max = 8;
		break;
	case SVR_MPC8533:
	case SVR_MPC8533E:
	case SVR_MPC8548:
	case SVR_MPC8548E:
		law_max = 10;
		break;
	case SVR_P5020:
	case SVR_P5020E:
	case SVR_P5021:
	case SVR_P5021E:
	case SVR_P5040:
	case SVR_P5040E:
		law_max = 32;
		break;
	default:
		law_max = 8;
	}

	return (law_max);
}

static inline void
law_write(uint32_t n, uint64_t bar, uint32_t sr)
{

	if (mpc85xx_is_qoriq()) {
		ccsr_write4(OCP85XX_LAWBARH(n), bar >> 32);
		ccsr_write4(OCP85XX_LAWBARL(n), bar);
		ccsr_write4(OCP85XX_LAWSR_QORIQ(n), sr);
		ccsr_read4(OCP85XX_LAWSR_QORIQ(n));
	} else {
		ccsr_write4(OCP85XX_LAWBAR(n), bar >> 12);
		ccsr_write4(OCP85XX_LAWSR_85XX(n), sr);
		ccsr_read4(OCP85XX_LAWSR_85XX(n));
	}

	/*
	 * The last write to LAWAR should be followed by a read of LAWAR
	 * before any device tries to use any of the windows.  Moreover,
	 * the read of LAWAR should be followed by an isync instruction.
	 */
	isync();
}

static inline void
law_read(uint32_t n, uint64_t *bar, uint32_t *sr)
{

	if (mpc85xx_is_qoriq()) {
		*bar = (uint64_t)ccsr_read4(OCP85XX_LAWBARH(n)) << 32 |
		    ccsr_read4(OCP85XX_LAWBARL(n));
		*sr = ccsr_read4(OCP85XX_LAWSR_QORIQ(n));
	} else {
		*bar = (uint64_t)ccsr_read4(OCP85XX_LAWBAR(n)) << 12;
		*sr = ccsr_read4(OCP85XX_LAWSR_85XX(n));
	}
}

static int
law_find_free(void)
{
	uint32_t i,sr;
	uint64_t bar;
	int law_max;

	law_max = law_getmax();
	/* Find free LAW */
	for (i = 0; i < law_max; i++) {
		law_read(i, &bar, &sr);
		if ((sr & 0x80000000) == 0)
			break;
	}

	return (i);
}

#define	_LAW_SR(trgt,size)	(0x80000000 | (trgt << 20) | \
				(flsl(size + (size - 1)) - 2))

int
law_enable(int trgt, uint64_t bar, uint32_t size)
{
	uint64_t bar_tmp;
	uint32_t sr, sr_tmp;
	int i, law_max;

	if (size == 0)
		return (0);

	law_max = law_getmax();
	sr = _LAW_SR(trgt, size);

	/* Bail if already programmed. */
	for (i = 0; i < law_max; i++) {
		law_read(i, &bar_tmp, &sr_tmp);
		if (sr == sr_tmp && bar == bar_tmp)
			return (0);
	}

	/* Find an unused access window. */
	i = law_find_free();

	if (i == law_max)
		return (ENOSPC);

	law_write(i, bar, sr);
	return (0);
}
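[Editor's note] A worked example of the _LAW_SR() size encoding above. For a
power-of-two size, flsl(size + (size - 1)) - 2 yields log2(size) - 1, matching
the LAWAR convention that the size field encodes a window of 2^(field+1) bytes:

#include <strings.h>	/* BSD flsl() */

static unsigned
law_size_field(unsigned long size)
{
	return (flsl(size + (size - 1)) - 2);
}

/*
 * e.g. size = 64 MB = 0x4000000:
 *   flsl(0x7ffffff) == 27, so the field is 25, and 2^(25+1) == 64 MB.
 */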
int
law_disable(int trgt, uint64_t bar, uint32_t size)
{
	uint64_t bar_tmp;
	uint32_t sr, sr_tmp;
	int i, law_max;

	law_max = law_getmax();
	sr = _LAW_SR(trgt, size);

	/* Find and disable requested LAW. */
	for (i = 0; i < law_max; i++) {
		law_read(i, &bar_tmp, &sr_tmp);
		if (sr == sr_tmp && bar == bar_tmp) {
			law_write(i, 0, 0);
			return (0);
		}
	}

	return (ENOENT);
}

int
law_pci_target(struct resource *res, int *trgt_mem, int *trgt_io)
{
	u_long start;
	uint32_t ver;
	int trgt, rv;

	ver = SVR_VER(mfspr(SPR_SVR));

	start = rman_get_start(res) & 0xf000;

	rv = 0;
	trgt = -1;
	switch (start) {
	case 0x0000:
	case 0x8000:
		trgt = 0;
		break;
	case 0x1000:
	case 0x9000:
		trgt = 1;
		break;
	case 0x2000:
	case 0xa000:
		if (ver == SVR_MPC8548E || ver == SVR_MPC8548)
			trgt = 3;
		else
			trgt = 2;
		break;
	case 0x3000:
	case 0xb000:
		if (ver == SVR_MPC8548E || ver == SVR_MPC8548)
			rv = EINVAL;
		else
			trgt = 3;
		break;
	default:
		rv = ENXIO;
	}
	if (rv == 0) {
		*trgt_mem = trgt;
		*trgt_io = trgt;
	}

	return (rv);
}

static void
l3cache_inval(void)
{

	/* Flash invalidate the CPC and clear all the locks */
	ccsr_write4(OCP85XX_CPC_CSR0, OCP85XX_CPC_CSR0_FI |
	    OCP85XX_CPC_CSR0_LFC);
	while (ccsr_read4(OCP85XX_CPC_CSR0) &
	    (OCP85XX_CPC_CSR0_FI | OCP85XX_CPC_CSR0_LFC))
		;
}

static void
l3cache_enable(void)
{

	ccsr_write4(OCP85XX_CPC_CSR0, OCP85XX_CPC_CSR0_CE |
	    OCP85XX_CPC_CSR0_PE);
	/* Read back to sync write */
	ccsr_read4(OCP85XX_CPC_CSR0);
}

void
mpc85xx_enable_l3_cache(void)
{
	uint32_t csr, size, ver;

	/* Enable L3 CoreNet Platform Cache (CPC) */
	ver = SVR_VER(mfspr(SPR_SVR));
	if (ver == SVR_P2041 || ver == SVR_P2041E || ver == SVR_P3041 ||
	    ver == SVR_P3041E || ver == SVR_P5020 || ver == SVR_P5020E) {
		csr = ccsr_read4(OCP85XX_CPC_CSR0);
		if ((csr & OCP85XX_CPC_CSR0_CE) == 0) {
			l3cache_inval();
			l3cache_enable();
		}

		csr = ccsr_read4(OCP85XX_CPC_CSR0);
		if ((boothowto & RB_VERBOSE) != 0 ||
		    (csr & OCP85XX_CPC_CSR0_CE) == 0) {
			size = OCP85XX_CPC_CFG0_SZ_K(ccsr_read4(OCP85XX_CPC_CFG0));
			printf("L3 Corenet Platform Cache: %d KB %sabled\n",
			    size, (csr & OCP85XX_CPC_CSR0_CE) == 0 ?
			    "dis" : "en");
		}
	}
}

int
mpc85xx_is_qoriq(void)
{
	uint16_t pvr = mfpvr() >> 16;

	/* QorIQ register set is only in e500mc and derivative core based SoCs. */
	if (pvr == FSL_E500mc || pvr == FSL_E5500 || pvr == FSL_E6500)
		return (1);

	return (0);
}
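[Editor's note] The OCP85XX_CPC_CFG0_SZ_K() macro used above shifts the CFG0 size
field left by 6, i.e. the field counts the cache size in 64 KB units. A worked
example (the register value is illustrative):

#include <stdint.h>

#define OCP85XX_CPC_CFG_SZ_MASK		0x00003fff
#define OCP85XX_CPC_CFG0_SZ_K(x)	(((x) & OCP85XX_CPC_CFG_SZ_MASK) << 6)

static uint32_t
cpc_size_kb(uint32_t cfg0)
{
	return (OCP85XX_CPC_CFG0_SZ_K(cfg0));
}

/* cpc_size_kb(32) == 32 * 64 == 2048 KB, i.e. a hypothetical 2 MB CPC. */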
-static void
-mpc85xx_dataloss_erratum_spr976(void)
-{
-	uint32_t svr = SVR_VER(mfspr(SPR_SVR));
-
-	/* Ignore whether it's the E variant */
-	svr &= ~0x8;
-
-	if (svr != SVR_P3041 && svr != SVR_P4040 &&
-	    svr != SVR_P4080 && svr != SVR_P5020)
-		return;
-
-	mb();
-	isync();
-	mtspr(976, (mfspr(976) & ~0x1f8) | 0x48);
-	isync();
-}
-
-static vm_offset_t
-mpc85xx_map_dcsr(void)
-{
-	phandle_t node;
-	u_long b, s;
-	int err;
-
-	/*
-	 * Try to access the dcsr node directly i.e. through /aliases/.
-	 */
-	if ((node = OF_finddevice("dcsr")) != -1)
-		if (fdt_is_compatible_strict(node, "fsl,dcsr"))
-			goto moveon;
-	/*
-	 * Find the node the long way.
-	 */
-	if ((node = OF_finddevice("/")) == -1)
-		return (0);
-
-	if ((node = ofw_bus_find_compatible(node, "fsl,dcsr")) == 0)
-		return (0);
-
-moveon:
-	err = fdt_get_range(node, 0, &b, &s);
-
-	if (err != 0)
-		return (0);
-
-	law_enable(OCP85XX_TGTIF_DCSR, b, 0x400000);
-	return pmap_early_io_map(b, 0x400000);
-}
-
-
-
-void
-mpc85xx_fix_errata(vm_offset_t va_ccsr)
-{
-	uint32_t svr = SVR_VER(mfspr(SPR_SVR));
-	vm_offset_t va_dcsr;
-
-	/* Ignore whether it's the E variant */
-	svr &= ~0x8;
-
-	if (svr != SVR_P3041 && svr != SVR_P4040 &&
-	    svr != SVR_P4080 && svr != SVR_P5020)
-		return;
-
-	if (mfmsr() & PSL_EE)
-		return;
-
-	/*
-	 * dcsr region need to be mapped thus patch can refer to.
-	 * Align dcsr right after ccsbar.
-	 */
-	va_dcsr = mpc85xx_map_dcsr();
-	if (va_dcsr == 0)
-		goto err;
-
-	/*
-	 * As A004510 errata specify, special purpose register 976
-	 * SPR976[56:60] = 6'b001001 must be set. e500mc core reference manual
-	 * does not document SPR976 register.
-	 */
-	mpc85xx_dataloss_erratum_spr976();
-
-	/*
-	 * Specific settings in the CCF and core platform cache (CPC)
-	 * are required to reconfigure the CoreNet coherency fabric.
-	 * The register settings that should be updated are described
-	 * in errata and relay on base address, offset and updated value.
-	 * Special conditions must be used to update these registers correctly.
-	 */
-	dataloss_erratum_access(va_dcsr + 0xb0e08, 0xe0201800);
-	dataloss_erratum_access(va_dcsr + 0xb0e18, 0xe0201800);
-	dataloss_erratum_access(va_dcsr + 0xb0e38, 0xe0400000);
-	dataloss_erratum_access(va_dcsr + 0xb0008, 0x00900000);
-	dataloss_erratum_access(va_dcsr + 0xb0e40, 0xe00a0000);
-
-	switch (svr) {
-	case SVR_P5020:
-		dataloss_erratum_access(va_ccsr + 0x18600, 0xc0000000);
-		break;
-	case SVR_P4040:
-	case SVR_P4080:
-		dataloss_erratum_access(va_ccsr + 0x18600, 0xff000000);
-		break;
-	case SVR_P3041:
-		dataloss_erratum_access(va_ccsr + 0x18600, 0xf0000000);
-	}
-	dataloss_erratum_access(va_ccsr + 0x10f00, 0x415e5000);
-	dataloss_erratum_access(va_ccsr + 0x11f00, 0x415e5000);
-
-err:
-	return;
-}
-
uint32_t
mpc85xx_get_platform_clock(void)
{
	phandle_t soc;
	static uint32_t freq;

	if (freq != 0)
		return (freq);

	soc = OF_finddevice("/soc");

	/* freq isn't modified on error. */
	OF_getencprop(soc, "bus-frequency", (void *)&freq, sizeof(freq));

	return (freq);
}

uint32_t
mpc85xx_get_system_clock(void)
{
	uint32_t freq;

	freq = mpc85xx_get_platform_clock();

	return (freq / 2);
}
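[Editor's note] A hedged usage sketch of the two clock accessors above: the platform
(CCB) clock comes from the /soc "bus-frequency" property, and the system clock
is defined as half of it (prototypes come from mpc85xx.h; print_clocks() is an
illustrative caller):

#include <stdint.h>
#include <stdio.h>

uint32_t mpc85xx_get_platform_clock(void);	/* from mpc85xx.h */
uint32_t mpc85xx_get_system_clock(void);	/* == platform / 2 */

static void
print_clocks(void)
{
	uint32_t ccb = mpc85xx_get_platform_clock();
	uint32_t sys = mpc85xx_get_system_clock();

	printf("CCB %u Hz, system %u Hz\n", ccb, sys);
}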
Index: head/sys/powerpc/mpc85xx/mpc85xx.h
===================================================================
--- head/sys/powerpc/mpc85xx/mpc85xx.h	(revision 333134)
+++ head/sys/powerpc/mpc85xx/mpc85xx.h	(revision 333135)
@@ -1,179 +1,177 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2008 Semihalf, Rafal Jaworowski
 * Copyright 2006 by Juniper Networks.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MPC85XX_H_
#define _MPC85XX_H_

#include

/*
 * Configuration control and status registers
 */
extern vm_offset_t	ccsrbar_va;
extern vm_paddr_t	ccsrbar_pa;
extern vm_size_t	ccsrbar_size;
#define	CCSRBAR_VA		ccsrbar_va
#define	OCP85XX_CCSRBAR		(CCSRBAR_VA + 0x0)
#define	OCP85XX_BPTR		(CCSRBAR_VA + 0x20)

#define	OCP85XX_BSTRH		(CCSRBAR_VA + 0x20)
#define	OCP85XX_BSTRL		(CCSRBAR_VA + 0x24)
#define	OCP85XX_BSTAR		(CCSRBAR_VA + 0x28)

#define	OCP85XX_COREDISR	(CCSRBAR_VA + 0xE0094)
#define	OCP85XX_BRR		(CCSRBAR_VA + 0xE00E4)

/*
 * Run Control and Power Management registers
 */
#define	CCSR_CTBENR		(CCSRBAR_VA + 0xE2084)
#define	CCSR_CTBCKSELR		(CCSRBAR_VA + 0xE208C)
#define	CCSR_CTBCHLTCR		(CCSRBAR_VA + 0xE2094)

/*
 * DDR Memory controller.
 */
#define	OCP85XX_DDR1_CS0_CONFIG	(CCSRBAR_VA + 0x8080)

/*
 * E500 Coherency Module registers
 */
#define	OCP85XX_EEBPCR		(CCSRBAR_VA + 0x1010)

/*
 * Local access registers
 */
/* Write order: OCP_LAWBARH -> OCP_LAWBARL -> OCP_LAWSR */
#define	OCP85XX_LAWBARH(n)	(CCSRBAR_VA + 0xc00 + 0x10 * (n))
#define	OCP85XX_LAWBARL(n)	(CCSRBAR_VA + 0xc04 + 0x10 * (n))
#define	OCP85XX_LAWSR_QORIQ(n)	(CCSRBAR_VA + 0xc08 + 0x10 * (n))
#define	OCP85XX_LAWBAR(n)	(CCSRBAR_VA + 0xc08 + 0x10 * (n))
#define	OCP85XX_LAWSR_85XX(n)	(CCSRBAR_VA + 0xc10 + 0x10 * (n))
#define	OCP85XX_LAWSR(n)	(mpc85xx_is_qoriq() ? OCP85XX_LAWSR_QORIQ(n) : \
    OCP85XX_LAWSR_85XX(n))

/* Attribute register */
#define	OCP85XX_ENA_MASK		0x80000000
#define	OCP85XX_DIS_MASK		0x7fffffff

#define	OCP85XX_TGTIF_LBC_QORIQ		0x1f
#define	OCP85XX_TGTIF_RAM_INTL_QORIQ	0x14
#define	OCP85XX_TGTIF_RAM1_QORIQ	0x10
#define	OCP85XX_TGTIF_RAM2_QORIQ	0x11
#define	OCP85XX_TGTIF_BMAN		0x18
#define	OCP85XX_TGTIF_DCSR		0x1D
#define	OCP85XX_TGTIF_QMAN		0x3C
#define	OCP85XX_TRGT_SHIFT_QORIQ	20

#define	OCP85XX_TGTIF_LBC_85XX		0x04
#define	OCP85XX_TGTIF_RAM_INTL_85XX	0x0b
#define	OCP85XX_TGTIF_RIO_85XX		0x0c
#define	OCP85XX_TGTIF_RAM1_85XX		0x0f
#define	OCP85XX_TGTIF_RAM2_85XX		0x16

#define	OCP85XX_TGTIF_LBC \
    (mpc85xx_is_qoriq() ? OCP85XX_TGTIF_LBC_QORIQ : OCP85XX_TGTIF_LBC_85XX)
#define	OCP85XX_TGTIF_RAM_INTL \
    (mpc85xx_is_qoriq() ? OCP85XX_TGTIF_RAM_INTL_QORIQ : OCP85XX_TGTIF_RAM_INTL_85XX)
#define	OCP85XX_TGTIF_RIO \
    (mpc85xx_is_qoriq() ? OCP85XX_TGTIF_RIO_QORIQ : OCP85XX_TGTIF_RIO_85XX)
#define	OCP85XX_TGTIF_RAM1 \
    (mpc85xx_is_qoriq() ? OCP85XX_TGTIF_RAM1_QORIQ : OCP85XX_TGTIF_RAM1_85XX)
#define	OCP85XX_TGTIF_RAM2 \
    (mpc85xx_is_qoriq() ? OCP85XX_TGTIF_RAM2_QORIQ : OCP85XX_TGTIF_RAM2_85XX)

/*
 * L2 cache registers
 */
#define	OCP85XX_L2CTL		(CCSRBAR_VA + 0x20000)

/*
 * L3 CoreNet platform cache (CPC) registers
 */
#define	OCP85XX_CPC_CSR0		(CCSRBAR_VA + 0x10000)
#define	OCP85XX_CPC_CSR0_CE		0x80000000
#define	OCP85XX_CPC_CSR0_PE		0x40000000
#define	OCP85XX_CPC_CSR0_FI		0x00200000
#define	OCP85XX_CPC_CSR0_WT		0x00080000
#define	OCP85XX_CPC_CSR0_FL		0x00000800
#define	OCP85XX_CPC_CSR0_LFC		0x00000400
#define	OCP85XX_CPC_CFG0		(CCSRBAR_VA + 0x10008)
#define	OCP85XX_CPC_CFG_SZ_MASK		0x00003fff
#define	OCP85XX_CPC_CFG0_SZ_K(x)	(((x) & OCP85XX_CPC_CFG_SZ_MASK) << 6)

/*
 * Power-On Reset configuration
 */
#define	OCP85XX_PORDEVSR	(CCSRBAR_VA + 0xe000c)
#define	OCP85XX_PORDEVSR_IO_SEL	0x00780000
#define	OCP85XX_PORDEVSR_IO_SEL_SHIFT	19

#define	OCP85XX_PORDEVSR2	(CCSRBAR_VA + 0xe0014)
/*
 * Status Registers.
 */
#define	OCP85XX_RSTCR		(CCSRBAR_VA + 0xe00b0)

#define	OCP85XX_CLKDVDR		(CCSRBAR_VA + 0xe0800)
#define	OCP85XX_CLKDVDR_PXCKEN		0x80000000
#define	OCP85XX_CLKDVDR_SSICKEN		0x20000000
#define	OCP85XX_CLKDVDR_PXCKINV		0x10000000
#define	OCP85XX_CLKDVDR_PXCLK_MASK	0x00FF0000
#define	OCP85XX_CLKDVDR_SSICLK_MASK	0x000000FF

/*
 * Run Control/Power Management Registers.
 */
#define	OCP85XX_RCPM_CDOZSR	(CCSRBAR_VA + 0xe2004)
#define	OCP85XX_RCPM_CDOZCR	(CCSRBAR_VA + 0xe200c)

/*
 * Prototypes.
 */
uint32_t ccsr_read4(uintptr_t addr);
void ccsr_write4(uintptr_t addr, uint32_t val);
int law_enable(int trgt, uint64_t bar, uint32_t size);
int law_disable(int trgt, uint64_t bar, uint32_t size);
int law_getmax(void);
int law_pci_target(struct resource *, int *, int *);

DECLARE_CLASS(mpc85xx_platform);
int mpc85xx_attach(platform_t);

void mpc85xx_enable_l3_cache(void);
-void mpc85xx_fix_errata(vm_offset_t);
-void dataloss_erratum_access(vm_offset_t, uint32_t);
int mpc85xx_is_qoriq(void);
uint32_t mpc85xx_get_platform_clock(void);
uint32_t mpc85xx_get_system_clock(void);

#endif /* _MPC85XX_H_ */

Index: head/sys/powerpc/mpc85xx/platform_mpc85xx.c
===================================================================
--- head/sys/powerpc/mpc85xx/platform_mpc85xx.c	(revision 333134)
+++ head/sys/powerpc/mpc85xx/platform_mpc85xx.c	(revision 333135)
@@ -1,537 +1,534 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "platform_if.h" #ifdef SMP extern void *ap_pcpu; extern vm_paddr_t kernload; /* Kernel physical load address */ extern uint8_t __boot_page[]; /* Boot page body */ extern uint32_t bp_kernload; struct cpu_release { uint32_t entry_h; uint32_t entry_l; uint32_t r3_h; uint32_t r3_l; uint32_t reserved; uint32_t pir; }; #endif extern uint32_t *bootinfo; vm_paddr_t ccsrbar_pa; vm_offset_t ccsrbar_va; vm_size_t ccsrbar_size; static int cpu, maxcpu; static int mpc85xx_probe(platform_t); static void mpc85xx_mem_regions(platform_t, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz); static u_long mpc85xx_timebase_freq(platform_t, struct cpuref *cpuref); static int mpc85xx_smp_first_cpu(platform_t, struct cpuref *cpuref); static int mpc85xx_smp_next_cpu(platform_t, struct cpuref *cpuref); static int mpc85xx_smp_get_bsp(platform_t, struct cpuref *cpuref); static int mpc85xx_smp_start_cpu(platform_t, struct pcpu *cpu); static void mpc85xx_smp_timebase_sync(platform_t, u_long tb, int ap); static void mpc85xx_reset(platform_t); static platform_method_t mpc85xx_methods[] = { PLATFORMMETHOD(platform_probe, mpc85xx_probe), PLATFORMMETHOD(platform_attach, mpc85xx_attach), PLATFORMMETHOD(platform_mem_regions, mpc85xx_mem_regions), PLATFORMMETHOD(platform_timebase_freq, mpc85xx_timebase_freq), PLATFORMMETHOD(platform_smp_first_cpu, mpc85xx_smp_first_cpu), PLATFORMMETHOD(platform_smp_next_cpu, mpc85xx_smp_next_cpu), PLATFORMMETHOD(platform_smp_get_bsp, mpc85xx_smp_get_bsp), PLATFORMMETHOD(platform_smp_start_cpu, mpc85xx_smp_start_cpu), PLATFORMMETHOD(platform_smp_timebase_sync, mpc85xx_smp_timebase_sync), PLATFORMMETHOD(platform_reset, mpc85xx_reset), PLATFORMMETHOD_END }; DEFINE_CLASS_0(mpc85xx, mpc85xx_platform, mpc85xx_methods, 0); PLATFORM_DEF(mpc85xx_platform); static int mpc85xx_probe(platform_t plat) { u_int pvr = (mfpvr() >> 16) & 0xFFFF; switch (pvr) { case FSL_E500v1: case FSL_E500v2: case FSL_E500mc: case FSL_E5500: case FSL_E6500: return (BUS_PROBE_DEFAULT); } return (ENXIO); } int mpc85xx_attach(platform_t plat) { phandle_t cpus, child, ccsr; const char *soc_name_guesses[] = {"/soc", "soc", NULL}; const char **name; pcell_t ranges[6], acells, pacells, scells; uint64_t ccsrbar, ccsrsize; int i; if ((cpus = OF_finddevice("/cpus")) != -1) { for (maxcpu = 0, child = OF_child(cpus); child != 0; child = OF_peer(child), maxcpu++) ; } else maxcpu = 1; /* * Locate CCSR region. Irritatingly, there is no way to find it * unless you already know where it is. Try to infer its location * from the device tree. */ ccsr = -1; for (name = soc_name_guesses; *name != NULL && ccsr == -1; name++) ccsr = OF_finddevice(*name); if (ccsr == -1) { char type[64]; /* That didn't work. 
		/* That didn't work. Search for devices of type "soc" */
		child = OF_child(OF_peer(0));
		for (OF_child(child); child != 0; child = OF_peer(child)) {
			if (OF_getprop(child, "device_type", type,
			    sizeof(type)) <= 0)
				continue;

			if (strcmp(type, "soc") == 0) {
				ccsr = child;
				break;
			}
		}
	}

	if (ccsr == -1)
		panic("Could not locate CCSR window!");

	OF_getprop(ccsr, "#size-cells", &scells, sizeof(scells));
	OF_getprop(ccsr, "#address-cells", &acells, sizeof(acells));
	OF_searchprop(OF_parent(ccsr), "#address-cells", &pacells,
	    sizeof(pacells));
	OF_getprop(ccsr, "ranges", ranges, sizeof(ranges));
	ccsrbar = ccsrsize = 0;
	for (i = acells; i < acells + pacells; i++) {
		ccsrbar <<= 32;
		ccsrbar |= ranges[i];
	}
	for (i = acells + pacells; i < acells + pacells + scells; i++) {
		ccsrsize <<= 32;
		ccsrsize |= ranges[i];
	}
	ccsrbar_va = pmap_early_io_map(ccsrbar, ccsrsize);
	ccsrbar_pa = ccsrbar;
	ccsrbar_size = ccsrsize;

-#if 0
-	mpc85xx_fix_errata(ccsrbar_va);
-#endif
	mpc85xx_enable_l3_cache();

	return (0);
}

void
mpc85xx_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{

	ofw_mem_regions(phys, physsz, avail, availsz);
}

static u_long
mpc85xx_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	u_long ticks;
	phandle_t cpus, child;
	pcell_t freq;

	if (bootinfo != NULL) {
		if (bootinfo[0] == 1) {
			/* Backward compatibility. See 8-STABLE. */
			ticks = bootinfo[3] >> 3;
		} else {
			/* Compatibility with Juniper's loader. */
			ticks = bootinfo[5] >> 3;
		}
	} else
		ticks = 0;

	if ((cpus = OF_finddevice("/cpus")) == -1)
		goto out;

	if ((child = OF_child(cpus)) == 0)
		goto out;

	switch (OF_getproplen(child, "timebase-frequency")) {
	case 4:
	{
		uint32_t tbase;

		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	case 8:
	{
		uint64_t tbase;

		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	default:
		break;
	}

	freq = 0;
	if (OF_getprop(child, "bus-frequency", (void *)&freq,
	    sizeof(freq)) <= 0)
		goto out;

	if (freq == 0)
		goto out;

	/*
	 * Time Base and Decrementer are updated every 8 CCB bus clocks.
	 * HID0[SEL_TBCLK] = 0
	 */
	if (mpc85xx_is_qoriq())
		ticks = freq / 32;
	else
		ticks = freq / 8;

out:
	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}
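[Editor's note] The fallback arithmetic in mpc85xx_timebase_freq() above, isolated:
when no "timebase-frequency" property exists, the timebase rate is derived from
the CCB bus clock, which classic e500 parts divide by 8 and QorIQ parts by 32.
A hedged sketch:

#include <stdint.h>

static uint64_t
tb_freq_from_ccb(uint64_t ccb_hz, int is_qoriq)
{
	/* e.g. a 600 MHz CCB gives 75 MHz (e500) or 18.75 MHz (QorIQ) */
	return (is_qoriq ? ccb_hz / 32 : ccb_hz / 8);
}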
static int
mpc85xx_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

	cpu = 0;
	cpuref->cr_cpuid = cpu;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_first_cpu: cpuid %d\n", cpuref->cr_cpuid);
	cpu++;

	return (0);
}

static int
mpc85xx_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{

	if (cpu >= maxcpu)
		return (ENOENT);

	cpuref->cr_cpuid = cpu++;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_next_cpu: cpuid %d\n", cpuref->cr_cpuid);

	return (0);
}

static int
mpc85xx_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = mfspr(SPR_PIR);
	cpuref->cr_hwref = cpuref->cr_cpuid;

	return (0);
}

#ifdef SMP
static int
mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
{
	vm_paddr_t rel_pa, bptr;
	volatile struct cpu_release *rel;
	vm_offset_t rel_va, rel_page;
	phandle_t node;
	int i;

	/* If we're calling this, the node already exists. */
	node = OF_finddevice("/cpus");
	for (i = 0, node = OF_child(node); i < pc->pc_cpuid;
	    i++, node = OF_peer(node))
		;
	if (OF_getencprop(node, "cpu-release-addr", (pcell_t *)&rel_pa,
	    sizeof(rel_pa)) == -1) {
		return (ENOENT);
	}

	rel_page = kva_alloc(PAGE_SIZE);
	if (rel_page == 0)
		return (ENOMEM);

	critical_enter();
	rel_va = rel_page + (rel_pa & PAGE_MASK);
	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
	rel = (struct cpu_release *)rel_va;
	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
	rel->pir = pc->pc_cpuid; __asm __volatile("sync");
	rel->entry_h = (bptr >> 32);
	rel->entry_l = bptr; __asm __volatile("sync");
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
	if (bootverbose)
		printf("Waking up CPU %d via CPU release page %p\n",
		    pc->pc_cpuid, rel);
	critical_exit();
	pmap_kremove(rel_page);
	kva_free(rel_page, PAGE_SIZE);

	return (0);
}
#endif
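[Editor's note] mpc85xx_smp_start_cpu_epapr() above implements the boot-program side
of the ePAPR spin table: the secondary core spins on its release structure,
which firmware initializes to the ePAPR hold value 1, until an entry address is
published. A hedged sketch of the AP side for contrast (illustrative only; real
secondaries execute firmware/assembly, not C):

#include <stdint.h>

static void
ap_spin(volatile uint32_t *entry_l)
{
	while (*entry_l == 1)	/* 1 is the ePAPR hold-off value */
		;
	/* jump to the published boot page entry point */
	((void (*)(void))(uintptr_t)*entry_l)();
}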
static int
mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
	vm_paddr_t bptr;
	uint32_t reg;
	int timeout;
	uintptr_t brr;
	int cpuid;
	int epapr_boot = 0;
	uint32_t tgt;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_COREDISR);
		cpuid = pc->pc_cpuid;

		if ((reg & (1 << cpuid)) != 0) {
			printf("%s: CPU %d is disabled!\n", __func__, pc->pc_cpuid);
			return (-1);
		}

		brr = OCP85XX_BRR;
	} else {
		brr = OCP85XX_EEBPCR;
		cpuid = pc->pc_cpuid + 24;
	}
	bp_kernload = kernload;
	/*
	 * bp_kernload is in the boot page.  Sync the cache because ePAPR
	 * booting has the other core(s) already running.
	 */
	cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload));

	ap_pcpu = pc;
	__asm __volatile("msync; isync");

	/* First try the ePAPR way. */
	if (mpc85xx_smp_start_cpu_epapr(plat, pc) == 0) {
		epapr_boot = 1;
		goto spin_wait;
	}

	reg = ccsr_read4(brr);
	if ((reg & (1 << cpuid)) != 0) {
		printf("SMP: CPU %d already out of hold-off state!\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	/* Flush caches to have our changes hit DRAM. */
	cpu_flush_dcache(__boot_page, 4096);

	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	KASSERT((bptr & 0xfff) == 0,
	    ("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
	if (mpc85xx_is_qoriq()) {
		/*
		 * Read DDR controller configuration to select proper BPTR target ID.
		 *
		 * On P5020 bit 29 of DDR1_CS0_CONFIG enables DDR controllers
		 * interleaving. If this bit is set, we have to use
		 * OCP85XX_TGTIF_RAM_INTL as BPTR target ID. On other QorIQ DPAA SoCs,
		 * this bit is reserved and always 0.
		 */
		reg = ccsr_read4(OCP85XX_DDR1_CS0_CONFIG);
		if (reg & (1 << 29))
			tgt = OCP85XX_TGTIF_RAM_INTL;
		else
			tgt = OCP85XX_TGTIF_RAM1;

		/*
		 * Set BSTR to the physical address of the boot page
		 */
		ccsr_write4(OCP85XX_BSTRH, bptr >> 32);
		ccsr_write4(OCP85XX_BSTRL, bptr);
		ccsr_write4(OCP85XX_BSTAR, OCP85XX_ENA_MASK |
		    (tgt << OCP85XX_TRGT_SHIFT_QORIQ) | (ffsl(PAGE_SIZE) - 2));

		/* Read back OCP85XX_BSTAR to synchronize write */
		ccsr_read4(OCP85XX_BSTAR);

		/*
		 * Enable and configure time base on new CPU.
		 */

		/* Set TB clock source to platform clock / 32 */
		reg = ccsr_read4(CCSR_CTBCKSELR);
		ccsr_write4(CCSR_CTBCKSELR, reg & ~(1 << pc->pc_cpuid));

		/* Enable TB */
		reg = ccsr_read4(CCSR_CTBENR);
		ccsr_write4(CCSR_CTBENR, reg | (1 << pc->pc_cpuid));
	} else {
		/*
		 * Set BPTR to the physical address of the boot page
		 */
		bptr = (bptr >> 12) | 0x80000000u;
		ccsr_write4(OCP85XX_BPTR, bptr);
		__asm __volatile("isync; msync");
	}

	/*
	 * Release AP from hold-off state
	 */
	reg = ccsr_read4(brr);
	ccsr_write4(brr, reg | (1 << cpuid));
	__asm __volatile("isync; msync");

spin_wait:
	timeout = 500;
	while (!pc->pc_awake && timeout--)
		DELAY(1000);	/* wait 1ms */

	/*
	 * Disable boot page translation so that the 4K page at the default
	 * address (= 0xfffff000) isn't permanently remapped and thus not
	 * usable otherwise.
	 */
	if (!epapr_boot) {
		if (mpc85xx_is_qoriq())
			ccsr_write4(OCP85XX_BSTAR, 0);
		else
			ccsr_write4(OCP85XX_BPTR, 0);
		__asm __volatile("isync; msync");
	}

	if (!pc->pc_awake)
		panic("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid);
	return ((pc->pc_awake) ? 0 : EBUSY);
#else
	/* No SMP support */
	return (ENXIO);
#endif
}

static void
mpc85xx_reset(platform_t plat)
{

	/*
	 * Try the dedicated reset register first.
	 * If the SoC doesn't have one, we'll fall
	 * back to using the debug control register.
	 */
	ccsr_write4(OCP85XX_RSTCR, 2);

	/* Clear DBCR0, disables debug interrupts and events. */
	mtspr(SPR_DBCR0, 0);
	__asm __volatile("isync");

	/* Enable Debug Interrupts in MSR. */
	mtmsr(mfmsr() | PSL_DE);

	/* Enable debug interrupts and issue reset. */
	mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);

	printf("Reset failed...\n");
	while (1)
		;
}

static void
mpc85xx_smp_timebase_sync(platform_t plat, u_long tb, int ap)
{

	mttb(tb);
}
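[Editor's note] Finally, a hedged restatement of the legacy (non-QorIQ) boot page
translation word built in mpc85xx_smp_start_cpu() above: the BPTR enable bit
OR'ed with the 4 KB page number of the boot page's physical address:

#include <stdint.h>

static uint32_t
bptr_word(uint64_t boot_page_pa)
{
	/* 0x80000000 = enable; low bits = physical address >> 12 */
	return (0x80000000u | (uint32_t)(boot_page_pa >> 12));
}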