Index: head/sys/powerpc/aim/locore.S
===================================================================
--- head/sys/powerpc/aim/locore.S	(revision 304050)
+++ head/sys/powerpc/aim/locore.S	(revision 304051)
@@ -1,8 +1,15 @@
/* $FreeBSD$ */

#ifdef __powerpc64__
#include <powerpc/aim/locore64.S>
#else
#include <powerpc/aim/locore32.S>
#endif

+/*
+ * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
+ * created.
+ */
+ENTRY(get_spr)
+	mfspr	%r3, 0
+	blr

Index: head/sys/powerpc/booke/locore.S
===================================================================
--- head/sys/powerpc/booke/locore.S	(revision 304050)
+++ head/sys/powerpc/booke/locore.S	(revision 304051)
@@ -1,872 +1,880 @@
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006 Semihalf, Marian Balakowicz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have HID1 register, so skip HID1 setup on
 * this core.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b

#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
	bl	5f
	.long	bp_kernload
	.long	__boot_page
5:	mflr	%r3
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19

	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this a virtual address */
	addi	%r3, %r3, (7f - 6b)
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * it's allowed to directly access all locations the kernel was linked
 * against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap
	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
/*
 * The __boot_tlb1 table is used to hold BSP TLB1 entries
 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
 * The BSP fills in the table in tlb_ap_prep(); the AP then loads its
 * contents into the TLB1 hardware in pmap_bootstrap_ap().
 */
__boot_tlb1:
	.space	TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
/*
 * Boot page needs to be exactly 4K, with the last word of this page
 * acting as the reset vector, so we need to stuff the remainder.
 * Upon release from holdoff, the CPU fetches the last word of the boot
 * page.
 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */
ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9
	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept	19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

+/*
+ * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
+ * created.
+ */
+ENTRY(get_spr)
+	mfspr	%r3, 0
+	blr
+
/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	3
GLOBAL(__startkernel)
	.long	begin
GLOBAL(__endkernel)
	.long	end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space	10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>

Index: head/sys/powerpc/powerpc/machdep.c
===================================================================
--- head/sys/powerpc/powerpc/machdep.c	(revision 304050)
+++ head/sys/powerpc/powerpc/machdep.c	(revision 304051)
@@ -1,518 +1,546 @@
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

int cold = 1;
#ifdef __powerpc64__
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
int hw_direct_map = 1;

extern void *ap_pcpu;

struct pcpu __pcpu[MAXCPU];

static struct trapframe frame0;

char		machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void	cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size, CTLFLAG_RD,
	   &cacheline_size, 0, "");

uintptr_t	powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);

long		Maxmem = 0;
long		realmem = 0;

struct kva_md_info kmi;

static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
	    ptoa((uintmax_t)physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zu (%zu MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

#ifdef __powerpc64__
			printf("0x%016jx - 0x%016jx, %jd bytes (%jd pages)\n",
#else
			printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
#endif
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_cnt.v_free_count),
	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}

extern vm_offset_t	__startkernel, __endkernel;

extern unsigned char	__bss_start[];
extern unsigned char	__sbss_start[];
extern unsigned char	__sbss_end[];
extern unsigned char	_end[];

void aim_cpu_init(vm_offset_t toc);
void booke_cpu_init(void);

uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
{
	struct		pcpu *pc;
	vm_offset_t	startkernel, endkernel;
	void		*kmdp;
	char		*env;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	kmdp = NULL;

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/* Check for ePAPR loader, which puts a magic value into r6 */
	if (mdp == (void *)0x65504150)
		mdp = NULL;

#ifdef AIM
	/*
	 * If running from an FDT, make sure we are in real mode to avoid
	 * tromping on firmware page tables. Everything in the kernel assumes
	 * 1:1 mappings out of firmware, so this won't break anything not
	 * already broken. This doesn't work if there is live OF, since OF
	 * may internally use non-1:1 mappings.
	 */
	if (ofentry == 0)
		mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));
#endif

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *),
			    0);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			db_fetch_ksymtab(ksym_start, ksym_end);
#endif
		}
	} else {
		bzero(__sbss_start, __sbss_end - __sbss_start);
		bzero(__bss_start, _end - __bss_start);
		init_static_kenv(NULL, 0);
	}

#ifdef BOOKE
	tlb1_init();
#endif

	/* Store boot environment state */
	OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = __pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread));
#endif
	pc->pc_cpuid = 0;

	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */
	mutex_init();

	/*
	 * Install the OF client interface
	 */
	OF_bootstrap();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	/*
	 * Init KDB
	 */
	kdb_init();

#ifdef AIM
	aim_cpu_init(toc);
#else /* BOOKE */
	booke_cpu_init();

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);
#endif

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */
	platform_probe_and_attach();

	/*
	 * Bring up MMU
	 */
	pmap_bootstrap(startkernel, endkernel);
	mtmsr(PSL_KERNSET & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

void
bzero(void *buf, size_t len)
{
	caddr_t	p;

	p = buf;

	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}

	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}

	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}

	while (len) {
		*p++ = 0;
		len--;
	}
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	register_t addr, off;

	/*
	 * Align the address to a cacheline and adjust the length
	 * accordingly. Then round the length to a multiple of the
	 * cacheline for easy looping.
	 */
	addr = (uintptr_t)ptr;
	off = addr & (cacheline_size - 1);
	addr -= off;
	len = roundup2(len + off, cacheline_size);

	while (len > 0) {
		__asm __volatile ("dcbf 0,%0" :: "r"(addr));
		__asm __volatile ("sync");
		addr += cacheline_size;
		len -= cacheline_size;
	}
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr0 = (register_t)addr;

	return (0);
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		__asm __volatile("or 2,2,2"); /* Set high thread priority */
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	critical_exit();
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		intr_restore(msr);
		__asm __volatile("or 6,6,6"); /* Set normal thread priority */
	}
}

+/*
+ * Simple ddb(4) command/hack to view any SPR on the running CPU.
+ * Uses a trivial asm function to perform the mfspr, and rewrites the mfspr
+ * instruction each time.
+ * XXX: Since it uses code modification, it won't work if the kernel code pages
+ * are marked RO.
+ */
+extern register_t get_spr(int);
+
+DB_SHOW_COMMAND(spr, db_show_spr)
+{
+	register_t spr;
+	volatile uint32_t *p;
+	int sprno, saved_sprno;
+
+	if (!have_addr)
+		return;
+
+	saved_sprno = sprno = (intptr_t) addr;
+	sprno = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5);
+	p = (uint32_t *)(void *)&get_spr;
+	*p = (*p & ~0x001ff800) | (sprno << 11);
+	__syncicache(get_spr, cacheline_size);
+	spr = get_spr(sprno);
+
+	db_printf("SPR %d(%x): %lx\n", saved_sprno, saved_sprno,
+	    (unsigned long)spr);
+}
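
Note on the patching logic above: mfspr carries its SPR operand in instruction
bits 11-20, with the two 5-bit halves of the SPR number swapped (the low five
bits of the number occupy the high half of the field and vice versa). That is
why db_show_spr swaps the halves of sprno before inserting it, and why the
patch mask is 0x001ff800. A minimal stand-alone C sketch of the same encoding
follows; mfspr_insn is a hypothetical helper for illustration only, not part
of this commit.

	#include <stdint.h>
	#include <stdio.h>

	/* mfspr rD,SPR is XFX-form: primary opcode 31, extended opcode 339. */
	#define	MFSPR_BASE(rd)	(0x7c0002a6u | ((uint32_t)(rd) << 21))
	#define	SPR_FIELD_MASK	0x001ff800u	/* instruction bits 11-20 */

	static uint32_t
	mfspr_insn(int rd, int sprno)
	{
		/* Swap the 5-bit halves, exactly as db_show_spr does. */
		uint32_t field = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5);

		return ((MFSPR_BASE(rd) & ~SPR_FIELD_MASK) | (field << 11));
	}

	int
	main(void)
	{
		/* SPR 1008 (HID0 on e500) read into r3; prints 0x7c70faa6. */
		printf("mfspr r3,1008 = 0x%08x\n", mfspr_insn(3, 1008));
		return (0);
	}

With the patch applied, entering "show spr 3f0" at the ddb prompt (ddb parses
numbers in hex by default) should rewrite get_spr to read SPR 0x3f0 and print
HID0 on an e500-class core.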