Index: stable/10/sys/arm/arm/bcopyinout.S =================================================================== --- stable/10/sys/arm/arm/bcopyinout.S (revision 278651) +++ stable/10/sys/arm/arm/bcopyinout.S (revision 278652) @@ -1,622 +1,622 @@ /* $NetBSD: bcopyinout.S,v 1.11 2003/10/13 21:22:40 scw Exp $ */ /*- * Copyright (c) 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "assym.s" #include #include .L_arm_memcpy: .word _C_LABEL(_arm_memcpy) .L_min_memcpy_size: .word _C_LABEL(_min_memcpy_size) __FBSDID("$FreeBSD$"); #ifdef _ARM_ARCH_5E #include #else .text - .align 0 + .align 2 #ifdef _ARM_ARCH_6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lcurpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lcurpcb #endif #define SAVE_REGS stmfd sp!, {r4-r11} #define RESTORE_REGS ldmfd sp!, {r4-r11} #if defined(_ARM_ARCH_5E) #define HELLOCPP # #define PREFETCH(rx,o) pld [ rx , HELLOCPP (o) ] #else #define PREFETCH(rx,o) #endif /* * r0 = user space address * r1 = kernel space address * r2 = length * * Copies bytes from user space to kernel space * * We save/restore r4-r11: * r4-r11 are scratch */ ENTRY(copyin) /* Quick exit if length is zero */ teq r2, #0 moveq r0, #0 RETeq ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormal ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormal stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #2 /* SRC_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormal: SAVE_REGS GET_PCB(r4) ldr r4, [r4] ldr r5, [r4, #PCB_ONFAULT] adr r3, .Lcopyfault str r3, [r4, #PCB_ONFAULT] PREFETCH(r0, 0) PREFETCH(r1, 0) /* * If not too many bytes, take the slow path. */ cmp r2, #0x08 blt .Licleanup /* * Align destination to word boundary. 
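 * The computed branch just below indexes a small jump table on
 * (dst & 3) and falls into an unrolled run that copies
 * (4 - (dst & 3)) & 3 bytes. Roughly, as a C sketch:
 *	while (((uintptr_t)dst & 3) != 0) { *dst++ = *src++; len--; }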
*/ and r6, r1, #0x3 ldr pc, [pc, r6, lsl #2] b .Lialend .word .Lialend .word .Lial3 .word .Lial2 .word .Lial1 .Lial3: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lial2: ldrbt r7, [r0], #1 sub r2, r2, #1 strb r7, [r1], #1 .Lial1: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lialend: /* * If few bytes left, finish slow. */ cmp r2, #0x08 blt .Licleanup /* * If source is not aligned, finish slow. */ ands r3, r0, #0x03 bne .Licleanup cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */ blt .Licleanup8 /* * Align destination to cacheline boundary. * If source and destination are nicely aligned, this can be a big * win. If not, it's still cheaper to copy in groups of 32 even if * we don't get the nice cacheline alignment. */ and r6, r1, #0x1f ldr pc, [pc, r6] b .Licaligned .word .Licaligned .word .Lical28 .word .Lical24 .word .Lical20 .word .Lical16 .word .Lical12 .word .Lical8 .word .Lical4 .Lical28:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical24:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical20:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical16:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical12:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical8:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical4:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 /* * We start with > 0x40 bytes to copy (>= 0x60 got us into this * part of the code, and we may have knocked that down by as much * as 0x1c getting aligned). * * This loop basically works out to: * do { * prefetch-next-cacheline(s) * bytes -= 0x20; * copy cacheline * } while (bytes >= 0x40); * bytes -= 0x20; * copy cacheline */ .Licaligned: PREFETCH(r0, 32) PREFETCH(r1, 32) sub r2, r2, #0x20 /* Copy a cacheline */ ldrt r10, [r0], #4 ldrt r11, [r0], #4 ldrt r6, [r0], #4 ldrt r7, [r0], #4 ldrt r8, [r0], #4 ldrt r9, [r0], #4 stmia r1!, {r10-r11} ldrt r10, [r0], #4 ldrt r11, [r0], #4 stmia r1!, {r6-r11} cmp r2, #0x40 bge .Licaligned sub r2, r2, #0x20 /* Copy a cacheline */ ldrt r10, [r0], #4 ldrt r11, [r0], #4 ldrt r6, [r0], #4 ldrt r7, [r0], #4 ldrt r8, [r0], #4 ldrt r9, [r0], #4 stmia r1!, {r10-r11} ldrt r10, [r0], #4 ldrt r11, [r0], #4 stmia r1!, {r6-r11} cmp r2, #0x08 blt .Liprecleanup .Licleanup8: ldrt r8, [r0], #4 ldrt r9, [r0], #4 sub r2, r2, #8 stmia r1!, {r8, r9} cmp r2, #8 bge .Licleanup8 .Liprecleanup: /* * If we're done, bail. 
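 * Otherwise at most 7 bytes remain. .Licleanup below finishes them
 * a byte at a time: a jump table indexed by (len & 3) enters an
 * unrolled four-byte run at the right spot and loops until len
 * reaches zero.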
*/ cmp r2, #0 beq .Lout .Licleanup: and r6, r2, #0x3 ldr pc, [pc, r6, lsl #2] b .Licend .word .Lic4 .word .Lic1 .word .Lic2 .word .Lic3 .Lic4: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lic3: ldrbt r7, [r0], #1 sub r2, r2, #1 strb r7, [r1], #1 .Lic2: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lic1: ldrbt r7, [r0], #1 subs r2, r2, #1 strb r7, [r1], #1 .Licend: bne .Licleanup .Liout: mov r0, #0 str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET .Lcopyfault: ldr r0, =EFAULT str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET END(copyin) /* * r0 = kernel space address * r1 = user space address * r2 = length * * Copies bytes from kernel space to user space * * We save/restore r4-r11: * r4-r11 are scratch */ ENTRY(copyout) /* Quick exit if length is zero */ teq r2, #0 moveq r0, #0 RETeq ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormale ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormale stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #1 /* DST_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormale: SAVE_REGS GET_PCB(r4) ldr r4, [r4] ldr r5, [r4, #PCB_ONFAULT] adr r3, .Lcopyfault str r3, [r4, #PCB_ONFAULT] PREFETCH(r0, 0) PREFETCH(r1, 0) /* * If not too many bytes, take the slow path. */ cmp r2, #0x08 blt .Lcleanup /* * Align destination to word boundary. */ and r6, r1, #0x3 ldr pc, [pc, r6, lsl #2] b .Lalend .word .Lalend .word .Lal3 .word .Lal2 .word .Lal1 .Lal3: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lal2: ldrb r7, [r0], #1 sub r2, r2, #1 strbt r7, [r1], #1 .Lal1: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lalend: /* * If few bytes left, finish slow. */ cmp r2, #0x08 blt .Lcleanup /* * If source is not aligned, finish slow. */ ands r3, r0, #0x03 bne .Lcleanup cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */ blt .Lcleanup8 /* * Align source & destination to cacheline boundary. */ and r6, r1, #0x1f ldr pc, [pc, r6] b .Lcaligned .word .Lcaligned .word .Lcal28 .word .Lcal24 .word .Lcal20 .word .Lcal16 .word .Lcal12 .word .Lcal8 .word .Lcal4 .Lcal28:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal24:ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal20:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal16:ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal12:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal8: ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal4: ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 /* * We start with > 0x40 bytes to copy (>= 0x60 got us into this * part of the code, and we may have knocked that down by as much * as 0x1c getting aligned). 
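 * (Worst case: 0x60 - 0x1c = 0x44 bytes left, still above the
 * loop's 0x40 threshold.)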
* * This loop basically works out to: * do { * prefetch-next-cacheline(s) * bytes -= 0x20; * copy cacheline * } while (bytes >= 0x40); * bytes -= 0x20; * copy cacheline */ .Lcaligned: PREFETCH(r0, 32) PREFETCH(r1, 32) sub r2, r2, #0x20 /* Copy a cacheline */ ldmia r0!, {r6-r11} strt r6, [r1], #4 strt r7, [r1], #4 ldmia r0!, {r6-r7} strt r8, [r1], #4 strt r9, [r1], #4 strt r10, [r1], #4 strt r11, [r1], #4 strt r6, [r1], #4 strt r7, [r1], #4 cmp r2, #0x40 bge .Lcaligned sub r2, r2, #0x20 /* Copy a cacheline */ ldmia r0!, {r6-r11} strt r6, [r1], #4 strt r7, [r1], #4 ldmia r0!, {r6-r7} strt r8, [r1], #4 strt r9, [r1], #4 strt r10, [r1], #4 strt r11, [r1], #4 strt r6, [r1], #4 strt r7, [r1], #4 cmp r2, #0x08 blt .Lprecleanup .Lcleanup8: ldmia r0!, {r8-r9} sub r2, r2, #8 strt r8, [r1], #4 strt r9, [r1], #4 cmp r2, #8 bge .Lcleanup8 .Lprecleanup: /* * If we're done, bail. */ cmp r2, #0 beq .Lout .Lcleanup: and r6, r2, #0x3 ldr pc, [pc, r6, lsl #2] b .Lcend .word .Lc4 .word .Lc1 .word .Lc2 .word .Lc3 .Lc4: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lc3: ldrb r7, [r0], #1 sub r2, r2, #1 strbt r7, [r1], #1 .Lc2: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lc1: ldrb r7, [r0], #1 subs r2, r2, #1 strbt r7, [r1], #1 .Lcend: bne .Lcleanup .Lout: mov r0, #0 str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET END(copyout) #endif /* * int badaddr_read_1(const uint8_t *src, uint8_t *dest) * * Copies a single 8-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_1) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldrb r3, [r0] nop nop nop strb r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_1) /* * int badaddr_read_2(const uint16_t *src, uint16_t *dest) * * Copies a single 16-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_2) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldrh r3, [r0] nop nop nop strh r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_2) /* * int badaddr_read_4(const uint32_t *src, uint32_t *dest) * * Copies a single 32-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_4) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldr r3, [r0] nop nop nop str r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_4) Index: stable/10/sys/arm/arm/bcopyinout_xscale.S =================================================================== --- stable/10/sys/arm/arm/bcopyinout_xscale.S (revision 278651) +++ stable/10/sys/arm/arm/bcopyinout_xscale.S (revision 278652) @@ -1,940 +1,940 @@ /* $NetBSD: bcopyinout_xscale.S,v 1.3 2003/12/15 09:27:18 scw Exp $ */ /*- * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); .syntax unified .text - .align 0 + .align 2 #ifdef _ARM_ARCH_6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lcurpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lcurpcb #endif /* * r0 = user space address * r1 = kernel space address * r2 = length * * Copies bytes from user space to kernel space */ ENTRY(copyin) cmp r2, #0x00 movle r0, #0x00 movle pc, lr /* Bail early if length is <= 0 */ ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormal ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormal stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #2 /* SRC_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormal: stmfd sp!, {r10-r11, lr} GET_PCB(r10) ldr r10, [r10] mov r3, #0x00 adr ip, .Lcopyin_fault ldr r11, [r10, #PCB_ONFAULT] str ip, [r10, #PCB_ONFAULT] bl .Lcopyin_guts str r11, [r10, #PCB_ONFAULT] mov r0, #0x00 ldmfd sp!, {r10-r11, pc} .Lcopyin_fault: ldr r0, =EFAULT str r11, [r10, #PCB_ONFAULT] cmp r3, #0x00 ldmfdgt sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */ ldmfdlt sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */ ldmfd sp!, {r10-r11, pc} .Lcopyin_guts: pld [r0] /* Word-align the destination buffer */ ands ip, r1, #0x03 /* Already word aligned? */ beq .Lcopyin_wordaligned /* Yup */ rsb ip, ip, #0x04 cmp r2, ip /* Enough bytes left to align it? */ blt .Lcopyin_l4_2 /* Nope. Just copy bytewise */ sub r2, r2, ip rsbs ip, ip, #0x03 addne pc, pc, ip, lsl #3 nop ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 cmp r2, #0x00 /* All done? */ RETeq /* Destination buffer is now word aligned */ .Lcopyin_wordaligned: ands ip, r0, #0x03 /* Is src also word-aligned? */ bne .Lcopyin_bad_align /* Nope. Things just got bad */ cmp r2, #0x08 /* Less than 8 bytes remaining? */ blt .Lcopyin_w_less_than8 /* Quad-align the destination buffer */ tst r1, #0x07 /* Already quad aligned? 
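 * Quad-aligning the kernel destination lets the main loop pair its
 * stores into 64-bit strd transfers; the user-side loads stay ldrt
 * word accesses, performed with user permissions so faults are
 * caught.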
*/ ldrtne ip, [r0], #0x04 strne ip, [r1], #0x04 subne r2, r2, #0x04 stmfd sp!, {r4-r9} /* Free up some registers */ mov r3, #-1 /* Signal restore r4-r9 */ /* Destination buffer quad aligned, source is word aligned */ subs r2, r2, #0x80 blt .Lcopyin_w_lessthan128 /* Copy 128 bytes at a time */ .Lcopyin_w_loop128: ldrt r4, [r0], #0x04 /* LD:00-03 */ ldrt r5, [r0], #0x04 /* LD:04-07 */ pld [r0, #0x18] /* Prefetch 0x20 */ ldrt r6, [r0], #0x04 /* LD:08-0b */ ldrt r7, [r0], #0x04 /* LD:0c-0f */ ldrt r8, [r0], #0x04 /* LD:10-13 */ ldrt r9, [r0], #0x04 /* LD:14-17 */ strd r4, [r1], #0x08 /* ST:00-07 */ ldrt r4, [r0], #0x04 /* LD:18-1b */ ldrt r5, [r0], #0x04 /* LD:1c-1f */ strd r6, [r1], #0x08 /* ST:08-0f */ ldrt r6, [r0], #0x04 /* LD:20-23 */ ldrt r7, [r0], #0x04 /* LD:24-27 */ pld [r0, #0x18] /* Prefetch 0x40 */ strd r8, [r1], #0x08 /* ST:10-17 */ ldrt r8, [r0], #0x04 /* LD:28-2b */ ldrt r9, [r0], #0x04 /* LD:2c-2f */ strd r4, [r1], #0x08 /* ST:18-1f */ ldrt r4, [r0], #0x04 /* LD:30-33 */ ldrt r5, [r0], #0x04 /* LD:34-37 */ strd r6, [r1], #0x08 /* ST:20-27 */ ldrt r6, [r0], #0x04 /* LD:38-3b */ ldrt r7, [r0], #0x04 /* LD:3c-3f */ strd r8, [r1], #0x08 /* ST:28-2f */ ldrt r8, [r0], #0x04 /* LD:40-43 */ ldrt r9, [r0], #0x04 /* LD:44-47 */ pld [r0, #0x18] /* Prefetch 0x60 */ strd r4, [r1], #0x08 /* ST:30-37 */ ldrt r4, [r0], #0x04 /* LD:48-4b */ ldrt r5, [r0], #0x04 /* LD:4c-4f */ strd r6, [r1], #0x08 /* ST:38-3f */ ldrt r6, [r0], #0x04 /* LD:50-53 */ ldrt r7, [r0], #0x04 /* LD:54-57 */ strd r8, [r1], #0x08 /* ST:40-47 */ ldrt r8, [r0], #0x04 /* LD:58-5b */ ldrt r9, [r0], #0x04 /* LD:5c-5f */ strd r4, [r1], #0x08 /* ST:48-4f */ ldrt r4, [r0], #0x04 /* LD:60-63 */ ldrt r5, [r0], #0x04 /* LD:64-67 */ pld [r0, #0x18] /* Prefetch 0x80 */ strd r6, [r1], #0x08 /* ST:50-57 */ ldrt r6, [r0], #0x04 /* LD:68-6b */ ldrt r7, [r0], #0x04 /* LD:6c-6f */ strd r8, [r1], #0x08 /* ST:58-5f */ ldrt r8, [r0], #0x04 /* LD:70-73 */ ldrt r9, [r0], #0x04 /* LD:74-77 */ strd r4, [r1], #0x08 /* ST:60-67 */ ldrt r4, [r0], #0x04 /* LD:78-7b */ ldrt r5, [r0], #0x04 /* LD:7c-7f */ strd r6, [r1], #0x08 /* ST:68-6f */ strd r8, [r1], #0x08 /* ST:70-77 */ subs r2, r2, #0x80 strd r4, [r1], #0x08 /* ST:78-7f */ bge .Lcopyin_w_loop128 .Lcopyin_w_lessthan128: adds r2, r2, #0x80 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq subs r2, r2, #0x20 blt .Lcopyin_w_lessthan32 /* Copy 32 bytes at a time */ .Lcopyin_w_loop32: ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 pld [r0, #0x18] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt r8, [r0], #0x04 ldrt r9, [r0], #0x04 strd r4, [r1], #0x08 ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 strd r6, [r1], #0x08 strd r8, [r1], #0x08 subs r2, r2, #0x20 strd r4, [r1], #0x08 bge .Lcopyin_w_loop32 .Lcopyin_w_lessthan32: adds r2, r2, #0x20 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq /* Return now if done */ and r4, r2, #0x18 rsb r5, r4, #0x18 subs r2, r2, r4 add pc, pc, r5, lsl #1 nop /* At least 24 bytes remaining */ ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 nop strd r4, [r1], #0x08 /* At least 16 bytes remaining */ ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 nop strd r4, [r1], #0x08 /* At least 8 bytes remaining */ ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 nop strd r4, [r1], #0x08 /* Less than 8 bytes remaining */ ldmfd sp!, {r4-r9} RETeq /* Return now if done */ mov r3, #0x00 .Lcopyin_w_less_than8: subs r2, r2, #0x04 ldrtge ip, [r0], #0x04 strge ip, [r1], #0x04 RETeq /* Return now if done */ addlt r2, r2, #0x04 ldrbt ip, [r0], #0x01 cmp r2, #0x02 ldrbtge r2, [r0], #0x01 strb ip, [r1], #0x01 ldrbtgt 
ip, [r0] strbge r2, [r1], #0x01 strbgt ip, [r1] RET /* * At this point, it has not been possible to word align both buffers. * The destination buffer (r1) is word aligned, but the source buffer * (r0) is not. */ .Lcopyin_bad_align: stmfd sp!, {r4-r7} mov r3, #0x01 bic r0, r0, #0x03 cmp ip, #2 ldrt ip, [r0], #0x04 bgt .Lcopyin_bad3 beq .Lcopyin_bad2 b .Lcopyin_bad1 .Lcopyin_bad1_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldrt r5, [r0], #0x04 pld [r0, #0x018] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #24 mov r5, r5, lsl #8 orr r5, r5, r6, lsr #24 mov r6, r6, lsl #8 orr r6, r6, r7, lsr #24 mov r7, r7, lsl #8 orr r7, r7, ip, lsr #24 #else orr r4, r4, r5, lsl #24 mov r5, r5, lsr #8 orr r5, r5, r6, lsl #24 mov r6, r6, lsr #8 orr r6, r6, r7, lsl #24 mov r7, r7, lsr #8 orr r7, r7, ip, lsl #24 #endif str r4, [r1], #0x04 str r5, [r1], #0x04 str r6, [r1], #0x04 str r7, [r1], #0x04 .Lcopyin_bad1: subs r2, r2, #0x10 bge .Lcopyin_bad1_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x03 blt .Lcopyin_l4 .Lcopyin_bad1_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldrt ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #24 #else orr r4, r4, ip, lsl #24 #endif str r4, [r1], #0x04 bge .Lcopyin_bad1_loop4 sub r0, r0, #0x03 b .Lcopyin_l4 .Lcopyin_bad2_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldrt r5, [r0], #0x04 pld [r0, #0x018] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #16 mov r5, r5, lsl #16 orr r5, r5, r6, lsr #16 mov r6, r6, lsl #16 orr r6, r6, r7, lsr #16 mov r7, r7, lsl #16 orr r7, r7, ip, lsr #16 #else orr r4, r4, r5, lsl #16 mov r5, r5, lsr #16 orr r5, r5, r6, lsl #16 mov r6, r6, lsr #16 orr r6, r6, r7, lsl #16 mov r7, r7, lsr #16 orr r7, r7, ip, lsl #16 #endif str r4, [r1], #0x04 str r5, [r1], #0x04 str r6, [r1], #0x04 str r7, [r1], #0x04 .Lcopyin_bad2: subs r2, r2, #0x10 bge .Lcopyin_bad2_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x02 blt .Lcopyin_l4 .Lcopyin_bad2_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldrt ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #16 #else orr r4, r4, ip, lsl #16 #endif str r4, [r1], #0x04 bge .Lcopyin_bad2_loop4 sub r0, r0, #0x02 b .Lcopyin_l4 .Lcopyin_bad3_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldrt r5, [r0], #0x04 pld [r0, #0x018] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #8 mov r5, r5, lsl #24 orr r5, r5, r6, lsr #8 mov r6, r6, lsl #24 orr r6, r6, r7, lsr #8 mov r7, r7, lsl #24 orr r7, r7, ip, lsr #8 #else orr r4, r4, r5, lsl #8 mov r5, r5, lsr #24 orr r5, r5, r6, lsl #8 mov r6, r6, lsr #24 orr r6, r6, r7, lsl #8 mov r7, r7, lsr #24 orr r7, r7, ip, lsl #8 #endif str r4, [r1], #0x04 str r5, [r1], #0x04 str r6, [r1], #0x04 str r7, [r1], #0x04 .Lcopyin_bad3: subs r2, r2, #0x10 bge .Lcopyin_bad3_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x01 blt .Lcopyin_l4 .Lcopyin_bad3_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldrt ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #8 #else orr r4, r4, ip, lsl #8 #endif str r4, [r1], #0x04 bge 
.Lcopyin_bad3_loop4 sub r0, r0, #0x01 .Lcopyin_l4: ldmfd sp!, {r4-r7} mov r3, #0x00 adds r2, r2, #0x04 RETeq .Lcopyin_l4_2: rsbs r2, r2, #0x03 addne pc, pc, r2, lsl #3 nop ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0] strb ip, [r1] RET END(copyin) /* * r0 = kernel space address * r1 = user space address * r2 = length * * Copies bytes from kernel space to user space */ ENTRY(copyout) cmp r2, #0x00 movle r0, #0x00 movle pc, lr /* Bail early if length is <= 0 */ ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormale ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormale stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #1 /* DST_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormale: stmfd sp!, {r10-r11, lr} GET_PCB(r10) ldr r10, [r10] mov r3, #0x00 adr ip, .Lcopyout_fault ldr r11, [r10, #PCB_ONFAULT] str ip, [r10, #PCB_ONFAULT] bl .Lcopyout_guts str r11, [r10, #PCB_ONFAULT] mov r0, #0x00 ldmfd sp!, {r10-r11, pc} .Lcopyout_fault: ldr r0, =EFAULT str r11, [r10, #PCB_ONFAULT] cmp r3, #0x00 ldmfdgt sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */ ldmfdlt sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */ ldmfd sp!, {r10-r11, pc} .Lcopyout_guts: pld [r0] /* Word-align the destination buffer */ ands ip, r1, #0x03 /* Already word aligned? */ beq .Lcopyout_wordaligned /* Yup */ rsb ip, ip, #0x04 cmp r2, ip /* Enough bytes left to align it? */ blt .Lcopyout_l4_2 /* Nope. Just copy bytewise */ sub r2, r2, ip rsbs ip, ip, #0x03 addne pc, pc, ip, lsl #3 nop ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 cmp r2, #0x00 /* All done? */ RETeq /* Destination buffer is now word aligned */ .Lcopyout_wordaligned: ands ip, r0, #0x03 /* Is src also word-aligned? */ bne .Lcopyout_bad_align /* Nope. Things just got bad */ cmp r2, #0x08 /* Less than 8 bytes remaining? */ blt .Lcopyout_w_less_than8 /* Quad-align the destination buffer */ tst r0, #0x07 /* Already quad aligned? 
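 * (Note: despite the comment above, it is the kernel-side source
 * buffer (r0) being quad-aligned here, so the main loop can read
 * with 64-bit ldrd; the user-side stores remain strt word accesses
 * with user permissions.)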
*/ ldrne ip, [r0], #0x04 subne r2, r2, #0x04 strtne ip, [r1], #0x04 stmfd sp!, {r4-r9} /* Free up some registers */ mov r3, #-1 /* Signal restore r4-r9 */ /* Destination buffer word aligned, source is quad aligned */ subs r2, r2, #0x80 blt .Lcopyout_w_lessthan128 /* Copy 128 bytes at a time */ .Lcopyout_w_loop128: ldrd r4, [r0], #0x08 /* LD:00-07 */ pld [r0, #0x18] /* Prefetch 0x20 */ ldrd r6, [r0], #0x08 /* LD:08-0f */ ldrd r8, [r0], #0x08 /* LD:10-17 */ strt r4, [r1], #0x04 /* ST:00-03 */ strt r5, [r1], #0x04 /* ST:04-07 */ ldrd r4, [r0], #0x08 /* LD:18-1f */ strt r6, [r1], #0x04 /* ST:08-0b */ strt r7, [r1], #0x04 /* ST:0c-0f */ ldrd r6, [r0], #0x08 /* LD:20-27 */ pld [r0, #0x18] /* Prefetch 0x40 */ strt r8, [r1], #0x04 /* ST:10-13 */ strt r9, [r1], #0x04 /* ST:14-17 */ ldrd r8, [r0], #0x08 /* LD:28-2f */ strt r4, [r1], #0x04 /* ST:18-1b */ strt r5, [r1], #0x04 /* ST:1c-1f */ ldrd r4, [r0], #0x08 /* LD:30-37 */ strt r6, [r1], #0x04 /* ST:20-23 */ strt r7, [r1], #0x04 /* ST:24-27 */ ldrd r6, [r0], #0x08 /* LD:38-3f */ strt r8, [r1], #0x04 /* ST:28-2b */ strt r9, [r1], #0x04 /* ST:2c-2f */ ldrd r8, [r0], #0x08 /* LD:40-47 */ pld [r0, #0x18] /* Prefetch 0x60 */ strt r4, [r1], #0x04 /* ST:30-33 */ strt r5, [r1], #0x04 /* ST:34-37 */ ldrd r4, [r0], #0x08 /* LD:48-4f */ strt r6, [r1], #0x04 /* ST:38-3b */ strt r7, [r1], #0x04 /* ST:3c-3f */ ldrd r6, [r0], #0x08 /* LD:50-57 */ strt r8, [r1], #0x04 /* ST:40-43 */ strt r9, [r1], #0x04 /* ST:44-47 */ ldrd r8, [r0], #0x08 /* LD:58-4f */ strt r4, [r1], #0x04 /* ST:48-4b */ strt r5, [r1], #0x04 /* ST:4c-4f */ ldrd r4, [r0], #0x08 /* LD:60-67 */ pld [r0, #0x18] /* Prefetch 0x80 */ strt r6, [r1], #0x04 /* ST:50-53 */ strt r7, [r1], #0x04 /* ST:54-57 */ ldrd r6, [r0], #0x08 /* LD:68-6f */ strt r8, [r1], #0x04 /* ST:58-5b */ strt r9, [r1], #0x04 /* ST:5c-5f */ ldrd r8, [r0], #0x08 /* LD:70-77 */ strt r4, [r1], #0x04 /* ST:60-63 */ strt r5, [r1], #0x04 /* ST:64-67 */ ldrd r4, [r0], #0x08 /* LD:78-7f */ strt r6, [r1], #0x04 /* ST:68-6b */ strt r7, [r1], #0x04 /* ST:6c-6f */ strt r8, [r1], #0x04 /* ST:70-73 */ strt r9, [r1], #0x04 /* ST:74-77 */ subs r2, r2, #0x80 strt r4, [r1], #0x04 /* ST:78-7b */ strt r5, [r1], #0x04 /* ST:7c-7f */ bge .Lcopyout_w_loop128 .Lcopyout_w_lessthan128: adds r2, r2, #0x80 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq /* Return now if done */ subs r2, r2, #0x20 blt .Lcopyout_w_lessthan32 /* Copy 32 bytes at a time */ .Lcopyout_w_loop32: ldrd r4, [r0], #0x08 pld [r0, #0x18] ldrd r6, [r0], #0x08 ldrd r8, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 ldrd r4, [r0], #0x08 strt r6, [r1], #0x04 strt r7, [r1], #0x04 strt r8, [r1], #0x04 strt r9, [r1], #0x04 subs r2, r2, #0x20 strt r4, [r1], #0x04 strt r5, [r1], #0x04 bge .Lcopyout_w_loop32 .Lcopyout_w_lessthan32: adds r2, r2, #0x20 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq /* Return now if done */ and r4, r2, #0x18 rsb r5, r4, #0x18 subs r2, r2, r4 add pc, pc, r5, lsl #1 nop /* At least 24 bytes remaining */ ldrd r4, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 nop /* At least 16 bytes remaining */ ldrd r4, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 nop /* At least 8 bytes remaining */ ldrd r4, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 nop /* Less than 8 bytes remaining */ ldmfd sp!, {r4-r9} RETeq /* Return now if done */ mov r3, #0x00 .Lcopyout_w_less_than8: subs r2, r2, #0x04 ldrge ip, [r0], #0x04 strtge ip, [r1], #0x04 RETeq /* Return now if done */ addlt r2, r2, #0x04 ldrb ip, [r0], #0x01 cmp r2, #0x02 ldrbge r2, [r0], 
#0x01 strbt ip, [r1], #0x01 ldrbgt ip, [r0] strbtge r2, [r1], #0x01 strbtgt ip, [r1] RET /* * At this point, it has not been possible to word align both buffers. * The destination buffer (r1) is word aligned, but the source buffer * (r0) is not. */ .Lcopyout_bad_align: stmfd sp!, {r4-r7} mov r3, #0x01 bic r0, r0, #0x03 cmp ip, #2 ldr ip, [r0], #0x04 bgt .Lcopyout_bad3 beq .Lcopyout_bad2 b .Lcopyout_bad1 .Lcopyout_bad1_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldr r5, [r0], #0x04 pld [r0, #0x018] ldr r6, [r0], #0x04 ldr r7, [r0], #0x04 ldr ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #24 mov r5, r5, lsl #8 orr r5, r5, r6, lsr #24 mov r6, r6, lsl #8 orr r6, r6, r7, lsr #24 mov r7, r7, lsl #8 orr r7, r7, ip, lsr #24 #else orr r4, r4, r5, lsl #24 mov r5, r5, lsr #8 orr r5, r5, r6, lsl #24 mov r6, r6, lsr #8 orr r6, r6, r7, lsl #24 mov r7, r7, lsr #8 orr r7, r7, ip, lsl #24 #endif strt r4, [r1], #0x04 strt r5, [r1], #0x04 strt r6, [r1], #0x04 strt r7, [r1], #0x04 .Lcopyout_bad1: subs r2, r2, #0x10 bge .Lcopyout_bad1_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x03 blt .Lcopyout_l4 .Lcopyout_bad1_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldr ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #24 #else orr r4, r4, ip, lsl #24 #endif strt r4, [r1], #0x04 bge .Lcopyout_bad1_loop4 sub r0, r0, #0x03 b .Lcopyout_l4 .Lcopyout_bad2_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldr r5, [r0], #0x04 pld [r0, #0x018] ldr r6, [r0], #0x04 ldr r7, [r0], #0x04 ldr ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #16 mov r5, r5, lsl #16 orr r5, r5, r6, lsr #16 mov r6, r6, lsl #16 orr r6, r6, r7, lsr #16 mov r7, r7, lsl #16 orr r7, r7, ip, lsr #16 #else orr r4, r4, r5, lsl #16 mov r5, r5, lsr #16 orr r5, r5, r6, lsl #16 mov r6, r6, lsr #16 orr r6, r6, r7, lsl #16 mov r7, r7, lsr #16 orr r7, r7, ip, lsl #16 #endif strt r4, [r1], #0x04 strt r5, [r1], #0x04 strt r6, [r1], #0x04 strt r7, [r1], #0x04 .Lcopyout_bad2: subs r2, r2, #0x10 bge .Lcopyout_bad2_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x02 blt .Lcopyout_l4 .Lcopyout_bad2_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldr ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #16 #else orr r4, r4, ip, lsl #16 #endif strt r4, [r1], #0x04 bge .Lcopyout_bad2_loop4 sub r0, r0, #0x02 b .Lcopyout_l4 .Lcopyout_bad3_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldr r5, [r0], #0x04 pld [r0, #0x018] ldr r6, [r0], #0x04 ldr r7, [r0], #0x04 ldr ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #8 mov r5, r5, lsl #24 orr r5, r5, r6, lsr #8 mov r6, r6, lsl #24 orr r6, r6, r7, lsr #8 mov r7, r7, lsl #24 orr r7, r7, ip, lsr #8 #else orr r4, r4, r5, lsl #8 mov r5, r5, lsr #24 orr r5, r5, r6, lsl #8 mov r6, r6, lsr #24 orr r6, r6, r7, lsl #8 mov r7, r7, lsr #24 orr r7, r7, ip, lsl #8 #endif strt r4, [r1], #0x04 strt r5, [r1], #0x04 strt r6, [r1], #0x04 strt r7, [r1], #0x04 .Lcopyout_bad3: subs r2, r2, #0x10 bge .Lcopyout_bad3_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x01 blt .Lcopyout_l4 .Lcopyout_bad3_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldr ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #8 #else 
orr r4, r4, ip, lsl #8 #endif strt r4, [r1], #0x04 bge .Lcopyout_bad3_loop4 sub r0, r0, #0x01 .Lcopyout_l4: ldmfd sp!, {r4-r7} mov r3, #0x00 adds r2, r2, #0x04 RETeq .Lcopyout_l4_2: rsbs r2, r2, #0x03 addne pc, pc, r2, lsl #3 nop ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0] strbt ip, [r1] RET END(copyout) Index: stable/10/sys/arm/arm/copystr.S =================================================================== --- stable/10/sys/arm/arm/copystr.S (revision 278651) +++ stable/10/sys/arm/arm/copystr.S (revision 278652) @@ -1,214 +1,214 @@ /* $NetBSD: copystr.S,v 1.8 2002/10/13 14:54:48 bjh21 Exp $ */ /*- * Copyright (c) 1995 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * copystr.S * * optimised and fault protected copystr functions * * Created : 16/05/95 */ #include "assym.s" #include #include __FBSDID("$FreeBSD$"); #include .text - .align 0 + .align 2 #ifdef _ARM_ARCH_6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lpcb #endif /* * r0 - from * r1 - to * r2 - maxlens * r3 - lencopied * * Copy string from r0 to r1 */ ENTRY(copystr) stmfd sp!, {r4-r5} /* stack is 8 byte aligned */ teq r2, #0x00000000 mov r5, #0x00000000 moveq r0, #ENAMETOOLONG beq 2f 1: ldrb r4, [r0], #0x0001 add r5, r5, #0x00000001 teq r4, #0x00000000 strb r4, [r1], #0x0001 teqne r5, r2 bne 1b teq r4, #0x00000000 moveq r0, #0x00000000 movne r0, #ENAMETOOLONG 2: teq r3, #0x00000000 strne r5, [r3] ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */ RET END(copystr) #define SAVE_REGS stmfd sp!, {r4-r6} #define RESTORE_REGS ldmfd sp!, {r4-r6} /* * r0 - user space address * r1 - kernel space address * r2 - maxlens * r3 - lencopied * * Copy string from user space to kernel space */ ENTRY(copyinstr) SAVE_REGS teq r2, #0x00000000 mov r6, #0x00000000 moveq r0, #ENAMETOOLONG beq 2f GET_PCB(r4) ldr r4, [r4] #ifdef DIAGNOSTIC teq r4, #0x00000000 beq .Lcopystrpcbfault #endif adr r5, .Lcopystrfault str r5, [r4, #PCB_ONFAULT] 1: ldrbt r5, [r0], #0x0001 add r6, r6, #0x00000001 teq r5, #0x00000000 strb r5, [r1], #0x0001 teqne r6, r2 bne 1b mov r0, #0x00000000 str r0, [r4, #PCB_ONFAULT] teq r5, #0x00000000 moveq r0, #0x00000000 movne r0, #ENAMETOOLONG 2: teq r3, #0x00000000 strne r6, [r3] RESTORE_REGS RET END(copyinstr) /* * r0 - kernel space address * r1 - user space address * r2 - maxlens * r3 - lencopied * * Copy string from kernel space to user space */ ENTRY(copyoutstr) SAVE_REGS teq r2, #0x00000000 mov r6, #0x00000000 moveq r0, #ENAMETOOLONG beq 2f GET_PCB(r4) ldr r4, [r4] #ifdef DIAGNOSTIC teq r4, #0x00000000 beq .Lcopystrpcbfault #endif adr r5, .Lcopystrfault str r5, [r4, #PCB_ONFAULT] 1: ldrb r5, [r0], #0x0001 add r6, r6, #0x00000001 teq r5, #0x00000000 strbt r5, [r1], #0x0001 teqne r6, r2 bne 1b mov r0, #0x00000000 str r0, [r4, #PCB_ONFAULT] teq r5, #0x00000000 moveq r0, #0x00000000 movne r0, #ENAMETOOLONG 2: teq r3, #0x00000000 strne r6, [r3] RESTORE_REGS RET END(copyoutstr) /* A fault occurred during the copy */ .Lcopystrfault: mov r1, #0x00000000 str r1, [r4, #PCB_ONFAULT] RESTORE_REGS RET #ifdef DIAGNOSTIC .Lcopystrpcbfault: mov r2, r1 mov r1, r0 adr r0, Lcopystrpcbfaulttext bic sp, sp, #7 /* align stack to 8 bytes */ b _C_LABEL(panic) Lcopystrpcbfaulttext: .asciz "No valid PCB during copyinoutstr() addr1=%08x addr2=%08x\n" - .align 0 + .align 2 #endif Index: stable/10/sys/arm/arm/cpufunc_asm.S =================================================================== --- stable/10/sys/arm/arm/cpufunc_asm.S (revision 278651) +++ stable/10/sys/arm/arm/cpufunc_asm.S (revision 278652) @@ -1,195 +1,195 @@ /* $NetBSD: cpufunc_asm.S,v 1.12 2003/09/06 09:14:52 rearnsha Exp $ */ /*- * Copyright (c) 1997,1998 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpufunc.S * * Assembly functions for CPU / MMU / TLB specific operations * * Created : 30/01/97 * */ #include __FBSDID("$FreeBSD$"); .text - .align 0 + .align 2 ENTRY(cpufunc_nullop) RET END(cpufunc_nullop) /* * Generic functions to read the internal coprocessor registers * * Currently these registers are : * c0 - CPU ID * c5 - Fault status * c6 - Fault address * */ ENTRY(cpufunc_id) mrc p15, 0, r0, c0, c0, 0 RET END(cpufunc_id) ENTRY(cpufunc_cpuid) mrc p15, 0, r0, c0, c0, 0 RET END(cpufunc_cpuid) ENTRY(cpu_get_control) mrc p15, 0, r0, c1, c0, 0 RET END(cpu_get_control) ENTRY(cpu_read_cache_config) mrc p15, 0, r0, c0, c0, 1 RET END(cpu_read_cache_config) ENTRY(cpufunc_faultstatus) mrc p15, 0, r0, c5, c0, 0 RET END(cpufunc_faultstatus) ENTRY(cpufunc_faultaddress) mrc p15, 0, r0, c6, c0, 0 RET END(cpufunc_faultaddress) /* * Generic functions to write the internal coprocessor registers * * * Currently these registers are * c1 - CPU Control * c3 - Domain Access Control * * All other registers are CPU architecture specific */ #if 0 /* See below. */ ENTRY(cpufunc_control) mcr p15, 0, r0, c1, c0, 0 RET END(cpufunc_control) #endif ENTRY(cpufunc_domains) mcr p15, 0, r0, c3, c0, 0 RET END(cpufunc_domains) /* * Generic functions to read/modify/write the internal coprocessor registers * * * Currently these registers are * c1 - CPU Control * * All other registers are CPU architecture specific */ ENTRY(cpufunc_control) mrc p15, 0, r3, c1, c0, 0 /* Read the control register */ bic r2, r3, r0 /* Clear bits */ eor r2, r2, r1 /* XOR bits */ teq r2, r3 /* Only write if there is a change */ mcrne p15, 0, r2, c1, c0, 0 /* Write new control register */ mov r0, r3 /* Return old value */ RET .Lglou: .asciz "plop %p\n" - .align 0 + .align 2 END(cpufunc_control) /* * other potentially useful software functions are: * clean D cache entry and flush I cache entry * for the moment use cache_purgeID_E */ /* Random odd functions */ /* * Function to get the offset of a stored program counter from the * instruction doing the store. This offset is defined to be the same * for all STRs and STMs on a given implementation. Code based on * section 2.4.3 of the ARM ARM (2nd Ed.), with modifications to work * in 26-bit modes as well. 
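 * The trick: "mov r1, pc" captures the address of the following
 * STR, the STR stores the PC, and loading the value back and
 * subtracting r1 yields the offset directly (8 on most
 * implementations, 12 on some older ones).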
*/ ENTRY(get_pc_str_offset) mov ip, sp stmfd sp!, {fp, ip, lr, pc} sub fp, ip, #4 sub sp, sp, #4 mov r1, pc /* R1 = addr of following STR */ mov r0, r0 str pc, [sp] /* [SP] = . + offset */ ldr r0, [sp] sub r0, r0, r1 ldmdb fp, {fp, sp, pc} END(get_pc_str_offset) /* Allocate and lock a cacheline for the specified address. */ #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT() \ mrc p15, 0, r2, c2, c0, 0; \ mov r2, r2; \ CPWAIT_BRANCH ENTRY(arm_lock_cache_line) mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */ mov r1, #1 mcr p15, 0, r1, c9, c2, 0 /* Enable data cache lock mode */ CPWAIT() mcr p15, 0, r0, c7, c2, 5 /* Allocate the cache line */ mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */ mov r1, #0 str r1, [r0] mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */ mcr p15, 0, r1, c9, c2, 0 /* Disable data cache lock mode */ CPWAIT() RET END(arm_lock_cache_line) Index: stable/10/sys/arm/arm/cpufunc_asm_arm10.S =================================================================== --- stable/10/sys/arm/arm/cpufunc_asm_arm10.S (revision 278651) +++ stable/10/sys/arm/arm/cpufunc_asm_arm10.S (revision 278652) @@ -1,276 +1,276 @@ /* $NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $ */ /*- * Copyright (c) 2002 ARM Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * ARM10 assembly functions for CPU / MMU / TLB specific operations * */ #include __FBSDID("$FreeBSD$"); /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change. */ ENTRY(arm10_setttb) stmfd sp!, {r0, lr} bl _C_LABEL(arm10_idcache_wbinv_all) ldmfd sp!, {r0, lr} mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */ bx lr END(arm10_setttb) /* * TLB functions */ ENTRY(arm10_tlb_flushID_SE) mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ bx lr END(arm10_tlb_flushID_SE) ENTRY(arm10_tlb_flushI_SE) mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ bx lr END(arm10_tlb_flushI_SE) /* * Cache operations. For the entire cache we use the set/index * operations. 
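 * In outline, the full-cache variants below walk every (set, index)
 * pair, issuing one MCR per cache line, e.g. for a clean:
 *	for each set (sets_max down to 0, step sets_inc)
 *		for each index (index_max down to 0, step index_inc)
 *			clean the line named by (set | index)
 * The four loop parameters are loaded from the arm10_dcache_*
 * variables declared at the bottom of this file.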
*/ s_max .req r0 i_max .req r1 s_inc .req r2 i_inc .req r3 ENTRY_NP(arm10_icache_sync_range) ldr ip, .Larm10_line_size cmp r1, #0x4000 bcs .Larm10_icache_sync_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm10_sync_next: mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm10_sync_next mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr END(arm10_icache_sync_range) ENTRY_NP(arm10_icache_sync_all) .Larm10_icache_sync_all: /* * We assume that the code here can never be out of sync with the * dcache, so that we can safely flush the Icache and fall through * into the Dcache cleaning code. */ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ /* Fall through to clean Dcache. */ .Larm10_dcache_wb: ldr ip, .Larm10_cache_data ldmia ip, {s_max, i_max, s_inc, i_inc} .Lnext_set: orr ip, s_max, i_max .Lnext_index: mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */ subs ip, ip, i_inc bhs .Lnext_index /* Next index */ subs s_max, s_max, s_inc bhs .Lnext_set /* Next set */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr END(arm10_icache_sync_all) .Larm10_line_size: .word _C_LABEL(arm_pdcache_line_size) ENTRY(arm10_dcache_wb_range) ldr ip, .Larm10_line_size cmp r1, #0x4000 bcs .Larm10_dcache_wb ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm10_wb_next: mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm10_wb_next mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr END(arm10_dcache_wb_range) ENTRY(arm10_dcache_wbinv_range) ldr ip, .Larm10_line_size cmp r1, #0x4000 bcs .Larm10_dcache_wbinv_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm10_wbinv_next: mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm10_wbinv_next mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr END(arm10_dcache_wbinv_range) /* * Note, we must not invalidate everything. If the range is too big we * must use wb-inv of the entire cache. */ ENTRY(arm10_dcache_inv_range) ldr ip, .Larm10_line_size cmp r1, #0x4000 bcs .Larm10_dcache_wbinv_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm10_inv_next: mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm10_inv_next mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr END(arm10_dcache_inv_range) ENTRY(arm10_idcache_wbinv_range) ldr ip, .Larm10_line_size cmp r1, #0x4000 bcs .Larm10_idcache_wbinv_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm10_id_wbinv_next: mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm10_id_wbinv_next mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr END(arm10_idcache_wbinv_range) ENTRY_NP(arm10_idcache_wbinv_all) .Larm10_idcache_wbinv_all: /* * We assume that the code here can never be out of sync with the * dcache, so that we can safely flush the Icache and fall through * into the Dcache purging code. */ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ /* Fall through to purge Dcache. 
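 * (EENTRY marks arm10_dcache_wbinv_all as an alternate entry point
 * within this function, so callers wanting only the D-cache purge
 * skip the I-cache flush above.)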
*/ EENTRY(arm10_dcache_wbinv_all) .Larm10_dcache_wbinv_all: ldr ip, .Larm10_cache_data ldmia ip, {s_max, i_max, s_inc, i_inc} .Lnext_set_inv: orr ip, s_max, i_max .Lnext_index_inv: mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */ subs ip, ip, i_inc bhs .Lnext_index_inv /* Next index */ subs s_max, s_max, s_inc bhs .Lnext_set_inv /* Next set */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ bx lr EEND(arm10_dcache_wbinv_all) END(arm10_idcache_wbinv_all) .Larm10_cache_data: .word _C_LABEL(arm10_dcache_sets_max) /* * Context switch. * * These is the CPU-specific parts of the context switcher cpu_switch() * These functions actually perform the TTB reload. * * NOTE: Special calling convention * r1, r4-r13 must be preserved */ ENTRY(arm10_context_switch) /* * We can assume that the caches will only contain kernel addresses * at this point. So no need to flush them again. */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */ mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */ /* Paranoia -- make sure the pipeline is empty. */ nop nop nop bx lr END(arm10_context_switch) .bss /* XXX The following macros should probably be moved to asm.h */ #define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x: #define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x)) /* * Parameters for the cache cleaning code. Note that the order of these * four variables is assumed in the code above. Hence the reason for * declaring them in the assembler file. */ - .align 0 + .align 2 C_OBJECT(arm10_dcache_sets_max) .space 4 C_OBJECT(arm10_dcache_index_max) .space 4 C_OBJECT(arm10_dcache_sets_inc) .space 4 C_OBJECT(arm10_dcache_index_inc) .space 4 Index: stable/10/sys/arm/arm/cpufunc_asm_arm9.S =================================================================== --- stable/10/sys/arm/arm/cpufunc_asm_arm9.S (revision 278651) +++ stable/10/sys/arm/arm/cpufunc_asm_arm9.S (revision 278652) @@ -1,263 +1,263 @@ /* $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $ */ /* * Copyright (c) 2001, 2004 ARM Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * ARM9 assembly functions for CPU / MMU / TLB specific operations */ #include __FBSDID("$FreeBSD$"); /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change. */ ENTRY(arm9_setttb) stmfd sp!, {r0, lr} bl _C_LABEL(arm9_idcache_wbinv_all) ldmfd sp!, {r0, lr} mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */ mov pc, lr END(arm9_setttb) /* * TLB functions */ ENTRY(arm9_tlb_flushID_SE) mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ mov pc, lr END(arm9_tlb_flushID_SE) /* * Cache operations. For the entire cache we use the set/index * operations. */ s_max .req r0 i_max .req r1 s_inc .req r2 i_inc .req r3 ENTRY_NP(arm9_icache_sync_range) ldr ip, .Larm9_line_size cmp r1, #0x4000 bcs .Larm9_icache_sync_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm9_sync_next: mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm9_sync_next mov pc, lr END(arm9_icache_sync_range) ENTRY_NP(arm9_icache_sync_all) .Larm9_icache_sync_all: /* * We assume that the code here can never be out of sync with the * dcache, so that we can safely flush the Icache and fall through * into the Dcache cleaning code. */ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ /* Fall through to clean Dcache. */ .Larm9_dcache_wb: ldr ip, .Larm9_cache_data ldmia ip, {s_max, i_max, s_inc, i_inc} .Lnext_set: orr ip, s_max, i_max .Lnext_index: mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */ subs ip, ip, i_inc bhs .Lnext_index /* Next index */ subs s_max, s_max, s_inc bhs .Lnext_set /* Next set */ mov pc, lr END(arm9_icache_sync_all) .Larm9_line_size: .word _C_LABEL(arm_pdcache_line_size) ENTRY(arm9_dcache_wb_range) ldr ip, .Larm9_line_size cmp r1, #0x4000 bcs .Larm9_dcache_wb ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm9_wb_next: mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm9_wb_next mov pc, lr END(arm9_dcache_wb_range) ENTRY(arm9_dcache_wbinv_range) ldr ip, .Larm9_line_size cmp r1, #0x4000 bcs .Larm9_dcache_wbinv_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm9_wbinv_next: mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm9_wbinv_next mov pc, lr END(arm9_dcache_wbinv_range) /* * Note, we must not invalidate everything. If the range is too big we * must use wb-inv of the entire cache. 
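 * (A whole-cache invalidate would also discard dirty lines outside
 * the requested range and lose data, so the fallback must write
 * back as it invalidates.)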
*/ ENTRY(arm9_dcache_inv_range) ldr ip, .Larm9_line_size cmp r1, #0x4000 bcs .Larm9_dcache_wbinv_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm9_inv_next: mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm9_inv_next mov pc, lr END(arm9_dcache_inv_range) ENTRY(arm9_idcache_wbinv_range) ldr ip, .Larm9_line_size cmp r1, #0x4000 bcs .Larm9_idcache_wbinv_all ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larm9_id_wbinv_next: mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bhi .Larm9_id_wbinv_next mov pc, lr END(arm9_idcache_wbinv_range) ENTRY_NP(arm9_idcache_wbinv_all) .Larm9_idcache_wbinv_all: /* * We assume that the code here can never be out of sync with the * dcache, so that we can safely flush the Icache and fall through * into the Dcache purging code. */ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ /* Fall through */ EENTRY(arm9_dcache_wbinv_all) .Larm9_dcache_wbinv_all: ldr ip, .Larm9_cache_data ldmia ip, {s_max, i_max, s_inc, i_inc} .Lnext_set_inv: orr ip, s_max, i_max .Lnext_index_inv: mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */ subs ip, ip, i_inc bhs .Lnext_index_inv /* Next index */ subs s_max, s_max, s_inc bhs .Lnext_set_inv /* Next set */ mov pc, lr EEND(arm9_dcache_wbinv_all) END(arm9_idcache_wbinv_all) .Larm9_cache_data: .word _C_LABEL(arm9_dcache_sets_max) /* * Context switch. * * These is the CPU-specific parts of the context switcher cpu_switch() * These functions actually perform the TTB reload. * * NOTE: Special calling convention * r1, r4-r13 must be preserved */ ENTRY(arm9_context_switch) /* * We can assume that the caches will only contain kernel addresses * at this point. So no need to flush them again. */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */ mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */ /* Paranoia -- make sure the pipeline is empty. */ nop nop nop mov pc, lr END(arm9_context_switch) .bss /* XXX The following macros should probably be moved to asm.h */ #define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x: #define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x)) /* * Parameters for the cache cleaning code. Note that the order of these * four variables is assumed in the code above. Hence the reason for * declaring them in the assembler file. */ - .align 0 + .align 2 C_OBJECT(arm9_dcache_sets_max) .space 4 C_OBJECT(arm9_dcache_index_max) .space 4 C_OBJECT(arm9_dcache_sets_inc) .space 4 C_OBJECT(arm9_dcache_index_inc) .space 4 Index: stable/10/sys/arm/arm/cpufunc_asm_armv5.S =================================================================== --- stable/10/sys/arm/arm/cpufunc_asm_armv5.S (revision 278651) +++ stable/10/sys/arm/arm/cpufunc_asm_armv5.S (revision 278652) @@ -1,247 +1,247 @@ /* $NetBSD: cpufunc_asm_armv5.S,v 1.3 2007/01/06 00:50:54 christos Exp $ */ /* * Copyright (c) 2002, 2005 ARM Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * ARMv5 assembly functions for manipulating caches. * These routines can be used by any core that supports the set/index * operations. */ #include __FBSDID("$FreeBSD$"); /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change. */ ENTRY(armv5_setttb) stmfd sp!, {r0, lr} bl _C_LABEL(armv5_idcache_wbinv_all) ldmfd sp!, {r0, lr} mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */ RET END(armv5_setttb) /* * Cache operations. For the entire cache we use the set/index * operations. */ s_max .req r0 i_max .req r1 s_inc .req r2 i_inc .req r3 ENTRY_NP(armv5_icache_sync_range) ldr ip, .Larmv5_line_size cmp r1, #0x4000 bcs .Larmv5_icache_sync_all ldr ip, [ip] sub r1, r1, #1 /* Don't overrun */ sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 1: mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bpl 1b mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(armv5_icache_sync_range) ENTRY_NP(armv5_icache_sync_all) .Larmv5_icache_sync_all: /* * We assume that the code here can never be out of sync with the * dcache, so that we can safely flush the Icache and fall through * into the Dcache cleaning code. */ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ /* Fall through to clean Dcache. 
*/ .Larmv5_dcache_wb: ldr ip, .Larmv5_cache_data ldmia ip, {s_max, i_max, s_inc, i_inc} 1: orr ip, s_max, i_max 2: mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */ sub ip, ip, i_inc tst ip, i_max /* Index 0 is last one */ bne 2b /* Next index */ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */ subs s_max, s_max, s_inc bpl 1b /* Next set */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(armv5_icache_sync_all) .Larmv5_line_size: .word _C_LABEL(arm_pdcache_line_size) ENTRY(armv5_dcache_wb_range) ldr ip, .Larmv5_line_size cmp r1, #0x4000 bcs .Larmv5_dcache_wb ldr ip, [ip] sub r1, r1, #1 /* Don't overrun */ sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 1: mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bpl 1b mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(armv5_dcache_wb_range) ENTRY(armv5_dcache_wbinv_range) ldr ip, .Larmv5_line_size cmp r1, #0x4000 bcs .Larmv5_dcache_wbinv_all ldr ip, [ip] sub r1, r1, #1 /* Don't overrun */ sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 1: mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bpl 1b mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(armv5_dcache_wbinv_range) /* * Note, we must not invalidate everything. If the range is too big we * must use wb-inv of the entire cache. */ ENTRY(armv5_dcache_inv_range) ldr ip, .Larmv5_line_size cmp r1, #0x4000 bcs .Larmv5_dcache_wbinv_all ldr ip, [ip] sub r1, r1, #1 /* Don't overrun */ sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 1: mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bpl 1b mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(armv5_dcache_inv_range) ENTRY(armv5_idcache_wbinv_range) ldr ip, .Larmv5_line_size cmp r1, #0x4000 bcs .Larmv5_idcache_wbinv_all ldr ip, [ip] sub r1, r1, #1 /* Don't overrun */ sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 1: mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */ add r0, r0, ip subs r1, r1, ip bpl 1b mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(armv5_idcache_wbinv_range) ENTRY_NP(armv5_idcache_wbinv_all) .Larmv5_idcache_wbinv_all: /* * We assume that the code here can never be out of sync with the * dcache, so that we can safely flush the Icache and fall through * into the Dcache purging code. */ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */ /* Fall through to purge Dcache. */ EENTRY(armv5_dcache_wbinv_all) .Larmv5_dcache_wbinv_all: ldr ip, .Larmv5_cache_data ldmia ip, {s_max, i_max, s_inc, i_inc} 1: orr ip, s_max, i_max 2: mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */ sub ip, ip, i_inc tst ip, i_max /* Index 0 is last one */ bne 2b /* Next index */ mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */ subs s_max, s_max, s_inc bpl 1b /* Next set */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET EEND(armv5_dcache_wbinv_all) END(armv5_idcache_wbinv_all) .Larmv5_cache_data: .word _C_LABEL(armv5_dcache_sets_max) .bss /* XXX The following macros should probably be moved to asm.h */ #define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x: #define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x)) /* * Parameters for the cache cleaning code. Note that the order of these * four variables is assumed in the code above. Hence the reason for * declaring them in the assembler file. 
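The ldmia in the clean/purge loops above pulls {s_max, i_max, s_inc, i_inc} out of memory as a single block, which is why the four words declared just below must stay in exactly that order. A rough C rendering of the clean-all walk, including the detail that index 0 is issued after the inner loop terminates (illustrative names, not the kernel's):

    /* Stand-in for: mcr p15, 0, setway, c7, c10, 2 */
    static void dcache_line_op(int setway)
    {
        (void)setway;
    }

    /* Must mirror the memory order consumed by the single ldmia. */
    struct dcache_walk {
        int sets_max;   /* highest set operand, counts down to 0   */
        int index_max;  /* highest index operand, counts down to 0 */
        int sets_inc;   /* decrement applied per set               */
        int index_inc;  /* decrement applied per index (way)       */
    };

    static void dcache_clean_all_sketch(const struct dcache_walk *w)
    {
        for (int set = w->sets_max; set >= 0; set -= w->sets_inc) {
            int sw;
            /* all non-zero indices first... */
            for (sw = set | w->index_max; (sw & w->index_max) != 0;
                sw -= w->index_inc)
                    dcache_line_op(sw);
            dcache_line_op(sw);    /* ...index 0 is the last one */
        }
    }

The set and index bit fields never overlap, so OR-ing them together forms the set/way operand directly, just as the "orr ip, s_max, i_max" above does.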
*/ - .align 0 + .align 2 C_OBJECT(armv5_dcache_sets_max) .space 4 C_OBJECT(armv5_dcache_index_max) .space 4 C_OBJECT(armv5_dcache_sets_inc) .space 4 C_OBJECT(armv5_dcache_index_inc) .space 4 Index: stable/10/sys/arm/arm/exception.S =================================================================== --- stable/10/sys/arm/arm/exception.S (revision 278651) +++ stable/10/sys/arm/arm/exception.S (revision 278652) @@ -1,465 +1,465 @@ /* $NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $ */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * exception.S * * Low level handlers for exception vectors * * Created : 24/09/94 * * Based on kate/display/abort.s * */ #include "assym.s" #include #include #include __FBSDID("$FreeBSD$"); .text - .align 0 + .align 2 /* * ASM macros for pushing and pulling trapframes from the stack * * These macros are used to handle the irqframe and trapframe structures * defined above. */ /* * PUSHFRAME - macro to push a trap frame on the stack in the current mode * Since the current mode is used, the SVC lr field is not defined. 
* * NOTE: r13 and r14 are stored separately as a work around for the * SA110 rev 2 STM^ bug */ #ifdef ARM_TP_ADDRESS #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; \ ldr r0, =ARM_RAS_START; \ mov r1, #0; \ str r1, [r0]; \ mov r1, #0xffffffff; \ str r1, [r0, #4]; #else #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; #endif /* * PULLFRAME - macro to pull a trap frame from the stack in the current mode * Since the current mode is used, the SVC lr field is ignored. */ #ifdef ARM_TP_ADDRESS #define PULLFRAME \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ #else #define PULLFRAME \ ldr r0, [sp], #4 ; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ #endif /* * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode * This should only be used if the processor is not currently in SVC32 * mode. The processor mode is switched to SVC mode and the trap frame is * stored. The SVC lr field is used to store the previous value of * lr in SVC mode. 
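For orientation, the frame PUSHFRAME leaves on the stack is, from low to high address, roughly the following. This is a sketch inferred from the stores above, with made-up field names; the kernel's real struct trapframe lives in its machine headers:

    /* Illustrative layout only, low address first. */
    struct frame_sketch {
        unsigned int spsr;       /* pushed last, so lowest slot     */
        unsigned int r[13];      /* r0-r12                          */
        unsigned int usr_sp;     /* user r13, via stmia {...}^      */
        unsigned int usr_lr;     /* user r14, via stmia {...}^      */
        unsigned int svc_sp;     /* undefined after PUSHFRAME       */
        unsigned int svc_lr;     /* undefined after PUSHFRAME       */
        unsigned int pc;         /* the pushed return address (lr)  */
        unsigned int pad;        /* the initial "align the stack"   */
    };

PUSHFRAME runs in the current (SVC) mode, so the two SVC slots are left undefined, exactly as the comment above notes; PUSHFRAMEINSVC below fills them in explicitly.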
* * NOTE: r13 and r14 are stored separately as a work around for the * SA110 rev 2 STM^ bug */ #ifdef ARM_TP_ADDRESS #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte address */ \ sub sp, sp, #4; /* Pad trapframe to keep alignment */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ ldr r5, =ARM_RAS_START; /* Check if there's any RAS */ \ ldr r4, [r5, #4]; /* reset it to point at the */ \ cmp r4, #0xffffffff; /* end of memory if necessary; */ \ movne r1, #0xffffffff; /* leave value in r4 for later */ \ strne r1, [r5, #4]; /* comparison against PC. */ \ ldr r3, [r5]; /* Retrieve global RAS_START */ \ cmp r3, #0; /* and reset it if non-zero. */ \ movne r1, #0; /* If non-zero RAS_START and */ \ strne r1, [r5]; /* PC was lower than RAS_END, */ \ ldrne r1, [r0, #16]; /* adjust the saved PC so that */ \ cmpne r4, r1; /* execution later resumes at */ \ strhi r3, [r0, #16]; /* the RAS_START location. */ \ mrs r0, spsr; \ str r0, [sp, #-4]! #else #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte address */ \ sub sp, sp, #4; /* Pad trapframe to keep alignment */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]! #endif /* * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack * in SVC32 mode and restore the saved processor mode and PC. * This should be used when the SVC lr register needs to be restored on * exit. 
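Stepping back to the RAS block inside the ARM_TP_ADDRESS version of PUSHFRAMEINSVC above: it implements restartable atomic sequences, rewinding the saved PC to the start of a registered sequence if the trap interrupted it mid-sequence. A hedged C sketch of that decision; the two globals stand in for the words at ARM_RAS_START, whose real storage is the magic vector page:

    /* Hypothetical stand-ins for the two RAS words. */
    extern unsigned int ras_start;  /* 0 when no sequence is armed */
    extern unsigned int ras_end;    /* 0xffffffff when disarmed    */

    static void ras_fixup(unsigned int *saved_pc)
    {
        unsigned int start = ras_start;
        unsigned int end = ras_end;

        ras_end = 0xffffffffu;      /* disarm: end of memory       */
        ras_start = 0;              /* disarm: nothing registered  */
        if (start != 0 && *saved_pc < end)
            *saved_pc = start;      /* resume at sequence start    */
    }

Note that, like the assembler version, the check is only against RAS_END; a non-zero RAS_START is taken to mean the thread was executing inside the registered sequence.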
*/ #ifdef ARM_TP_ADDRESS #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ #else #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ #endif #if defined(__ARM_EABI__) /* * Unwind hints so we can unwind past functions that use * PULLFRAMEFROMSVCANDEXIT. They are run in reverse order. * As the last thing we do is restore the stack pointer * we can ignore the padding at the end of struct trapframe. */ #define UNWINDSVCFRAME \ .save {r13-r15}; /* Restore sp, lr, pc */ \ .pad #(2*4); /* Skip user sp and lr */ \ .save {r0-r12}; /* Restore r0-r12 */ \ .pad #(4) /* Skip spsr */ #else #define UNWINDSVCFRAME #endif #define DO_AST \ ldr r0, [sp]; /* Get the SPSR from stack */ \ mrs r4, cpsr; /* save CPSR */ \ orr r1, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r1; /* Disable interrupts */ \ and r0, r0, #(PSR_MODE); /* Returning to USR mode? */ \ teq r0, #(PSR_USR32_MODE); \ bne 2f; /* Nope, get out now */ \ bic r4, r4, #(PSR_I|PSR_F); \ 1: GET_CURTHREAD_PTR(r5); \ ldr r1, [r5, #(TD_FLAGS)]; \ and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED); \ teq r1, #0; \ beq 2f; /* Nope. Just bail */ \ msr cpsr_c, r4; /* Restore interrupts */ \ mov r0, sp; \ bl _C_LABEL(ast); /* ast(frame) */ \ orr r0, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r0; \ b 1b; \ 2: /* * Entry point for a Software Interrupt (SWI). * * The hardware switches to svc32 mode on a swi, so we're already on the * right stack; just build a trapframe and call the handler. */ ASENTRY_NP(swi_entry) PUSHFRAME /* Build the trapframe on the */ mov r0, sp /* svc32 stack, pass it to the */ bl _C_LABEL(swi_handler) /* swi handler. */ /* * The fork_trampoline() code in swtch.S arranges for the MI fork_exit() * to return to swi_exit here, to return to userland. The net effect is * that a newly created thread appears to return from a SWI just like * the parent thread that created it. */ ASEENTRY_NP(swi_exit) DO_AST /* Handle pending signals. */ PULLFRAME /* Deallocate trapframe. */ movs pc, lr /* Return to userland. */ STOP_UNWINDING /* Don't unwind into user mode. */ EEND(swi_exit) END(swi_entry) /* * Standard exception exit handler. * * This is used to return from all exceptions except SWI. It uses DO_AST and * PULLFRAMEFROMSVCANDEXIT and can only be called if the exception entry code * used PUSHFRAMEINSVC. * * If the return is to user mode, this uses DO_AST to deliver any pending * signals and/or handle TDF_NEEDRESCHED first. */ ASENTRY_NP(exception_exit) DO_AST /* Handle pending signals. */ PULLFRAMEFROMSVCANDEXIT /* Return. */ UNWINDSVCFRAME /* Special unwinding for exceptions. */ END(exception_exit) /* * Entry point for a Prefetch Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(prefetch_abort_entry) #ifdef __XSCALE__ nop /* Make absolutely sure any pending */ nop /* imprecise aborts have occurred. 
*/ #endif sub lr, lr, #4 /* Adjust the lr. Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ mov r1, #1 /* Type flag */ b _C_LABEL(abort_handler) END(prefetch_abort_entry) /* * Entry point for a Data Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(data_abort_entry) #ifdef __XSCALE__ nop /* Make absolutely sure any pending */ nop /* imprecise aborts have occurred. */ #endif sub lr, lr, #8 /* Adjust the lr. Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Exception exit routine */ mov r0, sp /* Trapframe to the handler */ mov r1, #0 /* Type flag */ b _C_LABEL(abort_handler) END(data_abort_entry) /* * Entry point for an Undefined Instruction exception. * * The hardware switches to the undefined mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the undefined mode stack). */ ASENTRY_NP(undefined_entry) sub lr, lr, #4 /* Adjust the lr. Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ b undefinedinstruction /* trapframe to the handler. */ END(undefined_entry) /* * Entry point for a normal IRQ. * * The hardware switches to the IRQ mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the IRQ mode stack). */ ASENTRY_NP(irq_entry) sub lr, lr, #4 /* Adjust the lr. Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ b _C_LABEL(arm_irq_handler)/* trapframe to the handler. */ END(irq_entry) /* * Entry point for an FIQ interrupt. * * We don't currently support FIQ handlers very much. Something can * install itself in the FIQ vector using code (that may or may not work * these days) in fiq.c. If nobody does that and an FIQ happens, this * default handler just disables FIQs and otherwise ignores it. */ ASENTRY_NP(fiq_entry) mrs r8, cpsr /* FIQ handling isn't supported, */ bic r8, #(PSR_F) /* just disable FIQ and return. */ msr cpsr_c, r8 /* The r8 we trash here is the */ subs pc, lr, #4 /* banked FIQ-mode r8. */ END(fiq_entry) /* * Entry point for an Address Exception exception. * This is an arm26 exception that should never happen. */ ASENTRY_NP(addr_exception_entry) mov r3, lr mrs r2, spsr mrs r1, cpsr adr r0, Laddr_exception_msg b _C_LABEL(panic) Laddr_exception_msg: .asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n" .balign 4 END(addr_exception_entry) /* * Entry point for the system Reset vector. * This should never happen, so panic. */ ASENTRY_NP(reset_entry) mov r1, lr adr r0, Lreset_panicmsg b _C_LABEL(panic) /* NOTREACHED */ Lreset_panicmsg: .asciz "Reset vector called, LR = 0x%08x" .balign 4 END(reset_entry) /* * page0 and page0_data -- An image of the ARM vectors which is copied to * the ARM vectors page (high or low) as part of CPU initialization. 
The * code that does the copy assumes that page0_data holds one 32-bit word * of data for each of the predefined ARM vectors. It also assumes that * page0_data follows the vectors in page0, but other stuff can appear * between the two. We currently leave room between the two for some fiq * handler code to be copied in. */ .global _C_LABEL(page0), _C_LABEL(page0_data) _C_LABEL(page0): ldr pc, .Lreset_entry ldr pc, .Lundefined_entry ldr pc, .Lswi_entry ldr pc, .Lprefetch_abort_entry ldr pc, .Ldata_abort_entry ldr pc, .Laddr_exception_entry ldr pc, .Lirq_entry .fiqv: ldr pc, .Lfiq_entry .space 256 /* room for some fiq handler code */ _C_LABEL(page0_data): .Lreset_entry: .word reset_entry .Lundefined_entry: .word undefined_entry .Lswi_entry: .word swi_entry .Lprefetch_abort_entry: .word prefetch_abort_entry .Ldata_abort_entry: .word data_abort_entry .Laddr_exception_entry: .word addr_exception_entry .Lirq_entry: .word irq_entry .Lfiq_entry: .word fiq_entry /* * These items are used by the code in fiq.c to install what it calls the * "null" handler. It's actually our default vector entry that just jumps * to the default handler which just disables FIQs and returns. */ .global _C_LABEL(fiq_nullhandler_code), _C_LABEL(fiq_nullhandler_size) _C_LABEL(fiq_nullhandler_code): .word .fiqv _C_LABEL(fiq_nullhandler_size): .word 4 Index: stable/10/sys/arm/arm/fusu.S =================================================================== --- stable/10/sys/arm/arm/fusu.S (revision 278651) +++ stable/10/sys/arm/arm/fusu.S (revision 278652) @@ -1,394 +1,394 @@ /* $NetBSD: fusu.S,v 1.10 2003/12/01 13:34:44 rearnsha Exp $ */ /*- * Copyright (c) 1996-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
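Picking up the page0/page0_data description above: the copy itself is mechanical, eight ldr-pc instruction words plus, further along, the eight addresses they load. Because the ldr pc instructions are PC-relative, the offset between the instructions and their data words must be preserved in the destination. A C sketch under those assumptions (illustrative only, not the kernel's actual copier):

    #include <string.h>

    extern unsigned int page0[];       /* vectors + fiq handler room */
    extern unsigned int page0_data[];  /* one word per ARM vector    */

    static void install_vectors(unsigned int *vector_page)
    {
        /* Relies on page0_data living inside the page0 image, at a
         * known offset from its start. */
        size_t data_off = (size_t)(page0_data - page0);

        memcpy(vector_page, page0, 8 * sizeof(unsigned int));
        memcpy(vector_page + data_off, page0_data,
            8 * sizeof(unsigned int));
    }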
* */ #include #include #include "assym.s" __FBSDID("$FreeBSD$"); .syntax unified #ifdef _ARM_ARCH_6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lcurpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lcurpcb #endif /* * casuword(caddr_t uaddr, int oldval, int newval); * Compare and swap an int in the user's address space. */ ENTRY(casuword) EENTRY_NP(casuword32) GET_PCB(r3) ldr r3, [r3] #ifdef DIAGNOSTIC teq r3, #0x00000000 beq .Lfusupcbfault #endif stmfd sp!, {r4, r5} adr r4, .Lcasuwordfault str r4, [r3, #PCB_ONFAULT] #ifdef _ARM_ARCH_6 1: cmp r0, #KERNBASE mvnhs r0, #0 bhs 2f ldrex r5, [r0] cmp r5, r1 movne r0, r5 bne 2f strex r5, r2, [r0] cmp r5, #0 bne 1b #else ldrt r5, [r0] cmp r5, r1 movne r0, r5 strteq r2, [r0] #endif moveq r0, r1 2: ldmfd sp!, {r4, r5} mov r1, #0x00000000 str r1, [r3, #PCB_ONFAULT] RET EEND(casuword32) END(casuword) /* * Handle faults from casuword. Clean up and return -1. */ .Lcasuwordfault: mov r0, #0x00000000 str r0, [r3, #PCB_ONFAULT] mvn r0, #0x00000000 ldmfd sp!, {r4, r5} RET /* * fuword(caddr_t uaddr); * Fetch an int from the user's address space. */ ENTRY(fuword) EENTRY_NP(fuword32) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrt r3, [r0] mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] mov r0, r3 RET EEND(fuword32) END(fuword) /* * fusword(caddr_t uaddr); * Fetch a short from the user's address space. */ ENTRY(fusword) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0], #1 ldrbt ip, [r0] #ifdef __ARMEB__ orr r0, ip, r3, asl #8 #else orr r0, r3, ip, asl #8 #endif mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] RET END(fusword) /* * fuswintr(caddr_t uaddr); * Fetch a short from the user's address space. Can be called during an * interrupt. */ ENTRY(fuswintr) ldr r2, Lblock_userspace_access ldr r2, [r2] teq r2, #0 mvnne r0, #0x00000000 RETne GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, _C_LABEL(fusubailout) str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0], #1 ldrbt ip, [r0] #ifdef __ARMEB__ orr r0, ip, r3, asl #8 #else orr r0, r3, ip, asl #8 #endif mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] RET END(fuswintr) Lblock_userspace_access: .word _C_LABEL(block_userspace_access) .data - .align 0 + .align 2 .global _C_LABEL(block_userspace_access) _C_LABEL(block_userspace_access): .word 0 .text /* * fubyte(caddr_t uaddr); * Fetch a byte from the user's address space. */ ENTRY(fubyte) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0] mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] mov r0, r3 RET END(fubyte) /* * Handle faults from [fs]u*(). Clean up and return -1. */ .Lfusufault: mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] mvn r0, #0x00000000 RET /* * Handle faults from [fs]u*(). Clean up and return -1. This differs from * fusufault() in that trap() will recognise it and return immediately rather * than trying to page fault. 
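All of the accessors in this file follow one protocol: aim pcb_onfault at a recovery label, touch user memory with a user-privilege load or store (ldrt/ldrbt/strt/strbt), then clear pcb_onfault; if the access faults, the trap code resumes at the recovery label, which returns -1. In C-flavoured pseudocode, with setjmp standing in for the label (the kernel records a raw program counter instead, and the types here are invented):

    #include <setjmp.h>

    struct pcb_sketch {
        jmp_buf *onfault;   /* models the real pcb_onfault slot */
    };

    static long fuword_sketch(struct pcb_sketch *pcb, const long *uaddr)
    {
        jmp_buf env;
        long v;

        if (setjmp(env) != 0) {   /* reached via fault recovery */
            pcb->onfault = 0;
            return (-1);
        }
        pcb->onfault = &env;
        v = *uaddr;               /* the ldrt happens here      */
        pcb->onfault = 0;
        return (v);
    }

The fuswintr/suswintr pair differs only in pointing pcb_onfault at fusubailout, which, as the comment above explains, trap() treats as "fail immediately" rather than attempting to page in the missing page.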
*/ /* label must be global as fault.c references it */ .global _C_LABEL(fusubailout) _C_LABEL(fusubailout): mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] mvn r0, #0x00000000 RET #ifdef DIAGNOSTIC /* * Handle earlier faults from [fs]u*(), due to no pcb */ .Lfusupcbfault: mov r1, r0 adr r0, fusupcbfaulttext b _C_LABEL(panic) fusupcbfaulttext: .asciz "Yikes - no valid PCB during fusuxxx() addr=%08x\n" .align 2 #endif /* * suword(caddr_t uaddr, int x); * Store an int in the user's address space. */ ENTRY(suword) EENTRY_NP(suword32) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] strt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET EEND(suword32) END(suword) /* * suswintr(caddr_t uaddr, short x); * Store a short in the user's address space. Can be called during an * interrupt. */ ENTRY(suswintr) ldr r2, Lblock_userspace_access ldr r2, [r2] teq r2, #0 mvnne r0, #0x00000000 RETne GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, _C_LABEL(fusubailout) str r3, [r2, #PCB_ONFAULT] #ifdef __ARMEB__ mov ip, r1, lsr #8 strbt ip, [r0], #1 #else strbt r1, [r0], #1 mov r1, r1, lsr #8 #endif strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(suswintr) /* * susword(caddr_t uaddr, short x); * Store a short in the user's address space. */ ENTRY(susword) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] #ifdef __ARMEB__ mov ip, r1, lsr #8 strbt ip, [r0], #1 #else strbt r1, [r0], #1 mov r1, r1, lsr #8 #endif strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(susword) /* * subyte(caddr_t uaddr, char x); * Store a byte in the user's address space. */ ENTRY(subyte) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(subyte) Index: stable/10/sys/arm/arm/locore-v4.S =================================================================== --- stable/10/sys/arm/arm/locore-v4.S (revision 278651) +++ stable/10/sys/arm/arm/locore-v4.S (revision 278652) @@ -1,596 +1,596 @@ /* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */ /*- * Copyright 2011 Semihalf * Copyright (C) 1994-1997 Mark Brinicombe * Copyright (C) 1994 Brini * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of Brini may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include "assym.s" #include #include #include #include #include __FBSDID("$FreeBSD$"); /* * Sanity check the configuration. * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases. * ARMv4 and ARMv5 make assumptions on where they are loaded. * * TODO: Fix the ARMv4/v5 case. */ #if (defined(FLASHADDR) || defined(LOADERRAMADDR) || !defined(_ARM_ARCH_6)) && \ !defined(PHYSADDR) #error PHYSADDR must be defined for this configuration #endif /* What size should this really be ? It is only used by initarm() */ #define INIT_ARM_STACK_SIZE (2048 * 4) #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ mov tmp, tmp /* wait for it to complete */ ;\ CPWAIT_BRANCH /* branch to next insn */ /* * This is for libkvm, and should be the address of the beginning * of the kernel text segment (not necessarily the same as kernbase). * * These are being phased out. Newer copies of libkvm don't need these * values as the information is added to the core file by inspecting * the running kernel. */ .text - .align 0 + .align 2 #ifdef PHYSADDR .globl kernbase .set kernbase,KERNBASE .globl physaddr .set physaddr,PHYSADDR #endif /* * On entry for FreeBSD boot ABI: * r0 - metadata pointer or 0 (boothowto on AT91's boot2) * r1 - if (r0 == 0) then metadata pointer * On entry for Linux boot ABI: * r0 - 0 * r1 - machine type (passed as arg2 to initarm) * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm) * * For both types of boot we gather up the args, put them in a struct arm_boot_params * structure and pass that to initarm. */ .globl btext btext: ASENTRY_NP(_start) STOP_UNWINDING /* Can't unwind into the bootloader! */ mov r9, r0 /* 0 or boot mode from boot2 */ mov r8, r1 /* Save Machine type */ mov ip, r2 /* Save meta data */ mov fp, r3 /* Future expansion */ /* Make sure interrupts are disabled. */ mrs r7, cpsr orr r7, r7, #(PSR_I | PSR_F) msr cpsr_c, r7 #if defined (FLASHADDR) && defined(LOADERRAMADDR) /* Check if we're running from flash. */ ldr r7, =FLASHADDR /* * If we're running with MMU disabled, test against the * physical address instead. */ mrc p15, 0, r2, c1, c0, 0 ands r2, r2, #CPU_CONTROL_MMU_ENABLE ldreq r6, =PHYSADDR ldrne r6, =LOADERRAMADDR cmp r7, r6 bls flash_lower cmp r7, pc bhi from_ram b do_copy flash_lower: cmp r6, pc bls from_ram do_copy: ldr r7, =KERNBASE adr r1, _start ldr r0, Lreal_start ldr r2, Lend sub r2, r2, r0 sub r0, r0, r7 add r0, r0, r6 mov r4, r0 bl memcpy ldr r0, Lram_offset add pc, r4, r0 Lram_offset: .word from_ram-_C_LABEL(_start) from_ram: nop #endif disable_mmu: /* Disable MMU for a while */ mrc p15, 0, r2, c1, c0, 0 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ CPU_CONTROL_WBUF_ENABLE) bic r2, r2, #(CPU_CONTROL_IC_ENABLE) bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE) mcr p15, 0, r2, c1, c0, 0 nop nop nop CPWAIT(r0) Lunmapped: /* * Build page table from scratch. 
*/ /* Find the delta between VA and PA */ adr r0, Lpagetable bl translate_va_to_pa #ifndef _ARM_ARCH_6 /* * Some of the older ports (the various XScale, mostly) assume * that the memory before the kernel is mapped, and use it for * the various stacks, page tables, etc. For those CPUs, map the * 64 first MB of RAM, as it used to be. */ /* * Map PA == VA */ ldr r5, =PHYSADDR mov r1, r5 mov r2, r5 /* Map 64MiB, preserved over calls to build_pagetables */ mov r3, #64 bl build_pagetables /* Create the kernel map to jump to */ mov r1, r5 ldr r2, =(KERNBASE) bl build_pagetables ldr r5, =(KERNPHYSADDR) #else /* * Map PA == VA */ /* Find the start kernels load address */ adr r5, _start ldr r2, =(L1_S_OFFSET) bic r5, r2 mov r1, r5 mov r2, r5 /* Map 64MiB, preserved over calls to build_pagetables */ mov r3, #64 bl build_pagetables /* Create the kernel map to jump to */ mov r1, r5 ldr r2, =(KERNVIRTADDR) bl build_pagetables #endif #if defined(SOCDEV_PA) && defined(SOCDEV_VA) /* Create the custom map */ ldr r1, =SOCDEV_PA ldr r2, =SOCDEV_VA bl build_pagetables #endif #if defined(SMP) orr r0, r0, #2 /* Set TTB shared memory flag */ #endif mcr p15, 0, r0, c2, c0, 0 /* Set TTB */ mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */ #if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT) mov r0, #0 mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */ #endif /* Set the Domain Access register. Very important! */ mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) mcr p15, 0, r0, c3, c0, 0 /* * Enable MMU. * On armv6 enable extended page tables, and set alignment checking * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd * instructions emitted by clang. */ mrc p15, 0, r0, c1, c0, 0 #ifdef _ARM_ARCH_6 orr r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE) orr r0, r0, #(CPU_CONTROL_AFLT_ENABLE) orr r0, r0, #(CPU_CONTROL_AF_ENABLE) #endif orr r0, r0, #(CPU_CONTROL_MMU_ENABLE) mcr p15, 0, r0, c1, c0, 0 nop nop nop CPWAIT(r0) mmu_done: nop adr r1, .Lstart ldmia r1, {r1, r2, sp} /* Set initial stack and */ sub r2, r2, r1 /* get zero init data */ mov r3, #0 .L1: str r3, [r1], #0x0004 /* get zero init data */ subs r2, r2, #4 bgt .L1 ldr pc, .Lvirt_done virt_done: mov r1, #28 /* loader info size is 28 bytes also second arg */ subs sp, sp, r1 /* allocate arm_boot_params struct on stack */ mov r0, sp /* loader info pointer is first arg */ bic sp, sp, #7 /* align stack to 8 bytes */ str r1, [r0] /* Store length of loader info */ str r9, [r0, #4] /* Store r0 from boot loader */ str r8, [r0, #8] /* Store r1 from boot loader */ str ip, [r0, #12] /* store r2 from boot loader */ str fp, [r0, #16] /* store r3 from boot loader */ str r5, [r0, #20] /* store the physical address */ adr r4, Lpagetable /* load the pagetable address */ ldr r5, [r4, #4] str r5, [r0, #24] /* store the pagetable address */ mov fp, #0 /* trace back starts here */ bl _C_LABEL(initarm) /* Off we go */ /* init arm will return the new stack pointer. */ mov sp, r0 bl _C_LABEL(mi_startup) /* call mi_startup()! */ adr r0, .Lmainreturned b _C_LABEL(panic) /* NOTREACHED */ END(_start) #define VA_TO_PA_POINTER(name, table) \ name: ;\ .word . ;\ .word table /* * Returns the physical address of a magic va to pa pointer. * r0 - The pagetable data pointer. This must be built using the * VA_TO_PA_POINTER macro. * e.g. * VA_TO_PA_POINTER(Lpagetable, pagetable) * ... 
* adr r0, Lpagetable * bl translate_va_to_pa * r0 will now contain the physical address of pagetable * r1, r2 - Trashed */ translate_va_to_pa: ldr r1, [r0] sub r2, r1, r0 /* At this point: r2 = VA - PA */ /* * Find the physical address of the table. After these two * instructions: * r1 = va(pagetable) * * r0 = va(pagetable) - (VA - PA) * = va(pagetable) - VA + PA * = pa(pagetable) */ ldr r1, [r0, #4] sub r0, r1, r2 RET /* * Builds the page table * r0 - The table base address * r1 - The physical address (trashed) * r2 - The virtual address (trashed) * r3 - The number of 1MiB sections * r4 - Trashed * * Addresses must be 1MiB aligned */ build_pagetables: /* Set the required page attributes */ ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) #if defined(SMP) orr r4, #(L1_SHARED) #endif orr r1, r4 /* Move the virtual address to the correct bit location */ lsr r2, #(L1_S_SHIFT - 2) mov r4, r3 1: str r1, [r0, r2] add r2, r2, #4 add r1, r1, #(L1_S_SIZE) adds r4, r4, #-1 bhi 1b RET VA_TO_PA_POINTER(Lpagetable, pagetable) Lreal_start: .word _start Lend: .word _edata .Lstart: .word _edata .word _ebss .word svcstk + INIT_ARM_STACK_SIZE .Lvirt_done: .word virt_done .Lmainreturned: .asciz "main() returned" - .align 0 + .align 2 .bss svcstk: .space INIT_ARM_STACK_SIZE /* * Memory for the initial pagetable. We are unable to place this in * the bss as this will be cleared after the table is loaded. */ .section ".init_pagetable" .align 14 /* 16KiB aligned */ pagetable: .space L1_TABLE_SIZE .text - .align 0 + .align 2 .Lcpufuncs: .word _C_LABEL(cpufuncs) #if defined(SMP) .Lmpvirt_done: .word mpvirt_done VA_TO_PA_POINTER(Lstartup_pagetable_secondary, temp_pagetable) ASENTRY_NP(mpentry) /* Make sure interrupts are disabled. */ mrs r7, cpsr orr r7, r7, #(PSR_I | PSR_F) msr cpsr_c, r7 /* Disable MMU. It should be disabled already, but make sure. */ mrc p15, 0, r2, c1, c0, 0 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ CPU_CONTROL_WBUF_ENABLE) bic r2, r2, #(CPU_CONTROL_IC_ENABLE) bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE) mcr p15, 0, r2, c1, c0, 0 nop nop nop CPWAIT(r0) #if ARM_MMU_V6 bl armv6_idcache_inv_all /* Modifies r0 only */ #elif ARM_MMU_V7 bl armv7_idcache_inv_all /* Modifies r0-r3, ip */ #endif /* Load the page table physical address */ adr r0, Lstartup_pagetable_secondary bl translate_va_to_pa /* Load the address of the secondary page table */ ldr r0, [r0] orr r0, r0, #2 /* Set TTB shared memory flag */ mcr p15, 0, r0, c2, c0, 0 /* Set TTB */ mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */ mov r0, #0 mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */ /* Set the Domain Access register. Very important! 
*/ mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) mcr p15, 0, r0, c3, c0, 0 /* Enable MMU */ mrc p15, 0, r0, c1, c0, 0 orr r0, r0, #CPU_CONTROL_V6_EXTPAGE orr r0, r0, #CPU_CONTROL_AF_ENABLE orr r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ CPU_CONTROL_WBUF_ENABLE) orr r0, r0, #(CPU_CONTROL_IC_ENABLE) orr r0, r0, #(CPU_CONTROL_BPRD_ENABLE) mcr p15, 0, r0, c1, c0, 0 nop nop nop CPWAIT(r0) adr r1, .Lstart ldmia r1, {r1, r2, sp} /* Set initial stack and */ mrc p15, 0, r0, c0, c0, 5 and r0, r0, #15 mov r1, #2048 mul r2, r1, r0 sub sp, sp, r2 str r1, [sp] ldr pc, .Lmpvirt_done mpvirt_done: mov fp, #0 /* trace back starts here */ bl _C_LABEL(init_secondary) /* Off we go */ adr r0, .Lmpreturned b _C_LABEL(panic) /* NOTREACHED */ .Lmpreturned: .asciz "init_secondary() returned" - .align 0 + .align 2 END(mpentry) #endif ENTRY_NP(cpu_halt) mrs r2, cpsr bic r2, r2, #(PSR_MODE) orr r2, r2, #(PSR_SVC32_MODE) orr r2, r2, #(PSR_I | PSR_F) msr cpsr_fsxc, r2 ldr r4, .Lcpu_reset_address ldr r4, [r4] ldr r0, .Lcpufuncs mov lr, pc ldr pc, [r0, #CF_IDCACHE_WBINV_ALL] mov lr, pc ldr pc, [r0, #CF_L2CACHE_WBINV_ALL] /* * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's * necessary. */ ldr r1, .Lcpu_reset_needs_v4_MMU_disable ldr r1, [r1] cmp r1, #0 mov r2, #0 /* * MMU & IDC off, 32 bit program & data space * Hurl ourselves into the ROM */ mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE) mcr p15, 0, r0, c1, c0, 0 mcrne p15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */ mov pc, r4 /* * _cpu_reset_address contains the address to branch to, to complete * the cpu reset after turning the MMU off * This variable is provided by the hardware specific code */ .Lcpu_reset_address: .word _C_LABEL(cpu_reset_address) /* * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the * v4 MMU disable instruction needs executing... it is an illegal instruction * on f.e. ARM6/7 that locks up the computer in an endless illegal * instruction / data-abort / reset loop. */ .Lcpu_reset_needs_v4_MMU_disable: .word _C_LABEL(cpu_reset_needs_v4_MMU_disable) END(cpu_halt) /* * setjump + longjmp */ ENTRY(setjmp) stmia r0, {r4-r14} mov r0, #0x00000000 RET END(setjmp) ENTRY(longjmp) ldmia r0, {r4-r14} mov r0, #0x00000001 RET END(longjmp) .data .global _C_LABEL(esym) _C_LABEL(esym): .word _C_LABEL(end) ENTRY_NP(abort) b _C_LABEL(abort) END(abort) ENTRY_NP(sigcode) mov r0, sp add r0, r0, #SIGF_UC /* * Call the sigreturn system call. * * We have to load r7 manually rather than using * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is * correct. Using the alternative places esigcode at the address * of the data rather than the address one past the data. */ ldr r7, [pc, #12] /* Load SYS_sigreturn */ swi SYS_sigreturn /* Well if that failed we better exit quick ! */ ldr r7, [pc, #8] /* Load SYS_exit */ swi SYS_exit /* Branch back to retry SYS_sigreturn */ b . 
- 16 END(sigcode) .word SYS_sigreturn .word SYS_exit - .align 0 + .align 2 .global _C_LABEL(esigcode) _C_LABEL(esigcode): .data .global szsigcode szsigcode: .long esigcode-sigcode /* End of locore.S */ Index: stable/10/sys/arm/arm/locore-v6.S =================================================================== --- stable/10/sys/arm/arm/locore-v6.S (revision 278651) +++ stable/10/sys/arm/arm/locore-v6.S (revision 278652) @@ -1,537 +1,537 @@ /*- * Copyright 2004-2014 Olivier Houchard * Copyright 2012-2014 Ian Lepore * Copyright 2013-2014 Andrew Turner * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "assym.s" #include #include #include #include #include #include #include __FBSDID("$FreeBSD$"); #ifndef ARM_NEW_PMAP #define PTE1_OFFSET L1_S_OFFSET #define PTE1_SHIFT L1_S_SHIFT #define PTE1_SIZE L1_S_SIZE #endif /* A small statically-allocated stack used only during initarm() and AP startup. */ #define INIT_ARM_STACK_SIZE 2048 .text - .align 0 + .align 2 /* * On entry for FreeBSD boot ABI: * r0 - metadata pointer or 0 (boothowto on AT91's boot2) * r1 - if (r0 == 0) then metadata pointer * On entry for Linux boot ABI: * r0 - 0 * r1 - machine type (passed as arg2 to initarm) * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm) * * For both types of boot we gather up the args, put them in a struct arm_boot_params * structure and pass that to initarm. */ .globl btext btext: ASENTRY_NP(_start) STOP_UNWINDING /* Can't unwind into the bootloader! */ /* Make sure interrupts are disabled. */ cpsid ifa mov r8, r0 /* 0 or boot mode from boot2 */ mov r9, r1 /* Save Machine type */ mov r10, r2 /* Save meta data */ mov r11, r3 /* Future expansion */ /* * Check whether data cache is enabled. If it is, then we know * current tags are valid (not power-on garbage values) and there * might be dirty lines that need cleaning. Disable cache to prevent * new lines being allocated, then call wbinv_poc_all to clean it. */ mrc CP15_SCTLR(r7) tst r7, #CPU_CONTROL_DC_ENABLE blne dcache_wbinv_poc_all /* ! Do not write to memory between wbinv and disabling cache ! */ /* * Now there are no dirty lines, but there may still be lines marked * valid. 
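The ordering above is the general rule for turning caches off: clean (write back) while the data cache is still enabled, then disable it, then invalidate, so that no dirty data is lost and no stale-but-valid lines survive into the new mapping. A C-style summary with the CP15 accesses reduced to stand-in helpers (constants and helper names are placeholders, not kernel API):

    static unsigned int sctlr;  /* models the CP15 control register */

    static unsigned int sctlr_read(void) { return (sctlr); }
    static void sctlr_write(unsigned int v) { sctlr = v; }
    static void dcache_wbinv_poc_all_stub(void) { /* clean+inv to PoC */ }
    static void dcache_inv_poc_all_stub(void) { /* invalidate only */ }

    #define DC_ENABLE 0x4u  /* placeholder for CPU_CONTROL_DC_ENABLE */

    static void caches_off_for_boot(void)
    {
        unsigned int ctrl = sctlr_read();

        if (ctrl & DC_ENABLE)          /* dirty lines possible?    */
            dcache_wbinv_poc_all_stub(); /* clean while enabled    */
        /* The real code must not write memory between the clean and
         * the disable; C cannot promise that, which is one reason
         * this lives in assembly. */
        ctrl &= ~DC_ENABLE;            /* stop new allocations     */
        sctlr_write(ctrl);
        dcache_inv_poc_all_stub();     /* drop stale valid lines   */
    }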
Disable all caches and the MMU, and invalidate everything * before setting up new page tables and re-enabling the mmu. */ 1: bic r7, #CPU_CONTROL_DC_ENABLE bic r7, #CPU_CONTROL_MMU_ENABLE bic r7, #CPU_CONTROL_IC_ENABLE bic r7, #CPU_CONTROL_UNAL_ENABLE bic r7, #CPU_CONTROL_BPRD_ENABLE bic r7, #CPU_CONTROL_SW_ENABLE orr r7, #CPU_CONTROL_AFLT_ENABLE orr r7, #CPU_CONTROL_VECRELOC mcr CP15_SCTLR(r7) ISB bl dcache_inv_poc_all mcr CP15_ICIALLU ISB /* * Build page table from scratch. */ /* Calculate the physical address of the startup pagetable. */ adr r0, Lpagetable bl translate_va_to_pa /* * Map PA == VA */ /* Find the start kernels load address */ adr r5, _start ldr r2, =(PTE1_OFFSET) bic r5, r2 mov r1, r5 mov r2, r5 /* Map 64MiB, preserved over calls to build_pagetables */ mov r3, #64 bl build_pagetables /* Create the kernel map to jump to */ mov r1, r5 ldr r2, =(KERNVIRTADDR) bl build_pagetables #if defined(SOCDEV_PA) && defined(SOCDEV_VA) /* Create the custom map used for early_printf(). */ ldr r1, =SOCDEV_PA ldr r2, =SOCDEV_VA bl build_pagetables #endif bl init_mmu /* Switch to virtual addresses. */ ldr pc, =1f 1: /* Setup stack, clear BSS */ ldr r1, =.Lstart ldmia r1, {r1, r2, sp} /* Set initial stack and */ add sp, sp, #INIT_ARM_STACK_SIZE sub r2, r2, r1 /* get zero init data */ mov r3, #0 2: str r3, [r1], #0x0004 /* get zero init data */ subs r2, r2, #4 bgt 2b mov r1, #28 /* loader info size is 28 bytes also second arg */ subs sp, sp, r1 /* allocate arm_boot_params struct on stack */ mov r0, sp /* loader info pointer is first arg */ bic sp, sp, #7 /* align stack to 8 bytes */ str r1, [r0] /* Store length of loader info */ str r8, [r0, #4] /* Store r0 from boot loader */ str r9, [r0, #8] /* Store r1 from boot loader */ str r10, [r0, #12] /* store r2 from boot loader */ str r11, [r0, #16] /* store r3 from boot loader */ str r5, [r0, #20] /* store the physical address */ adr r4, Lpagetable /* load the pagetable address */ ldr r5, [r4, #4] str r5, [r0, #24] /* store the pagetable address */ mov fp, #0 /* trace back starts here */ bl _C_LABEL(initarm) /* Off we go */ /* init arm will return the new stack pointer. */ mov sp, r0 bl _C_LABEL(mi_startup) /* call mi_startup()! */ ldr r0, =.Lmainreturned b _C_LABEL(panic) /* NOTREACHED */ END(_start) #define VA_TO_PA_POINTER(name, table) \ name: ;\ .word . ;\ .word table /* * Returns the physical address of a magic va to pa pointer. * r0 - The pagetable data pointer. This must be built using the * VA_TO_PA_POINTER macro. * e.g. * VA_TO_PA_POINTER(Lpagetable, pagetable) * ... * adr r0, Lpagetable * bl translate_va_to_pa * r0 will now contain the physical address of pagetable * r1, r2 - Trashed */ translate_va_to_pa: ldr r1, [r0] sub r2, r1, r0 /* At this point: r2 = VA - PA */ /* * Find the physical address of the table. 
After these two * instructions: * r1 = va(pagetable) * * r0 = va(pagetable) - (VA - PA) * = va(pagetable) - VA + PA * = pa(pagetable) */ ldr r1, [r0, #4] sub r0, r1, r2 mov pc, lr /* * Init MMU * r0 - The table base address */ ASENTRY_NP(init_mmu) /* Setup TLB and MMU registers */ mcr CP15_TTBR0(r0) /* Set TTB */ mov r0, #0 mcr CP15_CONTEXTIDR(r0) /* Set ASID to 0 */ /* Set the Domain Access register */ mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) mcr CP15_DACR(r0) #ifdef ARM_NEW_PMAP /* * Set TEX remap registers * - All is set to uncacheable memory */ ldr r0, =0xAAAAA mrc CP15_PRRR(r0) mov r0, #0 mcr CP15_NMRR(r0) #endif mcr CP15_TLBIALL /* Flush TLB */ DSB ISB /* Enable MMU */ mrc CP15_SCTLR(r0) orr r0, r0, #CPU_CONTROL_MMU_ENABLE orr r0, r0, #CPU_CONTROL_V6_EXTPAGE #ifdef ARM_NEW_PMAP orr r0, r0, #CPU_CONTROL_TR_ENABLE #endif orr r0, r0, #CPU_CONTROL_AF_ENABLE mcr CP15_SCTLR(r0) DSB ISB mcr CP15_TLBIALL /* Flush TLB */ mcr CP15_BPIALL /* Flush Branch predictor */ ISB mov pc, lr END(init_mmu) /* * Init SMP coherent mode, enable caching and switch to final MMU table. * Called with caches disabled * r0 - The table base address * r1 - clear bits for aux register * r2 - set bits for aux register */ ASENTRY_NP(reinit_mmu) push {r4-r11, lr} mov r4, r0 mov r5, r1 mov r6, r2 /* !! Be very paranoid here !! */ /* !! We cannot write single bit here !! */ #if 0 /* XXX writeback shouldn't be necessary */ /* Write back and invalidate all integrated caches */ bl dcache_wbinv_poc_all #else bl dcache_inv_pou_all #endif mcr CP15_ICIALLU ISB /* Set auxiliary register */ mrc CP15_ACTLR(r7) bic r8, r7, r5 /* Mask bits */ eor r8, r8, r6 /* Set bits */ teq r7, r8 mcrne CP15_ACTLR(r8) ISB /* Enable caches. */ mrc CP15_SCTLR(r7) orr r7, #CPU_CONTROL_DC_ENABLE orr r7, #CPU_CONTROL_IC_ENABLE orr r7, #CPU_CONTROL_BPRD_ENABLE mcr CP15_SCTLR(r7) DSB mcr CP15_TTBR0(r4) /* Set new TTB */ DSB ISB /* Flush all TLBs */ mcr CP15_TLBIALL DSB ISB #if 0 /* XXX writeback shouldn't be necessary */ /* Write back and invalidate all integrated caches */ bl dcache_wbinv_poc_all #else bl dcache_inv_pou_all #endif mcr CP15_ICIALLU ISB pop {r4-r11, pc} END(reinit_mmu) /* * Builds the page table * r0 - The table base address * r1 - The physical address (trashed) * r2 - The virtual address (trashed) * r3 - The number of 1MiB sections * r4 - Trashed * * Addresses must be 1MiB aligned */ build_pagetables: /* Set the required page attributes */ #if defined(ARM_NEW_PMAP) ldr r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0 #elif defined(SMP) ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED) #else ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) #endif orr r1, r4 /* Move the virtual address to the correct bit location */ lsr r2, #(PTE1_SHIFT - 2) mov r4, r3 1: str r1, [r0, r2] add r2, r2, #4 add r1, r1, #(PTE1_SIZE) adds r4, r4, #-1 bhi 1b mov pc, lr VA_TO_PA_POINTER(Lpagetable, boot_pt1) .Lstart: .word _edata .word _ebss .word svcstk .Lmainreturned: .asciz "main() returned" - .align 0 + .align 2 .bss svcstk: .space INIT_ARM_STACK_SIZE * MAXCPU /* * Memory for the initial pagetable. We are unable to place this in * the bss as this will be cleared after the table is loaded. */ .section ".init_pagetable" .align 14 /* 16KiB aligned */ .globl boot_pt1 boot_pt1: .space L1_TABLE_SIZE .text - .align 0 + .align 2 .Lcpufuncs: .word _C_LABEL(cpufuncs) #if defined(SMP) ASENTRY_NP(mpentry) /* Make sure interrupts are disabled. */ cpsid ifa /* Setup core, disable all caches. 
*/ mrc CP15_SCTLR(r0) bic r0, #CPU_CONTROL_MMU_ENABLE bic r0, #CPU_CONTROL_DC_ENABLE bic r0, #CPU_CONTROL_IC_ENABLE bic r0, #CPU_CONTROL_UNAL_ENABLE bic r0, #CPU_CONTROL_BPRD_ENABLE bic r0, #CPU_CONTROL_SW_ENABLE orr r0, #CPU_CONTROL_AFLT_ENABLE orr r0, #CPU_CONTROL_VECRELOC mcr CP15_SCTLR(r0) ISB /* Invalidate L1 cache I+D cache */ bl dcache_inv_pou_all mcr CP15_ICIALLU ISB /* Find the delta between VA and PA */ adr r0, Lpagetable bl translate_va_to_pa bl init_mmu adr r1, .Lstart ldmia r1, {r1, r2, sp} /* Set initial stack and */ mrc p15, 0, r0, c0, c0, 5 and r0, r0, #15 mov r1, #INIT_ARM_STACK_SIZE mul r2, r1, r0 add sp, sp, r2 str r1, [sp] /* Switch to virtual addresses. */ ldr pc, =1f 1: mov fp, #0 /* trace back starts here */ bl _C_LABEL(init_secondary) /* Off we go */ adr r0, .Lmpreturned b _C_LABEL(panic) /* NOTREACHED */ +END(mpentry) .Lmpreturned: .asciz "init_secondary() returned" - .align 0 -END(mpentry) + .align 2 #endif ENTRY_NP(cpu_halt) /* XXX re-implement !!! */ cpsid ifa bl dcache_wbinv_poc_all ldr r4, .Lcpu_reset_address ldr r4, [r4] teq r4, #0 movne pc, r4 1: WFI b 1b /* * _cpu_reset_address contains the address to branch to, to complete * the cpu reset after turning the MMU off * This variable is provided by the hardware specific code */ .Lcpu_reset_address: .word _C_LABEL(cpu_reset_address) END(cpu_halt) /* * setjump + longjmp */ ENTRY(setjmp) stmia r0, {r4-r14} mov r0, #0x00000000 RET END(setjmp) ENTRY(longjmp) ldmia r0, {r4-r14} mov r0, #0x00000001 RET END(longjmp) .data .global _C_LABEL(esym) _C_LABEL(esym): .word _C_LABEL(end) ENTRY_NP(abort) b _C_LABEL(abort) END(abort) ENTRY_NP(sigcode) mov r0, sp add r0, r0, #SIGF_UC /* * Call the sigreturn system call. * * We have to load r7 manually rather than using * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is * correct. Using the alternative places esigcode at the address * of the data rather than the address one past the data. */ ldr r7, [pc, #12] /* Load SYS_sigreturn */ swi SYS_sigreturn /* Well if that failed we better exit quick ! */ ldr r7, [pc, #8] /* Load SYS_exit */ swi SYS_exit /* Branch back to retry SYS_sigreturn */ b . - 16 END(sigcode) .word SYS_sigreturn .word SYS_exit - .align 0 + .align 2 .global _C_LABEL(esigcode) _C_LABEL(esigcode): .data .global szsigcode szsigcode: .long esigcode-sigcode /* End of locore.S */ Index: stable/10/sys/arm/include/asm.h =================================================================== --- stable/10/sys/arm/include/asm.h (revision 278651) +++ stable/10/sys/arm/include/asm.h (revision 278652) @@ -1,243 +1,243 @@ /* $NetBSD: asm.h,v 1.5 2003/08/07 16:26:53 agc Exp $ */ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
- .align 0 + .align 2 .global _C_LABEL(esigcode) _C_LABEL(esigcode): .data .global szsigcode szsigcode: .long esigcode-sigcode /* End of locore.S */ Index: stable/10/sys/arm/include/asm.h =================================================================== --- stable/10/sys/arm/include/asm.h (revision 278651) +++ stable/10/sys/arm/include/asm.h (revision 278652) @@ -1,243 +1,243 @@ /* $NetBSD: asm.h,v 1.5 2003/08/07 16:26:53 agc Exp $ */ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)asm.h 5.5 (Berkeley) 5/7/91 * * $FreeBSD$ */ #ifndef _MACHINE_ASM_H_ #define _MACHINE_ASM_H_ #include #include #include #define _C_LABEL(x) x #define _ASM_LABEL(x) x #ifndef _ALIGN_TEXT -# define _ALIGN_TEXT .align 0 +# define _ALIGN_TEXT .align 2 #endif #if defined(__ARM_EABI__) && !defined(_STANDALONE) #define STOP_UNWINDING .cantunwind #define _FNSTART .fnstart #define _FNEND .fnend #else #define STOP_UNWINDING #define _FNSTART #define _FNEND #endif /* * gas/arm uses @ as a single comment character, so @ cannot be used here. * It recognises the # instead of an @ symbol in .type directives. */ #define _ASM_TYPE_FUNCTION #function #define _ASM_TYPE_OBJECT #object /* XXX Is this still the right prologue for profiling? */ #ifdef GPROF #define _PROF_PROLOGUE \ mov ip, lr; \ bl __mcount #else #define _PROF_PROLOGUE #endif /* * EENTRY()/EEND() mark "extra" entry/exit points from a function. * LEENTRY()/LEEND() are the same for local symbols. * The unwind info cannot handle the concept of a nested function, or a function * with multiple .fnstart directives, but some of our assembler code is written * with multiple labels to allow entry at several points. The EENTRY() macro * defines such an extra entry point without a new .fnstart, so that it's * basically just a label that you can jump to. The EEND() macro does nothing * at all, except document the exit point associated with the same-named entry. */ #define GLOBAL(x) .global x #define _LEENTRY(x) .type x,_ASM_TYPE_FUNCTION; x: #define _LEEND(x) /* nothing */ #define _EENTRY(x) GLOBAL(x); _LEENTRY(x) #define _EEND(x) _LEEND(x) #define _LENTRY(x) .text; _ALIGN_TEXT; _LEENTRY(x); _FNSTART #define _LEND(x) .size x, .
- x; _FNEND #define _ENTRY(x) .text; _ALIGN_TEXT; _EENTRY(x); _FNSTART #define _END(x) _LEND(x) #define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE #define EENTRY(y) _EENTRY(_C_LABEL(y)); #define ENTRY_NP(y) _ENTRY(_C_LABEL(y)) #define EENTRY_NP(y) _EENTRY(_C_LABEL(y)) #define END(y) _END(_C_LABEL(y)) #define EEND(y) _EEND(_C_LABEL(y)) #define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE #define ASLENTRY(y) _LENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE #define ASEENTRY(y) _EENTRY(_ASM_LABEL(y)); #define ASLEENTRY(y) _LEENTRY(_ASM_LABEL(y)); #define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y)) #define ASLENTRY_NP(y) _LENTRY(_ASM_LABEL(y)) #define ASEENTRY_NP(y) _EENTRY(_ASM_LABEL(y)) #define ASLEENTRY_NP(y) _LEENTRY(_ASM_LABEL(y)) #define ASEND(y) _END(_ASM_LABEL(y)) #define ASLEND(y) _LEND(_ASM_LABEL(y)) #define ASEEND(y) _EEND(_ASM_LABEL(y)) #define ASLEEND(y) _LEEND(_ASM_LABEL(y)) #define ASMSTR .asciz #if defined(PIC) #define PLT_SYM(x) PIC_SYM(x, PLT) #define GOT_SYM(x) PIC_SYM(x, GOT) #define GOT_GET(x,got,sym) \ ldr x, sym; \ ldr x, [x, got] #define GOT_INIT(got,gotsym,pclabel) \ ldr got, gotsym; \ pclabel: add got, got, pc #ifdef __thumb__ #define GOT_INITSYM(gotsym,pclabel) \ - .align 0; \ + .align 2; \ gotsym: .word _C_LABEL(_GLOBAL_OFFSET_TABLE_) - (pclabel+4) #else #define GOT_INITSYM(gotsym,pclabel) \ - .align 0; \ + .align 2; \ gotsym: .word _C_LABEL(_GLOBAL_OFFSET_TABLE_) - (pclabel+8) #endif #ifdef __STDC__ #define PIC_SYM(x,y) x ## ( ## y ## ) #else #define PIC_SYM(x,y) x/**/(/**/y/**/) #endif #else #define PLT_SYM(x) x #define GOT_SYM(x) x #define GOT_GET(x,got,sym) \ ldr x, sym; #define GOT_INIT(got,gotsym,pclabel) #define GOT_INITSYM(gotsym,pclabel) #define PIC_SYM(x,y) x #endif /* PIC */ #undef __FBSDID #if !defined(lint) && !defined(STRIP_FBSDID) #define __FBSDID(s) .ident s #else #define __FBSDID(s) /* nothing */ #endif #define WEAK_ALIAS(alias,sym) \ .weak alias; \ alias = sym #ifdef __STDC__ #define WARN_REFERENCES(sym,msg) \ .stabs msg ## ,30,0,0,0 ; \ .stabs __STRING(_C_LABEL(sym)) ## ,1,0,0,0 #else #define WARN_REFERENCES(sym,msg) \ .stabs msg,30,0,0,0 ; \ .stabs __STRING(sym),1,0,0,0 #endif /* __STDC__ */ /* Exactly one of the __ARM_ARCH_*__ macros will be defined by the compiler. */ /* The _ARM_ARCH_* macros are deprecated and will be removed soon. */ /* This should be moved into another header so it can be used in * both asm and C code. machine/asm.h cannot be included in C code. */ #if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__) #define _ARM_ARCH_7 #define _HAVE_ARMv7_INSTRUCTIONS 1 #endif #if defined (_HAVE_ARMv7_INSTRUCTIONS) || defined (__ARM_ARCH_6__) || \ defined (__ARM_ARCH_6J__) || defined (__ARM_ARCH_6K__) || \ defined (__ARM_ARCH_6Z__) || defined (__ARM_ARCH_6ZK__) #define _ARM_ARCH_6 #define _HAVE_ARMv6_INSTRUCTIONS 1 #endif #if defined (_HAVE_ARMv6_INSTRUCTIONS) || defined (__ARM_ARCH_5TE__) || \ defined (__ARM_ARCH_5TEJ__) || defined (__ARM_ARCH_5E__) #define _ARM_ARCH_5E #define _HAVE_ARMv5E_INSTRUCTIONS 1 #endif #if defined (_HAVE_ARMv5E_INSTRUCTIONS) || defined (__ARM_ARCH_5__) || \ defined (__ARM_ARCH_5T__) #define _ARM_ARCH_5 #define _HAVE_ARMv5_INSTRUCTIONS 1 #endif #if defined (_HAVE_ARMv5_INSTRUCTIONS) || defined (__ARM_ARCH_4T__) #define _ARM_ARCH_4T #define _HAVE_ARMv4T_INSTRUCTIONS 1 #endif /* FreeBSD requires ARMv4, so this is always set. 
*/ #define _HAVE_ARMv4_INSTRUCTIONS 1 #if defined (_HAVE_ARMv4T_INSTRUCTIONS) # define RET bx lr # define RETeq bxeq lr # define RETne bxne lr # define RETc(c) bx##c lr #else # define RET mov pc, lr # define RETeq moveq pc, lr # define RETne movne pc, lr # define RETc(c) mov##c pc, lr #endif #if __ARM_ARCH >= 7 #define ISB isb #define DSB dsb #define DMB dmb #define WFI wfi #elif __ARM_ARCH == 6 #define ISB mcr CP15_CP15ISB #define DSB mcr CP15_CP15DSB #define DMB mcr CP15_CP15DMB #define WFI mcr CP15_CP15WFI #else #define ISB mcr CP15_CP15ISB #define DSB mcr CP15_CP15DSB /* DSB and DMB are the */ #define DMB mcr CP15_CP15DSB /* same prior to v6. */ /* No form of WFI available on v4, define nothing to get an error on use. */ #endif #endif /* !_MACHINE_ASM_H_ */ Index: stable/10/sys/arm/include/profile.h =================================================================== --- stable/10/sys/arm/include/profile.h (revision 278651) +++ stable/10/sys/arm/include/profile.h (revision 278652) @@ -1,125 +1,125 @@ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)profile.h 8.1 (Berkeley) 6/11/93 * $FreeBSD$ */ #ifndef _MACHINE_PROFILE_H_ #define _MACHINE_PROFILE_H_ /* * Config generates something to tell the compiler to align functions on 16 * byte boundaries. A strict alignment is good for keeping the tables small. */ #define FUNCTION_ALIGNMENT 16 #define _MCOUNT_DECL void mcount typedef u_long fptrdiff_t; /* * Cannot implement mcount in C as GCC will trash the ip register when it * pushes a trapframe. Pity we cannot insert assembly before the function * prologue. */
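/* * How the arguments reach __mcount (an explanatory note, not part of the * original header): a function compiled with -pg begins with the * _PROF_PROLOGUE from machine/asm.h, "mov ip, lr; bl __mcount". On entry * to the stub below, ip therefore still holds the profiled function's own * return address (frompc) and lr holds the address just past the bl inside * the profiled function (selfpc); the stub moves them into r0/r1 and calls * the C mcount(). */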
#ifndef PLTSYM #define PLTSYM #endif #define MCOUNT \ __asm__(".text"); \ - __asm__(".align 0"); \ + __asm__(".align 2"); \ __asm__(".type __mcount ,%function"); \ __asm__(".global __mcount"); \ __asm__("__mcount:"); \ /* \ * Preserve registers that are trashed during mcount \ */ \ __asm__("stmfd sp!, {r0-r3, ip, lr}"); \ /* \ * Find the return address for mcount, \ * and the return address for mcount's caller. \ * \ * frompcindex = pc pushed by call into self. \ */ \ __asm__("mov r0, ip"); \ /* \ * selfpc = pc pushed by mcount call \ */ \ __asm__("mov r1, lr"); \ /* \ * Call the real mcount code \ */ \ __asm__("bl mcount"); \ /* \ * Restore registers that were trashed during mcount \ */ \ __asm__("ldmfd sp!, {r0-r3, lr, pc}"); void bintr(void); void btrap(void); void eintr(void); void user(void); #define MCOUNT_FROMPC_USER(pc) \ ((pc < (uintfptr_t)VM_MAXUSER_ADDRESS) ? (uintfptr_t)user : pc) #define MCOUNT_FROMPC_INTR(pc) \ ((pc >= (uintfptr_t)btrap && pc < (uintfptr_t)eintr) ? \ ((pc >= (uintfptr_t)bintr) ? (uintfptr_t)bintr : \ (uintfptr_t)btrap) : ~0U) #ifdef _KERNEL #define MCOUNT_DECL(s) register_t s; #include #include /* * splhigh() and splx() are heavyweight, and call mcount(). Therefore * we disable interrupts (IRQ, but not FIQ) directly on the CPU. * * We're lucky that the CPSR and 's' both happen to be 'int's. */ #define MCOUNT_ENTER(s) {s = intr_disable(); } /* kill IRQ */ #define MCOUNT_EXIT(s) {intr_restore(s); } /* restore old value */ void mcount(uintfptr_t frompc, uintfptr_t selfpc); #else typedef u_int uintfptr_t; #endif /* _KERNEL */ #endif /* !_MACHINE_PROFILE_H_ */ Index: stable/10/sys/conf/kmod.mk =================================================================== --- stable/10/sys/conf/kmod.mk (revision 278651) +++ stable/10/sys/conf/kmod.mk (revision 278652) @@ -1,488 +1,493 @@ # From: @(#)bsd.prog.mk 5.26 (Berkeley) 6/25/91 # $FreeBSD$ # # The include file handles building and installing loadable # kernel modules. # # # +++ variables +++ # # CLEANFILES Additional files to remove for the clean and cleandir targets. # # EXPORT_SYMS A list of symbols that should be exported from the module, # or the name of a file containing a list of symbols, or YES # to export all symbols. If not defined, no symbols are # exported. # # KMOD The name of the kernel module to build. # # KMODDIR Base path for kernel modules (see kld(4)). [/boot/kernel] # # KMODOWN Module file owner. [${BINOWN}] # # KMODGRP Module file group. [${BINGRP}] # # KMODMODE Module file mode. [${BINMODE}] # # KMODLOAD Command to load a kernel module [/sbin/kldload] # # KMODUNLOAD Command to unload a kernel module [/sbin/kldunload] # # MFILES Optionally a list of interfaces used by the module. # This file contains a default list of interfaces. # # PROG The name of the kernel module to build. # If not supplied, ${KMOD}.ko is used. # # SRCS List of source files. # # FIRMWS List of firmware images in format filename:shortname:version # # FIRMWARE_LICENSE # Set to the name of the license the user has to agree to in # order to use this firmware. See /usr/share/doc/legal # # DESTDIR The tree where the module gets installed. [not set] # # +++ targets +++ # # install: # install the kernel module; if the Makefile # does not itself define the target install, the targets # beforeinstall and afterinstall may also be used to cause # actions immediately before and after the install target # is executed. # # load: # Load a module. # # unload: # Unload a module. #
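# A minimal consumer, for illustration (hypothetical module name and sources, # not part of this change): a module Makefile normally just sets KMOD and # SRCS, lists any generated interface headers it needs, and ends by including # bsd.kmod.mk: # # KMOD= foo # SRCS= foo.c device_if.h bus_if.h # .include <bsd.kmod.mk> # # backwards compat option for older systems.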
MACHINE_CPUARCH?=${MACHINE_ARCH:C/mips(n32|64)?(el)?/mips/:C/arm(v6)?(eb)?/arm/:C/powerpc64/powerpc/} AWK?= awk KMODLOAD?= /sbin/kldload KMODUNLOAD?= /sbin/kldunload OBJCOPY?= objcopy .if defined(KMODDEPS) .error "Do not use KMODDEPS on 5.0+; use MODULE_VERSION/MODULE_DEPEND" .endif .include .include .SUFFIXES: .out .o .c .cc .cxx .C .y .l .s .S # amd64 and mips use direct linking for kmod, all others use shared binaries .if ${MACHINE_CPUARCH} != amd64 && ${MACHINE_CPUARCH} != mips __KLD_SHARED=yes .else __KLD_SHARED=no .endif .if !empty(CFLAGS:M-O[23s]) && empty(CFLAGS:M-fno-strict-aliasing) CFLAGS+= -fno-strict-aliasing .endif WERROR?= -Werror CFLAGS+= ${WERROR} CFLAGS+= -D_KERNEL CFLAGS+= -DKLD_MODULE # Don't use any standard or source-relative include directories. CSTD= c99 NOSTDINC= -nostdinc CFLAGS:= ${CFLAGS:N-I*} ${NOSTDINC} ${INCLMAGIC} ${CFLAGS:M-I*} .if defined(KERNBUILDDIR) CFLAGS+= -DHAVE_KERNEL_OPTION_HEADERS -include ${KERNBUILDDIR}/opt_global.h .endif # Add -I paths for system headers. Individual module makefiles don't # need any -I paths for this. Similar defaults for .PATH can't be # set because there are no standard paths for non-headers. CFLAGS+= -I. -I@ # Add -I path for altq headers as they are included via net/if_var.h # for example. CFLAGS+= -I@/contrib/altq .if ${COMPILER_TYPE} != "clang" CFLAGS+= -finline-limit=${INLINE_LIMIT} CFLAGS+= --param inline-unit-growth=100 CFLAGS+= --param large-function-growth=1000 .endif # Disallow common variables, and if we end up with commons from # somewhere unexpected, allocate storage for them in the module itself. CFLAGS+= -fno-common LDFLAGS+= -d -warn-common CFLAGS+= ${DEBUG_FLAGS} .if ${MACHINE_CPUARCH} == amd64 CFLAGS+= -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer .endif +# Temporary workaround for PR 196407, which contains the fascinating details. 
+.if ${MACHINE_CPUARCH} == arm +CFLAGS.clang+= -mllvm -arm-use-movt=0 +.endif + .if ${MACHINE_CPUARCH} == powerpc CFLAGS+= -mlongcall -fno-omit-frame-pointer .endif .if ${MACHINE_CPUARCH} == mips CFLAGS+= -G0 -fno-pic -mno-abicalls -mlong-calls .endif .if defined(DEBUG) || defined(DEBUG_FLAGS) CTFFLAGS+= -g .endif .if defined(FIRMWS) .if !exists(@) ${KMOD:S/$/.c/}: @ .else ${KMOD:S/$/.c/}: @/tools/fw_stub.awk .endif ${AWK} -f @/tools/fw_stub.awk ${FIRMWS} -m${KMOD} -c${KMOD:S/$/.c/g} \ ${FIRMWARE_LICENSE:C/.+/-l/}${FIRMWARE_LICENSE} SRCS+= ${KMOD:S/$/.c/} CLEANFILES+= ${KMOD:S/$/.c/} .for _firmw in ${FIRMWS} ${_firmw:C/\:.*$/.fwo/}: ${_firmw:C/\:.*$//} @${ECHO} ${_firmw:C/\:.*$//} ${.ALLSRC:M*${_firmw:C/\:.*$//}} @if [ -e ${_firmw:C/\:.*$//} ]; then \ ${LD} -b binary --no-warn-mismatch ${LDFLAGS} \ -r -d -o ${.TARGET} ${_firmw:C/\:.*$//}; \ else \ ln -s ${.ALLSRC:M*${_firmw:C/\:.*$//}} ${_firmw:C/\:.*$//}; \ ${LD} -b binary --no-warn-mismatch ${LDFLAGS} \ -r -d -o ${.TARGET} ${_firmw:C/\:.*$//}; \ rm ${_firmw:C/\:.*$//}; \ fi OBJS+= ${_firmw:C/\:.*$/.fwo/} .endfor .endif OBJS+= ${SRCS:N*.h:R:S/$/.o/g} .if !defined(PROG) PROG= ${KMOD}.ko .endif .if !defined(DEBUG_FLAGS) FULLPROG= ${PROG} .else FULLPROG= ${PROG}.debug ${PROG}: ${FULLPROG} ${PROG}.symbols ${OBJCOPY} --strip-debug --add-gnu-debuglink=${PROG}.symbols\ ${FULLPROG} ${.TARGET} ${PROG}.symbols: ${FULLPROG} ${OBJCOPY} --only-keep-debug ${FULLPROG} ${.TARGET} .endif .if ${__KLD_SHARED} == yes ${FULLPROG}: ${KMOD}.kld ${LD} -Bshareable ${LDFLAGS} -o ${.TARGET} ${KMOD}.kld .if !defined(DEBUG_FLAGS) ${OBJCOPY} --strip-debug ${.TARGET} .endif .endif EXPORT_SYMS?= NO .if ${EXPORT_SYMS} != YES CLEANFILES+= export_syms .endif .if ${__KLD_SHARED} == yes ${KMOD}.kld: ${OBJS} .else ${FULLPROG}: ${OBJS} .endif ${LD} ${LDFLAGS} -r -d -o ${.TARGET} ${OBJS} .if defined(MK_CTF) && ${MK_CTF} != "no" ${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ${OBJS} .endif .if defined(EXPORT_SYMS) .if ${EXPORT_SYMS} != YES .if ${EXPORT_SYMS} == NO :> export_syms .elif !exists(${.CURDIR}/${EXPORT_SYMS}) echo ${EXPORT_SYMS} > export_syms .else grep -v '^#' < ${EXPORT_SYMS} > export_syms .endif awk -f ${SYSDIR}/conf/kmod_syms.awk ${.TARGET} \ export_syms | xargs -J% ${OBJCOPY} % ${.TARGET} .endif .endif .if !defined(DEBUG_FLAGS) && ${__KLD_SHARED} == no ${OBJCOPY} --strip-debug ${.TARGET} .endif _ILINKS=@ machine .if ${MACHINE} != ${MACHINE_CPUARCH} _ILINKS+=${MACHINE_CPUARCH} .endif .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _ILINKS+=x86 .endif all: objwarn ${PROG} beforedepend: ${_ILINKS} # Ensure that the links exist without depending on them when they exist; # such a dependency would cause all the modules to be rebuilt whenever the # directory pointed to changes. .for _link in ${_ILINKS} .if !exists(${.OBJDIR}/${_link}) ${OBJS}: ${.OBJDIR}/${_link} .endif .endfor # Search for kernel source tree in standard places. .for _dir in ${.CURDIR}/../.. ${.CURDIR}/../../..
/sys /usr/src/sys .if !defined(SYSDIR) && exists(${_dir}/kern/) SYSDIR= ${_dir} .endif .endfor .if !defined(SYSDIR) || !exists(${SYSDIR}/kern/) .error "can't find kernel source tree" .endif .for _link in ${_ILINKS} .PHONY: ${_link} ${_link}: ${.OBJDIR}/${_link} ${.OBJDIR}/${_link}: @case ${.TARGET:T} in \ machine) \ path=${SYSDIR}/${MACHINE}/include ;; \ @) \ path=${SYSDIR} ;; \ *) \ path=${SYSDIR}/${.TARGET:T}/include ;; \ esac ; \ path=`(cd $$path && /bin/pwd)` ; \ ${ECHO} ${.TARGET:T} "->" $$path ; \ ln -sf $$path ${.TARGET:T} .endfor CLEANFILES+= ${PROG} ${KMOD}.kld ${OBJS} .if defined(DEBUG_FLAGS) CLEANFILES+= ${FULLPROG} ${PROG}.symbols .endif .if !target(install) _INSTALLFLAGS:= ${INSTALLFLAGS} .for ie in ${INSTALLFLAGS_EDIT} _INSTALLFLAGS:= ${_INSTALLFLAGS${ie}} .endfor .if !target(realinstall) realinstall: _kmodinstall .ORDER: beforeinstall _kmodinstall _kmodinstall: ${INSTALL} -o ${KMODOWN} -g ${KMODGRP} -m ${KMODMODE} \ ${_INSTALLFLAGS} ${PROG} ${DESTDIR}${KMODDIR} .if defined(DEBUG_FLAGS) && !defined(INSTALL_NODEBUG) && ${MK_KERNEL_SYMBOLS} != "no" ${INSTALL} -o ${KMODOWN} -g ${KMODGRP} -m ${KMODMODE} \ ${_INSTALLFLAGS} ${PROG}.symbols ${DESTDIR}${KMODDIR} .endif .include .if !defined(NO_XREF) afterinstall: _kldxref .ORDER: realinstall _kldxref .ORDER: _installlinks _kldxref _kldxref: @if type kldxref >/dev/null 2>&1; then \ ${ECHO} kldxref ${DESTDIR}${KMODDIR}; \ kldxref ${DESTDIR}${KMODDIR}; \ fi .endif .endif # !target(realinstall) .endif # !target(install) .if !target(load) load: ${PROG} ${KMODLOAD} -v ${.OBJDIR}/${PROG} .endif .if !target(unload) unload: ${KMODUNLOAD} -v ${PROG} .endif .if defined(KERNBUILDDIR) .PATH: ${KERNBUILDDIR} CFLAGS+= -I${KERNBUILDDIR} .for _src in ${SRCS:Mopt_*.h} CLEANFILES+= ${_src} .if !target(${_src}) ${_src}: ln -sf ${KERNBUILDDIR}/${_src} ${.TARGET} .endif .endfor .else .for _src in ${SRCS:Mopt_*.h} CLEANFILES+= ${_src} .if !target(${_src}) ${_src}: :> ${.TARGET} .endif .endfor .endif # Respect configuration-specific C flags. 
CFLAGS+= ${CONF_CFLAGS} MFILES?= dev/acpica/acpi_if.m dev/acpi_support/acpi_wmi_if.m \ dev/agp/agp_if.m dev/ata/ata_if.m dev/eisa/eisa_if.m \ dev/fb/fb_if.m dev/gpio/gpio_if.m dev/gpio/gpiobus_if.m \ dev/iicbus/iicbb_if.m dev/iicbus/iicbus_if.m \ dev/mmc/mmcbr_if.m dev/mmc/mmcbus_if.m \ dev/mii/miibus_if.m dev/mvs/mvs_if.m dev/ofw/ofw_bus_if.m \ dev/pccard/card_if.m dev/pccard/power_if.m dev/pci/pci_if.m \ dev/pci/pcib_if.m dev/ppbus/ppbus_if.m \ dev/sdhci/sdhci_if.m dev/smbus/smbus_if.m dev/spibus/spibus_if.m \ dev/sound/pci/hda/hdac_if.m \ dev/sound/pcm/ac97_if.m dev/sound/pcm/channel_if.m \ dev/sound/pcm/feeder_if.m dev/sound/pcm/mixer_if.m \ dev/sound/midi/mpu_if.m dev/sound/midi/mpufoi_if.m \ dev/sound/midi/synth_if.m dev/usb/usb_if.m isa/isa_if.m \ kern/bus_if.m kern/clock_if.m \ kern/cpufreq_if.m kern/device_if.m kern/serdev_if.m \ libkern/iconv_converter_if.m opencrypto/cryptodev_if.m \ pc98/pc98/canbus_if.m dev/etherswitch/mdio_if.m .for _srcsrc in ${MFILES} .for _ext in c h .for _src in ${SRCS:M${_srcsrc:T:R}.${_ext}} CLEANFILES+= ${_src} .if !target(${_src}) .if !exists(@) ${_src}: @ .else ${_src}: @/tools/makeobjops.awk @/${_srcsrc} .endif ${AWK} -f @/tools/makeobjops.awk @/${_srcsrc} -${_ext} .endif .endfor # _src .endfor # _ext .endfor # _srcsrc .if !empty(SRCS:Mvnode_if.c) CLEANFILES+= vnode_if.c .if !exists(@) vnode_if.c: @ .else vnode_if.c: @/tools/vnode_if.awk @/kern/vnode_if.src .endif ${AWK} -f @/tools/vnode_if.awk @/kern/vnode_if.src -c .endif .if !empty(SRCS:Mvnode_if.h) CLEANFILES+= vnode_if.h vnode_if_newproto.h vnode_if_typedef.h .if !exists(@) vnode_if.h vnode_if_newproto.h vnode_if_typedef.h: @ .else vnode_if.h vnode_if_newproto.h vnode_if_typedef.h: @/tools/vnode_if.awk \ @/kern/vnode_if.src .endif vnode_if.h: vnode_if_newproto.h vnode_if_typedef.h ${AWK} -f @/tools/vnode_if.awk @/kern/vnode_if.src -h vnode_if_newproto.h: ${AWK} -f @/tools/vnode_if.awk @/kern/vnode_if.src -p vnode_if_typedef.h: ${AWK} -f @/tools/vnode_if.awk @/kern/vnode_if.src -q .endif .for _i in mii pccard .if !empty(SRCS:M${_i}devs.h) CLEANFILES+= ${_i}devs.h .if !exists(@) ${_i}devs.h: @ .else ${_i}devs.h: @/tools/${_i}devs2h.awk @/dev/${_i}/${_i}devs .endif ${AWK} -f @/tools/${_i}devs2h.awk @/dev/${_i}/${_i}devs .endif .endfor # _i .if !empty(SRCS:Musbdevs.h) CLEANFILES+= usbdevs.h .if !exists(@) usbdevs.h: @ .else usbdevs.h: @/tools/usbdevs2h.awk @/dev/usb/usbdevs .endif ${AWK} -f @/tools/usbdevs2h.awk @/dev/usb/usbdevs -h .endif .if !empty(SRCS:Musbdevs_data.h) CLEANFILES+= usbdevs_data.h .if !exists(@) usbdevs_data.h: @ .else usbdevs_data.h: @/tools/usbdevs2h.awk @/dev/usb/usbdevs .endif ${AWK} -f @/tools/usbdevs2h.awk @/dev/usb/usbdevs -d .endif .if !empty(SRCS:Macpi_quirks.h) CLEANFILES+= acpi_quirks.h .if !exists(@) acpi_quirks.h: @ .else acpi_quirks.h: @/tools/acpi_quirks2h.awk @/dev/acpica/acpi_quirks .endif ${AWK} -f @/tools/acpi_quirks2h.awk @/dev/acpica/acpi_quirks .endif .if !empty(SRCS:Massym.s) CLEANFILES+= assym.s genassym.o assym.s: genassym.o .if defined(KERNBUILDDIR) genassym.o: opt_global.h .endif .if !exists(@) assym.s: @ .else assym.s: @/kern/genassym.sh .endif sh @/kern/genassym.sh genassym.o > ${.TARGET} .if exists(@) genassym.o: @/${MACHINE_CPUARCH}/${MACHINE_CPUARCH}/genassym.c .endif genassym.o: @ machine ${SRCS:Mopt_*.h} ${CC} -c ${CFLAGS:N-fno-common} \ @/${MACHINE_CPUARCH}/${MACHINE_CPUARCH}/genassym.c .endif lint: ${SRCS} ${LINT} ${LINTKERNFLAGS} ${CFLAGS:M-[DILU]*} ${.ALLSRC:M*.c} .if defined(KERNBUILDDIR) ${OBJS}: opt_global.h .endif .include cleandepend: 
cleanilinks # .depend needs include links so we remove them only together. cleanilinks: rm -f ${_ILINKS} .if !exists(${.OBJDIR}/${DEPENDFILE}) ${OBJS}: ${SRCS:M*.h} .endif .include .include "kern.mk" Index: stable/10 =================================================================== --- stable/10 (revision 278651) +++ stable/10 (revision 278652) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r276525,276596
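Note on the ".align" changes throughout this diff (an explanatory aside with an illustrative snippet, not part of the change itself): ARM gas interprets the operand of .align as a power of two, so ".align 2" requests 4-byte alignment while ".align 0" requests none. For example, a string such as

	.asciz "init_secondary() returned" /* 26 bytes including the NUL, so the next address is unaligned */ .align 2 /* pad to the next 4-byte boundary */

would otherwise leave whatever follows it (code, or a .word literal read with a 32-bit load) at an unaligned address, instead of relying on the assembler to align it silently.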