Index: head/lib/libc/powerpc/SYS.h
===================================================================
--- head/lib/libc/powerpc/SYS.h	(revision 368353)
+++ head/lib/libc/powerpc/SYS.h	(revision 368354)
@@ -1,71 +1,73 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2002 Benno Rice. All rights reserved.
  * Copyright (c) 2002 David E. O'Brien. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the author nor the names of any contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $NetBSD: SYS.h,v 1.8 2002/01/14 00:55:56 thorpej Exp $
  * $FreeBSD$
  */

 #include <sys/syscall.h>
 #include <machine/asm.h>

 #define _SYSCALL(name) \
         .text; \
         .align 2; \
         li 0,(SYS_##name); \
         sc

 #define SYSCALL(name) \
         .text; \
         .align 2; \
 2:      b CNAME(HIDENAME(cerror)); \
 ENTRY(__sys_##name); \
         WEAK_REFERENCE(__sys_##name, name); \
         WEAK_REFERENCE(__sys_##name, _##name); \
         _SYSCALL(name); \
         bso 2b

 #define PSEUDO(name) \
         .text; \
         .align 2; \
 ENTRY(__sys_##name); \
         WEAK_REFERENCE(__sys_##name, _##name); \
         _SYSCALL(name); \
         bnslr; \
-        b CNAME(HIDENAME(cerror))
+        b CNAME(HIDENAME(cerror)); \
+END(__sys_##name)

 #define RSYSCALL(name) \
         .text; \
         .align 2; \
 ENTRY(__sys_##name); \
         WEAK_REFERENCE(__sys_##name, name); \
         WEAK_REFERENCE(__sys_##name, _##name); \
         _SYSCALL(name); \
         bnslr; \
-        b CNAME(HIDENAME(cerror))
+        b CNAME(HIDENAME(cerror)); \
+END(__sys_##name)
Index: head/lib/libc/powerpc64/SYS.h
===================================================================
--- head/lib/libc/powerpc64/SYS.h	(revision 368353)
+++ head/lib/libc/powerpc64/SYS.h	(revision 368354)
@@ -1,96 +1,98 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2002 Benno Rice. All rights reserved.
  * Copyright (c) 2002 David E. O'Brien. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the author nor the names of any contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $NetBSD: SYS.h,v 1.8 2002/01/14 00:55:56 thorpej Exp $
  * $FreeBSD$
  */

 #include <sys/syscall.h>
 #include <machine/asm.h>

 #define _SYSCALL(name) \
         .text; \
         .align 2; \
         li 0,(SYS_##name); \
         sc

 #define SYSCALL(name) \
         .text; \
         .align 2; \
 2:      mflr %r0; \
         std %r0,16(%r1); \
         stdu %r1,-48(%r1); \
         bl CNAME(HIDENAME(cerror)); \
         nop; \
         addi %r1,%r1,48; \
         ld %r0,16(%r1); \
         mtlr %r0; \
         blr; \
 ENTRY(__sys_##name); \
         WEAK_REFERENCE(__sys_##name, name); \
         WEAK_REFERENCE(__sys_##name, _##name); \
         _SYSCALL(name); \
         bso 2b

 #define PSEUDO(name) \
         .text; \
         .align 2; \
 ENTRY(__sys_##name); \
         WEAK_REFERENCE(__sys_##name, _##name); \
         _SYSCALL(name); \
         bnslr; \
         mflr %r0; \
         std %r0,16(%r1); \
         stdu %r1,-48(%r1); \
         bl CNAME(HIDENAME(cerror)); \
         nop; \
         addi %r1,%r1,48; \
         ld %r0,16(%r1); \
         mtlr %r0; \
-        blr;
+        blr; \
+END(__sys_##name)

 #define RSYSCALL(name) \
         .text; \
         .align 2; \
 ENTRY(__sys_##name); \
         WEAK_REFERENCE(__sys_##name, name); \
         WEAK_REFERENCE(__sys_##name, _##name); \
         _SYSCALL(name); \
         bnslr; \
         \
         mflr %r0; \
         std %r0,16(%r1); \
         stdu %r1,-48(%r1); \
         bl CNAME(HIDENAME(cerror)); \
         nop; \
         addi %r1,%r1,48; \
         ld %r0,16(%r1); \
         mtlr %r0; \
-        blr;
+        blr; \
+END(__sys_##name)
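[Editor's note on the two SYS.h changes above] The stubs these macros emit are branch-only: on success the bnslr returns straight to the caller, and on failure (the kernel sets the condition register's summary-overflow bit) control falls through to cerror, which converts the returned error number into the usual errno/-1 convention. A minimal C sketch of that control flow, with illustrative names only (the real stubs are pure assembly):

    /* Sketch only: illustrative names, not libc internals. */
    extern int *__error(void);          /* libc: &errno for this thread */

    static long
    cerror_sketch(long errval)
    {
            *__error() = (int)errval;   /* kernel returned an error number */
            return (-1);
    }

    /* What an RSYSCALL(foo)-generated __sys_foo does, phrased in C. */
    static long
    syscall_stub_sketch(long sc_result, int so_flag)
    {
            if (!so_flag)               /* bnslr: plain return on success */
                    return (sc_result);
            return (cerror_sketch(sc_result)); /* b cerror on failure */
    }

The new END(__sys_##name) lines only close the symbol, giving each stub a correct ELF size (and complete unwind info on targets whose END macro emits .cfi_endproc).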
Index: head/lib/libc/powerpc64/sys/cerror.S
===================================================================
--- head/lib/libc/powerpc64/sys/cerror.S	(revision 368353)
+++ head/lib/libc/powerpc64/sys/cerror.S	(revision 368354)
@@ -1,60 +1,61 @@
 /*-
  * Copyright (c) 2002 Peter Grehan.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 /* $NetBSD: cerror.S,v 1.5 2000/01/27 14:58:48 kleink Exp $ */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");

 #include "SYS.h"

         .globl HIDENAME(cerror)
         .globl CNAME(__error)

 /*
  * The __error() function is thread aware. For non-threaded
  * programs and the initial thread in threaded programs,
  * it returns a pointer to the global errno variable.
  */

 ENTRY_NOPROF(HIDENAME(cerror))
         mflr %r0
         std %r0,16(%r1)         /* save lr */
         stdu %r1,-64(%r1)       /* allocate new stack frame */
         std %r31,48(%r1)
         mr %r31,%r3             /* stash errval in callee-saved register */
         bl CNAME(__error)
         nop
         stw %r31,0(%r3)         /* store errval into &errno */
         ld %r31,48(%r1)
         ld %r1,0(%r1)
         ld %r0,16(%r1)
         mtlr %r0
         li %r3,-1
         li %r4,-1
         blr
+END(HIDENAME(cerror))

         .section .note.GNU-stack,"",%progbits
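[Editor's note] The comment above describes __error() as thread-aware. A hedged sketch of what that means, with illustrative names (libc's real implementation routes through the threading library rather than a _Thread_local):

    /* Sketch: where &errno lives depends on whether threads are running. */
    static int errno_global;            /* non-threaded / initial thread */
    static _Thread_local int errno_tls; /* other threads */
    static int threads_running;

    int *
    __error_sketch(void)
    {
            return (threads_running ? &errno_tls : &errno_global);
    }

cerror itself just stores the error value through that pointer and returns -1; the assembly leaves -1 in both %r3 and %r4, apparently so callers that consume a second result register also see -1.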
Index: head/lib/libthr/arch/i386/i386/_umtx_op_err.S
===================================================================
--- head/lib/libthr/arch/i386/i386/_umtx_op_err.S	(revision 368353)
+++ head/lib/libthr/arch/i386/i386/_umtx_op_err.S	(revision 368354)
@@ -1,38 +1,41 @@
 /*-
  * Copyright (C) 2008 David Xu <davidxu@freebsd.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #include <machine/asm.h>
 #include <sys/syscall.h>

 #define SYSCALL_ERR(x) \
         ENTRY(__CONCAT(x, _err)); \
-        mov __CONCAT($SYS_,x),%eax; int $0x80; ret
+        mov __CONCAT($SYS_,x),%eax; \
+        int $0x80; \
+        ret; \
+        END(__CONCAT(x, _err))

 SYSCALL_ERR(_umtx_op)

         .section .note.GNU-stack,"",%progbits
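[Editor's note] Unlike the ordinary _umtx_op() syscall wrapper, the _err variant has no cerror branch: the kernel's error number is the return value and errno is never written, which is what libthr's low-level lock paths want. A hedged usage sketch; the prototype is my reading of libthr's internal declaration and may differ:

    #include <sys/types.h>
    #include <sys/umtx.h>

    int _umtx_op_err(void *obj, int op, unsigned long val, void *uaddr,
        void *uaddr2);

    static int
    wait_sketch(unsigned long *addr, unsigned long expected)
    {
            /* Returns 0 or an errno value directly; errno is untouched. */
            return (_umtx_op_err(addr, UMTX_OP_WAIT, expected, NULL, NULL));
    }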
Index: head/lib/msun/i387/e_logf.S
===================================================================
--- head/lib/msun/i387/e_logf.S	(revision 368353)
+++ head/lib/msun/i387/e_logf.S	(revision 368354)
@@ -1,17 +1,18 @@
 /*
  * Written by J.T. Conklin <jtc@NetBSD.org>.
  * Public domain.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");

 /* RCSID("$NetBSD: e_logf.S,v 1.2 1996/07/06 00:15:45 jtc Exp $") */

 ENTRY(logf)
         fldln2
         flds 4(%esp)
         fyl2x
         ret
+END(logf)

         .section .note.GNU-stack,"",%progbits

Index: head/lib/msun/i387/e_remainderl.S
===================================================================
--- head/lib/msun/i387/e_remainderl.S	(revision 368353)
+++ head/lib/msun/i387/e_remainderl.S	(revision 368354)
@@ -1,50 +1,51 @@
 /*
  * Copyright (c) 1993,94 Winning Strategies, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by Winning Strategies, Inc.
  * 4. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /*
  * Written by:
  *      J.T. Conklin (jtc@wimsey.com), Winning Strategies, Inc.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$")

 ENTRY(remainderl)
         fldt 16(%esp)
         fldt 4(%esp)
 1:      fprem1
         fstsw %ax
         sahf
         jp 1b
         fstp %st(1)
         ret
+END(remainderl)

         .section .note.GNU-stack,"",%progbits

Index: head/lib/msun/i387/e_sqrtl.S
===================================================================
--- head/lib/msun/i387/e_sqrtl.S	(revision 368353)
+++ head/lib/msun/i387/e_sqrtl.S	(revision 368354)
@@ -1,44 +1,45 @@
 /*
  * Copyright (c) 1993,94 Winning Strategies, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by Winning Strategies, Inc.
  * 4. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /*
  * Written by:
  *      J.T. Conklin (jtc@wimsey.com), Winning Strategies, Inc.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$")

 ENTRY(sqrtl)
         fldt 4(%esp)
         fsqrt
         ret
+END(sqrtl)

         .section .note.GNU-stack,"",%progbits
Index: head/lib/msun/i387/s_llrintl.S
===================================================================
--- head/lib/msun/i387/s_llrintl.S	(revision 368353)
+++ head/lib/msun/i387/s_llrintl.S	(revision 368354)
@@ -1,38 +1,39 @@
 /*-
  * Copyright (c) 2005 David Schultz <das@FreeBSD.ORG>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");

 ENTRY(llrintl)
         fldt 4(%esp)
         subl $8,%esp
         fistpll (%esp)
         popl %eax
         popl %edx
         ret
+END(llrintl)

         .section .note.GNU-stack,"",%progbits

Index: head/lib/msun/i387/s_logbl.S
===================================================================
--- head/lib/msun/i387/s_logbl.S	(revision 368353)
+++ head/lib/msun/i387/s_logbl.S	(revision 368354)
@@ -1,45 +1,46 @@
 /*
  * Copyright (c) 1993,94 Winning Strategies, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by Winning Strategies, Inc.
  * 4. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /*
  * Written by:
  *      J.T. Conklin (jtc@wimsey.com), Winning Strategies, Inc.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$")

 ENTRY(logbl)
         fldt 4(%esp)
         fxtract
         fstp %st
         ret
+END(logbl)

         .section .note.GNU-stack,"",%progbits

Index: head/lib/msun/i387/s_lrintl.S
===================================================================
--- head/lib/msun/i387/s_lrintl.S	(revision 368353)
+++ head/lib/msun/i387/s_lrintl.S	(revision 368354)
@@ -1,37 +1,38 @@
 /*-
  * Copyright (c) 2008 David Schultz <das@FreeBSD.ORG>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");

 ENTRY(lrintl)
         fldt 4(%esp)
         subl $4,%esp
         fistpl (%esp)
         popl %eax
         ret
+END(lrintl)

         .section .note.GNU-stack,"",%progbits
Index: head/lib/msun/i387/s_remquol.S
===================================================================
--- head/lib/msun/i387/s_remquol.S	(revision 368353)
+++ head/lib/msun/i387/s_remquol.S	(revision 368354)
@@ -1,65 +1,66 @@
 /*-
  * Copyright (c) 2005-2008 David Schultz <das@FreeBSD.ORG>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 /*
  * Based on public-domain remainder routine by J.T. Conklin <jtc@NetBSD.org>.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");

 ENTRY(remquol)
         fldt 16(%esp)
         fldt 4(%esp)
 1:      fprem1
         fstsw %ax
         sahf
         jp 1b
         fstp %st(1)
 /* Extract the three low-order bits of the quotient from C0,C3,C1. */
         shrl $6,%eax
         movl %eax,%ecx
         andl $0x108,%eax
         rorl $7,%eax
         orl %eax,%ecx
         roll $4,%eax
         orl %ecx,%eax
         andl $7,%eax
 /* Negate the quotient bits if x*y<0. Avoid using an unpredictable branch. */
         movl 24(%esp),%ecx
         xorl 12(%esp),%ecx
         movsx %cx,%ecx
         sarl $16,%ecx
         sarl $16,%ecx
         xorl %ecx,%eax
         andl $1,%ecx
         addl %ecx,%eax
 /* Store the quotient and return. */
         movl 28(%esp),%ecx
         movl %eax,(%ecx)
         ret
+END(remquol)

         .section .note.GNU-stack,"",%progbits
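[Editor's note] The bit-twiddling after fprem1 deserves a gloss: fprem1 reports the three low bits of the quotient in the x87 status word as C0 (bit 8) = Q2, C3 (bit 14) = Q1, and C1 (bit 9) = Q0, and the shrl/rorl/roll dance reassembles them into bits 2..0. The same extraction in straightforward C (a sketch of the semantics, not of the instruction sequence):

    #include <stdint.h>

    static int
    quotient_low3(uint16_t fpu_status)
    {
            int q2 = (fpu_status >> 8) & 1;     /* C0 */
            int q1 = (fpu_status >> 14) & 1;    /* C3 */
            int q0 = (fpu_status >> 9) & 1;     /* C1 */

            return ((q2 << 2) | (q1 << 1) | q0);
    }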
Index: head/lib/msun/i387/s_rintl.S
===================================================================
--- head/lib/msun/i387/s_rintl.S	(revision 368353)
+++ head/lib/msun/i387/s_rintl.S	(revision 368354)
@@ -1,44 +1,45 @@
 /*
  * Copyright (c) 1993,94 Winning Strategies, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by Winning Strategies, Inc.
  * 4. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /*
  * Written by:
  *      J.T. Conklin (jtc@wimsey.com), Winning Strategies, Inc.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$")

 ENTRY(rintl)
         fldt 4(%esp)
         frndint
         ret
+END(rintl)

         .section .note.GNU-stack,"",%progbits

Index: head/libexec/rtld-elf/aarch64/rtld_start.S
===================================================================
--- head/libexec/rtld-elf/aarch64/rtld_start.S	(revision 368353)
+++ head/libexec/rtld-elf/aarch64/rtld_start.S	(revision 368354)
@@ -1,262 +1,253 @@
 /*-
  * Copyright (c) 2014 The FreeBSD Foundation
  * All rights reserved.
  *
  * This software was developed by Andrew Turner under
  * sponsorship from the FreeBSD Foundation.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");

 ENTRY(.rtld_start)
         mov x19, x0     /* Put ps_strings in a callee-saved register */
         mov x20, sp     /* And the stack pointer */

         sub sp, sp, #16 /* Make room for obj_main & exit proc */

         mov x1, sp      /* exit_proc */
         add x2, x1, #8  /* obj_main */
         bl _rtld        /* Call the loader */
         mov x8, x0      /* Backup the entry point */

         ldr x2, [sp]    /* Load cleanup */
         ldr x1, [sp, #8] /* Load obj_main */
         mov x0, x19     /* Restore ps_strings */
         mov sp, x20     /* Restore the stack pointer */
         br x8           /* Jump to the entry point */
END(.rtld_start)
 /*
  * sp + 0 = &GOT[x + 3]
  * sp + 8 = RA
  * x16 = &GOT[2]
  * x17 = &_rtld_bind_start
  */
 ENTRY(_rtld_bind_start)
-        .cfi_startproc
         mov x17, sp

         /* Save frame pointer and SP */
         stp x29, x30, [sp, #-16]!
         mov x29, sp
         .cfi_def_cfa x29, 16
         .cfi_offset x30, -8
         .cfi_offset x29, -16

         /* Save the arguments */
         stp x0, x1, [sp, #-16]!
         stp x2, x3, [sp, #-16]!
         stp x4, x5, [sp, #-16]!
         stp x6, x7, [sp, #-16]!
         stp x8, xzr, [sp, #-16]!

         /* Save any floating-point arguments */
         stp q0, q1, [sp, #-32]!
         stp q2, q3, [sp, #-32]!
         stp q4, q5, [sp, #-32]!
         stp q6, q7, [sp, #-32]!

         /* Calculate reloff */
         ldr x2, [x17, #0]       /* Get the address of the entry */
         sub x1, x2, x16         /* Find its offset */
         sub x1, x1, #8          /* Adjust for x16 not being at offset 0 */
         /* Each rela item has 3 entries, so we need reloff = 3 * index */
         lsl x3, x1, #1          /* x3 = 2 * offset */
         add x1, x1, x3          /* x1 = x3 + offset = 3 * offset */

         /* Load obj */
         ldr x0, [x16, #-8]

         /* Call into rtld */
         bl _rtld_bind

         /* Backup the address to branch to */
         mov x16, x0

         /* restore the arguments */
         ldp q6, q7, [sp], #32
         ldp q4, q5, [sp], #32
         ldp q2, q3, [sp], #32
         ldp q0, q1, [sp], #32
         ldp x8, xzr, [sp], #16
         ldp x6, x7, [sp], #16
         ldp x4, x5, [sp], #16
         ldp x2, x3, [sp], #16
         ldp x0, x1, [sp], #16

         /* Restore frame pointer */
         ldp x29, xzr, [sp], #16

         /* Restore link register saved by the plt code */
         ldp xzr, x30, [sp], #16

         /* Call into the correct function */
         br x16
-        .cfi_endproc
END(_rtld_bind_start)

 /*
  * struct rel_tlsdesc {
  *     uint64_t resolver_fnc;
  *     uint64_t resolver_arg;
  * };
  *
  * uint64_t _rtld_tlsdesc_static(struct rel_tlsdesc *);
  *
  * Resolver function for TLS symbols resolved at load time
  */
 ENTRY(_rtld_tlsdesc_static)
-        .cfi_startproc
         ldr x0, [x0, #8]
         ret
-        .cfi_endproc
END(_rtld_tlsdesc_static)

 /*
  * uint64_t _rtld_tlsdesc_undef(void);
  *
  * Resolver function for weak and undefined TLS symbols
  */
 ENTRY(_rtld_tlsdesc_undef)
-        .cfi_startproc
         str x1, [sp, #-16]!
         .cfi_adjust_cfa_offset 16

         mrs x1, tpidr_el0
         ldr x0, [x0, #8]
         sub x0, x0, x1

         ldr x1, [sp], #16
         .cfi_adjust_cfa_offset -16
-        .cfi_endproc
         ret
END(_rtld_tlsdesc_undef)

 /*
  * uint64_t _rtld_tlsdesc_dynamic(struct rel_tlsdesc *);
  *
  * Resolver function for TLS symbols from dlopen()
  */
 ENTRY(_rtld_tlsdesc_dynamic)
-        .cfi_startproc
-
         /* Save registers used in fast path */
         stp x1, x2, [sp, #(-2 * 16)]!
         stp x3, x4, [sp, #(1 * 16)]
         .cfi_adjust_cfa_offset 2 * 16
         .cfi_rel_offset x1, 0
         .cfi_rel_offset x2, 8
         .cfi_rel_offset x3, 16
         .cfi_rel_offset x4, 24

         /* Test fastpath - inlined version of tls_get_addr_common(). */
         ldr x1, [x0, #8]        /* tlsdesc ptr */
         mrs x4, tpidr_el0
         ldr x0, [x4]            /* DTV pointer */

         ldr x2, [x0]            /* dtv[0] (generation count) */
         ldr x3, [x1]            /* tlsdec->dtv_gen */
         cmp x2, x3
         b.ne 1f                 /* dtv[0] != tlsdec->dtv_gen */

         ldr w2, [x1, #8]        /* tlsdec->tls_index */
         add w2, w2, #1
         ldr x3, [x0, w2, sxtw #3] /* dtv[tlsdesc->tls_index + 1] */
         cbz x3, 1f

         /* Return (dtv[tlsdesc->tls_index + 1] + tlsdesc->tls_offs - tp) */
         ldr x2, [x1, #16]       /* tlsdec->tls_offs */
         add x2, x2, x3
         sub x0, x2, x4

         /* Restore registers and return */
         ldp x3, x4, [sp, #(1 * 16)]
         ldp x1, x2, [sp], #(2 * 16)
         .cfi_adjust_cfa_offset -2 * 16
         ret

         /*
          * Slow path
          * return(
          *    tls_get_addr_common(tp, tlsdesc->tls_index, tlsdesc->tls_offs));
          *
          */
 1:
         /* Save all integer registers */
         stp x29, x30, [sp, #-(8 * 16)]!
         .cfi_adjust_cfa_offset 8 * 16
         .cfi_rel_offset x29, 0
         .cfi_rel_offset x30, 8
         mov x29, sp

         stp x5, x6, [sp, #(1 * 16)]
         stp x7, x8, [sp, #(2 * 16)]
         stp x9, x10, [sp, #(3 * 16)]
         stp x11, x12, [sp, #(4 * 16)]
         stp x13, x14, [sp, #(5 * 16)]
         stp x15, x16, [sp, #(6 * 16)]
         stp x17, x18, [sp, #(7 * 16)]
         .cfi_rel_offset x5, 16
         .cfi_rel_offset x6, 24
         .cfi_rel_offset x7, 32
         .cfi_rel_offset x8, 40
         .cfi_rel_offset x9, 48
         .cfi_rel_offset x10, 56
         .cfi_rel_offset x11, 64
         .cfi_rel_offset x12, 72
         .cfi_rel_offset x13, 80
         .cfi_rel_offset x14, 88
         .cfi_rel_offset x15, 96
         .cfi_rel_offset x16, 104
         .cfi_rel_offset x17, 112
         .cfi_rel_offset x18, 120

         /* Find the tls offset */
         mov x0, x4              /* tp */
         mov x3, x1              /* tlsdesc ptr */
         ldr w1, [x3, #8]        /* tlsdec->tls_index */
         ldr x2, [x3, #16]       /* tlsdec->tls_offs */
         bl tls_get_addr_common
         mrs x1, tpidr_el0
         sub x0, x0, x1

         /* Restore slow path registers */
         ldp x17, x18, [sp, #(7 * 16)]
         ldp x15, x16, [sp, #(6 * 16)]
         ldp x13, x14, [sp, #(5 * 16)]
         ldp x11, x12, [sp, #(4 * 16)]
         ldp x9, x10, [sp, #(3 * 16)]
         ldp x7, x8, [sp, #(2 * 16)]
         ldp x5, x6, [sp, #(1 * 16)]
         ldp x29, x30, [sp], #(8 * 16)
         .cfi_adjust_cfa_offset -8 * 16
         .cfi_restore x29
         .cfi_restore x30

         /* Restore fast path registers and return */
         ldp x3, x4, [sp, #16]
         ldp x1, x2, [sp], #(2 * 16)
         .cfi_adjust_cfa_offset -2 * 16
-        .cfi_endproc
         ret
END(_rtld_tlsdesc_dynamic)
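[Editor's note] For readers following the _rtld_tlsdesc_dynamic fast path, here is the same logic in C. Names mirror the comments in the assembly; the real rtld structures and the tls_get_addr_common() signature may differ, so treat this as a sketch:

    #include <stddef.h>
    #include <stdint.h>

    struct tlsdesc_sketch {
            uint64_t dtv_gen;   /* generation the descriptor was built for */
            uint32_t tls_index; /* module slot in the DTV (4 bytes padding follow) */
            uint64_t tls_offs;  /* offset inside the module's TLS block */
    };

    /* Slow path in rtld proper; hypothetical signature. */
    extern uintptr_t tls_get_addr_common(uintptr_t tp, uint32_t index,
        uint64_t offs);

    static ptrdiff_t
    tlsdesc_dynamic_sketch(struct tlsdesc_sketch *td, uintptr_t tp)
    {
            uint64_t *dtv = *(uint64_t **)tp;   /* DTV pointer sits at *tp */

            /* Fast path: generation matches and the block is allocated. */
            if (dtv[0] == td->dtv_gen && dtv[td->tls_index + 1] != 0)
                    return ((ptrdiff_t)(dtv[td->tls_index + 1] +
                        td->tls_offs - tp));
            /* Slow path: allocate/refresh the DTV, then compute the offset. */
            return ((ptrdiff_t)(tls_get_addr_common(tp, td->tls_index,
                td->tls_offs) - tp));
    }

The function hands back a tp-relative offset because that is the contract for a TLS-descriptor resolver.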
Index: head/libexec/rtld-elf/powerpc/rtld_start.S
===================================================================
--- head/libexec/rtld-elf/powerpc/rtld_start.S	(revision 368353)
+++ head/libexec/rtld-elf/powerpc/rtld_start.S	(revision 368354)
@@ -1,315 +1,321 @@
 /*      $NetBSD: rtld_start.S,v 1.4 2001/09/26 04:06:43 mycroft Exp $  */

 /*-
  * Copyright (C) 1998 Tsubai Masanari
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #include <machine/asm.h>
 #include <machine/spr.h>        /* For SPR_SPEFSCR if needed. */

         .extern _GLOBAL_OFFSET_TABLE_
         .extern _DYNAMIC

 _ENTRY(.rtld_start)
         stwu %r1,-48(%r1)       /* 16-byte aligned stack for reg saves +
                                    exit_proc & obj _rtld args +
                                    backchain & lrsave stack frame */
         stw %r3,16(%r1)         /* argc */
         stw %r4,20(%r1)         /* argv */
         stw %r5,24(%r1)         /* envp */
 /*      stw %r6,28(%r1) *//*    obj (always 0) */
 /*      stw %r7,32(%r1) *//*    cleanup (always 0) */
         stw %r8,36(%r1)         /* ps_strings */

         /*
          * Perform initial relocation of ld-elf.so. Not as easy as it
          * sounds.
          *  - perform small forward branch to put PC into link reg
          *  - use link-time constants to determine offset to the
          *    _DYNAMIC section and the GOT. Add these to the PC to
          *    convert to absolute addresses.
          *  - read GOT[0], which is the SVR4 ABI-specified link-time
          *    value of _DYNAMIC. Subtract this value from the absolute
          *    value to determine the load address
          *  - call reloc_non_plt_self() to fix up ld-elf.so's relocations
          */
         bcl 20,31,1f
 1:      mflr %r30
         mr %r3,%r30                     # save for _DYNAMIC
         addis %r30,%r30,_GLOBAL_OFFSET_TABLE_-1b@ha
         addi %r30,%r30,_GLOBAL_OFFSET_TABLE_-1b@l
         addis %r3,%r3,_DYNAMIC-1b@ha    # get _DYNAMIC actual address
         addi %r3,%r3,_DYNAMIC-1b@l
         lwz %r28,0(%r30)                # get base-relative &_DYNAMIC
         sub %r28,%r3,%r28               # r28 = relocbase
         mr %r4,%r28                     # r4 = relocbase
         bl reloc_non_plt_self           /* reloc_non_plt_self(&_DYNAMIC,base) */

         /*
          * The _rtld() function likes to see a stack layout containing
          * { argc, argv[0], argv[1] ... argv[N], 0, env[0], ... , env[N] }
          * Since the PowerPC stack was 16-byte aligned at exec time, the
          * original stack layout has to be found by moving back a word
          * from the argv pointer.
          */
         lwz %r4,20(%r1)         /* restore argv */
         addi %r3,%r4,-4         /* locate argc ptr, &argv[-1] */

         addi %r4,%r1,8          /* &exit_proc on stack */
         addi %r5,%r1,12         /* &obj_main on stack */

         bl _rtld                /* &_start = _rtld(sp, &exit_proc, &obj_main)*/
         mtlr %r3

         /*
          * Restore args, with new obj/exit proc
          */
         lwz %r3,16(%r1)         /* argc */
         lwz %r4,20(%r1)         /* argv */
         lwz %r5,24(%r1)         /* envp */
         lwz %r6,12(%r1)         /* obj */
         lwz %r7,8(%r1)          /* exit proc */
         lwz %r8,36(%r1)         /* ps_strings */

         addi %r1,%r1,48         /* restore original stackptr */
         blrl                    /* _start(argc, argv, envp, obj, cleanup, ps_strings) */
         li %r0,1                /* _exit() */
         sc
+_END(.rtld_start)

 #ifdef __SPE__
 /* stack space for 30 GPRs + SPEFSCR/ACC/lr/cr */
 #define NREGS   31
 #define GPRWIDTH 8
 #define FUDGE   4 /* Fudge factor for alignment */
 #else
 /* stack space for 30 GPRs + lr/cr */
 #define NREGS   30
 #define GPRWIDTH 4
 #define FUDGE   4
 #endif
 /* Stack frame needs the 12-byte ABI frame plus fudge factor. */
 #define STACK_SIZE      (NREGS * GPRWIDTH + 4 * 2 + 12 + FUDGE)

 /*
  * _rtld_bind_secureplt_start()
  *
  * Call into the MI binder (Secure-PLT stub).
  * secure-plt expects %r11 to be the offset to the rela entry.
  * bss-plt expects %r11 to be index of the rela entry.
  * So for bss-plt, we multiply the index by 12 to get the offset.
  */
 _ENTRY(_rtld_bind_secureplt_start)
         stwu %r1,-STACK_SIZE(%r1)
 #ifdef __SPE__
         evstdd %r0,24(%r1)
 #else
         stw %r0,20(%r1)         # save r0
 #endif

         /*
          * Instead of division which is costly we will use multiplicative
          * inverse. a / n = ((a * inv(n)) >> 32)
          * where inv(n) = (0x100000000 + n - 1) / n
          */
         mr %r0,%r11
         lis %r11,0x15555556@h   # load multiplicative inverse of 12
         ori %r11,%r11,0x15555556@l
         mulhwu %r11,%r11,%r0    # get high half of multiplication
         b 1f
+_END(_rtld_bind_secureplt_start)
 /*
  * _rtld_bind_start()
  *
  * Call into the MI binder. This routine is reached via the PLT call cell,
  * and then _rtld_powerpc_pltresolve().
  * On entry, %r11 contains the index of the PLT cell, and %r12 contains
  * a pointer to the ELF object for the file.
  * Save all registers, call into the binder to resolve and fixup the external
  * routine, and then transfer to the external routine on return.
  */
         .globl _rtld_bind

 _ENTRY(_rtld_bind_start)
         stwu %r1,-STACK_SIZE(%r1)
 #ifdef __SPE__
         evstdd %r0,24(%r1)
 #else
         stw %r0,20(%r1)         # save r0
 #endif
 1:
         mflr %r0
         stw %r0,16(%r1)         # save lr
         mfcr %r0
         stw %r0,12(%r1)         # save cr

 #ifdef __SPE__
         evstdd %r3, 32(%r1)
         evstdd %r4, 40(%r1)
         evstdd %r5, 48(%r1)
         evstdd %r6, 56(%r1)
         evstdd %r7, 64(%r1)
         evstdd %r8, 72(%r1)
         evstdd %r9, 80(%r1)
         evstdd %r10, 88(%r1)
         evstdd %r11, 96(%r1)
         evstdd %r12, 104(%r1)
         evstdd %r13, 112(%r1)
         evstdd %r14, 120(%r1)
         evstdd %r15, 128(%r1)
         evstdd %r16, 136(%r1)
         evstdd %r17, 144(%r1)
         evstdd %r18, 152(%r1)
         evstdd %r19, 160(%r1)
         evstdd %r20, 168(%r1)
         evstdd %r21, 176(%r1)
         evstdd %r22, 184(%r1)
         evstdd %r23, 192(%r1)
         evstdd %r24, 200(%r1)
         evstdd %r25, 208(%r1)
         evstdd %r26, 216(%r1)
         evstdd %r27, 224(%r1)
         evstdd %r28, 232(%r1)
         evstdd %r29, 240(%r1)
         evstdd %r30, 248(%r1)
         li %r3, 256
         evstddx %r31, %r1, %r3
         evxor %r0, %r0, %r0
         li %r3, 264
         evmwumiaa %r0, %r0, %r0
         evstddx %r0, %r1, %r3
         mfspr %r3, SPR_SPEFSCR
         stw %r3, 20(%r1)
 #else
         stmw %r3,24(%r1)        # save r3-r31
 #endif

         mr %r3,%r12             # obj
         mulli %r4,%r11,12       # rela index * sizeof(Elf_Rela)
         bl _rtld_bind           # target addr = _rtld_bind(obj, reloff)
         mtctr %r3               # move absolute target addr into ctr

 #ifdef __SPE__
         lwz %r3, 20(%r1)
         mtspr SPR_SPEFSCR, %r3
         li %r3, 264
         evlddx %r0, %r3, %r1
         evmra %r0, %r0
         evldd %r3, 32(%r1)
         evldd %r4, 40(%r1)
         evldd %r5, 48(%r1)
         evldd %r6, 56(%r1)
         evldd %r7, 64(%r1)
         evldd %r8, 72(%r1)
         evldd %r9, 80(%r1)
         evldd %r10, 88(%r1)
         evldd %r11, 96(%r1)
         evldd %r12, 104(%r1)
         evldd %r13, 112(%r1)
         evldd %r14, 120(%r1)
         evldd %r15, 128(%r1)
         evldd %r16, 136(%r1)
         evldd %r17, 144(%r1)
         evldd %r18, 152(%r1)
         evldd %r19, 160(%r1)
         evldd %r20, 168(%r1)
         evldd %r21, 176(%r1)
         evldd %r22, 184(%r1)
         evldd %r23, 192(%r1)
         evldd %r24, 200(%r1)
         evldd %r25, 208(%r1)
         evldd %r26, 216(%r1)
         evldd %r27, 224(%r1)
         evldd %r28, 232(%r1)
         evldd %r29, 240(%r1)
         evldd %r30, 248(%r1)
         li %r0, 256
         evlddx %r31, %r1, %r0
 #else
         lmw %r3,24(%r1)         # restore r3-r31
 #endif
         lwz %r0,12(%r1)         # restore cr
         mtcr %r0
         lwz %r0,16(%r1)         # restore lr
         mtlr %r0
 #ifdef __SPE__
         evldd %r0,24(%r1)
 #else
         lwz %r0,20(%r1)         # restore r0
 #endif

         addi %r1,%r1,STACK_SIZE # restore stack
         bctr                    # jump to target
+_END(_rtld_bind_start)
 /*
  * _rtld_powerpc_pltresolve()
  *
  * This routine is copied into the latter part of the 72-byte reserved
  * area at the start of the PLT. The absolute address of the _rtld_bind_start
  * routine, and the ELF object for the loaded file, are inserted into
  * the code by the reloc.c:init_pltgot() routine.
  * The first time an external routine is called, the PLT slot will
  * set up %r11 to the offset of the slot, and will jump to this routine.
  * The ELF object is shifted into %r11, and _rtld_bind_start is called
  * to complete the binding.
  */
 _ENTRY(_rtld_powerpc_pltlongresolve)
         lis %r12,0              # lis 12,jmptab@ha
         addi %r12,%r12,0        # addi 12,12,jmptab@l
         subf %r11,%r12,%r11     # reloff
         li %r12,2
         srw %r11,%r11,%r12      # index = reloff/sizeof(Elf_Addr)
+_END(_rtld_powerpc_pltlongresolve)
 _ENTRY(_rtld_powerpc_pltresolve)
         lis %r12,0              # lis 12,_rtld_bind_start@ha
         addi %r12,%r12,0        # addi 12,12,_rtld_bind_start@l
         mtctr %r12
         lis %r12,0              # lis 12,obj@ha
         addi %r12,%r12,0        # addi 12,12,obj@l
         bctr
+_END(_rtld_powerpc_pltresolve)

 /*
  * _rtld_powerpc_pltcall()
  *
  * This routine is copied into the 72-byte reserved area at the
  * start of the PLT. The reloc.c:init_pltgot() routine inserts
  * the absolute address of the jumptable.
  * Control is transferred to this routine when the binder has
  * located the external routine, but determined that it is > 32Mb
  * from the PLT slot. Code is inserted into the PLT slot to set up
  * %r11 with the jumptable index, and jump to here, where the
  * absolute address of the external routine is loaded from the
  * jumptable and control is transferred to it.
  */
 _ENTRY(_rtld_powerpc_pltcall)
         slwi %r11,%r11,2        # jmptab offset = index * 4
         addis %r11,%r11,0       # addis 11,11,jmptab@ha
         lwz %r11,0(%r11)        # lwz 11,jmptab@l(11)
         mtctr %r11
         bctr                    # (*jmptab[index])()
+_END(_rtld_powerpc_pltcall)

         .section .note.GNU-stack,"",%progbits
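[Editor's note] The multiplicative-inverse comment in _rtld_bind_secureplt_start is worth unpacking: dividing the PLT offset by 12 (sizeof(Elf_Rela) on 32-bit powerpc) is replaced by a multiply-high with inv(12) = ceil(2^32 / 12) = 0x15555556, exactly the constant the lis/ori pair builds. A self-contained C check of the identity (a sketch; the loop bound is well inside the range where the trick is exact for multiples of 12):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t
    div12(uint32_t a)
    {
            /* mulhwu: take the high 32 bits of the 32x32-bit product. */
            return ((uint32_t)(((uint64_t)a * 0x15555556u) >> 32));
    }

    int
    main(void)
    {
            for (uint32_t i = 0; i < 1000000; i++)
                    assert(div12(i * 12) == i);
            return (0);
    }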
Index: head/libexec/rtld-elf/powerpc64/rtld_start.S
===================================================================
--- head/libexec/rtld-elf/powerpc64/rtld_start.S	(revision 368353)
+++ head/libexec/rtld-elf/powerpc64/rtld_start.S	(revision 368354)
@@ -1,179 +1,181 @@
 /*      $NetBSD: rtld_start.S,v 1.4 2001/09/26 04:06:43 mycroft Exp $  */

 /*-
  * Copyright (C) 1998 Tsubai Masanari
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #include <machine/asm.h>

         .extern _GLOBAL_OFFSET_TABLE_
         .extern _DYNAMIC

 _ENTRY(_rtld_start)
         stdu %r1,-144(%r1)      /* 16-byte aligned stack for reg saves +
                                    exit_proc & obj _rtld args +
                                    backchain & lrsave stack frame */

         /* Save and restore only initial argv, because _rtld will modify
          * argv and envp if invoked explicitly, making it necessary to
          * load the (possibly) adjusted values from the stack.
          */
         std %r4,104(%r1)        /* argv */
 /*      std %r6,120(%r1) *//*   obj (always 0) */
 /*      std %r7,128(%r1) *//*   cleanup (always 0) */
         std %r8,136(%r1)        /* ps_strings */

         /*
          * Perform initial relocation of ld-elf.so. Not as easy as it
          * sounds.
          *  - perform small forward branch to put PC into link reg
          *  - use link-time constants to determine offset to the
          *    _DYNAMIC section and the GOT. Add these to the PC to
          *    convert to absolute addresses.
          *  - call reloc_non_plt_self() to fix up ld-elf.so's relocations
          */
         bl 1f
         .llong _DYNAMIC-.
 1:      mflr %r3                /* PC value at .llong */
         ld %r4,0(%r3)           /* offset to _DYNAMIC */
         add %r3,%r4,%r3         /* r3 = &_DYNAMIC, absolute value */

         ld %r4,-0x8000(%r2)     /* First TOC entry is TOC base */
         subf %r4,%r4,%r2        /* Subtract from real TOC base to get base */

         bl reloc_non_plt_self   /* reloc_non_plt_self(&_DYNAMIC,base) */
         nop

         /*
          * The _rtld() function likes to see a stack layout containing
          * { argc, argv[0], argv[1] ... argv[N], 0, env[0], ... , env[N] }
          * Since the PowerPC stack was 16-byte aligned at exec time, the
          * original stack layout has to be found by moving back a word
          * from the argv pointer.
          */
         ld %r4,104(%r1)
         addi %r3,%r4,-8         /* locate argc ptr, &argv[-1] */

         addi %r4,%r1,128        /* &exit_proc on stack */
         addi %r5,%r1,120        /* &obj_main on stack */

         bl _rtld                /* &_start = _rtld(sp, &exit_proc, &obj_main)*/
         nop

 #if !defined(_CALL_ELF) || _CALL_ELF == 1
         ld %r2,8(%r3)
         ld %r11,16(%r3)
         ld %r3,0(%r3)
 #else
         mr %r12,%r3
 #endif
         mtlr %r3

         /*
          * Restore args, with new obj/exit proc
          */
         ld %r4,104(%r1)         /* argv */
         ld %r3,-8(%r4)          /* argc */

         /* envp = argv + argc + 1 */
         addi %r5,%r3,1
         sldi %r5,%r5,3          /* x8 */
         add %r5,%r4,%r5

         ld %r6,120(%r1)         /* obj */
         ld %r7,128(%r1)         /* exit proc */
         ld %r8,136(%r1)         /* ps_strings */

         blrl                    /* _start(argc, argv, envp, obj, cleanup, ps_strings) */
         li %r0,1                /* _exit() */
         sc
+_END(_rtld_start)
 /*
  * _rtld_bind_start()
  *
  * Call into the MI binder. This routine is reached via the PLT call cell
  *
  * On entry, %r11 contains an object pointer and %r0 contains the PLT index.
  *
  * Save all registers, call into the binder to resolve and fixup the external
  * routine, and then transfer to the external routine on return.
  */
         .globl _rtld_bind

 _ENTRY(_rtld_bind_start)
         mr %r12,%r0             # save r0 (index) immediately to r12
         mflr %r0
         std %r0,16(%r1)         # save lr
         mfcr %r0
         std %r0,8(%r1)          # save cr

         stdu %r1,-48-12*8(%r1)  # stack space for 8 regs + header
                                 #  + 2 save regs
         std %r3,64+0*8(%r1)     # save r3-r10 (arguments)
         std %r4,64+1*8(%r1)
         std %r5,64+2*8(%r1)
         std %r6,64+3*8(%r1)
         std %r7,64+4*8(%r1)
         std %r8,64+5*8(%r1)
         std %r9,64+6*8(%r1)
         std %r10,64+7*8(%r1)

         mr %r3,%r11
         mulli %r4,%r12,24       # Multiply index by sizeof(Elf_Rela)

         bl _rtld_bind           # target addr = _rtld_bind(obj, reloff)
         nop

 #if !defined(_CALL_ELF) || _CALL_ELF == 1
         ld %r2,8(%r3)
         ld %r11,16(%r3)
         ld %r3,0(%r3)
 #else
         mr %r12,%r3
 #endif
         mtctr %r3               # move absolute target addr into ctr

         ld %r3,64+0*8(%r1)      # restore r3-r10
         ld %r4,64+1*8(%r1)
         ld %r5,64+2*8(%r1)
         ld %r6,64+3*8(%r1)
         ld %r7,64+4*8(%r1)
         ld %r8,64+5*8(%r1)
         ld %r9,64+6*8(%r1)
         ld %r10,64+7*8(%r1)

         ld %r1,0(%r1)           # restore stack
         ld %r0,8(%r1)           # restore cr
         mtcr %r0
         ld %r0,16(%r1)          # restore lr
         mtlr %r0

         bctr                    # jump to target
+_END(_rtld_bind_start)

         .section .note.GNU-stack,"",%progbits
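[Editor's note] The #if blocks after the bl _rtld / bl _rtld_bind calls are the ELFv1/ELFv2 split: under ELFv1 a function pointer refers to a descriptor rather than code, and the three ld instructions unpack it. A sketch of the layout being walked (field comments map to the registers loaded above):

    #include <stdint.h>

    struct elfv1_func_desc_sketch {
            uint64_t entry;     /* code address: moved to lr/ctr via %r3 */
            uint64_t toc;       /* callee's TOC pointer: loaded into %r2 */
            uint64_t env;       /* environment pointer: loaded into %r11 */
    };

Under ELFv2 (the #else branch) there is no descriptor; the entry address itself is passed in %r12 and the callee derives its own TOC from it.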
Index: head/stand/libsa/powerpc/_setjmp.S
===================================================================
--- head/stand/libsa/powerpc/_setjmp.S	(revision 368353)
+++ head/stand/libsa/powerpc/_setjmp.S	(revision 368354)
@@ -1,115 +1,117 @@
 /*      $FreeBSD$ */
 /*      from: NetBSD: setjmp.S,v 1.1 1998/01/27 15:13:12 sakamoto Exp $ */
 /*      from: OpenBSD: setjmp.S,v 1.2 1996/12/28 06:22:18 rahnds Exp    */
 /*      kernel version of this file, does not have signal goop */
 /* int setjmp(jmp_buf env) */

 #include <machine/asm.h>

 #ifdef __powerpc64__
 #define LD_REG  ld
 #define ST_REG  std
 #define REGWIDTH 8
 #else
 #define LD_REG  lwz
 #define ST_REG  stw
 #define REGWIDTH 4
 #endif

 #define JMP_r1  1*REGWIDTH
 #define JMP_r2  2*REGWIDTH
 #define JMP_r14 3*REGWIDTH
 #define JMP_r15 4*REGWIDTH
 #define JMP_r16 5*REGWIDTH
 #define JMP_r17 6*REGWIDTH
 #define JMP_r18 7*REGWIDTH
 #define JMP_r19 8*REGWIDTH
 #define JMP_r20 9*REGWIDTH
 #define JMP_r21 10*REGWIDTH
 #define JMP_r22 11*REGWIDTH
 #define JMP_r23 12*REGWIDTH
 #define JMP_r24 13*REGWIDTH
 #define JMP_r25 14*REGWIDTH
 #define JMP_r26 15*REGWIDTH
 #define JMP_r27 16*REGWIDTH
 #define JMP_r28 17*REGWIDTH
 #define JMP_r29 18*REGWIDTH
 #define JMP_r30 19*REGWIDTH
 #define JMP_r31 20*REGWIDTH
 #define JMP_lr  21*REGWIDTH
 #define JMP_cr  22*REGWIDTH
 #define JMP_ctr 23*REGWIDTH
 #define JMP_xer 24*REGWIDTH
 #define JMP_sig 25*REGWIDTH

 ASENTRY_NOPROF(_setjmp)
         ST_REG 31, JMP_r31(3)
         /* r1, r2, r14-r30 */
         ST_REG 1,  JMP_r1 (3)
         ST_REG 2,  JMP_r2 (3)
         ST_REG 14, JMP_r14(3)
         ST_REG 15, JMP_r15(3)
         ST_REG 16, JMP_r16(3)
         ST_REG 17, JMP_r17(3)
         ST_REG 18, JMP_r18(3)
         ST_REG 19, JMP_r19(3)
         ST_REG 20, JMP_r20(3)
         ST_REG 21, JMP_r21(3)
         ST_REG 22, JMP_r22(3)
         ST_REG 23, JMP_r23(3)
         ST_REG 24, JMP_r24(3)
         ST_REG 25, JMP_r25(3)
         ST_REG 26, JMP_r26(3)
         ST_REG 27, JMP_r27(3)
         ST_REG 28, JMP_r28(3)
         ST_REG 29, JMP_r29(3)
         ST_REG 30, JMP_r30(3)
         /* cr, lr, ctr, xer */
         mfcr 0
         ST_REG 0, JMP_cr(3)
         mflr 0
         ST_REG 0, JMP_lr(3)
         mfctr 0
         ST_REG 0, JMP_ctr(3)
         mfxer 0
         ST_REG 0, JMP_xer(3)
         /* f14-f31, fpscr */
         li 3, 0
         blr
+ASEND(_setjmp)

 .extern sigsetmask

 ASENTRY_NOPROF(_longjmp)
         LD_REG 31, JMP_r31(3)
         /* r1, r2, r14-r30 */
         LD_REG 1,  JMP_r1 (3)
         LD_REG 2,  JMP_r2 (3)
         LD_REG 14, JMP_r14(3)
         LD_REG 15, JMP_r15(3)
         LD_REG 16, JMP_r16(3)
         LD_REG 17, JMP_r17(3)
         LD_REG 18, JMP_r18(3)
         LD_REG 19, JMP_r19(3)
         LD_REG 20, JMP_r20(3)
         LD_REG 21, JMP_r21(3)
         LD_REG 22, JMP_r22(3)
         LD_REG 23, JMP_r23(3)
         LD_REG 24, JMP_r24(3)
         LD_REG 25, JMP_r25(3)
         LD_REG 26, JMP_r26(3)
         LD_REG 27, JMP_r27(3)
         LD_REG 28, JMP_r28(3)
         LD_REG 29, JMP_r29(3)
         LD_REG 30, JMP_r30(3)
         /* cr, lr, ctr, xer */
         LD_REG 0, JMP_cr(3)
         mtcr 0
         LD_REG 0, JMP_lr(3)
         mtlr 0
         LD_REG 0, JMP_ctr(3)
         mtctr 0
         LD_REG 0, JMP_xer(3)
         mtxer 0
         /* f14-f31, fpscr */
         mr 3, 4
         blr
+ASEND(_longjmp)

Index: head/stand/powerpc/kboot/host_syscall.S
===================================================================
--- head/stand/powerpc/kboot/host_syscall.S	(revision 368353)
+++ head/stand/powerpc/kboot/host_syscall.S	(revision 368354)
@@ -1,84 +1,97 @@
 /*
  *
  * $FreeBSD$
  */

 #include <machine/asm.h>

 ENTRY(host_read)
         li %r0, 3 # SYS_read
         sc
         bso 1f
         blr
 1:
         li %r3, 0
         blr
+END(host_read)

 ENTRY(host_write)
         li %r0, 4 # SYS_write
         sc
         blr
+END(host_write)

 ENTRY(host_seek)
         mr %r4,%r5
         mr %r5,%r6
         mr %r6,%r7
         li %r0, 140 # SYS_llseek
         sc
         blr
+END(host_seek)

 ENTRY(host_llseek)
         li %r0, 140 # SYS_llseek
         sc
         blr
+END(host_llseek)

 ENTRY(host_open)
         li %r0, 5 # SYS_open
         sc
         bso 1f
         blr
 1:
         li %r3, 0
         blr
+END(host_open)

 ENTRY(host_close)
         li %r0, 6 # SYS_close
         sc
         blr
+END(host_close)

 ENTRY(host_mmap)
         li %r0, 90 # SYS_mmap
         sc
         blr
+END(host_mmap)

 ENTRY(host_uname)
         li %r0, 122 # SYS_uname
         sc
         blr
+END(host_uname)

 ENTRY(host_gettimeofday)
         li %r0, 78 # SYS_gettimeofday
         sc
         blr
+END(host_gettimeofday)

 ENTRY(host_select)
         li %r0, 142 # SYS_select
         sc
         blr
+END(host_select)

 ENTRY(kexec_load)
         lis %r6,21 # KEXEC_ARCH_PPC64
         li %r0,268 # __NR_kexec_load
         sc
         blr
+END(kexec_load)

 ENTRY(host_reboot)
         li %r0,88 # SYS_reboot
         sc
         blr
+END(host_reboot)

 ENTRY(host_getdents)
         li %r0,141 # SYS_getdents
         sc
         blr
+END(host_getdents)
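[Editor's note] These kboot stubs trap directly into the Linux kernel (kboot runs as a Linux user process), which is why the numbers above are Linux syscall numbers rather than FreeBSD ones. A hedged usage sketch; the real prototypes live in kboot's host_syscall.h and may differ:

    #include <sys/types.h>

    ssize_t host_read(int fd, void *buf, size_t nbyte);
    ssize_t host_write(int fd, const void *buf, size_t nbyte);

    static void
    host_log_sketch(const char *msg, size_t len)
    {
            (void)host_write(2, msg, len);      /* fd 2: the host's stderr */
    }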
Index: head/stand/powerpc/uboot/start.S
===================================================================
--- head/stand/powerpc/uboot/start.S	(revision 368353)
+++ head/stand/powerpc/uboot/start.S	(revision 368354)
@@ -1,99 +1,100 @@
 /*-
  * Copyright (c) 2007 Semihalf, Rafal Jaworowski
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #include <machine/asm.h>

 /*
  * Entry point to the loader that U-Boot passes control to.
  */
         .text
         .globl _start
 _start:
         /* Hint where to look for the API signature */
         lis %r11, uboot_address@ha
         addi %r11, %r11, uboot_address@l
         stw %r1, 0(%r11)
         /* Save U-Boot's r14 and r30 */
         lis %r11, saved_regs@ha
         addi %r11, %r11, saved_regs@l
         stw %r14, 0(%r11)
         stw %r30, 4(%r11)
         /* Disable interrupts */
         mfmsr %r11
         andi. %r11, %r11, ~0x8000@l
         mtmsr %r11
         b main

 /*
  * syscall()
  */
 ENTRY(syscall)
         stwu %r1, -32(%r1)
         mflr %r0
         stw %r14, 8(%r1)
         stw %r30, 12(%r1)
         stw %r0, 36(%r1)

         /* Restore U-Boot's r14 and r30 */
         lis %r11, saved_regs@ha
         addi %r11, %r11, saved_regs@l
         lwz %r14, 0(%r11)
         lwz %r30, 4(%r11)

         /* Enable interrupts */
         mfmsr %r11
         ori %r11, %r11, 0x8000@l
         mtmsr %r11

         /* Call into U-Boot */
         lis %r11, syscall_ptr@ha
         addi %r11, %r11, syscall_ptr@l
         lwz %r11, 0(%r11)
         mtctr %r11
         bctrl

         /* Disable interrupts */
         mfmsr %r11
         andi. %r11, %r11, ~0x8000@l
         mtmsr %r11

         /* Epilogue */
         lwz %r11, 0(%r1)
         lwz %r0, 4(%r11)
         mtlr %r0
         lwz %r14, 8(%r1)
         lwz %r30, 12(%r1)
         mr %r1, %r11
         blr
+END(syscall)

 /*
  * Data section
  */
         .data
 GLOBAL(syscall_ptr)
         .long 0
 GLOBAL(saved_regs)
         .long 0 /* R14 */
         .long 0 /* R30 */
 GLOBAL(uboot_address)
         .long 0
Index: head/sys/arm64/include/asm.h
===================================================================
--- head/sys/arm64/include/asm.h	(revision 368353)
+++ head/sys/arm64/include/asm.h	(revision 368354)
@@ -1,105 +1,106 @@
 /*-
  * Copyright (c) 2014 Andrew Turner
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #ifndef _MACHINE_ASM_H_
 #define _MACHINE_ASM_H_

 #undef __FBSDID
 #if !defined(lint) && !defined(STRIP_FBSDID)
 #define __FBSDID(s)     .ident s
 #else
 #define __FBSDID(s)     /* nothing */
 #endif

 #define _C_LABEL(x)     x

 #define ENTRY(sym) \
-        .text; .globl sym; .align 2; .type sym,#function; sym:
+        .text; .globl sym; .align 2; .type sym,#function; sym: \
+        .cfi_startproc
 #define EENTRY(sym) \
         .globl sym; sym:
-#define END(sym) .size sym, . - sym
+#define END(sym) .cfi_endproc; .size sym, . - sym
 #define EEND(sym)

 #define WEAK_REFERENCE(sym, alias) \
         .weak alias; \
         .set alias,sym

 #define UINT64_C(x)     (x)

 #if defined(PIC)
 #define PIC_SYM(x,y)    x ## @ ## y
 #else
 #define PIC_SYM(x,y)    x
 #endif

 /* Alias for link register x30 */
 #define lr              x30

 /*
  * Sets the trap fault handler. The exception handler will return to the
  * address in the handler register on a data abort or the xzr register to
  * clear the handler. The tmp parameter should be a register able to hold
  * the temporary data.
  */
 #define SET_FAULT_HANDLER(handler, tmp) \
         ldr tmp, [x18, #PC_CURTHREAD]; /* Load curthread */ \
         ldr tmp, [tmp, #TD_PCB];       /* Load the pcb */ \
         str handler, [tmp, #PCB_ONFAULT] /* Set the handler */

 #define ENTER_USER_ACCESS(reg, tmp) \
         ldr tmp, =has_pan;      /* Get the addr of has_pan */ \
         ldr reg, [tmp];         /* Read it */ \
         cbz reg, 997f;          /* If no PAN skip */ \
         .inst 0xd500409f | (0 << 8); /* Clear PAN */ \
 997:

 #define EXIT_USER_ACCESS(reg) \
         cbz reg, 998f;          /* If no PAN skip */ \
         .inst 0xd500409f | (1 << 8); /* Set PAN */ \
 998:

 #define EXIT_USER_ACCESS_CHECK(reg, tmp) \
         ldr tmp, =has_pan;      /* Get the addr of has_pan */ \
         ldr reg, [tmp];         /* Read it */ \
         cbz reg, 999f;          /* If no PAN skip */ \
         .inst 0xd500409f | (1 << 8); /* Set PAN */ \
 999:

 /*
  * Some AArch64 CPUs speculate past an eret instruction. As the user may
  * control the registers at this point add a speculation barrier usable on
  * all AArch64 CPUs after the eret instruction.
  * TODO: ARMv8.5 adds a specific instruction for this, we could use that
  * if we know we are running on something that supports it.
  */
 #define ERET \
         eret; \
         dsb sy; \
         isb

 #endif /* _MACHINE_ASM_H_ */
Index: head/sys/arm64/linux/linux_locore.asm
===================================================================
--- head/sys/arm64/linux/linux_locore.asm	(revision 368353)
+++ head/sys/arm64/linux/linux_locore.asm	(revision 368354)
@@ -1,63 +1,67 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 Turing Robotic Industries Inc.
 * Copyright (C) 2020 Andrew Turner
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * arm64 Linux VDSO implementation.
 */

#include
#include

	.data

	.globl linux_platform
linux_platform:
	.asciz "arm64"

	.text

ENTRY(__kernel_rt_sigreturn)
	brk	#0	/* LINUXTODO: implement __kernel_rt_sigreturn */
	ret
+END(__kernel_rt_sigreturn)

ENTRY(__kernel_gettimeofday)
	ldr	x8, =LINUX_SYS_gettimeofday
	svc	#0
	ret
+END(__kernel_gettimeofday)

ENTRY(__kernel_clock_gettime)
	ldr	x8, =LINUX_SYS_linux_clock_gettime
	svc	#0
	ret
+END(__kernel_clock_gettime)

ENTRY(__kernel_clock_getres)
	brk	#0	/* LINUXTODO: implement __kernel_clock_getres */
	ret
+END(__kernel_clock_getres)

Index: head/sys/arm64/linux/linux_support.s
===================================================================
--- head/sys/arm64/linux/linux_support.s	(revision 368353)
+++ head/sys/arm64/linux/linux_support.s	(revision 368354)
@@ -1,57 +1,62 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 Turing Robotic Industries Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "linux_assym.h"
#include
#include "assym.inc"

/*
 * LINUXTODO: implement futex_*
 */
ENTRY(futex_xchgl)
	brk	#0
	ret
+END(futex_xchgl)

ENTRY(futex_addl)
	brk	#0
	ret
+END(futex_addl)

ENTRY(futex_orl)
	brk	#0
	ret
+END(futex_orl)

ENTRY(futex_andl)
	brk	#0
	ret
+END(futex_andl)

ENTRY(futex_xorl)
	brk	#0
	ret
+END(futex_xorl)
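The VDSO stubs above follow the arm64 Linux system-call convention: the call number is loaded into x8, the arguments are already in x0-x5, svc #0 traps into the kernel, and the result comes back in x0. A sketch of one more stub in the same style (__kernel_example and LINUX_SYS_linux_getpid are assumed names for illustration, not part of this diff):

ENTRY(__kernel_example)
	ldr	x8, =LINUX_SYS_linux_getpid	/* assumed syscall-number constant */
	svc	#0				/* enter the kernel */
	ret					/* result in x0 */
END(__kernel_example)
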
Index: head/sys/crypto/des/arch/i386/des_enc.S
===================================================================
--- head/sys/crypto/des/arch/i386/des_enc.S	(revision 368353)
+++ head/sys/crypto/des/arch/i386/des_enc.S	(revision 368354)
@@ -1,2815 +1,2811 @@
/*	$NetBSD: des_enc.S,v 1.1 2001/09/09 11:01:02 tls Exp $	*/
/*	$FreeBSD$	*/

/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to. The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code. The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed. i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
*/ /* * Modified from the output of `perl des686.pl elf' by * Thor Lancelot Simon */ #include #define _C_LABEL CNAME ENTRY(des_encrypt1) pushl %esi pushl %edi /* Load the 2 words */ movl 12(%esp), %esi xorl %ecx, %ecx pushl %ebx pushl %ebp movl (%esi), %eax movl 28(%esp), %ebx movl 4(%esi), %edi /* IP */ roll $4, %eax movl %eax, %esi xorl %edi, %eax andl $0xf0f0f0f0, %eax xorl %eax, %esi xorl %eax, %edi roll $20, %edi movl %edi, %eax xorl %esi, %edi andl $0xfff0000f, %edi xorl %edi, %eax xorl %edi, %esi roll $14, %eax movl %eax, %edi xorl %esi, %eax andl $0x33333333, %eax xorl %eax, %edi xorl %eax, %esi roll $22, %esi movl %esi, %eax xorl %edi, %esi andl $0x03fc03fc, %esi xorl %esi, %eax xorl %esi, %edi roll $9, %eax movl %eax, %esi xorl %edi, %eax andl $0xaaaaaaaa, %eax xorl %eax, %esi xorl %eax, %edi .byte 209 .byte 199 # roll $1 %edi movl 24(%esp), %ebp cmpl $0, %ebx je .L000start_decrypt /* Round 0 */ movl (%ebp), %eax xorl %ebx, %ebx movl 4(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 1 */ movl 8(%ebp), %eax xorl %ebx, %ebx movl 12(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 2 */ movl 16(%ebp), %eax xorl %ebx, %ebx movl 20(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 3 */ movl 24(%ebp), %eax xorl %ebx, %ebx movl 28(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl 
xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 4 */ movl 32(%ebp), %eax xorl %ebx, %ebx movl 36(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 5 */ movl 40(%ebp), %eax xorl %ebx, %ebx movl 44(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 6 */ movl 48(%ebp), %eax xorl %ebx, %ebx movl 52(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 7 */ movl 56(%ebp), %eax xorl %ebx, %ebx movl 60(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 
0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 8 */ movl 64(%ebp), %eax xorl %ebx, %ebx movl 68(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 9 */ movl 72(%ebp), %eax xorl %ebx, %ebx movl 76(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 10 */ movl 80(%ebp), %eax xorl %ebx, %ebx movl 84(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 11 */ movl 88(%ebp), %eax xorl %ebx, %ebx movl 92(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 12 */ movl 96(%ebp), %eax xorl %ebx, %ebx movl 100(%ebp), %edx xorl %esi, %eax xorl %esi, 
%edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 13 */ movl 104(%ebp), %eax xorl %ebx, %ebx movl 108(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 14 */ movl 112(%ebp), %eax xorl %ebx, %ebx movl 116(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 15 */ movl 120(%ebp), %eax xorl %ebx, %ebx movl 124(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi jmp .L001end .L000start_decrypt: /* Round 15 */ movl 120(%ebp), %eax xorl %ebx, %ebx movl 124(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp 
xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 14 */ movl 112(%ebp), %eax xorl %ebx, %ebx movl 116(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 13 */ movl 104(%ebp), %eax xorl %ebx, %ebx movl 108(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 12 */ movl 96(%ebp), %eax xorl %ebx, %ebx movl 100(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 11 */ movl 88(%ebp), %eax xorl %ebx, %ebx movl 92(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 
0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 10 */ movl 80(%ebp), %eax xorl %ebx, %ebx movl 84(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 9 */ movl 72(%ebp), %eax xorl %ebx, %ebx movl 76(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 8 */ movl 64(%ebp), %eax xorl %ebx, %ebx movl 68(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 7 */ movl 56(%ebp), %eax xorl %ebx, %ebx movl 60(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 6 */ movl 48(%ebp), %eax xorl %ebx, %ebx movl 52(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl 
_C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 5 */ movl 40(%ebp), %eax xorl %ebx, %ebx movl 44(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 4 */ movl 32(%ebp), %eax xorl %ebx, %ebx movl 36(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 3 */ movl 24(%ebp), %eax xorl %ebx, %ebx movl 28(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 2 */ movl 16(%ebp), %eax xorl %ebx, %ebx movl 20(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl 
andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 1 */ movl 8(%ebp), %eax xorl %ebx, %ebx movl 12(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 0 */ movl (%ebp), %eax xorl %ebx, %ebx movl 4(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi .L001end: /* FP */ movl 20(%esp), %edx .byte 209 .byte 206 # rorl $1 %esi movl %edi, %eax xorl %esi, %edi andl $0xaaaaaaaa, %edi xorl %edi, %eax xorl %edi, %esi roll $23, %eax movl %eax, %edi xorl %esi, %eax andl $0x03fc03fc, %eax xorl %eax, %edi xorl %eax, %esi roll $10, %edi movl %edi, %eax xorl %esi, %edi andl $0x33333333, %edi xorl %edi, %eax xorl %edi, %esi roll $18, %esi movl %esi, %edi xorl %eax, %esi andl $0xfff0000f, %esi xorl %esi, %edi xorl %esi, %eax roll $12, %edi movl %edi, %esi xorl %eax, %edi andl $0xf0f0f0f0, %edi xorl %edi, %esi xorl %edi, %eax rorl $4, %eax movl %eax, (%edx) movl %esi, 4(%edx) popl %ebp popl %ebx popl %edi popl %esi ret -.L_des_encrypt1_end: - .size _C_LABEL(des_encrypt1),.L_des_encrypt1_end-_C_LABEL(des_encrypt1) +END(des_encrypt1) ENTRY(des_encrypt2) pushl %esi pushl %edi /* Load the 2 words */ movl 12(%esp), %eax xorl %ecx, %ecx pushl %ebx pushl %ebp movl (%eax), %esi movl 28(%esp), %ebx roll $3, %esi movl 4(%eax), %edi roll $3, %edi movl 24(%esp), %ebp cmpl $0, %ebx je .L002start_decrypt /* Round 0 */ movl (%ebp), %eax xorl %ebx, %ebx movl 4(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 
0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 1 */ movl 8(%ebp), %eax xorl %ebx, %ebx movl 12(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 2 */ movl 16(%ebp), %eax xorl %ebx, %ebx movl 20(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 3 */ movl 24(%ebp), %eax xorl %ebx, %ebx movl 28(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 4 */ movl 32(%ebp), %eax xorl %ebx, %ebx movl 36(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 5 */ movl 40(%ebp), %eax xorl %ebx, %ebx movl 44(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl 
movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 6 */ movl 48(%ebp), %eax xorl %ebx, %ebx movl 52(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 7 */ movl 56(%ebp), %eax xorl %ebx, %ebx movl 60(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 8 */ movl 64(%ebp), %eax xorl %ebx, %ebx movl 68(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 9 */ movl 72(%ebp), %eax xorl %ebx, %ebx movl 76(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 
24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 10 */ movl 80(%ebp), %eax xorl %ebx, %ebx movl 84(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 11 */ movl 88(%ebp), %eax xorl %ebx, %ebx movl 92(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 12 */ movl 96(%ebp), %eax xorl %ebx, %ebx movl 100(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 13 */ movl 104(%ebp), %eax xorl %ebx, %ebx movl 108(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 14 */ movl 
112(%ebp), %eax xorl %ebx, %ebx movl 116(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 15 */ movl 120(%ebp), %eax xorl %ebx, %ebx movl 124(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi jmp .L003end .L002start_decrypt: /* Round 15 */ movl 120(%ebp), %eax xorl %ebx, %ebx movl 124(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 14 */ movl 112(%ebp), %eax xorl %ebx, %ebx movl 116(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 13 */ movl 104(%ebp), %eax xorl %ebx, %ebx movl 108(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl 
%ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 12 */ movl 96(%ebp), %eax xorl %ebx, %ebx movl 100(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 11 */ movl 88(%ebp), %eax xorl %ebx, %ebx movl 92(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 10 */ movl 80(%ebp), %eax xorl %ebx, %ebx movl 84(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 9 */ movl 72(%ebp), %eax xorl %ebx, %ebx movl 76(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 
0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 8 */ movl 64(%ebp), %eax xorl %ebx, %ebx movl 68(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 7 */ movl 56(%ebp), %eax xorl %ebx, %ebx movl 60(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 6 */ movl 48(%ebp), %eax xorl %ebx, %ebx movl 52(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 5 */ movl 40(%ebp), %eax xorl %ebx, %ebx movl 44(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 4 */ movl 32(%ebp), %eax xorl %ebx, %ebx movl 36(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl 
movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 3 */ movl 24(%ebp), %eax xorl %ebx, %ebx movl 28(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 2 */ movl 16(%ebp), %eax xorl %ebx, %ebx movl 20(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi /* Round 1 */ movl 8(%ebp), %eax xorl %ebx, %ebx movl 12(%ebp), %edx xorl %esi, %eax xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %edi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %edi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %edi movl 24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %edi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %edi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %edi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %edi /* Round 0 */ movl (%ebp), %eax xorl %ebx, %ebx movl 4(%ebp), %edx xorl %edi, %eax xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx movl _C_LABEL(des_SPtrans)(%ebx),%ebp movb %dl, %bl xorl %ebp, %esi movl 0x200+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movb %dh, %cl shrl $16, %eax movl 0x100+_C_LABEL(des_SPtrans)(%ebx),%ebp xorl %ebp, %esi movb %ah, %bl shrl $16, %edx movl 0x300+_C_LABEL(des_SPtrans)(%ecx),%ebp xorl %ebp, %esi movl 
24(%esp), %ebp movb %dh, %cl andl $0xff, %eax andl $0xff, %edx movl 0x600+_C_LABEL(des_SPtrans)(%ebx),%ebx xorl %ebx, %esi movl 0x700+_C_LABEL(des_SPtrans)(%ecx),%ebx xorl %ebx, %esi movl 0x400+_C_LABEL(des_SPtrans)(%eax),%ebx xorl %ebx, %esi movl 0x500+_C_LABEL(des_SPtrans)(%edx),%ebx xorl %ebx, %esi .L003end: /* Fixup */ rorl $3, %edi movl 20(%esp), %eax rorl $3, %esi movl %edi, (%eax) movl %esi, 4(%eax) popl %ebp popl %ebx popl %edi popl %esi ret -.L_des_encrypt2_end: - .size _C_LABEL(des_encrypt2),.L_des_encrypt2_end-_C_LABEL(des_encrypt2) +END(des_encrypt2) ENTRY(des_encrypt3) pushl %ebx movl 8(%esp), %ebx pushl %ebp pushl %esi pushl %edi /* Load the data words */ movl (%ebx), %edi movl 4(%ebx), %esi subl $12, %esp /* IP */ roll $4, %edi movl %edi, %edx xorl %esi, %edi andl $0xf0f0f0f0, %edi xorl %edi, %edx xorl %edi, %esi roll $20, %esi movl %esi, %edi xorl %edx, %esi andl $0xfff0000f, %esi xorl %esi, %edi xorl %esi, %edx roll $14, %edi movl %edi, %esi xorl %edx, %edi andl $0x33333333, %edi xorl %edi, %esi xorl %edi, %edx roll $22, %edx movl %edx, %edi xorl %esi, %edx andl $0x03fc03fc, %edx xorl %edx, %edi xorl %edx, %esi roll $9, %edi movl %edi, %edx xorl %esi, %edi andl $0xaaaaaaaa, %edi xorl %edi, %edx xorl %edi, %esi rorl $3, %edx rorl $2, %esi movl %esi, 4(%ebx) movl 36(%esp), %eax movl %edx, (%ebx) movl 40(%esp), %edi movl 44(%esp), %esi movl $1, 8(%esp) movl %eax, 4(%esp) movl %ebx, (%esp) call _C_LABEL(des_encrypt2) movl $0, 8(%esp) movl %edi, 4(%esp) movl %ebx, (%esp) call _C_LABEL(des_encrypt2) movl $1, 8(%esp) movl %esi, 4(%esp) movl %ebx, (%esp) call _C_LABEL(des_encrypt2) addl $12, %esp movl (%ebx), %edi movl 4(%ebx), %esi /* FP */ roll $2, %esi roll $3, %edi movl %edi, %eax xorl %esi, %edi andl $0xaaaaaaaa, %edi xorl %edi, %eax xorl %edi, %esi roll $23, %eax movl %eax, %edi xorl %esi, %eax andl $0x03fc03fc, %eax xorl %eax, %edi xorl %eax, %esi roll $10, %edi movl %edi, %eax xorl %esi, %edi andl $0x33333333, %edi xorl %edi, %eax xorl %edi, %esi roll $18, %esi movl %esi, %edi xorl %eax, %esi andl $0xfff0000f, %esi xorl %esi, %edi xorl %esi, %eax roll $12, %edi movl %edi, %esi xorl %eax, %edi andl $0xf0f0f0f0, %edi xorl %edi, %esi xorl %edi, %eax rorl $4, %eax movl %eax, (%ebx) movl %esi, 4(%ebx) popl %edi popl %esi popl %ebp popl %ebx ret -.L_des_encrypt3_end: - .size _C_LABEL(des_encrypt3),.L_des_encrypt3_end-_C_LABEL(des_encrypt3) +END(des_encrypt3) ENTRY(des_decrypt3) pushl %ebx movl 8(%esp), %ebx pushl %ebp pushl %esi pushl %edi /* Load the data words */ movl (%ebx), %edi movl 4(%ebx), %esi subl $12, %esp /* IP */ roll $4, %edi movl %edi, %edx xorl %esi, %edi andl $0xf0f0f0f0, %edi xorl %edi, %edx xorl %edi, %esi roll $20, %esi movl %esi, %edi xorl %edx, %esi andl $0xfff0000f, %esi xorl %esi, %edi xorl %esi, %edx roll $14, %edi movl %edi, %esi xorl %edx, %edi andl $0x33333333, %edi xorl %edi, %esi xorl %edi, %edx roll $22, %edx movl %edx, %edi xorl %esi, %edx andl $0x03fc03fc, %edx xorl %edx, %edi xorl %edx, %esi roll $9, %edi movl %edi, %edx xorl %esi, %edi andl $0xaaaaaaaa, %edi xorl %edi, %edx xorl %edi, %esi rorl $3, %edx rorl $2, %esi movl %esi, 4(%ebx) movl 36(%esp), %esi movl %edx, (%ebx) movl 40(%esp), %edi movl 44(%esp), %eax movl $0, 8(%esp) movl %eax, 4(%esp) movl %ebx, (%esp) call _C_LABEL(des_encrypt2) movl $1, 8(%esp) movl %edi, 4(%esp) movl %ebx, (%esp) call _C_LABEL(des_encrypt2) movl $0, 8(%esp) movl %esi, 4(%esp) movl %ebx, (%esp) call _C_LABEL(des_encrypt2) addl $12, %esp movl (%ebx), %edi movl 4(%ebx), %esi /* FP */ roll $2, %esi roll $3, %edi movl 
%edi, %eax xorl %esi, %edi andl $0xaaaaaaaa, %edi xorl %edi, %eax xorl %edi, %esi roll $23, %eax movl %eax, %edi xorl %esi, %eax andl $0x03fc03fc, %eax xorl %eax, %edi xorl %eax, %esi roll $10, %edi movl %edi, %eax xorl %esi, %edi andl $0x33333333, %edi xorl %edi, %eax xorl %edi, %esi roll $18, %esi movl %esi, %edi xorl %eax, %esi andl $0xfff0000f, %esi xorl %esi, %edi xorl %esi, %eax roll $12, %edi movl %edi, %esi xorl %eax, %edi andl $0xf0f0f0f0, %edi xorl %edi, %esi xorl %edi, %eax rorl $4, %eax movl %eax, (%ebx) movl %esi, 4(%ebx) popl %edi popl %esi popl %ebp popl %ebx ret -.L_des_decrypt3_end: - .size _C_LABEL(des_decrypt3),.L_des_decrypt3_end-_C_LABEL(des_decrypt3) +END(des_decrypt3) Index: head/sys/i386/bios/smapi_bios.S =================================================================== --- head/sys/i386/bios/smapi_bios.S (revision 368353) +++ head/sys/i386/bios/smapi_bios.S (revision 368354) @@ -1,40 +1,41 @@ #include __FBSDID("$FreeBSD$"); /* * This is cribbed from the Linux thinkpad-4.1 driver by * Thomas Hood. */ smapi32_entry: /* far pointer to SMAPI entry */ .globl smapi32_offset smapi32_offset: .long 0x00000000 /* set by caller */ smapi32_segment: .word 0x0000 /* %cs stored here */ .text /* * smapi32(input_param, output_param) * struct smapi_bios_parameter *input_parm; * struct smapi_bios_parameter *output_parm; * * stack frame: * 0x00 : saved ebp * 0x04 : return EIP * 0x08 : input_parm * 0x0c : output_parm */ ENTRY(smapi32) pushl %ebp /* Save frame */ movl %esp,%ebp pushl %ds pushl 0x0c(%ebp) /* Output Param */ pushl %ds pushl 0x08(%ebp) /* Input Param */ movl $0,%eax /* Clear EAX (return 0) */ movw %cs,smapi32_segment /* Save CS */ lcall *(smapi32_offset) leave ret +END(smapi32) Index: head/sys/i386/include/asm.h =================================================================== --- head/sys/i386/include/asm.h (revision 368353) +++ head/sys/i386/include/asm.h (revision 368354) @@ -1,122 +1,133 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)DEFS.h 5.1 (Berkeley) 4/23/90 * $FreeBSD$ */ #ifndef _MACHINE_ASM_H_ #define _MACHINE_ASM_H_ #include #ifdef PIC #define PIC_PROLOGUE \ pushl %ebx; \ call 1f; \ 1: \ popl %ebx; \ addl $_GLOBAL_OFFSET_TABLE_+[.-1b],%ebx #define PIC_EPILOGUE \ popl %ebx #define PIC_PLT(x) x@PLT #define PIC_GOT(x) x@GOT(%ebx) #define PIC_GOTOFF(x) x@GOTOFF(%ebx) #else #define PIC_PROLOGUE #define PIC_EPILOGUE #define PIC_PLT(x) x #define PIC_GOTOFF(x) x #endif /* * CNAME and HIDENAME manage the relationship between symbol names in C * and the equivalent assembly language names. CNAME is given a name as * it would be used in a C program. It expands to the equivalent assembly * language name. HIDENAME is given an assembly-language name, and expands * to a possibly-modified form that will be invisible to C programs. */ #define CNAME(csym) csym #define HIDENAME(asmsym) .asmsym /* XXX should use .p2align 4,0x90 for -m486. */ #define _START_ENTRY .text; .p2align 2,0x90 #define _ENTRY(x) _START_ENTRY; \ - .globl CNAME(x); .type CNAME(x),@function; CNAME(x): -#define END(x) .size x, . - x + .globl CNAME(x); .type CNAME(x),@function; CNAME(x): \ + .cfi_startproc +#define END(x) .cfi_endproc; .size x, . - x #ifdef PROF #define ALTENTRY(x) _ENTRY(x); \ - pushl %ebp; movl %esp,%ebp; \ + pushl %ebp; \ + .cfi_def_cfa_offset 8; \ + .cfi_offset %ebp, -8; \ + movl %esp,%ebp; \ call PIC_PLT(HIDENAME(mcount)); \ popl %ebp; \ + .cfi_restore %ebp; \ + .cfi_def_cfa_offset 4; \ jmp 9f #define ENTRY(x) _ENTRY(x); \ - pushl %ebp; movl %esp,%ebp; \ + pushl %ebp; \ + .cfi_def_cfa_offset 8; \ + .cfi_offset %ebp, -8; \ + movl %esp,%ebp; \ call PIC_PLT(HIDENAME(mcount)); \ popl %ebp; \ + .cfi_restore %ebp; \ + .cfi_def_cfa_offset 4; \ 9: #else #define ALTENTRY(x) _ENTRY(x) #define ENTRY(x) _ENTRY(x) #endif /* * WEAK_REFERENCE(): create a weak reference alias from sym. * The macro is not a general asm macro that takes arbitrary names, * but one that takes only C names. It does the non-null name * translation inside the macro. */ #define WEAK_REFERENCE(sym, alias) \ .weak CNAME(alias); \ .equ CNAME(alias),CNAME(sym) /* * STRONG_ALIAS: create a strong alias. */ #define STRONG_ALIAS(alias,sym) \ .globl alias; \ alias = sym #define RCSID(x) .text; .asciz x #undef __FBSDID #if !defined(STRIP_FBSDID) #define __FBSDID(s) .ident s #else #define __FBSDID(s) /* nothing */ #endif /* not STRIP_FBSDID */ #endif /* !_MACHINE_ASM_H_ */ Index: head/sys/powerpc/aim/locore.S =================================================================== --- head/sys/powerpc/aim/locore.S (revision 368353) +++ head/sys/powerpc/aim/locore.S (revision 368354) @@ -1,15 +1,16 @@ /* $FreeBSD$ */ #ifdef __powerpc64__ #include #else #include #endif /* * XXX: This should be moved to a shared AIM/booke asm file, if one ever is * created. 
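 * (As the booke copy of this routine notes, the SPR number in the mfspr below is assumed to be patched at run time by the caller; the 0 is only a placeholder.)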
*/ ENTRY(get_spr) mfspr %r3, 0 blr +END(get_spr) Index: head/sys/powerpc/aim/locore64.S =================================================================== --- head/sys/powerpc/aim/locore64.S (revision 368353) +++ head/sys/powerpc/aim/locore64.S (revision 368354) @@ -1,274 +1,277 @@ /* $FreeBSD$ */ /*- * Copyright (C) 2010-2016 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include "assym.inc" #include #include #include #include #include #include #ifdef _CALL_ELF .abiversion _CALL_ELF #endif /* Glue for linker script */ .globl kernbase .set kernbase, KERNBASE /* * Globals */ .data .align 3 GLOBAL(__startkernel) .llong begin GLOBAL(__endkernel) .llong end GLOBAL(can_wakeup) .llong 0x0 .align 4 #define TMPSTKSZ 16384 /* 16K temporary stack */ GLOBAL(tmpstk) .space TMPSTKSZ TOC_ENTRY(tmpstk) TOC_ENTRY(can_wakeup) #ifdef KDB #define TRAPSTKSZ 8192 /* 8k trap stack */ GLOBAL(trapstk) .space TRAPSTKSZ TOC_ENTRY(trapstk) #endif /* * Entry point for bootloaders that do not fully implement ELF and start * at the beginning of the image (kexec, notably). In its own section so * that it ends up before any linker-generated call stubs and actually at * the beginning of the image. kexec on some systems also enters at * (start of image) + 0x60, so put a spin loop there. */ .section ".text.kboot", "x", @progbits kbootentry: b __start . = kbootentry + 0x40 /* Magic address used in platform layer */ .global smp_spin_sem ap_kexec_spin_sem: .long -1 . = kbootentry + 0x60 /* Entry point for kexec APs */ ap_kexec_start: /* At 0x60 past start, copied to 0x60 by kexec */ /* r3 set to CPU ID by kexec */ /* Invalidate icache for low-memory copy and jump there */ li %r0,0x80 dcbst 0,%r0 sync icbi 0,%r0 isync ba 0x80 /* Absolute branch to next inst */ . = kbootentry + 0x80 /* Aligned to cache line */ 1: or 31,31,31 /* yield */ sync lwz %r1,0x40(0) /* Spin on ap_kexec_spin_sem */ cmpw %r1,%r3 /* Until it equals our CPU ID */ bne 1b /* Released */ or 2,2,2 /* unyield */ /* Make sure that it will be software reset. Clear SRR1 */ li %r1,0 mtsrr1 %r1 ba EXC_RST /* * Now start the real text section */ .text .globl btext btext: /* * Main kernel entry point. 
* * Calling convention: * r3: Flattened Device Tree pointer (or zero) * r4: ignored * r5: OF client interface pointer (or zero) * r6: Loader metadata pointer (or zero) * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata */ .text _NAKED_ENTRY(__start) #ifdef __LITTLE_ENDIAN__ RETURN_TO_NATIVE_ENDIAN #endif /* Set 64-bit mode if not yet set before branching to C */ mfmsr %r20 li %r21,1 insrdi %r20,%r21,1,0 mtmsrd %r20 isync nop /* Make this block a multiple of 8 bytes */ /* Set up the TOC pointer */ b 0f .align 3 0: nop bl 1f .llong __tocbase + 0x8000 - . 1: mflr %r2 ld %r1,0(%r2) add %r2,%r1,%r2 /* Get load offset */ ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */ subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */ /* Set up the stack pointer */ bl 1f .llong tmpstk + TMPSTKSZ - 96 - . 1: mflr %r30 ld %r1,0(%r30) add %r1,%r1,%r30 nop /* Relocate kernel */ std %r3,48(%r1) std %r4,56(%r1) std %r5,64(%r1) std %r6,72(%r1) std %r7,80(%r1) bl 1f .llong _DYNAMIC-. 1: mflr %r3 ld %r4,0(%r3) add %r3,%r4,%r3 mr %r4,%r31 bl elf_reloc_self nop ld %r3,48(%r1) ld %r4,56(%r1) ld %r5,64(%r1) ld %r6,72(%r1) ld %r7,80(%r1) /* Begin CPU init */ mr %r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */ bl powerpc_init nop /* Set stack pointer to new value and branch to mi_startup */ mr %r1, %r3 li %r3, 0 std %r3, 0(%r1) bl mi_startup nop /* Unreachable */ b . +_END(__start) ASENTRY_NOPROF(__restartkernel_virtual) /* * When coming in via this entry point, we need to alter the SLB to * shadow the segment register emulation entries in DMAP space. * We need to do this dance because we are running with virtual-mode * OpenFirmware and have not yet taken over the MMU. * * Assumptions: * 1) The kernel is currently identity-mapped. * 2) We are currently executing at an address compatible with * real mode. * 3) The first 16 SLB entries are emulating SRs. * 4) The rest of the SLB is not in use. * 5) OpenFirmware is not manipulating the SLB at runtime. * 6) We are running on 64-bit AIM. * * Tested on a G5. */ mfmsr %r14 /* Switch to real mode because we are about to mess with the SLB. */ andi. %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l mtmsr %r14 isync /* Prepare variables for later use. */ li %r14, 0 li %r18, 0 oris %r18, %r18, 0xc000 sldi %r18, %r18, 32 /* r18: 0xc000000000000000 */ 1: /* * Loop over the first 16 SLB entries. * Offset the SLBE into the DMAP, add 16 to the index, and write * it back to the SLB. */ /* XXX add more safety checks */ slbmfev %r15, %r14 slbmfee %r16, %r14 or %r16, %r16, %r14 /* index is 0-15 */ ori %r16, %r16, 0x10 /* add 16 to index. */ or %r16, %r16, %r18 /* SLBE DMAP offset */ rldicr %r17, %r16, 0, 37 /* Invalidation SLBE */ isync slbie %r17 /* isync */ slbmte %r15, %r16 isync addi %r14, %r14, 1 cmpdi %r14, 16 blt 1b +ASEND(__restartkernel_virtual) ASENTRY_NOPROF(__restartkernel) /* * r3-r7: arguments to go to __start * r8: offset from current kernel address to apply * r9: MSR to set when (atomically) jumping to __start + r8 */ mtsrr1 %r9 bl 1f 1: mflr %r25 add %r25,%r8,%r25 addi %r25,%r25,2f-1b mtsrr0 %r25 rfid 2: bl __start nop +ASEND(__restartkernel) #include Index: head/sys/powerpc/aim/trap_subr64.S =================================================================== --- head/sys/powerpc/aim/trap_subr64.S (revision 368353) +++ head/sys/powerpc/aim/trap_subr64.S (revision 368354) @@ -1,999 +1,1000 @@ /* $FreeBSD$ */ /* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. 
* Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * NOTICE: This is not a standalone file. to use it, #include it in * your port's locore.S, like so: * * #include */ /* Locate the per-CPU data structure */ #define GET_CPUINFO(r) \ mfsprg0 r #define GET_TOCBASE(r) \ lis r,DMAP_BASE_ADDRESS@highesta; /* To real-mode alias/dmap */ \ sldi r,r,32; \ ori r,r,TRAP_TOCBASE; /* Magic address for TOC */ \ ld r,0(r) /* * Restore SRs for a pmap * * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache */ /* * User SRs are loaded through a pointer to the current pmap. 
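 * PC_USERSLB points at a NULL-terminated array of pointers to { SLBV, SLBE } pairs; the loop below installs each pair with slbmte, using the loop counter as the SLB slot number.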
*/ restore_usersrs: GET_CPUINFO(%r28) ld %r28,PC_USERSLB(%r28) cmpdi %r28, 0 /* If user SLB pointer NULL, exit */ beqlr li %r29, 0 /* Set the counter to zero */ slbia slbmfee %r31,%r29 clrrdi %r31,%r31,28 slbie %r31 1: ld %r31, 0(%r28) /* Load SLB entry pointer */ cmpdi %r31, 0 /* If NULL, stop */ beqlr ld %r30, 0(%r31) /* Load SLBV */ ld %r31, 8(%r31) /* Load SLBE */ or %r31, %r31, %r29 /* Set SLBE slot */ slbmte %r30, %r31 /* Install SLB entry */ addi %r28, %r28, 8 /* Advance pointer */ addi %r29, %r29, 1 b 1b /* Repeat */ /* * Kernel SRs are loaded directly from the PCPU fields */ restore_kernsrs: GET_CPUINFO(%r28) lwz %r29, PC_FLAGS(%r28) mtcr %r29 btlr 0 addi %r28,%r28,PC_KERNSLB ld %r29,16(%r28) /* One past USER_SLB_SLOT */ cmpdi %r29,0 beqlr /* If first kernel entry is invalid, * SLBs not in use, so exit early */ /* Otherwise, set up SLBs */ li %r29, 0 /* Set the counter to zero */ slbia slbmfee %r31,%r29 clrrdi %r31,%r31,28 slbie %r31 1: cmpdi %r29, USER_SLB_SLOT /* Skip the user slot */ beq- 2f ld %r31, 8(%r28) /* Load SLBE */ cmpdi %r31, 0 /* If SLBE is not valid, stop */ beqlr ld %r30, 0(%r28) /* Load SLBV */ slbmte %r30, %r31 /* Install SLB entry */ 2: addi %r28, %r28, 16 /* Advance pointer */ addi %r29, %r29, 1 cmpdi %r29, 64 /* Repeat if we are not at the end */ blt 1b blr /* * FRAME_SETUP assumes: * SPRG1 SP (1) * SPRG3 trap type * savearea r27-r31,DAR,DSISR (DAR & DSISR only for DSI traps) * r28 LR * r29 CR * r30 scratch * r31 scratch * r1 kernel stack * SRR0/1 as at start of trap * * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse * in any real-mode fault handler, including those handling double faults. */ #define FRAME_SETUP(savearea) \ /* Have to enable translation to allow access of kernel stack: */ \ GET_CPUINFO(%r31); \ mfsrr0 %r30; \ std %r30,(savearea+CPUSAVE_SRR0)(%r31); /* save SRR0 */ \ mfsrr1 %r30; \ std %r30,(savearea+CPUSAVE_SRR1)(%r31); /* save SRR1 */ \ mfsprg1 %r31; /* get saved SP (clears SPRG1) */ \ mfmsr %r30; \ ori %r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */ \ mtmsr %r30; /* stack can now be accessed */ \ isync; \ stdu %r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \ std %r0, FRAME_0+48(%r1); /* save r0 in the trapframe */ \ std %r31,FRAME_1+48(%r1); /* save SP " " */ \ std %r2, FRAME_2+48(%r1); /* save r2 " " */ \ std %r28,FRAME_LR+48(%r1); /* save LR " " */ \ std %r29,FRAME_CR+48(%r1); /* save CR " " */ \ GET_CPUINFO(%r2); \ ld %r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */ \ ld %r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */ \ ld %r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */ \ ld %r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \ ld %r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \ std %r3, FRAME_3+48(%r1); /* save r3-r31 */ \ std %r4, FRAME_4+48(%r1); \ std %r5, FRAME_5+48(%r1); \ std %r6, FRAME_6+48(%r1); \ std %r7, FRAME_7+48(%r1); \ std %r8, FRAME_8+48(%r1); \ std %r9, FRAME_9+48(%r1); \ std %r10, FRAME_10+48(%r1); \ std %r11, FRAME_11+48(%r1); \ std %r12, FRAME_12+48(%r1); \ std %r13, FRAME_13+48(%r1); \ std %r14, FRAME_14+48(%r1); \ std %r15, FRAME_15+48(%r1); \ std %r16, FRAME_16+48(%r1); \ std %r17, FRAME_17+48(%r1); \ std %r18, FRAME_18+48(%r1); \ std %r19, FRAME_19+48(%r1); \ std %r20, FRAME_20+48(%r1); \ std %r21, FRAME_21+48(%r1); \ std %r22, FRAME_22+48(%r1); \ std %r23, FRAME_23+48(%r1); \ std %r24, FRAME_24+48(%r1); \ std %r25, FRAME_25+48(%r1); \ std %r26, FRAME_26+48(%r1); \ std %r27, FRAME_27+48(%r1); \ std %r28, FRAME_28+48(%r1); \ std %r29, 
FRAME_29+48(%r1); \ std %r30, FRAME_30+48(%r1); \ std %r31, FRAME_31+48(%r1); \ ld %r28,(savearea+CPUSAVE_AIM_DAR)(%r2); /* saved DAR */ \ ld %r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\ ld %r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */ \ ld %r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */ \ mfxer %r3; \ mfctr %r4; \ mfsprg3 %r5; \ std %r3, FRAME_XER+48(1); /* save xer/ctr/exc */ \ std %r4, FRAME_CTR+48(1); \ std %r5, FRAME_EXC+48(1); \ std %r28,FRAME_AIM_DAR+48(1); \ std %r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */ \ std %r30,FRAME_SRR0+48(1); \ std %r31,FRAME_SRR1+48(1); \ ld %r13,PC_CURTHREAD(%r2) /* set kernel curthread */ #define FRAME_LEAVE(savearea) \ /* Disable exceptions: */ \ mfmsr %r2; \ andi. %r2,%r2,~PSL_EE@l; \ mtmsr %r2; \ isync; \ /* Now restore regs: */ \ ld %r2,FRAME_SRR0+48(%r1); \ ld %r3,FRAME_SRR1+48(%r1); \ ld %r4,FRAME_CTR+48(%r1); \ ld %r5,FRAME_XER+48(%r1); \ ld %r6,FRAME_LR+48(%r1); \ GET_CPUINFO(%r7); \ std %r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */ \ std %r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */ \ ld %r7,FRAME_CR+48(%r1); \ mtctr %r4; \ mtxer %r5; \ mtlr %r6; \ mtsprg2 %r7; /* save cr */ \ ld %r31,FRAME_31+48(%r1); /* restore r0-31 */ \ ld %r30,FRAME_30+48(%r1); \ ld %r29,FRAME_29+48(%r1); \ ld %r28,FRAME_28+48(%r1); \ ld %r27,FRAME_27+48(%r1); \ ld %r26,FRAME_26+48(%r1); \ ld %r25,FRAME_25+48(%r1); \ ld %r24,FRAME_24+48(%r1); \ ld %r23,FRAME_23+48(%r1); \ ld %r22,FRAME_22+48(%r1); \ ld %r21,FRAME_21+48(%r1); \ ld %r20,FRAME_20+48(%r1); \ ld %r19,FRAME_19+48(%r1); \ ld %r18,FRAME_18+48(%r1); \ ld %r17,FRAME_17+48(%r1); \ ld %r16,FRAME_16+48(%r1); \ ld %r15,FRAME_15+48(%r1); \ ld %r14,FRAME_14+48(%r1); \ ld %r13,FRAME_13+48(%r1); \ ld %r12,FRAME_12+48(%r1); \ ld %r11,FRAME_11+48(%r1); \ ld %r10,FRAME_10+48(%r1); \ ld %r9, FRAME_9+48(%r1); \ ld %r8, FRAME_8+48(%r1); \ ld %r7, FRAME_7+48(%r1); \ ld %r6, FRAME_6+48(%r1); \ ld %r5, FRAME_5+48(%r1); \ ld %r4, FRAME_4+48(%r1); \ ld %r3, FRAME_3+48(%r1); \ ld %r2, FRAME_2+48(%r1); \ ld %r0, FRAME_0+48(%r1); \ ld %r1, FRAME_1+48(%r1); \ /* Can't touch %r1 from here on */ \ mtsprg3 %r3; /* save r3 */ \ /* Disable translation, machine check and recoverability: */ \ mfmsr %r3; \ andi. %r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l; \ mtmsr %r3; \ isync; \ /* Decide whether we return to user mode: */ \ GET_CPUINFO(%r3); \ ld %r3,(savearea+CPUSAVE_SRR1)(%r3); \ mtcr %r3; \ bf 17,1f; /* branch if PSL_PR is false */ \ /* Restore user SRs */ \ GET_CPUINFO(%r3); \ std %r27,(savearea+CPUSAVE_R27)(%r3); \ std %r28,(savearea+CPUSAVE_R28)(%r3); \ std %r29,(savearea+CPUSAVE_R29)(%r3); \ std %r30,(savearea+CPUSAVE_R30)(%r3); \ std %r31,(savearea+CPUSAVE_R31)(%r3); \ lwz %r28,PC_FLAGS(%r3); \ mtcr %r28; \ bt 0, 0f; /* Check to skip restoring SRs. 
*/ \ mflr %r27; /* preserve LR */ \ bl restore_usersrs; /* uses r28-r31 */ \ mtlr %r27; \ 0: \ ld %r31,(savearea+CPUSAVE_R31)(%r3); \ ld %r30,(savearea+CPUSAVE_R30)(%r3); \ ld %r29,(savearea+CPUSAVE_R29)(%r3); \ ld %r28,(savearea+CPUSAVE_R28)(%r3); \ ld %r27,(savearea+CPUSAVE_R27)(%r3); \ 1: mfsprg2 %r3; /* restore cr */ \ mtcr %r3; \ GET_CPUINFO(%r3); \ ld %r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */ \ mtsrr0 %r3; \ GET_CPUINFO(%r3); \ ld %r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */ \ mtsrr1 %r3; \ mfsprg3 %r3 /* restore r3 */ #ifdef KDTRACE_HOOKS .data .globl dtrace_invop_calltrap_addr .align 8 .type dtrace_invop_calltrap_addr, @object .size dtrace_invop_calltrap_addr, 8 dtrace_invop_calltrap_addr: .word 0 .word 0 .text #endif /* * Processor reset exception handler. These are typically * the first instructions the processor executes after a * software reset. We do this in two bits so that we are * not still hanging around in the trap handling region * once the MMU is turned on. */ .globl CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler) .globl CNAME(cpu_wakeup_handler) .p2align 3 CNAME(rstcode): #ifdef __LITTLE_ENDIAN__ /* * XXX This shouldn't be necessary. * * According to the ISA documentation, LE should be set from HILE * or the LPCR ILE bit automatically. However, the entry into this * vector from OPAL_START_CPU does not honor this correctly. * * We should be able to define an alternate entry for opal's * start_kernel_secondary asm code to branch to. */ RETURN_TO_NATIVE_ENDIAN #endif /* * Check if this is software reset or * processor is waking up from power saving mode * It is software reset when 46:47 = 0b00 */ /* 0x00 */ ld %r2,TRAP_GENTRAP(0) /* Real-mode &generictrap */ mfsrr1 %r9 /* Load SRR1 into r9 */ andis. %r9,%r9,0x3 /* Logic AND with 46:47 bits */ beq 2f /* Branch if software reset */ /* 0x10 */ /* Reset was wakeup */ addi %r9,%r2,(cpu_wakeup_handler-generictrap) b 1f /* Was power save, do the wakeup */ /* Reset was software reset */ /* Explicitly set MSR[SF] */ 2: mfmsr %r9 li %r8,1 /* 0x20 */ insrdi %r9,%r8,1,0 mtmsrd %r9 isync addi %r9,%r2,(cpu_reset_handler-generictrap) /* 0x30 */ 1: mtlr %r9 blr /* Branch to either cpu_reset_handler * or cpu_wakeup_handler. */ CNAME(rstcodeend): cpu_reset_handler: GET_TOCBASE(%r2) addis %r1,%r2,TOC_REF(tmpstk)@ha ld %r1,TOC_REF(tmpstk)@l(%r1) /* get new SP */ addi %r1,%r1,(TMPSTKSZ-48) bl CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */ nop lis %r3,1@l bl CNAME(pmap_cpu_bootstrap) /* Turn on virtual memory */ nop bl CNAME(cpudep_ap_bootstrap) /* Set up PCPU and stack */ nop mr %r1,%r3 /* Use new stack */ bl CNAME(cpudep_ap_setup) nop GET_CPUINFO(%r5) ld %r3,(PC_RESTORE)(%r5) cmpldi %cr0,%r3,0 beq %cr0,2f nop li %r4,1 bl CNAME(longjmp) nop 2: #ifdef SMP bl CNAME(machdep_ap_bootstrap) /* And away! */ nop #endif /* Should not be reached */ 9: b 9b cpu_wakeup_handler: GET_TOCBASE(%r2) /* Check for false wake up due to badly SRR1 set (eg. by OPAL) */ addis %r3,%r2,TOC_REF(can_wakeup)@ha ld %r3,TOC_REF(can_wakeup)@l(%r3) ld %r3,0(%r3) cmpdi %r3,0 beq cpu_reset_handler /* Turn on MMU after return from interrupt */ mfsrr1 %r3 ori %r3,%r3,(PSL_IR | PSL_DR) mtsrr1 %r3 /* Turn on MMU (needed to access PCB) */ mfmsr %r3 ori %r3,%r3,(PSL_IR | PSL_DR) mtmsr %r3 isync mfsprg0 %r3 ld %r3,PC_CURTHREAD(%r3) /* Get current thread */ ld %r3,TD_PCB(%r3) /* Get PCB of current thread */ ld %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs. 
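 * PCB_CONTEXT is presumed to hold r12..r31 in ascending order, 8 bytes apart, matching the layout saved on the suspend path; the loads below simply walk that array.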
*/ ld %r13,PCB_CONTEXT+1*8(%r3) ld %r14,PCB_CONTEXT+2*8(%r3) ld %r15,PCB_CONTEXT+3*8(%r3) ld %r16,PCB_CONTEXT+4*8(%r3) ld %r17,PCB_CONTEXT+5*8(%r3) ld %r18,PCB_CONTEXT+6*8(%r3) ld %r19,PCB_CONTEXT+7*8(%r3) ld %r20,PCB_CONTEXT+8*8(%r3) ld %r21,PCB_CONTEXT+9*8(%r3) ld %r22,PCB_CONTEXT+10*8(%r3) ld %r23,PCB_CONTEXT+11*8(%r3) ld %r24,PCB_CONTEXT+12*8(%r3) ld %r25,PCB_CONTEXT+13*8(%r3) ld %r26,PCB_CONTEXT+14*8(%r3) ld %r27,PCB_CONTEXT+15*8(%r3) ld %r28,PCB_CONTEXT+16*8(%r3) ld %r29,PCB_CONTEXT+17*8(%r3) ld %r30,PCB_CONTEXT+18*8(%r3) ld %r31,PCB_CONTEXT+19*8(%r3) ld %r5,PCB_CR(%r3) /* Load the condition register */ mtcr %r5 ld %r5,PCB_LR(%r3) /* Load the link register */ mtsrr0 %r5 ld %r1,PCB_SP(%r3) /* Load the stack pointer */ ld %r2,PCB_TOC(%r3) /* Load the TOC pointer */ rfid /* * This code gets copied to all the trap vectors * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions! */ .globl CNAME(trapcode),CNAME(trapcodeend) .p2align 3 CNAME(trapcode): mtsprg1 %r1 /* save SP */ mflr %r1 /* Save the old LR in r1 */ mtsprg2 %r1 /* And then in SPRG2 */ ld %r1,TRAP_ENTRY(0) mtlr %r1 li %r1, 0xe0 /* How to get the vector from LR */ blrl /* Branch to generictrap */ CNAME(trapcodeend): /* Same thing for traps setting HSRR0/HSRR1 */ .globl CNAME(hypertrapcode),CNAME(hypertrapcodeend) .p2align 3 CNAME(hypertrapcode): mtsprg1 %r1 /* save SP */ mflr %r1 /* Save the old LR in r1 */ mtsprg2 %r1 /* And then in SPRG2 */ ld %r1,TRAP_GENTRAP(0) addi %r1,%r1,(generichypertrap-generictrap) mtlr %r1 li %r1, 0xe0 /* How to get the vector from LR */ blrl /* Branch to generichypertrap */ CNAME(hypertrapcodeend): /* * For SLB misses: do special things for the kernel * * Note: SPRG1 is always safe to overwrite any time the MMU was on, which is * the only time this can be called. */ .globl CNAME(slbtrap),CNAME(slbtrapend) .p2align 3 CNAME(slbtrap): /* 0x00 */ mtsprg1 %r1 /* save SP */ GET_CPUINFO(%r1) std %r2,(PC_SLBSAVE+16)(%r1) /* save r2 */ mfcr %r2 /* 0x10 */ std %r2,(PC_SLBSAVE+104)(%r1) /* save CR */ mfsrr1 %r2 /* test kernel mode */ mtcr %r2 bf 17,2f /* branch if PSL_PR is false */ /* 0x20 */ /* User mode */ ld %r2,(PC_SLBSAVE+104)(%r1) mtcr %r2 /* restore CR */ ld %r2,(PC_SLBSAVE+16)(%r1) /* restore r2 */ mflr %r1 /* 0x30 */ mtsprg2 %r1 /* save LR in SPRG2 */ ld %r1,TRAP_ENTRY(0) /* real-mode &generictrap */ mtlr %r1 li %r1, 0x80 /* How to get the vector from LR */ /* 0x40 */ blrl /* Branch to generictrap */ 2: mflr %r2 /* Save the old LR in r2 */ /* Kernel mode */ ld %r1,TRAP_GENTRAP(0) /* Real-mode &generictrap */ addi %r1,%r1,(kern_slbtrap-generictrap) /* 0x50 */ mtlr %r1 GET_CPUINFO(%r1) blrl /* Branch to kern_slbtrap */ /* must fit in 128 bytes! */ CNAME(slbtrapend): /* * On entry: * SPRG1: SP * r1: pcpu * r2: LR * LR: branch address in trap region */ kern_slbtrap: std %r2,(PC_SLBSAVE+136)(%r1) /* old LR */ std %r3,(PC_SLBSAVE+24)(%r1) /* save R3 */ /* Check if this needs to be handled as a regular trap (userseg miss) */ mflr %r2 andi. 
%r2,%r2,0xff80 cmpwi %r2,EXC_DSE bne 1f mfdar %r2 b 2f 1: mfsrr0 %r2 2: /* r2 now contains the fault address */ lis %r3,SEGMENT_MASK@highesta ori %r3,%r3,SEGMENT_MASK@highera sldi %r3,%r3,32 oris %r3,%r3,SEGMENT_MASK@ha ori %r3,%r3,SEGMENT_MASK@l and %r2,%r2,%r3 /* R2 = segment base address */ lis %r3,USER_ADDR@highesta ori %r3,%r3,USER_ADDR@highera sldi %r3,%r3,32 oris %r3,%r3,USER_ADDR@ha ori %r3,%r3,USER_ADDR@l cmpd %r2,%r3 /* Compare fault base to USER_ADDR */ bne 3f /* User seg miss, handle as a regular trap */ ld %r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */ mtcr %r2 ld %r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */ ld %r3,(PC_SLBSAVE+24)(%r1) ld %r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */ mtsprg2 %r1 /* And then in SPRG2 */ li %r1, 0x80 /* How to get the vector from LR */ b generictrap /* Retain old LR using b */ 3: /* Real kernel SLB miss */ std %r0,(PC_SLBSAVE+0)(%r1) /* free all volatile regs */ mfsprg1 %r2 /* Old R1 */ std %r2,(PC_SLBSAVE+8)(%r1) /* R2,R3 already saved */ std %r4,(PC_SLBSAVE+32)(%r1) std %r5,(PC_SLBSAVE+40)(%r1) std %r6,(PC_SLBSAVE+48)(%r1) std %r7,(PC_SLBSAVE+56)(%r1) std %r8,(PC_SLBSAVE+64)(%r1) std %r9,(PC_SLBSAVE+72)(%r1) std %r10,(PC_SLBSAVE+80)(%r1) std %r11,(PC_SLBSAVE+88)(%r1) std %r12,(PC_SLBSAVE+96)(%r1) /* CR already saved */ mfxer %r2 /* save XER */ std %r2,(PC_SLBSAVE+112)(%r1) mflr %r2 /* save LR (SP already saved) */ std %r2,(PC_SLBSAVE+120)(%r1) mfctr %r2 /* save CTR */ std %r2,(PC_SLBSAVE+128)(%r1) /* Call handler */ addi %r1,%r1,PC_SLBSTACK-48+1024 li %r2,~15 and %r1,%r1,%r2 GET_TOCBASE(%r2) mflr %r3 andi. %r3,%r3,0xff80 mfdar %r4 mfsrr0 %r5 bl handle_kernel_slb_spill nop /* Save r28-31, restore r4-r12 */ GET_CPUINFO(%r1) ld %r4,(PC_SLBSAVE+32)(%r1) ld %r5,(PC_SLBSAVE+40)(%r1) ld %r6,(PC_SLBSAVE+48)(%r1) ld %r7,(PC_SLBSAVE+56)(%r1) ld %r8,(PC_SLBSAVE+64)(%r1) ld %r9,(PC_SLBSAVE+72)(%r1) ld %r10,(PC_SLBSAVE+80)(%r1) ld %r11,(PC_SLBSAVE+88)(%r1) ld %r12,(PC_SLBSAVE+96)(%r1) std %r28,(PC_SLBSAVE+64)(%r1) std %r29,(PC_SLBSAVE+72)(%r1) std %r30,(PC_SLBSAVE+80)(%r1) std %r31,(PC_SLBSAVE+88)(%r1) /* Restore kernel mapping */ bl restore_kernsrs /* Restore remaining registers */ ld %r28,(PC_SLBSAVE+64)(%r1) ld %r29,(PC_SLBSAVE+72)(%r1) ld %r30,(PC_SLBSAVE+80)(%r1) ld %r31,(PC_SLBSAVE+88)(%r1) ld %r2,(PC_SLBSAVE+104)(%r1) mtcr %r2 ld %r2,(PC_SLBSAVE+112)(%r1) mtxer %r2 ld %r2,(PC_SLBSAVE+120)(%r1) mtlr %r2 ld %r2,(PC_SLBSAVE+128)(%r1) mtctr %r2 ld %r2,(PC_SLBSAVE+136)(%r1) mtlr %r2 /* Restore r0-r3 */ ld %r0,(PC_SLBSAVE+0)(%r1) ld %r2,(PC_SLBSAVE+16)(%r1) ld %r3,(PC_SLBSAVE+24)(%r1) mfsprg1 %r1 /* Back to whatever we were doing */ rfid /* * For ALI: has to save DSISR and DAR */ .globl CNAME(alitrap),CNAME(aliend) CNAME(alitrap): mtsprg1 %r1 /* save SP */ GET_CPUINFO(%r1) std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */ std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) mfdar %r30 mfdsisr %r31 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) mfsprg1 %r1 /* restore SP, in case of branch */ mflr %r28 /* save LR */ mfcr %r29 /* save CR */ ld %r31,TRAP_GENTRAP(0) addi %r31,%r31,(s_trap - generictrap) mtlr %r31 /* Put our exception vector in SPRG3 */ li %r31, EXC_ALI mtsprg3 %r31 /* Test whether we already had PR set */ mfsrr1 %r31 mtcr %r31 blrl /* Branch to s_trap */ CNAME(aliend): /* * Similar to the above for DSI * Has to handle standard pagetable spills */ .globl CNAME(dsitrap),CNAME(dsiend) 
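/* Like alitrap above, dsitrap uses the common trampoline pattern: save scratch registers in the PCPU save area, load the real-mode address of generictrap from the TRAP_GENTRAP slot, add the link-time offset of the specific handler, and branch with blrl so that LR identifies the originating vector. */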
.p2align 3 CNAME(dsitrap): mtsprg1 %r1 /* save SP */ GET_CPUINFO(%r1) std %r27,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */ std %r28,(PC_DISISAVE+CPUSAVE_R28)(%r1) std %r29,(PC_DISISAVE+CPUSAVE_R29)(%r1) std %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) std %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) mfcr %r29 /* save CR */ mfxer %r30 /* save XER */ mtsprg2 %r30 /* in SPRG2 */ mfsrr1 %r31 /* test kernel mode */ mtcr %r31 mflr %r28 /* save LR (SP already saved) */ ld %r1,TRAP_GENTRAP(0) addi %r1,%r1,(disitrap-generictrap) mtlr %r1 blrl /* Branch to disitrap */ CNAME(dsiend): /* * Preamble code for DSI/ISI traps */ disitrap: /* Write the trap vector to SPRG3 by computing LR & 0xff00 */ mflr %r1 andi. %r1,%r1,0xff00 mtsprg3 %r1 GET_CPUINFO(%r1) ld %r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) ld %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) std %r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) ld %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) ld %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) ld %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) mfdar %r30 mfdsisr %r31 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) #ifdef KDB /* Try to detect a kernel stack overflow */ mfsrr1 %r31 mtcr %r31 bt 17,realtrap /* branch is user mode */ mfsprg1 %r31 /* get old SP */ clrrdi %r31,%r31,12 /* Round SP down to nearest page */ sub. %r30,%r31,%r30 /* SP - DAR */ bge 1f neg %r30,%r30 /* modulo value */ 1: cmpldi %cr0,%r30,4096 /* is DAR within a page of SP? */ bge %cr0,realtrap /* no, too far away. */ /* Now convert this DSI into a DDB trap. */ GET_CPUINFO(%r1) ld %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */ std %r30,(PC_DBSAVE +CPUSAVE_AIM_DAR)(%r1) /* save DAR */ ld %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */ std %r30,(PC_DBSAVE +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */ ld %r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get r27 */ std %r31,(PC_DBSAVE +CPUSAVE_R27)(%r1) /* save r27 */ ld %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get r28 */ std %r30,(PC_DBSAVE +CPUSAVE_R28)(%r1) /* save r28 */ ld %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get r29 */ std %r31,(PC_DBSAVE +CPUSAVE_R29)(%r1) /* save r29 */ ld %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get r30 */ std %r30,(PC_DBSAVE +CPUSAVE_R30)(%r1) /* save r30 */ ld %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get r31 */ std %r31,(PC_DBSAVE +CPUSAVE_R31)(%r1) /* save r31 */ b dbtrap #endif /* XXX need stack probe here */ realtrap: /* Test whether we already had PR set */ mfsrr1 %r1 mtcr %r1 mfsprg1 %r1 /* restore SP (might have been overwritten) */ bf 17,k_trap /* branch if PSL_PR is false */ GET_CPUINFO(%r1) ld %r1,PC_CURPCB(%r1) mr %r27,%r28 /* Save LR, r29 */ mtsprg2 %r29 bl restore_kernsrs /* enable kernel mapping */ mfsprg2 %r29 mr %r28,%r27 b s_trap /* * generictrap does some standard setup for trap handling to minimize * the code that need be installed in the actual vectors. It expects * the following conditions. 
* * R1 - Trap vector = LR & (0xff00 | R1) * SPRG1 - Original R1 contents * SPRG2 - Original LR */ generichypertrap: mtsprg3 %r1 mfspr %r1, SPR_HSRR0 mtsrr0 %r1 mfspr %r1, SPR_HSRR1 mtsrr1 %r1 mfsprg3 %r1 .globl CNAME(generictrap) generictrap: /* Save R1 for computing the exception vector */ mtsprg3 %r1 /* Save interesting registers */ GET_CPUINFO(%r1) std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */ std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) mfdar %r30 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) mfdsisr %r30 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) mfsprg1 %r1 /* restore SP, in case of branch */ mfsprg2 %r28 /* save LR */ mfcr %r29 /* save CR */ /* Compute the exception vector from the link register */ mfsprg3 %r31 ori %r31,%r31,0xff00 mflr %r30 addi %r30,%r30,-4 /* The branch instruction, not the next */ and %r30,%r30,%r31 mtsprg3 %r30 /* Test whether we already had PR set */ mfsrr1 %r31 mtcr %r31 s_trap: bf 17,k_trap /* branch if PSL_PR is false */ GET_CPUINFO(%r1) u_trap: ld %r1,PC_CURPCB(%r1) mr %r27,%r28 /* Save LR, r29 */ mtsprg2 %r29 bl restore_kernsrs /* enable kernel mapping */ mfsprg2 %r29 mr %r28,%r27 /* * Now the common trap catching code. */ k_trap: FRAME_SETUP(PC_TEMPSAVE) /* Call C interrupt dispatcher: */ trapagain: GET_TOCBASE(%r2) addi %r3,%r1,48 bl CNAME(powerpc_interrupt) nop .globl CNAME(trapexit) /* backtrace code sentinel */ CNAME(trapexit): /* Disable interrupts: */ mfmsr %r3 andi. %r3,%r3,~PSL_EE@l mtmsr %r3 isync /* Test AST pending: */ ld %r5,FRAME_SRR1+48(%r1) mtcr %r5 bf 17,1f /* branch if PSL_PR is false */ GET_CPUINFO(%r3) /* get per-CPU pointer */ lwz %r4, TD_FLAGS(%r13) /* get thread flags value */ lis %r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h ori %r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l and. %r4,%r4,%r5 beq 1f mfmsr %r3 /* re-enable interrupts */ ori %r3,%r3,PSL_EE@l mtmsr %r3 isync GET_TOCBASE(%r2) addi %r3,%r1,48 bl CNAME(ast) nop .globl CNAME(asttrapexit) /* backtrace code sentinel #2 */ CNAME(asttrapexit): b trapexit /* test ast ret value ? */ 1: FRAME_LEAVE(PC_TEMPSAVE) rfid #if defined(KDB) /* * Deliberate entry to dbtrap */ ASENTRY_NOPROF(breakpoint) mtsprg1 %r1 mfmsr %r3 mtsrr1 %r3 andi. %r3,%r3,~(PSL_EE|PSL_ME)@l mtmsr %r3 /* disable interrupts */ isync GET_CPUINFO(%r3) std %r27,(PC_DBSAVE+CPUSAVE_R27)(%r3) std %r28,(PC_DBSAVE+CPUSAVE_R28)(%r3) std %r29,(PC_DBSAVE+CPUSAVE_R29)(%r3) std %r30,(PC_DBSAVE+CPUSAVE_R30)(%r3) std %r31,(PC_DBSAVE+CPUSAVE_R31)(%r3) mflr %r28 li %r29,EXC_BPT mtlr %r29 mfcr %r29 mtsrr0 %r28 /* * Now the kdb trap catching code. */ dbtrap: /* Write the trap vector to SPRG3 by computing LR & 0xff00 */ mflr %r1 andi. %r1,%r1,0xff00 mtsprg3 %r1 GET_TOCBASE(%r1) /* get new SP */ addis %r1,%r1,TOC_REF(trapstk)@ha ld %r1,TOC_REF(trapstk)@l(%r1) addi %r1,%r1,(TRAPSTKSZ-48) FRAME_SETUP(PC_DBSAVE) /* Call C trap code: */ GET_TOCBASE(%r2) addi %r3,%r1,48 bl CNAME(db_trap_glue) nop or. 
%r3,%r3,%r3 bne dbleave /* This wasn't for KDB, so switch to real trap: */ ld %r3,FRAME_EXC+48(%r1) /* save exception */ GET_CPUINFO(%r4) std %r3,(PC_DBSAVE+CPUSAVE_R31)(%r4) FRAME_LEAVE(PC_DBSAVE) mtsprg1 %r1 /* prepare for entrance to realtrap */ GET_CPUINFO(%r1) std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1) std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1) std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1) mflr %r28 mfcr %r29 ld %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1) mtsprg3 %r31 /* SPRG3 was clobbered by FRAME_LEAVE */ mfsprg1 %r1 b realtrap dbleave: FRAME_LEAVE(PC_DBSAVE) rfid +ASEND(breakpoint) /* * In case of KDB we want a separate trap catcher for it */ .globl CNAME(dblow),CNAME(dbend) .p2align 3 CNAME(dblow): mtsprg1 %r1 /* save SP */ mtsprg2 %r29 /* save r29 */ mfcr %r29 /* save CR in r29 */ mfsrr1 %r1 mtcr %r1 bf 17,1f /* branch if privileged */ /* Unprivileged case */ mtcr %r29 /* put the condition register back */ mfsprg2 %r29 /* ... and r29 */ mflr %r1 /* save LR */ mtsprg2 %r1 /* And then in SPRG2 */ ld %r1, TRAP_ENTRY(0) /* Get branch address */ mtlr %r1 li %r1, 0 /* How to get the vector from LR */ blrl /* Branch to generictrap */ /* No fallthrough */ 1: GET_CPUINFO(%r1) std %r27,(PC_DBSAVE+CPUSAVE_R27)(%r1) /* free r27 */ std %r28,(PC_DBSAVE+CPUSAVE_R28)(%r1) /* free r28 */ mfsprg2 %r28 /* r29 holds cr... */ std %r28,(PC_DBSAVE+CPUSAVE_R29)(%r1) /* free r29 */ std %r30,(PC_DBSAVE+CPUSAVE_R30)(%r1) /* free r30 */ std %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1) /* free r31 */ mflr %r28 /* save LR */ ld %r1,TRAP_GENTRAP(0) addi %r1,%r1,(dbtrap-generictrap) mtlr %r1 blrl /* Branch to dbtrap */ CNAME(dbend): #endif /* KDB */ Index: head/sys/powerpc/booke/locore.S =================================================================== --- head/sys/powerpc/booke/locore.S (revision 368353) +++ head/sys/powerpc/booke/locore.S (revision 368354) @@ -1,969 +1,979 @@ /*- * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include "assym.inc" #include "opt_hwpmc_hooks.h" #include #include #include #include #include #include #include #include #ifdef _CALL_ELF .abiversion _CALL_ELF #endif #define TMPSTACKSZ 16384 #ifdef __powerpc64__ #define GET_TOCBASE(r) \ mfspr r, SPR_SPRG8 #define TOC_RESTORE nop #define CMPI cmpdi #define CMPL cmpld #define LOAD ld #define LOADX ldarx #define STORE std #define STOREX stdcx. #define STU stdu #define CALLSIZE 48 #define REDZONE 288 #define THREAD_REG %r13 #define ADDR(x) \ .llong x #define WORD_SIZE 8 #else #define GET_TOCBASE(r) #define TOC_RESTORE #define CMPI cmpwi #define CMPL cmplw #define LOAD lwz #define LOADX lwarx #define STOREX stwcx. #define STORE stw #define STU stwu #define CALLSIZE 8 #define REDZONE 0 #define THREAD_REG %r2 #define ADDR(x) \ .long x #define WORD_SIZE 4 #endif #ifdef __powerpc64__ /* Placate lld by creating a kboot stub. */ .section ".text.kboot", "x", @progbits b __start #endif .text .globl btext btext: /* * This symbol is here for the benefit of kvm_mkdb, and is supposed to * mark the start of kernel text. */ .globl kernel_text kernel_text: /* * Startup entry. Note, this must be the first thing in the text segment! */ .text .globl __start __start: /* * Assumptions on the boot loader: * - System memory starts from physical address 0 * - It's mapped by a single TLB1 entry * - TLB1 mapping is 1:1 pa to va * - Kernel is loaded at 64MB boundary * - All PID registers are set to the same value * - CPU is running in AS=0 * * Registers contents provided by the loader(8): * r1 : stack pointer * r3 : metadata pointer * * We rearrange the TLB1 layout as follows: * - Find TLB1 entry we started in * - Make sure it's protected, invalidate other entries * - Create temp entry in the second AS (make sure it's not TLB[1]) * - Switch to temp mapping * - Map 64MB of RAM in TLB1[1] * - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address * - Switch to TLB1[1] mapping * - Invalidate temp mapping * * locore registers use: * r1 : stack pointer * r2 : trace pointer (AP only, for early diagnostics) * r3-r27 : scratch registers * r28 : temp TLB1 entry * r29 : initial TLB1 entry we started in * r30-r31 : arguments (metadata pointer) */ /* * Keep arguments in r30 & r31 for later use. */ mr %r30, %r3 mr %r31, %r4 /* * Initial cleanup */ li %r3, PSL_DE /* Keep debug exceptions for CodeWarrior. */ #ifdef __powerpc64__ oris %r3, %r3, PSL_CM@h #endif mtmsr %r3 isync /* * Initial HIDs configuration */ 1: mfpvr %r3 rlwinm %r3, %r3, 16, 16, 31 lis %r4, HID0_E500_DEFAULT_SET@h ori %r4, %r4, HID0_E500_DEFAULT_SET@l /* Check for e500mc and e5500 */ cmpli 0, 0, %r3, FSL_E500mc bne 2f lis %r4, HID0_E500MC_DEFAULT_SET@h ori %r4, %r4, HID0_E500MC_DEFAULT_SET@l b 3f 2: cmpli 0, 0, %r3, FSL_E5500 bne 3f lis %r4, HID0_E5500_DEFAULT_SET@h ori %r4, %r4, HID0_E5500_DEFAULT_SET@l 3: mtspr SPR_HID0, %r4 isync /* * E500mc and E5500 do not have HID1 register, so skip HID1 setup on * this core. 
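 * (The FSL_E6500 check below exists for the same reason: e6500 has no HID1 either.)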
*/ cmpli 0, 0, %r3, FSL_E500mc beq 1f cmpli 0, 0, %r3, FSL_E5500 beq 1f cmpli 0, 0, %r3, FSL_E6500 beq 1f lis %r3, HID1_E500_DEFAULT_SET@h ori %r3, %r3, HID1_E500_DEFAULT_SET@l mtspr SPR_HID1, %r3 isync 1: /* Invalidate all entries in TLB0 */ li %r3, 0 bl tlb_inval_all cmpwi %r30, 0 beq done_mapping /* * Locate the TLB1 entry that maps this code */ bl 1f 1: mflr %r3 bl tlb1_find_current /* the entry found is returned in r29 */ bl tlb1_inval_all_but_current /* * Create temporary mapping in AS=1 and switch to it */ bl tlb1_temp_mapping_as1 mfmsr %r3 ori %r3, %r3, (PSL_IS | PSL_DS) bl 2f 2: mflr %r4 addi %r4, %r4, (3f - 2b) mtspr SPR_SRR0, %r4 mtspr SPR_SRR1, %r3 rfi /* Switch context */ /* * Invalidate initial entry */ 3: mr %r3, %r29 bl tlb1_inval_entry /* * Setup final mapping in TLB1[1] and switch to it */ /* Final kernel mapping, map in 64 MB of RAM */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ li %r4, 0 /* Entry 0 */ rlwimi %r3, %r4, 16, 10, 15 mtspr SPR_MAS0, %r3 isync li %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */ isync LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS) ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */ mtspr SPR_MAS2, %r3 isync /* Discover phys load address */ bl 3f 3: mflr %r4 /* Use current address */ rlwinm %r4, %r4, 0, 0, 5 /* 64MB alignment mask */ ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l mtspr SPR_MAS3, %r4 /* Set RPN and protection */ isync li %r4, 0 mtspr SPR_MAS7, %r4 isync tlbwe isync msync /* Switch to the above TLB1[1] mapping */ bl 4f 4: mflr %r4 #ifdef __powerpc64__ clrldi %r4, %r4, 38 clrrdi %r3, %r3, 12 #else rlwinm %r4, %r4, 0, 6, 31 /* Current offset from kernel load address */ rlwinm %r3, %r3, 0, 0, 19 #endif add %r4, %r4, %r3 /* Convert to kernel virtual address */ addi %r4, %r4, (5f - 4b) li %r3, PSL_DE /* Note AS=0 */ #ifdef __powerpc64__ oris %r3, %r3, PSL_CM@h #endif mtspr SPR_SRR0, %r4 mtspr SPR_SRR1, %r3 rfi /* * Invalidate temp mapping */ 5: mr %r3, %r28 bl tlb1_inval_entry done_mapping: #ifdef __powerpc64__ /* Set up the TOC pointer */ b 0f .align 3 0: nop bl 1f .llong __tocbase + 0x8000 - . 1: mflr %r2 ld %r1,0(%r2) add %r2,%r1,%r2 mtspr SPR_SPRG8, %r2 nop /* Get load offset */ ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */ subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */ /* Set up the stack pointer */ bl 1f .llong tmpstack + TMPSTACKSZ - 96 - . 1: mflr %r3 ld %r1,0(%r3) add %r1,%r1,%r3 /* * Relocate kernel */ bl 1f .llong _DYNAMIC-. 1: mflr %r3 ld %r4,0(%r3) add %r3,%r4,%r3 mr %r4,%r31 #else /* * Setup a temporary stack */ bl 1f .long tmpstack-. 1: mflr %r1 lwz %r2,0(%r1) add %r1,%r1,%r2 addi %r1, %r1, (TMPSTACKSZ - 16) /* * Relocate kernel */ bl 1f .long _DYNAMIC-. .long _GLOBAL_OFFSET_TABLE_-. 
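/* Position-discovery idiom used throughout this file: the "bl 1f" above leaves the address of the first .long in LR, so mflr plus the stored self-relative offsets yields the run-time addresses of _DYNAMIC and the GOT before any relocations have been applied. A comment here emits no bytes, so the data words above stay adjacent to the label below. */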
1: mflr %r5 lwz %r3,0(%r5) /* _DYNAMIC in %r3 */ add %r3,%r3,%r5 lwz %r4,4(%r5) /* GOT pointer */ add %r4,%r4,%r5 lwz %r4,4(%r4) /* got[0] is _DYNAMIC link addr */ subf %r4,%r4,%r3 /* subtract to calculate relocbase */ #endif bl CNAME(elf_reloc_self) TOC_RESTORE /* * Initialise exception vector offsets */ bl CNAME(ivor_setup) TOC_RESTORE /* * Set up arguments and jump to system initialization code */ mr %r3, %r30 mr %r4, %r31 /* Prepare core */ bl CNAME(booke_init) TOC_RESTORE /* Switch to thread0.td_kstack now */ mr %r1, %r3 li %r3, 0 STORE %r3, 0(%r1) /* Machine independent part, does not return */ bl CNAME(mi_startup) TOC_RESTORE /* NOT REACHED */ 5: b 5b #ifdef SMP /************************************************************************/ /* AP Boot page */ /************************************************************************/ .text .globl __boot_page .align 12 __boot_page: /* * The boot page is a special page of memory used during AP bringup. * Before the AP comes out of reset, the physical 4K page holding this * code is arranged to be mapped at 0xfffff000 by use of * platform-dependent registers. * * Alternatively, this page may be executed using an ePAPR-standardized * method -- writing to the address specified in "cpu-release-addr". * * In either case, execution begins at the last instruction of the * page, which is a branch back to the start of the page. * * The code in the page must do initial MMU setup and normalize the * TLBs for regular operation in the correct address space before * reading outside the page. * * This implementation accomplishes this by: * 1) Wiping TLB0 and all TLB1 entries but the one currently in use. * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching * to it with rfi. This entry must NOT be in TLB1 slot 0. * (This is needed to give the code freedom to clean up AS=0.) * 3) Removing the initial TLB1 entry, leaving us with a single valid * TLB1 entry, NOT in slot 0. * 4) Installing an AS0 entry in TLB1 slot 0 mapping the 64MB kernel * segment at its final virtual address. A second rfi is done to * switch to the final address space. At this point we can finally * access the rest of the kernel segment safely. * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in * a consistent (but minimal) state. * 6) Set up TOC, stack, and pcpu registers. * 7) Now that we can finally call C code, call pmap_bootstrap_ap(), * which finishes copying in the shared TLB1 entries. * * At this point, the MMU is fully set up, and we can proceed with * running the actual AP bootstrap code. * * Pieces of this code are also used for the UP kernel, but in this case * the sections specific to boot page functionality are dropped by * the preprocessor. */ #ifdef __powerpc64__ nop /* PPC64 alignment word. 64-bit target. */ #endif bl 1f /* 32-bit target. */ .globl bp_trace bp_trace: ADDR(0) /* Trace pointer (%r31). */ .globl bp_kernload bp_kernload: .llong 0 /* Kern phys. load address. */ .globl bp_virtaddr bp_virtaddr: ADDR(0) /* Virt. address of __boot_page.
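 * (bp_trace, bp_kernload and bp_virtaddr are presumably filled in by the BSP before the AP is released from reset; the AP reads them PC-relatively, since only this page is guaranteed to be mapped at that point.)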
*/ /* * Initial configuration */ 1: mflr %r31 /* r31 hold the address of bp_trace */ /* Set HIDs */ mfpvr %r3 rlwinm %r3, %r3, 16, 16, 31 /* HID0 for E500 is default */ lis %r4, HID0_E500_DEFAULT_SET@h ori %r4, %r4, HID0_E500_DEFAULT_SET@l cmpli 0, 0, %r3, FSL_E500mc bne 2f lis %r4, HID0_E500MC_DEFAULT_SET@h ori %r4, %r4, HID0_E500MC_DEFAULT_SET@l b 3f 2: cmpli 0, 0, %r3, FSL_E5500 bne 3f lis %r4, HID0_E5500_DEFAULT_SET@h ori %r4, %r4, HID0_E5500_DEFAULT_SET@l 3: mtspr SPR_HID0, %r4 isync /* Enable branch prediction */ li %r3, BUCSR_BPEN mtspr SPR_BUCSR, %r3 isync /* Invalidate all entries in TLB0 */ li %r3, 0 bl tlb_inval_all /* * Find TLB1 entry which is translating us now */ bl 2f 2: mflr %r3 bl tlb1_find_current /* the entry number found is in r29 */ bl tlb1_inval_all_but_current /* * Create temporary translation in AS=1 and switch to it */ bl tlb1_temp_mapping_as1 mfmsr %r3 ori %r3, %r3, (PSL_IS | PSL_DS) #ifdef __powerpc64__ oris %r3, %r3, PSL_CM@h /* Ensure we're in 64-bit after RFI */ #endif bl 3f 3: mflr %r4 addi %r4, %r4, (4f - 3b) mtspr SPR_SRR0, %r4 mtspr SPR_SRR1, %r3 rfi /* Switch context */ /* * Invalidate initial entry */ 4: mr %r3, %r29 bl tlb1_inval_entry /* * Setup final mapping in TLB1[0] and switch to it */ /* Final kernel mapping, map in 64 MB of RAM */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ li %r4, 0 /* Entry 0 */ rlwimi %r3, %r4, 16, 4, 15 mtspr SPR_MAS0, %r3 isync li %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */ isync LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS) ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */ mtspr SPR_MAS2, %r3 isync /* Retrieve kernel load [physical] address from bp_kernload */ 5: mflr %r3 #ifdef __powerpc64__ clrrdi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */ #else clrrwi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */ #endif /* Load lower half of the kernel loadaddr. */ lwz %r4, (bp_kernload - __boot_page + 4)(%r3) LOAD %r5, (bp_virtaddr - __boot_page)(%r3) /* Set RPN and protection */ ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l mtspr SPR_MAS3, %r4 isync lwz %r4, (bp_kernload - __boot_page)(%r3) mtspr SPR_MAS7, %r4 isync tlbwe isync msync /* Switch to the final mapping */ bl 6f 6: mflr %r3 rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */ add %r3, %r3, %r5 /* Make this a virtual address */ addi %r3, %r3, (7f - 6b) /* And figure out return address. */ #ifdef __powerpc64__ lis %r4, PSL_CM@h /* Note AS=0 */ #else li %r4, 0 /* Note AS=0 */ #endif mtspr SPR_SRR0, %r3 mtspr SPR_SRR1, %r4 rfi 7: /* * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and * beyond so it's allowed to directly access all locations the kernel was linked * against. */ /* * Invalidate temp mapping */ mr %r3, %r28 bl tlb1_inval_entry #ifdef __powerpc64__ /* Set up the TOC pointer */ b 0f .align 3 0: nop bl 1f .llong __tocbase + 0x8000 - . 1: mflr %r2 ld %r1,0(%r2) add %r2,%r1,%r2 mtspr SPR_SPRG8, %r2 /* Set up the stack pointer */ addis %r1,%r2,TOC_REF(tmpstack)@ha ld %r1,TOC_REF(tmpstack)@l(%r1) addi %r1,%r1,TMPSTACKSZ-96 #else /* * Setup a temporary stack */ bl 1f .long tmpstack-. 1: mflr %r1 lwz %r2,0(%r1) add %r1,%r1,%r2 stw %r1, 0(%r1) addi %r1, %r1, (TMPSTACKSZ - 16) #endif /* * Initialise exception vector offsets */ bl CNAME(ivor_setup) TOC_RESTORE /* * Assign our pcpu instance */ bl 1f .long ap_pcpu-. 
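/* Same position-discovery idiom: recover &ap_pcpu, dereference it to get the pcpu pointer prepared for this AP, and park it in SPRG0, where GET_CPUINFO() expects to find it. */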
1: mflr %r4 lwz %r3, 0(%r4) add %r3, %r3, %r4 LOAD %r3, 0(%r3) mtsprg0 %r3 bl CNAME(pmap_bootstrap_ap) TOC_RESTORE bl CNAME(cpudep_ap_bootstrap) TOC_RESTORE /* Switch to the idle thread's kstack */ mr %r1, %r3 bl CNAME(machdep_ap_bootstrap) TOC_RESTORE /* NOT REACHED */ 6: b 6b #endif /* SMP */ #if defined (BOOKE_E500) /* * Invalidate all entries in the given TLB. * * r3 TLBSEL */ tlb_inval_all: rlwinm %r3, %r3, 3, (1 << 3) /* TLBSEL */ ori %r3, %r3, (1 << 2) /* INVALL */ tlbivax 0, %r3 isync msync tlbsync msync blr /* * expects address to look up in r3, returns entry number in r29 * * FIXME: the hidden assumption is we are now running in AS=0, but we should * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS] */ tlb1_find_current: mfspr %r17, SPR_PID0 slwi %r17, %r17, MAS6_SPID0_SHIFT mtspr SPR_MAS6, %r17 isync tlbsx 0, %r3 mfspr %r17, SPR_MAS0 rlwinm %r29, %r17, 16, 26, 31 /* MAS0[ESEL] -> r29 */ /* Make sure we have IPROT set on the entry */ mfspr %r17, SPR_MAS1 oris %r17, %r17, MAS1_IPROT@h mtspr SPR_MAS1, %r17 isync tlbwe isync msync blr /* * Invalidates a single entry in TLB1. * * r3 ESEL * r4-r5 scratched */ tlb1_inval_entry: lis %r4, MAS0_TLBSEL1@h /* Select TLB1 */ rlwimi %r4, %r3, 16, 10, 15 /* Select our entry */ mtspr SPR_MAS0, %r4 isync tlbre li %r5, 0 /* MAS1[V] = 0 */ mtspr SPR_MAS1, %r5 isync tlbwe isync msync blr /* * r29 current entry number * r28 returned temp entry * r3-r5 scratched */ tlb1_temp_mapping_as1: /* Read our current translation */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ rlwimi %r3, %r29, 16, 10, 15 /* Select our current entry */ mtspr SPR_MAS0, %r3 isync tlbre /* * Prepare and write temp entry * * FIXME this is not robust against overflow i.e. when the current * entry is the last in TLB1 */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ addi %r28, %r29, 1 /* Use next entry. */ rlwimi %r3, %r28, 16, 10, 15 /* Select temp entry */ mtspr SPR_MAS0, %r3 isync mfspr %r5, SPR_MAS1 li %r4, 1 /* AS=1 */ rlwimi %r5, %r4, 12, 19, 19 li %r4, 0 /* Global mapping, TID=0 */ rlwimi %r5, %r4, 16, 8, 15 oris %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h mtspr SPR_MAS1, %r5 isync mflr %r3 li %r4, 0 mtspr SPR_MAS7, %r4 mtlr %r3 isync tlbwe isync msync blr /* * Loops over TLB1, invalidates all entries skipping the one which currently * maps this code. * * r29 current entry * r3-r5 scratched */ tlb1_inval_all_but_current: mfspr %r3, SPR_TLB1CFG /* Get number of entries */ andi. %r3, %r3, TLBCFG_NENTRY_MASK@l li %r4, 0 /* Start from Entry 0 */ 1: lis %r5, MAS0_TLBSEL1@h rlwimi %r5, %r4, 16, 10, 15 mtspr SPR_MAS0, %r5 isync tlbre mfspr %r5, SPR_MAS1 cmpw %r4, %r29 /* our current entry? */ beq 2f rlwinm %r5, %r5, 0, 2, 31 /* clear VALID and IPROT bits */ mtspr SPR_MAS1, %r5 isync tlbwe isync msync 2: addi %r4, %r4, 1 cmpw %r4, %r3 /* Check if this is the last entry */ bne 1b blr #endif #ifdef SMP .globl __boot_tlb1 /* * The __boot_tlb1 table is used to hold BSP TLB1 entries * marked with _TLB_ENTRY_SHARED flag during AP bootstrap. * The BSP fills in the table in tlb_ap_prep() function. Next, * AP loads its contents to TLB1 hardware in pmap_bootstrap_ap(). */ __boot_tlb1: .space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE __boot_page_padding: /* * Boot page needs to be exactly 4K, with the last word of this page * acting as the reset vector, so we need to stuff the remainder. * Upon release from holdoff CPU fetches the last word of the boot * page. */ .space 4092 - (__boot_page_padding - __boot_page) b __boot_page /* * This is the end of the boot page. 
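 * (The arithmetic works out because the branch is the only thing after the padding: .space fills the page up to offset 4092, so the 4-byte "b __boot_page" occupies the page's last word.)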
* During AP startup, the previous instruction is at 0xfffffffc * virtual (i.e. the reset vector.) */ #endif /* SMP */ /************************************************************************/ /* locore subroutines */ /************************************************************************/ /* * Cache disable/enable/inval sequences according * to section 2.16 of E500CORE RM. */ ENTRY(dcache_inval) /* Invalidate d-cache */ mfspr %r3, SPR_L1CSR0 ori %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l msync isync mtspr SPR_L1CSR0, %r3 isync 1: mfspr %r3, SPR_L1CSR0 andi. %r3, %r3, L1CSR0_DCFI bne 1b blr +END(dcache_inval) ENTRY(dcache_disable) /* Disable d-cache */ mfspr %r3, SPR_L1CSR0 li %r4, L1CSR0_DCE@l not %r4, %r4 and %r3, %r3, %r4 msync isync mtspr SPR_L1CSR0, %r3 isync blr +END(dcache_disable) ENTRY(dcache_enable) /* Enable d-cache */ mfspr %r3, SPR_L1CSR0 oris %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h ori %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l msync isync mtspr SPR_L1CSR0, %r3 isync blr +END(dcache_enable) ENTRY(icache_inval) /* Invalidate i-cache */ mfspr %r3, SPR_L1CSR1 ori %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l isync mtspr SPR_L1CSR1, %r3 isync 1: mfspr %r3, SPR_L1CSR1 andi. %r3, %r3, L1CSR1_ICFI bne 1b blr +END(icache_inval) ENTRY(icache_disable) /* Disable i-cache */ mfspr %r3, SPR_L1CSR1 li %r4, L1CSR1_ICE@l not %r4, %r4 and %r3, %r3, %r4 isync mtspr SPR_L1CSR1, %r3 isync blr +END(icache_disable) ENTRY(icache_enable) /* Enable i-cache */ mfspr %r3, SPR_L1CSR1 oris %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h ori %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l isync mtspr SPR_L1CSR1, %r3 isync blr +END(icache_enable) /* * L2 cache disable/enable/inval sequences for E500mc. */ ENTRY(l2cache_inval) mfspr %r3, SPR_L2CSR0 oris %r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h ori %r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l isync mtspr SPR_L2CSR0, %r3 isync 1: mfspr %r3, SPR_L2CSR0 andis. %r3, %r3, L2CSR0_L2FI@h bne 1b blr +END(l2cache_inval) ENTRY(l2cache_enable) mfspr %r3, SPR_L2CSR0 oris %r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h isync mtspr SPR_L2CSR0, %r3 isync blr +END(l2cache_enable) /* * Branch predictor setup. */ ENTRY(bpred_enable) mfspr %r3, SPR_BUCSR ori %r3, %r3, BUCSR_BBFI isync mtspr SPR_BUCSR, %r3 isync ori %r3, %r3, BUCSR_BPEN isync mtspr SPR_BUCSR, %r3 isync blr +END(bpred_enable) /* * XXX: This should be moved to a shared AIM/booke asm file, if one ever is * created. */ ENTRY(get_spr) /* Note: The spr number is patched at runtime */ mfspr %r3, 0 blr +END(get_spr) /************************************************************************/ /* Data section */ /************************************************************************/ .data .align 3 GLOBAL(__startkernel) ADDR(begin) GLOBAL(__endkernel) ADDR(end) .align 4 tmpstack: .space TMPSTACKSZ tmpstackbound: .space 10240 /* XXX: this really should not be necessary */ #ifdef __powerpc64__ TOC_ENTRY(tmpstack) #ifdef SMP TOC_ENTRY(bp_kernload) #endif #endif /* * Compiled KERNBASE locations */ .globl kernbase .set kernbase, KERNBASE #include Index: head/sys/powerpc/booke/trap_subr.S =================================================================== --- head/sys/powerpc/booke/trap_subr.S (revision 368353) +++ head/sys/powerpc/booke/trap_subr.S (revision 368354) @@ -1,1134 +1,1137 @@ /*- * Copyright (C) 2006-2009 Semihalf, Rafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz * Copyright (C) 2006 Juniper Networks, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */ /* * NOTICE: This is not a standalone file. 
To use it, #include it in * your port's locore.S, like so: * * #include */ /* * SPRG usage notes * * SPRG0 - pcpu pointer * SPRG1 - all interrupts except TLB miss, critical, machine check * SPRG2 - critical * SPRG3 - machine check * SPRG4-6 - scratch * */ /* Get the per-CPU data structure */ #define GET_CPUINFO(r) mfsprg0 r #define RES_GRANULE 64 #define RES_LOCK 0 /* offset to the 'lock' word */ #ifdef __powerpc64__ #define RES_RECURSE 8 /* offset to the 'recurse' word */ #else #define RES_RECURSE 4 /* offset to the 'recurse' word */ #endif /* * Standard interrupt prolog * * sprg_sp - SPRG{1-3} reg used to temporarily store the SP * savearea - temp save area (pc_{tempsave, disisave, critsave, mchksave}) * isrr0-1 - save/restore registers with CPU state at interrupt time (may be * SRR0-1, CSRR0-1, MCSRR0-1) * * 1. saves in the given savearea: * - R30-31 * - DEAR, ESR * - xSRR0-1 * * 2. saves CR -> R30 * * 3. switches to kstack if needed * * 4. notes: * - R31 can be used as scratch register until a new frame is laid on * the stack with FRAME_SETUP * * - potential TLB miss: NO. Saveareas are always accessible via TLB1 * permanent entries, and within this prolog we do not dereference any * locations potentially not in the TLB */ #define STANDARD_PROLOG(sprg_sp, savearea, isrr0, isrr1) \ mtspr sprg_sp, %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \ mfspr %r30, SPR_DEAR; \ mfspr %r31, SPR_ESR; \ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \ mfspr %r30, isrr0; \ mfspr %r31, isrr1; /* MSR at interrupt time */ \ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \ isync; \ mfspr %r1, sprg_sp; /* Restore SP */ \ mfcr %r30; /* Save CR */ \ /* switch to per-thread kstack if intr taken in user mode */ \ mtcr %r31; /* MSR at interrupt time */ \ bf 17, 1f; \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \ 1: #define STANDARD_CRIT_PROLOG(sprg_sp, savearea, isrr0, isrr1) \ mtspr sprg_sp, %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \ mfspr %r30, SPR_DEAR; \ mfspr %r31, SPR_ESR; \ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \ mfspr %r30, isrr0; \ mfspr %r31, isrr1; /* MSR at interrupt time */ \ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \ mfspr %r30, SPR_SRR0; \ mfspr %r31, SPR_SRR1; /* MSR at interrupt time */ \ STORE %r30, (savearea+BOOKE_CRITSAVE_SRR0)(%r1); \ STORE %r31, (savearea+BOOKE_CRITSAVE_SRR1)(%r1); \ isync; \ mfspr %r1, sprg_sp; /* Restore SP */ \ mfcr %r30; /* Save CR */ \ /* switch to per-thread kstack if intr taken in user mode */ \ mtcr %r31; /* MSR at interrupt time */ \ bf 17, 1f; \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \ 1: /* * FRAME_SETUP assumes: * SPRG{1-3} SP at the time interrupt occurred * savearea r30-r31, DEAR, ESR, xSRR0-1 * r30 CR * r31 scratch * r1 kernel stack * * sprg_sp - SPRG reg containing SP at the time interrupt occurred * savearea - temp save * exc - exception number (EXC_xxx) * * 1. sets a new frame * 2.
saves in the frame: * - R0, R1 (SP at the time of interrupt), R2, LR, CR * - R3-31 (R30-31 first restored from savearea) * - XER, CTR, DEAR, ESR (from savearea), xSRR0-1 * * Notes: * - potential TLB miss: YES, since we make dereferences to kstack, which * can happen not covered (we can have up to two DTLB misses if fortunate * enough i.e. when kstack crosses page boundary and both pages are * untranslated) */ #ifdef __powerpc64__ #define SAVE_REGS(r) \ std %r3, FRAME_3+CALLSIZE(r); \ std %r4, FRAME_4+CALLSIZE(r); \ std %r5, FRAME_5+CALLSIZE(r); \ std %r6, FRAME_6+CALLSIZE(r); \ std %r7, FRAME_7+CALLSIZE(r); \ std %r8, FRAME_8+CALLSIZE(r); \ std %r9, FRAME_9+CALLSIZE(r); \ std %r10, FRAME_10+CALLSIZE(r); \ std %r11, FRAME_11+CALLSIZE(r); \ std %r12, FRAME_12+CALLSIZE(r); \ std %r13, FRAME_13+CALLSIZE(r); \ std %r14, FRAME_14+CALLSIZE(r); \ std %r15, FRAME_15+CALLSIZE(r); \ std %r16, FRAME_16+CALLSIZE(r); \ std %r17, FRAME_17+CALLSIZE(r); \ std %r18, FRAME_18+CALLSIZE(r); \ std %r19, FRAME_19+CALLSIZE(r); \ std %r20, FRAME_20+CALLSIZE(r); \ std %r21, FRAME_21+CALLSIZE(r); \ std %r22, FRAME_22+CALLSIZE(r); \ std %r23, FRAME_23+CALLSIZE(r); \ std %r24, FRAME_24+CALLSIZE(r); \ std %r25, FRAME_25+CALLSIZE(r); \ std %r26, FRAME_26+CALLSIZE(r); \ std %r27, FRAME_27+CALLSIZE(r); \ std %r28, FRAME_28+CALLSIZE(r); \ std %r29, FRAME_29+CALLSIZE(r); \ std %r30, FRAME_30+CALLSIZE(r); \ std %r31, FRAME_31+CALLSIZE(r) #define LD_REGS(r) \ ld %r3, FRAME_3+CALLSIZE(r); \ ld %r4, FRAME_4+CALLSIZE(r); \ ld %r5, FRAME_5+CALLSIZE(r); \ ld %r6, FRAME_6+CALLSIZE(r); \ ld %r7, FRAME_7+CALLSIZE(r); \ ld %r8, FRAME_8+CALLSIZE(r); \ ld %r9, FRAME_9+CALLSIZE(r); \ ld %r10, FRAME_10+CALLSIZE(r); \ ld %r11, FRAME_11+CALLSIZE(r); \ ld %r12, FRAME_12+CALLSIZE(r); \ ld %r13, FRAME_13+CALLSIZE(r); \ ld %r14, FRAME_14+CALLSIZE(r); \ ld %r15, FRAME_15+CALLSIZE(r); \ ld %r16, FRAME_16+CALLSIZE(r); \ ld %r17, FRAME_17+CALLSIZE(r); \ ld %r18, FRAME_18+CALLSIZE(r); \ ld %r19, FRAME_19+CALLSIZE(r); \ ld %r20, FRAME_20+CALLSIZE(r); \ ld %r21, FRAME_21+CALLSIZE(r); \ ld %r22, FRAME_22+CALLSIZE(r); \ ld %r23, FRAME_23+CALLSIZE(r); \ ld %r24, FRAME_24+CALLSIZE(r); \ ld %r25, FRAME_25+CALLSIZE(r); \ ld %r26, FRAME_26+CALLSIZE(r); \ ld %r27, FRAME_27+CALLSIZE(r); \ ld %r28, FRAME_28+CALLSIZE(r); \ ld %r29, FRAME_29+CALLSIZE(r); \ ld %r30, FRAME_30+CALLSIZE(r); \ ld %r31, FRAME_31+CALLSIZE(r) #else #define SAVE_REGS(r) \ stmw %r3, FRAME_3+CALLSIZE(r) #define LD_REGS(r) \ lmw %r3, FRAME_3+CALLSIZE(r) #endif #define FRAME_SETUP(sprg_sp, savearea, exc) \ mfspr %r31, sprg_sp; /* get saved SP */ \ /* establish a new stack frame and put everything on it */ \ STU %r31, -(FRAMELEN+REDZONE)(%r1); \ STORE %r0, FRAME_0+CALLSIZE(%r1); /* save r0 in the trapframe */ \ STORE %r31, FRAME_1+CALLSIZE(%r1); /* save SP " " */ \ STORE %r2, FRAME_2+CALLSIZE(%r1); /* save r2 " " */ \ mflr %r31; \ STORE %r31, FRAME_LR+CALLSIZE(%r1); /* save LR " " */ \ STORE %r30, FRAME_CR+CALLSIZE(%r1); /* save CR " " */ \ GET_CPUINFO(%r2); \ LOAD %r30, (savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \ LOAD %r31, (savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \ /* save R3-31 */ \ SAVE_REGS(%r1); \ /* save DEAR, ESR */ \ LOAD %r28, (savearea+CPUSAVE_BOOKE_DEAR)(%r2); \ LOAD %r29, (savearea+CPUSAVE_BOOKE_ESR)(%r2); \ STORE %r28, FRAME_BOOKE_DEAR+CALLSIZE(%r1); \ STORE %r29, FRAME_BOOKE_ESR+CALLSIZE(%r1); \ /* save XER, CTR, exc number */ \ mfxer %r3; \ mfctr %r4; \ STORE %r3, FRAME_XER+CALLSIZE(%r1); \ STORE %r4, FRAME_CTR+CALLSIZE(%r1); \ li %r5, exc; \ STORE %r5, 
FRAME_EXC+CALLSIZE(%r1); \ /* save DBCR0 */ \ mfspr %r3, SPR_DBCR0; \ STORE %r3, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \ /* save xSRR0-1 */ \ LOAD %r30, (savearea+CPUSAVE_SRR0)(%r2); \ LOAD %r31, (savearea+CPUSAVE_SRR1)(%r2); \ STORE %r30, FRAME_SRR0+CALLSIZE(%r1); \ STORE %r31, FRAME_SRR1+CALLSIZE(%r1); \ LOAD THREAD_REG, PC_CURTHREAD(%r2); \ /* * * isrr0-1 - save/restore registers to restore CPU state to (may be * SRR0-1, CSRR0-1, MCSRR0-1) * * Notes: * - potential TLB miss: YES. The deref'd kstack may not be covered */ #define FRAME_LEAVE(isrr0, isrr1) \ wrteei 0; \ /* restore CTR, XER, LR, CR */ \ LOAD %r4, FRAME_CTR+CALLSIZE(%r1); \ LOAD %r5, FRAME_XER+CALLSIZE(%r1); \ LOAD %r6, FRAME_LR+CALLSIZE(%r1); \ LOAD %r7, FRAME_CR+CALLSIZE(%r1); \ mtctr %r4; \ mtxer %r5; \ mtlr %r6; \ mtcr %r7; \ /* restore DBCR0 */ \ LOAD %r4, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \ mtspr SPR_DBCR0, %r4; \ /* restore xSRR0-1 */ \ LOAD %r30, FRAME_SRR0+CALLSIZE(%r1); \ LOAD %r31, FRAME_SRR1+CALLSIZE(%r1); \ mtspr isrr0, %r30; \ mtspr isrr1, %r31; \ /* restore R2-31, SP */ \ LD_REGS(%r1); \ LOAD %r2, FRAME_2+CALLSIZE(%r1); \ LOAD %r0, FRAME_0+CALLSIZE(%r1); \ LOAD %r1, FRAME_1+CALLSIZE(%r1); \ isync /* * TLB miss prolog * * saves LR, CR, SRR0-1, R20-31 in the TLBSAVE area * * Notes: * - potential TLB miss: NO. It is crucial that we do not generate a TLB * miss within the TLB prolog itself! * - TLBSAVE is always translated */ #ifdef __powerpc64__ #define TLB_SAVE_REGS(br) \ std %r20, (TLBSAVE_BOOKE_R20)(br); \ std %r21, (TLBSAVE_BOOKE_R21)(br); \ std %r22, (TLBSAVE_BOOKE_R22)(br); \ std %r23, (TLBSAVE_BOOKE_R23)(br); \ std %r24, (TLBSAVE_BOOKE_R24)(br); \ std %r25, (TLBSAVE_BOOKE_R25)(br); \ std %r26, (TLBSAVE_BOOKE_R26)(br); \ std %r27, (TLBSAVE_BOOKE_R27)(br); \ std %r28, (TLBSAVE_BOOKE_R28)(br); \ std %r29, (TLBSAVE_BOOKE_R29)(br); \ std %r30, (TLBSAVE_BOOKE_R30)(br); \ std %r31, (TLBSAVE_BOOKE_R31)(br); #define TLB_RESTORE_REGS(br) \ ld %r20, (TLBSAVE_BOOKE_R20)(br); \ ld %r21, (TLBSAVE_BOOKE_R21)(br); \ ld %r22, (TLBSAVE_BOOKE_R22)(br); \ ld %r23, (TLBSAVE_BOOKE_R23)(br); \ ld %r24, (TLBSAVE_BOOKE_R24)(br); \ ld %r25, (TLBSAVE_BOOKE_R25)(br); \ ld %r26, (TLBSAVE_BOOKE_R26)(br); \ ld %r27, (TLBSAVE_BOOKE_R27)(br); \ ld %r28, (TLBSAVE_BOOKE_R28)(br); \ ld %r29, (TLBSAVE_BOOKE_R29)(br); \ ld %r30, (TLBSAVE_BOOKE_R30)(br); \ ld %r31, (TLBSAVE_BOOKE_R31)(br); #define TLB_NEST(outr,inr) \ rlwinm outr, inr, 7, 23, 24; /* 8 x TLBSAVE_LEN */ #else #define TLB_SAVE_REGS(br) \ stmw %r20, TLBSAVE_BOOKE_R20(br) #define TLB_RESTORE_REGS(br) \ lmw %r20, TLBSAVE_BOOKE_R20(br) #define TLB_NEST(outr,inr) \ rlwinm outr, inr, 6, 24, 25; /* 4 x TLBSAVE_LEN */ #endif #define TLB_PROLOG \ mtspr SPR_SPRG4, %r1; /* Save SP */ \ mtspr SPR_SPRG5, %r28; \ mtspr SPR_SPRG6, %r29; \ /* calculate TLB nesting level and TLBSAVE instance address */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \ TLB_NEST(%r29,%r28); \ addi %r28, %r28, 1; \ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \ addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \ add %r1, %r1, %r29; /* current TLBSAVE ptr */ \ \ /* save R20-31 */ \ mfspr %r28, SPR_SPRG5; \ mfspr %r29, SPR_SPRG6; \ TLB_SAVE_REGS(%r1); \ /* save LR, CR */ \ mflr %r30; \ mfcr %r31; \ STORE %r30, (TLBSAVE_BOOKE_LR)(%r1); \ STORE %r31, (TLBSAVE_BOOKE_CR)(%r1); \ /* save SRR0-1 */ \ mfsrr0 %r30; /* execution addr at interrupt time */ \ mfsrr1 %r31; /* MSR at interrupt time */ \ STORE %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \ STORE %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \ isync; \
mfspr %r1, SPR_SPRG4 /* * restores LR, CR, SRR0-1, R20-31 from the TLBSAVE area * * same notes as for the TLB_PROLOG */ #define TLB_RESTORE \ mtspr SPR_SPRG4, %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ /* calculate TLB nesting level and TLBSAVE instance addr */ \ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \ subi %r28, %r28, 1; \ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \ TLB_NEST(%r29,%r28); \ addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \ add %r1, %r1, %r29; \ \ /* restore LR, CR */ \ LOAD %r30, (TLBSAVE_BOOKE_LR)(%r1); \ LOAD %r31, (TLBSAVE_BOOKE_CR)(%r1); \ mtlr %r30; \ mtcr %r31; \ /* restore SRR0-1 */ \ LOAD %r30, (TLBSAVE_BOOKE_SRR0)(%r1); \ LOAD %r31, (TLBSAVE_BOOKE_SRR1)(%r1); \ mtsrr0 %r30; \ mtsrr1 %r31; \ /* restore R20-31 */ \ TLB_RESTORE_REGS(%r1); \ mfspr %r1, SPR_SPRG4 #ifdef SMP #define TLB_LOCK \ GET_CPUINFO(%r20); \ LOAD %r21, PC_CURTHREAD(%r20); \ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \ \ 1: LOADX %r23, 0, %r22; \ CMPI %r23, TLB_UNLOCKED; \ beq 2f; \ \ /* check if this is recursion */ \ CMPL cr0, %r21, %r23; \ bne- 1b; \ \ 2: /* try to acquire lock */ \ STOREX %r21, 0, %r22; \ bne- 1b; \ \ /* got it, update recursion counter */ \ lwz %r21, RES_RECURSE(%r22); \ addi %r21, %r21, 1; \ stw %r21, RES_RECURSE(%r22); \ isync; \ msync #define TLB_UNLOCK \ GET_CPUINFO(%r20); \ LOAD %r21, PC_CURTHREAD(%r20); \ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \ \ /* update recursion counter */ \ lwz %r23, RES_RECURSE(%r22); \ subi %r23, %r23, 1; \ stw %r23, RES_RECURSE(%r22); \ \ cmplwi %r23, 0; \ bne 1f; \ isync; \ msync; \ \ /* release the lock */ \ li %r23, TLB_UNLOCKED; \ STORE %r23, 0(%r22); \ 1: isync; \ msync #else #define TLB_LOCK #define TLB_UNLOCK #endif /* SMP */ #define INTERRUPT(label) \ .globl label; \ .align 5; \ CNAME(label): /* * Interrupt handling routines in BookE can be flexibly placed and do not have * to live in pre-defined vectors location. Note they need to be TLB-mapped at * all times in order to be able to handle exceptions. We thus arrange for * them to be part of kernel text which is always TLB-accessible. * * The interrupt handling routines have to be 16 bytes aligned: we align them * to 32 bytes (cache line length) which supposedly performs better. 
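 * (".align 5" in the INTERRUPT() macro aligns to 2^5 = 32 bytes, so each handler starts on its own 32-byte boundary.)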
* */ .text .globl CNAME(interrupt_vector_base) .align 5 interrupt_vector_base: /***************************************************************************** * Catch-all handler to handle uninstalled IVORs ****************************************************************************/ INTERRUPT(int_unknown) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_RSVD) b trap_common /***************************************************************************** * Critical input interrupt ****************************************************************************/ INTERRUPT(int_critical_input) STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1) FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_CRIT) GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(powerpc_interrupt) TOC_RESTORE FRAME_LEAVE(SPR_CSRR0, SPR_CSRR1) rfci /***************************************************************************** * Machine check interrupt ****************************************************************************/ INTERRUPT(int_machine_check) STANDARD_PROLOG(SPR_SPRG3, PC_BOOKE_MCHKSAVE, SPR_MCSRR0, SPR_MCSRR1) FRAME_SETUP(SPR_SPRG3, PC_BOOKE_MCHKSAVE, EXC_MCHK) GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(powerpc_interrupt) TOC_RESTORE FRAME_LEAVE(SPR_MCSRR0, SPR_MCSRR1) rfmci /***************************************************************************** * Data storage interrupt ****************************************************************************/ INTERRUPT(int_data_storage) STANDARD_PROLOG(SPR_SPRG1, PC_DISISAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_DISISAVE, EXC_DSI) b trap_common /***************************************************************************** * Instruction storage interrupt ****************************************************************************/ INTERRUPT(int_instr_storage) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_ISI) b trap_common /***************************************************************************** * External input interrupt ****************************************************************************/ INTERRUPT(int_external_input) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_EXI) b trap_common INTERRUPT(int_alignment) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_ALI) b trap_common INTERRUPT(int_program) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_PGM) b trap_common INTERRUPT(int_fpu) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_FPU) b trap_common /***************************************************************************** * System call ****************************************************************************/ INTERRUPT(int_syscall) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SC) b trap_common /***************************************************************************** * Decrementer interrupt ****************************************************************************/ INTERRUPT(int_decrementer) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_DECR) b trap_common /***************************************************************************** * Fixed interval timer 
****************************************************************************/ INTERRUPT(int_fixed_interval_timer) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_FIT) b trap_common /***************************************************************************** * Watchdog interrupt ****************************************************************************/ INTERRUPT(int_watchdog) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_WDOG) b trap_common /***************************************************************************** * Altivec Unavailable interrupt ****************************************************************************/ INTERRUPT(int_vec) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_VEC) b trap_common /***************************************************************************** * Altivec Assist interrupt ****************************************************************************/ INTERRUPT(int_vecast) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_VECAST_E) b trap_common #ifdef __SPE__ /***************************************************************************** * Floating point Assist interrupt ****************************************************************************/ INTERRUPT(int_spe_fpdata) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SPFPD) addi %r3, %r1, CALLSIZE bl spe_handle_fpdata FRAME_LEAVE(SPR_SRR0, SPR_SRR1) rfi INTERRUPT(int_spe_fpround) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SPFPR) addi %r3, %r1, CALLSIZE bl spe_handle_fpround FRAME_LEAVE(SPR_SRR0, SPR_SRR1) rfi #endif #ifdef HWPMC_HOOKS /***************************************************************************** * PMC Interrupt ****************************************************************************/ INTERRUPT(int_performance_counter) STANDARD_PROLOG(SPR_SPRG3, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG3, PC_TEMPSAVE, EXC_PERF) b trap_common #endif /***************************************************************************** * Data TLB miss interrupt * * There can be nested TLB misses - while handling a TLB miss we reference * data structures that may be not covered by translations. We support up to * TLB_NESTED_MAX-1 nested misses. * * Registers use: * r31 - dear * r30 - unused * r29 - saved mas0 * r28 - saved mas1 * r27 - saved mas2 * r26 - pmap address * r25 - pte address * * r20:r23 - scratch registers ****************************************************************************/ INTERRUPT(int_data_tlb_error) TLB_PROLOG TLB_LOCK mfspr %r31, SPR_DEAR /* * Save MAS0-MAS2 registers. There might be another tlb miss during * pte lookup overwriting current contents (which was hw filled). */ mfspr %r29, SPR_MAS0 mfspr %r28, SPR_MAS1 mfspr %r27, SPR_MAS2 /* Check faulting address. */ LOAD_ADDR(%r21, VM_MAXUSER_ADDRESS) CMPL cr0, %r31, %r21 blt search_user_pmap /* If it's kernel address, allow only supervisor mode misses. */ mfsrr1 %r21 mtcr %r21 bt 17, search_failed /* check MSR[PR] */ #ifdef __powerpc64__ srdi %r21, %r31, 48 cmpldi cr0, %r21, VM_MIN_KERNEL_ADDRESS@highest #else lis %r21, VM_MIN_KERNEL_ADDRESS@h cmplw cr0, %r31, %r21 #endif blt search_failed search_kernel_pmap: /* Load r26 with kernel_pmap address */ bl 1f #ifdef __powerpc64__ .llong kernel_pmap_store-. 
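/*
 * Same PC-relative "bl 1f; <offset word>" idiom as in locore.S; the embedded
 * constant is 8 bytes (.llong) on powerpc64 and 4 bytes (.long) on 32-bit,
 * so the word-sized LOAD below reads the whole offset.
 */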
#else .long kernel_pmap_store-. #endif 1: mflr %r21 LOAD %r26, 0(%r21) add %r26, %r21, %r26 /* kernel_pmap_store in r26 */ /* Force kernel tid, set TID to 0 in MAS1. */ li %r21, 0 rlwimi %r28, %r21, 0, 8, 15 /* clear TID bits */ tlb_miss_handle: /* This may result in nested tlb miss. */ bl pte_lookup /* returns PTE address in R25 */ CMPI %r25, 0 /* pte found? */ beq search_failed /* Finish up, write TLB entry. */ bl tlb_fill_entry tlb_miss_return: TLB_UNLOCK TLB_RESTORE rfi search_user_pmap: /* Load r26 with current user space process pmap */ GET_CPUINFO(%r26) LOAD %r26, PC_CURPMAP(%r26) b tlb_miss_handle search_failed: /* * Whenever we don't find a TLB mapping in PT, set a TLB0 entry with * the faulting virtual address anyway, but put a fake RPN and no * access rights. This should cause a following {D,I}SI exception. */ lis %r23, 0xffff0000@h /* revoke all permissions */ /* Load MAS registers. */ mtspr SPR_MAS0, %r29 mtspr SPR_MAS1, %r28 mtspr SPR_MAS2, %r27 mtspr SPR_MAS3, %r23 li %r23, 0 mtspr SPR_MAS7, %r23 isync tlbwe msync isync b tlb_miss_return /***************************************************************************** * * Return pte address that corresponds to given pmap/va. If there is no valid * entry return 0. * * input: r26 - pmap * input: r31 - dear * output: r25 - pte address * * scratch regs used: r21 * ****************************************************************************/ pte_lookup: CMPI %r26, 0 beq 1f /* fail quickly if pmap is invalid */ #ifdef __powerpc64__ rldicl %r21, %r31, (64 - PG_ROOT_L), (64 - PG_ROOT_NUM) /* pp2d offset */ slwi %r21, %r21, PG_ROOT_ENTRY_SHIFT /* multiply by pp2d entry size */ ld %r25, PM_ROOT(%r26) /* pmap pm_pp2d[] address */ ldx %r25, %r25, %r21 /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] * */ cmpdi %r25, 0 beq 2f rldicl %r21, %r31, (64 - PDIR_L1_L), (64 - PDIR_L1_NUM) /* pp2d offset */ slwi %r21, %r21, PDIR_L1_ENTRY_SHIFT /* multiply by pp2d entry size */ ldx %r25, %r25, %r21 /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] * */ cmpdi %r25, 0 beq 2f rldicl %r21, %r31, (64 - PDIR_L), (64 - PDIR_NUM) /* pdir offset */ slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */ ldx %r25, %r25, %r21 /* get ptbl address, i.e. pmap->pm_pp2d[pp2d_idx][pdir_idx] */ cmpdi %r25, 0 beq 2f rldicl %r21, %r31, (64 - PTBL_L), (64 - PTBL_NUM) /* ptbl offset */ slwi %r21, %r21, PTBL_ENTRY_SHIFT /* multiply by pte entry size */ #else srwi %r21, %r31, PDIR_SHIFT /* pdir offset */ slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */ lwz %r25, PM_PDIR(%r26) /* pmap pm_dir[] address */ /* * Get ptbl address, i.e. pmap->pm_pdir[pdir_idx] * This load may cause a Data TLB miss for non-kernel pmap! */ lwzx %r25, %r25, %r21 /* offset within pm_pdir[] table */ cmpwi %r25, 0 beq 2f lis %r21, PTBL_MASK@h ori %r21, %r21, PTBL_MASK@l and %r21, %r21, %r31 /* ptbl offset, multiply by ptbl entry size */ srwi %r21, %r21, (PTBL_SHIFT - PTBL_ENTRY_SHIFT) #endif add %r25, %r25, %r21 /* address of pte entry */ /* * Get pte->flags * This load may cause a Data TLB miss for non-kernel pmap! */ lwz %r21, PTE_FLAGS(%r25) andi. 
%r21, %r21, PTE_VALID@l bne 2f 1: li %r25, 0 2: blr /***************************************************************************** * * Load MAS1-MAS3 registers with data, write TLB entry * * input: * r29 - mas0 * r28 - mas1 * r27 - mas2 * r25 - pte * * output: none * * scratch regs: r21-r23 * ****************************************************************************/ tlb_fill_entry: /* * Update PTE flags: we have to do it atomically, as pmap_protect() * running on other CPUs could attempt to update the flags at the same * time. */ li %r23, PTE_FLAGS 1: lwarx %r21, %r23, %r25 /* get pte->flags */ oris %r21, %r21, PTE_REFERENCED@h /* set referenced bit */ andi. %r22, %r21, (PTE_SW | PTE_UW)@l /* check if writable */ beq 2f ori %r21, %r21, PTE_MODIFIED@l /* set modified bit */ 2: stwcx. %r21, %r23, %r25 /* write it back */ bne- 1b /* Update MAS2. */ rlwimi %r27, %r21, 13, 27, 30 /* insert WIMG bits from pte */ /* Setup MAS3 value in r23. */ LOAD %r23, PTE_RPN(%r25) /* get pte->rpn */ #ifdef __powerpc64__ rldicr %r22, %r23, 52, 51 /* extract MAS3 portion of RPN */ rldicl %r23, %r23, 20, 54 /* extract MAS7 portion of RPN */ rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */ #else rlwinm %r22, %r23, 20, 0, 11 /* extract MAS3 portion of RPN */ rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */ rlwimi %r22, %r21, 20, 12, 19 /* insert lower 8 RPN bits to MAS3 */ rlwinm %r23, %r23, 20, 24, 31 /* MAS7 portion of RPN */ #endif /* Load MAS registers. */ mtspr SPR_MAS0, %r29 mtspr SPR_MAS1, %r28 mtspr SPR_MAS2, %r27 mtspr SPR_MAS3, %r22 mtspr SPR_MAS7, %r23 isync tlbwe isync msync blr /***************************************************************************** * Instruction TLB miss interrupt * * Same notes as for the Data TLB miss ****************************************************************************/ INTERRUPT(int_inst_tlb_error) TLB_PROLOG TLB_LOCK mfsrr0 %r31 /* faulting address */ /* * Save MAS0-MAS2 registers. There might be another tlb miss during pte * lookup overwriting current contents (which was hw filled). */ mfspr %r29, SPR_MAS0 mfspr %r28, SPR_MAS1 mfspr %r27, SPR_MAS2 mfsrr1 %r21 mtcr %r21 /* check MSR[PR] */ bt 17, search_user_pmap b search_kernel_pmap .globl interrupt_vector_top interrupt_vector_top: /***************************************************************************** * Debug interrupt ****************************************************************************/ INTERRUPT(int_debug) STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1) FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_DEBUG) bl int_debug_int FRAME_LEAVE(SPR_CSRR0, SPR_CSRR1) rfci INTERRUPT(int_debug_ed) STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_DSRR0, SPR_DSRR1) FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_DEBUG) bl int_debug_int FRAME_LEAVE(SPR_DSRR0, SPR_DSRR1) rfdi /* .long 0x4c00004e */ /* Internal helper for debug interrupt handling. */ /* Common code between e500v1/v2 and e500mc-based cores. */ int_debug_int: mflr %r14 GET_CPUINFO(%r3) LOAD %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3) bl 0f ADDR(interrupt_vector_base-.) ADDR(interrupt_vector_top-.) 0: mflr %r5 LOAD %r4,0(%r5) /* interrupt_vector_base in r4 */ add %r4,%r4,%r5 CMPL cr0, %r3, %r4 blt trap_common LOAD %r4,WORD_SIZE(%r5) /* interrupt_vector_top in r4 */ add %r4,%r4,%r5 addi %r4,%r4,4 CMPL cr0, %r3, %r4 bge trap_common /* Disable single-stepping for the interrupt handlers. 
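 * The rlwinm below uses a wrapping mask (MB=23, ME=21) that clears only bit 22 of the saved SRR1 image; in the usual Book-E MSR numbering that is MSR[DE], so debug events stay disabled in the context we are about to return to.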
*/ LOAD %r3, FRAME_SRR1+CALLSIZE(%r1); rlwinm %r3, %r3, 0, 23, 21 STORE %r3, FRAME_SRR1+CALLSIZE(%r1); /* Restore srr0 and srr1 as they could have been clobbered. */ GET_CPUINFO(%r4) LOAD %r3, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR0)(%r4); mtspr SPR_SRR0, %r3 LOAD %r4, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR1)(%r4); mtspr SPR_SRR1, %r4 mtlr %r14 blr /***************************************************************************** * Common trap code ****************************************************************************/ trap_common: /* Call C trap dispatcher */ GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(powerpc_interrupt) TOC_RESTORE .globl CNAME(trapexit) /* exported for db_backtrace use */ CNAME(trapexit): /* disable interrupts */ wrteei 0 /* Test AST pending - makes sense for user process only */ LOAD %r5, FRAME_SRR1+CALLSIZE(%r1) mtcr %r5 bf 17, 1f GET_CPUINFO(%r3) LOAD %r4, PC_CURTHREAD(%r3) lwz %r4, TD_FLAGS(%r4) lis %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@h ori %r5, %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@l and. %r4, %r4, %r5 beq 1f /* re-enable interrupts before calling ast() */ wrteei 1 addi %r3, %r1, CALLSIZE bl CNAME(ast) TOC_RESTORE .globl CNAME(asttrapexit) /* db_backtrace code sentinel #2 */ CNAME(asttrapexit): b trapexit /* test ast ret value ? */ 1: FRAME_LEAVE(SPR_SRR0, SPR_SRR1) rfi #if defined(KDB) /* * Deliberate entry to dbtrap */ /* .globl CNAME(breakpoint)*/ ASENTRY_NOPROF(breakpoint) mtsprg1 %r1 mfmsr %r3 mtsrr1 %r3 li %r4, ~(PSL_EE | PSL_ME)@l oris %r4, %r4, ~(PSL_EE | PSL_ME)@h and %r3, %r3, %r4 mtmsr %r3 /* disable interrupts */ isync GET_CPUINFO(%r3) STORE %r30, (PC_DBSAVE+CPUSAVE_R30)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_R31)(%r3) mflr %r31 mtsrr0 %r31 mfspr %r30, SPR_DEAR mfspr %r31, SPR_ESR STORE %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3) mfsrr0 %r30 mfsrr1 %r31 STORE %r30, (PC_DBSAVE+CPUSAVE_SRR0)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_SRR1)(%r3) isync mfcr %r30 /* * Now the kdb trap catching code. */ dbtrap: FRAME_SETUP(SPR_SPRG1, PC_DBSAVE, EXC_DEBUG) /* Call C trap code: */ GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(db_trap_glue) TOC_RESTORE or. %r3, %r3, %r3 bne dbleave /* This wasn't for KDB, so switch to real trap: */ b trap_common dbleave: FRAME_LEAVE(SPR_SRR0, SPR_SRR1) rfi +ASEND(breakpoint) #endif /* KDB */ #ifdef SMP ENTRY(tlb_lock) GET_CPUINFO(%r5) LOAD %r5, PC_CURTHREAD(%r5) 1: LOADX %r4, 0, %r3 CMPI %r4, TLB_UNLOCKED bne 1b STOREX %r5, 0, %r3 bne- 1b isync msync blr +END(tlb_lock) ENTRY(tlb_unlock) isync msync li %r4, TLB_UNLOCKED STORE %r4, 0(%r3) isync msync blr +END(tlb_unlock) /* * TLB miss spin locks. For each CPU we have a reservation granule (32 bytes); * only a single word from this granule will actually be used as a spin lock * for mutual exclusion between TLB miss handler and pmap layer that * manipulates page table contents. */ .data .align 5 GLOBAL(tlb0_miss_locks) .space RES_GRANULE * MAXCPU #endif Index: head/sys/powerpc/include/asm.h =================================================================== --- head/sys/powerpc/include/asm.h (revision 368353) +++ head/sys/powerpc/include/asm.h (revision 368354) @@ -1,260 +1,268 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: asm.h,v 1.6.18.1 2000/07/25 08:37:14 kleink Exp $ * $FreeBSD$ */ #ifndef _MACHINE_ASM_H_ #define _MACHINE_ASM_H_ #include #if defined(PIC) && !defined(__powerpc64__) #define PIC_PROLOGUE XXX #define PIC_EPILOGUE XXX #define PIC_PLT(x) x@plt #ifdef __STDC__ #define PIC_GOT(x) XXX #else /* not __STDC__ */ #define PIC_GOT(x) XXX #endif /* __STDC__ */ #else #define PIC_PROLOGUE #define PIC_EPILOGUE #define PIC_PLT(x) x #define PIC_GOT(x) x #endif #define CNAME(csym) csym #define ASMNAME(asmsym) asmsym #ifdef __powerpc64__ #define HIDENAME(asmsym) __CONCAT(_,asmsym) #else #define HIDENAME(asmsym) __CONCAT(.,asmsym) #endif #if !defined(_CALL_ELF) || _CALL_ELF == 1 #ifdef _KERNEL /* ELFv1 kernel uses global dot symbols */ #define DOT_LABEL(name) __CONCAT(.,name) #define TYPE_ENTRY(name) .size name,24; \ .type DOT_LABEL(name),@function; \ .globl DOT_LABEL(name); #define END_SIZE(name) .size DOT_LABEL(name),.-DOT_LABEL(name); #else /* !_KERNEL */ /* ELFv1 user code uses local function entry points */ #define DOT_LABEL(name) __CONCAT(.L.,name) #define TYPE_ENTRY(name) .type name,@function; #define END_SIZE(name) .size name,.-DOT_LABEL(name); #endif /* _KERNEL */ #else /* ELFv2 doesn't have any of this complication */ #define DOT_LABEL(name) name #define TYPE_ENTRY(name) .type name,@function; #define END_SIZE(name) .size name,.-DOT_LABEL(name); #endif #define _GLOBAL(name) \ .data; \ .p2align 2; \ .globl name; \ name: #ifdef __powerpc64__ #define TOC_NAME_FOR_REF(name) __CONCAT(.L,name) #define TOC_REF(name) TOC_NAME_FOR_REF(name)@toc #define TOC_ENTRY(name) \ .section ".toc","aw"; \ TOC_NAME_FOR_REF(name): \ .tc name[TC],name #endif #ifdef __powerpc64__ #if !defined(_CALL_ELF) || _CALL_ELF == 1 #define _ENTRY(name) \ .section ".text"; \ .p2align 2; \ .globl name; \ .section ".opd","aw"; \ .p2align 3; \ - name: \ +name: \ .quad DOT_LABEL(name),.TOC.@tocbase,0; \ .previous; \ .p2align 4; \ TYPE_ENTRY(name) \ -DOT_LABEL(name): +DOT_LABEL(name): \ + .cfi_startproc #define _NAKED_ENTRY(name) _ENTRY(name) #else #define _ENTRY(name) \ .text; \ .p2align 4; \ .globl name; \ .type name,@function; \ name: \ + .cfi_startproc; \ addis 
%r2, %r12, (.TOC.-name)@ha; \ addi %r2, %r2, (.TOC.-name)@l; \ .localentry name, .-name; /* "Naked" function entry. No TOC prologue for ELFv2. */ #define _NAKED_ENTRY(name) \ .text; \ .p2align 4; \ .globl name; \ .type name,@function; \ name: \ + .cfi_startproc; \ .localentry name, .-name; #endif #define _END(name) \ + .cfi_endproc; \ .long 0; \ .byte 0,0,0,0,0,0,0,0; \ END_SIZE(name) #define LOAD_ADDR(reg, var) \ lis reg, var@highest; \ ori reg, reg, var@higher; \ rldicr reg, reg, 32, 31; \ oris reg, reg, var@h; \ ori reg, reg, var@l; #else /* !__powerpc64__ */ #define _ENTRY(name) \ .text; \ .p2align 4; \ .globl name; \ .type name,@function; \ - name: -#define _END(name) +name: \ + .cfi_startproc +#define _END(name) \ + .cfi_endproc; \ + .size name, . - name #define _NAKED_ENTRY(name) _ENTRY(name) #define LOAD_ADDR(reg, var) \ lis reg, var@ha; \ ori reg, reg, var@l; #endif /* __powerpc64__ */ #if defined(PROF) || (defined(_KERNEL) && defined(GPROF)) # ifdef __powerpc64__ # define _PROF_PROLOGUE mflr 0; \ std 3,48(1); \ std 4,56(1); \ std 5,64(1); \ std 0,16(1); \ stdu 1,-112(1); \ bl _mcount; \ nop; \ ld 0,112+16(1); \ ld 3,112+48(1); \ ld 4,112+56(1); \ ld 5,112+64(1); \ mtlr 0; \ addi 1,1,112 # else # define _PROF_PROLOGUE mflr 0; stw 0,4(1); bl _mcount # endif #else # define _PROF_PROLOGUE #endif +#define ASEND(y) _END(ASMNAME(y)) #define ASENTRY(y) _ENTRY(ASMNAME(y)); _PROF_PROLOGUE #define END(y) _END(CNAME(y)) #define ENTRY(y) _ENTRY(CNAME(y)); _PROF_PROLOGUE #define GLOBAL(y) _GLOBAL(CNAME(y)) #define ASENTRY_NOPROF(y) _ENTRY(ASMNAME(y)) #define ENTRY_NOPROF(y) _ENTRY(CNAME(y)) /* Load NIA without affecting branch prediction */ #define LOAD_LR_NIA bcl 20, 31, .+4 /* * Magic sequence to return to native endian. * Overwrites r0 and r11. * * The encoding of the instruction "tdi 0, %r0, 0x48" in opposite endian * happens to be "b . + 8". This is useful because we can write a sequence * of instructions that can execute in either endian. * * Use a sequence of handcoded instructions that switches contexts to the * instruction following the sequence, but with the correct PSL_LE bit. * * The same sequence works for both BE and LE because the xori will flip * the bit to the other state, and the code only runs when running in the * wrong endian. * * This sequence is NMI-reentrant. * * Do not change the length of this sequence without looking at the users, * this is used in size-constrained places like the reset vector! */ #define RETURN_TO_NATIVE_ENDIAN \ tdi 0, %r0, 0x48; /* Endian swapped: b . + 8 */\ b 1f; /* Will fall through to here if correct */\ .long 0xa600607d; /* mfmsr %r11 */\ .long 0x00000038; /* li %r0, 0 */\ .long 0x6401617d; /* mtmsrd %r0, 1 (L=1 EE,RI bits only) */\ .long 0x01006b69; /* xori %r11, %r11, 0x1 (PSL_LE) */\ .long 0xa602087c; /* mflr %r0 */\ .long 0x05009f42; /* LOAD_LR_NIA */\ .long 0xa6037b7d; /* 0: mtsrr1 %r11 */\ .long 0xa602687d; /* mflr %r11 */\ .long 0x18006b39; /* addi %r11, %r11, (1f - 0b) */\ .long 0xa6037a7d; /* mtsrr0 %r11 */\ .long 0xa603087c; /* mtlr %r0 */\ .long 0x2400004c; /* rfid */\ 1: /* RETURN_TO_NATIVE_ENDIAN */ #define ASMSTR .asciz #define RCSID(x) .text; .asciz x #undef __FBSDID #if !defined(lint) && !defined(STRIP_FBSDID) #define __FBSDID(s) .ident s #else #define __FBSDID(s) /* nothing */ #endif /* not lint and not STRIP_FBSDID */ #define WEAK_REFERENCE(sym, alias) \ .weak alias; \ .equ alias,sym #ifdef __STDC__ #define WARN_REFERENCES(_sym,_msg) \ .section .gnu.warning. 
## _sym ; .ascii _msg ; .text #else #define WARN_REFERENCES(_sym,_msg) \ .section .gnu.warning./**/_sym ; .ascii _msg ; .text #endif /* __STDC__ */ #endif /* !_MACHINE_ASM_H_ */ Index: head/sys/powerpc/mambo/mambocall.S =================================================================== --- head/sys/powerpc/mambo/mambocall.S (revision 368353) +++ head/sys/powerpc/mambo/mambocall.S (revision 368354) @@ -1,39 +1,39 @@ /*- * Copyright 2008 by Nathan Whitehorn. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include .text ASENTRY(mambocall) /* * Use the special Mambo callout opcode and whatever arguments we * were passed. Then return whatever Mambo returned. */ .long 0x000EAEB0 blr - +ASEND(mambocall) Index: head/sys/powerpc/ofw/ofwcall32.S =================================================================== --- head/sys/powerpc/ofw/ofwcall32.S (revision 368353) +++ head/sys/powerpc/ofw/ofwcall32.S (revision 368354) @@ -1,177 +1,178 @@ /*- * Copyright (C) 2009-2011 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #define OFWSTKSZ 4096 /* 4K Open Firmware stack */ /* * Globals */ .data GLOBAL(ofmsr) .long 0, 0, 0, 0, 0 /* msr/sprg0-3 used in Open Firmware */ GLOBAL(rtasmsr) .long 0 GLOBAL(openfirmware_entry) .long 0 /* Open Firmware entry point */ GLOBAL(rtas_entry) .long 0 /* RTAS entry point */ .align 4 ofwstk: .space OFWSTKSZ rtas_regsave: .space 4 /* * Open Firmware Entry Point. May need to enter real mode. * * C prototype: int ofwcall(void *callbuffer); */ ASENTRY(ofwcall) mflr %r0 stw %r0,4(%r1) /* Record the old MSR */ mfmsr %r6 /* GOT pointer in r7 */ bl 1f 1: mflr %r7 addis %r7,%r7,(_GLOBAL_OFFSET_TABLE_-1b)@ha addi %r7,%r7,(_GLOBAL_OFFSET_TABLE_-1b)@l /* read client interface handler */ lwz %r4,openfirmware_entry@got(%r7) lwz %r4,0(%r4) /* * Set the MSR to the OF value. This has the side effect of disabling * exceptions, which prevents preemption later. */ lwz %r5,ofmsr@got(%r7) lwz %r5,0(%r5) mtmsr %r5 isync /* * Set up OF stack. This needs to be potentially accessible in real mode * The pointer to the current kernel stack is placed at the very * top of the stack along with the old MSR so we can get them back * later. */ mr %r5,%r1 lwz %r1,ofwstk@got(%r7) addi %r1,%r1,(OFWSTKSZ-32) stw %r5,20(%r1) /* Save real stack pointer */ stw %r2,24(%r1) /* Save curthread */ stw %r6,28(%r1) /* Save old MSR */ li %r5,0 stw %r5,4(%r1) stw %r5,0(%r1) /* Finally, branch to OF */ mtctr %r4 bctrl /* Reload stack pointer and MSR from the OFW stack */ lwz %r6,28(%r1) lwz %r2,24(%r1) lwz %r1,20(%r1) /* Now set the real MSR */ mtmsr %r6 isync /* Return */ lwz %r0,4(%r1) mtlr %r0 blr +ASEND(ofwcall) /* * RTAS Entry Point. Similar to the OF one, but simpler (no separate stack) * * C prototype: int rtascall(void *callbuffer, void *rtas_privdat); */ ASENTRY(rtascall) mflr %r0 stw %r0,4(%r1) /* GOT pointer in r7 */ bl 1f 1: mflr %r7 addis %r7,%r7,(_GLOBAL_OFFSET_TABLE_-1b)@ha addi %r7,%r7,(_GLOBAL_OFFSET_TABLE_-1b)@l /* Record the old MSR to real-mode-accessible area */ mfmsr %r0 lwz %r5,rtas_regsave@got(%r7) stw %r0,0(%r5) /* read client interface handler */ lwz %r5,rtas_entry@got(%r7) lwz %r5,0(%r5) /* Set the MSR to the RTAS value */ lwz %r6,rtasmsr@got(%r7) lwz %r6,0(%r6) mtmsr %r6 isync /* Branch to RTAS */ mtctr %r5 bctrl /* GOT pointer in r7 */ bl 1f 1: mflr %r7 addis %r7,%r7,(_GLOBAL_OFFSET_TABLE_-1b)@ha addi %r7,%r7,(_GLOBAL_OFFSET_TABLE_-1b)@l /* Now set the MSR back */ lwz %r6,rtas_regsave@got(%r7) lwz %r6,0(%r6) mtmsr %r6 isync /* And return */ lwz %r0,4(%r1) mtlr %r0 blr - +ASEND(rtascall) Index: head/sys/powerpc/ofw/ofwcall64.S =================================================================== --- head/sys/powerpc/ofw/ofwcall64.S (revision 368353) +++ head/sys/powerpc/ofw/ofwcall64.S (revision 368354) @@ -1,383 +1,384 @@ /*- * Copyright (C) 2009-2011 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include "opt_platform.h" #define OFWSTKSZ 4096 /* 4K Open Firmware stack */ /* * Globals */ .data .align 4 ofwstk: .space OFWSTKSZ rtas_regsave: .space 32 /* 4 * sizeof(register_t) */ GLOBAL(ofmsr) .llong 0, 0, 0, 0, 0 /* msr/sprg0-3 used in Open Firmware */ GLOBAL(rtasmsr) .llong 0 GLOBAL(openfirmware_entry) .llong 0 /* Open Firmware entry point */ GLOBAL(rtas_entry) .llong 0 /* RTAS entry point */ TOC_ENTRY(ofmsr) TOC_ENTRY(ofwstk) TOC_ENTRY(rtasmsr) TOC_ENTRY(openfirmware_entry) TOC_ENTRY(rtas_entry) TOC_ENTRY(rtas_regsave) /* * Open Firmware Real-mode Entry Point. This is a huge pain. */ ASENTRY_NOPROF(ofwcall) mflr %r8 std %r8,16(%r1) stdu %r1,-208(%r1) /* * We need to save the following, because OF's register save/ * restore code assumes that the contents of registers are * at most 32 bits wide: lr, cr, r2, r13-r31, the old MSR. These * get placed in that order in the stack. */ mfcr %r4 std %r4,48(%r1) std %r13,56(%r1) std %r14,64(%r1) std %r15,72(%r1) std %r16,80(%r1) std %r17,88(%r1) std %r18,96(%r1) std %r19,104(%r1) std %r20,112(%r1) std %r21,120(%r1) std %r22,128(%r1) std %r23,136(%r1) std %r24,144(%r1) std %r25,152(%r1) std %r26,160(%r1) std %r27,168(%r1) std %r28,176(%r1) std %r29,184(%r1) std %r30,192(%r1) std %r31,200(%r1) /* Record the old MSR */ mfmsr %r6 /* read client interface handler */ addis %r4,%r2,TOC_REF(openfirmware_entry)@ha ld %r4,TOC_REF(openfirmware_entry)@l(%r4) ld %r4,0(%r4) /* Get OF stack pointer */ addis %r7,%r2,TOC_REF(ofwstk)@ha ld %r7,TOC_REF(ofwstk)@l(%r7) addi %r7,%r7,OFWSTKSZ-40 /* * Set the MSR to the OF value. This has the side effect of disabling * exceptions, which is important for the next few steps. * This does NOT, however, cause us to switch endianness. */ addis %r5,%r2,TOC_REF(ofmsr)@ha ld %r5,TOC_REF(ofmsr)@l(%r5) ld %r5,0(%r5) #if defined(__LITTLE_ENDIAN__) && defined(QEMU) /* QEMU hack: qemu does not emulate mtmsrd correctly! */ ori %r5,%r5,1 /* Leave PSR_LE set */ #endif mtmsrd %r5 isync /* * Set up OF stack. This needs to be accessible in real mode and * use the 32-bit ABI stack frame format. The pointer to the current * kernel stack is placed at the very top of the stack along with * the old MSR so we can get them back later. 
*/ mr %r5,%r1 mr %r1,%r7 std %r5,8(%r1) /* Save real stack pointer */ std %r2,16(%r1) /* Save old TOC */ std %r6,24(%r1) /* Save old MSR */ std %r8,32(%r1) /* Save high 32-bits of the kernel's PC */ li %r5,0 stw %r5,4(%r1) stw %r5,0(%r1) #ifdef __LITTLE_ENDIAN__ /* Atomic context switch w/ endian change */ mtmsrd %r5, 1 /* Clear PSL_EE|PSL_RI */ addis %r5,%r2,TOC_REF(ofmsr)@ha ld %r5,TOC_REF(ofmsr)@l(%r5) ld %r5,0(%r5) mtsrr0 %r4 mtsrr1 %r5 LOAD_LR_NIA 1: mflr %r5 addi %r5, %r5, (2f-1b) mtlr %r5 li %r5, 0 rfid 2: RETURN_TO_NATIVE_ENDIAN #else /* Finally, branch to OF */ mtctr %r4 bctrl #endif /* Reload stack pointer, MSR, and reference PC from the OFW stack */ ld %r7,32(%r1) ld %r6,24(%r1) ld %r2,16(%r1) ld %r1,8(%r1) /* Get back to the MSR/PC we want, using the cached high bits of PC */ mtsrr1 %r6 clrrdi %r7,%r7,32 bl 1f 1: mflr %r8 or %r8,%r8,%r7 addi %r8,%r8,2f-1b mtsrr0 %r8 rfid /* Turn on MMU, exceptions, and 64-bit mode */ 2: /* Sign-extend the return value from OF */ extsw %r3,%r3 /* Restore all the non-volatile registers */ ld %r5,48(%r1) mtcr %r5 ld %r13,56(%r1) ld %r14,64(%r1) ld %r15,72(%r1) ld %r16,80(%r1) ld %r17,88(%r1) ld %r18,96(%r1) ld %r19,104(%r1) ld %r20,112(%r1) ld %r21,120(%r1) ld %r22,128(%r1) ld %r23,136(%r1) ld %r24,144(%r1) ld %r25,152(%r1) ld %r26,160(%r1) ld %r27,168(%r1) ld %r28,176(%r1) ld %r29,184(%r1) ld %r30,192(%r1) ld %r31,200(%r1) /* Restore the stack and link register */ ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(ofwcall) /* * RTAS 32-bit Entry Point. Similar to the OF one, but simpler (no separate * stack) * * C prototype: int rtascall(void *callbuffer, void *rtas_privdat); */ ASENTRY_NOPROF(rtascall) mflr %r9 std %r9,16(%r1) stdu %r1,-208(%r1) /* * We need to save the following, because RTAS's register save/ * restore code assumes that the contents of registers are * at most 32 bits wide: lr, cr, r2, r13-r31, the old MSR. These * get placed in that order in the stack. */ mfcr %r5 std %r5,48(%r1) std %r13,56(%r1) std %r14,64(%r1) std %r15,72(%r1) std %r16,80(%r1) std %r17,88(%r1) std %r18,96(%r1) std %r19,104(%r1) std %r20,112(%r1) std %r21,120(%r1) std %r22,128(%r1) std %r23,136(%r1) std %r24,144(%r1) std %r25,152(%r1) std %r26,160(%r1) std %r27,168(%r1) std %r28,176(%r1) std %r29,184(%r1) std %r30,192(%r1) std %r31,200(%r1) /* Record the old MSR */ mfmsr %r6 /* Read RTAS entry and reg save area pointers */ addis %r5,%r2,TOC_REF(rtas_entry)@ha ld %r5,TOC_REF(rtas_entry)@l(%r5) ld %r5,0(%r5) addis %r8,%r2,TOC_REF(rtas_regsave)@ha ld %r8,TOC_REF(rtas_regsave)@l(%r8) /* * Set the MSR to the RTAS value. This has the side effect of disabling * exceptions, which is important for the next few steps. */ addis %r7,%r2,TOC_REF(rtasmsr)@ha ld %r7,TOC_REF(rtasmsr)@l(%r7) ld %r7,0(%r7) #ifdef __LITTLE_ENDIAN__ /* QEMU hack: qemu does not emulate mtmsrd correctly! */ ori %r7,%r7,1 /* Leave PSR_LE set */ #endif mtmsrd %r7 isync /* * Set up RTAS register save area, so that we can get back all of * our 64-bit pointers. Save our stack pointer, the TOC, and the MSR. * Put this in r1, since RTAS is obliged to save it. Kernel globals * are below 4 GB, so this is safe. 
*/ mr %r7,%r1 mr %r1,%r8 std %r7,0(%r1) /* Save 64-bit stack pointer */ std %r2,8(%r1) /* Save TOC */ std %r6,16(%r1) /* Save MSR */ std %r9,24(%r1) /* Save reference PC for high 32 bits */ #ifdef __LITTLE_ENDIAN__ /* Atomic context switch w/ endian change */ li %r7, 0 mtmsrd %r7, 1 /* Clear PSL_EE|PSL_RI */ addis %r7,%r2,TOC_REF(rtasmsr)@ha ld %r7,TOC_REF(rtasmsr)@l(%r7) ld %r7,0(%r7) mtsrr0 %r5 mtsrr1 %r7 LOAD_LR_NIA 1: mflr %r5 addi %r5, %r5, (2f-1b) mtlr %r5 li %r5, 0 rfid 2: RETURN_TO_NATIVE_ENDIAN #else /* Finally, branch to RTAS */ mtctr %r5 bctrl #endif /* * Reload stack pointer, MSR, reg PC from the reg save area in r1. We * are running in 32-bit mode at this point, so it doesn't matter if r1 * has become sign-extended. */ ld %r7,24(%r1) ld %r6,16(%r1) ld %r2,8(%r1) ld %r1,0(%r1) /* * Get back to the right PC. We need to atomically re-enable * exceptions, 64-bit mode, and the MMU. One thing that has likely * happened is that, if we were running in the high-memory direct * map, we no longer are as a result of LR truncation in RTAS. * Fix this by copying the high-order bits of the LR at function * entry onto the current PC and then jumping there while flipping * all the MSR bits. */ mtsrr1 %r6 clrrdi %r7,%r7,32 bl 1f 1: mflr %r8 or %r8,%r8,%r7 addi %r8,%r8,2f-1b mtsrr0 %r8 rfid /* Turn on MMU, exceptions, and 64-bit mode */ 2: /* Sign-extend the return value from RTAS */ extsw %r3,%r3 /* Restore all the non-volatile registers */ ld %r5,48(%r1) mtcr %r5 ld %r13,56(%r1) ld %r14,64(%r1) ld %r15,72(%r1) ld %r16,80(%r1) ld %r17,88(%r1) ld %r18,96(%r1) ld %r19,104(%r1) ld %r20,112(%r1) ld %r21,120(%r1) ld %r22,128(%r1) ld %r23,136(%r1) ld %r24,144(%r1) ld %r25,152(%r1) ld %r26,160(%r1) ld %r27,168(%r1) ld %r28,176(%r1) ld %r29,184(%r1) ld %r30,192(%r1) ld %r31,200(%r1) /* Restore the stack and link register */ ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr - +ASEND(rtascall) Index: head/sys/powerpc/powernv/opalcall.S =================================================================== --- head/sys/powerpc/powernv/opalcall.S (revision 368353) +++ head/sys/powerpc/powernv/opalcall.S (revision 368354) @@ -1,132 +1,132 @@ /*- * Copyright (C) 2015 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
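
For orientation, a hedged C-side sketch of how the rtascall entry point above might be driven. The prototype matches the one quoted in the source comment (int rtascall(void *callbuffer, void *rtas_privdat)); the rtas_args_sketch layout and field names below follow the generic RTAS calling convention and are assumptions, not taken from this tree.

/*
 * Illustrative only: the buffer layout follows the generic RTAS calling
 * convention (token, nargs, nret, then argument and return slots); the
 * struct and field names here are assumptions, not from this tree.
 */
#include <stdint.h>

struct rtas_args_sketch {
	uint32_t token;			/* RTAS service token */
	uint32_t nargs;			/* number of input words */
	uint32_t nreturns;		/* number of return words */
	uint32_t args[16];		/* inputs followed by outputs */
};

int rtascall(void *callbuffer, void *rtas_privdat);	/* .S entry above */

static int
rtas_invoke_sketch(void *rtas_privdat, struct rtas_args_sketch *buf)
{
	/*
	 * rtascall() drops to 32-bit real mode, so in a real kernel the
	 * buffer (and rtas_privdat) must be physically addressable below
	 * 4 GB -- the same constraint the comments above call out when
	 * stashing the 64-bit stack/TOC/MSR pointers in rtas_regsave.
	 */
	return (rtascall(buf, rtas_privdat));
}
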
* * $FreeBSD$ */ #include #include "opt_platform.h" GLOBAL(opal_entrypoint) .llong 0 GLOBAL(opal_data) .llong 0 GLOBAL(opal_msr) .llong 0 TOC_ENTRY(opal_entrypoint) TOC_ENTRY(opal_data) TOC_ENTRY(opal_msr) ASENTRY(opal_call) /* Args: * r3: opal token * r4-r10 opal arguments */ /* Save call stuff on stack */ mflr %r0 std %r0,16(%r1) std %r2,-16(%r1) mfcr %r0 std %r0,8(%r1) /* Load OPAL entry information */ mr %r0,%r3 addis %r3,%r2,TOC_REF(opal_entrypoint)@ha ld %r3,TOC_REF(opal_entrypoint)@l(%r3) ld %r3,0(%r3) mtctr %r3 /* Save MSR in non-volatile scratch register and turn off translation */ std %r31,-8(%r1) mfmsr %r31 /* Load last bits from the TOC */ addis %r3,%r2,TOC_REF(opal_msr)@ha ld %r3,TOC_REF(opal_msr)@l(%r3) ld %r3,0(%r3) addis %r2,%r2,TOC_REF(opal_data)@ha ld %r2,TOC_REF(opal_data)@l(%r2) ld %r2,0(%r2) #if defined(__LITTLE_ENDIAN__) && defined(QEMU) /* QEMU hack: qemu does not emulate mtmsrd correctly! */ ori %r3,%r3,1 /* Leave PSR_LE set */ #endif mtmsrd %r3 isync #if defined(__LITTLE_ENDIAN__) && defined(QEMU) /* Clean up from qemu hack */ xori %r3,%r3,1 #endif #ifdef __LITTLE_ENDIAN__ mtsrr1 %r3 #endif /* Shift registers over */ mr %r3,%r4 mr %r4,%r5 mr %r5,%r6 mr %r6,%r7 mr %r7,%r8 mr %r8,%r9 mr %r9,%r10 #ifdef __LITTLE_ENDIAN__ /* We need to rfid to switch endian. */ mfctr %r11 mtsrr0 %r11 LOAD_LR_NIA 1: mflr %r11 addi %r11, %r11, (2f-1b) mtlr %r11 /* Call OPAL */ rfid 2: RETURN_TO_NATIVE_ENDIAN #else /* Call OPAL */ bctrl #endif /* Restore MSR */ mtmsrd %r31 isync ld %r31,-8(%r1) /* Restore call stuff from stack */ ld %r0,16(%r1) mtlr %r0 ld %r2,-16(%r1) ld %r0,8(%r1) mtcr %r0 /* And return */ blr - +ASEND(opal_call) Index: head/sys/powerpc/powerpc/cpu_subr64.S =================================================================== --- head/sys/powerpc/powerpc/cpu_subr64.S (revision 368353) +++ head/sys/powerpc/powerpc/cpu_subr64.S (revision 368354) @@ -1,98 +1,99 @@ /*- * Copyright (c) 2017-2018 QCM Technologies. * Copyright (c) 2017-2018 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
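
The opal_call routine just closed above takes the OPAL token in r3, parks it in r0 for the firmware, and shifts r4-r10 down into OPAL's argument registers. A hedged C-side sketch follows; the token value and the variadic prototype are assumptions in the usual OPAL style, not declarations from this tree.

/*
 * Sketch under assumptions: OPAL_CONSOLE_WRITE and the variadic
 * prototype follow the usual OPAL convention; FreeBSD's real
 * declaration lives in the powernv headers and may differ.
 */
#include <stdint.h>

#define	OPAL_CONSOLE_WRITE	1	/* illustrative token value */

int64_t opal_call(uint64_t token, ...);	/* assembly stub above */

static void
opal_putc_sketch(int64_t term, char c)
{
	uint64_t len = 1;

	/*
	 * The stub moves the token from r3 into r0 and shifts r4-r10
	 * down so they arrive as OPAL's r3-r9, as shown above.
	 */
	(void)opal_call(OPAL_CONSOLE_WRITE, (uint64_t)term,
	    (uint64_t)&len, (uint64_t)&c);
}
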
* * $FreeBSD$ */ #include "assym.inc" #include .p2align 3 ENTRY(enter_idle_powerx) mfsprg0 %r3 /* Get the pcpu pointer */ ld %r3,PC_CURTHREAD(%r3) /* Get current thread */ ld %r3,TD_PCB(%r3) /* Get PCB of current thread */ std %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs. */ std %r13,PCB_CONTEXT+1*8(%r3) std %r14,PCB_CONTEXT+2*8(%r3) std %r15,PCB_CONTEXT+3*8(%r3) std %r16,PCB_CONTEXT+4*8(%r3) std %r17,PCB_CONTEXT+5*8(%r3) std %r18,PCB_CONTEXT+6*8(%r3) std %r19,PCB_CONTEXT+7*8(%r3) std %r20,PCB_CONTEXT+8*8(%r3) std %r21,PCB_CONTEXT+9*8(%r3) std %r22,PCB_CONTEXT+10*8(%r3) std %r23,PCB_CONTEXT+11*8(%r3) std %r24,PCB_CONTEXT+12*8(%r3) std %r25,PCB_CONTEXT+13*8(%r3) std %r26,PCB_CONTEXT+14*8(%r3) std %r27,PCB_CONTEXT+15*8(%r3) std %r28,PCB_CONTEXT+16*8(%r3) std %r29,PCB_CONTEXT+17*8(%r3) std %r30,PCB_CONTEXT+18*8(%r3) std %r31,PCB_CONTEXT+19*8(%r3) mfcr %r16 /* Save the condition register */ std %r16,PCB_CR(%r3) mflr %r16 /* Save the link register */ std %r16,PCB_LR(%r3) std %r1,PCB_SP(%r3) /* Save the stack pointer */ std %r2,PCB_TOC(%r3) /* Save the TOC pointer */ bl 1f 1: mflr %r3 addi %r3,%r3,power_save_sequence-1b mtsrr0 %r3 /* Set MSR */ li %r3,0 #ifdef __LITTLE_ENDIAN__ ori %r3,%r3,(PSL_ME | PSL_RI | PSL_LE) #else ori %r3,%r3,(PSL_ME | PSL_RI) #endif li %r8,0x9 /* PSL_SF and PSL_HV */ insrdi %r3,%r8,4,0 mtsrr1 %r3 rfid .p2align 2 power_save_sequence: bl 1f .llong 0x0 /* Playground for power-save sequence */ 1: mflr %r3 /* Start power-save sequence */ std %r2,0(%r3) ptesync ld %r2,0(%r3) 2: cmpd %r2,%r2 bne 2b nap b . +END(enter_idle_powerx) Index: head/sys/powerpc/powerpc/setjmp.S =================================================================== --- head/sys/powerpc/powerpc/setjmp.S (revision 368353) +++ head/sys/powerpc/powerpc/setjmp.S (revision 368354) @@ -1,114 +1,116 @@ /* $FreeBSD$ */ /* from: NetBSD: setjmp.S,v 1.1 1998/01/27 15:13:12 sakamoto Exp $ */ /* from: OpenBSD: setjmp.S,v 1.2 1996/12/28 06:22:18 rahnds Exp */ /* kernel version of this file, does not have signal goop */ /* int setjmp(jmp_buf env) */ #include #ifdef __powerpc64__ #define LD_REG ld #define ST_REG std #define REGWIDTH 8 #else #define LD_REG lwz #define ST_REG stw #define REGWIDTH 4 #endif #define JMP_r1 1*REGWIDTH #define JMP_r2 2*REGWIDTH #define JMP_r14 3*REGWIDTH #define JMP_r15 4*REGWIDTH #define JMP_r16 5*REGWIDTH #define JMP_r17 6*REGWIDTH #define JMP_r18 7*REGWIDTH #define JMP_r19 8*REGWIDTH #define JMP_r20 9*REGWIDTH #define JMP_r21 10*REGWIDTH #define JMP_r22 11*REGWIDTH #define JMP_r23 12*REGWIDTH #define JMP_r24 13*REGWIDTH #define JMP_r25 14*REGWIDTH #define JMP_r26 15*REGWIDTH #define JMP_r27 16*REGWIDTH #define JMP_r28 17*REGWIDTH #define JMP_r29 18*REGWIDTH #define JMP_r30 19*REGWIDTH #define JMP_r31 20*REGWIDTH #define JMP_lr 21*REGWIDTH #define JMP_cr 22*REGWIDTH #define JMP_ctr 23*REGWIDTH #define JMP_xer 24*REGWIDTH ASENTRY_NOPROF(setjmp) ST_REG 31, JMP_r31(3) /* r1, r2, r14-r30 */ ST_REG 1, JMP_r1 (3) ST_REG 2, JMP_r2 (3) ST_REG 14, JMP_r14(3) ST_REG 15, JMP_r15(3) ST_REG 16, JMP_r16(3) ST_REG 17, JMP_r17(3) ST_REG 18, JMP_r18(3) ST_REG 19, JMP_r19(3) ST_REG 20, JMP_r20(3) ST_REG 21, JMP_r21(3) ST_REG 22, JMP_r22(3) ST_REG 23, JMP_r23(3) ST_REG 24, JMP_r24(3) ST_REG 25, JMP_r25(3) ST_REG 26, JMP_r26(3) ST_REG 27, JMP_r27(3) ST_REG 28, JMP_r28(3) ST_REG 29, JMP_r29(3) ST_REG 30, JMP_r30(3) /* cr, lr, ctr, xer */ mfcr 0 ST_REG 0, JMP_cr(3) mflr 0 ST_REG 0, JMP_lr(3) mfctr 0 ST_REG 0, JMP_ctr(3) mfxer 0 ST_REG 0, JMP_xer(3) /* f14-f31, fpscr */ li 3, 0 blr +ASEND(setjmp) .extern 
sigsetmask ASENTRY_NOPROF(longjmp) LD_REG 31, JMP_r31(3) /* r1, r2, r14-r30 */ LD_REG 1, JMP_r1 (3) LD_REG 2, JMP_r2 (3) LD_REG 14, JMP_r14(3) LD_REG 15, JMP_r15(3) LD_REG 16, JMP_r16(3) LD_REG 17, JMP_r17(3) LD_REG 18, JMP_r18(3) LD_REG 19, JMP_r19(3) LD_REG 20, JMP_r20(3) LD_REG 21, JMP_r21(3) LD_REG 22, JMP_r22(3) LD_REG 23, JMP_r23(3) LD_REG 24, JMP_r24(3) LD_REG 25, JMP_r25(3) LD_REG 26, JMP_r26(3) LD_REG 27, JMP_r27(3) LD_REG 28, JMP_r28(3) LD_REG 29, JMP_r29(3) LD_REG 30, JMP_r30(3) /* cr, lr, ctr, xer */ LD_REG 0, JMP_cr(3) mtcr 0 LD_REG 0, JMP_lr(3) mtlr 0 LD_REG 0, JMP_ctr(3) mtctr 0 LD_REG 0, JMP_xer(3) mtxer 0 /* f14-f31, fpscr */ mr 3, 4 blr +ASEND(longjmp) Index: head/sys/powerpc/powerpc/support.S =================================================================== --- head/sys/powerpc/powerpc/support.S (revision 368353) +++ head/sys/powerpc/powerpc/support.S (revision 368354) @@ -1,539 +1,564 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Assembly variants of various functions, for those that don't need generic C * implementations. Currently this includes: * * - Direct-access versions of copyin/copyout methods. * - These are used by Radix AIM pmap (ISA 3.0), and Book-E, to avoid * unnecessary pmap_map_usr_ptr() calls. 
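
A contract sketch for those direct copy routines may help before the macros below: they implement the standard copyin(9)/copyout(9) semantics, returning 0 on success or EFAULT on a bad user address, with the fault window armed and disarmed exactly as SET_COPYFAULT/CLEAR_FAULT do further down. The helper below is illustrative only.

/*
 * Contract sketch (standard copyin(9)/copyout(9) semantics).  Under
 * AIM the ENTRY_DIRECT() macro below emits these as copyin_direct/
 * copyout_direct; the kernel-facing interface remains copyin(9).
 */
#include <sys/types.h>
#include <sys/errno.h>

int copyin(const void *uaddr, void *kaddr, size_t len);
int copyout(const void *kaddr, void *uaddr, size_t len);

static int
fetch_user_long_sketch(const void *uaddr, long *out)
{
	long v;
	int error;

	/*
	 * SET_COPYFAULT arms pcb_onfault before the access; a fault in
	 * bcopy_generic then lands in copy_fault, which clears the
	 * handler and returns EFAULT instead of faulting the kernel.
	 */
	error = copyin(uaddr, &v, sizeof(v));
	if (error == 0)
		*out = v;
	return (error);
}
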
*/ #include "assym.inc" #include "opt_sched.h" #include #include #include #include #include #include #include #ifdef _CALL_ELF .abiversion _CALL_ELF #endif #ifdef __powerpc64__ #define LOAD ld #define STORE std #define WORD 8 #define CMPI cmpdi #define CMPLI cmpldi /* log_2(8 * WORD) */ #define LOOP_LOG 6 #define LOG_WORD 3 #else #define LOAD lwz #define STORE stw #define WORD 4 #define CMPI cmpwi #define CMPLI cmplwi /* log_2(8 * WORD) */ #define LOOP_LOG 5 #define LOG_WORD 2 #endif #ifdef AIM -#define ENTRY_DIRECT(x) ENTRY(x ## _direct) +#define ENTRY_DIRECT(x) ENTRY(x ## _direct) +#define END_DIRECT(x) END(x ## _direct) #else #define ENTRY_DIRECT(x) ENTRY(x) +#define END_DIRECT(x) END(x) #endif #ifdef __powerpc64__ #define PROLOGUE ;\ mflr %r0 ;\ std %r0, 16(%r1) ;\ #define EPILOGUE ;\ ld %r0, 16(%r1) ;\ mtlr %r0 ;\ blr ;\ nop #define VALIDATE_TRUNCATE_ADDR_COPY VALIDATE_ADDR_COPY #define VALIDATE_ADDR_COPY(raddr, len) \ srdi %r0, raddr, 52 ;\ cmpwi %r0, 1 ;\ bge- copy_fault ;\ nop #define VALIDATE_ADDR_FUSU(raddr) ;\ srdi %r0, raddr, 52 ;\ cmpwi %r0, 1 ;\ bge- fusufault ;\ nop #else #define PROLOGUE ;\ mflr %r0 ;\ stw %r0, 4(%r1) ;\ #define EPILOGUE ;\ lwz %r0, 4(%r1) ;\ mtlr %r0 ;\ blr ;\ nop /* %r0 is temporary */ /* * Validate address and length are valid. * For VALIDATE_ADDR_COPY() have to account for wraparound. */ #define VALIDATE_ADDR_COPY(raddr, len) \ lis %r0, VM_MAXUSER_ADDRESS@h ;\ ori %r0, %r0, VM_MAXUSER_ADDRESS@l ;\ cmplw %r0, raddr ;\ blt- copy_fault ;\ add %r0, raddr, len ;\ cmplw 7, %r0, raddr ;\ blt- 7, copy_fault ;\ mtcrf 0x80, %r0 ;\ bt- 0, copy_fault ;\ nop #define VALIDATE_TRUNCATE_ADDR_COPY(raddr, len) \ lis %r0, VM_MAXUSER_ADDRESS@h ;\ ori %r0, %r0, VM_MAXUSER_ADDRESS@l ;\ cmplw %r0, raddr ;\ blt- copy_fault ;\ sub %r0, %r0, raddr ;\ cmplw len, %r0 ;\ isel len, len, %r0, 0 ;\ #define VALIDATE_ADDR_FUSU(raddr) \ lis %r0, VM_MAXUSER_ADDRESS@h ;\ ori %r0, %r0, VM_MAXUSER_ADDRESS@l ;\ cmplw %r0, raddr ;\ ble- fusufault #endif #define PCPU(reg) mfsprg reg, 0 #define SET_COPYFAULT(raddr, rpcb, len) \ VALIDATE_ADDR_COPY(raddr, len) ;\ PCPU(%r9) ;\ li %r0, COPYFAULT ;\ LOAD rpcb, PC_CURPCB(%r9) ;\ STORE %r0, PCB_ONFAULT(rpcb) ;\ #define SET_COPYFAULT_TRUNCATE(raddr, rpcb, len)\ VALIDATE_TRUNCATE_ADDR_COPY(raddr, len) ;\ PCPU(%r9) ;\ li %r0, COPYFAULT ;\ LOAD rpcb, PC_CURPCB(%r9) ;\ STORE %r0, PCB_ONFAULT(rpcb) #define SET_FUSUFAULT(raddr, rpcb) \ VALIDATE_ADDR_FUSU(raddr) ;\ PCPU(%r9) ;\ li %r0, FUSUFAULT ;\ LOAD rpcb, PC_CURPCB(%r9) ;\ STORE %r0, PCB_ONFAULT(rpcb) #define CLEAR_FAULT_NO_CLOBBER(rpcb) \ PCPU(%r9) ;\ LOAD rpcb, PC_CURPCB(%r9) ;\ li %r0, 0 ;\ STORE %r0, PCB_ONFAULT(rpcb) #define CLEAR_FAULT(rpcb) \ CLEAR_FAULT_NO_CLOBBER(rpcb) ;\ li %r3, 0 /* * bcopy(src, dst, len) * %r3 %r4 %r5 * * %r7 is the pcb pointer * * %r0 and %r8-%r10 are volatile * %r11 and %r12 are generally volatile, used in linking and exception * handling. Can be clobbered here. * * Does not allocate or use stack space, but clobbers all volatile registers. */ #define rs %r3 #define rd %r4 #define rl %r5 #define t1 %r6 #define t2 %r7 #define t3 %r8 #define t4 %r9 #define t5 %r10 #define t6 %r11 #define t7 %r12 #define t8 %r0 #define Thresh WORD * 8 #define W4 3 #define W2 2 #define W1 1 #define WORDS(n) (32 - LOG_WORD - W##n) .text ENTRY(bcopy_generic) CMPLI 0, %r5, 0 beq .Lend dcbtst 0, rd dcbt 0, rs CMPLI rl, Thresh blt .Lsmall b .Llarge /* memcpy */ /* ... 
*/ .Lsmall: /* < 8 words remaining */ mtcrf 0x3, rl .Lsmall_start: bf WORDS(4), 0f LOAD t1, 0(rs) LOAD t2, WORD*1(rs) LOAD t3, WORD*2(rs) LOAD t4, WORD*3(rs) addi rs, rs, WORD*4 STORE t1, 0(rd) STORE t2, WORD*1(rd) STORE t3, WORD*2(rd) STORE t4, WORD*3(rd) addi rd, rd, WORD*4 0: /* < 4 words remaining */ bf WORDS(2), 1f LOAD t1, 0(rs) LOAD t2, WORD*1(rs) addi rs, rs, WORD*2 STORE t1, 0(rd) STORE t2, WORD*1(rd) addi rd, rd, WORD*2 1: /* < 2 words remaining */ bf WORDS(1), 2f LOAD t1, 0(rs) addi rs, rs, WORD STORE t1, 0(rd) addi rd, rd, WORD 2: /* < 1 word remaining */ #ifdef __powerpc64__ bf 29, 3f lwz t1, 0(rs) addi rs, rs, 4 stw t1, 0(rd) addi rd, rd, 4 3: /* < 4 bytes remaining */ #endif bf 30, 4f lhz t1, 0(rs) addi rs, rs, 2 sth t1, 0(rd) addi rd, rd, 2 4: /* < 2 bytes remaining */ bf 31, .Lout lbz t1, 0(rs) addi rs, rs, 1 stb t1, 0(rd) addi rd, rd, 1 b .Lout .align 4 .Llarge: neg t3, rd andi. t6, t3, WORD-1 /* Align rd to word size */ mtctr t6 sub rl, rl, t6 beq+ .Llargealigned 1: lbz t1, 0(rs) addi rs, rs, 1 stb t1, 0(rd) addi rd, rd, 1 bdnz 1b .Llargealigned: srwi. t2, rl, LOOP_LOG /* length >> log_2(loop_size) => 8W iterations */ mtcrf 0x3, rl beq .Lsmall_start mtctr t2 b 1f .align 5 1: LOAD t1, 0(rs) LOAD t2, WORD(rs) LOAD t3, WORD*2(rs) LOAD t4, WORD*3(rs) LOAD t5, WORD*4(rs) LOAD t6, WORD*5(rs) LOAD t7, WORD*6(rs) LOAD t8, WORD*7(rs) addi rs, rs, WORD*8 STORE t1, 0(rd) STORE t2, WORD*1(rd) STORE t3, WORD*2(rd) STORE t4, WORD*3(rd) STORE t5, WORD*4(rd) STORE t6, WORD*5(rd) STORE t7, WORD*6(rd) STORE t8, WORD*7(rd) addi rd, rd, WORD*8 bdnz 1b b .Lsmall_start .Lout: /* done */ .Lend: blr +END(bcopy_generic) /* * copyout(from_kernel, to_user, len) * %r3, %r4, %r5 */ ENTRY_DIRECT(copyout) PROLOGUE SET_COPYFAULT(%r4, %r7, %r5) bl bcopy_generic nop CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(copyout) /* * copyin(from_user, to_kernel, len) * %r3, %r4, %r5 */ ENTRY_DIRECT(copyin) PROLOGUE SET_COPYFAULT(%r3, %r7, %r5) bl bcopy_generic nop CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(copyin) + /* * copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done) * %r3 %r4 %r5 %r6 */ ENTRY_DIRECT(copyinstr) PROLOGUE SET_COPYFAULT_TRUNCATE(%r3, %r7, %r5) addi %r9, %r5, 1 mtctr %r9 mr %r8, %r3 addi %r8, %r8, -1 addi %r4, %r4, -1 li %r3, ENAMETOOLONG 0: bdz- 2f lbzu %r0, 1(%r8) stbu %r0, 1(%r4) // NULL byte reached ? 
CMPI %r0, 0 beq- 1f b 0b 1: li %r3, 0 2: /* skip storing length if done is NULL */ CMPI %r6, 0 beq- 3f mfctr %r0 sub %r0, %r9, %r0 STORE %r0, 0(%r6) 3: CLEAR_FAULT_NO_CLOBBER(%r7) EPILOGUE +END_DIRECT(copyinstr) ENTRY_DIRECT(subyte) PROLOGUE SET_FUSUFAULT(%r3, %r7) stb %r4, 0(%r3) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(subyte) #ifndef __powerpc64__ ENTRY_DIRECT(suword) PROLOGUE SET_FUSUFAULT(%r3, %r7) stw %r4, 0(%r3) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(suword) #endif ENTRY_DIRECT(suword32) PROLOGUE SET_FUSUFAULT(%r3, %r7) stw %r4, 0(%r3) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(suword32) #ifdef __powerpc64__ ENTRY_DIRECT(suword64) PROLOGUE SET_FUSUFAULT(%r3, %r7) std %r4, 0(%r3) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(suword64) + ENTRY_DIRECT(suword) PROLOGUE SET_FUSUFAULT(%r3, %r7) std %r4, 0(%r3) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(suword) #endif ENTRY_DIRECT(fubyte) PROLOGUE SET_FUSUFAULT(%r3, %r7) lbz %r3, 0(%r3) CLEAR_FAULT_NO_CLOBBER(%r7) EPILOGUE +END_DIRECT(fubyte) ENTRY_DIRECT(fuword16) PROLOGUE SET_FUSUFAULT(%r3, %r7) lhz %r3, 0(%r3) CLEAR_FAULT_NO_CLOBBER(%r7) EPILOGUE +END_DIRECT(fuword16) #ifndef __powerpc64__ ENTRY_DIRECT(fueword) PROLOGUE SET_FUSUFAULT(%r3, %r7) lwz %r0, 0(%r3) stw %r0, 0(%r4) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(fueword) #endif ENTRY_DIRECT(fueword32) PROLOGUE SET_FUSUFAULT(%r3, %r7) lwz %r0, 0(%r3) stw %r0, 0(%r4) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(fueword32) #ifdef __powerpc64__ ENTRY_DIRECT(fueword) PROLOGUE SET_FUSUFAULT(%r3, %r7) ld %r0, 0(%r3) std %r0, 0(%r4) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(fueword) ENTRY_DIRECT(fueword64) PROLOGUE SET_FUSUFAULT(%r3, %r7) ld %r0, 0(%r3) std %r0, 0(%r4) CLEAR_FAULT(%r7) EPILOGUE +END_DIRECT(fueword64) #endif /* * casueword(volatile u_long *base, u_long old, u_long *oldp, u_long new) * %r3 %r4 %r5 %r6 */ #define CASUEWORD32(raddr, rpcb) ;\ PROLOGUE ;\ SET_FUSUFAULT(raddr, rpcb) ;\ li %r8, 0 ;\ 1: ;\ lwarx %r0, 0, %r3 ;\ cmplw %r4, %r0 ;\ bne 2f ;\ stwcx. %r6, 0, %r3 ;\ bne- 3f ;\ b 4f ;\ 2: ;\ stwcx. %r0, 0, %r3 /* clear reservation (74xx) */ ;\ 3: ;\ li %r8, 1 ;\ 4: ;\ stw %r0, 0(%r5) ;\ CLEAR_FAULT_NO_CLOBBER(rpcb) ;\ mr %r3, %r8 ;\ EPILOGUE ENTRY_DIRECT(casueword32) CASUEWORD32(%r3, %r7) +END_DIRECT(casueword32) #ifdef __powerpc64__ #define CASUEWORD64(raddr, rpcb) ;\ PROLOGUE ;\ SET_FUSUFAULT(raddr, rpcb) ;\ li %r8, 0 ;\ 1: ;\ ldarx %r0, 0, %r3 ;\ cmpld %r4, %r0 ;\ bne 2f ;\ stdcx. %r6, 0, %r3 ;\ bne- 3f ;\ b 4f ;\ 2: ;\ stdcx. %r0, 0, %r3 /* clear reservation (74xx) */ ;\ 3: ;\ li %r8, 1 ;\ 4: ;\ std %r0, 0(%r5) ;\ CLEAR_FAULT_NO_CLOBBER(rpcb) ;\ mr %r3, %r8 ;\ EPILOGUE ENTRY_DIRECT(casueword) CASUEWORD64(%r3, %r7) +END_DIRECT(casueword) ENTRY_DIRECT(casueword64) CASUEWORD64(%r3, %r7) +END_DIRECT(casueword64) #else ENTRY_DIRECT(casueword) CASUEWORD32(%r3, %r7) +END_DIRECT(casueword) #endif _NAKED_ENTRY(fusufault) CLEAR_FAULT_NO_CLOBBER(%r7) li %r3, -1 EPILOGUE +_END(fusufault) _NAKED_ENTRY(copy_fault) CLEAR_FAULT_NO_CLOBBER(%r7) li %r3, EFAULT EPILOGUE +_END(copy_fault) Index: head/sys/powerpc/powerpc/swtch32.S =================================================================== --- head/sys/powerpc/powerpc/swtch32.S (revision 368353) +++ head/sys/powerpc/powerpc/swtch32.S (revision 368354) @@ -1,227 +1,231 @@ /* $FreeBSD$ */ /* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "assym.inc" #include "opt_sched.h" #include #include #include #include #include /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) mr %r2, %r4 li %r14,0 /* Tell cpu_switchin not to release a thread */ b cpu_switchin +END(cpu_throw) /* * void cpu_switch(struct thread *old, * struct thread *new, * struct mutex *mtx); * * Switch to a new thread saving the current state in the old thread. */ ENTRY(cpu_switch) lwz %r6,TD_PCB(%r3) /* Get the old thread's PCB ptr */ stmw %r12,PCB_CONTEXT(%r6) /* Save the non-volatile GP regs. 
These can now be used for scratch */ mfcr %r16 /* Save the condition register */ stw %r16,PCB_CR(%r6) mflr %r16 /* Save the link register */ stw %r16,PCB_LR(%r6) stw %r1,PCB_SP(%r6) /* Save the stack pointer */ bl 1f 1: mflr %r30 /* Prepare for secure-PLT calls */ addis %r30, %r30, (_GLOBAL_OFFSET_TABLE_-1b)@ha addi %r30, %r30, (_GLOBAL_OFFSET_TABLE_-1b)@l mr %r14,%r3 /* Copy the old thread ptr... */ mr %r2,%r4 /* and the new thread ptr in curthread */ mr %r16,%r5 /* and the new lock */ mr %r17,%r6 /* and the PCB */ lwz %r18,PCB_FLAGS(%r17) /* Save FPU context if needed */ andi. %r7, %r18, PCB_FPU beq .L1 bl save_fpu .L1: mr %r3,%r14 /* restore old thread ptr */ /* Save Altivec context if needed */ andi. %r7, %r18, PCB_VEC beq .L2 bl save_vec .L2: #if defined(__SPE__) mfspr %r3,SPR_SPEFSCR stw %r3,PCB_VSCR(%r17) #endif mr %r3,%r14 /* restore old thread ptr */ bl pmap_deactivate /* Deactivate the current pmap */ sync /* Make sure all of that finished */ cpu_switchin: #if defined(SMP) && defined(SCHED_ULE) /* Wait for the new thread to become unblocked */ bl 1f 1: mflr %r6 addis %r6,%r6,(_GLOBAL_OFFSET_TABLE_-1b)@ha addi %r6,%r6,(_GLOBAL_OFFSET_TABLE_-1b)@l mr %r30, %r6 /* Prepare for secure-PLT calls */ lwz %r6,blocked_lock@got(%r6) blocked_loop: lwz %r7,TD_LOCK(%r2) cmpw %r6,%r7 beq- blocked_loop isync #endif lwz %r17,TD_PCB(%r2) /* Get new current PCB */ lwz %r1,PCB_SP(%r17) /* Load new stack pointer */ /* Release old thread now that we have a stack pointer set up */ cmpwi %r14,0 beq- 1f stw %r16,TD_LOCK(%r14) /* ULE: update old thread's lock */ 1: mfsprg %r7,0 /* Get the pcpu pointer */ stw %r2,PC_CURTHREAD(%r7) /* Store new current thread */ lwz %r17,TD_PCB(%r2) /* Store new current PCB */ stw %r17,PC_CURPCB(%r7) mr %r3,%r2 /* Get new thread ptr */ bl pmap_activate /* Activate the new address space */ lwz %r19, PCB_FLAGS(%r17) /* Restore FPU context if needed */ andi. %r6, %r19, PCB_FPU beq .L3 mr %r3,%r2 /* Pass curthread to enable_fpu */ bl enable_fpu .L3: /* Restore Altivec context if needed */ andi. %r6, %r19, PCB_VEC beq .L4 mr %r3,%r2 /* Pass curthread to enable_vec */ bl enable_vec .L4: #if defined(__SPE__) lwz %r3,PCB_VSCR(%r17) mtspr SPR_SPEFSCR,%r3 #endif /* thread to restore is in r3 */ mr %r3,%r17 /* Recover PCB ptr */ lmw %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs */ lwz %r5,PCB_CR(%r3) /* Load the condition register */ mtcr %r5 lwz %r5,PCB_LR(%r3) /* Load the link register */ mtlr %r5 lwz %r1,PCB_SP(%r3) /* Load the stack pointer */ /* * Perform a dummy stwcx. to clear any reservations we may have * inherited from the previous thread. It doesn't matter if the * stwcx succeeds or not. pcb_context[0] can be clobbered. */ stwcx. 
%r1, 0, %r3 blr +END(cpu_switch) /* * savectx(pcb) * Update pcb, saving current processor state */ ENTRY(savectx) stmw %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs */ mfcr %r4 /* Save the condition register */ stw %r4,PCB_CR(%r3) stw %r1,PCB_SP(%r3) /* Save the stack pointer */ mflr %r4 /* Save the link register */ stw %r4,PCB_LR(%r3) blr +END(savectx) /* * fork_trampoline() * Set up the return from cpu_fork() */ ENTRY(fork_trampoline) lwz %r3,CF_FUNC(%r1) lwz %r4,CF_ARG0(%r1) lwz %r5,CF_ARG1(%r1) bl fork_exit addi %r1,%r1,CF_SIZE-FSP /* Allow 8 bytes in front of trapframe to simulate FRAME_SETUP does when allocating space for a frame pointer/saved LR */ #ifdef __SPE__ li %r3,SPEFSCR_FINVE|SPEFSCR_FDBZE|SPEFSCR_FUNFE|SPEFSCR_FOVFE mtspr SPR_SPEFSCR, %r3 #endif b trapexit +END(fork_trampoline) Index: head/sys/powerpc/powerpc/swtch64.S =================================================================== --- head/sys/powerpc/powerpc/swtch64.S (revision 368353) +++ head/sys/powerpc/powerpc/swtch64.S (revision 368354) @@ -1,360 +1,364 @@ /* $FreeBSD$ */ /* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "assym.inc" #include "opt_sched.h" #include #include #include #include #include #ifdef _CALL_ELF .abiversion _CALL_ELF #endif TOC_ENTRY(blocked_lock) /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) mr %r13, %r4 li %r14,0 /* Tell cpu_switchin not to release a thread */ li %r18,0 /* No old pcb flags. The old thread is extinguished. */ b cpu_switchin +END(cpu_throw) /* * void cpu_switch(struct thread *old, * struct thread *new, * struct mutex *mtx); * * Switch to a new thread saving the current state in the old thread. * * Internally clobbers (not visible outside of this file): * r18 - old thread pcb_flags * r19 - new thread pcb_flags */ ENTRY(cpu_switch) ld %r6,TD_PCB(%r3) /* Get the old thread's PCB ptr */ std %r12,PCB_CONTEXT(%r6) /* Save the non-volatile GP regs. These can now be used for scratch */ std %r14,PCB_CONTEXT+2*8(%r6) std %r15,PCB_CONTEXT+3*8(%r6) std %r16,PCB_CONTEXT+4*8(%r6) std %r17,PCB_CONTEXT+5*8(%r6) std %r18,PCB_CONTEXT+6*8(%r6) std %r19,PCB_CONTEXT+7*8(%r6) std %r20,PCB_CONTEXT+8*8(%r6) std %r21,PCB_CONTEXT+9*8(%r6) std %r22,PCB_CONTEXT+10*8(%r6) std %r23,PCB_CONTEXT+11*8(%r6) std %r24,PCB_CONTEXT+12*8(%r6) std %r25,PCB_CONTEXT+13*8(%r6) std %r26,PCB_CONTEXT+14*8(%r6) std %r27,PCB_CONTEXT+15*8(%r6) std %r28,PCB_CONTEXT+16*8(%r6) std %r29,PCB_CONTEXT+17*8(%r6) std %r30,PCB_CONTEXT+18*8(%r6) std %r31,PCB_CONTEXT+19*8(%r6) mfcr %r16 /* Save the condition register */ std %r16,PCB_CR(%r6) mflr %r16 /* Save the link register */ std %r16,PCB_LR(%r6) std %r1,PCB_SP(%r6) /* Save the stack pointer */ std %r2,PCB_TOC(%r6) /* Save the TOC pointer */ mr %r14,%r3 /* Copy the old thread ptr... */ mr %r13,%r4 /* and the new thread ptr in curthread*/ mr %r16,%r5 /* and the new lock */ mr %r17,%r6 /* and the PCB */ stdu %r1,-48(%r1) lwz %r18, PCB_FLAGS(%r17) andi. %r7, %r18, PCB_CFSCR beq 1f mfspr %r6, SPR_FSCR std %r6, PCB_FSCR(%r17) save_ebb: andi. %r0, %r6, FSCR_EBB beq save_lm mfspr %r7, SPR_EBBHR std %r7, PCB_EBB_EBBHR(%r17) mfspr %r7, SPR_EBBRR std %r7, PCB_EBB_EBBRR(%r17) mfspr %r7, SPR_BESCR std %r7, PCB_EBB_BESCR(%r17) save_lm: andi. %r0, %r6, FSCR_LM beq save_tar mfspr %r7, SPR_LMRR std %r7, PCB_LMON_LMRR(%r17) mfspr %r7, SPR_LMSER std %r7, PCB_LMON_LMSER(%r17) save_tar: andi. %r0, %r6, FSCR_TAR beq 1f mfspr %r7, SPR_TAR std %r7, PCB_TAR(%r17) 1: andi. %r7, %r18, PCB_CDSCR beq .L0 mfspr %r6, SPR_DSCRP std %r6, PCB_DSCR(%r17) .L0: /* Save FPU context if needed */ andi. %r7, %r18, PCB_FPU beq .L1 bl save_fpu nop .L1: mr %r3,%r14 /* restore old thread ptr */ /* Save Altivec context if needed */ andi. 
%r7, %r18, PCB_VEC beq .L2 bl save_vec nop .L2: mr %r3,%r14 /* restore old thread ptr */ bl pmap_deactivate /* Deactivate the current pmap */ nop sync /* Make sure all of that finished */ cpu_switchin: #if defined(SMP) && defined(SCHED_ULE) /* Wait for the new thread to become unblocked */ addis %r6,%r2,TOC_REF(blocked_lock)@ha ld %r6,TOC_REF(blocked_lock)@l(%r6) blocked_loop: ld %r7,TD_LOCK(%r13) cmpd %r6,%r7 beq- blocked_loop isync #endif ld %r17,TD_PCB(%r13) /* Get new PCB */ ld %r1,PCB_SP(%r17) /* Load the stack pointer */ addi %r1,%r1,-48 /* Remember about cpu_switch stack frame */ /* Release old thread now that we have a stack pointer set up */ cmpdi %r14,0 beq- 1f std %r16,TD_LOCK(%r14) /* ULE: update old thread's lock */ 1: mfsprg %r7,0 /* Get the pcpu pointer */ std %r13,PC_CURTHREAD(%r7) /* Store new current thread */ ld %r17,TD_PCB(%r13) /* Store new current PCB */ std %r17,PC_CURPCB(%r7) mr %r3,%r13 /* Get new thread ptr */ bl pmap_activate /* Activate the new address space */ nop lwz %r19, PCB_FLAGS(%r17) /* Restore FPU context if needed */ andi. %r6, %r19, PCB_FPU beq .L3 mr %r3,%r13 /* Pass curthread to enable_fpu */ bl enable_fpu nop .L3: /* Restore Altivec context if needed */ andi. %r6, %r19, PCB_VEC beq .L31 mr %r3,%r13 /* Pass curthread to enable_vec */ bl enable_vec nop .L31: /* Load custom DSCR on PowerISA 2.06+ CPUs. */ /* Load changed FSCR on PowerISA 2.07+ CPUs. */ or %r18,%r18,%r19 /* Restore Custom DSCR if needed (zeroes if in old but not new) */ andi. %r6, %r18, PCB_CDSCR beq .L32 ld %r7, PCB_DSCR(%r17) /* Load the DSCR register*/ mtspr SPR_DSCRP, %r7 .L32: /* Restore FSCR if needed (zeroes if in old but not new) */ andi. %r6, %r18, PCB_CFSCR beq .L4 ld %r7, PCB_FSCR(%r17) /* Load the FSCR register*/ mtspr SPR_FSCR, %r7 restore_ebb: andi. %r0, %r7, FSCR_EBB beq restore_lm ld %r6, PCB_EBB_EBBHR(%r17) mtspr SPR_EBBHR, %r6 ld %r6, PCB_EBB_EBBRR(%r17) mtspr SPR_EBBRR, %r6 ld %r6, PCB_EBB_BESCR(%r17) mtspr SPR_BESCR, %r6 restore_lm: andi. %r0, %r7, FSCR_LM beq restore_tar ld %r6, PCB_LMON_LMRR(%r17) mtspr SPR_LMRR, %r6 ld %r6, PCB_LMON_LMSER(%r17) mtspr SPR_LMSER, %r6 restore_tar: andi. %r0, %r7, FSCR_TAR beq .L4 ld %r6, PCB_TAR(%r17) mtspr SPR_TAR, %r6 /* thread to restore is in r3 */ .L4: addi %r1,%r1,48 mr %r3,%r17 /* Recover PCB ptr */ ld %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs. */ ld %r14,PCB_CONTEXT+2*8(%r3) ld %r15,PCB_CONTEXT+3*8(%r3) ld %r16,PCB_CONTEXT+4*8(%r3) ld %r17,PCB_CONTEXT+5*8(%r3) ld %r18,PCB_CONTEXT+6*8(%r3) ld %r19,PCB_CONTEXT+7*8(%r3) ld %r20,PCB_CONTEXT+8*8(%r3) ld %r21,PCB_CONTEXT+9*8(%r3) ld %r22,PCB_CONTEXT+10*8(%r3) ld %r23,PCB_CONTEXT+11*8(%r3) ld %r24,PCB_CONTEXT+12*8(%r3) ld %r25,PCB_CONTEXT+13*8(%r3) ld %r26,PCB_CONTEXT+14*8(%r3) ld %r27,PCB_CONTEXT+15*8(%r3) ld %r28,PCB_CONTEXT+16*8(%r3) ld %r29,PCB_CONTEXT+17*8(%r3) ld %r30,PCB_CONTEXT+18*8(%r3) ld %r31,PCB_CONTEXT+19*8(%r3) ld %r5,PCB_CR(%r3) /* Load the condition register */ mtcr %r5 ld %r5,PCB_LR(%r3) /* Load the link register */ mtlr %r5 ld %r1,PCB_SP(%r3) /* Load the stack pointer */ ld %r2,PCB_TOC(%r3) /* Load the TOC pointer */ /* * Perform a dummy stdcx. to clear any reservations we may have * inherited from the previous thread. It doesn't matter if the * stdcx succeeds or not. pcb_context[0] can be clobbered. */ stdcx. %r1, 0, %r3 blr +END(cpu_switch) /* * savectx(pcb) * Update pcb, saving current processor state */ ENTRY(savectx) std %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs. 
*/ std %r13,PCB_CONTEXT+1*8(%r3) std %r14,PCB_CONTEXT+2*8(%r3) std %r15,PCB_CONTEXT+3*8(%r3) std %r16,PCB_CONTEXT+4*8(%r3) std %r17,PCB_CONTEXT+5*8(%r3) std %r18,PCB_CONTEXT+6*8(%r3) std %r19,PCB_CONTEXT+7*8(%r3) std %r20,PCB_CONTEXT+8*8(%r3) std %r21,PCB_CONTEXT+9*8(%r3) std %r22,PCB_CONTEXT+10*8(%r3) std %r23,PCB_CONTEXT+11*8(%r3) std %r24,PCB_CONTEXT+12*8(%r3) std %r25,PCB_CONTEXT+13*8(%r3) std %r26,PCB_CONTEXT+14*8(%r3) std %r27,PCB_CONTEXT+15*8(%r3) std %r28,PCB_CONTEXT+16*8(%r3) std %r29,PCB_CONTEXT+17*8(%r3) std %r30,PCB_CONTEXT+18*8(%r3) std %r31,PCB_CONTEXT+19*8(%r3) mfcr %r4 /* Save the condition register */ std %r4,PCB_CR(%r3) std %r1,PCB_SP(%r3) /* Save the stack pointer */ std %r2,PCB_TOC(%r3) /* Save the TOC pointer */ mflr %r4 /* Save the link register */ std %r4,PCB_LR(%r3) blr +END(savectx) /* * fork_trampoline() * Set up the return from cpu_fork() */ ENTRY_NOPROF(fork_trampoline) ld %r3,CF_FUNC(%r1) ld %r4,CF_ARG0(%r1) ld %r5,CF_ARG1(%r1) stdu %r1,-48(%r1) bl fork_exit nop addi %r1,%r1,48+CF_SIZE-FSP /* Allow 8 bytes in front of trapframe to simulate FRAME_SETUP does when allocating space for a frame pointer/saved LR */ bl trapexit nop +END(fork_trampoline) Index: head/sys/powerpc/ps3/ps3-hvcall.S =================================================================== --- head/sys/powerpc/ps3/ps3-hvcall.S (revision 368353) +++ head/sys/powerpc/ps3/ps3-hvcall.S (revision 368354) @@ -1,1278 +1,1367 @@ /* $FreeBSD$ */ #include #define hc .long 0x44000022 ASENTRY(lv1_allocate_memory) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r7,48(%r1) std %r8,56(%r1) li %r11,0 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_allocate_memory) ASENTRY(lv1_write_htab_entry) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,1 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_write_htab_entry) ASENTRY(lv1_construct_virtual_address_space) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r6,48(%r1) std %r7,56(%r1) li %r11,2 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_construct_virtual_address_space) ASENTRY(lv1_get_virtual_address_space_id_of_ppe) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r4,48(%r1) li %r11,4 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_virtual_address_space_id_of_ppe) ASENTRY(lv1_query_logical_partition_address_region_info) mflr %r0 std %r0,16(%r1) stdu %r1,-88(%r1) std %r4,48(%r1) std %r5,56(%r1) std %r6,64(%r1) std %r7,72(%r1) std %r8,80(%r1) li %r11,6 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r11,64(%r1) std %r6,0(%r11) ld %r11,72(%r1) std %r7,0(%r11) ld %r11,80(%r1) std %r8,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_query_logical_partition_address_region_info) ASENTRY(lv1_select_virtual_address_space) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,7 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_select_virtual_address_space) ASENTRY(lv1_pause) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,9 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_pause) ASENTRY(lv1_destruct_virtual_address_space) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,10 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_destruct_virtual_address_space) ASENTRY(lv1_configure_irq_state_bitmap) mflr %r0 std %r0,16(%r1) 
stdu %r1,-48(%r1) li %r11,11 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_configure_irq_state_bitmap) ASENTRY(lv1_connect_irq_plug_ext) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,12 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_connect_irq_plug_ext) ASENTRY(lv1_release_memory) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,13 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_release_memory) ASENTRY(lv1_put_iopte) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,15 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_put_iopte) ASENTRY(lv1_disconnect_irq_plug_ext) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,17 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_disconnect_irq_plug_ext) ASENTRY(lv1_construct_event_receive_port) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r3,48(%r1) li %r11,18 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_construct_event_receive_port) ASENTRY(lv1_destruct_event_receive_port) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,19 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_destruct_event_receive_port) ASENTRY(lv1_send_event_locally) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,24 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_send_event_locally) ASENTRY(lv1_end_of_interrupt) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,27 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_end_of_interrupt) ASENTRY(lv1_connect_irq_plug) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,28 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_connect_irq_plug) ASENTRY(lv1_disconnect_irq_plus) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,29 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_disconnect_irq_plus) ASENTRY(lv1_end_of_interrupt_ext) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,30 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_end_of_interrupt_ext) ASENTRY(lv1_did_update_interrupt_mask) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,31 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_did_update_interrupt_mask) ASENTRY(lv1_shutdown_logical_partition) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,44 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_shutdown_logical_partition) ASENTRY(lv1_destruct_logical_spe) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,54 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_destruct_logical_spe) ASENTRY(lv1_construct_logical_spe) mflr %r0 std %r0,16(%r1) stdu %r1,-96(%r1) std %r10,48(%r1) ld %r11,208(%r1) std %r11,56(%r1) ld %r11,216(%r1) std %r11,64(%r1) ld %r11,224(%r1) std %r11,72(%r1) ld %r11,232(%r1) std %r11,80(%r1) ld %r11,240(%r1) std %r11,88(%r1) li %r11,57 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r11,64(%r1) std %r6,0(%r11) ld %r11,72(%r1) std %r7,0(%r11) ld %r11,80(%r1) std %r8,0(%r11) ld %r11,88(%r1) std %r9,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_construct_logical_spe) ASENTRY(lv1_set_spe_interrupt_mask) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,61 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_set_spe_interrupt_mask) ASENTRY(lv1_disable_logical_spe) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,65 hc extsw %r3,%r3 ld 
%r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_disable_logical_spe) ASENTRY(lv1_clear_spe_interrupt_status) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,66 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_clear_spe_interrupt_status) ASENTRY(lv1_get_spe_interrupt_status) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r5,48(%r1) li %r11,67 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_spe_interrupt_status) ASENTRY(lv1_get_logical_ppe_id) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r3,48(%r1) li %r11,69 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_logical_ppe_id) ASENTRY(lv1_get_logical_partition_id) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r3,48(%r1) li %r11,74 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_logical_partition_id) ASENTRY(lv1_get_spe_irq_outlet) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r5,48(%r1) li %r11,78 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_spe_irq_outlet) ASENTRY(lv1_set_spe_privilege_state_area_1_register) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,79 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_set_spe_privilege_state_area_1_register) ASENTRY(lv1_get_repository_node_value) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r8,48(%r1) std %r9,56(%r1) li %r11,91 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_repository_node_value) ASENTRY(lv1_read_htab_entries) mflr %r0 std %r0,16(%r1) stdu %r1,-88(%r1) std %r5,48(%r1) std %r6,56(%r1) std %r7,64(%r1) std %r8,72(%r1) std %r9,80(%r1) li %r11,95 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r11,64(%r1) std %r6,0(%r11) ld %r11,72(%r1) std %r7,0(%r11) ld %r11,80(%r1) std %r8,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_read_htab_entries) ASENTRY(lv1_set_dabr) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,96 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_set_dabr) ASENTRY(lv1_allocate_io_segment) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r6,48(%r1) li %r11,116 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_allocate_io_segment) ASENTRY(lv1_release_io_segment) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,117 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_release_io_segment) ASENTRY(lv1_construct_io_irq_outlet) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r4,48(%r1) li %r11,120 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_construct_io_irq_outlet) ASENTRY(lv1_destruct_io_irq_outlet) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,121 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_destruct_io_irq_outlet) ASENTRY(lv1_map_htab) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r4,48(%r1) li %r11,122 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_map_htab) ASENTRY(lv1_unmap_htab) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,123 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_unmap_htab) ASENTRY(lv1_get_version_info) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r3,48(%r1) li %r11,127 hc 
extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_version_info) ASENTRY(lv1_insert_htab_entry) mflr %r0 std %r0,16(%r1) stdu %r1,-72(%r1) std %r9,48(%r1) std %r10,56(%r1) ld %r11,184(%r1) std %r11,64(%r1) li %r11,158 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r11,64(%r1) std %r6,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_insert_htab_entry) ASENTRY(lv1_read_virtual_uart) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r6,48(%r1) li %r11,162 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_read_virtual_uart) ASENTRY(lv1_write_virtual_uart) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r6,48(%r1) li %r11,163 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_write_virtual_uart) ASENTRY(lv1_set_virtual_uart_param) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,164 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_set_virtual_uart_param) ASENTRY(lv1_get_virtual_uart_param) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r5,48(%r1) li %r11,165 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_virtual_uart_param) ASENTRY(lv1_configure_virtual_uart) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r4,48(%r1) li %r11,166 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_configure_virtual_uart) ASENTRY(lv1_open_device) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,170 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_open_device) ASENTRY(lv1_close_device) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,171 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_close_device) ASENTRY(lv1_map_device_mmio_region) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r8,48(%r1) li %r11,172 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_map_device_mmio_region) ASENTRY(lv1_unmap_device_mmio_region) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,173 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_unmap_device_mmio_region) ASENTRY(lv1_allocate_device_dma_region) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r8,48(%r1) li %r11,174 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_allocate_device_dma_region) ASENTRY(lv1_free_device_dma_region) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,175 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_free_device_dma_region) ASENTRY(lv1_map_device_dma_region) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,176 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_map_device_dma_region) ASENTRY(lv1_unmap_device_dma_region) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,177 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_unmap_device_dma_region) ASENTRY(lv1_read_pci_config) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r9,48(%r1) li %r11,178 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_read_pci_config) ASENTRY(lv1_write_pci_config) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,179 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_write_pci_config) ASENTRY(lv1_net_add_multicast_address) mflr %r0 std 
%r0,16(%r1) stdu %r1,-48(%r1) li %r11,185 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_add_multicast_address) ASENTRY(lv1_net_remove_multicast_address) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,186 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_remove_multicast_address) ASENTRY(lv1_net_start_tx_dma) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,187 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_start_tx_dma) ASENTRY(lv1_net_stop_tx_dma) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,188 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_stop_tx_dma) ASENTRY(lv1_net_start_rx_dma) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,189 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_start_rx_dma) ASENTRY(lv1_net_stop_rx_dma) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,190 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_stop_rx_dma) ASENTRY(lv1_net_set_interrupt_status_indicator) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,191 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_set_interrupt_status_indicator) ASENTRY(lv1_net_set_interrupt_mask) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,193 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_set_interrupt_mask) ASENTRY(lv1_net_control) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r9,48(%r1) std %r10,56(%r1) li %r11,194 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_net_control) ASENTRY(lv1_connect_interrupt_event_receive_port) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,197 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_connect_interrupt_event_receive_port) ASENTRY(lv1_disconnect_interrupt_event_receive_port) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,198 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_disconnect_interrupt_event_receive_port) ASENTRY(lv1_deconfigure_virtual_uart_irq) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,202 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_deconfigure_virtual_uart_irq) ASENTRY(lv1_enable_logical_spe) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,207 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_enable_logical_spe) ASENTRY(lv1_gpu_open) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,210 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_open) ASENTRY(lv1_gpu_close) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,211 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_close) ASENTRY(lv1_gpu_device_map) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r4,48(%r1) std %r5,56(%r1) li %r11,212 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_device_map) ASENTRY(lv1_gpu_device_unmap) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,213 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_device_unmap) ASENTRY(lv1_gpu_memory_allocate) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r8,48(%r1) std %r9,56(%r1) li %r11,214 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_memory_allocate) ASENTRY(lv1_gpu_memory_free) mflr %r0 std 
%r0,16(%r1) stdu %r1,-48(%r1) li %r11,216 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_memory_free) ASENTRY(lv1_gpu_context_allocate) mflr %r0 std %r0,16(%r1) stdu %r1,-88(%r1) std %r5,48(%r1) std %r6,56(%r1) std %r7,64(%r1) std %r8,72(%r1) std %r9,80(%r1) li %r11,217 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r11,64(%r1) std %r6,0(%r11) ld %r11,72(%r1) std %r7,0(%r11) ld %r11,80(%r1) std %r8,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_context_allocate) ASENTRY(lv1_gpu_context_free) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,218 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_context_free) ASENTRY(lv1_gpu_context_iomap) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,221 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_context_iomap) ASENTRY(lv1_gpu_context_attribute) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,225 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_context_attribute) ASENTRY(lv1_gpu_context_intr) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r4,48(%r1) li %r11,227 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_context_intr) ASENTRY(lv1_gpu_attribute) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,228 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_gpu_attribute) ASENTRY(lv1_get_rtc) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r3,48(%r1) std %r4,56(%r1) li %r11,232 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_get_rtc) ASENTRY(lv1_storage_read) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r9,48(%r1) li %r11,245 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_storage_read) ASENTRY(lv1_storage_write) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r9,48(%r1) li %r11,246 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_storage_write) ASENTRY(lv1_storage_send_device_command) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r9,48(%r1) li %r11,248 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_storage_send_device_command) ASENTRY(lv1_storage_get_async_status) mflr %r0 std %r0,16(%r1) stdu %r1,-64(%r1) std %r4,48(%r1) std %r5,56(%r1) li %r11,249 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_storage_get_async_status) ASENTRY(lv1_storage_check_async_status) mflr %r0 std %r0,16(%r1) stdu %r1,-56(%r1) std %r5,48(%r1) li %r11,254 hc extsw %r3,%r3 ld %r11,48(%r1) std %r4,0(%r11) ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr +ASEND(lv1_storage_check_async_status) ASENTRY(lv1_panic) mflr %r0 std %r0,16(%r1) stdu %r1,-48(%r1) li %r11,255 hc extsw %r3,%r3 ld %r1,0(%r1) ld %r0,16(%r1) mtlr %r0 blr - +ASEND(lv1_panic) Index: head/sys/powerpc/pseries/phyp-hvcall.S =================================================================== --- head/sys/powerpc/pseries/phyp-hvcall.S (revision 368353) +++ head/sys/powerpc/pseries/phyp-hvcall.S (revision 368354) @@ -1,75 +1,76 @@ /*- * Copyright (C) 2010 Andreas Tobler * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include /* Hypervisor entry call. */ #define hc .long 0x44000022 /* * Simple HV calls take the same arguments, with the same ABI, as this * C function */ ASENTRY(phyp_hcall) mflr %r0 std %r0,16(%r1) #if defined(_CALL_ELF) && _CALL_ELF == 2 ld %r11,96(%r1) /* Last couple args into volatile regs*/ ld %r12,104(%r1) #else ld %r11,112(%r1) /* Last couple args into volatile regs*/ ld %r12,120(%r1) #endif hc /* invoke the hypervisor */ ld %r0,16(%r1) mtlr %r0 blr /* return r3 = status */ +ASEND(phyp_hcall) /* * PFT HV calls take a special ABI (see PAPR 14.5.4.1) * * r3-r7 arguments passed unchanged, r8-r10 are addresses of return values * HV takes the same r3-r7, but returns values in r3, r4-r6 */ ASENTRY(phyp_pft_hcall) mflr %r0 std %r0,16(%r1) stdu %r1,-80(%r1) std %r8,48(%r1) /* save arguments */ std %r9,56(%r1) std %r10,64(%r1) hc /* invoke the hypervisor */ ld %r11,48(%r1) /* store results */ std %r4,0(%r11) ld %r11,56(%r1) std %r5,0(%r11) ld %r11,64(%r1) std %r6,0(%r11) ld %r1,0(%r1) /* exit */ ld %r0,16(%r1) mtlr %r0 blr /* return r3 = status */ - +ASEND(phyp_pft_hcall)
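
To close, a hedged sketch of the simple pseries entry point above. phyp_hcall passes its arguments through unchanged (reloading the last two doublewords from the stack per the ELFv1/ELFv2 cases shown), while phyp_pft_hcall is the PAPR 14.5.4.1 variant in which r8-r10 carry addresses that receive the values the hypervisor returns in r4-r6. The token value and helper below are illustrative, not taken from this tree.

/*
 * Sketch under assumptions: H_PUT_TERM_CHAR and its argument layout
 * follow PAPR; the prototype mirrors the "same arguments, same ABI"
 * comment on phyp_hcall above.
 */
#include <stdint.h>

#define	H_PUT_TERM_CHAR	0x58	/* PAPR hcall number (illustrative) */

int64_t phyp_hcall(uint64_t opcode, ...);	/* simple entry above */

static int64_t
phyp_putc_sketch(uint64_t vtermno, char c)
{
	/* One byte, left-justified in the first data doubleword. */
	uint64_t hi = (uint64_t)(unsigned char)c << 56;

	return (phyp_hcall(H_PUT_TERM_CHAR, vtermno, (uint64_t)1, hi,
	    (uint64_t)0));
}
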