Index: head/sys/amd64/amd64/support.S =================================================================== --- head/sys/amd64/amd64/support.S (revision 360943) +++ head/sys/amd64/amd64/support.S (revision 360944) @@ -1,1956 +1,1919 @@ /*- * Copyright (c) 2018-2019 The FreeBSD Foundation * Copyright (c) 2003 Peter Wemm. * Copyright (c) 1993 The Regents of the University of California. * All rights reserved. * * Portions of this software were developed by * Konstantin Belousov under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_ddb.h" #include #include #include #include "assym.inc" .text /* Address: %rdi */ ENTRY(pagezero_std) PUSH_FRAME_POINTER movl $PAGE_SIZE/8,%ecx xorl %eax,%eax rep stosq POP_FRAME_POINTER ret END(pagezero_std) ENTRY(pagezero_erms) PUSH_FRAME_POINTER movl $PAGE_SIZE,%ecx xorl %eax,%eax rep stosb POP_FRAME_POINTER ret END(pagezero_erms) /* * pagecopy(%rdi=from, %rsi=to) */ ENTRY(pagecopy) PUSH_FRAME_POINTER movl $PAGE_SIZE/8,%ecx movq %rdi,%r9 movq %rsi,%rdi movq %r9,%rsi rep movsq POP_FRAME_POINTER ret END(pagecopy) /* Address: %rdi */ ENTRY(sse2_pagezero) PUSH_FRAME_POINTER movq $-PAGE_SIZE,%rdx subq %rdx,%rdi xorl %eax,%eax jmp 1f /* * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte * cache line. 
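 *
 * The loop writes the page with four 8-byte MOVNTI (non-temporal) stores
 * per iteration, counting %rdx up from -PAGE_SIZE to zero; SFENCE then
 * orders the weakly-ordered stores. ".p2align 5" pads to a 32-byte
 * boundary, so the 29-byte body fits without crossing one. A hedged
 * userland C sketch of the same idea (illustrative only, not the kernel
 * routine; assumes SSE2 and a 4096-byte page):
 *
 *	#include <immintrin.h>
 *	#include <stddef.h>
 *
 *	static void
 *	pagezero_nt(long long *page)
 *	{
 *		for (size_t i = 0; i < 4096 / 8; i += 4) {
 *			_mm_stream_si64(page + i, 0);
 *			_mm_stream_si64(page + i + 1, 0);
 *			_mm_stream_si64(page + i + 2, 0);
 *			_mm_stream_si64(page + i + 3, 0);
 *		}
 *		_mm_sfence();	// order the non-temporal stores
 *	}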
*/ .p2align 5,0x90 1: movnti %rax,(%rdi,%rdx) movnti %rax,8(%rdi,%rdx) movnti %rax,16(%rdi,%rdx) movnti %rax,24(%rdi,%rdx) addq $32,%rdx jne 1b sfence POP_FRAME_POINTER ret END(sse2_pagezero) /* * memcmpy(b1, b2, len) * rdi,rsi,rdx */ ENTRY(memcmp) PUSH_FRAME_POINTER xorl %eax,%eax 10: cmpq $16,%rdx ja 101632f 100816: cmpb $8,%dl jl 100408f movq (%rdi),%r8 movq (%rsi),%r9 cmpq %r8,%r9 jne 80f movq -8(%rdi,%rdx),%r8 movq -8(%rsi,%rdx),%r9 cmpq %r8,%r9 jne 10081608f POP_FRAME_POINTER ret 100408: cmpb $4,%dl jl 100204f movl (%rdi),%r8d movl (%rsi),%r9d cmpl %r8d,%r9d jne 80f movl -4(%rdi,%rdx),%r8d movl -4(%rsi,%rdx),%r9d cmpl %r8d,%r9d jne 10040804f POP_FRAME_POINTER ret 100204: cmpb $2,%dl jl 100001f movzwl (%rdi),%r8d movzwl (%rsi),%r9d cmpl %r8d,%r9d jne 1f movzwl -2(%rdi,%rdx),%r8d movzwl -2(%rsi,%rdx),%r9d cmpl %r8d,%r9d jne 1f POP_FRAME_POINTER ret 100001: cmpb $1,%dl jl 100000f movzbl (%rdi),%eax movzbl (%rsi),%r8d subl %r8d,%eax 100000: POP_FRAME_POINTER ret ALIGN_TEXT 101632: cmpq $32,%rdx ja 103200f movq (%rdi),%r8 movq (%rsi),%r9 cmpq %r8,%r9 jne 80f movq 8(%rdi),%r8 movq 8(%rsi),%r9 cmpq %r8,%r9 jne 10163208f movq -16(%rdi,%rdx),%r8 movq -16(%rsi,%rdx),%r9 cmpq %r8,%r9 jne 10163216f movq -8(%rdi,%rdx),%r8 movq -8(%rsi,%rdx),%r9 cmpq %r8,%r9 jne 10163224f POP_FRAME_POINTER ret ALIGN_TEXT 103200: movq (%rdi),%r8 movq 8(%rdi),%r9 subq (%rsi),%r8 subq 8(%rsi),%r9 orq %r8,%r9 jnz 10320000f movq 16(%rdi),%r8 movq 24(%rdi),%r9 subq 16(%rsi),%r8 subq 24(%rsi),%r9 orq %r8,%r9 jnz 10320016f leaq 32(%rdi),%rdi leaq 32(%rsi),%rsi subq $32,%rdx cmpq $32,%rdx jae 103200b cmpb $0,%dl jne 10b POP_FRAME_POINTER ret /* * Mismatch was found. * * Before we compute it we narrow down the range (16 -> 8 -> 4 bytes). */ ALIGN_TEXT 10320016: leaq 16(%rdi),%rdi leaq 16(%rsi),%rsi 10320000: movq (%rdi),%r8 movq (%rsi),%r9 cmpq %r8,%r9 jne 80f leaq 8(%rdi),%rdi leaq 8(%rsi),%rsi jmp 80f ALIGN_TEXT 10081608: 10163224: leaq -8(%rdi,%rdx),%rdi leaq -8(%rsi,%rdx),%rsi jmp 80f ALIGN_TEXT 10163216: leaq -16(%rdi,%rdx),%rdi leaq -16(%rsi,%rdx),%rsi jmp 80f ALIGN_TEXT 10163208: leaq 8(%rdi),%rdi leaq 8(%rsi),%rsi jmp 80f ALIGN_TEXT 10040804: leaq -4(%rdi,%rdx),%rdi leaq -4(%rsi,%rdx),%rsi jmp 1f ALIGN_TEXT 80: movl (%rdi),%r8d movl (%rsi),%r9d cmpl %r8d,%r9d jne 1f leaq 4(%rdi),%rdi leaq 4(%rsi),%rsi /* * We have up to 4 bytes to inspect. */ 1: movzbl (%rdi),%eax movzbl (%rsi),%r8d cmpb %r8b,%al jne 2f movzbl 1(%rdi),%eax movzbl 1(%rsi),%r8d cmpb %r8b,%al jne 2f movzbl 2(%rdi),%eax movzbl 2(%rsi),%r8d cmpb %r8b,%al jne 2f movzbl 3(%rdi),%eax movzbl 3(%rsi),%r8d 2: subl %r8d,%eax POP_FRAME_POINTER ret END(memcmp) /* * memmove(dst, src, cnt) * rdi, rsi, rdx */ /* * Register state at entry is supposed to be as follows: * rdi - destination * rsi - source * rdx - count * * The macro possibly clobbers the above and: rcx, r8, r9, r10 * It does not clobber rax nor r11. */ .macro MEMMOVE erms overlap begin end \begin /* * For sizes 0..32 all data is read before it is written, so there * is no correctness issue with direction of copying. */ cmpq $32,%rcx jbe 101632f .if \overlap == 1 movq %rdi,%r8 subq %rsi,%r8 cmpq %rcx,%r8 /* overlapping && src < dst? 
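 *
 * The single unsigned compare folds two tests: %r8 = dst - src wraps to
 * a huge value when src > dst, so "jb 2f" is taken only when
 * 0 <= dst - src < len, i.e. the regions overlap with the source below
 * the destination and the copy must run backwards. A C sketch of the
 * same decision (hypothetical helper name, illustrative only):
 *
 *	static int
 *	must_copy_backwards(const char *dst, const char *src, size_t len)
 *	{
 *		// unsigned wrap makes one compare cover both conditions
 *		return ((size_t)(dst - src) < len);
 *	}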
*/ jb 2f .endif cmpq $256,%rcx ja 1256f 103200: movq (%rsi),%rdx movq %rdx,(%rdi) movq 8(%rsi),%rdx movq %rdx,8(%rdi) movq 16(%rsi),%rdx movq %rdx,16(%rdi) movq 24(%rsi),%rdx movq %rdx,24(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi subq $32,%rcx cmpq $32,%rcx jae 103200b cmpb $0,%cl jne 101632f \end ret ALIGN_TEXT 101632: cmpb $16,%cl jl 100816f movq (%rsi),%rdx movq 8(%rsi),%r8 movq -16(%rsi,%rcx),%r9 movq -8(%rsi,%rcx),%r10 movq %rdx,(%rdi) movq %r8,8(%rdi) movq %r9,-16(%rdi,%rcx) movq %r10,-8(%rdi,%rcx) \end ret ALIGN_TEXT 100816: cmpb $8,%cl jl 100408f movq (%rsi),%rdx movq -8(%rsi,%rcx),%r8 movq %rdx,(%rdi) movq %r8,-8(%rdi,%rcx,) \end ret ALIGN_TEXT 100408: cmpb $4,%cl jl 100204f movl (%rsi),%edx movl -4(%rsi,%rcx),%r8d movl %edx,(%rdi) movl %r8d,-4(%rdi,%rcx) \end ret ALIGN_TEXT 100204: cmpb $2,%cl jl 100001f movzwl (%rsi),%edx movzwl -2(%rsi,%rcx),%r8d movw %dx,(%rdi) movw %r8w,-2(%rdi,%rcx) \end ret ALIGN_TEXT 100001: cmpb $1,%cl jl 100000f movb (%rsi),%dl movb %dl,(%rdi) 100000: \end ret ALIGN_TEXT 1256: testb $15,%dil jnz 100f .if \erms == 1 rep movsb .else shrq $3,%rcx /* copy by 64-bit words */ rep movsq movq %rdx,%rcx andl $7,%ecx /* any bytes left? */ jne 100408b .endif \end ret 100: movq (%rsi),%r8 movq 8(%rsi),%r9 movq %rdi,%r10 movq %rdi,%rcx andq $15,%rcx leaq -16(%rdx,%rcx),%rdx neg %rcx leaq 16(%rdi,%rcx),%rdi leaq 16(%rsi,%rcx),%rsi movq %rdx,%rcx .if \erms == 1 rep movsb movq %r8,(%r10) movq %r9,8(%r10) .else shrq $3,%rcx /* copy by 64-bit words */ rep movsq movq %r8,(%r10) movq %r9,8(%r10) movq %rdx,%rcx andl $7,%ecx /* any bytes left? */ jne 100408b .endif \end ret .if \overlap == 1 /* * Copy backwards. */ ALIGN_TEXT 2: cmpq $256,%rcx ja 2256f leaq -8(%rdi,%rcx),%rdi leaq -8(%rsi,%rcx),%rsi cmpq $32,%rcx jb 2016f 2032: movq (%rsi),%rdx movq %rdx,(%rdi) movq -8(%rsi),%rdx movq %rdx,-8(%rdi) movq -16(%rsi),%rdx movq %rdx,-16(%rdi) movq -24(%rsi),%rdx movq %rdx,-24(%rdi) leaq -32(%rsi),%rsi leaq -32(%rdi),%rdi subq $32,%rcx cmpq $32,%rcx jae 2032b cmpb $0,%cl jne 2016f \end ret ALIGN_TEXT 2016: cmpb $16,%cl jl 2008f movq (%rsi),%rdx movq %rdx,(%rdi) movq -8(%rsi),%rdx movq %rdx,-8(%rdi) subb $16,%cl jz 2000f leaq -16(%rsi),%rsi leaq -16(%rdi),%rdi 2008: cmpb $8,%cl jl 2004f movq (%rsi),%rdx movq %rdx,(%rdi) subb $8,%cl jz 2000f leaq -8(%rsi),%rsi leaq -8(%rdi),%rdi 2004: cmpb $4,%cl jl 2002f movl 4(%rsi),%edx movl %edx,4(%rdi) subb $4,%cl jz 2000f leaq -4(%rsi),%rsi leaq -4(%rdi),%rdi 2002: cmpb $2,%cl jl 2001f movw 6(%rsi),%dx movw %dx,6(%rdi) subb $2,%cl jz 2000f leaq -2(%rsi),%rsi leaq -2(%rdi),%rdi 2001: cmpb $1,%cl jl 2000f movb 7(%rsi),%dl movb %dl,7(%rdi) 2000: \end ret ALIGN_TEXT 2256: std .if \erms == 1 leaq -1(%rdi,%rcx),%rdi leaq -1(%rsi,%rcx),%rsi rep movsb cld .else leaq -8(%rdi,%rcx),%rdi leaq -8(%rsi,%rcx),%rsi shrq $3,%rcx rep movsq cld movq %rdx,%rcx andb $7,%cl jne 2004b .endif \end ret .endif .endm .macro MEMMOVE_BEGIN PUSH_FRAME_POINTER movq %rdi,%rax movq %rdx,%rcx .endm .macro MEMMOVE_END POP_FRAME_POINTER .endm ENTRY(memmove_std) MEMMOVE erms=0 overlap=1 begin=MEMMOVE_BEGIN end=MEMMOVE_END END(memmove_std) ENTRY(memmove_erms) MEMMOVE erms=1 overlap=1 begin=MEMMOVE_BEGIN end=MEMMOVE_END END(memmove_erms) /* * memcpy(dst, src, len) * rdi, rsi, rdx * * Note: memcpy does not support overlapping copies */ ENTRY(memcpy_std) MEMMOVE erms=0 overlap=0 begin=MEMMOVE_BEGIN end=MEMMOVE_END END(memcpy_std) ENTRY(memcpy_erms) MEMMOVE erms=1 overlap=0 begin=MEMMOVE_BEGIN end=MEMMOVE_END END(memcpy_erms) /* * memset(dst, c, len) * rdi, rsi, rdx */ .macro MEMSET 
erms PUSH_FRAME_POINTER movq %rdi,%rax movq %rdx,%rcx movzbq %sil,%r8 movabs $0x0101010101010101,%r10 imulq %r8,%r10 cmpq $32,%rcx jbe 101632f cmpq $256,%rcx ja 1256f 103200: movq %r10,(%rdi) movq %r10,8(%rdi) movq %r10,16(%rdi) movq %r10,24(%rdi) leaq 32(%rdi),%rdi subq $32,%rcx cmpq $32,%rcx ja 103200b cmpb $16,%cl ja 201632f movq %r10,-16(%rdi,%rcx) movq %r10,-8(%rdi,%rcx) POP_FRAME_POINTER ret ALIGN_TEXT 101632: cmpb $16,%cl jl 100816f 201632: movq %r10,(%rdi) movq %r10,8(%rdi) movq %r10,-16(%rdi,%rcx) movq %r10,-8(%rdi,%rcx) POP_FRAME_POINTER ret ALIGN_TEXT 100816: cmpb $8,%cl jl 100408f movq %r10,(%rdi) movq %r10,-8(%rdi,%rcx) POP_FRAME_POINTER ret ALIGN_TEXT 100408: cmpb $4,%cl jl 100204f movl %r10d,(%rdi) movl %r10d,-4(%rdi,%rcx) POP_FRAME_POINTER ret ALIGN_TEXT 100204: cmpb $2,%cl jl 100001f movw %r10w,(%rdi) movw %r10w,-2(%rdi,%rcx) POP_FRAME_POINTER ret ALIGN_TEXT 100001: cmpb $0,%cl je 100000f movb %r10b,(%rdi) 100000: POP_FRAME_POINTER ret ALIGN_TEXT 1256: movq %rdi,%r9 movq %r10,%rax testl $15,%edi jnz 3f 1: .if \erms == 1 rep stosb movq %r9,%rax .else movq %rcx,%rdx shrq $3,%rcx rep stosq movq %r9,%rax andl $7,%edx jnz 2f POP_FRAME_POINTER ret 2: movq %r10,-8(%rdi,%rdx) .endif POP_FRAME_POINTER ret ALIGN_TEXT 3: movq %r10,(%rdi) movq %r10,8(%rdi) movq %rdi,%r8 andq $15,%r8 leaq -16(%rcx,%r8),%rcx neg %r8 leaq 16(%rdi,%r8),%rdi jmp 1b .endm ENTRY(memset_std) MEMSET erms=0 END(memset_std) ENTRY(memset_erms) MEMSET erms=1 END(memset_erms) /* fillw(pat, base, cnt) */ /* %rdi,%rsi, %rdx */ ENTRY(fillw) PUSH_FRAME_POINTER movq %rdi,%rax movq %rsi,%rdi movq %rdx,%rcx rep stosw POP_FRAME_POINTER ret END(fillw) /*****************************************************************************/ /* copyout and fubyte family */ /*****************************************************************************/ /* * Access user memory from inside the kernel. These routines should be * the only places that do this. * * These routines set curpcb->pcb_onfault for the time they execute. When a * protection violation occurs inside the functions, the trap handler * returns to *curpcb->pcb_onfault instead of the function. */ .macro SMAP_DISABLE smap .if \smap stac .endif .endm .macro SMAP_ENABLE smap .if \smap clac .endif .endm .macro COPYINOUT_BEGIN .endm .macro COPYINOUT_END movq %rax,PCB_ONFAULT(%r11) POP_FRAME_POINTER .endm .macro COPYINOUT_SMAP_END SMAP_ENABLE smap=1 COPYINOUT_END .endm /* * copyout(from_kernel, to_user, len) * %rdi, %rsi, %rdx */ .macro COPYOUT smap erms PUSH_FRAME_POINTER movq PCPU(CURPCB),%r11 movq $copy_fault,PCB_ONFAULT(%r11) /* * Check explicitly for non-user addresses. * First, prevent address wrapping. */ movq %rsi,%rax addq %rdx,%rax jc copy_fault /* * XXX STOP USING VM_MAXUSER_ADDRESS. * It is an end address, not a max, so every time it is used correctly it * looks like there is an off by one error, and of course it caused an off * by one error in several places. */ movq $VM_MAXUSER_ADDRESS,%rcx cmpq %rcx,%rax ja copy_fault /* * Set return value to zero. Remaining failure mode goes through * copy_fault. */ xorl %eax,%eax /* * Set up arguments for MEMMOVE. 
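 *
 * copyout(kaddr, uaddr, len) arrives with the kernel source in %rdi and
 * the user destination in %rsi, while MEMMOVE wants dst in %rdi, src in
 * %rsi and the count in %rcx, so the pointer registers are swapped
 * through %r8 and %rdx is duplicated into %rcx. The user-range check
 * above is equivalent to this C sketch (illustrative only):
 *
 *	// false if [uaddr, uaddr + len) wraps or leaves user space
 *	static int
 *	range_ok(uint64_t uaddr, uint64_t len)
 *	{
 *		return (uaddr + len >= uaddr &&
 *		    uaddr + len <= VM_MAXUSER_ADDRESS);
 *	}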
*/ movq %rdi,%r8 movq %rsi,%rdi movq %r8,%rsi movq %rdx,%rcx SMAP_DISABLE \smap .if \smap == 1 MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_SMAP_END .else MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_END .endif /* NOTREACHED */ .endm ENTRY(copyout_nosmap_std) COPYOUT smap=0 erms=0 END(copyout_nosmap_std) ENTRY(copyout_smap_std) COPYOUT smap=1 erms=0 END(copyout_smap_std) ENTRY(copyout_nosmap_erms) COPYOUT smap=0 erms=1 END(copyout_nosmap_erms) ENTRY(copyout_smap_erms) COPYOUT smap=1 erms=1 END(copyout_smap_erms) /* * copyin(from_user, to_kernel, len) * %rdi, %rsi, %rdx */ .macro COPYIN smap erms PUSH_FRAME_POINTER movq PCPU(CURPCB),%r11 movq $copy_fault,PCB_ONFAULT(%r11) /* * make sure address is valid */ movq %rdi,%rax addq %rdx,%rax jc copy_fault movq $VM_MAXUSER_ADDRESS,%rcx cmpq %rcx,%rax ja copy_fault xorl %eax,%eax movq %rdi,%r8 movq %rsi,%rdi movq %r8,%rsi movq %rdx,%rcx SMAP_DISABLE \smap .if \smap == 1 MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_SMAP_END .else MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_END .endif /* NOTREACHED */ .endm ENTRY(copyin_nosmap_std) COPYIN smap=0 erms=0 END(copyin_nosmap_std) ENTRY(copyin_smap_std) COPYIN smap=1 erms=0 END(copyin_smap_std) ENTRY(copyin_nosmap_erms) COPYIN smap=0 erms=1 END(copyin_nosmap_erms) ENTRY(copyin_smap_erms) COPYIN smap=1 erms=1 END(copyin_smap_erms) ALIGN_TEXT /* Trap entry clears PSL.AC */ copy_fault: movq $0,PCB_ONFAULT(%r11) movl $EFAULT,%eax POP_FRAME_POINTER ret /* * casueword32. Compare and set user integer. Returns -1 on fault, * 0 if access was successful. Old value is written to *oldp. * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx */ ENTRY(casueword32_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%r8 movq $fusufault,PCB_ONFAULT(%r8) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault movl %esi,%eax /* old */ #ifdef SMP lock #endif cmpxchgl %ecx,(%rdi) /* new = %ecx */ setne %cl /* * The old value is in %eax. If the store succeeded it will be the * value we expected (old) from before the store, otherwise it will * be the current value. Save %eax into %esi to prepare the return * value. */ movl %eax,%esi xorl %eax,%eax movq %rax,PCB_ONFAULT(%r8) /* * Access the oldp after the pcb_onfault is cleared, to correctly * catch corrupted pointer. */ movl %esi,(%rdx) /* oldp = %rdx */ POP_FRAME_POINTER movzbl %cl, %eax ret END(casueword32_nosmap) ENTRY(casueword32_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%r8 movq $fusufault,PCB_ONFAULT(%r8) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault movl %esi,%eax /* old */ stac #ifdef SMP lock #endif cmpxchgl %ecx,(%rdi) /* new = %ecx */ clac setne %cl /* * The old value is in %eax. If the store succeeded it will be the * value we expected (old) from before the store, otherwise it will * be the current value. Save %eax into %esi to prepare the return * value. */ movl %eax,%esi xorl %eax,%eax movq %rax,PCB_ONFAULT(%r8) /* * Access the oldp after the pcb_onfault is cleared, to correctly * catch corrupted pointer. */ movl %esi,(%rdx) /* oldp = %rdx */ POP_FRAME_POINTER movzbl %cl, %eax ret END(casueword32_smap) /* * casueword. Compare and set user long. Returns -1 on fault, * 0 if access was successful. Old value is written to *oldp. 
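 * A non-faulting call also returns 1 when the comparison failed (see
 * the setne below). A C sketch of the contract, with the __atomic
 * builtin standing in for LOCK CMPXCHG (illustrative only; the real
 * fault path runs through fusufault):
 *
 *	int
 *	casueword_model(volatile u_long *dst, u_long old, u_long *oldp,
 *	    u_long new)
 *	{
 *		u_long expected = old;
 *		int ok;
 *
 *		ok = __atomic_compare_exchange_n(dst, &expected, new, 0,
 *		    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *		*oldp = expected;	// value CMPXCHG observed at dst
 *		return (ok ? 0 : 1);
 *	}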
* dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx */ ENTRY(casueword_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%r8 movq $fusufault,PCB_ONFAULT(%r8) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault movq %rsi,%rax /* old */ #ifdef SMP lock #endif cmpxchgq %rcx,(%rdi) /* new = %rcx */ setne %cl /* * The old value is in %rax. If the store succeeded it will be the * value we expected (old) from before the store, otherwise it will * be the current value. */ movq %rax,%rsi xorl %eax,%eax movq %rax,PCB_ONFAULT(%r8) movq %rsi,(%rdx) POP_FRAME_POINTER movzbl %cl, %eax ret END(casueword_nosmap) ENTRY(casueword_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%r8 movq $fusufault,PCB_ONFAULT(%r8) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault movq %rsi,%rax /* old */ stac #ifdef SMP lock #endif cmpxchgq %rcx,(%rdi) /* new = %rcx */ clac setne %cl /* * The old value is in %rax. If the store succeeded it will be the * value we expected (old) from before the store, otherwise it will * be the current value. */ movq %rax,%rsi xorl %eax,%eax movq %rax,PCB_ONFAULT(%r8) movq %rsi,(%rdx) POP_FRAME_POINTER movzbl %cl, %eax ret END(casueword_smap) /* * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit * byte from user memory. * addr = %rdi, valp = %rsi */ ENTRY(fueword_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-8,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault xorl %eax,%eax movq (%rdi),%r11 movq %rax,PCB_ONFAULT(%rcx) movq %r11,(%rsi) POP_FRAME_POINTER ret END(fueword_nosmap) ENTRY(fueword_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-8,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault xorl %eax,%eax stac movq (%rdi),%r11 clac movq %rax,PCB_ONFAULT(%rcx) movq %r11,(%rsi) POP_FRAME_POINTER ret END(fueword_smap) ENTRY(fueword32_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault xorl %eax,%eax movl (%rdi),%r11d movq %rax,PCB_ONFAULT(%rcx) movl %r11d,(%rsi) POP_FRAME_POINTER ret END(fueword32_nosmap) ENTRY(fueword32_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address is valid */ ja fusufault xorl %eax,%eax stac movl (%rdi),%r11d clac movq %rax,PCB_ONFAULT(%rcx) movl %r11d,(%rsi) POP_FRAME_POINTER ret END(fueword32_smap) ENTRY(fuword16_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-2,%rax cmpq %rax,%rdi ja fusufault movzwl (%rdi),%eax movq $0,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(fuword16_nosmap) ENTRY(fuword16_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-2,%rax cmpq %rax,%rdi ja fusufault stac movzwl (%rdi),%eax clac movq $0,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(fuword16_smap) ENTRY(fubyte_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-1,%rax cmpq %rax,%rdi ja fusufault movzbl (%rdi),%eax movq $0,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(fubyte_nosmap) ENTRY(fubyte_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-1,%rax cmpq %rax,%rdi ja fusufault stac movzbl (%rdi),%eax clac movq 
$0,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(fubyte_smap) /* * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to * user memory. * addr = %rdi, value = %rsi */ ENTRY(suword_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-8,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault movq %rsi,(%rdi) xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(suword_nosmap) ENTRY(suword_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-8,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault stac movq %rsi,(%rdi) clac xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(suword_smap) ENTRY(suword32_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault movl %esi,(%rdi) xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(suword32_nosmap) ENTRY(suword32_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-4,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault stac movl %esi,(%rdi) clac xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(suword32_smap) ENTRY(suword16_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-2,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault movw %si,(%rdi) xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(suword16_nosmap) ENTRY(suword16_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-2,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault stac movw %si,(%rdi) clac xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(suword16_smap) ENTRY(subyte_nosmap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-1,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault movl %esi,%eax movb %al,(%rdi) xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(subyte_nosmap) ENTRY(subyte_smap) PUSH_FRAME_POINTER movq PCPU(CURPCB),%rcx movq $fusufault,PCB_ONFAULT(%rcx) movq $VM_MAXUSER_ADDRESS-1,%rax cmpq %rax,%rdi /* verify address validity */ ja fusufault movl %esi,%eax stac movb %al,(%rdi) clac xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) POP_FRAME_POINTER ret END(subyte_smap) ALIGN_TEXT /* Fault entry clears PSL.AC */ fusufault: movq PCPU(CURPCB),%rcx xorl %eax,%eax movq %rax,PCB_ONFAULT(%rcx) decq %rax POP_FRAME_POINTER ret /* * copyinstr(from, to, maxlen, int *lencopied) * %rdi, %rsi, %rdx, %rcx * * copy a string from 'from' to 'to', stop when a 0 character is reached. * return ENAMETOOLONG if string is longer than maxlen, and * EFAULT on protection violations. If lencopied is non-zero, * return the actual length in *lencopied. 
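 *
 * The macro additionally clamps maxlen so the scan cannot run past
 * VM_MAXUSER_ADDRESS, and reports EFAULT when the limit it hit was the
 * address-space bound rather than the caller's. Ignoring faults, the
 * loop behaves like this C sketch (illustrative only):
 *
 *	int
 *	copyinstr_model(const char *from, char *to, size_t maxlen,
 *	    size_t *lencopied)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < maxlen; i++) {
 *			if ((to[i] = from[i]) == '\0') {
 *				if (lencopied != NULL)
 *					*lencopied = i + 1;	// NUL counted
 *				return (0);
 *			}
 *		}
 *		if (lencopied != NULL)
 *			*lencopied = maxlen;
 *		return (ENAMETOOLONG);
 *	}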
*/ .macro COPYINSTR smap PUSH_FRAME_POINTER movq %rdx,%r8 /* %r8 = maxlen */ movq PCPU(CURPCB),%r9 movq $cpystrflt,PCB_ONFAULT(%r9) movq $VM_MAXUSER_ADDRESS,%rax /* make sure 'from' is within bounds */ subq %rdi,%rax jbe cpystrflt SMAP_DISABLE \smap /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */ cmpq %rdx,%rax jb 8f 1: incq %rdx 2: decq %rdx .if \smap == 0 jz copyinstr_toolong .else jz copyinstr_toolong_smap .endif movb (%rdi),%al movb %al,(%rsi) incq %rsi incq %rdi testb %al,%al jnz 2b SMAP_ENABLE \smap /* Success -- 0 byte reached */ decq %rdx xorl %eax,%eax /* set *lencopied and return %eax */ movq %rax,PCB_ONFAULT(%r9) testq %rcx,%rcx jz 3f subq %rdx,%r8 movq %r8,(%rcx) 3: POP_FRAME_POINTER ret ALIGN_TEXT 8: movq %rax,%rdx movq %rax,%r8 jmp 1b .endm ENTRY(copyinstr_nosmap) COPYINSTR smap=0 END(copyinstr_nosmap) ENTRY(copyinstr_smap) COPYINSTR smap=1 END(copyinstr_smap) cpystrflt: /* Fault entry clears PSL.AC */ movl $EFAULT,%eax cpystrflt_x: /* set *lencopied and return %eax */ movq $0,PCB_ONFAULT(%r9) testq %rcx,%rcx jz 1f subq %rdx,%r8 movq %r8,(%rcx) 1: POP_FRAME_POINTER ret copyinstr_toolong_smap: clac copyinstr_toolong: /* rdx is zero - return ENAMETOOLONG or EFAULT */ movq $VM_MAXUSER_ADDRESS,%rax cmpq %rax,%rdi jae cpystrflt movl $ENAMETOOLONG,%eax jmp cpystrflt_x /* - * copystr(from, to, maxlen, int *lencopied) - * %rdi, %rsi, %rdx, %rcx - */ -ENTRY(copystr) - PUSH_FRAME_POINTER - movq %rdx,%r8 /* %r8 = maxlen */ - - incq %rdx -1: - decq %rdx - jz 4f - movb (%rdi),%al - movb %al,(%rsi) - incq %rsi - incq %rdi - testb %al,%al - jnz 1b - - /* Success -- 0 byte reached */ - decq %rdx - xorl %eax,%eax -2: - testq %rcx,%rcx - jz 3f - /* set *lencopied and return %rax */ - subq %rdx,%r8 - movq %r8,(%rcx) -3: - POP_FRAME_POINTER - ret -4: - /* rdx is zero -- return ENAMETOOLONG */ - movl $ENAMETOOLONG,%eax - jmp 2b -END(copystr) - -/* * Handling of special amd64 registers and descriptor tables etc */ /* void lgdt(struct region_descriptor *rdp); */ ENTRY(lgdt) /* reload the descriptor table */ lgdt (%rdi) /* flush the prefetch q */ jmp 1f nop 1: movl $KDSEL,%eax movl %eax,%ds movl %eax,%es movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */ movl %eax,%gs movl %eax,%ss /* reload code selector by turning return into intersegmental return */ popq %rax pushq $KCSEL pushq %rax MEXITCOUNT lretq END(lgdt) /*****************************************************************************/ /* setjump, longjump */ /*****************************************************************************/ ENTRY(setjmp) movq %rbx,0(%rdi) /* save rbx */ movq %rsp,8(%rdi) /* save rsp */ movq %rbp,16(%rdi) /* save rbp */ movq %r12,24(%rdi) /* save r12 */ movq %r13,32(%rdi) /* save r13 */ movq %r14,40(%rdi) /* save r14 */ movq %r15,48(%rdi) /* save r15 */ movq 0(%rsp),%rdx /* get rta */ movq %rdx,56(%rdi) /* save rip */ xorl %eax,%eax /* return(0); */ ret END(setjmp) ENTRY(longjmp) movq 0(%rdi),%rbx /* restore rbx */ movq 8(%rdi),%rsp /* restore rsp */ movq 16(%rdi),%rbp /* restore rbp */ movq 24(%rdi),%r12 /* restore r12 */ movq 32(%rdi),%r13 /* restore r13 */ movq 40(%rdi),%r14 /* restore r14 */ movq 48(%rdi),%r15 /* restore r15 */ movq 56(%rdi),%rdx /* get rta */ movq %rdx,0(%rsp) /* put in return frame */ xorl %eax,%eax /* return(1); */ incl %eax ret END(longjmp) /* * Support for reading MSRs in the safe manner. (Instead of panic on #gp, * return an error.) 
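 *
 * pcb_onfault is pointed at msr_onfault for the duration of the RDMSR,
 * so a #GP from a non-existent MSR returns EFAULT instead of panicking.
 * Typical use (a sketch; the caller picks its own error code):
 *
 *	uint64_t val;
 *
 *	if (rdmsr_safe(msr, &val) != 0)
 *		return (ENXIO);		// MSR not implemented on this CPU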
*/ ENTRY(rdmsr_safe) /* int rdmsr_safe(u_int msr, uint64_t *data) */ PUSH_FRAME_POINTER movq PCPU(CURPCB),%r8 movq $msr_onfault,PCB_ONFAULT(%r8) movl %edi,%ecx rdmsr /* Read MSR pointed by %ecx. Returns hi byte in edx, lo in %eax */ salq $32,%rdx /* sign-shift %rdx left */ movl %eax,%eax /* zero-extend %eax -> %rax */ orq %rdx,%rax movq %rax,(%rsi) xorq %rax,%rax movq %rax,PCB_ONFAULT(%r8) POP_FRAME_POINTER ret /* * Support for writing MSRs in the safe manner. (Instead of panic on #gp, * return an error.) */ ENTRY(wrmsr_safe) /* int wrmsr_safe(u_int msr, uint64_t data) */ PUSH_FRAME_POINTER movq PCPU(CURPCB),%r8 movq $msr_onfault,PCB_ONFAULT(%r8) movl %edi,%ecx movl %esi,%eax sarq $32,%rsi movl %esi,%edx wrmsr /* Write MSR pointed by %ecx. Accepts hi byte in edx, lo in %eax. */ xorq %rax,%rax movq %rax,PCB_ONFAULT(%r8) POP_FRAME_POINTER ret /* * MSR operations fault handler */ ALIGN_TEXT msr_onfault: movq $0,PCB_ONFAULT(%r8) movl $EFAULT,%eax POP_FRAME_POINTER ret /* * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3); * Invalidates address space addressed by ucr3, then returns to kcr3. * Done in assembler to ensure no other memory accesses happen while * on ucr3. */ ALIGN_TEXT ENTRY(pmap_pti_pcid_invalidate) pushfq cli movq %rdi,%cr3 /* to user page table */ movq %rsi,%cr3 /* back to kernel */ popfq retq /* * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va); * Invalidates virtual address va in address space ucr3, then returns to kcr3. */ ALIGN_TEXT ENTRY(pmap_pti_pcid_invlpg) pushfq cli movq %rdi,%cr3 /* to user page table */ invlpg (%rdx) movq %rsi,%cr3 /* back to kernel */ popfq retq /* * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva, * vm_offset_t eva); * Invalidates virtual addresses between sva and eva in address space ucr3, * then returns to kcr3. */ ALIGN_TEXT ENTRY(pmap_pti_pcid_invlrng) pushfq cli movq %rdi,%cr3 /* to user page table */ 1: invlpg (%rdx) addq $PAGE_SIZE,%rdx cmpq %rdx,%rcx ja 1b movq %rsi,%cr3 /* back to kernel */ popfq retq .altmacro .macro ibrs_seq_label l handle_ibrs_\l: .endm .macro ibrs_call_label l call handle_ibrs_\l .endm .macro ibrs_seq count ll=1 .rept \count ibrs_call_label %(ll) nop ibrs_seq_label %(ll) addq $8,%rsp ll=ll+1 .endr .endm /* all callers already saved %rax, %rdx, and %rcx */ ENTRY(handle_ibrs_entry) cmpb $0,hw_ibrs_ibpb_active(%rip) je 1f movl $MSR_IA32_SPEC_CTRL,%ecx rdmsr orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx wrmsr movb $1,PCPU(IBPB_SET) testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip) jne 1f ibrs_seq 32 1: ret END(handle_ibrs_entry) ENTRY(handle_ibrs_exit) cmpb $0,PCPU(IBPB_SET) je 1f movl $MSR_IA32_SPEC_CTRL,%ecx rdmsr andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx wrmsr movb $0,PCPU(IBPB_SET) 1: ret END(handle_ibrs_exit) /* registers-neutral version, but needs stack */ ENTRY(handle_ibrs_exit_rs) cmpb $0,PCPU(IBPB_SET) je 1f pushq %rax pushq %rdx pushq %rcx movl $MSR_IA32_SPEC_CTRL,%ecx rdmsr andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx wrmsr popq %rcx popq %rdx popq %rax movb $0,PCPU(IBPB_SET) 1: ret END(handle_ibrs_exit_rs) .noaltmacro /* * Flush L1D cache. Load enough of the data from the kernel text * to flush existing L1D content. * * N.B. The function does not follow ABI calling conventions, it corrupts %rbx. 
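 * (%rbx is clobbered by the CPUID that serializes between the passes.)
 * Pass 1 touches one byte per 4K page to prime the TLB for older CPUs
 * that fragment 2M mappings; pass 2 reads one byte per 64-byte line,
 * pulling L1D_FLUSH_SIZE (64KB) of kernel text through the L1D.
 * Roughly, in C (illustrative sketch only):
 *
 *	static void
 *	flush_l1d_model(void)
 *	{
 *		const volatile char *p = (const char *)KERNBASE;
 *		size_t i;
 *
 *		for (i = 0; i < L1D_FLUSH_SIZE; i += PAGE_SIZE)
 *			(void)p[i];	// pass 1: prime the TLB
 *		// the real code runs CPUID here as a serializing barrier
 *		for (i = 0; i < L1D_FLUSH_SIZE; i += 64)
 *			(void)p[i];	// pass 2: read every cache line
 *	}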
* The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags * registers are clobbered. The NMI handler caller only needs %r13 preserved. */ ENTRY(flush_l1d_sw) #define L1D_FLUSH_SIZE (64 * 1024) movq $KERNBASE, %r9 movq $-L1D_FLUSH_SIZE, %rcx /* * pass 1: Preload TLB. * Kernel text is mapped using superpages. TLB preload is * done for the benefit of older CPUs which split 2M page * into 4k TLB entries. */ 1: movb L1D_FLUSH_SIZE(%r9, %rcx), %al addq $PAGE_SIZE, %rcx jne 1b xorl %eax, %eax cpuid movq $-L1D_FLUSH_SIZE, %rcx /* pass 2: Read each cache line. */ 2: movb L1D_FLUSH_SIZE(%r9, %rcx), %al addq $64, %rcx jne 2b lfence ret #undef L1D_FLUSH_SIZE END(flush_l1d_sw) ENTRY(flush_l1d_sw_abi) pushq %rbx call flush_l1d_sw popq %rbx ret END(flush_l1d_sw_abi) ENTRY(mds_handler_void) retq END(mds_handler_void) ENTRY(mds_handler_verw) subq $8, %rsp movw %ds, (%rsp) verw (%rsp) addq $8, %rsp retq END(mds_handler_verw) ENTRY(mds_handler_ivb) pushq %rax pushq %rdx pushq %rcx movq %cr0, %rax testb $CR0_TS, %al je 1f clts 1: movq PCPU(MDS_BUF), %rdx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 lfence orpd (%rdx), %xmm0 orpd (%rdx), %xmm0 mfence movl $40, %ecx addq $16, %rdx 2: movntdq %xmm0, (%rdx) addq $16, %rdx decl %ecx jnz 2b mfence movdqa PCPU(MDS_TMP),%xmm0 testb $CR0_TS, %al je 3f movq %rax, %cr0 3: popq %rcx popq %rdx popq %rax retq END(mds_handler_ivb) ENTRY(mds_handler_bdw) pushq %rax pushq %rbx pushq %rcx pushq %rdi pushq %rsi movq %cr0, %rax testb $CR0_TS, %al je 1f clts 1: movq PCPU(MDS_BUF), %rbx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 movq %rbx, %rdi movq %rbx, %rsi movl $40, %ecx 2: movntdq %xmm0, (%rbx) addq $16, %rbx decl %ecx jnz 2b mfence movl $1536, %ecx rep; movsb lfence movdqa PCPU(MDS_TMP),%xmm0 testb $CR0_TS, %al je 3f movq %rax, %cr0 3: popq %rsi popq %rdi popq %rcx popq %rbx popq %rax retq END(mds_handler_bdw) ENTRY(mds_handler_skl_sse) pushq %rax pushq %rdx pushq %rcx pushq %rdi movq %cr0, %rax testb $CR0_TS, %al je 1f clts 1: movq PCPU(MDS_BUF), %rdi movq PCPU(MDS_BUF64), %rdx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 lfence orpd (%rdx), %xmm0 orpd (%rdx), %xmm0 xorl %eax, %eax 2: clflushopt 5376(%rdi, %rax, 8) addl $8, %eax cmpl $8 * 12, %eax jb 2b sfence movl $6144, %ecx xorl %eax, %eax rep; stosb mfence movdqa PCPU(MDS_TMP), %xmm0 testb $CR0_TS, %al je 3f movq %rax, %cr0 3: popq %rdi popq %rcx popq %rdx popq %rax retq END(mds_handler_skl_sse) ENTRY(mds_handler_skl_avx) pushq %rax pushq %rdx pushq %rcx pushq %rdi movq %cr0, %rax testb $CR0_TS, %al je 1f clts 1: movq PCPU(MDS_BUF), %rdi movq PCPU(MDS_BUF64), %rdx vmovdqa %ymm0, PCPU(MDS_TMP) vpxor %ymm0, %ymm0, %ymm0 lfence vorpd (%rdx), %ymm0, %ymm0 vorpd (%rdx), %ymm0, %ymm0 xorl %eax, %eax 2: clflushopt 5376(%rdi, %rax, 8) addl $8, %eax cmpl $8 * 12, %eax jb 2b sfence movl $6144, %ecx xorl %eax, %eax rep; stosb mfence vmovdqa PCPU(MDS_TMP), %ymm0 testb $CR0_TS, %al je 3f movq %rax, %cr0 3: popq %rdi popq %rcx popq %rdx popq %rax retq END(mds_handler_skl_avx) ENTRY(mds_handler_skl_avx512) pushq %rax pushq %rdx pushq %rcx pushq %rdi movq %cr0, %rax testb $CR0_TS, %al je 1f clts 1: movq PCPU(MDS_BUF), %rdi movq PCPU(MDS_BUF64), %rdx vmovdqa64 %zmm0, PCPU(MDS_TMP) vpxord %zmm0, %zmm0, %zmm0 lfence vorpd (%rdx), %zmm0, %zmm0 vorpd (%rdx), %zmm0, %zmm0 xorl %eax, %eax 2: clflushopt 5376(%rdi, %rax, 8) addl $8, %eax cmpl $8 * 12, %eax jb 2b sfence movl $6144, %ecx xorl %eax, %eax rep; stosb mfence vmovdqa64 PCPU(MDS_TMP), %zmm0 testb $CR0_TS, %al je 3f movq %rax, %cr0 3: popq %rdi popq %rcx popq 
%rdx popq %rax retq END(mds_handler_skl_avx512) ENTRY(mds_handler_silvermont) pushq %rax pushq %rdx pushq %rcx movq %cr0, %rax testb $CR0_TS, %al je 1f clts 1: movq PCPU(MDS_BUF), %rdx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 movl $16, %ecx 2: movntdq %xmm0, (%rdx) addq $16, %rdx decl %ecx jnz 2b mfence movdqa PCPU(MDS_TMP),%xmm0 testb $CR0_TS, %al je 3f movq %rax, %cr0 3: popq %rcx popq %rdx popq %rax retq END(mds_handler_silvermont) Index: head/sys/arm/arm/copystr.S =================================================================== --- head/sys/arm/arm/copystr.S (revision 360943) +++ head/sys/arm/arm/copystr.S (revision 360944) @@ -1,171 +1,138 @@ /* $NetBSD: copystr.S,v 1.8 2002/10/13 14:54:48 bjh21 Exp $ */ /*- * Copyright (c) 1995 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * copystr.S * * optimised and fault protected copystr functions * * Created : 16/05/95 */ #include "assym.inc" #include #include __FBSDID("$FreeBSD$"); #include .text .align 2 #if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lpcb #endif -/* - * r0 - from - * r1 - to - * r2 - maxlens - * r3 - lencopied - * - * Copy string from r0 to r1 - */ -ENTRY(copystr) - stmfd sp!, {r4-r5} /* stack is 8 byte aligned */ - teq r2, #0x00000000 - mov r5, #0x00000000 - moveq r0, #ENAMETOOLONG - beq 2f - -1: ldrb r4, [r0], #0x0001 - add r5, r5, #0x00000001 - teq r4, #0x00000000 - strb r4, [r1], #0x0001 - teqne r5, r2 - bne 1b - - teq r4, #0x00000000 - moveq r0, #0x00000000 - movne r0, #ENAMETOOLONG - -2: teq r3, #0x00000000 - strne r5, [r3] - - ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */ - RET -END(copystr) - #define SAVE_REGS stmfd sp!, {r4-r6} #define RESTORE_REGS ldmfd sp!, {r4-r6} /* * r0 - user space address * r1 - kernel space address * r2 - maxlens * r3 - lencopied * * Copy string from user space to kernel space */ ENTRY(copyinstr) SAVE_REGS teq r2, #0x00000000 mov r6, #0x00000000 moveq r0, #ENAMETOOLONG beq 2f ldr r12, =VM_MAXUSER_ADDRESS GET_PCB(r4) ldr r4, [r4] #ifdef DIAGNOSTIC teq r4, #0x00000000 beq .Lcopystrpcbfault #endif adr r5, .Lcopystrfault str r5, [r4, #PCB_ONFAULT] 1: cmp r0, r12 bcs .Lcopystrfault ldrbt r5, [r0], #0x0001 add r6, r6, #0x00000001 teq r5, #0x00000000 strb r5, [r1], #0x0001 teqne r6, r2 bne 1b mov r0, #0x00000000 str r0, [r4, #PCB_ONFAULT] teq r5, #0x00000000 moveq r0, #0x00000000 movne r0, #ENAMETOOLONG 2: teq r3, #0x00000000 strne r6, [r3] RESTORE_REGS RET END(copyinstr) /* A fault occurred during the copy */ .Lcopystrfault: mov r1, #0x00000000 str r1, [r4, #PCB_ONFAULT] mov r0, #EFAULT RESTORE_REGS RET #ifdef DIAGNOSTIC .Lcopystrpcbfault: mov r2, r1 mov r1, r0 adr r0, Lcopystrpcbfaulttext bic sp, sp, #7 /* align stack to 8 bytes */ b _C_LABEL(panic) Lcopystrpcbfaulttext: .asciz "No valid PCB during copyinoutstr() addr1=%08x addr2=%08x\n" .align 2 #endif Index: head/sys/arm64/arm64/copystr.c =================================================================== --- head/sys/arm64/arm64/copystr.c (revision 360943) +++ head/sys/arm64/arm64/copystr.c (nonexistent) @@ -1,61 +0,0 @@ -/*- - * Copyright (c) 2014 Andrew Turner - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include - -int -(copystr)(const void * __restrict kfaddr, void * __restrict kdaddr, size_t len, - size_t * __restrict lencopied) -{ - const char *src; - size_t pos; - char *dst; - int error; - - error = ENAMETOOLONG; - src = kfaddr; - dst = kdaddr; - for (pos = 0; pos < len; pos++) { - dst[pos] = src[pos]; - if (src[pos] == '\0') { - /* Increment pos to hold the number of bytes copied */ - pos++; - error = 0; - break; - } - } - - if (lencopied != NULL) - *lencopied = pos; - - return (error); -} - Property changes on: head/sys/arm64/arm64/copystr.c ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/sys/fs/fuse/fuse_vfsops.c =================================================================== --- head/sys/fs/fuse/fuse_vfsops.c (revision 360943) +++ head/sys/fs/fuse/fuse_vfsops.c (revision 360944) @@ -1,692 +1,690 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_node.h" #include "fuse_ipc.h" #include "fuse_internal.h" #include #include SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , vfsops, trace, "int", "char*"); /* This will do for privilege types for now */ #ifndef PRIV_VFS_FUSE_ALLOWOTHER #define PRIV_VFS_FUSE_ALLOWOTHER PRIV_VFS_MOUNT_NONUSER #endif #ifndef PRIV_VFS_FUSE_MOUNT_NONUSER #define PRIV_VFS_FUSE_MOUNT_NONUSER PRIV_VFS_MOUNT_NONUSER #endif #ifndef PRIV_VFS_FUSE_SYNC_UNMOUNT #define PRIV_VFS_FUSE_SYNC_UNMOUNT PRIV_VFS_MOUNT_NONUSER #endif static vfs_fhtovp_t fuse_vfsop_fhtovp; static vfs_mount_t fuse_vfsop_mount; static vfs_unmount_t fuse_vfsop_unmount; static vfs_root_t fuse_vfsop_root; static vfs_statfs_t fuse_vfsop_statfs; static vfs_vget_t fuse_vfsop_vget; struct vfsops fuse_vfsops = { .vfs_fhtovp = fuse_vfsop_fhtovp, .vfs_mount = fuse_vfsop_mount, .vfs_unmount = fuse_vfsop_unmount, .vfs_root = fuse_vfsop_root, .vfs_statfs = fuse_vfsop_statfs, .vfs_vget = fuse_vfsop_vget, }; static int fuse_enforce_dev_perms = 0; SYSCTL_INT(_vfs_fusefs, OID_AUTO, enforce_dev_perms, CTLFLAG_RW, &fuse_enforce_dev_perms, 0, "enforce fuse device permissions for secondary mounts"); MALLOC_DEFINE(M_FUSEVFS, "fuse_filesystem", "buffer for fuse vfs layer"); static int fuse_getdevice(const char *fspec, struct thread *td, struct cdev **fdevp) { struct nameidata nd, *ndp = &nd; struct vnode *devvp; struct cdev *fdev; int err; /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ NDINIT(ndp, LOOKUP, FOLLOW, UIO_SYSSPACE, fspec, td); if ((err = namei(ndp)) != 0) return err; NDFREE(ndp, NDF_ONLY_PNBUF); devvp = ndp->ni_vp; if (devvp->v_type != VCHR) { vrele(devvp); return ENXIO; } fdev = devvp->v_rdev; dev_ref(fdev); if (fuse_enforce_dev_perms) { /* * Check if mounter can open the fuse device. 
* * This has significance only if we are doing a secondary mount * which doesn't involve actually opening fuse devices, but we * still want to enforce the permissions of the device (in * order to keep control over the circle of fuse users). * * (In case of primary mounts, we are either the superuser so * we can do anything anyway, or we can mount only if the * device is already opened by us, ie. we are permitted to open * the device.) */ #if 0 #ifdef MAC err = mac_check_vnode_open(td->td_ucred, devvp, VREAD | VWRITE); if (!err) #endif #endif /* 0 */ err = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (err) { vrele(devvp); dev_rel(fdev); return err; } } /* * according to coda code, no extra lock is needed -- * although in sys/vnode.h this field is marked "v" */ vrele(devvp); if (!fdev->si_devsw || strcmp("fuse", fdev->si_devsw->d_name)) { dev_rel(fdev); return ENXIO; } *fdevp = fdev; return 0; } #define FUSE_FLAGOPT(fnam, fval) do { \ vfs_flagopt(opts, #fnam, &mntopts, fval); \ vfs_flagopt(opts, "__" #fnam, &__mntopts, fval); \ } while (0) SDT_PROBE_DEFINE1(fusefs, , vfsops, mntopts, "uint64_t"); SDT_PROBE_DEFINE4(fusefs, , vfsops, mount_err, "char*", "struct fuse_data*", "struct mount*", "int"); static int fuse_vfs_remount(struct mount *mp, struct thread *td, uint64_t mntopts, uint32_t max_read, int daemon_timeout) { int err = 0; struct fuse_data *data = fuse_get_mpdata(mp); /* Don't allow these options to be changed */ const static unsigned long long cant_update_opts = MNT_USER; /* Mount owner must be the user running the daemon */ FUSE_LOCK(); if ((mp->mnt_flag ^ data->mnt_flag) & cant_update_opts) { err = EOPNOTSUPP; SDT_PROBE4(fusefs, , vfsops, mount_err, "Can't change these mount options during remount", data, mp, err); goto out; } if (((data->dataflags ^ mntopts) & FSESS_MNTOPTS_MASK) || (data->max_read != max_read) || (data->daemon_timeout != daemon_timeout)) { // TODO: allow changing options where it makes sense err = EOPNOTSUPP; SDT_PROBE4(fusefs, , vfsops, mount_err, "Can't change fuse mount options during remount", data, mp, err); goto out; } if (fdata_get_dead(data)) { err = ENOTCONN; SDT_PROBE4(fusefs, , vfsops, mount_err, "device is dead during mount", data, mp, err); goto out; } /* Sanity + permission checks */ if (!data->daemoncred) panic("fuse daemon found, but identity unknown"); if (mntopts & FSESS_DAEMON_CAN_SPY) err = priv_check(td, PRIV_VFS_FUSE_ALLOWOTHER); if (err == 0 && td->td_ucred->cr_uid != data->daemoncred->cr_uid) /* are we allowed to do the first mount? 
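 * A mounter whose uid differs from the daemon's credential needs
 * PRIV_VFS_FUSE_MOUNT_NONUSER, checked just below.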
*/ err = priv_check(td, PRIV_VFS_FUSE_MOUNT_NONUSER); out: FUSE_UNLOCK(); return err; } static int fuse_vfsop_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) { struct fuse_fid *ffhp = (struct fuse_fid *)fhp; struct fuse_vnode_data *fvdat; struct vnode *nvp; int error; if (!(fuse_get_mpdata(mp)->dataflags & FSESS_EXPORT_SUPPORT)) return EOPNOTSUPP; error = VFS_VGET(mp, ffhp->nid, LK_EXCLUSIVE, &nvp); if (error) { *vpp = NULLVP; return (error); } fvdat = VTOFUD(nvp); if (fvdat->generation != ffhp->gen ) { vput(nvp); *vpp = NULLVP; return (ESTALE); } *vpp = nvp; vnode_create_vobject(*vpp, 0, curthread); return (0); } static int fuse_vfsop_mount(struct mount *mp) { int err; uint64_t mntopts, __mntopts; uint32_t max_read; int daemon_timeout; int fd; - size_t len; - struct cdev *fdev; struct fuse_data *data = NULL; struct thread *td; struct file *fp, *fptmp; char *fspec, *subtype; struct vfsoptlist *opts; subtype = NULL; max_read = ~0; err = 0; mntopts = 0; __mntopts = 0; td = curthread; /* Get the new options passed to mount */ opts = mp->mnt_optnew; if (!opts) return EINVAL; /* `fspath' contains the mount point (eg. /mnt/fuse/sshfs); REQUIRED */ if (!vfs_getopts(opts, "fspath", &err)) return err; /* * With the help of underscored options the mount program * can inform us from the flags it sets by default */ FUSE_FLAGOPT(allow_other, FSESS_DAEMON_CAN_SPY); FUSE_FLAGOPT(push_symlinks_in, FSESS_PUSH_SYMLINKS_IN); FUSE_FLAGOPT(default_permissions, FSESS_DEFAULT_PERMISSIONS); FUSE_FLAGOPT(intr, FSESS_INTR); (void)vfs_scanopt(opts, "max_read=", "%u", &max_read); if (vfs_scanopt(opts, "timeout=", "%u", &daemon_timeout) == 1) { if (daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT) daemon_timeout = FUSE_MIN_DAEMON_TIMEOUT; else if (daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) daemon_timeout = FUSE_MAX_DAEMON_TIMEOUT; } else { daemon_timeout = FUSE_DEFAULT_DAEMON_TIMEOUT; } subtype = vfs_getopts(opts, "subtype=", &err); SDT_PROBE1(fusefs, , vfsops, mntopts, mntopts); if (mp->mnt_flag & MNT_UPDATE) { return fuse_vfs_remount(mp, td, mntopts, max_read, daemon_timeout); } /* `from' contains the device name (eg. /dev/fuse0); REQUIRED */ fspec = vfs_getopts(opts, "from", &err); if (!fspec) return err; /* `fd' contains the filedescriptor for this session; REQUIRED */ if (vfs_scanopt(opts, "fd", "%d", &fd) != 1) return EINVAL; err = fuse_getdevice(fspec, td, &fdev); if (err != 0) return err; err = fget(td, fd, &cap_read_rights, &fp); if (err != 0) { SDT_PROBE2(fusefs, , vfsops, trace, 1, "invalid or not opened device"); goto out; } fptmp = td->td_fpop; td->td_fpop = fp; err = devfs_get_cdevpriv((void **)&data); td->td_fpop = fptmp; fdrop(fp, td); FUSE_LOCK(); if (err != 0 || data == NULL) { err = ENXIO; SDT_PROBE4(fusefs, , vfsops, mount_err, "invalid or not opened device", data, mp, err); FUSE_UNLOCK(); goto out; } if (fdata_get_dead(data)) { err = ENOTCONN; SDT_PROBE4(fusefs, , vfsops, mount_err, "device is dead during mount", data, mp, err); FUSE_UNLOCK(); goto out; } /* Sanity + permission checks */ if (!data->daemoncred) panic("fuse daemon found, but identity unknown"); if (mntopts & FSESS_DAEMON_CAN_SPY) err = priv_check(td, PRIV_VFS_FUSE_ALLOWOTHER); if (err == 0 && td->td_ucred->cr_uid != data->daemoncred->cr_uid) /* are we allowed to do the first mount? 
*/ err = priv_check(td, PRIV_VFS_FUSE_MOUNT_NONUSER); if (err) { FUSE_UNLOCK(); goto out; } data->ref++; data->mp = mp; data->dataflags |= mntopts; data->max_read = max_read; data->daemon_timeout = daemon_timeout; data->mnt_flag = mp->mnt_flag & MNT_UPDATEMASK; FUSE_UNLOCK(); vfs_getnewfsid(mp); MNT_ILOCK(mp); mp->mnt_data = data; /* * FUSE file systems can be either local or remote, but the kernel * can't tell the difference. */ mp->mnt_flag &= ~MNT_LOCAL; mp->mnt_kern_flag |= MNTK_USES_BCACHE; MNT_IUNLOCK(mp); /* We need this here as this slot is used by getnewvnode() */ mp->mnt_stat.f_iosize = maxbcachebuf; if (subtype) { strlcat(mp->mnt_stat.f_fstypename, ".", MFSNAMELEN); strlcat(mp->mnt_stat.f_fstypename, subtype, MFSNAMELEN); } - copystr(fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &len); - bzero(mp->mnt_stat.f_mntfromname + len, MNAMELEN - len); + memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN); + strlcpy(mp->mnt_stat.f_mntfromname, fspec, MNAMELEN); mp->mnt_iosize_max = MAXPHYS; /* Now handshaking with daemon */ fuse_internal_send_init(data, td); out: if (err) { FUSE_LOCK(); if (data != NULL && data->mp == mp) { /* * Destroy device only if we acquired reference to * it */ SDT_PROBE4(fusefs, , vfsops, mount_err, "mount failed, destroy device", data, mp, err); data->mp = NULL; mp->mnt_data = NULL; fdata_trydestroy(data); } FUSE_UNLOCK(); dev_rel(fdev); } return err; } static int fuse_vfsop_unmount(struct mount *mp, int mntflags) { int err = 0; int flags = 0; struct cdev *fdev; struct fuse_data *data; struct fuse_dispatcher fdi; struct thread *td = curthread; if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; } data = fuse_get_mpdata(mp); if (!data) { panic("no private data for mount point?"); } /* There is 1 extra root vnode reference (mp->mnt_data). */ FUSE_LOCK(); if (data->vroot != NULL) { struct vnode *vroot = data->vroot; data->vroot = NULL; FUSE_UNLOCK(); vrele(vroot); } else FUSE_UNLOCK(); err = vflush(mp, 0, flags, td); if (err) { return err; } if (fdata_get_dead(data)) { goto alreadydead; } if (fsess_isimpl(mp, FUSE_DESTROY)) { fdisp_init(&fdi, 0); fdisp_make(&fdi, FUSE_DESTROY, mp, 0, td, NULL); (void)fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); } fdata_set_dead(data); alreadydead: FUSE_LOCK(); data->mp = NULL; fdev = data->fdev; fdata_trydestroy(data); FUSE_UNLOCK(); MNT_ILOCK(mp); mp->mnt_data = NULL; MNT_IUNLOCK(mp); dev_rel(fdev); return 0; } SDT_PROBE_DEFINE1(fusefs, , vfsops, invalidate_without_export, "struct mount*"); static int fuse_vfsop_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { struct fuse_data *data = fuse_get_mpdata(mp); uint64_t nodeid = ino; struct thread *td = curthread; struct fuse_dispatcher fdi; struct fuse_entry_out *feo; struct fuse_vnode_data *fvdat; const char dot[] = "."; off_t filesize; enum vtype vtyp; int error; if (!(data->dataflags & FSESS_EXPORT_SUPPORT)) { /* * Unreachable unless you do something stupid, like export a * nullfs mount of a fusefs file system. */ SDT_PROBE1(fusefs, , vfsops, invalidate_without_export, mp); return (EOPNOTSUPP); } error = fuse_internal_get_cached_vnode(mp, ino, flags, vpp); if (error || *vpp != NULL) return error; /* Do a LOOKUP, using nodeid as the parent and "." 
as filename */ fdisp_init(&fdi, sizeof(dot)); fdisp_make(&fdi, FUSE_LOOKUP, mp, nodeid, td, td->td_ucred); memcpy(fdi.indata, dot, sizeof(dot)); error = fdisp_wait_answ(&fdi); if (error) return error; feo = (struct fuse_entry_out *)fdi.answ; if (feo->nodeid == 0) { /* zero nodeid means ENOENT and cache it */ error = ENOENT; goto out; } vtyp = IFTOVT(feo->attr.mode); error = fuse_vnode_get(mp, feo, nodeid, NULL, vpp, NULL, vtyp); if (error) goto out; filesize = feo->attr.size; /* * In the case where we are looking up a FUSE node represented by an * existing cached vnode, and the true size reported by FUSE_LOOKUP * doesn't match the vnode's cached size, then any cached writes beyond * the file's current size are lost. * * We can get here: * * following attribute cache expiration, or * * due a bug in the daemon, or */ fvdat = VTOFUD(*vpp); if (vnode_isreg(*vpp) && filesize != fvdat->cached_attrs.va_size && fvdat->flag & FN_SIZECHANGE) { printf("%s: WB cache incoherent on %s!\n", __func__, vnode_mount(*vpp)->mnt_stat.f_mntonname); fvdat->flag &= ~FN_SIZECHANGE; } fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL); fuse_validity_2_bintime(feo->entry_valid, feo->entry_valid_nsec, &fvdat->entry_cache_timeout); out: fdisp_destroy(&fdi); return error; } static int fuse_vfsop_root(struct mount *mp, int lkflags, struct vnode **vpp) { struct fuse_data *data = fuse_get_mpdata(mp); int err = 0; if (data->vroot != NULL) { err = vget(data->vroot, lkflags, curthread); if (err == 0) *vpp = data->vroot; } else { err = fuse_vnode_get(mp, NULL, FUSE_ROOT_ID, NULL, vpp, NULL, VDIR); if (err == 0) { FUSE_LOCK(); MPASS(data->vroot == NULL || data->vroot == *vpp); if (data->vroot == NULL) { SDT_PROBE2(fusefs, , vfsops, trace, 1, "new root vnode"); data->vroot = *vpp; FUSE_UNLOCK(); vref(*vpp); } else if (data->vroot != *vpp) { SDT_PROBE2(fusefs, , vfsops, trace, 1, "root vnode race"); FUSE_UNLOCK(); VOP_UNLOCK(*vpp); vrele(*vpp); vrecycle(*vpp); *vpp = data->vroot; } else FUSE_UNLOCK(); } } return err; } static int fuse_vfsop_statfs(struct mount *mp, struct statfs *sbp) { struct fuse_dispatcher fdi; int err = 0; struct fuse_statfs_out *fsfo; struct fuse_data *data; data = fuse_get_mpdata(mp); if (!(data->dataflags & FSESS_INITED)) goto fake; fdisp_init(&fdi, 0); fdisp_make(&fdi, FUSE_STATFS, mp, FUSE_ROOT_ID, NULL, NULL); err = fdisp_wait_answ(&fdi); if (err) { fdisp_destroy(&fdi); if (err == ENOTCONN) { /* * We want to seem a legitimate fs even if the daemon * is stiff dead... (so that, eg., we can still do path * based unmounting after the daemon dies). */ goto fake; } return err; } fsfo = fdi.answ; sbp->f_blocks = fsfo->st.blocks; sbp->f_bfree = fsfo->st.bfree; sbp->f_bavail = fsfo->st.bavail; sbp->f_files = fsfo->st.files; sbp->f_ffree = fsfo->st.ffree; /* cast from uint64_t to int64_t */ sbp->f_namemax = fsfo->st.namelen; sbp->f_bsize = fsfo->st.frsize; /* cast from uint32_t to uint64_t */ fdisp_destroy(&fdi); return 0; fake: sbp->f_blocks = 0; sbp->f_bfree = 0; sbp->f_bavail = 0; sbp->f_files = 0; sbp->f_ffree = 0; sbp->f_namemax = 0; sbp->f_bsize = S_BLKSIZE; return 0; } Index: head/sys/fs/unionfs/union_vfsops.c =================================================================== --- head/sys/fs/unionfs/union_vfsops.c (revision 360943) +++ head/sys/fs/unionfs/union_vfsops.c (revision 360944) @@ -1,509 +1,504 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994, 1995 The Regents of the University of California. * Copyright (c) 1994, 1995 Jan-Simon Pendry. 
* Copyright (c) 2005, 2006, 2012 Masanori Ozawa , ONGS Inc. * Copyright (c) 2006, 2012 Daichi Goto * All rights reserved. * * This code is derived from software donated to Berkeley by * Jan-Simon Pendry. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)union_vfsops.c 8.20 (Berkeley) 5/20/95 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure"); static vfs_fhtovp_t unionfs_fhtovp; static vfs_checkexp_t unionfs_checkexp; static vfs_mount_t unionfs_domount; static vfs_quotactl_t unionfs_quotactl; static vfs_root_t unionfs_root; static vfs_sync_t unionfs_sync; static vfs_statfs_t unionfs_statfs; static vfs_unmount_t unionfs_unmount; static vfs_vget_t unionfs_vget; static vfs_extattrctl_t unionfs_extattrctl; static struct vfsops unionfs_vfsops; /* * Mount unionfs layer. */ static int unionfs_domount(struct mount *mp) { int error; struct vnode *lowerrootvp; struct vnode *upperrootvp; struct unionfs_mount *ump; struct thread *td; char *target; char *tmp; char *ep; int len; - size_t done; int below; uid_t uid; gid_t gid; u_short udir; u_short ufile; unionfs_copymode copymode; unionfs_whitemode whitemode; struct nameidata nd, *ndp; struct vattr va; UNIONFSDEBUG("unionfs_mount(mp = %p)\n", (void *)mp); error = 0; below = 0; uid = 0; gid = 0; udir = 0; ufile = 0; copymode = UNIONFS_TRANSPARENT; /* default */ whitemode = UNIONFS_WHITE_ALWAYS; ndp = &nd; td = curthread; if (mp->mnt_flag & MNT_ROOTFS) { vfs_mount_error(mp, "Cannot union mount root filesystem"); return (EOPNOTSUPP); } /* * Update is a no operation. 
*/ if (mp->mnt_flag & MNT_UPDATE) { vfs_mount_error(mp, "unionfs does not support mount update"); return (EOPNOTSUPP); } /* * Get argument */ error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len); if (error) error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target, &len); if (error || target[len - 1] != '\0') { vfs_mount_error(mp, "Invalid target"); return (EINVAL); } if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0) below = 1; if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) { if (tmp != NULL) udir = (mode_t)strtol(tmp, &ep, 8); if (tmp == NULL || *ep) { vfs_mount_error(mp, "Invalid udir"); return (EINVAL); } udir &= S_IRWXU | S_IRWXG | S_IRWXO; } if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) { if (tmp != NULL) ufile = (mode_t)strtol(tmp, &ep, 8); if (tmp == NULL || *ep) { vfs_mount_error(mp, "Invalid ufile"); return (EINVAL); } ufile &= S_IRWXU | S_IRWXG | S_IRWXO; } /* check umask, uid and gid */ if (udir == 0 && ufile != 0) udir = ufile; if (ufile == 0 && udir != 0) ufile = udir; vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY); error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred); if (!error) { if (udir == 0) udir = va.va_mode; if (ufile == 0) ufile = va.va_mode; uid = va.va_uid; gid = va.va_gid; } VOP_UNLOCK(mp->mnt_vnodecovered); if (error) return (error); if (mp->mnt_cred->cr_ruid == 0) { /* root only */ if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp, NULL) == 0) { if (tmp != NULL) uid = (uid_t)strtol(tmp, &ep, 10); if (tmp == NULL || *ep) { vfs_mount_error(mp, "Invalid uid"); return (EINVAL); } } if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp, NULL) == 0) { if (tmp != NULL) gid = (gid_t)strtol(tmp, &ep, 10); if (tmp == NULL || *ep) { vfs_mount_error(mp, "Invalid gid"); return (EINVAL); } } if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp, NULL) == 0) { if (tmp == NULL) { vfs_mount_error(mp, "Invalid copymode"); return (EINVAL); } else if (strcasecmp(tmp, "traditional") == 0) copymode = UNIONFS_TRADITIONAL; else if (strcasecmp(tmp, "transparent") == 0) copymode = UNIONFS_TRANSPARENT; else if (strcasecmp(tmp, "masquerade") == 0) copymode = UNIONFS_MASQUERADE; else { vfs_mount_error(mp, "Invalid copymode"); return (EINVAL); } } if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp, NULL) == 0) { if (tmp == NULL) { vfs_mount_error(mp, "Invalid whiteout mode"); return (EINVAL); } else if (strcasecmp(tmp, "always") == 0) whitemode = UNIONFS_WHITE_ALWAYS; else if (strcasecmp(tmp, "whenneeded") == 0) whitemode = UNIONFS_WHITE_WHENNEEDED; else { vfs_mount_error(mp, "Invalid whiteout mode"); return (EINVAL); } } } /* If copymode is UNIONFS_TRADITIONAL, uid/gid is mounted user. 
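For reference, the udir/ufile handling above accepts an octal mode string, rejects trailing garbage through the strtol() end-pointer check, and masks the result down to the permission bits. The same validation pattern as a stand-alone sketch (parse_octal_mode is a hypothetical helper, not part of this change):

#include <stdlib.h>
#include <sys/stat.h>

/*
 * Parse an octal permission string such as "0755". Returns -1 on a
 * NULL string or trailing garbage, mirroring the (tmp == NULL || *ep)
 * test in unionfs_domount(). Hypothetical helper, for illustration.
 */
static int
parse_octal_mode(const char *s, mode_t *modep)
{
	char *ep;
	long m;

	if (s == NULL)
		return (-1);
	m = strtol(s, &ep, 8);
	if (*ep != '\0' || m < 0)
		return (-1);
	*modep = (mode_t)m & (S_IRWXU | S_IRWXG | S_IRWXO);
	return (0);
}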
*/ if (copymode == UNIONFS_TRADITIONAL) { uid = mp->mnt_cred->cr_ruid; gid = mp->mnt_cred->cr_rgid; } UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid); UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile); UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode); /* * Find upper node */ NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target, td); if ((error = namei(ndp))) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); /* get root vnodes */ lowerrootvp = mp->mnt_vnodecovered; upperrootvp = ndp->ni_vp; /* create unionfs_mount */ ump = (struct unionfs_mount *)malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT, M_WAITOK | M_ZERO); /* * Save reference */ if (below) { VOP_UNLOCK(upperrootvp); vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY); ump->um_lowervp = upperrootvp; ump->um_uppervp = lowerrootvp; } else { ump->um_lowervp = lowerrootvp; ump->um_uppervp = upperrootvp; } ump->um_rootvp = NULLVP; ump->um_uid = uid; ump->um_gid = gid; ump->um_udir = udir; ump->um_ufile = ufile; ump->um_copymode = copymode; ump->um_whitemode = whitemode; mp->mnt_data = ump; /* * Copy upper layer's RDONLY flag. */ mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY; /* * Unlock the node */ VOP_UNLOCK(ump->um_uppervp); /* * Get the unionfs root vnode. */ error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp, NULLVP, &(ump->um_rootvp), NULL, td); vrele(upperrootvp); if (error) { free(ump, M_UNIONFSMNT); mp->mnt_data = NULL; return (error); } MNT_ILOCK(mp); if ((ump->um_lowervp->v_mount->mnt_flag & MNT_LOCAL) && (ump->um_uppervp->v_mount->mnt_flag & MNT_LOCAL)) mp->mnt_flag |= MNT_LOCAL; mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS; MNT_IUNLOCK(mp); /* * Get new fsid */ vfs_getnewfsid(mp); - len = MNAMELEN - 1; - tmp = mp->mnt_stat.f_mntfromname; - copystr((below ? "<below>:" : "<above>:"), tmp, len, &done); - len -= done - 1; - tmp += done - 1; - copystr(target, tmp, len, NULL); + snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s", + below ? "below" : "above", target); UNIONFSDEBUG("unionfs_mount: from %s, on %s\n", mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname); return (0); } /* * Free reference to unionfs layer */ static int unionfs_unmount(struct mount *mp, int mntflags) { struct unionfs_mount *ump; int error; int num; int freeing; int flags; UNIONFSDEBUG("unionfs_unmount: mp = %p\n", (void *)mp); ump = MOUNTTOUNIONFSMOUNT(mp); flags = 0; if (mntflags & MNT_FORCE) flags |= FORCECLOSE; /* vflush (no need to call vrele) */ for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) { num = mp->mnt_nvnodelistsize; if (num == freeing) break; freeing = num; } if (error) return (error); free(ump, M_UNIONFSMNT); mp->mnt_data = NULL; return (0); } static int unionfs_root(struct mount *mp, int flags, struct vnode **vpp) { struct unionfs_mount *ump; struct vnode *vp; ump = MOUNTTOUNIONFSMOUNT(mp); vp = ump->um_rootvp; UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n", vp, VOP_ISLOCKED(vp)); vref(vp); if (flags & LK_TYPE_MASK) vn_lock(vp, flags); *vpp = vp; return (0); } static int unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg) { struct unionfs_mount *ump; ump = MOUNTTOUNIONFSMOUNT(mp); /* * Writing is always performed to upper vnode.
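The change above collapses the old two-step copystr() sequence, with its manual length and offset bookkeeping, into one bounded snprintf() that always NUL-terminates and truncates safely. The resulting label format, restated as a stand-alone sketch (the MNAMELEN value here is illustrative; the kernel takes it from sys/mount.h):

#include <stdio.h>

#define MNAMELEN 1024	/* illustrative; see sys/mount.h for the real value */

/* Compose the f_mntfromname label the way the new code does. */
static void
format_mntfromname(char *buf, int below, const char *target)
{
	snprintf(buf, MNAMELEN, "<%s>:%s", below ? "below" : "above", target);
}

For a below-mode mount of /upper this yields "<below>:/upper"; the old code built the same string by hand with two copystr() calls.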
*/ return (VFS_QUOTACTL(ump->um_uppervp->v_mount, cmd, uid, arg)); } static int unionfs_statfs(struct mount *mp, struct statfs *sbp) { struct unionfs_mount *ump; int error; struct statfs *mstat; uint64_t lbsize; ump = MOUNTTOUNIONFSMOUNT(mp); UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n", (void *)mp, (void *)ump->um_lowervp, (void *)ump->um_uppervp); mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO); error = VFS_STATFS(ump->um_lowervp->v_mount, mstat); if (error) { free(mstat, M_STATFS); return (error); } /* now copy across the "interesting" information and fake the rest */ sbp->f_blocks = mstat->f_blocks; sbp->f_files = mstat->f_files; lbsize = mstat->f_bsize; error = VFS_STATFS(ump->um_uppervp->v_mount, mstat); if (error) { free(mstat, M_STATFS); return (error); } /* * The FS type etc is copy from upper vfs. * (write able vfs have priority) */ sbp->f_type = mstat->f_type; sbp->f_flags = mstat->f_flags; sbp->f_bsize = mstat->f_bsize; sbp->f_iosize = mstat->f_iosize; if (mstat->f_bsize != lbsize) sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) / mstat->f_bsize; sbp->f_blocks += mstat->f_blocks; sbp->f_bfree = mstat->f_bfree; sbp->f_bavail = mstat->f_bavail; sbp->f_files += mstat->f_files; sbp->f_ffree = mstat->f_ffree; free(mstat, M_STATFS); return (0); } static int unionfs_sync(struct mount *mp, int waitfor) { /* nothing to do */ return (0); } static int unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { return (EOPNOTSUPP); } static int unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags, struct vnode **vpp) { return (EOPNOTSUPP); } static int unionfs_checkexp(struct mount *mp, struct sockaddr *nam, int *extflagsp, struct ucred **credanonp, int *numsecflavors, int **secflavors) { return (EOPNOTSUPP); } static int unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, int namespace, const char *attrname) { struct unionfs_mount *ump; struct unionfs_node *unp; ump = MOUNTTOUNIONFSMOUNT(mp); unp = VTOUNIONFS(filename_vp); if (unp->un_uppervp != NULLVP) { return (VFS_EXTATTRCTL(ump->um_uppervp->v_mount, cmd, unp->un_uppervp, namespace, attrname)); } else { return (VFS_EXTATTRCTL(ump->um_lowervp->v_mount, cmd, unp->un_lowervp, namespace, attrname)); } } static struct vfsops unionfs_vfsops = { .vfs_checkexp = unionfs_checkexp, .vfs_extattrctl = unionfs_extattrctl, .vfs_fhtovp = unionfs_fhtovp, .vfs_init = unionfs_init, .vfs_mount = unionfs_domount, .vfs_quotactl = unionfs_quotactl, .vfs_root = unionfs_root, .vfs_statfs = unionfs_statfs, .vfs_sync = unionfs_sync, .vfs_uninit = unionfs_uninit, .vfs_unmount = unionfs_unmount, .vfs_vget = unionfs_vget, }; VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK); Index: head/sys/i386/i386/support.s =================================================================== --- head/sys/i386/i386/support.s (revision 360943) +++ head/sys/i386/i386/support.s (revision 360944) @@ -1,658 +1,617 @@ /*- * Copyright (c) 1993 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "assym.inc" #define IDXSHIFT 10 .text /* * bcopy family * void bzero(void *buf, u_int len) */ ENTRY(bzero) pushl %edi movl 8(%esp),%edi movl 12(%esp),%ecx xorl %eax,%eax shrl $2,%ecx rep stosl movl 12(%esp),%ecx andl $3,%ecx rep stosb popl %edi ret END(bzero) ENTRY(sse2_pagezero) pushl %ebx movl 8(%esp),%ecx movl %ecx,%eax addl $4096,%eax xor %ebx,%ebx jmp 1f /* * The loop takes 14 bytes. Ensure that it doesn't cross a 16-byte * cache line. */ .p2align 4,0x90 1: movnti %ebx,(%ecx) movnti %ebx,4(%ecx) addl $8,%ecx cmpl %ecx,%eax jne 1b sfence popl %ebx ret END(sse2_pagezero) ENTRY(i686_pagezero) pushl %edi pushl %ebx movl 12(%esp),%edi movl $1024,%ecx ALIGN_TEXT 1: xorl %eax,%eax repe scasl jnz 2f popl %ebx popl %edi ret ALIGN_TEXT 2: incl %ecx subl $4,%edi movl %ecx,%edx cmpl $16,%ecx jge 3f movl %edi,%ebx andl $0x3f,%ebx shrl %ebx shrl %ebx movl $16,%ecx subl %ebx,%ecx 3: subl %ecx,%edx rep stosl movl %edx,%ecx testl %edx,%edx jnz 1b popl %ebx popl %edi ret END(i686_pagezero) /* fillw(pat, base, cnt) */ ENTRY(fillw) pushl %edi movl 8(%esp),%eax movl 12(%esp),%edi movl 16(%esp),%ecx rep stosw popl %edi ret END(fillw) /* * memmove(dst, src, cnt) (return dst) * bcopy(src, dst, cnt) * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800 */ ENTRY(bcopy) movl 4(%esp),%eax movl 8(%esp),%edx movl %eax,8(%esp) movl %edx,4(%esp) MEXITCOUNT jmp memmove END(bcopy) ENTRY(memmove) pushl %ebp movl %esp,%ebp pushl %esi pushl %edi movl 8(%ebp),%edi movl 12(%ebp),%esi 1: movl 16(%ebp),%ecx movl %edi,%eax subl %esi,%eax cmpl %ecx,%eax /* overlapping && src < dst? */ jb 1f shrl $2,%ecx /* copy by 32-bit words */ rep movsl movl 16(%ebp),%ecx andl $3,%ecx /* any bytes left? */ rep movsb popl %edi popl %esi movl 8(%ebp),%eax /* return dst for memmove */ popl %ebp ret ALIGN_TEXT 1: addl %ecx,%edi /* copy backwards */ addl %ecx,%esi decl %edi decl %esi andl $3,%ecx /* any fractional bytes? */ std rep movsb movl 16(%ebp),%ecx /* copy remainder by 32-bit words */ shrl $2,%ecx subl $3,%esi subl $3,%edi rep movsl popl %edi popl %esi cld movl 8(%ebp),%eax /* return dst for memmove */ popl %ebp ret END(memmove) /* * Note: memcpy does not support overlapping copies */ ENTRY(memcpy) pushl %edi pushl %esi movl 12(%esp),%edi movl 16(%esp),%esi movl 20(%esp),%ecx movl %edi,%eax shrl $2,%ecx /* copy by 32-bit words */ rep movsl movl 20(%esp),%ecx andl $3,%ecx /* any bytes left? 
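The memmove above chooses its copy direction at run time: it copies forward by 32-bit words unless the destination starts inside the source range, in which case it copies backward (std/rep movsb, then words). The direction test, restated in C (a sketch; the unsigned pointer-difference trick mirrors the subl/cmpl/jb sequence, and the byte loops stand in for the word-optimized copies):

#include <stddef.h>

static void *
sketch_memmove(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* Forward copy is safe unless dst lands inside [src, src+len). */
	if ((size_t)(d - s) >= len) {
		while (len--)
			*d++ = *s++;
	} else {
		/* Overlap with src below dst: copy backwards. */
		d += len;
		s += len;
		while (len--)
			*--d = *--s;
	}
	return (dst);
}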
*/ rep movsb popl %esi popl %edi ret END(memcpy) -/* - * copystr(from, to, maxlen, int *lencopied) - MP SAFE - */ -ENTRY(copystr) - pushl %esi - pushl %edi - - movl 12(%esp),%esi /* %esi = from */ - movl 16(%esp),%edi /* %edi = to */ - movl 20(%esp),%edx /* %edx = maxlen */ - incl %edx -1: - decl %edx - jz 4f - lodsb - stosb - orb %al,%al - jnz 1b - - /* Success -- 0 byte reached */ - decl %edx - xorl %eax,%eax - jmp 6f -4: - /* edx is zero -- return ENAMETOOLONG */ - movl $ENAMETOOLONG,%eax - -6: - /* set *lencopied and return %eax */ - movl 20(%esp),%ecx - subl %edx,%ecx - movl 24(%esp),%edx - testl %edx,%edx - jz 7f - movl %ecx,(%edx) -7: - popl %edi - popl %esi - ret -END(copystr) - ENTRY(bcmp) pushl %edi pushl %esi movl 12(%esp),%edi movl 16(%esp),%esi movl 20(%esp),%edx movl %edx,%ecx shrl $2,%ecx repe cmpsl jne 1f movl %edx,%ecx andl $3,%ecx repe cmpsb 1: setne %al movsbl %al,%eax popl %esi popl %edi ret END(bcmp) /* * Handling of special 386 registers and descriptor tables etc */ /* void lgdt(struct region_descriptor *rdp); */ ENTRY(lgdt) /* reload the descriptor table */ movl 4(%esp),%eax lgdt (%eax) /* flush the prefetch q */ jmp 1f nop 1: /* reload "stale" selectors */ movl $KDSEL,%eax movl %eax,%ds movl %eax,%es movl %eax,%gs movl %eax,%ss movl $KPSEL,%eax movl %eax,%fs /* reload code selector by turning return into intersegmental return */ movl (%esp),%eax pushl %eax movl $KCSEL,4(%esp) MEXITCOUNT lret END(lgdt) /* ssdtosd(*ssdp,*sdp) */ ENTRY(ssdtosd) pushl %ebx movl 8(%esp),%ecx movl 8(%ecx),%ebx shll $16,%ebx movl (%ecx),%edx roll $16,%edx movb %dh,%bl movb %dl,%bh rorl $8,%ebx movl 4(%ecx),%eax movw %ax,%dx andl $0xf0000,%eax orl %eax,%ebx movl 12(%esp),%ecx movl %edx,(%ecx) movl %ebx,4(%ecx) popl %ebx ret END(ssdtosd) /* void reset_dbregs() */ ENTRY(reset_dbregs) movl $0,%eax movl %eax,%dr7 /* disable all breakpoints first */ movl %eax,%dr0 movl %eax,%dr1 movl %eax,%dr2 movl %eax,%dr3 movl %eax,%dr6 ret END(reset_dbregs) /*****************************************************************************/ /* setjump, longjump */ /*****************************************************************************/ ENTRY(setjmp) movl 4(%esp),%eax movl %ebx,(%eax) /* save ebx */ movl %esp,4(%eax) /* save esp */ movl %ebp,8(%eax) /* save ebp */ movl %esi,12(%eax) /* save esi */ movl %edi,16(%eax) /* save edi */ movl (%esp),%edx /* get rta */ movl %edx,20(%eax) /* save eip */ xorl %eax,%eax /* return(0); */ ret END(setjmp) ENTRY(longjmp) movl 4(%esp),%eax movl (%eax),%ebx /* restore ebx */ movl 4(%eax),%esp /* restore esp */ movl 8(%eax),%ebp /* restore ebp */ movl 12(%eax),%esi /* restore esi */ movl 16(%eax),%edi /* restore edi */ movl 20(%eax),%edx /* get rta */ movl %edx,(%esp) /* put in return frame */ xorl %eax,%eax /* return(1); */ incl %eax ret END(longjmp) /* * Support for reading MSRs in the safe manner. (Instead of panic on #gp, * return an error.) */ ENTRY(rdmsr_safe) /* int rdmsr_safe(u_int msr, uint64_t *data) */ movl PCPU(CURPCB),%ecx movl $msr_onfault,PCB_ONFAULT(%ecx) movl 4(%esp),%ecx rdmsr movl 8(%esp),%ecx movl %eax,(%ecx) movl %edx,4(%ecx) xorl %eax,%eax movl PCPU(CURPCB),%ecx movl %eax,PCB_ONFAULT(%ecx) ret /* * Support for writing MSRs in the safe manner. (Instead of panic on #gp, * return an error.) 
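rdmsr_safe() above (and wrmsr_safe() below) rely on the pcb_onfault protocol: a recovery address is parked in the current PCB, the #GP trap handler redirects execution there instead of panicking, and the recovery stub returns EFAULT. The same flow in C-shaped pseudocode (curpcb, rdmsr(), and msr_onfault are stand-ins for the kernel symbols; the trap-time redirection itself is not shown):

#include <stdint.h>
#include <stddef.h>

struct pcb_sketch { void (*pcb_onfault)(void); };
extern struct pcb_sketch *curpcb;	/* per-thread PCB, as in the kernel */
extern void msr_onfault(void);		/* recovery stub in the assembly */
extern uint64_t rdmsr(unsigned int msr);

static int
rdmsr_safe_sketch(unsigned int msr, uint64_t *data)
{
	curpcb->pcb_onfault = msr_onfault;	/* arm the recovery point */
	*data = rdmsr(msr);			/* a #GP here is redirected */
	curpcb->pcb_onfault = NULL;		/* disarm on success */
	return (0);	/* the msr_onfault path returns EFAULT instead */
}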
*/ ENTRY(wrmsr_safe) /* int wrmsr_safe(u_int msr, uint64_t data) */ movl PCPU(CURPCB),%ecx movl $msr_onfault,PCB_ONFAULT(%ecx) movl 4(%esp),%ecx movl 8(%esp),%eax movl 12(%esp),%edx wrmsr xorl %eax,%eax movl PCPU(CURPCB),%ecx movl %eax,PCB_ONFAULT(%ecx) ret /* * MSR operations fault handler */ ALIGN_TEXT msr_onfault: movl PCPU(CURPCB),%ecx movl $0,PCB_ONFAULT(%ecx) movl $EFAULT,%eax ret ENTRY(handle_ibrs_entry) cmpb $0,hw_ibrs_ibpb_active je 1f movl $MSR_IA32_SPEC_CTRL,%ecx rdmsr orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx wrmsr movb $1,PCPU(IBPB_SET) /* * i386 does not implement SMEP, but the 4/4 split makes this not * that important. */ 1: ret END(handle_ibrs_entry) ENTRY(handle_ibrs_exit) cmpb $0,PCPU(IBPB_SET) je 1f movl $MSR_IA32_SPEC_CTRL,%ecx rdmsr andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx wrmsr movb $0,PCPU(IBPB_SET) 1: ret END(handle_ibrs_exit) ENTRY(mds_handler_void) ret END(mds_handler_void) ENTRY(mds_handler_verw) subl $4, %esp movw %ds, (%esp) verw (%esp) addl $4, %esp ret END(mds_handler_verw) ENTRY(mds_handler_ivb) movl %cr0, %eax testb $CR0_TS, %al je 1f clts 1: movl PCPU(MDS_BUF), %edx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 lfence orpd (%edx), %xmm0 orpd (%edx), %xmm0 mfence movl $40, %ecx addl $16, %edx 2: movntdq %xmm0, (%edx) addl $16, %edx decl %ecx jnz 2b mfence movdqa PCPU(MDS_TMP),%xmm0 testb $CR0_TS, %al je 3f movl %eax, %cr0 3: ret END(mds_handler_ivb) ENTRY(mds_handler_bdw) movl %cr0, %eax testb $CR0_TS, %al je 1f clts 1: movl PCPU(MDS_BUF), %ebx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 movl %ebx, %edi movl %ebx, %esi movl $40, %ecx 2: movntdq %xmm0, (%ebx) addl $16, %ebx decl %ecx jnz 2b mfence movl $1536, %ecx rep; movsb lfence movdqa PCPU(MDS_TMP),%xmm0 testb $CR0_TS, %al je 3f movl %eax, %cr0 3: ret END(mds_handler_bdw) ENTRY(mds_handler_skl_sse) movl %cr0, %eax testb $CR0_TS, %al je 1f clts 1: movl PCPU(MDS_BUF), %edi movl PCPU(MDS_BUF64), %edx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 lfence orpd (%edx), %xmm0 orpd (%edx), %xmm0 xorl %eax, %eax 2: clflushopt 5376(%edi, %eax, 8) addl $8, %eax cmpl $8 * 12, %eax jb 2b sfence movl $6144, %ecx xorl %eax, %eax rep; stosb mfence movdqa PCPU(MDS_TMP), %xmm0 testb $CR0_TS, %al je 3f movl %eax, %cr0 3: ret END(mds_handler_skl_sse) ENTRY(mds_handler_skl_avx) movl %cr0, %eax testb $CR0_TS, %al je 1f clts 1: movl PCPU(MDS_BUF), %edi movl PCPU(MDS_BUF64), %edx vmovdqa %ymm0, PCPU(MDS_TMP) vpxor %ymm0, %ymm0, %ymm0 lfence vorpd (%edx), %ymm0, %ymm0 vorpd (%edx), %ymm0, %ymm0 xorl %eax, %eax 2: clflushopt 5376(%edi, %eax, 8) addl $8, %eax cmpl $8 * 12, %eax jb 2b sfence movl $6144, %ecx xorl %eax, %eax rep; stosb mfence vmovdqa PCPU(MDS_TMP), %ymm0 testb $CR0_TS, %al je 3f movl %eax, %cr0 3: ret END(mds_handler_skl_avx) ENTRY(mds_handler_skl_avx512) movl %cr0, %eax testb $CR0_TS, %al je 1f clts 1: movl PCPU(MDS_BUF), %edi movl PCPU(MDS_BUF64), %edx vmovdqa64 %zmm0, PCPU(MDS_TMP) vpxord %zmm0, %zmm0, %zmm0 lfence vorpd (%edx), %zmm0, %zmm0 vorpd (%edx), %zmm0, %zmm0 xorl %eax, %eax 2: clflushopt 5376(%edi, %eax, 8) addl $8, %eax cmpl $8 * 12, %eax jb 2b sfence movl $6144, %ecx xorl %eax, %eax rep; stosb mfence vmovdqa64 PCPU(MDS_TMP), %zmm0 testb $CR0_TS, %al je 3f movl %eax, %cr0 3: ret END(mds_handler_skl_avx512) ENTRY(mds_handler_silvermont) movl %cr0, %eax testb $CR0_TS, %al je 1f clts 1: movl PCPU(MDS_BUF), %edx movdqa %xmm0, PCPU(MDS_TMP) pxor %xmm0, %xmm0 movl 
$16, %ecx 2: movntdq %xmm0, (%edx) addl $16, %edx decl %ecx jnz 2b mfence movdqa PCPU(MDS_TMP),%xmm0 testb $CR0_TS, %al je 3f movl %eax, %cr0 3: ret END(mds_handler_silvermont) Index: head/sys/kern/subr_csan.c =================================================================== --- head/sys/kern/subr_csan.c (revision 360943) +++ head/sys/kern/subr_csan.c (revision 360944) @@ -1,857 +1,849 @@ /* $NetBSD: subr_csan.c,v 1.5 2019/11/15 08:11:37 maxv Exp $ */ /* * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * Copyright (c) 2019 Andrew Turner * * This code is derived from software contributed to The NetBSD Foundation * by Maxime Villard. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #define KCSAN_RUNTIME #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef KCSAN_PANIC #define REPORT panic #else #define REPORT printf #endif typedef struct { uintptr_t addr; uint32_t size; bool write:1; bool atomic:1; uintptr_t pc; } csan_cell_t; typedef struct { bool inited; uint32_t cnt; csan_cell_t cell; } csan_cpu_t; static csan_cpu_t kcsan_cpus[MAXCPU]; static bool kcsan_enabled __read_mostly; #define __RET_ADDR (uintptr_t)__builtin_return_address(0) #define KCSAN_NACCESSES 1024 #define KCSAN_DELAY 10 /* 10 microseconds */ /* -------------------------------------------------------------------------- */ /* The MD code. 
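The race test at the heart of kcsan_access() below reduces to three checks: the two published accesses overlap, at least one is a write, and at least one write is non-atomic. Restated as a stand-alone predicate (names here are illustrative; the logic follows the kcsan_access()/kcsan_access_is_atomic() pair below):

#include <stdbool.h>
#include <stdint.h>

struct access_sketch {
	uintptr_t addr;
	uint32_t size;
	bool write;
	bool atomic;
};

static bool
accesses_race(const struct access_sketch *a, const struct access_sketch *b)
{
	if (a->addr + a->size <= b->addr || b->addr + b->size <= a->addr)
		return (false);		/* ranges are disjoint */
	if (!a->write && !b->write)
		return (false);		/* two reads never race */
	if ((!a->write || a->atomic) && (!b->write || b->atomic))
		return (false);		/* every write involved is atomic */
	return (true);
}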
*/ #include /* -------------------------------------------------------------------------- */ static void kcsan_enable(void *dummy __unused) { printf("Enabling KCSCAN, expect reduced performance.\n"); kcsan_enabled = true; } SYSINIT(kcsan_enable, SI_SUB_SMP, SI_ORDER_SECOND, kcsan_enable, NULL); void kcsan_cpu_init(u_int cpu) { kcsan_cpus[cpu].inited = true; } /* -------------------------------------------------------------------------- */ static inline void kcsan_report(csan_cell_t *new, u_int newcpu, csan_cell_t *old, u_int oldcpu) { const char *newsym, *oldsym; #ifdef DDB c_db_sym_t sym; db_expr_t offset; sym = db_search_symbol((vm_offset_t)new->pc, DB_STGY_PROC, &offset); db_symbol_values(sym, &newsym, NULL); sym = db_search_symbol((vm_offset_t)old->pc, DB_STGY_PROC, &offset); db_symbol_values(sym, &oldsym, NULL); #else newsym = ""; oldsym = ""; #endif REPORT("CSan: Racy Access " "[Cpu%u %s%s Addr=%p Size=%u PC=%p<%s>] " "[Cpu%u %s%s Addr=%p Size=%u PC=%p<%s>]\n", newcpu, (new->atomic ? "Atomic " : ""), (new->write ? "Write" : "Read"), (void *)new->addr, new->size, (void *)new->pc, newsym, oldcpu, (old->atomic ? "Atomic " : ""), (old->write ? "Write" : "Read"), (void *)old->addr, old->size, (void *)old->pc, oldsym); kcsan_md_unwind(); } static inline bool kcsan_access_is_atomic(csan_cell_t *new, csan_cell_t *old) { if (new->write && !new->atomic) return false; if (old->write && !old->atomic) return false; return true; } static inline void kcsan_access(uintptr_t addr, size_t size, bool write, bool atomic, uintptr_t pc) { csan_cell_t old, new; csan_cpu_t *cpu; uint64_t intr; size_t i; if (__predict_false(!kcsan_enabled)) return; if (__predict_false(kcsan_md_unsupported((vm_offset_t)addr))) return; if (KERNEL_PANICKED()) return; new.addr = addr; new.size = size; new.write = write; new.atomic = atomic; new.pc = pc; CPU_FOREACH(i) { __builtin_memcpy(&old, &kcsan_cpus[i].cell, sizeof(old)); if (old.addr + old.size <= new.addr) continue; if (new.addr + new.size <= old.addr) continue; if (__predict_true(!old.write && !new.write)) continue; if (__predict_true(kcsan_access_is_atomic(&new, &old))) continue; kcsan_report(&new, PCPU_GET(cpuid), &old, i); break; } if (__predict_false(!kcsan_md_is_avail())) return; kcsan_md_disable_intrs(&intr); cpu = &kcsan_cpus[PCPU_GET(cpuid)]; if (__predict_false(!cpu->inited)) goto out; cpu->cnt = (cpu->cnt + 1) % KCSAN_NACCESSES; if (__predict_true(cpu->cnt != 0)) goto out; __builtin_memcpy(&cpu->cell, &new, sizeof(new)); kcsan_md_delay(KCSAN_DELAY); __builtin_memset(&cpu->cell, 0, sizeof(new)); out: kcsan_md_enable_intrs(&intr); } #define CSAN_READ(size) \ void __tsan_read##size(uintptr_t); \ void __tsan_read##size(uintptr_t addr) \ { \ kcsan_access(addr, size, false, false, __RET_ADDR); \ } \ void __tsan_unaligned_read##size(uintptr_t); \ void __tsan_unaligned_read##size(uintptr_t addr) \ { \ kcsan_access(addr, size, false, false, __RET_ADDR); \ } CSAN_READ(1) CSAN_READ(2) CSAN_READ(4) CSAN_READ(8) CSAN_READ(16) #define CSAN_WRITE(size) \ void __tsan_write##size(uintptr_t); \ void __tsan_write##size(uintptr_t addr) \ { \ kcsan_access(addr, size, true, false, __RET_ADDR); \ } \ void __tsan_unaligned_write##size(uintptr_t); \ void __tsan_unaligned_write##size(uintptr_t addr) \ { \ kcsan_access(addr, size, true, false, __RET_ADDR); \ } CSAN_WRITE(1) CSAN_WRITE(2) CSAN_WRITE(4) CSAN_WRITE(8) CSAN_WRITE(16) void __tsan_read_range(uintptr_t, size_t); void __tsan_write_range(uintptr_t, size_t); void __tsan_read_range(uintptr_t addr, size_t size) { kcsan_access(addr, 
size, false, false, __RET_ADDR); } void __tsan_write_range(uintptr_t addr, size_t size) { kcsan_access(addr, size, true, false, __RET_ADDR); } void __tsan_init(void); void __tsan_func_entry(void *); void __tsan_func_exit(void); void __tsan_init(void) { } void __tsan_func_entry(void *call_pc) { } void __tsan_func_exit(void) { } /* -------------------------------------------------------------------------- */ void * kcsan_memcpy(void *dst, const void *src, size_t len) { kcsan_access((uintptr_t)src, len, false, false, __RET_ADDR); kcsan_access((uintptr_t)dst, len, true, false, __RET_ADDR); return __builtin_memcpy(dst, src, len); } int kcsan_memcmp(const void *b1, const void *b2, size_t len) { kcsan_access((uintptr_t)b1, len, false, false, __RET_ADDR); kcsan_access((uintptr_t)b2, len, false, false, __RET_ADDR); return __builtin_memcmp(b1, b2, len); } void * kcsan_memset(void *b, int c, size_t len) { kcsan_access((uintptr_t)b, len, true, false, __RET_ADDR); return __builtin_memset(b, c, len); } void * kcsan_memmove(void *dst, const void *src, size_t len) { kcsan_access((uintptr_t)src, len, false, false, __RET_ADDR); kcsan_access((uintptr_t)dst, len, true, false, __RET_ADDR); return __builtin_memmove(dst, src, len); } char * kcsan_strcpy(char *dst, const char *src) { char *save = dst; while (1) { kcsan_access((uintptr_t)src, 1, false, false, __RET_ADDR); kcsan_access((uintptr_t)dst, 1, true, false, __RET_ADDR); *dst = *src; if (*src == '\0') break; src++, dst++; } return save; } int kcsan_strcmp(const char *s1, const char *s2) { while (1) { kcsan_access((uintptr_t)s1, 1, false, false, __RET_ADDR); kcsan_access((uintptr_t)s2, 1, false, false, __RET_ADDR); if (*s1 != *s2) break; if (*s1 == '\0') return 0; s1++, s2++; } return (*(const unsigned char *)s1 - *(const unsigned char *)s2); } size_t kcsan_strlen(const char *str) { const char *s; s = str; while (1) { kcsan_access((uintptr_t)s, 1, false, false, __RET_ADDR); if (*s == '\0') break; s++; } return (s - str); } -#undef copystr #undef copyin #undef copyin_nofault #undef copyinstr #undef copyout #undef copyout_nofault - -int -kcsan_copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done) -{ - kcsan_access((uintptr_t)kdaddr, len, true, false, __RET_ADDR); - return copystr(kfaddr, kdaddr, len, done); -} int kcsan_copyin(const void *uaddr, void *kaddr, size_t len) { kcsan_access((uintptr_t)kaddr, len, true, false, __RET_ADDR); return copyin(uaddr, kaddr, len); } int kcsan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) { kcsan_access((uintptr_t)kaddr, len, true, false, __RET_ADDR); return copyinstr(uaddr, kaddr, len, done); } int kcsan_copyout(const void *kaddr, void *uaddr, size_t len) { kcsan_access((uintptr_t)kaddr, len, false, false, __RET_ADDR); return copyout(kaddr, uaddr, len); } /* -------------------------------------------------------------------------- */ #include #include #define _CSAN_ATOMIC_FUNC_ADD(name, type) \ void kcsan_atomic_add_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_add_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_ADD(name, type) \ _CSAN_ATOMIC_FUNC_ADD(name, type) \ _CSAN_ATOMIC_FUNC_ADD(acq_##name, type) \ _CSAN_ATOMIC_FUNC_ADD(rel_##name, type) #define _CSAN_ATOMIC_FUNC_CLEAR(name, type) \ void kcsan_atomic_clear_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_clear_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_CLEAR(name, type) \ 
_CSAN_ATOMIC_FUNC_CLEAR(name, type) \ _CSAN_ATOMIC_FUNC_CLEAR(acq_##name, type) \ _CSAN_ATOMIC_FUNC_CLEAR(rel_##name, type) #define _CSAN_ATOMIC_FUNC_CMPSET(name, type) \ int kcsan_atomic_cmpset_##name(volatile type *ptr, type val1, \ type val2) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_cmpset_##name(ptr, val1, val2)); \ } #define CSAN_ATOMIC_FUNC_CMPSET(name, type) \ _CSAN_ATOMIC_FUNC_CMPSET(name, type) \ _CSAN_ATOMIC_FUNC_CMPSET(acq_##name, type) \ _CSAN_ATOMIC_FUNC_CMPSET(rel_##name, type) #define _CSAN_ATOMIC_FUNC_FCMPSET(name, type) \ int kcsan_atomic_fcmpset_##name(volatile type *ptr, type *val1, \ type val2) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_fcmpset_##name(ptr, val1, val2)); \ } #define CSAN_ATOMIC_FUNC_FCMPSET(name, type) \ _CSAN_ATOMIC_FUNC_FCMPSET(name, type) \ _CSAN_ATOMIC_FUNC_FCMPSET(acq_##name, type) \ _CSAN_ATOMIC_FUNC_FCMPSET(rel_##name, type) #define CSAN_ATOMIC_FUNC_FETCHADD(name, type) \ type kcsan_atomic_fetchadd_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_fetchadd_##name(ptr, val)); \ } #define _CSAN_ATOMIC_FUNC_LOAD(name, type) \ type kcsan_atomic_load_##name(volatile type *ptr) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), false, true, \ __RET_ADDR); \ return (atomic_load_##name(ptr)); \ } #define CSAN_ATOMIC_FUNC_LOAD(name, type) \ _CSAN_ATOMIC_FUNC_LOAD(name, type) \ _CSAN_ATOMIC_FUNC_LOAD(acq_##name, type) \ #define CSAN_ATOMIC_FUNC_READANDCLEAR(name, type) \ type kcsan_atomic_readandclear_##name(volatile type *ptr) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_readandclear_##name(ptr)); \ } #define _CSAN_ATOMIC_FUNC_SET(name, type) \ void kcsan_atomic_set_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_set_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_SET(name, type) \ _CSAN_ATOMIC_FUNC_SET(name, type) \ _CSAN_ATOMIC_FUNC_SET(acq_##name, type) \ _CSAN_ATOMIC_FUNC_SET(rel_##name, type) #define _CSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ void kcsan_atomic_subtract_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_subtract_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _CSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _CSAN_ATOMIC_FUNC_SUBTRACT(acq_##name, type) \ _CSAN_ATOMIC_FUNC_SUBTRACT(rel_##name, type) #define _CSAN_ATOMIC_FUNC_STORE(name, type) \ void kcsan_atomic_store_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_store_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_STORE(name, type) \ _CSAN_ATOMIC_FUNC_STORE(name, type) \ _CSAN_ATOMIC_FUNC_STORE(rel_##name, type) #define CSAN_ATOMIC_FUNC_SWAP(name, type) \ type kcsan_atomic_swap_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return(atomic_swap_##name(ptr, val)); \ } #define CSAN_ATOMIC_FUNC_TESTANDCLEAR(name, type) \ int kcsan_atomic_testandclear_##name(volatile type *ptr, u_int val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return(atomic_testandclear_##name(ptr, val)); \ } #define CSAN_ATOMIC_FUNC_TESTANDSET(name, type) \ int kcsan_atomic_testandset_##name(volatile type *ptr, u_int val) \ { \ kcsan_access((uintptr_t)ptr, 
sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_testandset_##name(ptr, val)); \ } CSAN_ATOMIC_FUNC_ADD(8, uint8_t) CSAN_ATOMIC_FUNC_CLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_CMPSET(8, uint8_t) CSAN_ATOMIC_FUNC_FCMPSET(8, uint8_t) CSAN_ATOMIC_FUNC_LOAD(8, uint8_t) CSAN_ATOMIC_FUNC_SET(8, uint8_t) CSAN_ATOMIC_FUNC_SUBTRACT(8, uint8_t) _CSAN_ATOMIC_FUNC_STORE(8, uint8_t) #if 0 CSAN_ATOMIC_FUNC_FETCHADD(8, uint8_t) CSAN_ATOMIC_FUNC_READANDCLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_SWAP(8, uint8_t) CSAN_ATOMIC_FUNC_TESTANDCLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_TESTANDSET(8, uint8_t) #endif CSAN_ATOMIC_FUNC_ADD(16, uint16_t) CSAN_ATOMIC_FUNC_CLEAR(16, uint16_t) CSAN_ATOMIC_FUNC_CMPSET(16, uint16_t) CSAN_ATOMIC_FUNC_FCMPSET(16, uint16_t) CSAN_ATOMIC_FUNC_LOAD(16, uint16_t) CSAN_ATOMIC_FUNC_SET(16, uint16_t) CSAN_ATOMIC_FUNC_SUBTRACT(16, uint16_t) _CSAN_ATOMIC_FUNC_STORE(16, uint16_t) #if 0 CSAN_ATOMIC_FUNC_FETCHADD(16, uint16_t) CSAN_ATOMIC_FUNC_READANDCLEAR(16, uint16_t) CSAN_ATOMIC_FUNC_SWAP(16, uint16_t) CSAN_ATOMIC_FUNC_TESTANDCLEAR(16, uint16_t) CSAN_ATOMIC_FUNC_TESTANDSET(16, uint16_t) #endif CSAN_ATOMIC_FUNC_ADD(32, uint32_t) CSAN_ATOMIC_FUNC_CLEAR(32, uint32_t) CSAN_ATOMIC_FUNC_CMPSET(32, uint32_t) CSAN_ATOMIC_FUNC_FCMPSET(32, uint32_t) CSAN_ATOMIC_FUNC_FETCHADD(32, uint32_t) CSAN_ATOMIC_FUNC_LOAD(32, uint32_t) CSAN_ATOMIC_FUNC_READANDCLEAR(32, uint32_t) CSAN_ATOMIC_FUNC_SET(32, uint32_t) CSAN_ATOMIC_FUNC_SUBTRACT(32, uint32_t) CSAN_ATOMIC_FUNC_STORE(32, uint32_t) CSAN_ATOMIC_FUNC_SWAP(32, uint32_t) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(32, uint32_t) CSAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t) #endif CSAN_ATOMIC_FUNC_ADD(64, uint64_t) CSAN_ATOMIC_FUNC_CLEAR(64, uint64_t) CSAN_ATOMIC_FUNC_CMPSET(64, uint64_t) CSAN_ATOMIC_FUNC_FCMPSET(64, uint64_t) CSAN_ATOMIC_FUNC_FETCHADD(64, uint64_t) CSAN_ATOMIC_FUNC_LOAD(64, uint64_t) CSAN_ATOMIC_FUNC_READANDCLEAR(64, uint64_t) CSAN_ATOMIC_FUNC_SET(64, uint64_t) CSAN_ATOMIC_FUNC_SUBTRACT(64, uint64_t) CSAN_ATOMIC_FUNC_STORE(64, uint64_t) CSAN_ATOMIC_FUNC_SWAP(64, uint64_t) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(64, uint64_t) CSAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t) #endif CSAN_ATOMIC_FUNC_ADD(int, u_int) CSAN_ATOMIC_FUNC_CLEAR(int, u_int) CSAN_ATOMIC_FUNC_CMPSET(int, u_int) CSAN_ATOMIC_FUNC_FCMPSET(int, u_int) CSAN_ATOMIC_FUNC_FETCHADD(int, u_int) CSAN_ATOMIC_FUNC_LOAD(int, u_int) CSAN_ATOMIC_FUNC_READANDCLEAR(int, u_int) CSAN_ATOMIC_FUNC_SET(int, u_int) CSAN_ATOMIC_FUNC_SUBTRACT(int, u_int) CSAN_ATOMIC_FUNC_STORE(int, u_int) CSAN_ATOMIC_FUNC_SWAP(int, u_int) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(int, u_int) CSAN_ATOMIC_FUNC_TESTANDSET(int, u_int) #endif CSAN_ATOMIC_FUNC_ADD(long, u_long) CSAN_ATOMIC_FUNC_CLEAR(long, u_long) CSAN_ATOMIC_FUNC_CMPSET(long, u_long) CSAN_ATOMIC_FUNC_FCMPSET(long, u_long) CSAN_ATOMIC_FUNC_FETCHADD(long, u_long) CSAN_ATOMIC_FUNC_LOAD(long, u_long) CSAN_ATOMIC_FUNC_READANDCLEAR(long, u_long) CSAN_ATOMIC_FUNC_SET(long, u_long) CSAN_ATOMIC_FUNC_SUBTRACT(long, u_long) CSAN_ATOMIC_FUNC_STORE(long, u_long) CSAN_ATOMIC_FUNC_SWAP(long, u_long) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(long, u_long) CSAN_ATOMIC_FUNC_TESTANDSET(long, u_long) CSAN_ATOMIC_FUNC_TESTANDSET(acq_long, u_long) #endif CSAN_ATOMIC_FUNC_ADD(ptr, uintptr_t) CSAN_ATOMIC_FUNC_CLEAR(ptr, uintptr_t) CSAN_ATOMIC_FUNC_CMPSET(ptr, uintptr_t) CSAN_ATOMIC_FUNC_FCMPSET(ptr, uintptr_t) #if !defined(__amd64__) CSAN_ATOMIC_FUNC_FETCHADD(ptr, uintptr_t) #endif CSAN_ATOMIC_FUNC_LOAD(ptr, uintptr_t) 
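Each CSAN_ATOMIC_FUNC_* line above stamps out a thin wrapper: record the access as an atomic write (atomic read for the load variants), then forward to the real primitive. Expanded by hand for one case, CSAN_ATOMIC_FUNC_FETCHADD(int, u_int) from the earlier macro definition produces essentially:

u_int
kcsan_atomic_fetchadd_int(volatile u_int *ptr, u_int val)
{
	/* write=true, atomic=true: checked against racing plain accesses,
	 * but two atomic accesses are never reported against each other. */
	kcsan_access((uintptr_t)ptr, sizeof(u_int), true, true, __RET_ADDR);
	return (atomic_fetchadd_int(ptr, val));
}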
CSAN_ATOMIC_FUNC_READANDCLEAR(ptr, uintptr_t) CSAN_ATOMIC_FUNC_SET(ptr, uintptr_t) CSAN_ATOMIC_FUNC_SUBTRACT(ptr, uintptr_t) CSAN_ATOMIC_FUNC_STORE(ptr, uintptr_t) CSAN_ATOMIC_FUNC_SWAP(ptr, uintptr_t) #if 0 CSAN_ATOMIC_FUNC_TESTANDCLEAR(ptr, uintptr_t) CSAN_ATOMIC_FUNC_TESTANDSET(ptr, uintptr_t) #endif #define CSAN_ATOMIC_FUNC_THREAD_FENCE(name) \ void kcsan_atomic_thread_fence_##name(void) \ { \ atomic_thread_fence_##name(); \ } CSAN_ATOMIC_FUNC_THREAD_FENCE(acq) CSAN_ATOMIC_FUNC_THREAD_FENCE(acq_rel) CSAN_ATOMIC_FUNC_THREAD_FENCE(rel) CSAN_ATOMIC_FUNC_THREAD_FENCE(seq_cst) /* -------------------------------------------------------------------------- */ #include #include #include int kcsan_bus_space_map(bus_space_tag_t tag, bus_addr_t hnd, bus_size_t size, int flags, bus_space_handle_t *handlep) { return (bus_space_map(tag, hnd, size, flags, handlep)); } void kcsan_bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_unmap(tag, hnd, size); } int kcsan_bus_space_subregion(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, bus_space_handle_t *handlep) { return (bus_space_subregion(tag, hnd, offset, size, handlep)); } #if !defined(__amd64__) int kcsan_bus_space_alloc(bus_space_tag_t tag, bus_addr_t reg_start, bus_addr_t reg_end, bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, bus_addr_t *addrp, bus_space_handle_t *handlep) { return (bus_space_alloc(tag, reg_start, reg_end, size, alignment, boundary, flags, addrp, handlep)); } #endif void kcsan_bus_space_free(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_free(tag, hnd, size); } void kcsan_bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, int flags) { bus_space_barrier(tag, hnd, offset, size, flags); } #define CSAN_BUS_READ_FUNC(func, width, type) \ type kcsan_bus_space_read##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset) \ { \ return (bus_space_read##func##_##width(tag, hnd, \ offset)); \ } \ #define CSAN_BUS_READ_PTR_FUNC(func, width, type) \ void kcsan_bus_space_read_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, type *buf, \ bus_size_t count) \ { \ kcsan_access((uintptr_t)buf, sizeof(type) * count, \ false, false, __RET_ADDR); \ bus_space_read_##func##_##width(tag, hnd, size, buf, \ count); \ } CSAN_BUS_READ_FUNC(, 1, uint8_t) CSAN_BUS_READ_FUNC(_stream, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(multi, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(region, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 1, uint8_t) CSAN_BUS_READ_FUNC(, 2, uint16_t) CSAN_BUS_READ_FUNC(_stream, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(multi, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(region, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 2, uint16_t) CSAN_BUS_READ_FUNC(, 4, uint32_t) CSAN_BUS_READ_FUNC(_stream, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(multi, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(region, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 4, uint32_t) CSAN_BUS_READ_FUNC(, 8, uint64_t) #if defined(__aarch64__) CSAN_BUS_READ_FUNC(_stream, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(multi, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(region, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 8, uint64_t) #endif #define CSAN_BUS_WRITE_FUNC(func, width, type) \ void 
kcsan_bus_space_write##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ bus_space_write##func##_##width(tag, hnd, offset, value); \ } \ #define CSAN_BUS_WRITE_PTR_FUNC(func, width, type) \ void kcsan_bus_space_write_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, const type *buf, \ bus_size_t count) \ { \ kcsan_access((uintptr_t)buf, sizeof(type) * count, \ true, false, __RET_ADDR); \ bus_space_write_##func##_##width(tag, hnd, size, buf, \ count); \ } CSAN_BUS_WRITE_FUNC(, 1, uint8_t) CSAN_BUS_WRITE_FUNC(_stream, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(region, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 1, uint8_t) CSAN_BUS_WRITE_FUNC(, 2, uint16_t) CSAN_BUS_WRITE_FUNC(_stream, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(region, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 2, uint16_t) CSAN_BUS_WRITE_FUNC(, 4, uint32_t) CSAN_BUS_WRITE_FUNC(_stream, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(region, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 4, uint32_t) CSAN_BUS_WRITE_FUNC(, 8, uint64_t) #if defined(__aarch64__) CSAN_BUS_WRITE_FUNC(_stream, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(region, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 8, uint64_t) #endif #define CSAN_BUS_SET_FUNC(func, width, type) \ void kcsan_bus_space_set_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value, \ bus_size_t count) \ { \ bus_space_set_##func##_##width(tag, hnd, offset, value, \ count); \ } CSAN_BUS_SET_FUNC(multi, 1, uint8_t) CSAN_BUS_SET_FUNC(region, 1, uint8_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 1, uint8_t) CSAN_BUS_SET_FUNC(region_stream, 1, uint8_t) #endif CSAN_BUS_SET_FUNC(multi, 2, uint16_t) CSAN_BUS_SET_FUNC(region, 2, uint16_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 2, uint16_t) CSAN_BUS_SET_FUNC(region_stream, 2, uint16_t) #endif CSAN_BUS_SET_FUNC(multi, 4, uint32_t) CSAN_BUS_SET_FUNC(region, 4, uint32_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 4, uint32_t) CSAN_BUS_SET_FUNC(region_stream, 4, uint32_t) #endif #if !defined(__amd64__) CSAN_BUS_SET_FUNC(multi, 8, uint64_t) CSAN_BUS_SET_FUNC(region, 8, uint64_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 8, uint64_t) CSAN_BUS_SET_FUNC(region_stream, 8, uint64_t) #endif #endif Index: head/sys/mips/mips/support.S =================================================================== --- head/sys/mips/mips/support.S (revision 360943) +++ head/sys/mips/mips/support.S (revision 360944) @@ -1,859 +1,846 @@ /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Digital Equipment Corporation and Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright (C) 1989 Digital Equipment Corporation. * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby granted, * provided that the above copyright notice appears in all copies. * Digital Equipment Corporation makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s, * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL) * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s, * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL) * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s, * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL) * * from: @(#)locore.s 8.5 (Berkeley) 1/4/94 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish * $FreeBSD$ */ /* * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jonathan R. Stone for * the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Contains assembly language support routines. */ #include "opt_ddb.h" #include #include #include #include #include #include #include "assym.inc" .set noreorder # Noreorder is default style! /* * Primitives */ .text /* - * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied) - * Copy a NIL-terminated string, at most maxlen characters long. Return the - * number of characters copied (including the NIL) in *lencopied. If the - * string is too long, return ENAMETOOLONG; else return 0. + * Copy a null terminated string from the user address space into + * the kernel address space. + * + * copyinstr(fromaddr, toaddr, maxlength, &lencopied) + * caddr_t fromaddr; + * caddr_t toaddr; + * u_int maxlength; + * u_int *lencopied; */ -LEAF(copystr) +LEAF(copyinstr) + PTR_LA v0, __copyinstr_err + blt a0, zero, __copyinstr_err # make sure address is in user space + GET_CPU_PCPU(v1) + PTR_L v1, PC_CURPCB(v1) + PTR_S v0, U_PCB_ONFAULT(v1) + move t0, a2 beq a2, zero, 4f 1: lbu v0, 0(a0) PTR_SUBU a2, a2, 1 beq v0, zero, 2f sb v0, 0(a1) # each byte until NIL PTR_ADDU a0, a0, 1 bne a2, zero, 1b # less than maxlen PTR_ADDU a1, a1, 1 4: li v0, ENAMETOOLONG # run out of space 2: beq a3, zero, 3f # return num. of copied bytes PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL PTR_S a2, 0(a3) 3: - j ra # v0 is 0 or ENAMETOOLONG + + PTR_S zero, U_PCB_ONFAULT(v1) + j ra nop -END(copystr) - -/* - * Copy a null terminated string from the user address space into - * the kernel address space. 
- * - * copyinstr(fromaddr, toaddr, maxlength, &lencopied) - * caddr_t fromaddr; - * caddr_t toaddr; - * u_int maxlength; - * u_int *lencopied; - */ -NESTED(copyinstr, CALLFRAME_SIZ, ra) - PTR_SUBU sp, sp, CALLFRAME_SIZ - .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ) - PTR_LA v0, copyerr - blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space - REG_S ra, CALLFRAME_RA(sp) - GET_CPU_PCPU(v1) - PTR_L v1, PC_CURPCB(v1) - jal _C_LABEL(copystr) - PTR_S v0, U_PCB_ONFAULT(v1) - REG_L ra, CALLFRAME_RA(sp) - GET_CPU_PCPU(v1) - PTR_L v1, PC_CURPCB(v1) - PTR_S zero, U_PCB_ONFAULT(v1) - j ra - PTR_ADDU sp, sp, CALLFRAME_SIZ +__copyinstr_err: + j ra + li v0, EFAULT END(copyinstr) /* * Copy specified amount of data from user space into the kernel * copyin(from, to, len) * caddr_t *from; (user source address) * caddr_t *to; (kernel destination address) * unsigned len; */ NESTED(copyin, CALLFRAME_SIZ, ra) PTR_SUBU sp, sp, CALLFRAME_SIZ .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ) PTR_LA v0, copyerr blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space REG_S ra, CALLFRAME_RA(sp) GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) jal _C_LABEL(bcopy) PTR_S v0, U_PCB_ONFAULT(v1) REG_L ra, CALLFRAME_RA(sp) GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload PTR_S zero, U_PCB_ONFAULT(v1) PTR_ADDU sp, sp, CALLFRAME_SIZ j ra move v0, zero END(copyin) /* * Copy specified amount of data from kernel to the user space * copyout(from, to, len) * caddr_t *from; (kernel source address) * caddr_t *to; (user destination address) * unsigned len; */ NESTED(copyout, CALLFRAME_SIZ, ra) PTR_SUBU sp, sp, CALLFRAME_SIZ .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ) PTR_LA v0, copyerr blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space REG_S ra, CALLFRAME_RA(sp) GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) jal _C_LABEL(bcopy) PTR_S v0, U_PCB_ONFAULT(v1) REG_L ra, CALLFRAME_RA(sp) GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload PTR_S zero, U_PCB_ONFAULT(v1) PTR_ADDU sp, sp, CALLFRAME_SIZ j ra move v0, zero END(copyout) LEAF(copyerr) REG_L ra, CALLFRAME_RA(sp) PTR_ADDU sp, sp, CALLFRAME_SIZ j ra li v0, EFAULT # return error END(copyerr) /* * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to * user-space. 
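With copystr() gone, copyinstr() above installs __copyinstr_err as its own onfault handler and runs the byte loop inline rather than delegating to a shared routine. Its contract, sketched in plain C (simplified; in the kernel it is a fault on the user access, caught via U_PCB_ONFAULT, that produces the EFAULT path):

#include <stddef.h>
#include <errno.h>

/*
 * Copy a NUL-terminated string of at most maxlen bytes, counting the
 * terminator. Returns 0 on success, ENAMETOOLONG if the buffer fills
 * before a NUL is seen; *done gets the number of bytes copied.
 */
static int
copyinstr_sketch(const char *ufrom, char *kto, size_t maxlen, size_t *done)
{
	size_t n;

	for (n = 0; n < maxlen; ) {
		n++;
		if ((*kto++ = *ufrom++) == '\0') {
			if (done != NULL)
				*done = n;	/* count includes the NUL */
			return (0);
		}
	}
	if (done != NULL)
		*done = n;
	return (ENAMETOOLONG);
}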
*/ #ifdef __mips_n64 LEAF(fueword64) XLEAF(fueword) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) ld v0, 0(a0) # fetch word PTR_S zero, U_PCB_ONFAULT(v1) sd v0, 0(a1) # store word j ra li v0, 0 END(fueword64) #endif LEAF(fueword32) #ifndef __mips_n64 XLEAF(fueword) #endif PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) lw v0, 0(a0) # fetch word PTR_S zero, U_PCB_ONFAULT(v1) sw v0, 0(a1) # store word j ra li v0, 0 END(fueword32) LEAF(fuesword) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) lhu v0, 0(a0) # fetch short PTR_S zero, U_PCB_ONFAULT(v1) sh v0, 0(a1) # store short j ra li v0, 0 END(fuesword) LEAF(fubyte) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) lbu v0, 0(a0) # fetch byte j ra PTR_S zero, U_PCB_ONFAULT(v1) END(fubyte) LEAF(suword32) #ifndef __mips_n64 XLEAF(suword) #endif PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) sw a1, 0(a0) # store word PTR_S zero, U_PCB_ONFAULT(v1) j ra move v0, zero END(suword32) #ifdef __mips_n64 LEAF(suword64) XLEAF(suword) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) sd a1, 0(a0) # store word PTR_S zero, U_PCB_ONFAULT(v1) j ra move v0, zero END(suword64) #endif /* * casueword(9) * u_long casueword(u_long *p, u_long oldval, u_long *oldval_p, * u_long newval) */ /* * casueword32(9) * uint32_t casueword(uint32_t *p, uint32_t oldval, * uint32_t newval) */ LEAF(casueword32) #ifndef __mips_n64 XLEAF(casueword) #endif PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) li v0, 1 move t0, a3 ll t1, 0(a0) bne a1, t1, 1f nop sc t0, 0(a0) # store word xori v0, t0, 1 1: PTR_S zero, U_PCB_ONFAULT(v1) jr ra sw t1, 0(a2) # unconditionally store old word END(casueword32) #ifdef __mips_n64 LEAF(casueword64) XLEAF(casueword) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) li v0, 1 move t0, a3 lld t1, 0(a0) bne a1, t1, 1f nop scd t0, 0(a0) # store double word xori v0, t0, 1 1: PTR_S zero, U_PCB_ONFAULT(v1) jr ra sd t1, 0(a2) # unconditionally store old word END(casueword64) #endif /* * Will have to flush the instruction cache if byte merging is done in hardware. 
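The casueword32/casueword64 pair above returns 0 when the store-conditional wins, 1 when the comparison (or the sc) fails, and -1 on a fault, with the observed old value always written out through the third argument. The intended consumer is a retry loop; a sketch against the kernel prototypes (user_set_bit_sketch is hypothetical):

#include <stdint.h>

/* Kernel prototypes, as implemented above: 0 = ok, 1 = retry, -1 = fault. */
int casueword32(volatile uint32_t *p, uint32_t oldval, uint32_t *oldvalp,
    uint32_t newval);
int fueword32(volatile const void *p, int32_t *val);

static int
user_set_bit_sketch(volatile uint32_t *uaddr, uint32_t bit)
{
	uint32_t old;
	int32_t cur;
	int rv;

	if (fueword32(uaddr, &cur) == -1)
		return (-1);		/* fault: caller maps this to EFAULT */
	old = (uint32_t)cur;
	for (;;) {
		rv = casueword32(uaddr, old, &old, old | bit);
		if (rv != 1)
			return (rv);	/* 0 = done, -1 = fault */
		/* rv == 1: lost a race; 'old' was refreshed, try again. */
	}
}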
*/ LEAF(susword) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) sh a1, 0(a0) # store short PTR_S zero, U_PCB_ONFAULT(v1) j ra move v0, zero END(susword) LEAF(subyte) PTR_LA v0, fswberr blt a0, zero, fswberr # make sure address is in user space nop GET_CPU_PCPU(v1) PTR_L v1, PC_CURPCB(v1) PTR_S v0, U_PCB_ONFAULT(v1) sb a1, 0(a0) # store byte PTR_S zero, U_PCB_ONFAULT(v1) j ra move v0, zero END(subyte) LEAF(fswberr) j ra li v0, -1 END(fswberr) /* * memset(void *s1, int c, int len) * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp */ LEAF(memset) .set noreorder blt a2, 12, memsetsmallclr # small amount to clear? move v0, a0 # save s1 for result sll t1, a1, 8 # compute c << 8 in t1 or t1, t1, a1 # compute c << 8 | c in t1 sll t2, t1, 16 # shift that left 16 or t1, t2, t1 # or together PTR_SUBU t0, zero, a0 # compute # bytes to word align address and t0, t0, 3 beq t0, zero, 1f # skip if word aligned PTR_SUBU a2, a2, t0 # subtract from remaining count SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align PTR_ADDU a0, a0, t0 1: and v1, a2, 3 # compute number of whole words left PTR_SUBU t0, a2, v1 PTR_SUBU a2, a2, t0 PTR_ADDU t0, t0, a0 # compute ending address 2: PTR_ADDU a0, a0, 4 # clear words bne a0, t0, 2b # unrolling loop does not help sw t1, -4(a0) # since we are limited by memory speed memsetsmallclr: ble a2, zero, 2f PTR_ADDU t0, a2, a0 # compute ending address 1: PTR_ADDU a0, a0, 1 # clear bytes bne a0, t0, 1b sb a1, -1(a0) 2: j ra nop .set reorder END(memset) /* * bzero(s1, n) */ LEAF(bzero) XLEAF(blkclr) .set noreorder blt a1, 12, smallclr # small amount to clear? PTR_SUBU a3, zero, a0 # compute # bytes to word align address and a3, a3, 3 beq a3, zero, 1f # skip if word aligned PTR_SUBU a1, a1, a3 # subtract from remaining count SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align PTR_ADDU a0, a0, a3 1: and v0, a1, 3 # compute number of words left PTR_SUBU a3, a1, v0 move a1, v0 PTR_ADDU a3, a3, a0 # compute ending address 2: PTR_ADDU a0, a0, 4 # clear words bne a0, a3, 2b # unrolling loop does not help sw zero, -4(a0) # since we are limited by memory speed smallclr: ble a1, zero, 2f PTR_ADDU a3, a1, a0 # compute ending address 1: PTR_ADDU a0, a0, 1 # clear bytes bne a0, a3, 1b sb zero, -1(a0) 2: j ra nop END(bzero) /* * bcmp(s1, s2, n) */ LEAF(bcmp) .set noreorder blt a2, 16, smallcmp # is it worth any trouble?
xor v0, a0, a1 # compare low two bits of addresses and v0, v0, 3 PTR_SUBU a3, zero, a1 # compute # bytes to word align address bne v0, zero, unalignedcmp # not possible to align addresses and a3, a3, 3 beq a3, zero, 1f PTR_SUBU a2, a2, a3 # subtract from remaining count move v0, v1 # init v0,v1 so unmodified bytes match LWHI v0, 0(a0) # read 1, 2, or 3 bytes LWHI v1, 0(a1) PTR_ADDU a1, a1, a3 bne v0, v1, nomatch PTR_ADDU a0, a0, a3 1: and a3, a2, ~3 # compute number of whole words left PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3 PTR_ADDU a3, a3, a0 # compute ending address 2: lw v0, 0(a0) # compare words lw v1, 0(a1) PTR_ADDU a0, a0, 4 bne v0, v1, nomatch PTR_ADDU a1, a1, 4 bne a0, a3, 2b nop b smallcmp # finish remainder nop unalignedcmp: beq a3, zero, 2f PTR_SUBU a2, a2, a3 # subtract from remaining count PTR_ADDU a3, a3, a0 # compute ending address 1: lbu v0, 0(a0) # compare bytes until a1 word aligned lbu v1, 0(a1) PTR_ADDU a0, a0, 1 bne v0, v1, nomatch PTR_ADDU a1, a1, 1 bne a0, a3, 1b nop 2: and a3, a2, ~3 # compute number of whole words left PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3 PTR_ADDU a3, a3, a0 # compute ending address 3: LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned LWLO v0, 3(a0) lw v1, 0(a1) PTR_ADDU a0, a0, 4 bne v0, v1, nomatch PTR_ADDU a1, a1, 4 bne a0, a3, 3b nop smallcmp: ble a2, zero, match PTR_ADDU a3, a2, a0 # compute ending address 1: lbu v0, 0(a0) lbu v1, 0(a1) PTR_ADDU a0, a0, 1 bne v0, v1, nomatch PTR_ADDU a1, a1, 1 bne a0, a3, 1b nop match: j ra move v0, zero nomatch: j ra li v0, 1 END(bcmp) /* * bit = ffs(value) */ LEAF(ffs) .set noreorder beq a0, zero, 2f move v0, zero 1: and v1, a0, 1 # bit set? addu v0, v0, 1 beq v1, zero, 1b # no, continue srl a0, a0, 1 2: j ra nop END(ffs) /** * void * atomic_set_16(u_int16_t *a, u_int16_t b) * { * *a |= b; * } */ LEAF(atomic_set_16) .set noreorder srl a0, a0, 2 # round down address to be 32-bit aligned sll a0, a0, 2 andi a1, a1, 0xffff 1: ll t0, 0(a0) or t0, t0, a1 sc t0, 0(a0) beq t0, zero, 1b nop j ra nop END(atomic_set_16) /** * void * atomic_clear_16(u_int16_t *a, u_int16_t b) * { * *a &= ~b; * } */ LEAF(atomic_clear_16) .set noreorder srl a0, a0, 2 # round down address to be 32-bit aligned sll a0, a0, 2 nor a1, zero, a1 1: ll t0, 0(a0) move t1, t0 andi t1, t1, 0xffff # t1 has the original lower 16 bits and t1, t1, a1 # t1 has the new lower 16 bits srl t0, t0, 16 # preserve original top 16 bits sll t0, t0, 16 or t0, t0, t1 sc t0, 0(a0) beq t0, zero, 1b nop j ra nop END(atomic_clear_16) /** * void * atomic_subtract_16(uint16_t *a, uint16_t b) * { * *a -= b; * } */ LEAF(atomic_subtract_16) .set noreorder srl a0, a0, 2 # round down address to be 32-bit aligned sll a0, a0, 2 1: ll t0, 0(a0) move t1, t0 andi t1, t1, 0xffff # t1 has the original lower 16 bits subu t1, t1, a1 andi t1, t1, 0xffff # t1 has the new lower 16 bits srl t0, t0, 16 # preserve original top 16 bits sll t0, t0, 16 or t0, t0, t1 sc t0, 0(a0) beq t0, zero, 1b nop j ra nop END(atomic_subtract_16) /** * void * atomic_add_16(uint16_t *a, uint16_t b) * { * *a += b; * } */ LEAF(atomic_add_16) .set noreorder srl a0, a0, 2 # round down address to be 32-bit aligned sll a0, a0, 2 1: ll t0, 0(a0) move t1, t0 andi t1, t1, 0xffff # t1 has the original lower 16 bits addu t1, t1, a1 andi t1, t1, 0xffff # t1 has the new lower 16 bits srl t0, t0, 16 # preserve original top 16 bits sll t0, t0, 16 or t0, t0, t1 sc t0, 0(a0) beq t0, zero, 1b nop j ra nop END(atomic_add_16) /** * void * atomic_add_8(uint8_t *a, uint8_t b) * { * *a += b; * } 
*/ LEAF(atomic_add_8) .set noreorder srl a0, a0, 2 # round down address to be 32-bit aligned sll a0, a0, 2 1: ll t0, 0(a0) move t1, t0 andi t1, t1, 0xff # t1 has the original lower 8 bits addu t1, t1, a1 andi t1, t1, 0xff # t1 has the new lower 8 bits srl t0, t0, 8 # preserve original top 24 bits sll t0, t0, 8 or t0, t0, t1 sc t0, 0(a0) beq t0, zero, 1b nop j ra nop END(atomic_add_8) /** * void * atomic_subtract_8(uint8_t *a, uint8_t b) * { * *a -= b; * } */ LEAF(atomic_subtract_8) .set noreorder srl a0, a0, 2 # round down address to be 32-bit aligned sll a0, a0, 2 1: ll t0, 0(a0) move t1, t0 andi t1, t1, 0xff # t1 has the original lower 8 bits subu t1, t1, a1 andi t1, t1, 0xff # t1 has the new lower 8 bits srl t0, t0, 8 # preserve original top 24 bits sll t0, t0, 8 or t0, t0, t1 sc t0, 0(a0) beq t0, zero, 1b nop j ra nop END(atomic_subtract_8) .set noreorder # Noreorder is default style! #if defined(DDB) || defined(DEBUG) LEAF(kdbpeek) PTR_LA v1, ddberr and v0, a0, 3 # unaligned ? GET_CPU_PCPU(t1) PTR_L t1, PC_CURPCB(t1) bne v0, zero, 1f PTR_S v1, U_PCB_ONFAULT(t1) lw v0, (a0) jr ra PTR_S zero, U_PCB_ONFAULT(t1) 1: LWHI v0, 0(a0) LWLO v0, 3(a0) jr ra PTR_S zero, U_PCB_ONFAULT(t1) END(kdbpeek) LEAF(kdbpeekd) PTR_LA v1, ddberr and v0, a0, 3 # unaligned ? GET_CPU_PCPU(t1) PTR_L t1, PC_CURPCB(t1) bne v0, zero, 1f PTR_S v1, U_PCB_ONFAULT(t1) ld v0, (a0) jr ra PTR_S zero, U_PCB_ONFAULT(t1) 1: REG_LHI v0, 0(a0) REG_LLO v0, 7(a0) jr ra PTR_S zero, U_PCB_ONFAULT(t1) END(kdbpeekd) ddberr: jr ra nop #if defined(DDB) LEAF(kdbpoke) PTR_LA v1, ddberr and v0, a0, 3 # unaligned ? GET_CPU_PCPU(t1) PTR_L t1, PC_CURPCB(t1) bne v0, zero, 1f PTR_S v1, U_PCB_ONFAULT(t1) sw a1, (a0) jr ra PTR_S zero, U_PCB_ONFAULT(t1) 1: SWHI a1, 0(a0) SWLO a1, 3(a0) jr ra PTR_S zero, U_PCB_ONFAULT(t1) END(kdbpoke) .data .globl esym esym: .word 0 #endif /* DDB */ #endif /* DDB || DEBUG */ .text LEAF(breakpoint) break MIPS_BREAK_SOVER_VAL jr ra nop END(breakpoint) LEAF(setjmp) mfc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value! REG_S s0, (SZREG * PCB_REG_S0)(a0) REG_S s1, (SZREG * PCB_REG_S1)(a0) REG_S s2, (SZREG * PCB_REG_S2)(a0) REG_S s3, (SZREG * PCB_REG_S3)(a0) REG_S s4, (SZREG * PCB_REG_S4)(a0) REG_S s5, (SZREG * PCB_REG_S5)(a0) REG_S s6, (SZREG * PCB_REG_S6)(a0) REG_S s7, (SZREG * PCB_REG_S7)(a0) REG_S s8, (SZREG * PCB_REG_S8)(a0) REG_S sp, (SZREG * PCB_REG_SP)(a0) REG_S ra, (SZREG * PCB_REG_RA)(a0) REG_S v0, (SZREG * PCB_REG_SR)(a0) jr ra li v0, 0 # setjmp return END(setjmp) LEAF(longjmp) REG_L v0, (SZREG * PCB_REG_SR)(a0) REG_L ra, (SZREG * PCB_REG_RA)(a0) REG_L s0, (SZREG * PCB_REG_S0)(a0) REG_L s1, (SZREG * PCB_REG_S1)(a0) REG_L s2, (SZREG * PCB_REG_S2)(a0) REG_L s3, (SZREG * PCB_REG_S3)(a0) REG_L s4, (SZREG * PCB_REG_S4)(a0) REG_L s5, (SZREG * PCB_REG_S5)(a0) REG_L s6, (SZREG * PCB_REG_S6)(a0) REG_L s7, (SZREG * PCB_REG_S7)(a0) REG_L s8, (SZREG * PCB_REG_S8)(a0) REG_L sp, (SZREG * PCB_REG_SP)(a0) mtc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value! ITLBNOPFIX jr ra li v0, 1 # longjmp return END(longjmp) Index: head/sys/powerpc/powerpc/copystr.c =================================================================== --- head/sys/powerpc/powerpc/copystr.c (revision 360943) +++ head/sys/powerpc/powerpc/copystr.c (nonexistent) @@ -1,70 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (C) 1995 Wolfgang Solfrank. - * Copyright (C) 1995 TooLs GmbH. - * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by TooLs GmbH. - * 4. The name of TooLs GmbH may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $NetBSD: copystr.c,v 1.3 2000/06/08 06:47:17 kleink Exp $ - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include - -/* - * Emulate copyinstr. - */ -int -copystr(kfaddr, kdaddr, len, done) - const void *kfaddr; - void *kdaddr; - size_t len; - size_t *done; -{ - const u_char *kfp = kfaddr; - u_char *kdp = kdaddr; - size_t l; - int rv; - - rv = ENAMETOOLONG; - for (l = 0; len-- > 0; l++) { - if (!(*kdp++ = *kfp++)) { - l++; - rv = 0; - break; - } - } - if (done != NULL) { - *done = l; - } - return rv; -} Property changes on: head/sys/powerpc/powerpc/copystr.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/riscv/riscv/copystr.c =================================================================== --- head/sys/riscv/riscv/copystr.c (revision 360943) +++ head/sys/riscv/riscv/copystr.c (nonexistent) @@ -1,59 +0,0 @@ -/*- - * Copyright (c) 2014 Andrew Turner - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include - -int -copystr(const void * __restrict kfaddr, void * __restrict kdaddr, size_t len, - size_t * __restrict lencopied) -{ - const char *src; - size_t pos; - char *dst; - int error; - - error = ENAMETOOLONG; - src = kfaddr; - dst = kdaddr; - for (pos = 0; pos < len; pos++) { - dst[pos] = src[pos]; - if (src[pos] == '\0') { - /* Increment pos to hold the number of bytes copied */ - pos++; - error = 0; - break; - } - } - - if (lencopied != NULL) - *lencopied = pos; - - return (error); -} Property changes on: head/sys/riscv/riscv/copystr.c ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/sys/sys/systm.h =================================================================== --- head/sys/sys/systm.h (revision 360943) +++ head/sys/sys/systm.h (revision 360944) @@ -1,615 +1,621 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1988, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)systm.h 8.7 (Berkeley) 3/29/95 * $FreeBSD$ */ #ifndef _SYS_SYSTM_H_ #define _SYS_SYSTM_H_ #include #include #include #include #include #include /* for people using printf mainly */ __NULLABILITY_PRAGMA_PUSH extern int cold; /* nonzero if we are doing a cold boot */ extern int suspend_blocked; /* block suspend due to pending shutdown */ extern int rebooting; /* kern_reboot() has been called. */ extern const char *panicstr; /* panic message */ extern bool panicked; #define KERNEL_PANICKED() __predict_false(panicked) extern char version[]; /* system version */ extern char compiler_version[]; /* compiler version */ extern char copyright[]; /* system copyright */ extern int kstack_pages; /* number of kernel stack pages */ extern u_long pagesizes[]; /* supported page sizes */ extern long physmem; /* physical memory */ extern long realmem; /* 'real' memory */ extern char *rootdevnames[2]; /* names of possible root devices */ extern int boothowto; /* reboot flags, from console subsystem */ extern int bootverbose; /* nonzero to print verbose messages */ extern int maxusers; /* system tune hint */ extern int ngroups_max; /* max # of supplemental groups */ extern int vm_guest; /* Running as virtual machine guest? */ /* * Detected virtual machine guest types. The intention is to expand * and/or add to the VM_GUEST_VM type if specific VM functionality is * ever implemented (e.g. vendor-specific paravirtualization features). * Keep in sync with vm_guest_sysctl_names[]. */ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN, VM_GUEST_HV, VM_GUEST_VMWARE, VM_GUEST_KVM, VM_GUEST_BHYVE, VM_GUEST_VBOX, VM_GUEST_PARALLELS, VM_LAST }; /* * These functions need to be declared before the KASSERT macro is invoked in * !KASSERT_PANIC_OPTIONAL builds, so their declarations are sort of out of * place compared to other function definitions in this header. On the other * hand, this header is a bit disorganized anyway. */ void panic(const char *, ...) __dead2 __printflike(1, 2); void vpanic(const char *, __va_list) __dead2 __printflike(1, 0); #if defined(WITNESS) || defined(INVARIANT_SUPPORT) #ifdef KASSERT_PANIC_OPTIONAL void kassert_panic(const char *fmt, ...) __printflike(1, 2); #else #define kassert_panic panic #endif #endif #ifdef INVARIANTS /* The option is always available */ #define KASSERT(exp,msg) do { \ if (__predict_false(!(exp))) \ kassert_panic msg; \ } while (0) #define VNASSERT(exp, vp, msg) do { \ if (__predict_false(!(exp))) { \ vn_printf(vp, "VNASSERT failed: %s not true at %s:%d (%s)\n",\ #exp, __FILE__, __LINE__, __func__); \ kassert_panic msg; \ } \ } while (0) #define VNPASS(exp, vp) do { \ const char *_exp = #exp; \ VNASSERT(exp, vp, ("condition %s not met at %s:%d (%s)", \ _exp, __FILE__, __LINE__, __func__)); \ } while (0) #else #define KASSERT(exp,msg) do { \ } while (0) #define VNASSERT(exp, vp, msg) do { \ } while (0) #define VNPASS(exp, vp) do { \ } while (0) #endif #ifndef CTASSERT /* Allow lint to override */ #define CTASSERT(x) _Static_assert(x, "compile-time assertion failed") #endif #if defined(_KERNEL) #include /* MAXCPU */ #include /* curthread */ #include #endif /* * Assert that a pointer can be loaded from memory atomically. * * This assertion enforces stronger alignment than necessary. For example, * on some architectures, atomicity for unaligned loads will depend on * whether or not the load spans multiple cache lines. 
*/ #define ASSERT_ATOMIC_LOAD_PTR(var, msg) \ KASSERT(sizeof(var) == sizeof(void *) && \ ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg) /* * Assert that a thread is in critical(9) section. */ #define CRITICAL_ASSERT(td) \ KASSERT((td)->td_critnest >= 1, ("Not in critical section")); /* * If we have already panic'd and this is the thread that called * panic(), then don't block on any mutexes but silently succeed. * Otherwise, the kernel will deadlock since the scheduler isn't * going to run the thread that holds any lock we need. */ #define SCHEDULER_STOPPED_TD(td) ({ \ MPASS((td) == curthread); \ __predict_false((td)->td_stopsched); \ }) #define SCHEDULER_STOPPED() SCHEDULER_STOPPED_TD(curthread) /* * Align variables. */ #define __read_mostly __section(".data.read_mostly") #define __read_frequently __section(".data.read_frequently") #define __exclusive_cache_line __aligned(CACHE_LINE_SIZE) \ __section(".data.exclusive_cache_line") /* * XXX the hints declarations are even more misplaced than most declarations * in this file, since they are needed in one file (per arch) and only used * in two files. * XXX most of these variables should be const. */ extern int osreldate; extern bool dynamic_kenv; extern struct mtx kenv_lock; extern char *kern_envp; extern char *md_envp; extern char static_env[]; extern char static_hints[]; /* by config for now */ extern char **kenvp; extern const void *zero_region; /* address space maps to a zeroed page */ extern int unmapped_buf_allowed; #ifdef __LP64__ #define IOSIZE_MAX iosize_max() #define DEVFS_IOSIZE_MAX devfs_iosize_max() #else #define IOSIZE_MAX SSIZE_MAX #define DEVFS_IOSIZE_MAX SSIZE_MAX #endif /* * General function declarations. */ struct inpcb; struct lock_object; struct malloc_type; struct mtx; struct proc; struct socket; struct thread; struct tty; struct ucred; struct uio; struct _jmp_buf; struct trapframe; struct eventtimer; int setjmp(struct _jmp_buf *) __returns_twice; void longjmp(struct _jmp_buf *, int) __dead2; int dumpstatus(vm_offset_t addr, off_t count); int nullop(void); int eopnotsupp(void); int ureadc(int, struct uio *); void hashdestroy(void *, struct malloc_type *, u_long); void *hashinit(int count, struct malloc_type *type, u_long *hashmask); void *hashinit_flags(int count, struct malloc_type *type, u_long *hashmask, int flags); #define HASH_NOWAIT 0x00000001 #define HASH_WAITOK 0x00000002 void *phashinit(int count, struct malloc_type *type, u_long *nentries); void *phashinit_flags(int count, struct malloc_type *type, u_long *nentries, int flags); void g_waitidle(void); void cpu_boot(int); void cpu_flush_dcache(void *, size_t); void cpu_rootconf(void); void critical_enter_KBI(void); void critical_exit_KBI(void); void critical_exit_preempt(void); void init_param1(void); void init_param2(long physpages); void init_static_kenv(char *, size_t); void tablefull(const char *); /* * Allocate per-thread "current" state in the linuxkpi */ extern int (*lkpi_alloc_current)(struct thread *, int); int linux_alloc_current_noop(struct thread *, int); #if defined(KLD_MODULE) || defined(KTR_CRITICAL) || !defined(_KERNEL) || defined(GENOFFSET) #define critical_enter() critical_enter_KBI() #define critical_exit() critical_exit_KBI() #else static __inline void critical_enter(void) { struct thread_lite *td; td = (struct thread_lite *)curthread; td->td_critnest++; __compiler_membar(); } static __inline void critical_exit(void) { struct thread_lite *td; td = (struct thread_lite *)curthread; KASSERT(td->td_critnest != 0, ("critical_exit: 
td_critnest == 0")); __compiler_membar(); td->td_critnest--; __compiler_membar(); if (__predict_false(td->td_owepreempt)) critical_exit_preempt(); } #endif #ifdef EARLY_PRINTF typedef void early_putc_t(int ch); extern early_putc_t *early_putc; #endif int kvprintf(char const *, void (*)(int, void*), void *, int, __va_list) __printflike(1, 0); void log(int, const char *, ...) __printflike(2, 3); void log_console(struct uio *); void vlog(int, const char *, __va_list) __printflike(2, 0); int asprintf(char **ret, struct malloc_type *mtp, const char *format, ...) __printflike(3, 4); int printf(const char *, ...) __printflike(1, 2); int snprintf(char *, size_t, const char *, ...) __printflike(3, 4); int sprintf(char *buf, const char *, ...) __printflike(2, 3); int uprintf(const char *, ...) __printflike(1, 2); int vprintf(const char *, __va_list) __printflike(1, 0); int vasprintf(char **ret, struct malloc_type *mtp, const char *format, __va_list ap) __printflike(3, 0); int vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0); int vsnrprintf(char *, size_t, int, const char *, __va_list) __printflike(4, 0); int vsprintf(char *buf, const char *, __va_list) __printflike(2, 0); int sscanf(const char *, char const * _Nonnull, ...) __scanflike(2, 3); int vsscanf(const char * _Nonnull, char const * _Nonnull, __va_list) __scanflike(2, 0); long strtol(const char *, char **, int); u_long strtoul(const char *, char **, int); quad_t strtoq(const char *, char **, int); u_quad_t strtouq(const char *, char **, int); void tprintf(struct proc *p, int pri, const char *, ...) __printflike(3, 4); void vtprintf(struct proc *, int, const char *, __va_list) __printflike(3, 0); void hexdump(const void *ptr, int length, const char *hdr, int flags); #define HD_COLUMN_MASK 0xff #define HD_DELIM_MASK 0xff00 #define HD_OMIT_COUNT (1 << 16) #define HD_OMIT_HEX (1 << 17) #define HD_OMIT_CHARS (1 << 18) #define ovbcopy(f, t, l) bcopy((f), (t), (l)) void bcopy(const void * _Nonnull from, void * _Nonnull to, size_t len); void bzero(void * _Nonnull buf, size_t len); void explicit_bzero(void * _Nonnull, size_t); int bcmp(const void *b1, const void *b2, size_t len); void *memset(void * _Nonnull buf, int c, size_t len); void *memcpy(void * _Nonnull to, const void * _Nonnull from, size_t len); void *memmove(void * _Nonnull dest, const void * _Nonnull src, size_t n); int memcmp(const void *b1, const void *b2, size_t len); #ifdef KCSAN void *kcsan_memset(void *, int, size_t); void *kcsan_memcpy(void *, const void *, size_t); void *kcsan_memmove(void *, const void *, size_t); int kcsan_memcmp(const void *, const void *, size_t); #define bcopy(from, to, len) kcsan_memmove((to), (from), (len)) #define bzero(buf, len) kcsan_memset((buf), 0, (len)) #define bcmp(b1, b2, len) kcsan_memcmp((b1), (b2), (len)) #define memset(buf, c, len) kcsan_memset((buf), (c), (len)) #define memcpy(to, from, len) kcsan_memcpy((to), (from), (len)) #define memmove(dest, src, n) kcsan_memmove((dest), (src), (n)) #define memcmp(b1, b2, len) kcsan_memcmp((b1), (b2), (len)) #else #define bcopy(from, to, len) __builtin_memmove((to), (from), (len)) #define bzero(buf, len) __builtin_memset((buf), 0, (len)) #define bcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len)) #define memset(buf, c, len) __builtin_memset((buf), (c), (len)) #define memcpy(to, from, len) __builtin_memcpy((to), (from), (len)) #define memmove(dest, src, n) __builtin_memmove((dest), (src), (n)) #define memcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len)) #endif void 
*memset_early(void * _Nonnull buf, int c, size_t len); #define bzero_early(buf, len) memset_early((buf), 0, (len)) void *memcpy_early(void * _Nonnull to, const void * _Nonnull from, size_t len); void *memmove_early(void * _Nonnull dest, const void * _Nonnull src, size_t n); #define bcopy_early(from, to, len) memmove_early((to), (from), (len)) -int copystr(const void * _Nonnull __restrict kfaddr, - void * _Nonnull __restrict kdaddr, size_t len, - size_t * __restrict lencopied); +#define copystr(src, dst, len, outlen) ({ \ + size_t __r, __len, *__outlen; \ + \ + __len = (len); \ + __outlen = (outlen); \ + __r = strlcpy((dst), (src), __len); \ + if (__outlen != NULL) \ + *__outlen = ((__r >= __len) ? __len : __r + 1); \ + ((__r >= __len) ? ENAMETOOLONG : 0); \ +}) + int copyinstr(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len, size_t * __restrict lencopied); int copyin(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len); int copyin_nofault(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len); int copyout(const void * _Nonnull __restrict kaddr, void * __restrict udaddr, size_t len); int copyout_nofault(const void * _Nonnull __restrict kaddr, void * __restrict udaddr, size_t len); #ifdef KCSAN -int kcsan_copystr(const void *, void *, size_t, size_t *); int kcsan_copyin(const void *, void *, size_t); int kcsan_copyinstr(const void *, void *, size_t, size_t *); int kcsan_copyout(const void *, void *, size_t); -#define copystr(kf, k, l, lc) kcsan_copystr((kf), (k), (l), (lc)) #define copyin(u, k, l) kcsan_copyin((u), (k), (l)) #define copyinstr(u, k, l, lc) kcsan_copyinstr((u), (k), (l), (lc)) #define copyout(k, u, l) kcsan_copyout((k), (u), (l)) #endif int fubyte(volatile const void *base); long fuword(volatile const void *base); int fuword16(volatile const void *base); int32_t fuword32(volatile const void *base); int64_t fuword64(volatile const void *base); int fueword(volatile const void *base, long *val); int fueword32(volatile const void *base, int32_t *val); int fueword64(volatile const void *base, int64_t *val); int subyte(volatile void *base, int byte); int suword(volatile void *base, long word); int suword16(volatile void *base, int word); int suword32(volatile void *base, int32_t word); int suword64(volatile void *base, int64_t word); uint32_t casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval); u_long casuword(volatile u_long *p, u_long oldval, u_long newval); int casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, uint32_t newval); int casueword(volatile u_long *p, u_long oldval, u_long *oldvalp, u_long newval); void realitexpire(void *); int sysbeep(int hertz, int period); void hardclock(int cnt, int usermode); void hardclock_sync(int cpu); void softclock(void *); void statclock(int cnt, int usermode); void profclock(int cnt, int usermode, uintfptr_t pc); int hardclockintr(void); void startprofclock(struct proc *); void stopprofclock(struct proc *); void cpu_startprofclock(void); void cpu_stopprofclock(void); void suspendclock(void); void resumeclock(void); sbintime_t cpu_idleclock(void); void cpu_activeclock(void); void cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt); void cpu_et_frequency(struct eventtimer *et, uint64_t newfreq); extern int cpu_disable_c2_sleep; extern int cpu_disable_c3_sleep; char *kern_getenv(const char *name); void freeenv(char *env); int getenv_int(const char *name, int *data); int getenv_uint(const char *name, unsigned int *data); int
getenv_long(const char *name, long *data); int getenv_ulong(const char *name, unsigned long *data); int getenv_string(const char *name, char *data, int size); int getenv_int64(const char *name, int64_t *data); int getenv_uint64(const char *name, uint64_t *data); int getenv_quad(const char *name, quad_t *data); int kern_setenv(const char *name, const char *value); int kern_unsetenv(const char *name); int testenv(const char *name); int getenv_array(const char *name, void *data, int size, int *psize, int type_size, bool allow_signed); #define GETENV_UNSIGNED false /* negative numbers not allowed */ #define GETENV_SIGNED true /* negative numbers allowed */ typedef uint64_t (cpu_tick_f)(void); void set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var); extern cpu_tick_f *cpu_ticks; uint64_t cpu_tickrate(void); uint64_t cputick2usec(uint64_t tick); #ifdef APM_FIXUP_CALLTODO struct timeval; void adjust_timeout_calltodo(struct timeval *time_change); #endif /* APM_FIXUP_CALLTODO */ #include /* Initialize the world */ void consinit(void); void cpu_initclocks(void); void cpu_initclocks_bsp(void); void cpu_initclocks_ap(void); void usrinfoinit(void); /* Finalize the world */ void kern_reboot(int) __dead2; void shutdown_nice(int); /* Stubs for obsolete functions that used to be for interrupt management */ static __inline intrmask_t splbio(void) { return 0; } static __inline intrmask_t splcam(void) { return 0; } static __inline intrmask_t splclock(void) { return 0; } static __inline intrmask_t splhigh(void) { return 0; } static __inline intrmask_t splimp(void) { return 0; } static __inline intrmask_t splnet(void) { return 0; } static __inline intrmask_t spltty(void) { return 0; } static __inline void splx(intrmask_t ipl __unused) { return; } /* * Common `proc' functions are declared here so that proc.h can be included * less often. 
*/ int _sleep(const void * _Nonnull chan, struct lock_object *lock, int pri, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define msleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) #define msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr), \ (flags)) int msleep_spin_sbt(const void * _Nonnull chan, struct mtx *mtx, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define msleep_spin(chan, mtx, wmesg, timo) \ msleep_spin_sbt((chan), (mtx), (wmesg), tick_sbt * (timo), \ 0, C_HARDCLOCK) int pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define pause(wmesg, timo) \ pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK) #define pause_sig(wmesg, timo) \ pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK | C_CATCH) #define tsleep(chan, pri, wmesg, timo) \ _sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo), \ 0, C_HARDCLOCK) #define tsleep_sbt(chan, pri, wmesg, bt, pr, flags) \ _sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags)) void wakeup(const void *chan); void wakeup_one(const void *chan); void wakeup_any(const void *chan); /* * Common `struct cdev *' stuff are declared here to avoid #include poisoning */ struct cdev; dev_t dev2udev(struct cdev *x); const char *devtoname(struct cdev *cdev); #ifdef __LP64__ size_t devfs_iosize_max(void); size_t iosize_max(void); #endif int poll_no_poll(int events); /* XXX: Should be void nanodelay(u_int nsec); */ void DELAY(int usec); /* Root mount holdback API */ struct root_hold_token { int flags; const char *who; TAILQ_ENTRY(root_hold_token) list; }; struct root_hold_token *root_mount_hold(const char *identifier); void root_mount_hold_token(const char *identifier, struct root_hold_token *h); void root_mount_rel(struct root_hold_token *h); int root_mounted(void); /* * Unit number allocation API. (kern/subr_unit.c) */ struct unrhdr; struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex); void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex); void delete_unrhdr(struct unrhdr *uh); void clear_unrhdr(struct unrhdr *uh); void clean_unrhdr(struct unrhdr *uh); void clean_unrhdrl(struct unrhdr *uh); int alloc_unr(struct unrhdr *uh); int alloc_unr_specific(struct unrhdr *uh, u_int item); int alloc_unrl(struct unrhdr *uh); void free_unr(struct unrhdr *uh, u_int item); #ifndef __LP64__ #define UNR64_LOCKED #endif struct unrhdr64 { uint64_t counter; }; static __inline void new_unrhdr64(struct unrhdr64 *unr64, uint64_t low) { unr64->counter = low; } #ifdef UNR64_LOCKED uint64_t alloc_unr64(struct unrhdr64 *); #else static __inline uint64_t alloc_unr64(struct unrhdr64 *unr64) { return (atomic_fetchadd_64(&unr64->counter, 1)); } #endif void intr_prof_stack_use(struct thread *td, struct trapframe *frame); void counted_warning(unsigned *counter, const char *msg); /* * APIs to manage deprecation and obsolescence. 
*/ struct device; void _gone_in(int major, const char *msg); void _gone_in_dev(struct device *dev, int major, const char *msg); #ifdef NO_OBSOLETE_CODE #define __gone_ok(m, msg) \ _Static_assert(m < P_OSREL_MAJOR(__FreeBSD_version), \ "Obsolete code: " msg); #else #define __gone_ok(m, msg) #endif #define gone_in(major, msg) __gone_ok(major, msg) _gone_in(major, msg) #define gone_in_dev(dev, major, msg) __gone_ok(major, msg) _gone_in_dev(dev, major, msg) __NULLABILITY_PRAGMA_POP #endif /* !_SYS_SYSTM_H_ */ Index: head/tools/coccinelle/copystr9.cocci =================================================================== --- head/tools/coccinelle/copystr9.cocci (nonexistent) +++ head/tools/coccinelle/copystr9.cocci (revision 360944) @@ -0,0 +1,39 @@ +@ nostorederror_nostoredlen @ + expression __src, __dst, __len; + statement S1; +@@ + + S1 +-copystr(__src, __dst, __len, NULL); ++strlcpy(__dst, __src, __len); + +@ ifcondition_nostoredlen @ + expression __src, __dst, __len; + statement S1; +@@ + if ( +( +-copystr(__src, __dst, __len, NULL) == ENAMETOOLONG +| +-copystr(__src, __dst, __len, NULL) != 0 +| +-copystr(__src, __dst, __len, NULL) +) ++strlcpy(__dst, __src, __len) >= __len + ) S1 + +@ nostorederror_storedlen1 @ + expression __src, __dst, __len; + identifier __done; + statement S1; +@@ + S1 +( +-copystr(__src, __dst, __len, &__done); ++__done = strlcpy(__dst, __src, __len); ++__done = MIN(__done, __len); +| +-copystr(__src, __dst, __len, __done); ++ *__done = strlcpy(__dst, __src, __len); ++ *__done = MIN(*__done, __len); +) Property changes on: head/tools/coccinelle/copystr9.cocci ___________________________________________________________________ Added: fbsd:nokeywords ## -0,0 +1 ## +1 \ No newline at end of property
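
A note on the fault-handling pattern in the MIPS copyin()/copyout() routines above: each routine stores a recovery address in the PCB's onfault slot before touching user memory, and the trap handler redirects a faulting access there so the routine can return EFAULT instead of panicking. The following userland sketch illustrates that protocol, with a SIGSEGV handler and sigsetjmp() standing in for the trap handler and pcb_onfault; every name in it (onfault_env, copy_guarded, and so on) is invented for the illustration.

	#include <errno.h>
	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>

	static sigjmp_buf onfault_env;		/* stands in for pcb_onfault */
	static volatile sig_atomic_t onfault_set;

	static void
	segv_handler(int sig)
	{
		(void)sig;
		if (onfault_set)
			siglongjmp(onfault_env, 1);	/* trap handler -> copyerr */
	}

	static int
	copy_guarded(void *dst, const void *src, size_t len)
	{
		if (sigsetjmp(onfault_env, 1) != 0) {
			onfault_set = 0;	/* clear "pcb_onfault" on the error path */
			return (EFAULT);	/* what copyerr returns above */
		}
		onfault_set = 1;		/* arm recovery before the risky access */
		memcpy(dst, src, len);		/* the access that may fault */
		onfault_set = 0;		/* disarm before returning success */
		return (0);
	}

	int
	main(void)
	{
		char buf[16];

		signal(SIGSEGV, segv_handler);
		printf("good copy: %d\n", copy_guarded(buf, "hello", 6));
		printf("bad copy: %d\n", copy_guarded(buf, (void *)0x1, 6));
		return (0);
	}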
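The casueword(9)/casueword32(9) primitives above return -1 if the user address faults, 0 if newval was stored, and 1 if the store-conditional did not happen, with the observed value written through the old-value pointer either way. Below is a hedged sketch of a consumer retry loop under that convention; try_lock_word and its arguments are invented, and only the extern prototype matches the header above.

	#include <errno.h>
	#include <stdint.h>

	int	casueword32(volatile uint32_t *base, uint32_t oldval,
		    uint32_t *oldvalp, uint32_t newval);

	/* Acquire-style CAS loop over a word in user memory (illustrative). */
	static int
	try_lock_word(volatile uint32_t *uaddr, uint32_t unowned, uint32_t me)
	{
		uint32_t old;
		int rv;

		for (;;) {
			rv = casueword32(uaddr, unowned, &old, me);
			if (rv == -1)
				return (EFAULT);	/* user address faulted */
			if (rv == 0)
				return (0);		/* stored 'me' */
			if (old != unowned)
				return (EBUSY);		/* comparison failed: owned */
			/* rv == 1 but the value matched: ll/sc raced; retry. */
		}
	}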
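The 8-bit and 16-bit atomic routines above all use one technique: round the address down to the containing 32-bit word, then retry an ll/sc read-modify-write that recomputes the low bits and preserves the rest. Here is a minimal C11 sketch of the same idea, with compare-exchange standing in for ll/sc; like the assembly, it always operates on the low 16 bits of the aligned word, and the cast through _Atomic uint32_t is for illustration only, not a portability recommendation.

	#include <stdatomic.h>
	#include <stdint.h>

	static void
	atomic_add_16_sketch(uint16_t *a, uint16_t b)
	{
		/* Round down to the containing 32-bit word (the srl/sll pair). */
		_Atomic uint32_t *w =
		    (_Atomic uint32_t *)((uintptr_t)a & ~(uintptr_t)3);
		uint32_t old, new;

		old = atomic_load_explicit(w, memory_order_relaxed);
		do {
			/* Recompute the low 16 bits, preserve the upper 16. */
			new = (old & 0xffff0000u) |
			    (uint32_t)(uint16_t)((old & 0xffffu) + b);
		} while (!atomic_compare_exchange_weak_explicit(w, &old, new,
		    memory_order_relaxed, memory_order_relaxed));
	}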
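The copystr() compatibility macro added to sys/sys/systm.h above preserves the old copystr(9) contract on top of strlcpy(): on success the stored length counts the terminating NUL (as the deleted powerpc and riscv implementations did), and on truncation it stores len and the expression evaluates to ENAMETOOLONG. A small userspace check of those semantics, assuming a strlcpy() is available (BSD libc, glibc 2.38+, or libbsd); copystr_compat simply mirrors the macro under another name.

	#include <assert.h>
	#include <errno.h>
	#include <string.h>

	#define	copystr_compat(src, dst, len, outlen)	({		\
		size_t __r, __len, *__outlen;				\
									\
		__len = (len);						\
		__outlen = (outlen);					\
		__r = strlcpy((dst), (src), __len);			\
		if (__outlen != NULL)					\
			*__outlen = ((__r >= __len) ? __len : __r + 1);	\
		((__r >= __len) ? ENAMETOOLONG : 0);			\
	})

	int
	main(void)
	{
		char buf[8];
		size_t done;

		/* Fits: "abc" plus its NUL -> 4 bytes, success. */
		assert(copystr_compat("abc", buf, sizeof(buf), &done) == 0);
		assert(done == 4 && strcmp(buf, "abc") == 0);

		/* Truncated: done == len and ENAMETOOLONG, as copystr(9) did. */
		assert(copystr_compat("much too long", buf, sizeof(buf),
		    &done) == ENAMETOOLONG);
		assert(done == sizeof(buf));
		return (0);
	}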
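The copystr9.cocci script added above mechanically rewrites in-kernel copystr() call sites to strlcpy(). Below are hand-worked instances of what two of its rules produce; the softc structure and function names are invented for illustration, and in the kernel MIN() comes from sys/param.h.

	#include <string.h>

	#ifndef MIN
	#define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
	#endif

	struct softc {				/* hypothetical driver state */
		char	sc_name[16];
	};

	/* ifcondition_nostoredlen: only the copystr() error was tested. */
	static int
	set_name(struct softc *sc, const char *src)
	{
		/* was: if (copystr(src, sc->sc_name, sizeof(sc->sc_name),
		 * NULL) != 0) */
		if (strlcpy(sc->sc_name, src, sizeof(sc->sc_name)) >=
		    sizeof(sc->sc_name))
			return (-1);	/* ENAMETOOLONG at a kernel call site */
		return (0);
	}

	/* nostorederror_storedlen1: copied length stored, error unused. */
	static size_t
	set_name_len(struct softc *sc, const char *src)
	{
		size_t done;

		/* was: copystr(src, sc->sc_name, sizeof(sc->sc_name), &done); */
		done = strlcpy(sc->sc_name, src, sizeof(sc->sc_name));
		done = MIN(done, sizeof(sc->sc_name));
		return (done);
	}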