diff --git a/lib/libc/sys/procctl.2 b/lib/libc/sys/procctl.2 index ce7a2be5d5e4..30933875ccbc 100644 --- a/lib/libc/sys/procctl.2 +++ b/lib/libc/sys/procctl.2 @@ -1,781 +1,843 @@ .\" Copyright (c) 2013 Hudson River Trading LLC .\" Written by: John H. Baldwin .\" All rights reserved. .\" .\" Copyright (c) 2014 The FreeBSD Foundation .\" Portions of this documentation were written by Konstantin Belousov .\" under sponsorship from the FreeBSD Foundation. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd July 1, 2021 +.Dd September 2, 2021 .Dt PROCCTL 2 .Os .Sh NAME .Nm procctl .Nd control processes .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In sys/procctl.h .Ft int .Fn procctl "idtype_t idtype" "id_t id" "int cmd" "void *data" .Sh DESCRIPTION The .Fn procctl system call provides for control over processes. The .Fa idtype and .Fa id arguments specify the set of processes to control. If multiple processes match the identifier, .Nm will make a .Dq best effort to control as many of the selected processes as possible. An error is only returned if no selected processes successfully complete the request. The following identifier types are supported: .Bl -tag -width P_PGID .It Dv P_PID Control the process with the process ID .Fa id . .It Dv P_PGID Control processes belonging to the process group with the ID .Fa id . .El .Pp The control request to perform is specified by the .Fa cmd argument. The following commands are supported: .Bl -tag -width PROC_TRAPCAP_STATUS .It Dv PROC_ASLR_CTL Controls the Address Space Layout Randomization (ASLR) in the program images created by .Xr execve 2 in the specified process or its descendants that have not changed the control nor modified it by other means. The .Fa data parameter must point to the integer variable holding one of the following values: .Bl -tag -width PROC_ASLR_FORCE_DISABLE .It Dv PROC_ASLR_FORCE_ENABLE Request that ASLR be enabled after execution, even if it is disabled system-wide. The image flag and the set-uid bit might still prevent ASLR from being enabled. .It Dv PROC_ASLR_FORCE_DISABLE Request that ASLR be disabled after execution. Same notes as for .Dv PROC_ASLR_FORCE_ENABLE apply. .It Dv PROC_ASLR_NOFORCE Use the system-wide configured policy for ASLR. .El
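+.Pp
+For example, a minimal sketch (error handling abbreviated) that
+requests forced ASLR enablement for images activated later by the
+calling process:
+.Bd -literal
+#include <sys/procctl.h>
+#include <err.h>
+#include <unistd.h>
+
+int
+main(void)
+{
+	int ctl = PROC_ASLR_FORCE_ENABLE;
+
+	if (procctl(P_PID, getpid(), PROC_ASLR_CTL, &ctl) == -1)
+		err(1, "procctl(PROC_ASLR_CTL)");
+	/* Images activated by a later execve(2) will use ASLR. */
+	return (0);
+}
+.Ed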
.It Dv PROC_ASLR_STATUS Returns the current status of ASLR enablement for the target process. The .Fa data parameter must point to the integer variable, where one of the following values is written: .Bl -tag -width PROC_ASLR_FORCE_DISABLE .It Dv PROC_ASLR_FORCE_ENABLE .It Dv PROC_ASLR_FORCE_DISABLE .It Dv PROC_ASLR_NOFORCE .El .Pp If the currently executed image in the process itself has ASLR enabled, the .Dv PROC_ASLR_ACTIVE flag is or-ed with the value listed above. .It Dv PROC_PROTMAX_CTL Controls implicit application of PROT_MAX protection equal to the .Fa prot argument of the .Xr mmap 2 syscall in the target process. The .Fa data parameter must point to the integer variable holding one of the following values: .Bl -tag -width PROC_PROTMAX_FORCE_DISABLE .It Dv PROC_PROTMAX_FORCE_ENABLE Enables implicit PROT_MAX application, even if it is disabled system-wide by the sysctl .Va vm.imply_prot_max . The image flag might still prevent enablement. .It Dv PROC_PROTMAX_FORCE_DISABLE Request that implicit application of PROT_MAX be disabled. Same notes as for .Dv PROC_PROTMAX_FORCE_ENABLE apply. .It Dv PROC_PROTMAX_NOFORCE Use the system-wide configured policy for PROT_MAX. .El .It Dv PROC_PROTMAX_STATUS Returns the current status of implicit PROT_MAX enablement for the target process. The .Fa data parameter must point to the integer variable, where one of the following values is written: .Bl -tag -width PROC_PROTMAX_FORCE_DISABLE .It Dv PROC_PROTMAX_FORCE_ENABLE .It Dv PROC_PROTMAX_FORCE_DISABLE .It Dv PROC_PROTMAX_NOFORCE .El .Pp If the currently executed image in the process itself has implicit PROT_MAX application enabled, the .Dv PROC_PROTMAX_ACTIVE flag is or-ed with the value listed above. .It Dv PROC_SPROTECT Set process protection state. This is used to mark a process as protected from being killed if the system exhausts the available memory and swap. The .Fa data parameter must point to an integer containing an operation and zero or more optional flags. The following operations are supported: .Bl -tag -width PPROT_CLEAR .It Dv PPROT_SET Mark the selected processes as protected. .It Dv PPROT_CLEAR Clear the protected state of selected processes. .El .Pp The following optional flags are supported: .Bl -tag -width PPROT_DESCEND .It Dv PPROT_DESCEND Apply the requested operation to all child processes of each selected process in addition to each selected process. .It Dv PPROT_INHERIT When used with .Dv PPROT_SET , mark all future child processes of each selected process as protected. Future child processes will in turn mark all of their future child processes as protected. .El .It Dv PROC_REAP_ACQUIRE Acquires the reaper status for the current process. Reaper status means that children orphaned by the reaper's descendants that were forked after the acquisition of reaper status are reparented to the reaper process. After system initialization, .Xr init 8 is the default reaper. .It Dv PROC_REAP_RELEASE Release the reaper state for the current process. The reaper of the current process becomes the new reaper of the current process's descendants.
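+.Pp
+As an illustration, a minimal sketch of a service manager acquiring
+reaper status for itself, so that orphaned descendants are reparented
+to it rather than to
+.Xr init 8 :
+.Bd -literal
+#include <sys/procctl.h>
+#include <err.h>
+#include <unistd.h>
+
+int
+main(void)
+{
+	if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
+		err(1, "procctl(PROC_REAP_ACQUIRE)");
+	/* Children forked from now on have this process as reaper. */
+	return (0);
+}
+.Ed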
.It Dv PROC_REAP_STATUS Provides information about the reaper of the specified process, or the process itself when it is a reaper. The .Fa data argument must point to a .Vt procctl_reaper_status structure, which is filled in by the syscall on successful return. .Bd -literal struct procctl_reaper_status { u_int rs_flags; u_int rs_children; u_int rs_descendants; pid_t rs_reaper; pid_t rs_pid; }; .Ed The .Fa rs_flags field may contain the following flags: .Bl -tag -width REAPER_STATUS_REALINIT .It Dv REAPER_STATUS_OWNED The specified process has acquired reaper status and has not released it. When the flag is returned, the specified process .Fa id identifies the reaper itself; otherwise, the .Fa rs_reaper field of the structure is set to the pid of the reaper for the specified process id. .It Dv REAPER_STATUS_REALINIT The specified process is the root of the reaper tree, i.e., .Xr init 8 . .El .Pp The .Fa rs_children field returns the number of children of the reaper among the descendants. It is possible to have a child whose reaper is not the specified process, since the reaper for any existing children is not reset on the .Dv PROC_REAP_ACQUIRE operation. The .Fa rs_descendants field returns the total number of descendants of the reaper(s), not counting descendants of the reaper in the subtree. The .Fa rs_reaper field returns the reaper pid. The .Fa rs_pid field returns the pid of one reaper child if there are any descendants. .It Dv PROC_REAP_GETPIDS Queries the list of descendants of the reaper of the specified process. The request takes a pointer to a .Vt procctl_reaper_pids structure in the .Fa data parameter. .Bd -literal struct procctl_reaper_pids { u_int rp_count; struct procctl_reaper_pidinfo *rp_pids; }; .Ed When called, the .Fa rp_pids field must point to an array of .Vt procctl_reaper_pidinfo structures, to be filled in on return, and the .Fa rp_count field must specify the size of the array, into which no more than .Fa rp_count elements will be filled in by the kernel. .Pp The .Vt "struct procctl_reaper_pidinfo" structure provides some information about one of the reaper's descendants. Note that for a descendant that is not a child, it may be incorrectly identified because of a race in which the original child process exited and the exited process's pid was reused for an unrelated process. .Bd -literal struct procctl_reaper_pidinfo { pid_t pi_pid; pid_t pi_subtree; u_int pi_flags; }; .Ed The .Fa pi_pid field is the process id of the descendant. The .Fa pi_subtree field provides the pid of the child of the reaper, which is the (grand-)parent of the process. The .Fa pi_flags field returns the following flags, further describing the descendant: .Bl -tag -width REAPER_PIDINFO_REAPER .It Dv REAPER_PIDINFO_VALID Set to indicate that the .Vt procctl_reaper_pidinfo structure was filled in by the kernel. Zero-filling the .Fa rp_pids array and testing the .Dv REAPER_PIDINFO_VALID flag allows the caller to detect the end of the returned array. .It Dv REAPER_PIDINFO_CHILD The .Fa pi_pid field identifies the direct child of the reaper. .It Dv REAPER_PIDINFO_REAPER The reported process is itself a reaper. The descendants of the subordinate reaper are not reported. .El
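+.Pp
+For example, a sketch of enumerating the descendants of the calling
+process, assuming it has already acquired reaper status and using an
+arbitrary fixed-size array for illustration:
+.Bd -literal
+#include <sys/types.h>
+#include <sys/procctl.h>
+#include <err.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#define	NPIDS	128	/* arbitrary upper bound for this sketch */
+
+int
+main(void)
+{
+	struct procctl_reaper_pidinfo pi[NPIDS] = { 0 };
+	struct procctl_reaper_pids rp = {
+		.rp_count = NPIDS,
+		.rp_pids = pi,
+	};
+	u_int i;
+
+	if (procctl(P_PID, getpid(), PROC_REAP_GETPIDS, &rp) == -1)
+		err(1, "procctl(PROC_REAP_GETPIDS)");
+	for (i = 0; i < NPIDS; i++) {
+		if ((pi[i].pi_flags & REAPER_PIDINFO_VALID) == 0)
+			break;	/* zero-filled tail marks the end */
+		printf("pid %d (subtree %d)%s\en", pi[i].pi_pid,
+		    pi[i].pi_subtree,
+		    (pi[i].pi_flags & REAPER_PIDINFO_CHILD) != 0 ?
+		    " direct child" : "");
+	}
+	return (0);
+}
+.Ed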
.It Dv PROC_REAP_KILL Request to deliver a signal to some subset of the descendants of the reaper. The .Fa data parameter must point to a .Vt procctl_reaper_kill structure, which is used both for parameters and status return. .Bd -literal struct procctl_reaper_kill { int rk_sig; u_int rk_flags; pid_t rk_subtree; u_int rk_killed; pid_t rk_fpid; }; .Ed The .Fa rk_sig field specifies the signal to be delivered. Zero is not a valid signal number, unlike for .Xr kill 2 . The .Fa rk_flags field further directs the operation. It is a bitwise OR of zero or more of the following flags: .Bl -tag -width REAPER_KILL_CHILDREN .It Dv REAPER_KILL_CHILDREN Deliver the specified signal only to direct children of the reaper. .It Dv REAPER_KILL_SUBTREE Deliver the specified signal only to descendants that were forked by the direct child with pid specified in the .Fa rk_subtree field. .El If neither the .Dv REAPER_KILL_CHILDREN nor the .Dv REAPER_KILL_SUBTREE flag is specified, all current descendants of the reaper are signalled. .Pp If a signal was delivered to any process, the return value from the request is zero. In this case, the .Fa rk_killed field identifies the number of processes signalled. The .Fa rk_fpid field is set to the pid of the first process for which signal delivery failed, e.g., due to permission problems. If no such process exists, the .Fa rk_fpid field is set to -1. .It Dv PROC_TRACE_CTL Enable or disable tracing of the specified process(es), according to the value of the integer argument. Tracing includes attachment to the process using .Xr ptrace 2 and .Xr ktrace 2 , debugging sysctls, .Xr hwpmc 4 , .Xr dtrace 1 , and core dumping. Possible values for the .Fa data argument are: .Bl -tag -width PROC_TRACE_CTL_DISABLE_EXEC .It Dv PROC_TRACE_CTL_ENABLE Enable tracing, after it was disabled by .Dv PROC_TRACE_CTL_DISABLE . Only allowed for self. .It Dv PROC_TRACE_CTL_DISABLE Disable tracing for the specified process. Tracing is re-enabled when the process changes the executing program with the .Xr execve 2 syscall. A child inherits the trace settings from the parent on .Xr fork 2 . .It Dv PROC_TRACE_CTL_DISABLE_EXEC Same as .Dv PROC_TRACE_CTL_DISABLE , but the setting persists for the process even after .Xr execve 2 . .El .It Dv PROC_TRACE_STATUS Returns the current tracing status for the specified process in the integer variable pointed to by .Fa data . If tracing is disabled, .Fa data is set to -1. If tracing is enabled, but no debugger is attached by the .Xr ptrace 2 syscall, .Fa data is set to 0. If a debugger is attached, .Fa data is set to the pid of the debugger process. .It Dv PROC_TRAPCAP_CTL Controls the capability mode sandbox actions for the specified sandboxed processes, on a return from any syscall which fails with either a .Er ENOTCAPABLE or .Er ECAPMODE error. If the control is enabled, such errors from the syscalls cause delivery of the synchronous .Dv SIGTRAP signal to the thread immediately before returning from the syscalls. .Pp Possible values for the .Fa data argument are: .Bl -tag -width PROC_TRAPCAP_CTL_DISABLE .It Dv PROC_TRAPCAP_CTL_ENABLE Enable the .Dv SIGTRAP signal delivery on capability mode access violations. The enabled mode is inherited by the children of the process, and is kept after .Xr fexecve 2 calls. .It Dv PROC_TRAPCAP_CTL_DISABLE Disable the signal delivery on capability mode access violations. Note that the global sysctl .Dv kern.trap_enotcap might still cause the signal to be delivered. See .Xr capsicum 4 . .El .Pp On signal delivery, the .Va si_errno member of the .Fa siginfo signal handler parameter is set to the syscall error value, and the .Va si_code member is set to .Dv TRAP_CAP . The system call number is stored in the .Va si_syscall field of the .Fa siginfo signal handler parameter. The other system call parameters can be read from the .Fa ucontext_t , but the system call number is typically stored in the register that also contains the return value and so is unavailable in the signal handler. .Pp See .Xr capsicum 4 for more information about the capability mode.
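+.Pp
+As an illustration, a sketch of enabling the trap and inspecting the
+delivered information in a handler (using
+.Xr printf 3
+only for brevity; it is not async-signal safe):
+.Bd -literal
+#include <sys/procctl.h>
+#include <err.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+static void
+trapcap_handler(int sig, siginfo_t *si, void *ctx)
+{
+	(void)sig;
+	(void)ctx;
+	if (si->si_code == TRAP_CAP)
+		printf("syscall %d failed in capability mode: error %d\en",
+		    si->si_syscall, si->si_errno);
+}
+
+int
+main(void)
+{
+	struct sigaction sa;
+	int ctl = PROC_TRAPCAP_CTL_ENABLE;
+
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = trapcap_handler;
+	sa.sa_flags = SA_SIGINFO;
+	if (sigaction(SIGTRAP, &sa, NULL) == -1)
+		err(1, "sigaction");
+	if (procctl(P_PID, getpid(), PROC_TRAPCAP_CTL, &ctl) == -1)
+		err(1, "procctl(PROC_TRAPCAP_CTL)");
+	/* ... enter capability mode with cap_enter(2) and proceed ... */
+	return (0);
+}
+.Ed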
.It Dv PROC_TRAPCAP_STATUS Return the current status of signalling capability mode access violations for the specified process. The integer value pointed to by the .Fa data argument is set to the .Dv PROC_TRAPCAP_CTL_ENABLE value if the process control enables signal delivery, and to .Dv PROC_TRAPCAP_CTL_DISABLE otherwise. .Pp See the note about sysctl .Dv kern.trap_enotcap above, which gives independent global control of signal delivery. .It Dv PROC_PDEATHSIG_CTL Request the delivery of a signal when the parent of the calling process exits. .Fa idtype must be .Dv P_PID and .Fa id must be either the caller's pid or zero, with no difference in effect. The value is cleared for child processes and when executing set-user-ID or set-group-ID binaries. .Fa data must point to a value of type .Vt int indicating the signal that should be delivered to the caller. Use zero to cancel a previously requested signal delivery. .It Dv PROC_PDEATHSIG_STATUS Query the current signal number that will be delivered when the parent of the calling process exits. .Fa idtype must be .Dv P_PID and .Fa id must be either the caller's pid or zero, with no difference in effect. .Fa data must point to a memory location that can hold a value of type .Vt int . If signal delivery has not been requested, it will contain zero on return. .It Dv PROC_STACKGAP_CTL Controls the stack gaps in the specified process. A stack gap is the part of the growth area for a .Dv MAP_STACK mapped region that is reserved and never filled by memory. Instead, the process is guaranteed to receive a .Dv SIGSEGV signal on accessing pages in the gap. Gaps protect against stack overflow corrupting memory adjacent to the stack. .Pp The .Fa data argument must point to an integer variable containing flags. The following flags are allowed: .Bl -tag -width PROC_STACKGAP_DISABLE_EXEC .It Dv PROC_STACKGAP_ENABLE This flag is only accepted for consistency with .Dv PROC_STACKGAP_STATUS . If stack gaps are enabled, the flag is ignored. If disabled, the flag causes an .Er EINVAL error to be returned. After gaps are disabled in a process, they can only be re-enabled when an .Xr execve 2 is performed. .It Dv PROC_STACKGAP_DISABLE Disable stack gaps for the process. For existing stacks, the gap is no longer a reserved part of the growth area and can be filled by memory on access. .It Dv PROC_STACKGAP_ENABLE_EXEC Enable stack gaps for programs started after an .Xr execve 2 by the specified process. .It Dv PROC_STACKGAP_DISABLE_EXEC Inherit disabled stack gaps state after .Xr execve 2 . In other words, if the currently executing program has stack gaps disabled, they are kept disabled on exec. If gaps were enabled, they are kept enabled after exec. .El .Pp The stack gap state is inherited from the parent on .Xr fork 2 . .It Dv PROC_STACKGAP_STATUS Returns the current stack gap state for the specified process. .Fa data must point to an integer variable, which is used to return a bitmask consisting of the following flags: .Bl -tag -width PROC_STACKGAP_DISABLE_EXEC .It Dv PROC_STACKGAP_ENABLE Stack gaps are enabled. .It Dv PROC_STACKGAP_DISABLE Stack gaps are disabled. .It Dv PROC_STACKGAP_ENABLE_EXEC Stack gaps are enabled in the process after .Xr execve 2 . .It Dv PROC_STACKGAP_DISABLE_EXEC Stack gaps are disabled in the process after .Xr execve 2 . .El
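+.Pp
+For example, a minimal sketch of querying the stack gap state of the
+calling process:
+.Bd -literal
+#include <sys/procctl.h>
+#include <err.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int
+main(void)
+{
+	int st;
+
+	if (procctl(P_PID, getpid(), PROC_STACKGAP_STATUS, &st) == -1)
+		err(1, "procctl(PROC_STACKGAP_STATUS)");
+	printf("stack gaps: %s now, %s after exec\en",
+	    (st & PROC_STACKGAP_ENABLE) != 0 ? "enabled" : "disabled",
+	    (st & PROC_STACKGAP_ENABLE_EXEC) != 0 ? "enabled" : "disabled");
+	return (0);
+}
+.Ed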
.It Dv PROC_NO_NEW_PRIVS_CTL Allows one to ignore the SUID and SGID bits on the program images activated by .Xr execve 2 in the specified process and its future descendants. The .Fa data parameter must point to the integer variable holding the following value: .Bl -tag -width PROC_NO_NEW_PRIVS_ENABLE .It Dv PROC_NO_NEW_PRIVS_ENABLE Request SUID and SGID bits to be ignored. .El .Pp It is not possible to disable this setting once it has been enabled. .It Dv PROC_NO_NEW_PRIVS_STATUS Returns whether ignoring of the SUID and SGID bits is enabled for the target process. The .Fa data parameter must point to the integer variable, where one of the following values is written: .Bl -tag -width PROC_NO_NEW_PRIVS_DISABLE .It Dv PROC_NO_NEW_PRIVS_ENABLE .It Dv PROC_NO_NEW_PRIVS_DISABLE .El +.It Dv PROC_WXMAP_CTL +Controls the 'write exclusive against execution' permissions for the +mappings in the process address space. +It overrides the global settings established by the +.Dv kern.elf{32/64}.allow_wx +sysctl, +and the corresponding bit in the ELF control note; see +.Xr elfctl 1 . +.Pp +The +.Fa data +parameter must point to the integer variable holding one of the +following values: +.Bl -tag -width PROC_WX_MAPPINGS_DISALLOW_EXEC +.It Dv PROC_WX_MAPPINGS_PERMIT +Enable creation of mappings that have both write and execute +protection attributes in the specified process's address space. +.It Dv PROC_WX_MAPPINGS_DISALLOW_EXEC +In the new address space created by +.Xr execve 2 , +disallow creation of mappings that have both write and execute +permissions. +.El +.Pp +Once creation of writable and executable mappings is allowed, +it is impossible (and pointless) to disallow it. +The only way to ensure the absence of such mappings after they +were enabled in a given process is to set the +.Dv PROC_WX_MAPPINGS_DISALLOW_EXEC +flag and +.Xr execve 2 +an image. +.It Dv PROC_WXMAP_STATUS +Returns the current status of the 'write exclusive against execution' +enforcement for the specified process. +The +.Fa data +parameter must point to the integer variable, where one of the +following values is written: +.Bl -tag -width PROC_WX_MAPPINGS_DISALLOW_EXEC +.It Dv PROC_WX_MAPPINGS_PERMIT +Creation of simultaneously writable and executable mappings is permitted; +otherwise, the process cannot create such mappings. +.It Dv PROC_WX_MAPPINGS_DISALLOW_EXEC +After +.Xr execve 2 , +the new address space will disallow creation of simultaneously +writable and executable mappings. +.El +.Pp +Additionally, if the address space of the process disallows +creation of simultaneously writable and executable mappings and +it is guaranteed that no such mapping was created since address space +creation, the +.Dv PROC_WXORX_ENFORCE +flag is set in the returned value.
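+.Pp
+As an illustration, a minimal sketch that requests that the image
+activated by the following
+.Xr execve 2
+run with simultaneously writable and executable mappings disallowed:
+.Bd -literal
+#include <sys/procctl.h>
+#include <err.h>
+#include <unistd.h>
+
+extern char **environ;
+
+int
+main(int argc, char *argv[])
+{
+	int ctl = PROC_WX_MAPPINGS_DISALLOW_EXEC;
+
+	if (argc < 2)
+		errx(1, "usage: %s command [args]", argv[0]);
+	if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &ctl) == -1)
+		err(1, "procctl(PROC_WXMAP_CTL)");
+	execve(argv[1], argv + 1, environ);
+	err(1, "execve");
+}
+.Ed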
.El .Sh x86 MACHINE-SPECIFIC REQUESTS .Bl -tag -width PROC_KPTI_STATUS .It Dv PROC_KPTI_CTL AMD64 only. Controls the Kernel Page Table Isolation (KPTI) option for the children of the specified process. For the command to work, the .Va vm.pmap.kpti tunable must be enabled on boot. It is not possible to change the KPTI setting for a running process, except at .Xr execve 2 , where the address space is reinitialized. .Pp The .Fa data parameter must point to an integer variable containing one of the following commands: .Bl -tag -width PROC_KPTI_CTL_DISABLE_ON_EXEC .It Dv PROC_KPTI_CTL_ENABLE_ON_EXEC Enable KPTI after .Xr execve 2 . .It Dv PROC_KPTI_CTL_DISABLE_ON_EXEC Disable KPTI after .Xr execve 2 . Only root or a process having the .Dv PRIV_IO privilege may use this option. .El .It Dv PROC_KPTI_STATUS Returns the current KPTI status for the specified process. .Fa data must point to an integer variable, where one of the following statuses is written: .Bl -tag -width PROC_KPTI_CTL_DISABLE_ON_EXEC .It Dv PROC_KPTI_CTL_ENABLE_ON_EXEC .It Dv PROC_KPTI_CTL_DISABLE_ON_EXEC .El .Pp The status is or-ed with the .Dv PROC_KPTI_STATUS_ACTIVE flag if KPTI is active for the current address space of the process. .El .Sh NOTES Disabling tracing on a process should not be considered a security feature, as it is bypassable both by the kernel and privileged processes, and via other system mechanisms. As such, it should not be utilized to reliably protect cryptographic keying material or other confidential data. +.Pp +Note that processes can trivially bypass the 'no simultaneously +writable and executable mappings' policy by first marking some mapping +as writable and writing code to it, then removing write and adding +execute permission. +This may be legitimately required by some programs, such as JIT compilers. .Sh RETURN VALUES If an error occurs, a value of -1 is returned and .Va errno is set to indicate the error. .Sh ERRORS The .Fn procctl system call will fail if: .Bl -tag -width Er .It Bq Er EFAULT The .Fa data parameter points outside the process's allocated address space. .It Bq Er EINVAL The .Fa cmd argument specifies an unsupported command. .Pp The .Fa idtype argument specifies an unsupported identifier type. .It Bq Er EPERM The calling process does not have permission to perform the requested operation on any of the selected processes. .It Bq Er ESRCH No processes matched the requested .Fa idtype and .Fa id . .It Bq Er EINVAL An invalid operation or flag was passed in .Fa data for a .Dv PROC_SPROTECT command. .It Bq Er EPERM The .Fa idtype argument is not equal to .Dv P_PID , or .Fa id is not equal to the pid of the calling process, for .Dv PROC_REAP_ACQUIRE or .Dv PROC_REAP_RELEASE requests. .It Bq Er EINVAL Invalid or undefined flags were passed to a .Dv PROC_REAP_KILL request. .It Bq Er EINVAL An invalid or zero signal number was requested for a .Dv PROC_REAP_KILL request. .It Bq Er EINVAL The .Dv PROC_REAP_RELEASE request was issued by the .Xr init 8 process. .It Bq Er EBUSY The .Dv PROC_REAP_ACQUIRE request was issued by a process that had already acquired reaper status and has not yet released it. .It Bq Er EBUSY The .Dv PROC_TRACE_CTL request was issued for a process already being traced. .It Bq Er EPERM The .Dv PROC_TRACE_CTL request to re-enable tracing of the process .Po Dv PROC_TRACE_CTL_ENABLE Pc , or to disable persistence of .Dv PROC_TRACE_CTL_DISABLE on .Xr execve 2 was issued for a non-current process. .It Bq Er EINVAL The value of the integer .Fa data parameter for the .Dv PROC_TRACE_CTL or .Dv PROC_TRAPCAP_CTL request is invalid. .It Bq Er EINVAL The .Dv PROC_PDEATHSIG_CTL or .Dv PROC_PDEATHSIG_STATUS request referenced an unsupported .Fa id , .Fa idtype , or an invalid signal number. .El .Sh SEE ALSO .Xr dtrace 1 , .Xr proccontrol 1 , .Xr protect 1 , .Xr cap_enter 2 , .Xr kill 2 , .Xr ktrace 2 , .Xr mmap 2 , .Xr mprotect 2 , .Xr ptrace 2 , .Xr wait 2 , .Xr capsicum 4 , .Xr hwpmc 4 , .Xr init 8 .Sh HISTORY The .Fn procctl function appeared in .Fx 10.0 . .Pp The reaper facility is based on a similar feature of Linux and DragonflyBSD, and first appeared in .Fx 10.2 . .Pp The .Dv PROC_PDEATHSIG_CTL facility is based on the prctl(PR_SET_PDEATHSIG, ...) feature of Linux, and first appeared in .Fx 11.2 . .Pp The ASLR support was added to the system for security-checklist compliance in .Fx 13.0 .
diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c index c417a64d286a..f4b1edb6c7b0 100644 --- a/sys/compat/freebsd32/freebsd32_misc.c +++ b/sys/compat/freebsd32/freebsd32_misc.c @@ -1,3893 +1,3896 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ktrace.h" #define __ELF_WORD_SIZE 32 #ifdef COMPAT_FREEBSD11 #define _WANT_FREEBSD11_KEVENT #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/selinfo.h */ #include /* Must come after sys/selinfo.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #ifdef INET #include #endif #include #include #include #include #include #include #include #include #ifdef __amd64__ #include #endif #include #include #include #include #include #include #include FEATURE(compat_freebsd_32bit, "Compatible with 32-bit FreeBSD"); struct ptrace_io_desc32 { int piod_op; uint32_t piod_offs; uint32_t piod_addr; uint32_t piod_len; }; struct ptrace_sc_ret32 { uint32_t sr_retval[2]; int sr_error; }; struct ptrace_vm_entry32 { int pve_entry; int pve_timestamp; uint32_t pve_start; uint32_t pve_end; uint32_t pve_offset; u_int pve_prot; u_int pve_pathlen; int32_t pve_fileid; u_int pve_fsid; uint32_t pve_path; }; #ifdef __amd64__ CTASSERT(sizeof(struct timeval32) == 8); CTASSERT(sizeof(struct timespec32) == 8); CTASSERT(sizeof(struct itimerval32) == 16); CTASSERT(sizeof(struct bintime32) == 12); #endif CTASSERT(sizeof(struct statfs32) == 256); #ifdef __amd64__ CTASSERT(sizeof(struct rusage32) == 72); #endif CTASSERT(sizeof(struct sigaltstack32) == 12); #ifdef __amd64__ CTASSERT(sizeof(struct kevent32) == 56); #else CTASSERT(sizeof(struct kevent32) == 64); #endif CTASSERT(sizeof(struct iovec32) == 8); 
CTASSERT(sizeof(struct msghdr32) == 28); #ifdef __amd64__ CTASSERT(sizeof(struct stat32) == 208); CTASSERT(sizeof(struct freebsd11_stat32) == 96); #endif CTASSERT(sizeof(struct sigaction32) == 24); static int freebsd32_kevent_copyout(void *arg, struct kevent *kevp, int count); static int freebsd32_kevent_copyin(void *arg, struct kevent *kevp, int count); static int freebsd32_user_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags, const struct timespec32 *ua_rqtp, struct timespec32 *ua_rmtp); void freebsd32_rusage_out(const struct rusage *s, struct rusage32 *s32) { TV_CP(*s, *s32, ru_utime); TV_CP(*s, *s32, ru_stime); CP(*s, *s32, ru_maxrss); CP(*s, *s32, ru_ixrss); CP(*s, *s32, ru_idrss); CP(*s, *s32, ru_isrss); CP(*s, *s32, ru_minflt); CP(*s, *s32, ru_majflt); CP(*s, *s32, ru_nswap); CP(*s, *s32, ru_inblock); CP(*s, *s32, ru_oublock); CP(*s, *s32, ru_msgsnd); CP(*s, *s32, ru_msgrcv); CP(*s, *s32, ru_nsignals); CP(*s, *s32, ru_nvcsw); CP(*s, *s32, ru_nivcsw); } int freebsd32_wait4(struct thread *td, struct freebsd32_wait4_args *uap) { int error, status; struct rusage32 ru32; struct rusage ru, *rup; if (uap->rusage != NULL) rup = &ru; else rup = NULL; error = kern_wait(td, uap->pid, &status, uap->options, rup); if (error) return (error); if (uap->status != NULL) error = copyout(&status, uap->status, sizeof(status)); if (uap->rusage != NULL && error == 0) { freebsd32_rusage_out(&ru, &ru32); error = copyout(&ru32, uap->rusage, sizeof(ru32)); } return (error); } int freebsd32_wait6(struct thread *td, struct freebsd32_wait6_args *uap) { struct wrusage32 wru32; struct __wrusage wru, *wrup; struct siginfo32 si32; struct __siginfo si, *sip; int error, status; if (uap->wrusage != NULL) wrup = &wru; else wrup = NULL; if (uap->info != NULL) { sip = &si; bzero(sip, sizeof(*sip)); } else sip = NULL; error = kern_wait6(td, uap->idtype, PAIR32TO64(id_t, uap->id), &status, uap->options, wrup, sip); if (error != 0) return (error); if (uap->status != NULL) error = copyout(&status, uap->status, sizeof(status)); if (uap->wrusage != NULL && error == 0) { freebsd32_rusage_out(&wru.wru_self, &wru32.wru_self); freebsd32_rusage_out(&wru.wru_children, &wru32.wru_children); error = copyout(&wru32, uap->wrusage, sizeof(wru32)); } if (uap->info != NULL && error == 0) { siginfo_to_siginfo32 (&si, &si32); error = copyout(&si32, uap->info, sizeof(si32)); } return (error); } #ifdef COMPAT_FREEBSD4 static void copy_statfs(struct statfs *in, struct statfs32 *out) { statfs_scale_blocks(in, INT32_MAX); bzero(out, sizeof(*out)); CP(*in, *out, f_bsize); out->f_iosize = MIN(in->f_iosize, INT32_MAX); CP(*in, *out, f_blocks); CP(*in, *out, f_bfree); CP(*in, *out, f_bavail); out->f_files = MIN(in->f_files, INT32_MAX); out->f_ffree = MIN(in->f_ffree, INT32_MAX); CP(*in, *out, f_fsid); CP(*in, *out, f_owner); CP(*in, *out, f_type); CP(*in, *out, f_flags); out->f_syncwrites = MIN(in->f_syncwrites, INT32_MAX); out->f_asyncwrites = MIN(in->f_asyncwrites, INT32_MAX); strlcpy(out->f_fstypename, in->f_fstypename, MFSNAMELEN); strlcpy(out->f_mntonname, in->f_mntonname, min(MNAMELEN, FREEBSD4_MNAMELEN)); out->f_syncreads = MIN(in->f_syncreads, INT32_MAX); out->f_asyncreads = MIN(in->f_asyncreads, INT32_MAX); strlcpy(out->f_mntfromname, in->f_mntfromname, min(MNAMELEN, FREEBSD4_MNAMELEN)); } #endif #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_getfsstat(struct thread *td, struct freebsd4_freebsd32_getfsstat_args *uap) { struct statfs *buf, *sp; struct statfs32 stat32; size_t count, size, copycount; int error; count = 
uap->bufsize / sizeof(struct statfs32); size = count * sizeof(struct statfs); error = kern_getfsstat(td, &buf, size, &count, UIO_SYSSPACE, uap->mode); if (size > 0) { sp = buf; copycount = count; while (copycount > 0 && error == 0) { copy_statfs(sp, &stat32); error = copyout(&stat32, uap->buf, sizeof(stat32)); sp++; uap->buf++; copycount--; } free(buf, M_STATFS); } if (error == 0) td->td_retval[0] = count; return (error); } #endif #ifdef COMPAT_FREEBSD10 int freebsd10_freebsd32_pipe(struct thread *td, struct freebsd10_freebsd32_pipe_args *uap) { return (freebsd10_pipe(td, (struct freebsd10_pipe_args*)uap)); } #endif int freebsd32_sigaltstack(struct thread *td, struct freebsd32_sigaltstack_args *uap) { struct sigaltstack32 s32; struct sigaltstack ss, oss, *ssp; int error; if (uap->ss != NULL) { error = copyin(uap->ss, &s32, sizeof(s32)); if (error) return (error); PTRIN_CP(s32, ss, ss_sp); CP(s32, ss, ss_size); CP(s32, ss, ss_flags); ssp = &ss; } else ssp = NULL; error = kern_sigaltstack(td, ssp, &oss); if (error == 0 && uap->oss != NULL) { PTROUT_CP(oss, s32, ss_sp); CP(oss, s32, ss_size); CP(oss, s32, ss_flags); error = copyout(&s32, uap->oss, sizeof(s32)); } return (error); } /* * Custom version of exec_copyin_args() so that we can translate * the pointers. */ int freebsd32_exec_copyin_args(struct image_args *args, const char *fname, enum uio_seg segflg, u_int32_t *argv, u_int32_t *envv) { char *argp, *envp; u_int32_t *p32, arg; int error; bzero(args, sizeof(*args)); if (argv == NULL) return (EFAULT); /* * Allocate demand-paged memory for the file name, argument, and * environment strings. */ error = exec_alloc_args(args); if (error != 0) return (error); /* * Copy the file name. */ error = exec_args_add_fname(args, fname, segflg); if (error != 0) goto err_exit; /* * extract arguments first */ p32 = argv; for (;;) { error = copyin(p32++, &arg, sizeof(arg)); if (error) goto err_exit; if (arg == 0) break; argp = PTRIN(arg); error = exec_args_add_arg(args, argp, UIO_USERSPACE); if (error != 0) goto err_exit; } /* * extract environment strings */ if (envv) { p32 = envv; for (;;) { error = copyin(p32++, &arg, sizeof(arg)); if (error) goto err_exit; if (arg == 0) break; envp = PTRIN(arg); error = exec_args_add_env(args, envp, UIO_USERSPACE); if (error != 0) goto err_exit; } } return (0); err_exit: exec_free_args(args); return (error); } int freebsd32_execve(struct thread *td, struct freebsd32_execve_args *uap) { struct image_args eargs; struct vmspace *oldvmspace; int error; error = pre_execve(td, &oldvmspace); if (error != 0) return (error); error = freebsd32_exec_copyin_args(&eargs, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &eargs, NULL, oldvmspace); post_execve(td, error, oldvmspace); AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td); return (error); } int freebsd32_fexecve(struct thread *td, struct freebsd32_fexecve_args *uap) { struct image_args eargs; struct vmspace *oldvmspace; int error; error = pre_execve(td, &oldvmspace); if (error != 0) return (error); error = freebsd32_exec_copyin_args(&eargs, NULL, UIO_SYSSPACE, uap->argv, uap->envv); if (error == 0) { eargs.fd = uap->fd; error = kern_execve(td, &eargs, NULL, oldvmspace); } post_execve(td, error, oldvmspace); AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 
0 : error, td); return (error); } int freebsd32_mknodat(struct thread *td, struct freebsd32_mknodat_args *uap) { return (kern_mknodat(td, uap->fd, uap->path, UIO_USERSPACE, uap->mode, PAIR32TO64(dev_t, uap->dev))); } int freebsd32_mprotect(struct thread *td, struct freebsd32_mprotect_args *uap) { int prot; prot = uap->prot; #if defined(__amd64__) if (i386_read_exec && (prot & PROT_READ) != 0) prot |= PROT_EXEC; #endif return (kern_mprotect(td, (uintptr_t)PTRIN(uap->addr), uap->len, prot)); } int freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap) { int prot; prot = uap->prot; #if defined(__amd64__) if (i386_read_exec && (prot & PROT_READ)) prot |= PROT_EXEC; #endif return (kern_mmap(td, &(struct mmap_req){ .mr_hint = (uintptr_t)uap->addr, .mr_len = uap->len, .mr_prot = prot, .mr_flags = uap->flags, .mr_fd = uap->fd, .mr_pos = PAIR32TO64(off_t, uap->pos), })); } #ifdef COMPAT_FREEBSD6 int freebsd6_freebsd32_mmap(struct thread *td, struct freebsd6_freebsd32_mmap_args *uap) { int prot; prot = uap->prot; #if defined(__amd64__) if (i386_read_exec && (prot & PROT_READ)) prot |= PROT_EXEC; #endif return (kern_mmap(td, &(struct mmap_req){ .mr_hint = (uintptr_t)uap->addr, .mr_len = uap->len, .mr_prot = prot, .mr_flags = uap->flags, .mr_fd = uap->fd, .mr_pos = PAIR32TO64(off_t, uap->pos), })); } #endif int freebsd32_setitimer(struct thread *td, struct freebsd32_setitimer_args *uap) { struct itimerval itv, oitv, *itvp; struct itimerval32 i32; int error; if (uap->itv != NULL) { error = copyin(uap->itv, &i32, sizeof(i32)); if (error) return (error); TV_CP(i32, itv, it_interval); TV_CP(i32, itv, it_value); itvp = &itv; } else itvp = NULL; error = kern_setitimer(td, uap->which, itvp, &oitv); if (error || uap->oitv == NULL) return (error); TV_CP(oitv, i32, it_interval); TV_CP(oitv, i32, it_value); return (copyout(&i32, uap->oitv, sizeof(i32))); } int freebsd32_getitimer(struct thread *td, struct freebsd32_getitimer_args *uap) { struct itimerval itv; struct itimerval32 i32; int error; error = kern_getitimer(td, uap->which, &itv); if (error || uap->itv == NULL) return (error); TV_CP(itv, i32, it_interval); TV_CP(itv, i32, it_value); return (copyout(&i32, uap->itv, sizeof(i32))); } int freebsd32_select(struct thread *td, struct freebsd32_select_args *uap) { struct timeval32 tv32; struct timeval tv, *tvp; int error; if (uap->tv != NULL) { error = copyin(uap->tv, &tv32, sizeof(tv32)); if (error) return (error); CP(tv32, tv, tv_sec); CP(tv32, tv, tv_usec); tvp = &tv; } else tvp = NULL; /* * XXX Do pointers need PTRIN()? */ return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp, sizeof(int32_t) * 8)); } int freebsd32_pselect(struct thread *td, struct freebsd32_pselect_args *uap) { struct timespec32 ts32; struct timespec ts; struct timeval tv, *tvp; sigset_t set, *uset; int error; if (uap->ts != NULL) { error = copyin(uap->ts, &ts32, sizeof(ts32)); if (error != 0) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); TIMESPEC_TO_TIMEVAL(&tv, &ts); tvp = &tv; } else tvp = NULL; if (uap->sm != NULL) { error = copyin(uap->sm, &set, sizeof(set)); if (error != 0) return (error); uset = &set; } else uset = NULL; /* * XXX Do pointers need PTRIN()? */ error = kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp, uset, sizeof(int32_t) * 8); return (error); } /* * Copy 'count' items into the destination list pointed to by uap->eventlist. 
*/ static int freebsd32_kevent_copyout(void *arg, struct kevent *kevp, int count) { struct freebsd32_kevent_args *uap; struct kevent32 ks32[KQ_NEVENTS]; uint64_t e; int i, j, error; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct freebsd32_kevent_args *)arg; for (i = 0; i < count; i++) { CP(kevp[i], ks32[i], ident); CP(kevp[i], ks32[i], filter); CP(kevp[i], ks32[i], flags); CP(kevp[i], ks32[i], fflags); #if BYTE_ORDER == LITTLE_ENDIAN ks32[i].data1 = kevp[i].data; ks32[i].data2 = kevp[i].data >> 32; #else ks32[i].data1 = kevp[i].data >> 32; ks32[i].data2 = kevp[i].data; #endif PTROUT_CP(kevp[i], ks32[i], udata); for (j = 0; j < nitems(kevp->ext); j++) { e = kevp[i].ext[j]; #if BYTE_ORDER == LITTLE_ENDIAN ks32[i].ext64[2 * j] = e; ks32[i].ext64[2 * j + 1] = e >> 32; #else ks32[i].ext64[2 * j] = e >> 32; ks32[i].ext64[2 * j + 1] = e; #endif } } error = copyout(ks32, uap->eventlist, count * sizeof *ks32); if (error == 0) uap->eventlist += count; return (error); } /* * Copy 'count' items from the list pointed to by uap->changelist. */ static int freebsd32_kevent_copyin(void *arg, struct kevent *kevp, int count) { struct freebsd32_kevent_args *uap; struct kevent32 ks32[KQ_NEVENTS]; uint64_t e; int i, j, error; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct freebsd32_kevent_args *)arg; error = copyin(uap->changelist, ks32, count * sizeof *ks32); if (error) goto done; uap->changelist += count; for (i = 0; i < count; i++) { CP(ks32[i], kevp[i], ident); CP(ks32[i], kevp[i], filter); CP(ks32[i], kevp[i], flags); CP(ks32[i], kevp[i], fflags); kevp[i].data = PAIR32TO64(uint64_t, ks32[i].data); PTRIN_CP(ks32[i], kevp[i], udata); for (j = 0; j < nitems(kevp->ext); j++) { #if BYTE_ORDER == LITTLE_ENDIAN e = ks32[i].ext64[2 * j + 1]; e <<= 32; e += ks32[i].ext64[2 * j]; #else e = ks32[i].ext64[2 * j]; e <<= 32; e += ks32[i].ext64[2 * j + 1]; #endif kevp[i].ext[j] = e; } } done: return (error); } int freebsd32_kevent(struct thread *td, struct freebsd32_kevent_args *uap) { struct timespec32 ts32; struct timespec ts, *tsp; struct kevent_copyops k_ops = { .arg = uap, .k_copyout = freebsd32_kevent_copyout, .k_copyin = freebsd32_kevent_copyin, }; #ifdef KTRACE struct kevent32 *eventlist = uap->eventlist; #endif int error; if (uap->timeout) { error = copyin(uap->timeout, &ts32, sizeof(ts32)); if (error) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); tsp = &ts; } else tsp = NULL; #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT_ARRAY)) ktrstructarray("kevent32", UIO_USERSPACE, uap->changelist, uap->nchanges, sizeof(struct kevent32)); #endif error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, &k_ops, tsp); #ifdef KTRACE if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) ktrstructarray("kevent32", UIO_USERSPACE, eventlist, td->td_retval[0], sizeof(struct kevent32)); #endif return (error); } #ifdef COMPAT_FREEBSD11 static int freebsd32_kevent11_copyout(void *arg, struct kevent *kevp, int count) { struct freebsd11_freebsd32_kevent_args *uap; struct kevent32_freebsd11 ks32[KQ_NEVENTS]; int i, error; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct freebsd11_freebsd32_kevent_args *)arg; for (i = 0; i < count; i++) { CP(kevp[i], ks32[i], ident); CP(kevp[i], ks32[i], filter); CP(kevp[i], ks32[i], flags); CP(kevp[i], ks32[i], fflags); CP(kevp[i], ks32[i], data); PTROUT_CP(kevp[i], ks32[i], udata); } error = copyout(ks32, uap->eventlist, count * sizeof *ks32); if (error == 0) uap->eventlist += count; return 
(error); } /* * Copy 'count' items from the list pointed to by uap->changelist. */ static int freebsd32_kevent11_copyin(void *arg, struct kevent *kevp, int count) { struct freebsd11_freebsd32_kevent_args *uap; struct kevent32_freebsd11 ks32[KQ_NEVENTS]; int i, j, error; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct freebsd11_freebsd32_kevent_args *)arg; error = copyin(uap->changelist, ks32, count * sizeof *ks32); if (error) goto done; uap->changelist += count; for (i = 0; i < count; i++) { CP(ks32[i], kevp[i], ident); CP(ks32[i], kevp[i], filter); CP(ks32[i], kevp[i], flags); CP(ks32[i], kevp[i], fflags); CP(ks32[i], kevp[i], data); PTRIN_CP(ks32[i], kevp[i], udata); for (j = 0; j < nitems(kevp->ext); j++) kevp[i].ext[j] = 0; } done: return (error); } int freebsd11_freebsd32_kevent(struct thread *td, struct freebsd11_freebsd32_kevent_args *uap) { struct timespec32 ts32; struct timespec ts, *tsp; struct kevent_copyops k_ops = { .arg = uap, .k_copyout = freebsd32_kevent11_copyout, .k_copyin = freebsd32_kevent11_copyin, }; #ifdef KTRACE struct kevent32_freebsd11 *eventlist = uap->eventlist; #endif int error; if (uap->timeout) { error = copyin(uap->timeout, &ts32, sizeof(ts32)); if (error) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); tsp = &ts; } else tsp = NULL; #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT_ARRAY)) ktrstructarray("kevent32_freebsd11", UIO_USERSPACE, uap->changelist, uap->nchanges, sizeof(struct kevent32_freebsd11)); #endif error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, &k_ops, tsp); #ifdef KTRACE if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) ktrstructarray("kevent32_freebsd11", UIO_USERSPACE, eventlist, td->td_retval[0], sizeof(struct kevent32_freebsd11)); #endif return (error); } #endif int freebsd32_gettimeofday(struct thread *td, struct freebsd32_gettimeofday_args *uap) { struct timeval atv; struct timeval32 atv32; struct timezone rtz; int error = 0; if (uap->tp) { microtime(&atv); CP(atv, atv32, tv_sec); CP(atv, atv32, tv_usec); error = copyout(&atv32, uap->tp, sizeof (atv32)); } if (error == 0 && uap->tzp != NULL) { rtz.tz_minuteswest = 0; rtz.tz_dsttime = 0; error = copyout(&rtz, uap->tzp, sizeof (rtz)); } return (error); } int freebsd32_getrusage(struct thread *td, struct freebsd32_getrusage_args *uap) { struct rusage32 s32; struct rusage s; int error; error = kern_getrusage(td, uap->who, &s); if (error == 0) { freebsd32_rusage_out(&s, &s32); error = copyout(&s32, uap->rusage, sizeof(s32)); } return (error); } static void ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl, struct ptrace_lwpinfo32 *pl32) { bzero(pl32, sizeof(*pl32)); pl32->pl_lwpid = pl->pl_lwpid; pl32->pl_event = pl->pl_event; pl32->pl_flags = pl->pl_flags; pl32->pl_sigmask = pl->pl_sigmask; pl32->pl_siglist = pl->pl_siglist; siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo); strcpy(pl32->pl_tdname, pl->pl_tdname); pl32->pl_child_pid = pl->pl_child_pid; pl32->pl_syscall_code = pl->pl_syscall_code; pl32->pl_syscall_narg = pl->pl_syscall_narg; } static void ptrace_sc_ret_to32(const struct ptrace_sc_ret *psr, struct ptrace_sc_ret32 *psr32) { bzero(psr32, sizeof(*psr32)); psr32->sr_retval[0] = psr->sr_retval[0]; psr32->sr_retval[1] = psr->sr_retval[1]; psr32->sr_error = psr->sr_error; } int freebsd32_ptrace(struct thread *td, struct freebsd32_ptrace_args *uap) { union { struct ptrace_io_desc piod; struct ptrace_lwpinfo pl; struct ptrace_vm_entry pve; struct ptrace_coredump pc; struct dbreg32 dbreg; struct fpreg32 fpreg; struct reg32 
reg; register_t args[nitems(td->td_sa.args)]; struct ptrace_sc_ret psr; int ptevents; } r; union { struct ptrace_io_desc32 piod; struct ptrace_lwpinfo32 pl; struct ptrace_vm_entry32 pve; struct ptrace_coredump32 pc; uint32_t args[nitems(td->td_sa.args)]; struct ptrace_sc_ret32 psr; } r32; void *addr; int data, error = 0, i; AUDIT_ARG_PID(uap->pid); AUDIT_ARG_CMD(uap->req); AUDIT_ARG_VALUE(uap->data); addr = &r; data = uap->data; switch (uap->req) { case PT_GET_EVENT_MASK: case PT_GET_SC_ARGS: case PT_GET_SC_RET: break; case PT_LWPINFO: if (uap->data > sizeof(r32.pl)) return (EINVAL); /* * Pass size of native structure in 'data'. Truncate * if necessary to avoid siginfo. */ data = sizeof(r.pl); if (uap->data < offsetof(struct ptrace_lwpinfo32, pl_siginfo) + sizeof(struct siginfo32)) data = offsetof(struct ptrace_lwpinfo, pl_siginfo); break; case PT_GETREGS: bzero(&r.reg, sizeof(r.reg)); break; case PT_GETFPREGS: bzero(&r.fpreg, sizeof(r.fpreg)); break; case PT_GETDBREGS: bzero(&r.dbreg, sizeof(r.dbreg)); break; case PT_SETREGS: error = copyin(uap->addr, &r.reg, sizeof(r.reg)); break; case PT_SETFPREGS: error = copyin(uap->addr, &r.fpreg, sizeof(r.fpreg)); break; case PT_SETDBREGS: error = copyin(uap->addr, &r.dbreg, sizeof(r.dbreg)); break; case PT_SET_EVENT_MASK: if (uap->data != sizeof(r.ptevents)) error = EINVAL; else error = copyin(uap->addr, &r.ptevents, uap->data); break; case PT_IO: error = copyin(uap->addr, &r32.piod, sizeof(r32.piod)); if (error) break; CP(r32.piod, r.piod, piod_op); PTRIN_CP(r32.piod, r.piod, piod_offs); PTRIN_CP(r32.piod, r.piod, piod_addr); CP(r32.piod, r.piod, piod_len); break; case PT_VM_ENTRY: error = copyin(uap->addr, &r32.pve, sizeof(r32.pve)); if (error) break; CP(r32.pve, r.pve, pve_entry); CP(r32.pve, r.pve, pve_timestamp); CP(r32.pve, r.pve, pve_start); CP(r32.pve, r.pve, pve_end); CP(r32.pve, r.pve, pve_offset); CP(r32.pve, r.pve, pve_prot); CP(r32.pve, r.pve, pve_pathlen); CP(r32.pve, r.pve, pve_fileid); CP(r32.pve, r.pve, pve_fsid); PTRIN_CP(r32.pve, r.pve, pve_path); break; case PT_COREDUMP: if (uap->data != sizeof(r32.pc)) error = EINVAL; else error = copyin(uap->addr, &r32.pc, uap->data); CP(r32.pc, r.pc, pc_fd); CP(r32.pc, r.pc, pc_flags); r.pc.pc_limit = PAIR32TO64(off_t, r32.pc.pc_limit); data = sizeof(r.pc); break; default: addr = uap->addr; break; } if (error) return (error); error = kern_ptrace(td, uap->req, uap->pid, addr, data); if (error) return (error); switch (uap->req) { case PT_VM_ENTRY: CP(r.pve, r32.pve, pve_entry); CP(r.pve, r32.pve, pve_timestamp); CP(r.pve, r32.pve, pve_start); CP(r.pve, r32.pve, pve_end); CP(r.pve, r32.pve, pve_offset); CP(r.pve, r32.pve, pve_prot); CP(r.pve, r32.pve, pve_pathlen); CP(r.pve, r32.pve, pve_fileid); CP(r.pve, r32.pve, pve_fsid); error = copyout(&r32.pve, uap->addr, sizeof(r32.pve)); break; case PT_IO: CP(r.piod, r32.piod, piod_len); error = copyout(&r32.piod, uap->addr, sizeof(r32.piod)); break; case PT_GETREGS: error = copyout(&r.reg, uap->addr, sizeof(r.reg)); break; case PT_GETFPREGS: error = copyout(&r.fpreg, uap->addr, sizeof(r.fpreg)); break; case PT_GETDBREGS: error = copyout(&r.dbreg, uap->addr, sizeof(r.dbreg)); break; case PT_GET_EVENT_MASK: /* NB: The size in uap->data is validated in kern_ptrace(). 
*/ error = copyout(&r.ptevents, uap->addr, uap->data); break; case PT_LWPINFO: ptrace_lwpinfo_to32(&r.pl, &r32.pl); error = copyout(&r32.pl, uap->addr, uap->data); break; case PT_GET_SC_ARGS: for (i = 0; i < nitems(r.args); i++) r32.args[i] = (uint32_t)r.args[i]; error = copyout(r32.args, uap->addr, MIN(uap->data, sizeof(r32.args))); break; case PT_GET_SC_RET: ptrace_sc_ret_to32(&r.psr, &r32.psr); error = copyout(&r32.psr, uap->addr, MIN(uap->data, sizeof(r32.psr))); break; } return (error); } int freebsd32_copyinuio(struct iovec32 *iovp, u_int iovcnt, struct uio **uiop) { struct iovec32 iov32; struct iovec *iov; struct uio *uio; u_int iovlen; int error, i; *uiop = NULL; if (iovcnt > UIO_MAXIOV) return (EINVAL); iovlen = iovcnt * sizeof(struct iovec); uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK); iov = (struct iovec *)(uio + 1); for (i = 0; i < iovcnt; i++) { error = copyin(&iovp[i], &iov32, sizeof(struct iovec32)); if (error) { free(uio, M_IOV); return (error); } iov[i].iov_base = PTRIN(iov32.iov_base); iov[i].iov_len = iov32.iov_len; } uio->uio_iov = iov; uio->uio_iovcnt = iovcnt; uio->uio_segflg = UIO_USERSPACE; uio->uio_offset = -1; uio->uio_resid = 0; for (i = 0; i < iovcnt; i++) { if (iov->iov_len > INT_MAX - uio->uio_resid) { free(uio, M_IOV); return (EINVAL); } uio->uio_resid += iov->iov_len; iov++; } *uiop = uio; return (0); } int freebsd32_readv(struct thread *td, struct freebsd32_readv_args *uap) { struct uio *auio; int error; error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = kern_readv(td, uap->fd, auio); free(auio, M_IOV); return (error); } int freebsd32_writev(struct thread *td, struct freebsd32_writev_args *uap) { struct uio *auio; int error; error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = kern_writev(td, uap->fd, auio); free(auio, M_IOV); return (error); } int freebsd32_preadv(struct thread *td, struct freebsd32_preadv_args *uap) { struct uio *auio; int error; error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = kern_preadv(td, uap->fd, auio, PAIR32TO64(off_t,uap->offset)); free(auio, M_IOV); return (error); } int freebsd32_pwritev(struct thread *td, struct freebsd32_pwritev_args *uap) { struct uio *auio; int error; error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = kern_pwritev(td, uap->fd, auio, PAIR32TO64(off_t,uap->offset)); free(auio, M_IOV); return (error); } int freebsd32_copyiniov(struct iovec32 *iovp32, u_int iovcnt, struct iovec **iovp, int error) { struct iovec32 iov32; struct iovec *iov; u_int iovlen; int i; *iovp = NULL; if (iovcnt > UIO_MAXIOV) return (error); iovlen = iovcnt * sizeof(struct iovec); iov = malloc(iovlen, M_IOV, M_WAITOK); for (i = 0; i < iovcnt; i++) { error = copyin(&iovp32[i], &iov32, sizeof(struct iovec32)); if (error) { free(iov, M_IOV); return (error); } iov[i].iov_base = PTRIN(iov32.iov_base); iov[i].iov_len = iov32.iov_len; } *iovp = iov; return (0); } static int freebsd32_copyinmsghdr(struct msghdr32 *msg32, struct msghdr *msg) { struct msghdr32 m32; int error; error = copyin(msg32, &m32, sizeof(m32)); if (error) return (error); msg->msg_name = PTRIN(m32.msg_name); msg->msg_namelen = m32.msg_namelen; msg->msg_iov = PTRIN(m32.msg_iov); msg->msg_iovlen = m32.msg_iovlen; msg->msg_control = PTRIN(m32.msg_control); msg->msg_controllen = m32.msg_controllen; msg->msg_flags = m32.msg_flags; return (0); } static int freebsd32_copyoutmsghdr(struct msghdr *msg, 
struct msghdr32 *msg32) { struct msghdr32 m32; int error; m32.msg_name = PTROUT(msg->msg_name); m32.msg_namelen = msg->msg_namelen; m32.msg_iov = PTROUT(msg->msg_iov); m32.msg_iovlen = msg->msg_iovlen; m32.msg_control = PTROUT(msg->msg_control); m32.msg_controllen = msg->msg_controllen; m32.msg_flags = msg->msg_flags; error = copyout(&m32, msg32, sizeof(m32)); return (error); } #ifndef __mips__ #define FREEBSD32_ALIGNBYTES (sizeof(int) - 1) #else #define FREEBSD32_ALIGNBYTES (sizeof(long) - 1) #endif #define FREEBSD32_ALIGN(p) \ (((u_long)(p) + FREEBSD32_ALIGNBYTES) & ~FREEBSD32_ALIGNBYTES) #define FREEBSD32_CMSG_SPACE(l) \ (FREEBSD32_ALIGN(sizeof(struct cmsghdr)) + FREEBSD32_ALIGN(l)) #define FREEBSD32_CMSG_DATA(cmsg) ((unsigned char *)(cmsg) + \ FREEBSD32_ALIGN(sizeof(struct cmsghdr))) static size_t freebsd32_cmsg_convert(const struct cmsghdr *cm, void *data, socklen_t datalen) { size_t copylen; union { struct timespec32 ts; struct timeval32 tv; struct bintime32 bt; } tmp32; union { struct timespec ts; struct timeval tv; struct bintime bt; } *in; in = data; copylen = 0; switch (cm->cmsg_level) { case SOL_SOCKET: switch (cm->cmsg_type) { case SCM_TIMESTAMP: TV_CP(*in, tmp32, tv); copylen = sizeof(tmp32.tv); break; case SCM_BINTIME: BT_CP(*in, tmp32, bt); copylen = sizeof(tmp32.bt); break; case SCM_REALTIME: case SCM_MONOTONIC: TS_CP(*in, tmp32, ts); copylen = sizeof(tmp32.ts); break; default: break; } default: break; } if (copylen == 0) return (datalen); KASSERT((datalen >= copylen), ("corrupted cmsghdr")); bcopy(&tmp32, data, copylen); return (copylen); } static int freebsd32_copy_msg_out(struct msghdr *msg, struct mbuf *control) { struct cmsghdr *cm; void *data; socklen_t clen, datalen, datalen_out, oldclen; int error; caddr_t ctlbuf; int len, maxlen, copylen; struct mbuf *m; error = 0; len = msg->msg_controllen; maxlen = msg->msg_controllen; msg->msg_controllen = 0; ctlbuf = msg->msg_control; for (m = control; m != NULL && len > 0; m = m->m_next) { cm = mtod(m, struct cmsghdr *); clen = m->m_len; while (cm != NULL) { if (sizeof(struct cmsghdr) > clen || cm->cmsg_len > clen) { error = EINVAL; break; } data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; datalen_out = freebsd32_cmsg_convert(cm, data, datalen); /* * Copy out the message header. Preserve the native * message size in case we need to inspect the message * contents later. */ copylen = sizeof(struct cmsghdr); if (len < copylen) { msg->msg_flags |= MSG_CTRUNC; m_dispose_extcontrolm(m); goto exit; } oldclen = cm->cmsg_len; cm->cmsg_len = FREEBSD32_ALIGN(sizeof(struct cmsghdr)) + datalen_out; error = copyout(cm, ctlbuf, copylen); cm->cmsg_len = oldclen; if (error != 0) goto exit; ctlbuf += FREEBSD32_ALIGN(copylen); len -= FREEBSD32_ALIGN(copylen); copylen = datalen_out; if (len < copylen) { msg->msg_flags |= MSG_CTRUNC; m_dispose_extcontrolm(m); break; } /* Copy out the message data. 
*/ error = copyout(data, ctlbuf, copylen); if (error) goto exit; ctlbuf += FREEBSD32_ALIGN(copylen); len -= FREEBSD32_ALIGN(copylen); if (CMSG_SPACE(datalen) < clen) { clen -= CMSG_SPACE(datalen); cm = (struct cmsghdr *) ((caddr_t)cm + CMSG_SPACE(datalen)); } else { clen = 0; cm = NULL; } msg->msg_controllen += FREEBSD32_CMSG_SPACE(datalen_out); } } if (len == 0 && m != NULL) { msg->msg_flags |= MSG_CTRUNC; m_dispose_extcontrolm(m); } exit: return (error); } int freebsd32_recvmsg(td, uap) struct thread *td; struct freebsd32_recvmsg_args /* { int s; struct msghdr32 *msg; int flags; } */ *uap; { struct msghdr msg; struct msghdr32 m32; struct iovec *uiov, *iov; struct mbuf *control = NULL; struct mbuf **controlp; int error; error = copyin(uap->msg, &m32, sizeof(m32)); if (error) return (error); error = freebsd32_copyinmsghdr(uap->msg, &msg); if (error) return (error); error = freebsd32_copyiniov(PTRIN(m32.msg_iov), m32.msg_iovlen, &iov, EMSGSIZE); if (error) return (error); msg.msg_flags = uap->flags; uiov = msg.msg_iov; msg.msg_iov = iov; controlp = (msg.msg_control != NULL) ? &control : NULL; error = kern_recvit(td, uap->s, &msg, UIO_USERSPACE, controlp); if (error == 0) { msg.msg_iov = uiov; if (control != NULL) error = freebsd32_copy_msg_out(&msg, control); else msg.msg_controllen = 0; if (error == 0) error = freebsd32_copyoutmsghdr(&msg, uap->msg); } free(iov, M_IOV); if (control != NULL) { if (error != 0) m_dispose_extcontrolm(control); m_freem(control); } return (error); } /* * Copy-in the array of control messages constructed using alignment * and padding suitable for a 32-bit environment and construct an * mbuf using alignment and padding suitable for a 64-bit kernel. * The alignment and padding are defined indirectly by CMSG_DATA(), * CMSG_SPACE() and CMSG_LEN(). */ static int freebsd32_copyin_control(struct mbuf **mp, caddr_t buf, u_int buflen) { struct cmsghdr *cm; struct mbuf *m; void *in, *in1, *md; u_int msglen, outlen; int error; if (buflen > MCLBYTES) return (EINVAL); in = malloc(buflen, M_TEMP, M_WAITOK); error = copyin(buf, in, buflen); if (error != 0) goto out; /* * Make a pass over the input buffer to determine the amount of space * required for 64 bit-aligned copies of the control messages. */ in1 = in; outlen = 0; while (buflen > 0) { if (buflen < sizeof(*cm)) { error = EINVAL; break; } cm = (struct cmsghdr *)in1; if (cm->cmsg_len < FREEBSD32_ALIGN(sizeof(*cm))) { error = EINVAL; break; } msglen = FREEBSD32_ALIGN(cm->cmsg_len); if (msglen > buflen || msglen < cm->cmsg_len) { error = EINVAL; break; } buflen -= msglen; in1 = (char *)in1 + msglen; outlen += CMSG_ALIGN(sizeof(*cm)) + CMSG_ALIGN(msglen - FREEBSD32_ALIGN(sizeof(*cm))); } if (error == 0 && outlen > MCLBYTES) { /* * XXXMJ This implies that the upper limit on 32-bit aligned * control messages is less than MCLBYTES, and so we are not * perfectly compatible. However, there is no platform * guarantee that mbuf clusters larger than MCLBYTES can be * allocated. */ error = EINVAL; } if (error != 0) goto out; m = m_get2(outlen, M_WAITOK, MT_CONTROL, 0); m->m_len = outlen; md = mtod(m, void *); /* * Make a second pass over input messages, copying them into the output * buffer. */ in1 = in; while (outlen > 0) { /* Copy the message header and align the length field. */ cm = md; memcpy(cm, in1, sizeof(*cm)); msglen = cm->cmsg_len - FREEBSD32_ALIGN(sizeof(*cm)); cm->cmsg_len = CMSG_ALIGN(sizeof(*cm)) + msglen; /* Copy the message body. 
*/ in1 = (char *)in1 + FREEBSD32_ALIGN(sizeof(*cm)); md = (char *)md + CMSG_ALIGN(sizeof(*cm)); memcpy(md, in1, msglen); in1 = (char *)in1 + FREEBSD32_ALIGN(msglen); md = (char *)md + CMSG_ALIGN(msglen); KASSERT(outlen >= CMSG_ALIGN(sizeof(*cm)) + CMSG_ALIGN(msglen), ("outlen %u underflow, msglen %u", outlen, msglen)); outlen -= CMSG_ALIGN(sizeof(*cm)) + CMSG_ALIGN(msglen); } *mp = m; out: free(in, M_TEMP); return (error); } int freebsd32_sendmsg(struct thread *td, struct freebsd32_sendmsg_args *uap) { struct msghdr msg; struct msghdr32 m32; struct iovec *iov; struct mbuf *control = NULL; struct sockaddr *to = NULL; int error; error = copyin(uap->msg, &m32, sizeof(m32)); if (error) return (error); error = freebsd32_copyinmsghdr(uap->msg, &msg); if (error) return (error); error = freebsd32_copyiniov(PTRIN(m32.msg_iov), m32.msg_iovlen, &iov, EMSGSIZE); if (error) return (error); msg.msg_iov = iov; if (msg.msg_name != NULL) { error = getsockaddr(&to, msg.msg_name, msg.msg_namelen); if (error) { to = NULL; goto out; } msg.msg_name = to; } if (msg.msg_control) { if (msg.msg_controllen < sizeof(struct cmsghdr)) { error = EINVAL; goto out; } error = freebsd32_copyin_control(&control, msg.msg_control, msg.msg_controllen); if (error) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; } error = kern_sendit(td, uap->s, &msg, uap->flags, control, UIO_USERSPACE); out: free(iov, M_IOV); if (to) free(to, M_SONAME); return (error); } int freebsd32_recvfrom(struct thread *td, struct freebsd32_recvfrom_args *uap) { struct msghdr msg; struct iovec aiov; int error; if (uap->fromlenaddr) { error = copyin(PTRIN(uap->fromlenaddr), &msg.msg_namelen, sizeof(msg.msg_namelen)); if (error) return (error); } else { msg.msg_namelen = 0; } msg.msg_name = PTRIN(uap->from); msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = PTRIN(uap->buf); aiov.iov_len = uap->len; msg.msg_control = NULL; msg.msg_flags = uap->flags; error = kern_recvit(td, uap->s, &msg, UIO_USERSPACE, NULL); if (error == 0 && uap->fromlenaddr) error = copyout(&msg.msg_namelen, PTRIN(uap->fromlenaddr), sizeof (msg.msg_namelen)); return (error); } int freebsd32_settimeofday(struct thread *td, struct freebsd32_settimeofday_args *uap) { struct timeval32 tv32; struct timeval tv, *tvp; struct timezone tz, *tzp; int error; if (uap->tv) { error = copyin(uap->tv, &tv32, sizeof(tv32)); if (error) return (error); CP(tv32, tv, tv_sec); CP(tv32, tv, tv_usec); tvp = &tv; } else tvp = NULL; if (uap->tzp) { error = copyin(uap->tzp, &tz, sizeof(tz)); if (error) return (error); tzp = &tz; } else tzp = NULL; return (kern_settimeofday(td, tvp, tzp)); } int freebsd32_utimes(struct thread *td, struct freebsd32_utimes_args *uap) { struct timeval32 s32[2]; struct timeval s[2], *sp; int error; if (uap->tptr != NULL) { error = copyin(uap->tptr, s32, sizeof(s32)); if (error) return (error); CP(s32[0], s[0], tv_sec); CP(s32[0], s[0], tv_usec); CP(s32[1], s[1], tv_sec); CP(s32[1], s[1], tv_usec); sp = s; } else sp = NULL; return (kern_utimesat(td, AT_FDCWD, uap->path, UIO_USERSPACE, sp, UIO_SYSSPACE)); } int freebsd32_lutimes(struct thread *td, struct freebsd32_lutimes_args *uap) { struct timeval32 s32[2]; struct timeval s[2], *sp; int error; if (uap->tptr != NULL) { error = copyin(uap->tptr, s32, sizeof(s32)); if (error) return (error); CP(s32[0], s[0], tv_sec); CP(s32[0], s[0], tv_usec); CP(s32[1], s[1], tv_sec); CP(s32[1], s[1], tv_usec); sp = s; } else sp = NULL; return (kern_lutimes(td, uap->path, UIO_USERSPACE, sp, UIO_SYSSPACE)); } int freebsd32_futimes(struct 
thread *td, struct freebsd32_futimes_args *uap) { struct timeval32 s32[2]; struct timeval s[2], *sp; int error; if (uap->tptr != NULL) { error = copyin(uap->tptr, s32, sizeof(s32)); if (error) return (error); CP(s32[0], s[0], tv_sec); CP(s32[0], s[0], tv_usec); CP(s32[1], s[1], tv_sec); CP(s32[1], s[1], tv_usec); sp = s; } else sp = NULL; return (kern_futimes(td, uap->fd, sp, UIO_SYSSPACE)); } int freebsd32_futimesat(struct thread *td, struct freebsd32_futimesat_args *uap) { struct timeval32 s32[2]; struct timeval s[2], *sp; int error; if (uap->times != NULL) { error = copyin(uap->times, s32, sizeof(s32)); if (error) return (error); CP(s32[0], s[0], tv_sec); CP(s32[0], s[0], tv_usec); CP(s32[1], s[1], tv_sec); CP(s32[1], s[1], tv_usec); sp = s; } else sp = NULL; return (kern_utimesat(td, uap->fd, uap->path, UIO_USERSPACE, sp, UIO_SYSSPACE)); } int freebsd32_futimens(struct thread *td, struct freebsd32_futimens_args *uap) { struct timespec32 ts32[2]; struct timespec ts[2], *tsp; int error; if (uap->times != NULL) { error = copyin(uap->times, ts32, sizeof(ts32)); if (error) return (error); CP(ts32[0], ts[0], tv_sec); CP(ts32[0], ts[0], tv_nsec); CP(ts32[1], ts[1], tv_sec); CP(ts32[1], ts[1], tv_nsec); tsp = ts; } else tsp = NULL; return (kern_futimens(td, uap->fd, tsp, UIO_SYSSPACE)); } int freebsd32_utimensat(struct thread *td, struct freebsd32_utimensat_args *uap) { struct timespec32 ts32[2]; struct timespec ts[2], *tsp; int error; if (uap->times != NULL) { error = copyin(uap->times, ts32, sizeof(ts32)); if (error) return (error); CP(ts32[0], ts[0], tv_sec); CP(ts32[0], ts[0], tv_nsec); CP(ts32[1], ts[1], tv_sec); CP(ts32[1], ts[1], tv_nsec); tsp = ts; } else tsp = NULL; return (kern_utimensat(td, uap->fd, uap->path, UIO_USERSPACE, tsp, UIO_SYSSPACE, uap->flag)); } int freebsd32_adjtime(struct thread *td, struct freebsd32_adjtime_args *uap) { struct timeval32 tv32; struct timeval delta, olddelta, *deltap; int error; if (uap->delta) { error = copyin(uap->delta, &tv32, sizeof(tv32)); if (error) return (error); CP(tv32, delta, tv_sec); CP(tv32, delta, tv_usec); deltap = &delta; } else deltap = NULL; error = kern_adjtime(td, deltap, &olddelta); if (uap->olddelta && error == 0) { CP(olddelta, tv32, tv_sec); CP(olddelta, tv32, tv_usec); error = copyout(&tv32, uap->olddelta, sizeof(tv32)); } return (error); } #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_statfs(struct thread *td, struct freebsd4_freebsd32_statfs_args *uap) { struct statfs32 s32; struct statfs *sp; int error; sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_statfs(td, uap->path, UIO_USERSPACE, sp); if (error == 0) { copy_statfs(sp, &s32); error = copyout(&s32, uap->buf, sizeof(s32)); } free(sp, M_STATFS); return (error); } #endif #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_fstatfs(struct thread *td, struct freebsd4_freebsd32_fstatfs_args *uap) { struct statfs32 s32; struct statfs *sp; int error; sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_fstatfs(td, uap->fd, sp); if (error == 0) { copy_statfs(sp, &s32); error = copyout(&s32, uap->buf, sizeof(s32)); } free(sp, M_STATFS); return (error); } #endif #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_fhstatfs(struct thread *td, struct freebsd4_freebsd32_fhstatfs_args *uap) { struct statfs32 s32; struct statfs *sp; fhandle_t fh; int error; if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) return (error); sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_fhstatfs(td, fh, sp); if (error == 0) { copy_statfs(sp, &s32);
error = copyout(&s32, uap->buf, sizeof(s32)); } free(sp, M_STATFS); return (error); } #endif int freebsd32_pread(struct thread *td, struct freebsd32_pread_args *uap) { return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, PAIR32TO64(off_t, uap->offset))); } int freebsd32_pwrite(struct thread *td, struct freebsd32_pwrite_args *uap) { return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, PAIR32TO64(off_t, uap->offset))); } #ifdef COMPAT_43 int ofreebsd32_lseek(struct thread *td, struct ofreebsd32_lseek_args *uap) { return (kern_lseek(td, uap->fd, uap->offset, uap->whence)); } #endif int freebsd32_lseek(struct thread *td, struct freebsd32_lseek_args *uap) { int error; off_t pos; error = kern_lseek(td, uap->fd, PAIR32TO64(off_t, uap->offset), uap->whence); /* Expand the quad return into two parts for eax and edx */ pos = td->td_uretoff.tdu_off; td->td_retval[RETVAL_LO] = pos & 0xffffffff; /* %eax */ td->td_retval[RETVAL_HI] = pos >> 32; /* %edx */ return error; } int freebsd32_truncate(struct thread *td, struct freebsd32_truncate_args *uap) { return (kern_truncate(td, uap->path, UIO_USERSPACE, PAIR32TO64(off_t, uap->length))); } int freebsd32_ftruncate(struct thread *td, struct freebsd32_ftruncate_args *uap) { return (kern_ftruncate(td, uap->fd, PAIR32TO64(off_t, uap->length))); } #ifdef COMPAT_43 int ofreebsd32_getdirentries(struct thread *td, struct ofreebsd32_getdirentries_args *uap) { struct ogetdirentries_args ap; int error; long loff; int32_t loff_cut; ap.fd = uap->fd; ap.buf = uap->buf; ap.count = uap->count; ap.basep = NULL; error = kern_ogetdirentries(td, &ap, &loff); if (error == 0) { loff_cut = loff; error = copyout(&loff_cut, uap->basep, sizeof(int32_t)); } return (error); } #endif #if defined(COMPAT_FREEBSD11) int freebsd11_freebsd32_getdirentries(struct thread *td, struct freebsd11_freebsd32_getdirentries_args *uap) { long base; int32_t base32; int error; error = freebsd11_kern_getdirentries(td, uap->fd, uap->buf, uap->count, &base, NULL); if (error) return (error); if (uap->basep != NULL) { base32 = base; error = copyout(&base32, uap->basep, sizeof(int32_t)); } return (error); } int freebsd11_freebsd32_getdents(struct thread *td, struct freebsd11_freebsd32_getdents_args *uap) { struct freebsd11_freebsd32_getdirentries_args ap; ap.fd = uap->fd; ap.buf = uap->buf; ap.count = uap->count; ap.basep = NULL; return (freebsd11_freebsd32_getdirentries(td, &ap)); } #endif /* COMPAT_FREEBSD11 */ #ifdef COMPAT_FREEBSD6 /* versions with the 'int pad' argument */ int freebsd6_freebsd32_pread(struct thread *td, struct freebsd6_freebsd32_pread_args *uap) { return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, PAIR32TO64(off_t, uap->offset))); } int freebsd6_freebsd32_pwrite(struct thread *td, struct freebsd6_freebsd32_pwrite_args *uap) { return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, PAIR32TO64(off_t, uap->offset))); } int freebsd6_freebsd32_lseek(struct thread *td, struct freebsd6_freebsd32_lseek_args *uap) { int error; off_t pos; error = kern_lseek(td, uap->fd, PAIR32TO64(off_t, uap->offset), uap->whence); /* Expand the quad return into two parts for eax and edx */ pos = *(off_t *)(td->td_retval); td->td_retval[RETVAL_LO] = pos & 0xffffffff; /* %eax */ td->td_retval[RETVAL_HI] = pos >> 32; /* %edx */ return error; } int freebsd6_freebsd32_truncate(struct thread *td, struct freebsd6_freebsd32_truncate_args *uap) { return (kern_truncate(td, uap->path, UIO_USERSPACE, PAIR32TO64(off_t, uap->length))); } int freebsd6_freebsd32_ftruncate(struct thread *td, struct 
freebsd6_freebsd32_ftruncate_args *uap) { return (kern_ftruncate(td, uap->fd, PAIR32TO64(off_t, uap->length))); } #endif /* COMPAT_FREEBSD6 */ struct sf_hdtr32 { uint32_t headers; int hdr_cnt; uint32_t trailers; int trl_cnt; }; static int freebsd32_do_sendfile(struct thread *td, struct freebsd32_sendfile_args *uap, int compat) { struct sf_hdtr32 hdtr32; struct sf_hdtr hdtr; struct uio *hdr_uio, *trl_uio; struct file *fp; cap_rights_t rights; struct iovec32 *iov32; off_t offset, sbytes; int error; offset = PAIR32TO64(off_t, uap->offset); if (offset < 0) return (EINVAL); hdr_uio = trl_uio = NULL; if (uap->hdtr != NULL) { error = copyin(uap->hdtr, &hdtr32, sizeof(hdtr32)); if (error) goto out; PTRIN_CP(hdtr32, hdtr, headers); CP(hdtr32, hdtr, hdr_cnt); PTRIN_CP(hdtr32, hdtr, trailers); CP(hdtr32, hdtr, trl_cnt); if (hdtr.headers != NULL) { iov32 = PTRIN(hdtr32.headers); error = freebsd32_copyinuio(iov32, hdtr32.hdr_cnt, &hdr_uio); if (error) goto out; #ifdef COMPAT_FREEBSD4 /* * In FreeBSD < 5.0 the nbytes to send also included * the header. If compat is specified subtract the * header size from nbytes. */ if (compat) { if (uap->nbytes > hdr_uio->uio_resid) uap->nbytes -= hdr_uio->uio_resid; else uap->nbytes = 0; } #endif } if (hdtr.trailers != NULL) { iov32 = PTRIN(hdtr32.trailers); error = freebsd32_copyinuio(iov32, hdtr32.trl_cnt, &trl_uio); if (error) goto out; } } AUDIT_ARG_FD(uap->fd); if ((error = fget_read(td, uap->fd, cap_rights_init_one(&rights, CAP_PREAD), &fp)) != 0) goto out; error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, offset, uap->nbytes, &sbytes, uap->flags, td); fdrop(fp, td); if (uap->sbytes != NULL) copyout(&sbytes, uap->sbytes, sizeof(off_t)); out: if (hdr_uio) free(hdr_uio, M_IOV); if (trl_uio) free(trl_uio, M_IOV); return (error); } #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_sendfile(struct thread *td, struct freebsd4_freebsd32_sendfile_args *uap) { return (freebsd32_do_sendfile(td, (struct freebsd32_sendfile_args *)uap, 1)); } #endif int freebsd32_sendfile(struct thread *td, struct freebsd32_sendfile_args *uap) { return (freebsd32_do_sendfile(td, uap, 0)); } static void copy_stat(struct stat *in, struct stat32 *out) { CP(*in, *out, st_dev); CP(*in, *out, st_ino); CP(*in, *out, st_mode); CP(*in, *out, st_nlink); CP(*in, *out, st_uid); CP(*in, *out, st_gid); CP(*in, *out, st_rdev); TS_CP(*in, *out, st_atim); TS_CP(*in, *out, st_mtim); TS_CP(*in, *out, st_ctim); CP(*in, *out, st_size); CP(*in, *out, st_blocks); CP(*in, *out, st_blksize); CP(*in, *out, st_flags); CP(*in, *out, st_gen); TS_CP(*in, *out, st_birthtim); out->st_padding0 = 0; out->st_padding1 = 0; #ifdef __STAT32_TIME_T_EXT out->st_atim_ext = 0; out->st_mtim_ext = 0; out->st_ctim_ext = 0; out->st_btim_ext = 0; #endif bzero(out->st_spare, sizeof(out->st_spare)); } #ifdef COMPAT_43 static void copy_ostat(struct stat *in, struct ostat32 *out) { bzero(out, sizeof(*out)); CP(*in, *out, st_dev); CP(*in, *out, st_ino); CP(*in, *out, st_mode); CP(*in, *out, st_nlink); CP(*in, *out, st_uid); CP(*in, *out, st_gid); CP(*in, *out, st_rdev); out->st_size = MIN(in->st_size, INT32_MAX); TS_CP(*in, *out, st_atim); TS_CP(*in, *out, st_mtim); TS_CP(*in, *out, st_ctim); CP(*in, *out, st_blksize); CP(*in, *out, st_blocks); CP(*in, *out, st_flags); CP(*in, *out, st_gen); } #endif #ifdef COMPAT_43 int ofreebsd32_stat(struct thread *td, struct ofreebsd32_stat_args *uap) { struct stat sb; struct ostat32 sb32; int error; error = kern_statat(td, 0, AT_FDCWD, uap->path, UIO_USERSPACE, &sb, NULL); if (error) return (error); 
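/* Convert to the old 32-bit stat layout; copy_ostat() clamps st_size to INT32_MAX. */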
copy_ostat(&sb, &sb32); error = copyout(&sb32, uap->ub, sizeof (sb32)); return (error); } #endif int freebsd32_fstat(struct thread *td, struct freebsd32_fstat_args *uap) { struct stat ub; struct stat32 ub32; int error; error = kern_fstat(td, uap->fd, &ub); if (error) return (error); copy_stat(&ub, &ub32); error = copyout(&ub32, uap->ub, sizeof(ub32)); return (error); } #ifdef COMPAT_43 int ofreebsd32_fstat(struct thread *td, struct ofreebsd32_fstat_args *uap) { struct stat ub; struct ostat32 ub32; int error; error = kern_fstat(td, uap->fd, &ub); if (error) return (error); copy_ostat(&ub, &ub32); error = copyout(&ub32, uap->ub, sizeof(ub32)); return (error); } #endif int freebsd32_fstatat(struct thread *td, struct freebsd32_fstatat_args *uap) { struct stat ub; struct stat32 ub32; int error; error = kern_statat(td, uap->flag, uap->fd, uap->path, UIO_USERSPACE, &ub, NULL); if (error) return (error); copy_stat(&ub, &ub32); error = copyout(&ub32, uap->buf, sizeof(ub32)); return (error); } #ifdef COMPAT_43 int ofreebsd32_lstat(struct thread *td, struct ofreebsd32_lstat_args *uap) { struct stat sb; struct ostat32 sb32; int error; error = kern_statat(td, AT_SYMLINK_NOFOLLOW, AT_FDCWD, uap->path, UIO_USERSPACE, &sb, NULL); if (error) return (error); copy_ostat(&sb, &sb32); error = copyout(&sb32, uap->ub, sizeof (sb32)); return (error); } #endif int freebsd32_fhstat(struct thread *td, struct freebsd32_fhstat_args *uap) { struct stat sb; struct stat32 sb32; struct fhandle fh; int error; error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t)); if (error != 0) return (error); error = kern_fhstat(td, fh, &sb); if (error != 0) return (error); copy_stat(&sb, &sb32); error = copyout(&sb32, uap->sb, sizeof (sb32)); return (error); } #if defined(COMPAT_FREEBSD11) extern int ino64_trunc_error; static int freebsd11_cvtstat32(struct stat *in, struct freebsd11_stat32 *out) { CP(*in, *out, st_ino); if (in->st_ino != out->st_ino) { switch (ino64_trunc_error) { default: case 0: break; case 1: return (EOVERFLOW); case 2: out->st_ino = UINT32_MAX; break; } } CP(*in, *out, st_nlink); if (in->st_nlink != out->st_nlink) { switch (ino64_trunc_error) { default: case 0: break; case 1: return (EOVERFLOW); case 2: out->st_nlink = UINT16_MAX; break; } } out->st_dev = in->st_dev; if (out->st_dev != in->st_dev) { switch (ino64_trunc_error) { default: break; case 1: return (EOVERFLOW); } } CP(*in, *out, st_mode); CP(*in, *out, st_uid); CP(*in, *out, st_gid); out->st_rdev = in->st_rdev; if (out->st_rdev != in->st_rdev) { switch (ino64_trunc_error) { default: break; case 1: return (EOVERFLOW); } } TS_CP(*in, *out, st_atim); TS_CP(*in, *out, st_mtim); TS_CP(*in, *out, st_ctim); CP(*in, *out, st_size); CP(*in, *out, st_blocks); CP(*in, *out, st_blksize); CP(*in, *out, st_flags); CP(*in, *out, st_gen); TS_CP(*in, *out, st_birthtim); out->st_lspare = 0; bzero((char *)&out->st_birthtim + sizeof(out->st_birthtim), sizeof(*out) - offsetof(struct freebsd11_stat32, st_birthtim) - sizeof(out->st_birthtim)); return (0); } int freebsd11_freebsd32_stat(struct thread *td, struct freebsd11_freebsd32_stat_args *uap) { struct stat sb; struct freebsd11_stat32 sb32; int error; error = kern_statat(td, 0, AT_FDCWD, uap->path, UIO_USERSPACE, &sb, NULL); if (error != 0) return (error); error = freebsd11_cvtstat32(&sb, &sb32); if (error == 0) error = copyout(&sb32, uap->ub, sizeof (sb32)); return (error); } int freebsd11_freebsd32_fstat(struct thread *td, struct freebsd11_freebsd32_fstat_args *uap) { struct stat sb; struct freebsd11_stat32 sb32; int error; 
error = kern_fstat(td, uap->fd, &sb); if (error != 0) return (error); error = freebsd11_cvtstat32(&sb, &sb32); if (error == 0) error = copyout(&sb32, uap->ub, sizeof (sb32)); return (error); } int freebsd11_freebsd32_fstatat(struct thread *td, struct freebsd11_freebsd32_fstatat_args *uap) { struct stat sb; struct freebsd11_stat32 sb32; int error; error = kern_statat(td, uap->flag, uap->fd, uap->path, UIO_USERSPACE, &sb, NULL); if (error != 0) return (error); error = freebsd11_cvtstat32(&sb, &sb32); if (error == 0) error = copyout(&sb32, uap->buf, sizeof (sb32)); return (error); } int freebsd11_freebsd32_lstat(struct thread *td, struct freebsd11_freebsd32_lstat_args *uap) { struct stat sb; struct freebsd11_stat32 sb32; int error; error = kern_statat(td, AT_SYMLINK_NOFOLLOW, AT_FDCWD, uap->path, UIO_USERSPACE, &sb, NULL); if (error != 0) return (error); error = freebsd11_cvtstat32(&sb, &sb32); if (error == 0) error = copyout(&sb32, uap->ub, sizeof (sb32)); return (error); } int freebsd11_freebsd32_fhstat(struct thread *td, struct freebsd11_freebsd32_fhstat_args *uap) { struct stat sb; struct freebsd11_stat32 sb32; struct fhandle fh; int error; error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t)); if (error != 0) return (error); error = kern_fhstat(td, fh, &sb); if (error != 0) return (error); error = freebsd11_cvtstat32(&sb, &sb32); if (error == 0) error = copyout(&sb32, uap->sb, sizeof (sb32)); return (error); } #endif int freebsd32___sysctl(struct thread *td, struct freebsd32___sysctl_args *uap) { int error, name[CTL_MAXNAME]; size_t j, oldlen; uint32_t tmp; if (uap->namelen > CTL_MAXNAME || uap->namelen < 2) return (EINVAL); error = copyin(uap->name, name, uap->namelen * sizeof(int)); if (error) return (error); if (uap->oldlenp) { error = fueword32(uap->oldlenp, &tmp); oldlen = tmp; } else { oldlen = 0; } if (error != 0) return (EFAULT); error = userland_sysctl(td, name, uap->namelen, uap->old, &oldlen, 1, uap->new, uap->newlen, &j, SCTL_MASK32); if (error) return (error); if (uap->oldlenp) suword32(uap->oldlenp, j); return (0); } int freebsd32___sysctlbyname(struct thread *td, struct freebsd32___sysctlbyname_args *uap) { size_t oldlen, rv; int error; uint32_t tmp; if (uap->oldlenp != NULL) { error = fueword32(uap->oldlenp, &tmp); oldlen = tmp; } else { error = oldlen = 0; } if (error != 0) return (EFAULT); error = kern___sysctlbyname(td, uap->name, uap->namelen, uap->old, &oldlen, uap->new, uap->newlen, &rv, SCTL_MASK32, 1); if (error != 0) return (error); if (uap->oldlenp != NULL) error = suword32(uap->oldlenp, rv); return (error); } int freebsd32_jail(struct thread *td, struct freebsd32_jail_args *uap) { uint32_t version; int error; struct jail j; error = copyin(uap->jail, &version, sizeof(uint32_t)); if (error) return (error); switch (version) { case 0: { /* FreeBSD single IPv4 jails. */ struct jail32_v0 j32_v0; bzero(&j, sizeof(struct jail)); error = copyin(uap->jail, &j32_v0, sizeof(struct jail32_v0)); if (error) return (error); CP(j32_v0, j, version); PTRIN_CP(j32_v0, j, path); PTRIN_CP(j32_v0, j, hostname); j.ip4s = htonl(j32_v0.ip_number); /* jail_v0 is host order */ break; } case 1: /* * Version 1 was used by multi-IPv4 jail implementations * that never made it into the official kernel. */ return (EINVAL); case 2: /* JAIL_API_VERSION */ { /* FreeBSD multi-IPv4/IPv6,noIP jails. 
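* (JAIL_API_VERSION 2 adds the jailname string and the ip4/ip6 address arrays copied in below.)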
*/ struct jail32 j32; error = copyin(uap->jail, &j32, sizeof(struct jail32)); if (error) return (error); CP(j32, j, version); PTRIN_CP(j32, j, path); PTRIN_CP(j32, j, hostname); PTRIN_CP(j32, j, jailname); CP(j32, j, ip4s); CP(j32, j, ip6s); PTRIN_CP(j32, j, ip4); PTRIN_CP(j32, j, ip6); break; } default: /* Sci-Fi jails are not supported, sorry. */ return (EINVAL); } return (kern_jail(td, &j)); } int freebsd32_jail_set(struct thread *td, struct freebsd32_jail_set_args *uap) { struct uio *auio; int error; /* Check that we have an even number of iovecs. */ if (uap->iovcnt & 1) return (EINVAL); error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = kern_jail_set(td, auio, uap->flags); free(auio, M_IOV); return (error); } int freebsd32_jail_get(struct thread *td, struct freebsd32_jail_get_args *uap) { struct iovec32 iov32; struct uio *auio; int error, i; /* Check that we have an even number of iovecs. */ if (uap->iovcnt & 1) return (EINVAL); error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = kern_jail_get(td, auio, uap->flags); if (error == 0) for (i = 0; i < uap->iovcnt; i++) { PTROUT_CP(auio->uio_iov[i], iov32, iov_base); CP(auio->uio_iov[i], iov32, iov_len); error = copyout(&iov32, uap->iovp + i, sizeof(iov32)); if (error != 0) break; } free(auio, M_IOV); return (error); } int freebsd32_sigaction(struct thread *td, struct freebsd32_sigaction_args *uap) { struct sigaction32 s32; struct sigaction sa, osa, *sap; int error; if (uap->act) { error = copyin(uap->act, &s32, sizeof(s32)); if (error) return (error); sa.sa_handler = PTRIN(s32.sa_u); CP(s32, sa, sa_flags); CP(s32, sa, sa_mask); sap = &sa; } else sap = NULL; error = kern_sigaction(td, uap->sig, sap, &osa, 0); if (error == 0 && uap->oact != NULL) { s32.sa_u = PTROUT(osa.sa_handler); CP(osa, s32, sa_flags); CP(osa, s32, sa_mask); error = copyout(&s32, uap->oact, sizeof(s32)); } return (error); } #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_sigaction(struct thread *td, struct freebsd4_freebsd32_sigaction_args *uap) { struct sigaction32 s32; struct sigaction sa, osa, *sap; int error; if (uap->act) { error = copyin(uap->act, &s32, sizeof(s32)); if (error) return (error); sa.sa_handler = PTRIN(s32.sa_u); CP(s32, sa, sa_flags); CP(s32, sa, sa_mask); sap = &sa; } else sap = NULL; error = kern_sigaction(td, uap->sig, sap, &osa, KSA_FREEBSD4); if (error == 0 && uap->oact != NULL) { s32.sa_u = PTROUT(osa.sa_handler); CP(osa, s32, sa_flags); CP(osa, s32, sa_mask); error = copyout(&s32, uap->oact, sizeof(s32)); } return (error); } #endif #ifdef COMPAT_43 struct osigaction32 { u_int32_t sa_u; osigset_t sa_mask; int sa_flags; }; #define ONSIG 32 int ofreebsd32_sigaction(struct thread *td, struct ofreebsd32_sigaction_args *uap) { struct osigaction32 s32; struct sigaction sa, osa, *sap; int error; if (uap->signum <= 0 || uap->signum >= ONSIG) return (EINVAL); if (uap->nsa) { error = copyin(uap->nsa, &s32, sizeof(s32)); if (error) return (error); sa.sa_handler = PTRIN(s32.sa_u); CP(s32, sa, sa_flags); OSIG2SIG(s32.sa_mask, sa.sa_mask); sap = &sa; } else sap = NULL; error = kern_sigaction(td, uap->signum, sap, &osa, KSA_OSIGSET); if (error == 0 && uap->osa != NULL) { s32.sa_u = PTROUT(osa.sa_handler); CP(osa, s32, sa_flags); SIG2OSIG(osa.sa_mask, s32.sa_mask); error = copyout(&s32, uap->osa, sizeof(s32)); } return (error); } int ofreebsd32_sigprocmask(struct thread *td, struct ofreebsd32_sigprocmask_args *uap) { sigset_t set, oset; int error; OSIG2SIG(uap->mask, set); 
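/* Widen the old int-sized mask to a full sigset_t; the previous mask is handed back through td_retval below. */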
error = kern_sigprocmask(td, uap->how, &set, &oset, SIGPROCMASK_OLD); SIG2OSIG(oset, td->td_retval[0]); return (error); } int ofreebsd32_sigpending(struct thread *td, struct ofreebsd32_sigpending_args *uap) { struct proc *p = td->td_proc; sigset_t siglist; PROC_LOCK(p); siglist = p->p_siglist; SIGSETOR(siglist, td->td_siglist); PROC_UNLOCK(p); SIG2OSIG(siglist, td->td_retval[0]); return (0); } struct sigvec32 { u_int32_t sv_handler; int sv_mask; int sv_flags; }; int ofreebsd32_sigvec(struct thread *td, struct ofreebsd32_sigvec_args *uap) { struct sigvec32 vec; struct sigaction sa, osa, *sap; int error; if (uap->signum <= 0 || uap->signum >= ONSIG) return (EINVAL); if (uap->nsv) { error = copyin(uap->nsv, &vec, sizeof(vec)); if (error) return (error); sa.sa_handler = PTRIN(vec.sv_handler); OSIG2SIG(vec.sv_mask, sa.sa_mask); sa.sa_flags = vec.sv_flags; sa.sa_flags ^= SA_RESTART; sap = &sa; } else sap = NULL; error = kern_sigaction(td, uap->signum, sap, &osa, KSA_OSIGSET); if (error == 0 && uap->osv != NULL) { vec.sv_handler = PTROUT(osa.sa_handler); SIG2OSIG(osa.sa_mask, vec.sv_mask); vec.sv_flags = osa.sa_flags; vec.sv_flags &= ~SA_NOCLDWAIT; vec.sv_flags ^= SA_RESTART; error = copyout(&vec, uap->osv, sizeof(vec)); } return (error); } int ofreebsd32_sigblock(struct thread *td, struct ofreebsd32_sigblock_args *uap) { sigset_t set, oset; OSIG2SIG(uap->mask, set); kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0); SIG2OSIG(oset, td->td_retval[0]); return (0); } int ofreebsd32_sigsetmask(struct thread *td, struct ofreebsd32_sigsetmask_args *uap) { sigset_t set, oset; OSIG2SIG(uap->mask, set); kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0); SIG2OSIG(oset, td->td_retval[0]); return (0); } int ofreebsd32_sigsuspend(struct thread *td, struct ofreebsd32_sigsuspend_args *uap) { sigset_t mask; OSIG2SIG(uap->mask, mask); return (kern_sigsuspend(td, mask)); } struct sigstack32 { u_int32_t ss_sp; int ss_onstack; }; int ofreebsd32_sigstack(struct thread *td, struct ofreebsd32_sigstack_args *uap) { struct sigstack32 s32; struct sigstack nss, oss; int error = 0, unss; if (uap->nss != NULL) { error = copyin(uap->nss, &s32, sizeof(s32)); if (error) return (error); nss.ss_sp = PTRIN(s32.ss_sp); CP(s32, nss, ss_onstack); unss = 1; } else { unss = 0; } oss.ss_sp = td->td_sigstk.ss_sp; oss.ss_onstack = sigonstack(cpu_getstack(td)); if (unss) { td->td_sigstk.ss_sp = nss.ss_sp; td->td_sigstk.ss_size = 0; td->td_sigstk.ss_flags |= (nss.ss_onstack & SS_ONSTACK); td->td_pflags |= TDP_ALTSTACK; } if (uap->oss != NULL) { s32.ss_sp = PTROUT(oss.ss_sp); CP(oss, s32, ss_onstack); error = copyout(&s32, uap->oss, sizeof(s32)); } return (error); } #endif int freebsd32_nanosleep(struct thread *td, struct freebsd32_nanosleep_args *uap) { return (freebsd32_user_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME, uap->rqtp, uap->rmtp)); } int freebsd32_clock_nanosleep(struct thread *td, struct freebsd32_clock_nanosleep_args *uap) { int error; error = freebsd32_user_clock_nanosleep(td, uap->clock_id, uap->flags, uap->rqtp, uap->rmtp); return (kern_posix_error(td, error)); } static int freebsd32_user_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags, const struct timespec32 *ua_rqtp, struct timespec32 *ua_rmtp) { struct timespec32 rmt32, rqt32; struct timespec rmt, rqt; int error, error2; error = copyin(ua_rqtp, &rqt32, sizeof(rqt32)); if (error) return (error); CP(rqt32, rqt, tv_sec); CP(rqt32, rqt, tv_nsec); error = kern_clock_nanosleep(td, clock_id, flags, &rqt, &rmt); if (error == EINTR && ua_rmtp != NULL && 
(flags & TIMER_ABSTIME) == 0) { CP(rmt, rmt32, tv_sec); CP(rmt, rmt32, tv_nsec); error2 = copyout(&rmt32, ua_rmtp, sizeof(rmt32)); if (error2 != 0) error = error2; } return (error); } int freebsd32_clock_gettime(struct thread *td, struct freebsd32_clock_gettime_args *uap) { struct timespec ats; struct timespec32 ats32; int error; error = kern_clock_gettime(td, uap->clock_id, &ats); if (error == 0) { CP(ats, ats32, tv_sec); CP(ats, ats32, tv_nsec); error = copyout(&ats32, uap->tp, sizeof(ats32)); } return (error); } int freebsd32_clock_settime(struct thread *td, struct freebsd32_clock_settime_args *uap) { struct timespec ats; struct timespec32 ats32; int error; error = copyin(uap->tp, &ats32, sizeof(ats32)); if (error) return (error); CP(ats32, ats, tv_sec); CP(ats32, ats, tv_nsec); return (kern_clock_settime(td, uap->clock_id, &ats)); } int freebsd32_clock_getres(struct thread *td, struct freebsd32_clock_getres_args *uap) { struct timespec ts; struct timespec32 ts32; int error; if (uap->tp == NULL) return (0); error = kern_clock_getres(td, uap->clock_id, &ts); if (error == 0) { CP(ts, ts32, tv_sec); CP(ts, ts32, tv_nsec); error = copyout(&ts32, uap->tp, sizeof(ts32)); } return (error); } int freebsd32_ktimer_create(struct thread *td, struct freebsd32_ktimer_create_args *uap) { struct sigevent32 ev32; struct sigevent ev, *evp; int error, id; if (uap->evp == NULL) { evp = NULL; } else { evp = &ev; error = copyin(uap->evp, &ev32, sizeof(ev32)); if (error != 0) return (error); error = convert_sigevent32(&ev32, &ev); if (error != 0) return (error); } error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1); if (error == 0) { error = copyout(&id, uap->timerid, sizeof(int)); if (error != 0) kern_ktimer_delete(td, id); } return (error); } int freebsd32_ktimer_settime(struct thread *td, struct freebsd32_ktimer_settime_args *uap) { struct itimerspec32 val32, oval32; struct itimerspec val, oval, *ovalp; int error; error = copyin(uap->value, &val32, sizeof(val32)); if (error != 0) return (error); ITS_CP(val32, val); ovalp = uap->ovalue != NULL ? 
&oval : NULL; error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp); if (error == 0 && uap->ovalue != NULL) { ITS_CP(oval, oval32); error = copyout(&oval32, uap->ovalue, sizeof(oval32)); } return (error); } int freebsd32_ktimer_gettime(struct thread *td, struct freebsd32_ktimer_gettime_args *uap) { struct itimerspec32 val32; struct itimerspec val; int error; error = kern_ktimer_gettime(td, uap->timerid, &val); if (error == 0) { ITS_CP(val, val32); error = copyout(&val32, uap->value, sizeof(val32)); } return (error); } int freebsd32_clock_getcpuclockid2(struct thread *td, struct freebsd32_clock_getcpuclockid2_args *uap) { clockid_t clk_id; int error; error = kern_clock_getcpuclockid2(td, PAIR32TO64(id_t, uap->id), uap->which, &clk_id); if (error == 0) error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t)); return (error); } int freebsd32_thr_new(struct thread *td, struct freebsd32_thr_new_args *uap) { struct thr_param32 param32; struct thr_param param; int error; if (uap->param_size < 0 || uap->param_size > sizeof(struct thr_param32)) return (EINVAL); bzero(&param, sizeof(struct thr_param)); bzero(&param32, sizeof(struct thr_param32)); error = copyin(uap->param, &param32, uap->param_size); if (error != 0) return (error); param.start_func = PTRIN(param32.start_func); param.arg = PTRIN(param32.arg); param.stack_base = PTRIN(param32.stack_base); param.stack_size = param32.stack_size; param.tls_base = PTRIN(param32.tls_base); param.tls_size = param32.tls_size; param.child_tid = PTRIN(param32.child_tid); param.parent_tid = PTRIN(param32.parent_tid); param.flags = param32.flags; param.rtp = PTRIN(param32.rtp); param.spare[0] = PTRIN(param32.spare[0]); param.spare[1] = PTRIN(param32.spare[1]); param.spare[2] = PTRIN(param32.spare[2]); return (kern_thr_new(td, &param)); } int freebsd32_thr_suspend(struct thread *td, struct freebsd32_thr_suspend_args *uap) { struct timespec32 ts32; struct timespec ts, *tsp; int error; error = 0; tsp = NULL; if (uap->timeout != NULL) { error = copyin((const void *)uap->timeout, (void *)&ts32, sizeof(struct timespec32)); if (error != 0) return (error); ts.tv_sec = ts32.tv_sec; ts.tv_nsec = ts32.tv_nsec; tsp = &ts; } return (kern_thr_suspend(td, tsp)); } void siginfo_to_siginfo32(const siginfo_t *src, struct siginfo32 *dst) { bzero(dst, sizeof(*dst)); dst->si_signo = src->si_signo; dst->si_errno = src->si_errno; dst->si_code = src->si_code; dst->si_pid = src->si_pid; dst->si_uid = src->si_uid; dst->si_status = src->si_status; dst->si_addr = (uintptr_t)src->si_addr; dst->si_value.sival_int = src->si_value.sival_int; dst->si_timerid = src->si_timerid; dst->si_overrun = src->si_overrun; } #ifndef _FREEBSD32_SYSPROTO_H_ struct freebsd32_sigqueue_args { pid_t pid; int signum; /* union sigval32 */ int value; }; #endif int freebsd32_sigqueue(struct thread *td, struct freebsd32_sigqueue_args *uap) { union sigval sv; /* * On 32-bit ABIs, sival_int and sival_ptr are the same. * On 64-bit little-endian ABIs, the low bits are the same. * In 64-bit big-endian ABIs, sival_int overlaps with * sival_ptr's HIGH bits. We choose to support sival_int * rather than sival_ptr in this case as it seems to be * more common.
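* The bzero() below guarantees the unused half of the 64-bit union reads as zero on either endianness.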
*/ bzero(&sv, sizeof(sv)); sv.sival_int = uap->value; return (kern_sigqueue(td, uap->pid, uap->signum, &sv)); } int freebsd32_sigtimedwait(struct thread *td, struct freebsd32_sigtimedwait_args *uap) { struct timespec32 ts32; struct timespec ts; struct timespec *timeout; sigset_t set; ksiginfo_t ksi; struct siginfo32 si32; int error; if (uap->timeout) { error = copyin(uap->timeout, &ts32, sizeof(ts32)); if (error) return (error); ts.tv_sec = ts32.tv_sec; ts.tv_nsec = ts32.tv_nsec; timeout = &ts; } else timeout = NULL; error = copyin(uap->set, &set, sizeof(set)); if (error) return (error); error = kern_sigtimedwait(td, set, &ksi, timeout); if (error) return (error); if (uap->info) { siginfo_to_siginfo32(&ksi.ksi_info, &si32); error = copyout(&si32, uap->info, sizeof(struct siginfo32)); } if (error == 0) td->td_retval[0] = ksi.ksi_signo; return (error); } /* * MPSAFE */ int freebsd32_sigwaitinfo(struct thread *td, struct freebsd32_sigwaitinfo_args *uap) { ksiginfo_t ksi; struct siginfo32 si32; sigset_t set; int error; error = copyin(uap->set, &set, sizeof(set)); if (error) return (error); error = kern_sigtimedwait(td, set, &ksi, NULL); if (error) return (error); if (uap->info) { siginfo_to_siginfo32(&ksi.ksi_info, &si32); error = copyout(&si32, uap->info, sizeof(struct siginfo32)); } if (error == 0) td->td_retval[0] = ksi.ksi_signo; return (error); } int freebsd32_cpuset_setid(struct thread *td, struct freebsd32_cpuset_setid_args *uap) { return (kern_cpuset_setid(td, uap->which, PAIR32TO64(id_t, uap->id), uap->setid)); } int freebsd32_cpuset_getid(struct thread *td, struct freebsd32_cpuset_getid_args *uap) { return (kern_cpuset_getid(td, uap->level, uap->which, PAIR32TO64(id_t, uap->id), uap->setid)); } int freebsd32_cpuset_getaffinity(struct thread *td, struct freebsd32_cpuset_getaffinity_args *uap) { return (kern_cpuset_getaffinity(td, uap->level, uap->which, PAIR32TO64(id_t,uap->id), uap->cpusetsize, uap->mask)); } int freebsd32_cpuset_setaffinity(struct thread *td, struct freebsd32_cpuset_setaffinity_args *uap) { return (kern_cpuset_setaffinity(td, uap->level, uap->which, PAIR32TO64(id_t,uap->id), uap->cpusetsize, uap->mask)); } int freebsd32_cpuset_getdomain(struct thread *td, struct freebsd32_cpuset_getdomain_args *uap) { return (kern_cpuset_getdomain(td, uap->level, uap->which, PAIR32TO64(id_t,uap->id), uap->domainsetsize, uap->mask, uap->policy)); } int freebsd32_cpuset_setdomain(struct thread *td, struct freebsd32_cpuset_setdomain_args *uap) { return (kern_cpuset_setdomain(td, uap->level, uap->which, PAIR32TO64(id_t,uap->id), uap->domainsetsize, uap->mask, uap->policy)); } int freebsd32_nmount(struct thread *td, struct freebsd32_nmount_args /* { struct iovec *iovp; unsigned int iovcnt; int flags; } */ *uap) { struct uio *auio; uint64_t flags; int error; /* * Mount flags are now 64 bits. On 32-bit architectures only * 32 bits are passed in, but from here on everything handles * 64-bit flags correctly. */ flags = uap->flags; AUDIT_ARG_FFLAGS(flags); /* * Filter out MNT_ROOTFS. We do not want clients of nmount() in * userspace to set this flag, but we must filter it out if we want * MNT_UPDATE on the root file system to work. * MNT_ROOTFS should only be set by the kernel when mounting its * root file system. */ flags &= ~MNT_ROOTFS; /* * Check that we have an even number of iovecs * and that we have at least two options.
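* (Each option is a name/value pair of iovecs, e.g. "fstype"/"ufs" and "fspath"/"/", so two options mean iovcnt must be at least 4.)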
*/ if ((uap->iovcnt & 1) || (uap->iovcnt < 4)) return (EINVAL); error = freebsd32_copyinuio(uap->iovp, uap->iovcnt, &auio); if (error) return (error); error = vfs_donmount(td, flags, auio); free(auio, M_IOV); return error; } #if 0 int freebsd32_xxx(struct thread *td, struct freebsd32_xxx_args *uap) { struct yyy32 *p32, s32; struct yyy *p = NULL, s; struct xxx_arg ap; int error; if (uap->zzz) { error = copyin(uap->zzz, &s32, sizeof(s32)); if (error) return (error); /* translate in */ p = &s; } error = kern_xxx(td, p); if (error) return (error); if (uap->zzz) { /* translate out */ error = copyout(&s32, p32, sizeof(s32)); } return (error); } #endif int syscall32_module_handler(struct module *mod, int what, void *arg) { return (kern_syscall_module_handler(freebsd32_sysent, mod, what, arg)); } int syscall32_helper_register(struct syscall_helper_data *sd, int flags) { return (kern_syscall_helper_register(freebsd32_sysent, sd, flags)); } int syscall32_helper_unregister(struct syscall_helper_data *sd) { return (kern_syscall_helper_unregister(freebsd32_sysent, sd)); } int freebsd32_copyout_strings(struct image_params *imgp, uintptr_t *stack_base) { int argc, envc, i; u_int32_t *vectp; char *stringp; uintptr_t destp, ustringp; struct freebsd32_ps_strings *arginfo; char canary[sizeof(long) * 8]; int32_t pagesizes32[MAXPAGESIZES]; size_t execpath_len; int error, szsigcode; /* * Calculate string base and vector table pointers. * Also deal with signal trampoline code for this exec type. */ if (imgp->execpath != NULL && imgp->auxargs != NULL) execpath_len = strlen(imgp->execpath) + 1; else execpath_len = 0; arginfo = (struct freebsd32_ps_strings *)curproc->p_sysent-> sv_psstrings; imgp->ps_strings = arginfo; if (imgp->proc->p_sysent->sv_sigcode_base == 0) szsigcode = *(imgp->proc->p_sysent->sv_szsigcode); else szsigcode = 0; destp = (uintptr_t)arginfo; /* * install sigcode */ if (szsigcode != 0) { destp -= szsigcode; destp = rounddown2(destp, sizeof(uint32_t)); error = copyout(imgp->proc->p_sysent->sv_sigcode, (void *)destp, szsigcode); if (error != 0) return (error); } /* * Copy the image path for the rtld. */ if (execpath_len != 0) { destp -= execpath_len; imgp->execpathp = (void *)destp; error = copyout(imgp->execpath, imgp->execpathp, execpath_len); if (error != 0) return (error); } /* * Prepare the canary for SSP. */ arc4rand(canary, sizeof(canary), 0); destp -= sizeof(canary); imgp->canary = (void *)destp; error = copyout(canary, imgp->canary, sizeof(canary)); if (error != 0) return (error); imgp->canarylen = sizeof(canary); /* * Prepare the pagesizes array. */ for (i = 0; i < MAXPAGESIZES; i++) pagesizes32[i] = (uint32_t)pagesizes[i]; destp -= sizeof(pagesizes32); destp = rounddown2(destp, sizeof(uint32_t)); imgp->pagesizes = (void *)destp; error = copyout(pagesizes32, imgp->pagesizes, sizeof(pagesizes32)); if (error != 0) return (error); imgp->pagesizeslen = sizeof(pagesizes32); /* * Allocate room for the argument and environment strings. */ destp -= ARG_MAX - imgp->args->stringspace; destp = rounddown2(destp, sizeof(uint32_t)); ustringp = destp; exec_stackgap(imgp, &destp); if (imgp->auxargs) { /* * Allocate room on the stack for the ELF auxargs * array. It has up to AT_COUNT entries. */ destp -= AT_COUNT * sizeof(Elf32_Auxinfo); destp = rounddown2(destp, sizeof(uint32_t)); } vectp = (uint32_t *)destp; /* * Allocate room for the argv[] and env vectors including the * terminating NULL pointers. 
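* That is argc pointers, a NULL separator, envc pointers, and a final NULL, matching the vectp adjustment below.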
*/ vectp -= imgp->args->argc + 1 + imgp->args->envc + 1; /* * vectp also becomes our initial stack base */ *stack_base = (uintptr_t)vectp; stringp = imgp->args->begin_argv; argc = imgp->args->argc; envc = imgp->args->envc; /* * Copy out strings - arguments and environment. */ error = copyout(stringp, (void *)ustringp, ARG_MAX - imgp->args->stringspace); if (error != 0) return (error); /* * Fill in "ps_strings" struct for ps, w, etc. */ imgp->argv = vectp; if (suword32(&arginfo->ps_argvstr, (u_int32_t)(intptr_t)vectp) != 0 || suword32(&arginfo->ps_nargvstr, argc) != 0) return (EFAULT); /* * Fill in argument portion of vector table. */ for (; argc > 0; --argc) { if (suword32(vectp++, ustringp) != 0) return (EFAULT); while (*stringp++ != 0) ustringp++; ustringp++; } /* a null vector table pointer separates the argp's from the envp's */ if (suword32(vectp++, 0) != 0) return (EFAULT); imgp->envv = vectp; if (suword32(&arginfo->ps_envstr, (u_int32_t)(intptr_t)vectp) != 0 || suword32(&arginfo->ps_nenvstr, envc) != 0) return (EFAULT); /* * Fill in environment portion of vector table. */ for (; envc > 0; --envc) { if (suword32(vectp++, ustringp) != 0) return (EFAULT); while (*stringp++ != 0) ustringp++; ustringp++; } /* end of vector table is a null pointer */ if (suword32(vectp, 0) != 0) return (EFAULT); if (imgp->auxargs) { vectp++; error = imgp->sysent->sv_copyout_auxargs(imgp, (uintptr_t)vectp); if (error != 0) return (error); } return (0); } int freebsd32_kldstat(struct thread *td, struct freebsd32_kldstat_args *uap) { struct kld_file_stat *stat; struct kld32_file_stat *stat32; int error, version; if ((error = copyin(&uap->stat->version, &version, sizeof(version))) != 0) return (error); if (version != sizeof(struct kld32_file_stat_1) && version != sizeof(struct kld32_file_stat)) return (EINVAL); stat = malloc(sizeof(*stat), M_TEMP, M_WAITOK | M_ZERO); stat32 = malloc(sizeof(*stat32), M_TEMP, M_WAITOK | M_ZERO); error = kern_kldstat(td, uap->fileid, stat); if (error == 0) { bcopy(&stat->name[0], &stat32->name[0], sizeof(stat->name)); CP(*stat, *stat32, refs); CP(*stat, *stat32, id); PTROUT_CP(*stat, *stat32, address); CP(*stat, *stat32, size); bcopy(&stat->pathname[0], &stat32->pathname[0], sizeof(stat->pathname)); stat32->version = version; error = copyout(stat32, uap->stat, version); } free(stat, M_TEMP); free(stat32, M_TEMP); return (error); } int freebsd32_posix_fallocate(struct thread *td, struct freebsd32_posix_fallocate_args *uap) { int error; error = kern_posix_fallocate(td, uap->fd, PAIR32TO64(off_t, uap->offset), PAIR32TO64(off_t, uap->len)); return (kern_posix_error(td, error)); } int freebsd32_posix_fadvise(struct thread *td, struct freebsd32_posix_fadvise_args *uap) { int error; error = kern_posix_fadvise(td, uap->fd, PAIR32TO64(off_t, uap->offset), PAIR32TO64(off_t, uap->len), uap->advice); return (kern_posix_error(td, error)); } int convert_sigevent32(struct sigevent32 *sig32, struct sigevent *sig) { CP(*sig32, *sig, sigev_notify); switch (sig->sigev_notify) { case SIGEV_NONE: break; case SIGEV_THREAD_ID: CP(*sig32, *sig, sigev_notify_thread_id); /* FALLTHROUGH */ case SIGEV_SIGNAL: CP(*sig32, *sig, sigev_signo); PTRIN_CP(*sig32, *sig, sigev_value.sival_ptr); break; case SIGEV_KEVENT: CP(*sig32, *sig, sigev_notify_kqueue); CP(*sig32, *sig, sigev_notify_kevent_flags); PTRIN_CP(*sig32, *sig, sigev_value.sival_ptr); break; default: return (EINVAL); } return (0); } int freebsd32_procctl(struct thread *td, struct freebsd32_procctl_args *uap) { void *data; union { struct 
procctl_reaper_status rs; struct procctl_reaper_pids rp; struct procctl_reaper_kill rk; } x; union { struct procctl_reaper_pids32 rp; } x32; int error, error1, flags, signum; if (uap->com >= PROC_PROCCTL_MD_MIN) return (cpu_procctl(td, uap->idtype, PAIR32TO64(id_t, uap->id), uap->com, PTRIN(uap->data))); switch (uap->com) { case PROC_ASLR_CTL: case PROC_PROTMAX_CTL: case PROC_SPROTECT: case PROC_STACKGAP_CTL: case PROC_TRACE_CTL: case PROC_TRAPCAP_CTL: case PROC_NO_NEW_PRIVS_CTL: + case PROC_WXMAP_CTL: error = copyin(PTRIN(uap->data), &flags, sizeof(flags)); if (error != 0) return (error); data = &flags; break; case PROC_REAP_ACQUIRE: case PROC_REAP_RELEASE: if (uap->data != NULL) return (EINVAL); data = NULL; break; case PROC_REAP_STATUS: data = &x.rs; break; case PROC_REAP_GETPIDS: error = copyin(uap->data, &x32.rp, sizeof(x32.rp)); if (error != 0) return (error); CP(x32.rp, x.rp, rp_count); PTRIN_CP(x32.rp, x.rp, rp_pids); data = &x.rp; break; case PROC_REAP_KILL: error = copyin(uap->data, &x.rk, sizeof(x.rk)); if (error != 0) return (error); data = &x.rk; break; case PROC_ASLR_STATUS: case PROC_PROTMAX_STATUS: case PROC_STACKGAP_STATUS: case PROC_TRACE_STATUS: case PROC_TRAPCAP_STATUS: case PROC_NO_NEW_PRIVS_STATUS: + case PROC_WXMAP_STATUS: data = &flags; break; case PROC_PDEATHSIG_CTL: error = copyin(uap->data, &signum, sizeof(signum)); if (error != 0) return (error); data = &signum; break; case PROC_PDEATHSIG_STATUS: data = &signum; break; default: return (EINVAL); } error = kern_procctl(td, uap->idtype, PAIR32TO64(id_t, uap->id), uap->com, data); switch (uap->com) { case PROC_REAP_STATUS: if (error == 0) error = copyout(&x.rs, uap->data, sizeof(x.rs)); break; case PROC_REAP_KILL: error1 = copyout(&x.rk, uap->data, sizeof(x.rk)); if (error == 0) error = error1; break; case PROC_ASLR_STATUS: case PROC_PROTMAX_STATUS: case PROC_STACKGAP_STATUS: case PROC_TRACE_STATUS: case PROC_TRAPCAP_STATUS: case PROC_NO_NEW_PRIVS_STATUS: + case PROC_WXMAP_STATUS: if (error == 0) error = copyout(&flags, uap->data, sizeof(flags)); break; case PROC_PDEATHSIG_STATUS: if (error == 0) error = copyout(&signum, uap->data, sizeof(signum)); break; } return (error); } int freebsd32_fcntl(struct thread *td, struct freebsd32_fcntl_args *uap) { long tmp; switch (uap->cmd) { /* * Do unsigned conversion for arg when operation * interprets it as flags or pointer. 
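* Casting through (unsigned int) zero-extends the 32-bit arg into the long; a plain assignment would sign-extend it and set the upper bits on LP64.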
*/ case F_SETLK_REMOTE: case F_SETLKW: case F_SETLK: case F_GETLK: case F_SETFD: case F_SETFL: case F_OGETLK: case F_OSETLK: case F_OSETLKW: tmp = (unsigned int)(uap->arg); break; default: tmp = uap->arg; break; } return (kern_fcntl_freebsd(td, uap->fd, uap->cmd, tmp)); } int freebsd32_ppoll(struct thread *td, struct freebsd32_ppoll_args *uap) { struct timespec32 ts32; struct timespec ts, *tsp; sigset_t set, *ssp; int error; if (uap->ts != NULL) { error = copyin(uap->ts, &ts32, sizeof(ts32)); if (error != 0) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); tsp = &ts; } else tsp = NULL; if (uap->set != NULL) { error = copyin(uap->set, &set, sizeof(set)); if (error != 0) return (error); ssp = &set; } else ssp = NULL; return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp)); } int freebsd32_sched_rr_get_interval(struct thread *td, struct freebsd32_sched_rr_get_interval_args *uap) { struct timespec ts; struct timespec32 ts32; int error; error = kern_sched_rr_get_interval(td, uap->pid, &ts); if (error == 0) { CP(ts, ts32, tv_sec); CP(ts, ts32, tv_nsec); error = copyout(&ts32, uap->interval, sizeof(ts32)); } return (error); } static void timex_to_32(struct timex32 *dst, struct timex *src) { CP(*src, *dst, modes); CP(*src, *dst, offset); CP(*src, *dst, freq); CP(*src, *dst, maxerror); CP(*src, *dst, esterror); CP(*src, *dst, status); CP(*src, *dst, constant); CP(*src, *dst, precision); CP(*src, *dst, tolerance); CP(*src, *dst, ppsfreq); CP(*src, *dst, jitter); CP(*src, *dst, shift); CP(*src, *dst, stabil); CP(*src, *dst, jitcnt); CP(*src, *dst, calcnt); CP(*src, *dst, errcnt); CP(*src, *dst, stbcnt); } static void timex_from_32(struct timex *dst, struct timex32 *src) { CP(*src, *dst, modes); CP(*src, *dst, offset); CP(*src, *dst, freq); CP(*src, *dst, maxerror); CP(*src, *dst, esterror); CP(*src, *dst, status); CP(*src, *dst, constant); CP(*src, *dst, precision); CP(*src, *dst, tolerance); CP(*src, *dst, ppsfreq); CP(*src, *dst, jitter); CP(*src, *dst, shift); CP(*src, *dst, stabil); CP(*src, *dst, jitcnt); CP(*src, *dst, calcnt); CP(*src, *dst, errcnt); CP(*src, *dst, stbcnt); } int freebsd32_ntp_adjtime(struct thread *td, struct freebsd32_ntp_adjtime_args *uap) { struct timex tx; struct timex32 tx32; int error, retval; error = copyin(uap->tp, &tx32, sizeof(tx32)); if (error == 0) { timex_from_32(&tx, &tx32); error = kern_ntp_adjtime(td, &tx, &retval); if (error == 0) { timex_to_32(&tx32, &tx); error = copyout(&tx32, uap->tp, sizeof(tx32)); if (error == 0) td->td_retval[0] = retval; } } return (error); } int freebsd32_fspacectl(struct thread *td, struct freebsd32_fspacectl_args *uap) { struct spacectl_range rqsr, rmsr; struct spacectl_range32 rqsr32, rmsr32; int error, cerror; error = copyin(uap->rqsr, &rqsr32, sizeof(rqsr32)); if (error != 0) return (error); rqsr.r_offset = PAIR32TO64(off_t, rqsr32.r_offset); rqsr.r_len = PAIR32TO64(off_t, rqsr32.r_len); error = kern_fspacectl(td, uap->fd, uap->cmd, &rqsr, uap->flags, &rmsr); if (uap->rmsr != NULL) { #if BYTE_ORDER == LITTLE_ENDIAN rmsr32.r_offset1 = rmsr.r_offset; rmsr32.r_offset2 = rmsr.r_offset >> 32; rmsr32.r_len1 = rmsr.r_len; rmsr32.r_len2 = rmsr.r_len >> 32; #else rmsr32.r_offset1 = rmsr.r_offset >> 32; rmsr32.r_offset2 = rmsr.r_offset; rmsr32.r_len1 = rmsr.r_len >> 32; rmsr32.r_len2 = rmsr.r_len; #endif cerror = copyout(&rmsr32, uap->rmsr, sizeof(rmsr32)); if (error == 0) error = cerror; } return (error); } diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index 5b888766daea..ef1edfcabaf0 100644 --- 
a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -1,2700 +1,2703 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2017 Dell EMC * Copyright (c) 2000-2001, 2003 David O'Brien * Copyright (c) 1995-1996 Søren Schmidt * Copyright (c) 1996 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ELF_NOTE_ROUNDSIZE 4 #define OLD_EI_BRAND 8 static int __elfN(check_header)(const Elf_Ehdr *hdr); static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int32_t *osrel, uint32_t *fctl0); static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry); static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot); static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel); static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel, boolean_t *has_fctl0, uint32_t *fctl0); static vm_prot_t __elfN(trans_prot)(Elf_Word); static Elf_Word __elfN(untrans_prot)(vm_prot_t); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW | CTLFLAG_MPSAFE, 0, ""); int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); static int elf_legacy_coredump = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, &elf_legacy_coredump, 0, "include all and only RW pages in 
core dumps"); int __elfN(nxstack) = #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \ (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \ defined(__riscv) 1; #else 0; #endif SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, nxstack, CTLFLAG_RW, &__elfN(nxstack), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack"); #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) int i386_read_exec = 0; SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, "enable execution from readable segments"); #endif static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR; static int sysctl_pie_base(SYSCTL_HANDLER_ARGS) { u_long val; int error; val = __elfN(pie_base); error = sysctl_handle_long(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if ((val & PAGE_MASK) != 0) return (EINVAL); __elfN(pie_base) = val; return (0); } SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base, CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_pie_base, "LU", "PIE load base without randomization"); SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, ""); #define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr) static int __elfN(aslr_enabled) = 0; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable address map randomization"); static int __elfN(pie_aslr_enabled) = 0; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN, &__elfN(pie_aslr_enabled), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable address map randomization for PIE binaries"); static int __elfN(aslr_honor_sbrk) = 1; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW, &__elfN(aslr_honor_sbrk), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); static int __elfN(aslr_stack_gap) = 3; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack_gap, CTLFLAG_RW, &__elfN(aslr_stack_gap), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": maximum percentage of main stack to waste on a random gap"); static int __elfN(sigfastblock) = 1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock, CTLFLAG_RWTUN, &__elfN(sigfastblock), 0, "enable sigfastblock for new processes"); static bool __elfN(allow_wx) = true; SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx, CTLFLAG_RWTUN, &__elfN(allow_wx), 0, "Allow pages to be mapped simultaneously writable and executable"); static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a)) Elf_Brandnote __elfN(freebsd_brandnote) = { .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), .hdr.n_descsz = sizeof(int32_t), .hdr.n_type = NT_FREEBSD_ABI_TAG, .vendor = FREEBSD_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = __elfN(freebsd_trans_osrel) }; static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) { uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); *osrel = *(const int32_t *)(p); return (true); } static const char GNU_ABI_VENDOR[] = "GNU"; static int GNU_KFREEBSD_ABI_DESC = 3; Elf_Brandnote __elfN(kfreebsd_brandnote) = { .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), .hdr.n_descsz = 16, /* XXX at least 16 */ .hdr.n_type = 1, .vendor = GNU_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = kfreebsd_trans_osrel }; static bool kfreebsd_trans_osrel(const 
Elf_Note *note, int32_t *osrel) { const Elf32_Word *desc; uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; if (desc[0] != GNU_KFREEBSD_ABI_DESC) return (false); /* * Debian GNU/kFreeBSD embed the earliest compatible kernel version * (__FreeBSD_version: Rxx) in the LSB way. */ *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; return (true); } int __elfN(insert_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == NULL) { elf_brand_list[i] = entry; break; } } if (i == MAX_BRANDS) { printf("WARNING: %s: could not insert brandinfo entry: %p\n", __func__, entry); return (-1); } return (0); } int __elfN(remove_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == entry) { elf_brand_list[i] = NULL; break; } } if (i == MAX_BRANDS) return (-1); return (0); } int __elfN(brand_inuse)(Elf_Brandinfo *entry) { struct proc *p; int rval = FALSE; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_sysent == entry->sysvec) { rval = TRUE; break; } } sx_sunlock(&allproc_lock); return (rval); } static Elf_Brandinfo * __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int32_t *osrel, uint32_t *fctl0) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; Elf_Brandinfo *bi, *bi_m; boolean_t ret, has_fctl0; int i, interp_name_len; interp_name_len = interp != NULL ? strlen(interp) + 1 : 0; /* * We support four types of branding -- (1) the ELF EI_OSABI field * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string * branding w/in the ELF header, (3) path of the `interp_path' * field, and (4) the ".note.ABI-tag" ELF section. */ /* Look for an ".note.ABI-tag" ELF section */ bi_m = NULL; for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL) continue; if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0) continue; if (hdr->e_machine == bi->machine && (bi->flags & (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { has_fctl0 = false; *fctl0 = 0; *osrel = 0; ret = __elfN(check_note)(imgp, bi->brand_note, osrel, &has_fctl0, fctl0); /* Give brand a chance to veto check_note's guess */ if (ret && bi->header_supported) { ret = bi->header_supported(imgp, osrel, has_fctl0 ? fctl0 : NULL); } /* * If note checker claimed the binary, but the * interpreter path in the image does not * match default one for the brand, try to * search for other brands with the same * interpreter. Either there is better brand * with the right interpreter, or, failing * this, we return first brand which accepted * our note and, optionally, header. */ if (ret && bi_m == NULL && interp != NULL && (bi->interp_path == NULL || (strlen(bi->interp_path) + 1 != interp_name_len || strncmp(interp, bi->interp_path, interp_name_len) != 0))) { bi_m = bi; ret = 0; } if (ret) return (bi); } } if (bi_m != NULL) return (bi_m); /* If the executable has a brand, search for it in the brand list. 
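* The brand may be recorded either in the EI_OSABI byte or as the legacy FreeBSD 3.x string at OLD_EI_BRAND within e_ident.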
*/ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) continue; if (hdr->e_machine == bi->machine && (hdr->e_ident[EI_OSABI] == bi->brand || (bi->compat_3_brand != NULL && strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND], bi->compat_3_brand) == 0))) { /* Looks good, but give brand a chance to veto */ if (bi->header_supported == NULL || bi->header_supported(imgp, NULL, NULL)) { /* * Again, prefer strictly matching * interpreter path. */ if (interp_name_len == 0 && bi->interp_path == NULL) return (bi); if (bi->interp_path != NULL && strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0) return (bi); if (bi_m == NULL) bi_m = bi; } } } if (bi_m != NULL) return (bi_m); /* No known brand, see if the header is recognized by any brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY || bi->header_supported == NULL) continue; if (hdr->e_machine == bi->machine) { ret = bi->header_supported(imgp, NULL, NULL); if (ret) return (bi); } } /* Lacking a known brand, search for a recognized interpreter. */ if (interp != NULL) { for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC)) != 0) continue; if (hdr->e_machine == bi->machine && bi->interp_path != NULL && /* ELF image p_filesz includes terminating zero */ strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0 && (bi->header_supported == NULL || bi->header_supported(imgp, NULL, NULL))) return (bi); } } /* Lacking a recognized interpreter, try the default brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) continue; if (hdr->e_machine == bi->machine && __elfN(fallback_brand) == bi->brand && (bi->header_supported == NULL || bi->header_supported(imgp, NULL, NULL))) return (bi); } return (NULL); } static bool __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr) { return (hdr->e_phoff <= PAGE_SIZE && (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff); } static int __elfN(check_header)(const Elf_Ehdr *hdr) { Elf_Brandinfo *bi; int i; if (!IS_ELF(*hdr) || hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA || hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_phentsize != sizeof(Elf_Phdr) || hdr->e_version != ELF_TARG_VER) return (ENOEXEC); /* * Make sure we have at least one brand for this machine. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && bi->machine == hdr->e_machine) break; } if (i == MAX_BRANDS) return (ENOEXEC); return (0); } static int __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot) { struct sf_buf *sf; int error; vm_offset_t off; /* * Create the page if it doesn't exist yet. Ignore errors. */ vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) - trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL); /* * Find the page from the underlying object. 
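/*
 * A standalone userland sketch (assumed helper name, not part of this
 * file) of the first-page bound check done by __elfN(phdr_in_zero_page)
 * above; the subtraction form keeps the size test free of overflow:
 */
#if 0
#include <sys/param.h>
#include <elf.h>
#include <stdbool.h>

static bool
phdrs_fit_in_first_page(const Elf64_Ehdr *hdr)
{
	/* The table must start in page zero... */
	if (hdr->e_phoff > PAGE_SIZE)
		return (false);
	/* ...and end before the page does. */
	return ((unsigned int)hdr->e_phentsize * hdr->e_phnum <=
	    PAGE_SIZE - hdr->e_phoff);
}
#endif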
*/ if (object != NULL) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, end - start); vm_imgact_unmap_page(sf); if (error != 0) return (KERN_FAILURE); } return (KERN_SUCCESS); } static int __elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) { struct sf_buf *sf; vm_offset_t off; vm_size_t sz; int error, locked, rv; if (start != trunc_page(start)) { rv = __elfN(map_partial)(map, object, offset, start, round_page(start), prot); if (rv != KERN_SUCCESS) return (rv); offset += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { rv = __elfN(map_partial)(map, object, offset + trunc_page(end) - start, trunc_page(end), end, prot); if (rv != KERN_SUCCESS) return (rv); end = trunc_page(end); } if (start >= end) return (KERN_SUCCESS); if ((offset & PAGE_MASK) != 0) { /* * The mapping is not page aligned. This means that we have * to copy the data. */ rv = vm_map_fixed(map, NULL, 0, start, end - start, prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL); if (rv != KERN_SUCCESS) return (rv); if (object == NULL) return (KERN_SUCCESS); for (; start < end; start += sz) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); sz = end - start; if (sz > PAGE_SIZE - off) sz = PAGE_SIZE - off; error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); vm_imgact_unmap_page(sf); if (error != 0) return (KERN_FAILURE); offset += sz; } } else { vm_object_reference(object); rv = vm_map_fixed(map, object, offset, start, end - start, prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL | (object != NULL ? MAP_VN_EXEC : 0)); if (rv != KERN_SUCCESS) { locked = VOP_ISLOCKED(imgp->vp); VOP_UNLOCK(imgp->vp); vm_object_deallocate(object); vn_lock(imgp->vp, locked | LK_RETRY); return (rv); } else if (object != NULL) { MPASS(imgp->vp->v_object == object); VOP_SET_TEXT_CHECKED(imgp->vp); } } return (KERN_SUCCESS); } static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot) { struct sf_buf *sf; size_t map_len; vm_map_t map; vm_object_t object; vm_offset_t map_addr; int error, rv, cow; size_t copy_len; vm_ooffset_t file_addr; /* * It's necessary to fail if the filsz + offset taken from the * header is greater than the actual file pager object's size. * If we were to allow this, then the vm_map_find() below would * walk right off the end of the file object and into the ether. * * While I'm here, might as well check for something else that * is invalid: filsz cannot be greater than memsz. */ if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) || filsz > memsz) { uprintf("elf_load_section: truncated ELF file\n"); return (ENOEXEC); } object = imgp->object; map = &imgp->proc->p_vmspace->vm_map; map_addr = trunc_page((vm_offset_t)vmaddr); file_addr = trunc_page(offset); /* * We have two choices. We can either clear the data in the last page * of an oversized mapping, or we can start the anon mapping a page * early and copy the initialized data into that first page. We * choose the second. 
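/*
 * A worked instance of the split computed below, assuming
 * PAGE_SIZE = 4096, p_offset = 0x1234, p_filesz = 0x5678 and
 * p_memsz > p_filesz:
 *
 *	file_addr = trunc_page(0x1234)               = 0x1000
 *	map_len   = trunc_page(0x1234 + 0x5678) - 0x1000
 *	          = 0x6000 - 0x1000                  = 0x5000
 *	copy_len  = 0x68ac - trunc_page(0x68ac)
 *	          = 0x68ac - 0x6000                  = 0x8ac
 *
 * The trailing 0x8ac initialized bytes are copied into the first page
 * of the anonymous (bss) mapping, which is why that mapping starts one
 * page early.
 */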
*/ if (filsz == 0) map_len = 0; else if (memsz > filsz) map_len = trunc_page(offset + filsz) - file_addr; else map_len = round_page(offset + filsz) - file_addr; if (map_len != 0) { /* cow flags: don't dump readonly sections in core */ cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); rv = __elfN(map_insert)(imgp, map, object, file_addr, map_addr, map_addr + map_len, prot, cow); if (rv != KERN_SUCCESS) return (EINVAL); /* we can stop now if we've covered it all */ if (memsz == filsz) return (0); } /* * We have to get the remaining bit of the file into the first part * of the oversized map segment. This is normally because the .data * segment in the file is extended to provide bss. It's a neat idea * to try and save a page, but it's a pain in the behind to implement. */ copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset + filsz); map_addr = trunc_page((vm_offset_t)vmaddr + filsz); map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr; /* This had damn well better be true! */ if (map_len != 0) { rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr, map_addr + map_len, prot, 0); if (rv != KERN_SUCCESS) return (EINVAL); } if (copy_len != 0) { sf = vm_imgact_map_page(object, offset + filsz); if (sf == NULL) return (EIO); /* send the page fragment to user space */ error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr, copy_len); vm_imgact_unmap_page(sf); if (error != 0) return (error); } /* * Remove write access to the page if it was only granted by map_insert * to allow copyout. */ if ((prot & VM_PROT_WRITE) == 0) vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + map_len), prot, 0, VM_MAP_PROTECT_SET_PROT); return (0); } static int __elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr, const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp) { vm_prot_t prot; u_long base_addr; bool first; int error, i; ASSERT_VOP_LOCKED(imgp->vp, __func__); base_addr = 0; first = true; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) continue; /* Loadable segment */ prot = __elfN(trans_prot)(phdr[i].p_flags); error = __elfN(load_section)(imgp, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, phdr[i].p_memsz, phdr[i].p_filesz, prot); if (error != 0) return (error); /* * Establish the base address if this is the first segment. */ if (first) { base_addr = trunc_page(phdr[i].p_vaddr + rbase); first = false; } } if (base_addrp != NULL) *base_addrp = base_addr; return (0); } /* * Load the file "file" into memory. It may be either a shared object * or an executable. * * The "addr" reference parameter is in/out. On entry, it specifies * the address where a shared object should be loaded. If the file is * an executable, this value is ignored. On exit, "addr" specifies * where the file was actually loaded. * * The "entry" reference parameter is out only. On exit, it specifies * the entry point for the loaded file. */ static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry) { struct { struct nameidata nd; struct vattr attr; struct image_params image_params; } *tempdata; const Elf_Ehdr *hdr = NULL; const Elf_Phdr *phdr = NULL; struct nameidata *nd; struct vattr *attr; struct image_params *imgp; u_long rbase; u_long base_addr = 0; int error; #ifdef CAPABILITY_MODE /* * XXXJA: This check can go away once we are sufficiently confident * that the checks in namei() are correct. 
*/ if (IN_CAPABILITY_MODE(curthread)) return (ECAPMODE); #endif tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; /* * Initialize part of the common data */ imgp->proc = p; imgp->attr = attr; NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, file, curthread); if ((error = namei(nd)) != 0) { nd->ni_vp = NULL; goto fail; } NDFREE(nd, NDF_ONLY_PNBUF); imgp->vp = nd->ni_vp; /* * Check permissions, modes, uid, etc on the file, and "open" it. */ error = exec_check_permissions(imgp); if (error) goto fail; error = exec_map_first_page(imgp); if (error) goto fail; imgp->object = nd->ni_vp->v_object; hdr = (const Elf_Ehdr *)imgp->image_header; if ((error = __elfN(check_header)(hdr)) != 0) goto fail; if (hdr->e_type == ET_DYN) rbase = *addr; else if (hdr->e_type == ET_EXEC) rbase = 0; else { error = ENOEXEC; goto fail; } /* Only support headers that fit within first page for now */ if (!__elfN(phdr_in_zero_page)(hdr)) { error = ENOEXEC; goto fail; } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { error = ENOEXEC; goto fail; } error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr); if (error != 0) goto fail; *addr = base_addr; *entry = (unsigned long)hdr->e_entry + rbase; fail: if (imgp->firstpage) exec_unmap_first_page(imgp); if (nd->ni_vp) { if (imgp->textset) VOP_UNSET_TEXT_CHECKED(nd->ni_vp); vput(nd->ni_vp); } free(tempdata, M_TEMP); return (error); } static u_long __CONCAT(rnd_, __elfN(base))(vm_map_t map __unused, u_long minv, u_long maxv, u_int align) { u_long rbase, res; MPASS(vm_map_min(map) <= minv); MPASS(maxv <= vm_map_max(map)); MPASS(minv < maxv); MPASS(minv + align < maxv); arc4rand(&rbase, sizeof(rbase), 0); res = roundup(minv, (u_long)align) + rbase % (maxv - minv); res &= ~((u_long)align - 1); if (res >= maxv) res -= align; KASSERT(res >= minv, ("res %#lx < minv %#lx, maxv %#lx rbase %#lx", res, minv, maxv, rbase)); KASSERT(res < maxv, ("res %#lx > maxv %#lx, minv %#lx rbase %#lx", res, maxv, minv, rbase)); return (res); } static int __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr, const Elf_Phdr *phdr, u_long et_dyn_addr) { struct vmspace *vmspace; const char *err_str; u_long text_size, data_size, total_size, text_addr, data_addr; u_long seg_size, seg_addr; int i; err_str = NULL; text_size = data_size = total_size = text_addr = data_addr = 0; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) continue; seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); seg_size = round_page(phdr[i].p_memsz + phdr[i].p_vaddr + et_dyn_addr - seg_addr); /* * Make the largest executable segment the official * text segment and all others data. * * Note that obreak() assumes that data_addr + data_size == end * of data load area, and the ELF file format expects segments * to be sorted by address. If multiple data segments exist, * the last one will be used. */ if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) { text_size = seg_size; text_addr = seg_addr; } else { data_size = seg_size; data_addr = seg_addr; } total_size += seg_size; } if (data_addr == 0 && data_size == 0) { data_addr = text_addr; data_size = text_size; } /* * Check limits. It should be safe to check the * limits after loading the segments since we do * not actually fault in all the segments pages. 
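/*
 * A userland re-creation (assumed names, arc4random_buf(3) instead of
 * the kernel arc4rand()) of the rnd_*_base() computation above; handy
 * for convincing yourself that the result always lands in [minv, maxv)
 * at the requested alignment:
 */
#if 0
#include <sys/param.h>
#include <stdlib.h>

static unsigned long
rnd_base(unsigned long minv, unsigned long maxv, unsigned long align)
{
	unsigned long rbase, res;

	arc4random_buf(&rbase, sizeof(rbase));
	/* Random point in the window, measured from an aligned floor. */
	res = roundup(minv, align) + rbase % (maxv - minv);
	res &= ~(align - 1);	/* snap down to the alignment */
	if (res >= maxv)	/* roundup() may have pushed us past maxv */
		res -= align;
	return (res);
}
#endif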
 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
		err_str = "Data segment size exceeds process limit";
	else if (text_size > maxtsiz)
		err_str = "Text segment size exceeds system limit";
	else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
		err_str = "Total segment size exceeds process limit";
	else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
		err_str = "Data segment size exceeds resource limit";
	else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
		err_str = "Total segment size exceeds resource limit";
	PROC_UNLOCK(imgp->proc);
	if (err_str != NULL) {
		uprintf("%s\n", err_str);
		return (ENOMEM);
	}

	vmspace = imgp->proc->p_vmspace;
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	return (0);
}

static int
__elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
    char **interpp, bool *free_interpp)
{
	struct thread *td;
	char *interp;
	int error, interp_name_len;

	KASSERT(phdr->p_type == PT_INTERP,
	    ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	td = curthread;

	/* Path to interpreter */
	if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	interp_name_len = phdr->p_filesz;
	if (phdr->p_offset > PAGE_SIZE ||
	    interp_name_len > PAGE_SIZE - phdr->p_offset) {
		/*
		 * The vnode lock might be needed by the pagedaemon to
		 * clean pages owned by the vnode.  Do not allow sleep
		 * waiting for memory with the vnode locked; instead,
		 * try non-sleepable allocation first, and if it
		 * fails, go to the slow path where we drop the lock
		 * and do M_WAITOK.  A text reference prevents
		 * modifications to the vnode content.
*/ interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT); if (interp == NULL) { VOP_UNLOCK(imgp->vp); interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); } error = vn_rdwr(UIO_READ, imgp->vp, interp, interp_name_len, phdr->p_offset, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL, td); if (error != 0) { free(interp, M_TEMP); uprintf("i/o error PT_INTERP %d\n", error); return (error); } interp[interp_name_len] = '\0'; *interpp = interp; *free_interpp = true; return (0); } interp = __DECONST(char *, imgp->image_header) + phdr->p_offset; if (interp[interp_name_len - 1] != '\0') { uprintf("Invalid PT_INTERP\n"); return (ENOEXEC); } *interpp = interp; *free_interpp = false; return (0); } static int __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info, const char *interp, u_long *addr, u_long *entry) { char *path; int error; if (brand_info->emul_path != NULL && brand_info->emul_path[0] != '\0') { path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path, interp); error = __elfN(load_file)(imgp->proc, path, addr, entry); free(path, M_TEMP); if (error == 0) return (0); } if (brand_info->interp_newpath != NULL && (brand_info->interp_path == NULL || strcmp(interp, brand_info->interp_path) == 0)) { error = __elfN(load_file)(imgp->proc, brand_info->interp_newpath, addr, entry); if (error == 0) return (0); } error = __elfN(load_file)(imgp->proc, interp, addr, entry); if (error == 0) return (0); uprintf("ELF interpreter %s not found, error %d\n", interp, error); return (error); } /* * Impossible et_dyn_addr initial value indicating that the real base * must be calculated later with some randomization applied. */ #define ET_DYN_ADDR_RAND 1 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) { struct thread *td; const Elf_Ehdr *hdr; const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs; struct vmspace *vmspace; vm_map_t map; char *interp; Elf_Brandinfo *brand_info; struct sysentvec *sv; u_long addr, baddr, et_dyn_addr, entry, proghdr; u_long maxalign, mapsz, maxv, maxv1; uint32_t fctl0; int32_t osrel; bool free_interp; int error, i, n; hdr = (const Elf_Ehdr *)imgp->image_header; /* * Do we have a valid ELF header ? * * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later * if particular brand doesn't support it. */ if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) return (-1); /* * From here on down, we return an errno, not -1, as we've * detected an ELF file. */ if (!__elfN(phdr_in_zero_page)(hdr)) { uprintf("Program headers not in the first page\n"); return (ENOEXEC); } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { uprintf("Unaligned program headers\n"); return (ENOEXEC); } n = error = 0; baddr = 0; osrel = 0; fctl0 = 0; entry = proghdr = 0; interp = NULL; free_interp = false; td = curthread; maxalign = PAGE_SIZE; mapsz = 0; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: if (n == 0) baddr = phdr[i].p_vaddr; if (phdr[i].p_align > maxalign) maxalign = phdr[i].p_align; mapsz += phdr[i].p_memsz; n++; /* * If this segment contains the program headers, * remember their virtual address for the AT_PHDR * aux entry. Static binaries don't usually include * a PT_PHDR entry. 
 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
			    <= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
			break;
		case PT_INTERP:	/* Path to interpreter */
			if (interp != NULL) {
				uprintf("Multiple PT_INTERP headers\n");
				error = ENOEXEC;
				goto ret;
			}
			error = __elfN(get_interp)(imgp, &phdr[i], &interp,
			    &free_interp);
			if (error != 0)
				goto ret;
			break;
		case PT_GNU_STACK:
			if (__elfN(nxstack))
				imgp->stack_prot =
				    __elfN(trans_prot)(phdr[i].p_flags);
			imgp->stack_sz = phdr[i].p_memsz;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto ret;
	}
	sv = brand_info->sysvec;
	et_dyn_addr = 0;
	if (hdr->e_type == ET_DYN) {
		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
			uprintf("Cannot execute shared object\n");
			error = ENOEXEC;
			goto ret;
		}
		/*
		 * Honour the base load address from the dso if it is
		 * non-zero for some reason.
		 */
		if (baddr == 0) {
			if ((sv->sv_flags & SV_ASLR) == 0 ||
			    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
				et_dyn_addr = __elfN(pie_base);
			else if ((__elfN(pie_aslr_enabled) &&
			    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
			    (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
				et_dyn_addr = ET_DYN_ADDR_RAND;
			else
				et_dyn_addr = __elfN(pie_base);
		}
	}

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 *
	 * The VV_TEXT flag prevents modifications to the executable while
	 * the vnode is unlocked.
	 */
	VOP_UNLOCK(imgp->vp);

	/*
	 * Decide whether to enable randomization of user mappings.
	 * First, reset user preferences for the setid binaries.
	 * Then, account for the support of the randomization by the
	 * ABI, by user preferences, and give special treatment to
	 * PIE binaries.
	 */
	if (imgp->credential_setid) {
		PROC_LOCK(imgp->proc);
-		imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
+		imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE |
+		    P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
		PROC_UNLOCK(imgp->proc);
	}
	if ((sv->sv_flags & SV_ASLR) == 0 ||
	    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
	    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
		KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
		    ("et_dyn_addr == RAND and !ASLR"));
	} else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
	    (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
	    et_dyn_addr == ET_DYN_ADDR_RAND) {
		imgp->map_flags |= MAP_ASLR;
		/*
		 * If the user does not care about sbrk, utilize the bss
		 * grow region for mappings as well.  We can select
		 * the base for the image anywhere and still not suffer
		 * from fragmentation.
*/ if (!__elfN(aslr_honor_sbrk) || (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) imgp->map_flags |= MAP_ASLR_IGNSTART; } - if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0) + if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 && + (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) || + (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) imgp->map_flags |= MAP_WXORX; error = exec_new_vmspace(imgp, sv); vmspace = imgp->proc->p_vmspace; map = &vmspace->vm_map; imgp->proc->p_sysent = sv; imgp->proc->p_elf_brandinfo = brand_info; maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK); if (et_dyn_addr == ET_DYN_ADDR_RAND) { KASSERT((map->flags & MAP_ASLR) != 0, ("ET_DYN_ADDR_RAND but !MAP_ASLR")); et_dyn_addr = __CONCAT(rnd_, __elfN(base))(map, vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA), /* reserve half of the address space to interpreter */ maxv / 2, 1UL << flsl(maxalign)); } vn_lock(imgp->vp, LK_SHARED | LK_RETRY); if (error != 0) goto ret; error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL); if (error != 0) goto ret; error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr); if (error != 0) goto ret; entry = (u_long)hdr->e_entry + et_dyn_addr; /* * We load the dynamic linker where a userland call * to mmap(0, ...) would put it. The rationale behind this * calculation is that it leaves room for the heap to grow to * its maximum allowed size. */ addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, RLIMIT_DATA)); if ((map->flags & MAP_ASLR) != 0) { maxv1 = maxv / 2 + addr / 2; MPASS(maxv1 >= addr); /* No overflow */ map->anon_loc = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, (MAXPAGESIZES > 1 && pagesizes[1] != 0) ? pagesizes[1] : pagesizes[0]); } else { map->anon_loc = addr; } imgp->entry_addr = entry; if (interp != NULL) { VOP_UNLOCK(imgp->vp); if ((map->flags & MAP_ASLR) != 0) { /* Assume that interpreter fits into 1/4 of AS */ maxv1 = maxv / 2 + addr / 2; MPASS(maxv1 >= addr); /* No overflow */ addr = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, PAGE_SIZE); } error = __elfN(load_interp)(imgp, brand_info, interp, &addr, &imgp->entry_addr); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); if (error != 0) goto ret; } else addr = et_dyn_addr; /* * Construct auxargs table (used by the copyout_auxargs routine) */ elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT); if (elf_auxargs == NULL) { VOP_UNLOCK(imgp->vp); elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); } elf_auxargs->execfd = -1; elf_auxargs->phdr = proghdr + et_dyn_addr; elf_auxargs->phent = hdr->e_phentsize; elf_auxargs->phnum = hdr->e_phnum; elf_auxargs->pagesz = PAGE_SIZE; elf_auxargs->base = addr; elf_auxargs->flags = 0; elf_auxargs->entry = entry; elf_auxargs->hdr_eflags = hdr->e_flags; imgp->auxargs = elf_auxargs; imgp->interpreted = 0; imgp->reloc_base = addr; imgp->proc->p_osrel = osrel; imgp->proc->p_fctl0 = fctl0; imgp->proc->p_elf_flags = hdr->e_flags; ret: if (free_interp) free(interp, M_TEMP); return (error); } #define elf_suword __CONCAT(suword, __ELF_WORD_SIZE) int __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base) { Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; Elf_Auxinfo *argarray, *pos; int error; argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP, M_WAITOK | M_ZERO); if (args->execfd != -1) AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); 
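/*
 * The P2_WXORX_DISABLE / P2_WXORX_ENABLE_EXEC bits consulted above are
 * the kernel side of a procctl(2) control; a minimal userland sketch,
 * assuming the PROC_WXMAP_CTL command and PROC_WX_MAPPINGS_PERMIT flag
 * from sys/procctl.h that accompany this change:
 */
#if 0
#include <sys/procctl.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	int op = PROC_WX_MAPPINGS_PERMIT;

	/*
	 * Ask that this process may create writable+executable mappings
	 * even when allow_wx is off; sets P2_WXORX_DISABLE above.
	 */
	if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &op) == -1)
		err(1, "procctl(PROC_WXMAP_CTL)");
	return (0);
}
#endif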
AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_BASE, args->base); AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); if (imgp->execpathp != 0) AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp); AUXARGS_ENTRY(pos, AT_OSRELDATE, imgp->proc->p_ucred->cr_prison->pr_osreldate); if (imgp->canary != 0) { AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary); AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); } AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); if (imgp->pagesizes != 0) { AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes); AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); } if (imgp->sysent->sv_timekeep_base != 0) { AUXARGS_ENTRY(pos, AT_TIMEKEEP, imgp->sysent->sv_timekeep_base); } AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot : imgp->sysent->sv_stackprot); if (imgp->sysent->sv_hwcap != NULL) AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap); if (imgp->sysent->sv_hwcap2 != NULL) AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2); AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ? ELF_BSDF_SIGFASTBLK : 0); AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc); AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv); AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc); AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv); AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings); if (imgp->sysent->sv_fxrng_gen_base != 0) AUXARGS_ENTRY(pos, AT_FXRNG, imgp->sysent->sv_fxrng_gen_base); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs")); error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT); free(argarray, M_TEMP); return (error); } int __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp) { Elf_Addr *base; base = (Elf_Addr *)*stack_base; base--; if (elf_suword(base, imgp->args->argc) == -1) return (EFAULT); *stack_base = (uintptr_t)base; return (0); } /* * Code for generating ELF core dumps. */ typedef void (*segment_callback)(vm_map_entry_t, void *); /* Closure for cb_put_phdr(). */ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; struct note_info { int type; /* Note type. */ outfunc_t outfunc; /* Output function. */ void *outarg; /* Argument for the output function. */ size_t outsize; /* Output size. */ TAILQ_ENTRY(note_info) link; /* Link to the next note info. 
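/*
 * The AT_* vector assembled above is what a process reads back through
 * elf_aux_info(3); a minimal sketch, not part of this file:
 */
#if 0
#include <sys/auxv.h>
#include <stdio.h>

int
main(void)
{
	int pagesz, ncpus;

	if (elf_aux_info(AT_PAGESZ, &pagesz, sizeof(pagesz)) == 0)
		printf("AT_PAGESZ = %d\n", pagesz);
	if (elf_aux_info(AT_NCPUS, &ncpus, sizeof(ncpus)) == 0)
		printf("AT_NCPUS = %d\n", ncpus);
	return (0);
}
#endif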
*/ }; TAILQ_HEAD(note_info_list, note_info); extern int compress_user_cores; extern int compress_user_cores_level; static void cb_put_phdr(vm_map_entry_t, void *); static void cb_size_segment(vm_map_entry_t, void *); static void each_dumpable_segment(struct thread *, segment_callback, void *, int); static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, struct note_info_list *, size_t, int); static void __elfN(putnote)(struct thread *td, struct note_info *, struct sbuf *); static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *); static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *); static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *); static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *); static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *); static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *); static void note_procstat_files(void *, struct sbuf *, size_t *); static void note_procstat_groups(void *, struct sbuf *, size_t *); static void note_procstat_osrel(void *, struct sbuf *, size_t *); static void note_procstat_rlimit(void *, struct sbuf *, size_t *); static void note_procstat_umask(void *, struct sbuf *, size_t *); static void note_procstat_vmmap(void *, struct sbuf *, size_t *); static int core_compressed_write(void *base, size_t len, off_t offset, void *arg) { return (core_write((struct coredump_params *)arg, base, len, offset, UIO_SYSSPACE, NULL)); } int __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) { struct ucred *cred = td->td_ucred; int compm, error = 0; struct sseg_closure seginfo; struct note_info_list notelst; struct coredump_params params; struct note_info *ninfo; void *hdr, *tmpbuf; size_t hdrsize, notesz, coresize; hdr = NULL; tmpbuf = NULL; TAILQ_INIT(¬elst); /* Size the program segments. */ __elfN(size_segments)(td, &seginfo, flags); /* * Collect info about the core file header area. */ hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); if (seginfo.count + 1 >= PN_XNUM) hdrsize += sizeof(Elf_Shdr); td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, ¬elst, ¬esz); coresize = round_page(hdrsize + notesz) + seginfo.size; /* Set up core dump parameters. */ params.offset = 0; params.active_cred = cred; params.file_cred = NOCRED; params.td = td; params.vp = vp; params.comp = NULL; #ifdef RACCT if (racct_enable) { PROC_LOCK(td->td_proc); error = racct_add(td->td_proc, RACCT_CORE, coresize); PROC_UNLOCK(td->td_proc); if (error != 0) { error = EFAULT; goto done; } } #endif if (coresize >= limit) { error = EFAULT; goto done; } /* Create a compression stream if necessary. */ compm = compress_user_cores; if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP && compm == 0) compm = COMPRESS_GZIP; if (compm != 0) { params.comp = compressor_init(core_compressed_write, compm, CORE_BUF_SIZE, compress_user_cores_level, ¶ms); if (params.comp == NULL) { error = EFAULT; goto done; } tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); } /* * Allocate memory for building the header, fill it up, * and write it out following the notes. 
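/*
 * For reference, the resulting core file layout in file-offset order,
 * for a dump with `count' loadable segments:
 *
 *	0			Elf_Ehdr
 *	e_phoff			(count + 1) Elf_Phdr entries, PT_NOTE first
 *	hdrsize			the notes, notesz bytes
 *	round_page(hdrsize + notesz)
 *				segment 0, segment 1, ... (page aligned)
 *
 * hence coresize = round_page(hdrsize + notesz) + seginfo.size above.
 */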
*/ hdr = malloc(hdrsize, M_TEMP, M_WAITOK); error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, notesz, flags); /* Write the contents of all of the writable segments. */ if (error == 0) { Elf_Phdr *php; off_t offset; int i; php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; offset = round_page(hdrsize + notesz); for (i = 0; i < seginfo.count; i++) { error = core_output((char *)(uintptr_t)php->p_vaddr, php->p_filesz, offset, ¶ms, tmpbuf); if (error != 0) break; offset += php->p_filesz; php++; } if (error == 0 && params.comp != NULL) error = compressor_flush(params.comp); } if (error) { log(LOG_WARNING, "Failed to write core file for process %s (error %d)\n", curproc->p_comm, error); } done: free(tmpbuf, M_TEMP); if (params.comp != NULL) compressor_fini(params.comp); while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { TAILQ_REMOVE(¬elst, ninfo, link); free(ninfo, M_TEMP); } if (hdr != NULL) free(hdr, M_TEMP); return (error); } /* * A callback for each_dumpable_segment() to write out the segment's * program header entry. */ static void cb_put_phdr(vm_map_entry_t entry, void *closure) { struct phdr_closure *phc = (struct phdr_closure *)closure; Elf_Phdr *phdr = phc->phdr; phc->offset = round_page(phc->offset); phdr->p_type = PT_LOAD; phdr->p_offset = phc->offset; phdr->p_vaddr = entry->start; phdr->p_paddr = 0; phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; phdr->p_align = PAGE_SIZE; phdr->p_flags = __elfN(untrans_prot)(entry->protection); phc->offset += phdr->p_filesz; phc->phdr++; } /* * A callback for each_dumpable_segment() to gather information about * the number of segments and their total size. */ static void cb_size_segment(vm_map_entry_t entry, void *closure) { struct sseg_closure *ssc = (struct sseg_closure *)closure; ssc->count++; ssc->size += entry->end - entry->start; } void __elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo, int flags) { seginfo->count = 0; seginfo->size = 0; each_dumpable_segment(td, cb_size_segment, seginfo, flags); } /* * For each writable segment in the process's memory map, call the given * function with a pointer to the map entry and some arbitrary * caller-supplied data. */ static void each_dumpable_segment(struct thread *td, segment_callback func, void *closure, int flags) { struct proc *p = td->td_proc; vm_map_t map = &p->p_vmspace->vm_map; vm_map_entry_t entry; vm_object_t backing_object, object; bool ignore_entry; vm_map_lock_read(map); VM_MAP_ENTRY_FOREACH(entry, map) { /* * Don't dump inaccessible mappings, deal with legacy * coredump mode. * * Note that read-only segments related to the elf binary * are marked MAP_ENTRY_NOCOREDUMP now so we no longer * need to arbitrarily ignore such segments. */ if ((flags & SVC_ALL) == 0) { if (elf_legacy_coredump) { if ((entry->protection & VM_PROT_RW) != VM_PROT_RW) continue; } else { if ((entry->protection & VM_PROT_ALL) == 0) continue; } } /* * Dont include memory segment in the coredump if * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in * madvise(2). Do not dump submaps (i.e. parts of the * kernel map). */ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) continue; if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 && (flags & SVC_ALL) == 0) continue; if ((object = entry->object.vm_object) == NULL) continue; /* Ignore memory-mapped devices and such things. 
*/ VM_OBJECT_RLOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_RLOCK(backing_object); VM_OBJECT_RUNLOCK(object); object = backing_object; } ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0; VM_OBJECT_RUNLOCK(object); if (ignore_entry) continue; (*func)(entry, closure); } vm_map_unlock_read(map); } /* * Write the core file header to the file, including padding up to * the page boundary. */ static int __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst, size_t notesz, int flags) { struct note_info *ninfo; struct sbuf *sb; int error; /* Fill in the header. */ bzero(hdr, hdrsize); __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags); sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_core_output, p); sbuf_start_section(sb, NULL); sbuf_bcat(sb, hdr, hdrsize); TAILQ_FOREACH(ninfo, notelst, link) __elfN(putnote)(p->td, ninfo, sb); /* Align up to a page boundary for the program segments. */ sbuf_end_section(sb, -1, PAGE_SIZE, 0); error = sbuf_finish(sb); sbuf_delete(sb); return (error); } void __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, size_t *sizep) { struct proc *p; struct thread *thr; size_t size; p = td->td_proc; size = 0; size += __elfN(register_note)(td, list, NT_PRPSINFO, __elfN(note_prpsinfo), p); /* * To have the debugger select the right thread (LWP) as the initial * thread, we dump the state of the thread passed to us in td first. * This is the thread that causes the core dump and thus likely to * be the right thread one wants to have selected in the debugger. */ thr = td; while (thr != NULL) { size += __elfN(register_note)(td, list, NT_PRSTATUS, __elfN(note_prstatus), thr); size += __elfN(register_note)(td, list, NT_FPREGSET, __elfN(note_fpregset), thr); size += __elfN(register_note)(td, list, NT_THRMISC, __elfN(note_thrmisc), thr); size += __elfN(register_note)(td, list, NT_PTLWPINFO, __elfN(note_ptlwpinfo), thr); size += __elfN(register_note)(td, list, -1, __elfN(note_threadmd), thr); thr = thr == td ? 
TAILQ_FIRST(&p->p_threads) : TAILQ_NEXT(thr, td_plist); if (thr == td) thr = TAILQ_NEXT(thr, td_plist); } size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC, __elfN(note_procstat_proc), p); size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES, note_procstat_files, p); size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP, note_procstat_vmmap, p); size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS, note_procstat_groups, p); size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK, note_procstat_umask, p); size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT, note_procstat_rlimit, p); size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL, note_procstat_osrel, p); size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS, __elfN(note_procstat_psstrings), p); size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV, __elfN(note_procstat_auxv), p); *sizep = size; } void __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs, size_t notesz, int flags) { Elf_Ehdr *ehdr; Elf_Phdr *phdr; Elf_Shdr *shdr; struct phdr_closure phc; Elf_Brandinfo *bi; ehdr = (Elf_Ehdr *)hdr; bi = td->td_proc->p_elf_brandinfo; ehdr->e_ident[EI_MAG0] = ELFMAG0; ehdr->e_ident[EI_MAG1] = ELFMAG1; ehdr->e_ident[EI_MAG2] = ELFMAG2; ehdr->e_ident[EI_MAG3] = ELFMAG3; ehdr->e_ident[EI_CLASS] = ELF_CLASS; ehdr->e_ident[EI_DATA] = ELF_DATA; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi; ehdr->e_ident[EI_ABIVERSION] = 0; ehdr->e_ident[EI_PAD] = 0; ehdr->e_type = ET_CORE; ehdr->e_machine = bi->machine; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf_Ehdr); ehdr->e_flags = td->td_proc->p_elf_flags; ehdr->e_ehsize = sizeof(Elf_Ehdr); ehdr->e_phentsize = sizeof(Elf_Phdr); ehdr->e_shentsize = sizeof(Elf_Shdr); ehdr->e_shstrndx = SHN_UNDEF; if (numsegs + 1 < PN_XNUM) { ehdr->e_phnum = numsegs + 1; ehdr->e_shnum = 0; } else { ehdr->e_phnum = PN_XNUM; ehdr->e_shnum = 1; ehdr->e_shoff = ehdr->e_phoff + (numsegs + 1) * ehdr->e_phentsize; KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr), ("e_shoff: %zu, hdrsize - shdr: %zu", (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr))); shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff); memset(shdr, 0, sizeof(*shdr)); /* * A special first section is used to hold large segment and * section counts. This was proposed by Sun Microsystems in * Solaris and has been adopted by Linux; the standard ELF * tools are already familiar with the technique. * * See table 7-7 of the Solaris "Linker and Libraries Guide" * (or 12-7 depending on the version of the document) for more * details. */ shdr->sh_type = SHT_NULL; shdr->sh_size = ehdr->e_shnum; shdr->sh_link = ehdr->e_shstrndx; shdr->sh_info = numsegs + 1; } /* * Fill in the program header entries. */ phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff); /* The note segement. */ phdr->p_type = PT_NOTE; phdr->p_offset = hdrsize; phdr->p_vaddr = 0; phdr->p_paddr = 0; phdr->p_filesz = notesz; phdr->p_memsz = 0; phdr->p_flags = PF_R; phdr->p_align = ELF_NOTE_ROUNDSIZE; phdr++; /* All the writable segments from the program. 
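/*
 * A reader-side sketch (assumed helper name) of the PN_XNUM escape
 * implemented above: when the segment count reaches PN_XNUM (0xffff),
 * e_phnum holds PN_XNUM and the real count lives in section header 0:
 */
#if 0
#include <elf.h>

static unsigned long
real_phnum(const Elf64_Ehdr *eh, const Elf64_Shdr *sh0)
{
	if (eh->e_phnum != PN_XNUM)
		return (eh->e_phnum);
	/* sh_info of the SHT_NULL section 0 carries the segment count. */
	return (sh0->sh_info);
}
#endif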
*/ phc.phdr = phdr; phc.offset = round_page(hdrsize + notesz); each_dumpable_segment(td, cb_put_phdr, &phc, flags); } size_t __elfN(register_note)(struct thread *td, struct note_info_list *list, int type, outfunc_t out, void *arg) { const struct sysentvec *sv; struct note_info *ninfo; size_t size, notesize; sv = td->td_proc->p_sysent; size = 0; out(arg, NULL, &size); ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK); ninfo->type = type; ninfo->outfunc = out; ninfo->outarg = arg; ninfo->outsize = size; TAILQ_INSERT_TAIL(list, ninfo, link); if (type == -1) return (size); notesize = sizeof(Elf_Note) + /* note header */ roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1, ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static size_t append_note_data(const void *src, void *dst, size_t len) { size_t padded_len; padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE); if (dst != NULL) { bcopy(src, dst, len); bzero((char *)dst + len, padded_len - len); } return (padded_len); } size_t __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp) { Elf_Note *note; char *buf; size_t notesize; buf = dst; if (buf != NULL) { note = (Elf_Note *)buf; note->n_namesz = sizeof(FREEBSD_ABI_VENDOR); note->n_descsz = size; note->n_type = type; buf += sizeof(*note); buf += append_note_data(FREEBSD_ABI_VENDOR, buf, sizeof(FREEBSD_ABI_VENDOR)); append_note_data(src, buf, size); if (descp != NULL) *descp = buf; } notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static void __elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb) { Elf_Note note; const struct sysentvec *sv; ssize_t old_len, sect_len; size_t new_len, descsz, i; if (ninfo->type == -1) { ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); return; } sv = td->td_proc->p_sysent; note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1; note.n_descsz = ninfo->outsize; note.n_type = ninfo->type; sbuf_bcat(sb, ¬e, sizeof(note)); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, sv->sv_elf_core_abi_vendor, strlen(sv->sv_elf_core_abi_vendor) + 1); sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (note.n_descsz == 0) return; sbuf_start_section(sb, &old_len); ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (sect_len < 0) return; new_len = (size_t)sect_len; descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); if (new_len < descsz) { /* * It is expected that individual note emitters will correctly * predict their expected output size and fill up to that size * themselves, padding in a format-specific way if needed. * However, in case they don't, just do it here with zeros. */ for (i = 0; i < descsz - new_len; i++) sbuf_putc(sb, 0); } else if (new_len > descsz) { /* * We can't always truncate sb -- we may have drained some * of it already. */ KASSERT(new_len == descsz, ("%s: Note type %u changed as we " "read it (%zu > %zu). Since it is longer than " "expected, this coredump's notes are corrupt. THIS " "IS A BUG in the note_procstat routine for type %u.\n", __func__, (unsigned)note.n_type, new_len, descsz, (unsigned)note.n_type)); } } /* * Miscellaneous note out functions. 
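/*
 * A worked instance of the note size computed in __elfN(register_note)
 * above, for vendor "FreeBSD" (8 bytes including the NUL), a 4-byte
 * descriptor and ELF_NOTE_ROUNDSIZE = 4:
 *
 *	sizeof(Elf_Note)	= 12	(namesz, descsz, type)
 *	roundup2(8, 4)		=  8	(name, already aligned)
 *	roundup2(4, 4)		=  4	(descriptor)
 *				  --
 *	notesize		= 24 bytes
 */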
*/ #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 #include #include typedef struct prstatus32 elf_prstatus_t; typedef struct prpsinfo32 elf_prpsinfo_t; typedef struct fpreg32 elf_prfpregset_t; typedef struct fpreg32 elf_fpregset_t; typedef struct reg32 elf_gregset_t; typedef struct thrmisc32 elf_thrmisc_t; #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 typedef struct kinfo_proc32 elf_kinfo_proc_t; typedef uint32_t elf_ps_strings_t; #else typedef prstatus_t elf_prstatus_t; typedef prpsinfo_t elf_prpsinfo_t; typedef prfpregset_t elf_prfpregset_t; typedef prfpregset_t elf_fpregset_t; typedef gregset_t elf_gregset_t; typedef thrmisc_t elf_thrmisc_t; #define ELF_KERN_PROC_MASK 0 typedef struct kinfo_proc elf_kinfo_proc_t; typedef vm_offset_t elf_ps_strings_t; #endif static void __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct sbuf sbarg; size_t len; char *cp, *end; struct proc *p; elf_prpsinfo_t *psinfo; int error; p = arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); PROC_LOCK(p); if (p->p_args != NULL) { len = sizeof(psinfo->pr_psargs) - 1; if (len > p->p_args->ar_length) len = p->p_args->ar_length; memcpy(psinfo->pr_psargs, p->p_args->ar_args, len); PROC_UNLOCK(p); error = 0; } else { _PHOLD(p); PROC_UNLOCK(p); sbuf_new(&sbarg, psinfo->pr_psargs, sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN); error = proc_getargv(curthread, p, &sbarg); PRELE(p); if (sbuf_finish(&sbarg) == 0) len = sbuf_len(&sbarg) - 1; else len = sizeof(psinfo->pr_psargs) - 1; sbuf_delete(&sbarg); } if (error || len == 0) strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs)); else { KASSERT(len < sizeof(psinfo->pr_psargs), ("len is too long: %zu vs %zu", len, sizeof(psinfo->pr_psargs))); cp = psinfo->pr_psargs; end = cp + len - 1; for (;;) { cp = memchr(cp, '\0', end - cp); if (cp == NULL) break; *cp = ' '; } } psinfo->pr_pid = p->p_pid; sbuf_bcat(sb, psinfo, sizeof(*psinfo)); free(psinfo, M_TEMP); } *sizep = sizeof(*psinfo); } static void __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prstatus_t *status; td = arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*status), ("invalid size")); status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK); status->pr_version = PRSTATUS_VERSION; status->pr_statussz = sizeof(elf_prstatus_t); status->pr_gregsetsz = sizeof(elf_gregset_t); status->pr_fpregsetsz = sizeof(elf_fpregset_t); status->pr_osreldate = osreldate; status->pr_cursig = td->td_proc->p_sig; status->pr_pid = td->td_tid; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_regs32(td, &status->pr_reg); #else fill_regs(td, &status->pr_reg); #endif sbuf_bcat(sb, status, sizeof(*status)); free(status, M_TEMP); } *sizep = sizeof(*status); } static void __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prfpregset_t *fpregset; td = arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*fpregset), ("invalid size")); fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_fpregs32(td, fpregset); #else fill_fpregs(td, fpregset); #endif sbuf_bcat(sb, fpregset, sizeof(*fpregset)); free(fpregset, M_TEMP); } *sizep = sizeof(*fpregset); } static void __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep) { 
struct thread *td; elf_thrmisc_t thrmisc; td = arg; if (sb != NULL) { KASSERT(*sizep == sizeof(thrmisc), ("invalid size")); bzero(&thrmisc, sizeof(thrmisc)); strcpy(thrmisc.pr_tname, td->td_name); sbuf_bcat(sb, &thrmisc, sizeof(thrmisc)); } *sizep = sizeof(thrmisc); } static void __elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; size_t size; int structsize; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 struct ptrace_lwpinfo32 pl; #else struct ptrace_lwpinfo pl; #endif td = arg; size = sizeof(structsize) + sizeof(pl); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(pl); sbuf_bcat(sb, &structsize, sizeof(structsize)); bzero(&pl, sizeof(pl)); pl.pl_lwpid = td->td_tid; pl.pl_event = PL_EVENT_NONE; pl.pl_sigmask = td->td_sigmask; pl.pl_siglist = td->td_siglist; if (td->td_si.si_signo != 0) { pl.pl_event = PL_EVENT_SIGNAL; pl.pl_flags |= PL_FLAG_SI; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo); #else pl.pl_siginfo = td->td_si; #endif } strcpy(pl.pl_tdname, td->td_name); /* XXX TODO: supply more information in struct ptrace_lwpinfo*/ sbuf_bcat(sb, &pl, sizeof(pl)); } *sizep = size; } /* * Allow for MD specific notes, as well as any MD * specific preparations for writing MI notes. */ static void __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; void *buf; size_t size; td = (struct thread *)arg; size = *sizep; if (size != 0 && sb != NULL) buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); else buf = NULL; size = 0; __elfN(dump_thread)(td, buf, &size); KASSERT(sb == NULL || *sizep == size, ("invalid size")); if (size != 0 && sb != NULL) sbuf_bcat(sb, buf, size); free(buf, M_TEMP); *sizep = size; } #ifdef KINFO_PROC_SIZE CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #endif static void __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + p->p_numthreads * sizeof(elf_kinfo_proc_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(elf_kinfo_proc_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sx_slock(&proctree_lock); PROC_LOCK(p); kern_proc_out(p, sb, ELF_KERN_PROC_MASK); sx_sunlock(&proctree_lock); } *sizep = size; } #ifdef KINFO_FILE_SIZE CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); #endif static void note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size, sect_sz, i; ssize_t start_len, sect_len; int structsize, filedesc_flags; if (coredump_pack_fileinfo) filedesc_flags = KERN_FILEDESC_PACK_KINFO; else filedesc_flags = 0; p = arg; structsize = sizeof(struct kinfo_file); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_count_drain, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, -1, filedesc_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_start_section(sb, &start_len); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), filedesc_flags); sect_len = sbuf_end_section(sb, start_len, 0, 0); if (sect_len < 0) return; sect_sz = sect_len; KASSERT(sect_sz <= *sizep, ("kern_proc_filedesc_out did not respect maxlen; " "requested %zu, got %zu", *sizep - sizeof(structsize), sect_sz - sizeof(structsize))); for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) 
sbuf_putc(sb, 0); } } #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize, vmmap_flags; if (coredump_pack_vmmapinfo) vmmap_flags = KERN_VMMAP_PACK_KINFO; else vmmap_flags = 0; p = arg; structsize = sizeof(struct kinfo_vmentry); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_count_drain, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, -1, vmmap_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), vmmap_flags); } } static void note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(gid_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * sizeof(gid_t)); } *sizep = size; } static void note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_pd->pd_cmask); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask)); } *sizep = size; } static void note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; struct rlimit rlim[RLIM_NLIMITS]; size_t size; int structsize, i; p = arg; size = sizeof(structsize) + sizeof(rlim); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(rlim); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); for (i = 0; i < RLIM_NLIMITS; i++) lim_rlimit_proc(p, i, &rlim[i]); PROC_UNLOCK(p); sbuf_bcat(sb, rlim, sizeof(rlim)); } *sizep = size; } static void note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + sizeof(p->p_osrel); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_osrel); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); } *sizep = size; } static void __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_ps_strings_t ps_strings; size_t size; int structsize; p = arg; size = sizeof(structsize) + sizeof(ps_strings); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(ps_strings); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ps_strings = PTROUT(p->p_sysent->sv_psstrings); #else ps_strings = p->p_sysent->sv_psstrings; #endif sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); } *sizep = size; } static void __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo), SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_count_drain, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); sbuf_finish(sb); sbuf_delete(sb); 
*sizep = size; } else { structsize = sizeof(Elf_Auxinfo); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); } } static boolean_t __elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote, const char *note_vendor, const Elf_Phdr *pnote, boolean_t (*cb)(const Elf_Note *, void *, boolean_t *), void *cb_arg) { const Elf_Note *note, *note0, *note_end; const char *note_name; char *buf; int i, error; boolean_t res; /* We need some limit, might as well use PAGE_SIZE. */ if (pnote == NULL || pnote->p_filesz > PAGE_SIZE) return (FALSE); ASSERT_VOP_LOCKED(imgp->vp, "parse_notes"); if (pnote->p_offset > PAGE_SIZE || pnote->p_filesz > PAGE_SIZE - pnote->p_offset) { buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT); if (buf == NULL) { VOP_UNLOCK(imgp->vp); buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); } error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz, pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED, curthread->td_ucred, NOCRED, NULL, curthread); if (error != 0) { uprintf("i/o error PT_NOTE\n"); goto retf; } note = note0 = (const Elf_Note *)buf; note_end = (const Elf_Note *)(buf + pnote->p_filesz); } else { note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset); note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset + pnote->p_filesz); buf = NULL; } for (i = 0; i < 100 && note >= note0 && note < note_end; i++) { if (!aligned(note, Elf32_Addr) || (const char *)note_end - (const char *)note < sizeof(Elf_Note)) { goto retf; } if (note->n_namesz != checknote->n_namesz || note->n_descsz != checknote->n_descsz || note->n_type != checknote->n_type) goto nextnote; note_name = (const char *)(note + 1); if (note_name + checknote->n_namesz >= (const char *)note_end || strncmp(note_vendor, note_name, checknote->n_namesz) != 0) goto nextnote; if (cb(note, cb_arg, &res)) goto ret; nextnote: note = (const Elf_Note *)((const char *)(note + 1) + roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE)); } retf: res = FALSE; ret: free(buf, M_TEMP); return (res); } struct brandnote_cb_arg { Elf_Brandnote *brandnote; int32_t *osrel; }; static boolean_t brandnote_cb(const Elf_Note *note, void *arg0, boolean_t *res) { struct brandnote_cb_arg *arg; arg = arg0; /* * Fetch the osreldate for binary from the ELF OSABI-note if * necessary. */ *res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 && arg->brandnote->trans_osrel != NULL ? arg->brandnote->trans_osrel(note, arg->osrel) : TRUE; return (TRUE); } static Elf_Note fctl_note = { .n_namesz = sizeof(FREEBSD_ABI_VENDOR), .n_descsz = sizeof(uint32_t), .n_type = NT_FREEBSD_FEATURE_CTL, }; struct fctl_cb_arg { boolean_t *has_fctl0; uint32_t *fctl0; }; static boolean_t note_fctl_cb(const Elf_Note *note, void *arg0, boolean_t *res) { struct fctl_cb_arg *arg; const Elf32_Word *desc; uintptr_t p; arg = arg0; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; *arg->has_fctl0 = TRUE; *arg->fctl0 = desc[0]; *res = TRUE; return (TRUE); } /* * Try to find the appropriate ABI-note section for checknote, fetch * the osreldate and feature control flags for binary from the ELF * OSABI-note. Only the first page of the image is searched, the same * as for headers. 
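/*
 * In memory, the NT_FREEBSD_FEATURE_CTL note matched by fctl_note above
 * is just the following (values assumed from <sys/elf_common.h>; in
 * practice the bits are edited with elfctl(1) rather than by hand):
 */
#if 0
#include <sys/elf_common.h>
#include <stdint.h>

static const struct {
	uint32_t namesz;	/* sizeof("FreeBSD") == 8 */
	uint32_t descsz;	/* sizeof(uint32_t) == 4 */
	uint32_t type;		/* NT_FREEBSD_FEATURE_CTL */
	char	 name[8];	/* "FreeBSD" with the NUL */
	uint32_t desc;		/* e.g. NT_FREEBSD_FCTL_ASLR_DISABLE */
} fctl_example = {
	8, 4, NT_FREEBSD_FEATURE_CTL, "FreeBSD",
	NT_FREEBSD_FCTL_ASLR_DISABLE
};
#endif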
*/ static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote, int32_t *osrel, boolean_t *has_fctl0, uint32_t *fctl0) { const Elf_Phdr *phdr; const Elf_Ehdr *hdr; struct brandnote_cb_arg b_arg; struct fctl_cb_arg f_arg; int i, j; hdr = (const Elf_Ehdr *)imgp->image_header; phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); b_arg.brandnote = brandnote; b_arg.osrel = osrel; f_arg.has_fctl0 = has_fctl0; f_arg.fctl0 = fctl0; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb, &b_arg)) { for (j = 0; j < hdr->e_phnum; j++) { if (phdr[j].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &fctl_note, FREEBSD_ABI_VENDOR, &phdr[j], note_fctl_cb, &f_arg)) break; } return (TRUE); } } return (FALSE); } /* * Tell kern_execve.c about it, with a little help from the linker. */ static struct execsw __elfN(execsw) = { .ex_imgact = __CONCAT(exec_, __elfN(imgact)), .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); static vm_prot_t __elfN(trans_prot)(Elf_Word flags) { vm_prot_t prot; prot = 0; if (flags & PF_X) prot |= VM_PROT_EXECUTE; if (flags & PF_W) prot |= VM_PROT_WRITE; if (flags & PF_R) prot |= VM_PROT_READ; #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) if (i386_read_exec && (flags & PF_R)) prot |= VM_PROT_EXECUTE; #endif return (prot); } static Elf_Word __elfN(untrans_prot)(vm_prot_t prot) { Elf_Word flags; flags = 0; if (prot & VM_PROT_EXECUTE) flags |= PF_X; if (prot & VM_PROT_READ) flags |= PF_R; if (prot & VM_PROT_WRITE) flags |= PF_W; return (flags); } void __elfN(stackgap)(struct image_params *imgp, uintptr_t *stack_base) { uintptr_t range, rbase, gap; int pct; pct = __elfN(aslr_stack_gap); if (pct == 0) return; if (pct > 50) pct = 50; range = imgp->eff_stack_sz * pct / 100; arc4rand(&rbase, sizeof(rbase), 0); gap = rbase % range; gap &= ~(sizeof(u_long) - 1); *stack_base -= gap; } diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 1135f91e0510..2d8381894a7c 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -1,1159 +1,1160 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include dtrace_fork_func_t dtrace_fasttrap_fork; #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int"); #ifndef _SYS_SYSPROTO_H_ struct fork_args { int dummy; }; #endif /* ARGSUSED */ int sys_fork(struct thread *td, struct fork_args *uap) { struct fork_req fr; int error, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } /* ARGUSED */ int sys_pdfork(struct thread *td, struct pdfork_args *uap) { struct fork_req fr; int error, fd, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFPROCDESC; fr.fr_pidp = &pid; fr.fr_pd_fd = &fd; fr.fr_pd_flags = uap->flags; AUDIT_ARG_FFLAGS(uap->flags); /* * It is necessary to return fd by reference because 0 is a valid file * descriptor number, and the child needs to be able to distinguish * itself from the parent using the return value. */ error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; error = copyout(&fd, uap->fdp, sizeof(fd)); } return (error); } /* ARGSUSED */ int sys_vfork(struct thread *td, struct vfork_args *uap) { struct fork_req fr; int error, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } int sys_rfork(struct thread *td, struct rfork_args *uap) { struct fork_req fr; int error, pid; /* Don't allow kernel-only flags. 
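 *
 * RFSPAWN, validated just below, must appear alone: it selects the
 * vfork-style RFFDG | RFPROC | RFPPWAIT | RFMEM combination and, via
 * FR2_DROPSIG_CAUGHT, resets caught signal dispositions in the child,
 * which is what a posix_spawn(3)-style spawner wants.  An illustrative
 * userland call, not part of this change:
 *
 *	pid = rfork(RFSPAWN);
 *	if (pid == 0) {
 *		(void)execve(path, argv, envp);
 *		_exit(127);	-- reached only if execve() fails
 *	}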
*/ if ((uap->flags & RFKERNELONLY) != 0) return (EINVAL); /* RFSPAWN must not appear with others */ if ((uap->flags & RFSPAWN) != 0 && uap->flags != RFSPAWN) return (EINVAL); AUDIT_ARG_FFLAGS(uap->flags); bzero(&fr, sizeof(fr)); if ((uap->flags & RFSPAWN) != 0) { fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM; fr.fr_flags2 = FR2_DROPSIG_CAUGHT; } else { fr.fr_flags = uap->flags; } fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } int __exclusive_cache_line nprocs = 1; /* process 0 */ int lastpid = 0; SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, "Last used PID"); /* * Random component to lastpid generation. We mix in a random factor to make * it a little harder to predict. We sanity check the modulus value to avoid * doing it in critical paths. Don't let it be too small or we pointlessly * waste randomness entropy, and don't let it be impossibly large. Using a * modulus that is too big causes a LOT more process table scans and slows * down fork processing as the pidchecked caching is defeated. */ static int randompid = 0; static int sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) { int error, pid; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error != 0) return(error); sx_xlock(&allproc_lock); pid = randompid; error = sysctl_handle_int(oidp, &pid, 0, req); if (error == 0 && req->newptr != NULL) { if (pid == 0) randompid = 0; else if (pid == 1) /* generate a random PID modulus between 100 and 1123 */ randompid = 100 + arc4random() % 1024; else if (pid < 0 || pid > pid_max - 100) /* out of range */ randompid = pid_max - 100; else if (pid < 100) /* Make it reasonable */ randompid = 100; else randompid = pid; } sx_xunlock(&allproc_lock); return (error); } SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, sysctl_kern_randompid, "I", "Random PID modulus. Special values: 0: disable, 1: choose random value"); extern bitstr_t proc_id_pidmap; extern bitstr_t proc_id_grpidmap; extern bitstr_t proc_id_sessidmap; extern bitstr_t proc_id_reapmap; /* * Find an unused process ID * * If RFHIGHPID is set (used during system boot), do not allocate * low-numbered pids. */ static int fork_findpid(int flags) { pid_t result; int trypid, random; /* * Avoid calling arc4random with procid_lock held. */ random = 0; if (__predict_false(randompid)) random = arc4random() % randompid; mtx_lock(&procid_lock); trypid = lastpid + 1; if (flags & RFHIGHPID) { if (trypid < 10) trypid = 10; } else { trypid += random; } retry: if (trypid >= pid_max) trypid = 2; bit_ffc_at(&proc_id_pidmap, trypid, pid_max, &result); if (result == -1) { KASSERT(trypid != 2, ("unexpectedly ran out of IDs")); trypid = 2; goto retry; } if (bit_test(&proc_id_grpidmap, result) || bit_test(&proc_id_sessidmap, result) || bit_test(&proc_id_reapmap, result)) { trypid = result + 1; goto retry; } /* * RFHIGHPID does not mess with the lastpid counter during boot. */ if ((flags & RFHIGHPID) == 0) lastpid = result; bit_set(&proc_id_pidmap, result); mtx_unlock(&procid_lock); return (result); } static int fork_norfproc(struct thread *td, int flags) { int error; struct proc *p1; KASSERT((flags & RFPROC) == 0, ("fork_norfproc called with RFPROC set")); p1 = td->td_proc; /* * Quiesce other threads if necessary. If RFMEM is not specified we * must ensure that other threads do not concurrently create a second * process sharing the vmspace, see vmspace_unshare(). 
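 *
 * Concretely: without RFMEM, vm_forkproc() ends up in
 * vmspace_unshare() to give this process a private copy of its address
 * space, and a sibling thread racing through here could otherwise
 * create a second process still referencing the old shared vmspace.
 * thread_single(SINGLE_BOUNDARY) parks the other threads at a safe
 * point for the duration and is undone at the "fail" label below.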
*/ if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS && ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) { PROC_LOCK(p1); if (thread_single(p1, SINGLE_BOUNDARY)) { PROC_UNLOCK(p1); return (ERESTART); } PROC_UNLOCK(p1); } error = vm_forkproc(td, NULL, NULL, NULL, flags); if (error) goto fail; /* * Close all file descriptors. */ if (flags & RFCFDG) { struct filedesc *fdtmp; struct pwddesc *pdtmp; pdtmp = pdinit(td->td_proc->p_pd, false); fdtmp = fdinit(td->td_proc->p_fd, false, NULL); pdescfree(td); fdescfree(td); p1->p_fd = fdtmp; p1->p_pd = pdtmp; } /* * Unshare file descriptors (from parent). */ if (flags & RFFDG) { fdunshare(td); pdunshare(td); } fail: if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS && ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) { PROC_LOCK(p1); thread_single_end(p1, SINGLE_BOUNDARY); PROC_UNLOCK(p1); } return (error); } static void do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2, struct vmspace *vm2, struct file *fp_procdesc) { struct proc *p1, *pptr; struct filedesc *fd; struct filedesc_to_leader *fdtol; struct pwddesc *pd; struct sigacts *newsigacts; p1 = td->td_proc; PROC_LOCK(p1); bcopy(&p1->p_startcopy, &p2->p_startcopy, __rangeof(struct proc, p_startcopy, p_endcopy)); pargs_hold(p2->p_args); PROC_UNLOCK(p1); bzero(&p2->p_startzero, __rangeof(struct proc, p_startzero, p_endzero)); /* Tell the prison that we exist. */ prison_proc_hold(p2->p_ucred->cr_prison); p2->p_state = PRS_NEW; /* protect against others */ p2->p_pid = fork_findpid(fr->fr_flags); AUDIT_ARG_PID(p2->p_pid); sx_xlock(&allproc_lock); LIST_INSERT_HEAD(&allproc, p2, p_list); allproc_gen++; sx_xunlock(&allproc_lock); sx_xlock(PIDHASHLOCK(p2->p_pid)); LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); sx_xunlock(PIDHASHLOCK(p2->p_pid)); tidhash_add(td2); /* * Malloc things while we don't hold any locks. */ if (fr->fr_flags & RFSIGSHARE) newsigacts = NULL; else newsigacts = sigacts_alloc(); /* * Copy filedesc. */ if (fr->fr_flags & RFCFDG) { pd = pdinit(p1->p_pd, false); fd = fdinit(p1->p_fd, false, NULL); fdtol = NULL; } else if (fr->fr_flags & RFFDG) { if (fr->fr_flags2 & FR2_SHARE_PATHS) pd = pdshare(p1->p_pd); else pd = pdcopy(p1->p_pd); fd = fdcopy(p1->p_fd); fdtol = NULL; } else { if (fr->fr_flags2 & FR2_SHARE_PATHS) pd = pdcopy(p1->p_pd); else pd = pdshare(p1->p_pd); fd = fdshare(p1->p_fd); if (p1->p_fdtol == NULL) p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL, p1->p_leader); if ((fr->fr_flags & RFTHREAD) != 0) { /* * Shared file descriptor table, and shared * process leaders. */ fdtol = p1->p_fdtol; FILEDESC_XLOCK(p1->p_fd); fdtol->fdl_refcount++; FILEDESC_XUNLOCK(p1->p_fd); } else { /* * Shared file descriptor table, and different * process leaders. */ fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p1->p_fd, p2); } } /* * Make a proc table entry for the new process. * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ PROC_LOCK(p2); PROC_LOCK(p1); bzero(&td2->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td2->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name)); td2->td_sigstk = td->td_sigstk; td2->td_flags = TDF_INMEM; td2->td_lend_user_pri = PRI_MAX; #ifdef VIMAGE td2->td_vnet = NULL; td2->td_vnet_lpush = NULL; #endif /* * Allow the scheduler to initialize the child. 
*/ thread_lock(td); sched_fork(td, td2); thread_unlock(td); /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. */ p2->p_flag = P_INMEM; p2->p_flag2 = p1->p_flag2 & (P2_ASLR_DISABLE | P2_ASLR_ENABLE | P2_ASLR_IGNSTART | P2_NOTRACE | P2_NOTRACE_EXEC | P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE | P2_TRAPCAP | - P2_STKGAP_DISABLE | P2_STKGAP_DISABLE_EXEC | P2_NO_NEW_PRIVS); + P2_STKGAP_DISABLE | P2_STKGAP_DISABLE_EXEC | P2_NO_NEW_PRIVS | + P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC); p2->p_swtick = ticks; if (p1->p_flag & P_PROFIL) startprofclock(p2); if (fr->fr_flags & RFSIGSHARE) { p2->p_sigacts = sigacts_hold(p1->p_sigacts); } else { sigacts_copy(newsigacts, p1->p_sigacts); p2->p_sigacts = newsigacts; if ((fr->fr_flags2 & (FR2_DROPSIG_CAUGHT | FR2_KPROC)) != 0) { mtx_lock(&p2->p_sigacts->ps_mtx); if ((fr->fr_flags2 & FR2_DROPSIG_CAUGHT) != 0) sig_drop_caught(p2); if ((fr->fr_flags2 & FR2_KPROC) != 0) p2->p_sigacts->ps_flag |= PS_NOCLDWAIT; mtx_unlock(&p2->p_sigacts->ps_mtx); } } if (fr->fr_flags & RFTSIGZMB) p2->p_sigparent = RFTSIGNUM(fr->fr_flags); else if (fr->fr_flags & RFLINUXTHPN) p2->p_sigparent = SIGUSR1; else p2->p_sigparent = SIGCHLD; if ((fr->fr_flags2 & FR2_KPROC) != 0) { p2->p_flag |= P_SYSTEM | P_KPROC; td2->td_pflags |= TDP_KTHREAD; } p2->p_textvp = p1->p_textvp; p2->p_fd = fd; p2->p_fdtol = fdtol; p2->p_pd = pd; if (p1->p_flag2 & P2_INHERIT_PROTECTED) { p2->p_flag |= P_PROTECTED; p2->p_flag2 |= P2_INHERIT_PROTECTED; } /* * p_limit is copy-on-write. Bump its refcount. */ lim_fork(p1, p2); thread_cow_get_proc(td2, p2); pstats_fork(p1->p_stats, p2->p_stats); PROC_UNLOCK(p1); PROC_UNLOCK(p2); /* Bump references to the text vnode (for procfs). */ if (p2->p_textvp) vrefact(p2->p_textvp); /* * Set up linkage for kernel based threading. */ if ((fr->fr_flags & RFTHREAD) != 0) { mtx_lock(&ppeers_lock); p2->p_peers = p1->p_peers; p1->p_peers = p2; p2->p_leader = p1->p_leader; mtx_unlock(&ppeers_lock); PROC_LOCK(p1->p_leader); if ((p1->p_leader->p_flag & P_WEXIT) != 0) { PROC_UNLOCK(p1->p_leader); /* * The task leader is exiting, so process p1 is * going to be killed shortly. Since p1 obviously * isn't dead yet, we know that the leader is either * sending SIGKILL's to all the processes in this * task or is sleeping waiting for all the peers to * exit. We let p1 complete the fork, but we need * to go ahead and kill the new process p2 since * the task leader may not get a chance to send * SIGKILL to it. We leave it on the list so that * the task leader will wait for this new process * to commit suicide. */ PROC_LOCK(p2); kern_psignal(p2, SIGKILL); PROC_UNLOCK(p2); } else PROC_UNLOCK(p1->p_leader); } else { p2->p_peers = NULL; p2->p_leader = p2; } sx_xlock(&proctree_lock); PGRP_LOCK(p1->p_pgrp); PROC_LOCK(p2); PROC_LOCK(p1); /* * Preserve some more flags in subprocess. P_PROFIL has already * been preserved. */ p2->p_flag |= p1->p_flag & P_SUGID; td2->td_pflags |= (td->td_pflags & (TDP_ALTSTACK | TDP_SIGFASTBLOCK)) | TDP_FORKING; SESS_LOCK(p1->p_session); if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) p2->p_flag |= P_CONTROLT; SESS_UNLOCK(p1->p_session); if (fr->fr_flags & RFPPWAIT) p2->p_flag |= P_PPWAIT; p2->p_pgrp = p1->p_pgrp; LIST_INSERT_AFTER(p1, p2, p_pglist); PGRP_UNLOCK(p1->p_pgrp); LIST_INIT(&p2->p_children); LIST_INIT(&p2->p_orphans); callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0); TAILQ_INIT(&p2->p_kqtim_stop); /* * This begins the section where we must prevent the parent * from being swapped. 
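 *
 * (_PHOLD() below bumps p1->p_lock, which keeps the parent from being
 *  swapped out and its struct proc from being reclaimed while the
 *  child is linked into the process tree; the matching _PRELE()
 *  appears once the new process is fully constructed.)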
*/ _PHOLD(p1); PROC_UNLOCK(p1); /* * Attach the new process to its parent. * * If RFNOWAIT is set, the newly created process becomes a child * of init. This effectively disassociates the child from the * parent. */ if ((fr->fr_flags & RFNOWAIT) != 0) { pptr = p1->p_reaper; p2->p_reaper = pptr; } else { p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ? p1 : p1->p_reaper; pptr = p1; } p2->p_pptr = pptr; p2->p_oppid = pptr->p_pid; LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); LIST_INIT(&p2->p_reaplist); LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling); if (p2->p_reaper == p1 && p1 != initproc) { p2->p_reapsubtree = p2->p_pid; proc_id_set_cond(PROC_ID_REAP, p2->p_pid); } sx_xunlock(&proctree_lock); /* Inform accounting that we have forked. */ p2->p_acflag = AFORK; PROC_UNLOCK(p2); #ifdef KTRACE ktrprocfork(p1, p2); #endif /* * Finish creating the child process. It will return via a different * execution path later. (ie: directly into user mode) */ vm_forkproc(td, p2, td2, vm2, fr->fr_flags); if (fr->fr_flags == (RFFDG | RFPROC)) { VM_CNT_INC(v_forks); VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { VM_CNT_INC(v_vforks); VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (p1 == &proc0) { VM_CNT_INC(v_kthreads); VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else { VM_CNT_INC(v_rforks); VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } /* * Associate the process descriptor with the process before anything * can happen that might cause that process to need the descriptor. * However, don't do this until after fork(2) can no longer fail. */ if (fr->fr_flags & RFPROCDESC) procdesc_new(p2, fr->fr_pd_flags); /* * Both processes are set up, now check if any loadable modules want * to adjust anything. */ EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags); /* * Set the child start time and mark the process as being complete. */ PROC_LOCK(p2); PROC_LOCK(p1); microuptime(&p2->p_stats->p_start); PROC_SLOCK(p2); p2->p_state = PRS_NORMAL; PROC_SUNLOCK(p2); #ifdef KDTRACE_HOOKS /* * Tell the DTrace fasttrap provider about the new process so that any * tracepoints inherited from the parent can be removed. We have to do * this only after p_state is PRS_NORMAL since the fasttrap module will * use pfind() later on. */ if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork) dtrace_fasttrap_fork(p1, p2); #endif if (fr->fr_flags & RFPPWAIT) { td->td_pflags |= TDP_RFPPWAIT; td->td_rfppwait_p = p2; td->td_dbgflags |= TDB_VFORK; } PROC_UNLOCK(p2); /* * Tell any interested parties about the new process. */ knote_fork(p1->p_klist, p2->p_pid); /* * Now can be swapped. */ _PRELE(p1); PROC_UNLOCK(p1); SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags); if (fr->fr_flags & RFPROCDESC) { procdesc_finit(p2->p_procdesc, fp_procdesc); fdrop(fp_procdesc, td); } /* * Speculative check for PTRACE_FORK. PTRACE_FORK is not * synced with forks in progress so it is OK if we miss it * if being set atm. */ if ((p1->p_ptevents & PTRACE_FORK) != 0) { sx_xlock(&proctree_lock); PROC_LOCK(p2); /* * p1->p_ptevents & p1->p_pptr are protected by both * process and proctree locks for modifications, * so owning proctree_lock allows the race-free read. */ if ((p1->p_ptevents & PTRACE_FORK) != 0) { /* * Arrange for debugger to receive the fork event. 
* * We can report PL_FLAG_FORKED regardless of * P_FOLLOWFORK settings, but it does not make a sense * for runaway child. */ td->td_dbgflags |= TDB_FORK; td->td_dbg_forked = p2->p_pid; td2->td_dbgflags |= TDB_STOPATFORK; proc_set_traced(p2, true); CTR2(KTR_PTRACE, "do_fork: attaching to new child pid %d: oppid %d", p2->p_pid, p2->p_oppid); proc_reparent(p2, p1->p_pptr, false); } PROC_UNLOCK(p2); sx_xunlock(&proctree_lock); } racct_proc_fork_done(p2); if ((fr->fr_flags & RFSTOPPED) == 0) { if (fr->fr_pidp != NULL) *fr->fr_pidp = p2->p_pid; /* * If RFSTOPPED not requested, make child runnable and * add to run queue. */ thread_lock(td2); TD_SET_CAN_RUN(td2); sched_add(td2, SRQ_BORING); } else { *fr->fr_procp = p2; } } void fork_rfppwait(struct thread *td) { struct proc *p, *p2; MPASS(td->td_pflags & TDP_RFPPWAIT); p = td->td_proc; /* * Preserve synchronization semantics of vfork. If * waiting for child to exec or exit, fork set * P_PPWAIT on child, and there we sleep on our proc * (in case of exit). * * Do it after the ptracestop() above is finished, to * not block our debugger until child execs or exits * to finish vfork wait. */ td->td_pflags &= ~TDP_RFPPWAIT; p2 = td->td_rfppwait_p; again: PROC_LOCK(p2); while (p2->p_flag & P_PPWAIT) { PROC_LOCK(p); if (thread_suspend_check_needed()) { PROC_UNLOCK(p2); thread_suspend_check(0); PROC_UNLOCK(p); goto again; } else { PROC_UNLOCK(p); } cv_timedwait(&p2->p_pwait, &p2->p_mtx, hz); } PROC_UNLOCK(p2); if (td->td_dbgflags & TDB_VFORK) { PROC_LOCK(p); if (p->p_ptevents & PTRACE_VFORK) ptracestop(td, SIGTRAP, NULL); td->td_dbgflags &= ~TDB_VFORK; PROC_UNLOCK(p); } } int fork1(struct thread *td, struct fork_req *fr) { struct proc *p1, *newproc; struct thread *td2; struct vmspace *vm2; struct ucred *cred; struct file *fp_procdesc; vm_ooffset_t mem_charged; int error, nprocs_new; static int curfail; static struct timeval lastfail; int flags, pages; flags = fr->fr_flags; pages = fr->fr_pages; if ((flags & RFSTOPPED) != 0) MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL); else MPASS(fr->fr_procp == NULL); /* Check for the undefined or unimplemented flags. */ if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0) return (EINVAL); /* Signal value requires RFTSIGZMB. */ if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0) return (EINVAL); /* Can't copy and clear. */ if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) return (EINVAL); /* Check the validity of the signal number. */ if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG) return (EINVAL); if ((flags & RFPROCDESC) != 0) { /* Can't not create a process yet get a process descriptor. */ if ((flags & RFPROC) == 0) return (EINVAL); /* Must provide a place to put a procdesc if creating one. */ if (fr->fr_pd_fd == NULL) return (EINVAL); /* Check if we are using supported flags. */ if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0) return (EINVAL); } p1 = td->td_proc; /* * Here we don't create a new process, but we divorce * certain parts of a process from itself. */ if ((flags & RFPROC) == 0) { if (fr->fr_procp != NULL) *fr->fr_procp = NULL; else if (fr->fr_pidp != NULL) *fr->fr_pidp = 0; return (fork_norfproc(td, flags)); } fp_procdesc = NULL; newproc = NULL; vm2 = NULL; /* * Increment the nprocs resource before allocations occur. * Although process entries are dynamically created, we still * keep a global limit on the maximum number we will * create. 
There are hard-limits as to the number of processes * that can run, established by the KVA and memory usage for * the process data. * * Don't allow a nonprivileged user to use the last ten * processes; don't let root exceed the limit. */ nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1; if (nprocs_new >= maxproc - 10) { if (priv_check_cred(td->td_ucred, PRIV_MAXPROC) != 0 || nprocs_new >= maxproc) { error = EAGAIN; sx_xlock(&allproc_lock); if (ppsratecheck(&lastfail, &curfail, 1)) { printf("maxproc limit exceeded by uid %u " "(pid %d); see tuning(7) and " "login.conf(5)\n", td->td_ucred->cr_ruid, p1->p_pid); } sx_xunlock(&allproc_lock); goto fail2; } } /* * If required, create a process descriptor in the parent first; we * will abandon it if something goes wrong. We don't finit() until * later. */ if (flags & RFPROCDESC) { error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd, fr->fr_pd_flags, fr->fr_pd_fcaps); if (error != 0) goto fail2; AUDIT_ARG_FD(*fr->fr_pd_fd); } mem_charged = 0; if (pages == 0) pages = kstack_pages; /* Allocate new proc. */ newproc = uma_zalloc(proc_zone, M_WAITOK); td2 = FIRST_THREAD_IN_PROC(newproc); if (td2 == NULL) { td2 = thread_alloc(pages); if (td2 == NULL) { error = ENOMEM; goto fail2; } proc_linkup(newproc, td2); } else { kmsan_thread_alloc(td2); if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) { if (td2->td_kstack != 0) vm_thread_dispose(td2); if (!thread_alloc_stack(td2, pages)) { error = ENOMEM; goto fail2; } } } if ((flags & RFMEM) == 0) { vm2 = vmspace_fork(p1->p_vmspace, &mem_charged); if (vm2 == NULL) { error = ENOMEM; goto fail2; } if (!swap_reserve(mem_charged)) { /* * The swap reservation failed. The accounting * from the entries of the copied vm2 will be * subtracted in vmspace_free(), so force the * reservation there. */ swap_reserve_force(mem_charged); error = ENOMEM; goto fail2; } } else vm2 = NULL; /* * XXX: This is ugly; when we copy resource usage, we need to bump * per-cred resource counters. */ proc_set_cred_init(newproc, td->td_ucred); /* * Initialize resource accounting for the child process. */ error = racct_proc_fork(p1, newproc); if (error != 0) { error = EAGAIN; goto fail1; } #ifdef MAC mac_proc_init(newproc); #endif newproc->p_klist = knlist_alloc(&newproc->p_mtx); STAILQ_INIT(&newproc->p_ktr); /* * Increment the count of procs running with this uid. Don't allow * a nonprivileged user to exceed their current limit. */ cred = td->td_ucred; if (!chgproccnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPROC))) { if (priv_check_cred(cred, PRIV_PROC_LIMIT) != 0) goto fail0; chgproccnt(cred->cr_ruidinfo, 1, 0); } do_fork(td, fr, newproc, td2, vm2, fp_procdesc); return (0); fail0: error = EAGAIN; #ifdef MAC mac_proc_destroy(newproc); #endif racct_proc_exit(newproc); fail1: proc_unset_cred(newproc); fail2: if (vm2 != NULL) vmspace_free(vm2); uma_zfree(proc_zone, newproc); if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) { fdclose(td, fp_procdesc, *fr->fr_pd_fd); fdrop(fp_procdesc, td); } atomic_add_int(&nprocs, -1); pause("fork", hz / 2); return (error); } /* * Handle the return of a child process from fork1(). This function * is called from the MD fork_trampoline() entry point. 
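 *
 * For an ordinary fork()/vfork() child the callout is fork_return()
 * (defined below) with the new thread's trapframe; kernel processes
 * instead arrange, via cpu_fork_kthread_handler(), for their main
 * function to be called here and never return.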
*/ void fork_exit(void (*callout)(void *, struct trapframe *), void *arg, struct trapframe *frame) { struct proc *p; struct thread *td; struct thread *dtd; kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED); td = curthread; p = td->td_proc; KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", td, td_get_sched(td), p->p_pid, td->td_name); sched_fork_exit(td); /* * Processes normally resume in mi_switch() after being * cpu_switch()'ed to, but when children start up they arrive here * instead, so we must do much the same things as mi_switch() would. */ if ((dtd = PCPU_GET(deadthread))) { PCPU_SET(deadthread, NULL); thread_stash(dtd); } thread_unlock(td); /* * cpu_fork_kthread_handler intercepts this function call to * have this call a non-return function to stay in kernel mode. * initproc has its own fork handler, but it does return. */ KASSERT(callout != NULL, ("NULL callout in fork_exit")); callout(arg, frame); /* * Check if a kernel thread misbehaved and returned from its main * function. */ if (p->p_flag & P_KPROC) { printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", td->td_name, p->p_pid); kthread_exit(); } mtx_assert(&Giant, MA_NOTOWNED); if (p->p_sysent->sv_schedtail != NULL) (p->p_sysent->sv_schedtail)(td); td->td_pflags &= ~TDP_FORKING; } /* * Simplified back end of syscall(), used when returning from fork() * directly into user mode. This function is passed in to fork_exit() * as the first parameter and is called when returning to a new * userland process. */ void fork_return(struct thread *td, struct trapframe *frame) { struct proc *p; p = td->td_proc; if (td->td_dbgflags & TDB_STOPATFORK) { PROC_LOCK(p); if ((p->p_flag & P_TRACED) != 0) { /* * Inform the debugger if one is still present. */ td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP; ptracestop(td, SIGSTOP, NULL); td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX); } else { /* * ... otherwise clear the request. */ td->td_dbgflags &= ~TDB_STOPATFORK; } PROC_UNLOCK(p); } else if (p->p_flag & P_TRACED || td->td_dbgflags & TDB_BORN) { /* * This is the start of a new thread in a traced * process. Report a system call exit event. */ PROC_LOCK(p); td->td_dbgflags |= TDB_SCX; if ((p->p_ptevents & PTRACE_SCX) != 0 || (td->td_dbgflags & TDB_BORN) != 0) ptracestop(td, SIGTRAP, NULL); td->td_dbgflags &= ~(TDB_SCX | TDB_BORN); PROC_UNLOCK(p); } /* * If the prison was killed mid-fork, die along with it. */ if (!prison_isalive(td->td_ucred->cr_prison)) exit1(td, 0, SIGKILL); userret(td, frame); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) ktrsysret(SYS_fork, 0, 0); #endif } diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c index 68fa4bc0c3ac..53626caa0fd9 100644 --- a/sys/kern/kern_procctl.c +++ b/sys/kern/kern_procctl.c @@ -1,888 +1,964 @@ /*- * Copyright (c) 2014 John Baldwin * Copyright (c) 2014, 2016 The FreeBSD Foundation * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int protect_setchild(struct thread *td, struct proc *p, int flags) { PROC_LOCK_ASSERT(p, MA_OWNED); if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0) return (0); if (flags & PPROT_SET) { p->p_flag |= P_PROTECTED; if (flags & PPROT_INHERIT) p->p_flag2 |= P2_INHERIT_PROTECTED; } else { p->p_flag &= ~P_PROTECTED; p->p_flag2 &= ~P2_INHERIT_PROTECTED; } return (1); } static int protect_setchildren(struct thread *td, struct proc *top, int flags) { struct proc *p; int ret; p = top; ret = 0; sx_assert(&proctree_lock, SX_LOCKED); for (;;) { ret |= protect_setchild(td, p, flags); PROC_UNLOCK(p); /* * If this process has children, descend to them next, * otherwise do any siblings, and if done with this level, * follow back up the tree (but not past top). */ if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) { PROC_LOCK(p); return (ret); } if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } PROC_LOCK(p); } } static int protect_set(struct thread *td, struct proc *p, int flags) { int error, ret; switch (PPROT_OP(flags)) { case PPROT_SET: case PPROT_CLEAR: break; default: return (EINVAL); } if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0) return (EINVAL); error = priv_check(td, PRIV_VM_MADV_PROTECT); if (error) return (error); if (flags & PPROT_DESCEND) ret = protect_setchildren(td, p, flags); else ret = protect_setchild(td, p, flags); if (ret == 0) return (EPERM); return (0); } static int reap_acquire(struct thread *td, struct proc *p) { sx_assert(&proctree_lock, SX_XLOCKED); if (p != curproc) return (EPERM); if ((p->p_treeflag & P_TREE_REAPER) != 0) return (EBUSY); p->p_treeflag |= P_TREE_REAPER; /* * We do not reattach existing children and the whole tree * under them to us, since p->p_reaper already seen them. 
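 *
 * A userland supervisor adopts the reaper role for itself with a NULL
 * data argument (sys_procctl() rejects anything else); afterwards,
 * orphaned descendants reparent to it instead of init(8), and
 * PROC_REAP_STATUS / PROC_REAP_GETPIDS enumerate them.  A sketch:
 *
 *	if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
 *		err(1, "PROC_REAP_ACQUIRE");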
*/ return (0); } static int reap_release(struct thread *td, struct proc *p) { sx_assert(&proctree_lock, SX_XLOCKED); if (p != curproc) return (EPERM); if (p == initproc) return (EINVAL); if ((p->p_treeflag & P_TREE_REAPER) == 0) return (EINVAL); reaper_abandon_children(p, false); return (0); } static int reap_status(struct thread *td, struct proc *p, struct procctl_reaper_status *rs) { struct proc *reap, *p2, *first_p; sx_assert(&proctree_lock, SX_LOCKED); bzero(rs, sizeof(*rs)); if ((p->p_treeflag & P_TREE_REAPER) == 0) { reap = p->p_reaper; } else { reap = p; rs->rs_flags |= REAPER_STATUS_OWNED; } if (reap == initproc) rs->rs_flags |= REAPER_STATUS_REALINIT; rs->rs_reaper = reap->p_pid; rs->rs_descendants = 0; rs->rs_children = 0; if (!LIST_EMPTY(&reap->p_reaplist)) { first_p = LIST_FIRST(&reap->p_children); if (first_p == NULL) first_p = LIST_FIRST(&reap->p_reaplist); rs->rs_pid = first_p->p_pid; LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) { if (proc_realparent(p2) == reap) rs->rs_children++; rs->rs_descendants++; } } else { rs->rs_pid = -1; } return (0); } static int reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp) { struct proc *reap, *p2; struct procctl_reaper_pidinfo *pi, *pip; u_int i, n; int error; sx_assert(&proctree_lock, SX_LOCKED); PROC_UNLOCK(p); reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p; n = i = 0; error = 0; LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) n++; sx_unlock(&proctree_lock); if (rp->rp_count < n) n = rp->rp_count; pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK); sx_slock(&proctree_lock); LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) { if (i == n) break; pip = &pi[i]; bzero(pip, sizeof(*pip)); pip->pi_pid = p2->p_pid; pip->pi_subtree = p2->p_reapsubtree; pip->pi_flags = REAPER_PIDINFO_VALID; if (proc_realparent(p2) == reap) pip->pi_flags |= REAPER_PIDINFO_CHILD; if ((p2->p_treeflag & P_TREE_REAPER) != 0) pip->pi_flags |= REAPER_PIDINFO_REAPER; i++; } sx_sunlock(&proctree_lock); error = copyout(pi, rp->rp_pids, i * sizeof(*pi)); free(pi, M_TEMP); sx_slock(&proctree_lock); PROC_LOCK(p); return (error); } static void reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi, struct procctl_reaper_kill *rk, int *error) { int error1; PROC_LOCK(p2); error1 = p_cansignal(td, p2, rk->rk_sig); if (error1 == 0) { pksignal(p2, rk->rk_sig, ksi); rk->rk_killed++; *error = error1; } else if (*error == ESRCH) { rk->rk_fpid = p2->p_pid; *error = error1; } PROC_UNLOCK(p2); } struct reap_kill_tracker { struct proc *parent; TAILQ_ENTRY(reap_kill_tracker) link; }; TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker); static void reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2) { struct reap_kill_tracker *t; t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK); t->parent = p2; TAILQ_INSERT_TAIL(tracker, t, link); } static int reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk) { struct proc *reap, *p2; ksiginfo_t ksi; struct reap_kill_tracker_head tracker; struct reap_kill_tracker *t; int error; sx_assert(&proctree_lock, SX_LOCKED); if (IN_CAPABILITY_MODE(td)) return (ECAPMODE); if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG || (rk->rk_flags & ~(REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags & (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) == (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) return (EINVAL); PROC_UNLOCK(p); reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? 
p->p_reaper : p; ksiginfo_init(&ksi); ksi.ksi_signo = rk->rk_sig; ksi.ksi_code = SI_USER; ksi.ksi_pid = td->td_proc->p_pid; ksi.ksi_uid = td->td_ucred->cr_ruid; error = ESRCH; rk->rk_killed = 0; rk->rk_fpid = -1; if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) { for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL; p2 = LIST_NEXT(p2, p_sibling)) { reap_kill_proc(td, p2, &ksi, rk, &error); /* * Do not end the loop on error, signal * everything we can. */ } } else { TAILQ_INIT(&tracker); reap_kill_sched(&tracker, reap); while ((t = TAILQ_FIRST(&tracker)) != NULL) { MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0); TAILQ_REMOVE(&tracker, t, link); for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL; p2 = LIST_NEXT(p2, p_reapsibling)) { if (t->parent == reap && (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 && p2->p_reapsubtree != rk->rk_subtree) continue; if ((p2->p_treeflag & P_TREE_REAPER) != 0) reap_kill_sched(&tracker, p2); reap_kill_proc(td, p2, &ksi, rk, &error); } free(t, M_TEMP); } } PROC_LOCK(p); return (error); } static int trace_ctl(struct thread *td, struct proc *p, int state) { PROC_LOCK_ASSERT(p, MA_OWNED); /* * Ktrace changes p_traceflag from or to zero under the * process lock, so the test does not need to acquire ktrace * mutex. */ if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0) return (EBUSY); switch (state) { case PROC_TRACE_CTL_ENABLE: if (td->td_proc != p) return (EPERM); p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC); break; case PROC_TRACE_CTL_DISABLE_EXEC: p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE; break; case PROC_TRACE_CTL_DISABLE: if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) { KASSERT((p->p_flag2 & P2_NOTRACE) != 0, ("dandling P2_NOTRACE_EXEC")); if (td->td_proc != p) return (EPERM); p->p_flag2 &= ~P2_NOTRACE_EXEC; } else { p->p_flag2 |= P2_NOTRACE; } break; default: return (EINVAL); } return (0); } static int trace_status(struct thread *td, struct proc *p, int *data) { if ((p->p_flag2 & P2_NOTRACE) != 0) { KASSERT((p->p_flag & P_TRACED) == 0, ("%d traced but tracing disabled", p->p_pid)); *data = -1; } else if ((p->p_flag & P_TRACED) != 0) { *data = p->p_pptr->p_pid; } else { *data = 0; } return (0); } static int trapcap_ctl(struct thread *td, struct proc *p, int state) { PROC_LOCK_ASSERT(p, MA_OWNED); switch (state) { case PROC_TRAPCAP_CTL_ENABLE: p->p_flag2 |= P2_TRAPCAP; break; case PROC_TRAPCAP_CTL_DISABLE: p->p_flag2 &= ~P2_TRAPCAP; break; default: return (EINVAL); } return (0); } static int trapcap_status(struct thread *td, struct proc *p, int *data) { *data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE : PROC_TRAPCAP_CTL_DISABLE; return (0); } static int no_new_privs_ctl(struct thread *td, struct proc *p, int state) { PROC_LOCK_ASSERT(p, MA_OWNED); if (state != PROC_NO_NEW_PRIVS_ENABLE) return (EINVAL); p->p_flag2 |= P2_NO_NEW_PRIVS; return (0); } static int no_new_privs_status(struct thread *td, struct proc *p, int *data) { *data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ? 
PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE; return (0); } static int protmax_ctl(struct thread *td, struct proc *p, int state) { PROC_LOCK_ASSERT(p, MA_OWNED); switch (state) { case PROC_PROTMAX_FORCE_ENABLE: p->p_flag2 &= ~P2_PROTMAX_DISABLE; p->p_flag2 |= P2_PROTMAX_ENABLE; break; case PROC_PROTMAX_FORCE_DISABLE: p->p_flag2 |= P2_PROTMAX_DISABLE; p->p_flag2 &= ~P2_PROTMAX_ENABLE; break; case PROC_PROTMAX_NOFORCE: p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE); break; default: return (EINVAL); } return (0); } static int protmax_status(struct thread *td, struct proc *p, int *data) { int d; switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) { case 0: d = PROC_PROTMAX_NOFORCE; break; case P2_PROTMAX_ENABLE: d = PROC_PROTMAX_FORCE_ENABLE; break; case P2_PROTMAX_DISABLE: d = PROC_PROTMAX_FORCE_DISABLE; break; } if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ) d |= PROC_PROTMAX_ACTIVE; *data = d; return (0); } static int aslr_ctl(struct thread *td, struct proc *p, int state) { PROC_LOCK_ASSERT(p, MA_OWNED); switch (state) { case PROC_ASLR_FORCE_ENABLE: p->p_flag2 &= ~P2_ASLR_DISABLE; p->p_flag2 |= P2_ASLR_ENABLE; break; case PROC_ASLR_FORCE_DISABLE: p->p_flag2 |= P2_ASLR_DISABLE; p->p_flag2 &= ~P2_ASLR_ENABLE; break; case PROC_ASLR_NOFORCE: p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE); break; default: return (EINVAL); } return (0); } static int aslr_status(struct thread *td, struct proc *p, int *data) { struct vmspace *vm; int d; switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) { case 0: d = PROC_ASLR_NOFORCE; break; case P2_ASLR_ENABLE: d = PROC_ASLR_FORCE_ENABLE; break; case P2_ASLR_DISABLE: d = PROC_ASLR_FORCE_DISABLE; break; } if ((p->p_flag & P_WEXIT) == 0) { _PHOLD(p); PROC_UNLOCK(p); vm = vmspace_acquire_ref(p); if (vm != NULL) { if ((vm->vm_map.flags & MAP_ASLR) != 0) d |= PROC_ASLR_ACTIVE; vmspace_free(vm); } PROC_LOCK(p); _PRELE(p); } *data = d; return (0); } static int stackgap_ctl(struct thread *td, struct proc *p, int state) { PROC_LOCK_ASSERT(p, MA_OWNED); if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE | PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0) return (EINVAL); switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) { case PROC_STACKGAP_ENABLE: if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0) return (EINVAL); break; case PROC_STACKGAP_DISABLE: p->p_flag2 |= P2_STKGAP_DISABLE; break; case 0: break; default: return (EINVAL); } switch (state & (PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) { case PROC_STACKGAP_ENABLE_EXEC: p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC; break; case PROC_STACKGAP_DISABLE_EXEC: p->p_flag2 |= P2_STKGAP_DISABLE_EXEC; break; case 0: break; default: return (EINVAL); } return (0); } static int stackgap_status(struct thread *td, struct proc *p, int *data) { PROC_LOCK_ASSERT(p, MA_OWNED); *data = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE : PROC_STACKGAP_ENABLE; *data |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ? 
PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC; return (0); } +static int +wxmap_ctl(struct thread *td, struct proc *p, int state) +{ + struct vmspace *vm; + vm_map_t map; + + PROC_LOCK_ASSERT(p, MA_OWNED); + if ((p->p_flag & P_WEXIT) != 0) + return (ESRCH); + + switch (state) { + case PROC_WX_MAPPINGS_PERMIT: + p->p_flag2 |= P2_WXORX_DISABLE; + _PHOLD(p); + PROC_UNLOCK(p); + vm = vmspace_acquire_ref(p); + if (vm != NULL) { + map = &vm->vm_map; + vm_map_lock(map); + map->flags &= ~MAP_WXORX; + vm_map_unlock(map); + vmspace_free(vm); + } + PROC_LOCK(p); + _PRELE(p); + break; + case PROC_WX_MAPPINGS_DISALLOW_EXEC: + p->p_flag2 |= P2_WXORX_ENABLE_EXEC; + break; + default: + return (EINVAL); + } + + return (0); +} + +static int +wxmap_status(struct thread *td, struct proc *p, int *data) +{ + struct vmspace *vm; + int d; + + PROC_LOCK_ASSERT(p, MA_OWNED); + if ((p->p_flag & P_WEXIT) != 0) + return (ESRCH); + + d = 0; + if ((p->p_flag2 & P2_WXORX_DISABLE) != 0) + d |= PROC_WX_MAPPINGS_PERMIT; + if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) + d |= PROC_WX_MAPPINGS_DISALLOW_EXEC; + _PHOLD(p); + PROC_UNLOCK(p); + vm = vmspace_acquire_ref(p); + if (vm != NULL) { + if ((vm->vm_map.flags & MAP_WXORX) != 0) + d |= PROC_WXORX_ENFORCE; + vmspace_free(vm); + } + PROC_LOCK(p); + _PRELE(p); + *data = d; + return (0); +} + #ifndef _SYS_SYSPROTO_H_ struct procctl_args { idtype_t idtype; id_t id; int com; void *data; }; #endif /* ARGSUSED */ int sys_procctl(struct thread *td, struct procctl_args *uap) { void *data; union { struct procctl_reaper_status rs; struct procctl_reaper_pids rp; struct procctl_reaper_kill rk; } x; int error, error1, flags, signum; if (uap->com >= PROC_PROCCTL_MD_MIN) return (cpu_procctl(td, uap->idtype, uap->id, uap->com, uap->data)); switch (uap->com) { case PROC_ASLR_CTL: case PROC_PROTMAX_CTL: case PROC_SPROTECT: case PROC_STACKGAP_CTL: case PROC_TRACE_CTL: case PROC_TRAPCAP_CTL: case PROC_NO_NEW_PRIVS_CTL: + case PROC_WXMAP_CTL: error = copyin(uap->data, &flags, sizeof(flags)); if (error != 0) return (error); data = &flags; break; case PROC_REAP_ACQUIRE: case PROC_REAP_RELEASE: if (uap->data != NULL) return (EINVAL); data = NULL; break; case PROC_REAP_STATUS: data = &x.rs; break; case PROC_REAP_GETPIDS: error = copyin(uap->data, &x.rp, sizeof(x.rp)); if (error != 0) return (error); data = &x.rp; break; case PROC_REAP_KILL: error = copyin(uap->data, &x.rk, sizeof(x.rk)); if (error != 0) return (error); data = &x.rk; break; case PROC_ASLR_STATUS: case PROC_PROTMAX_STATUS: case PROC_STACKGAP_STATUS: case PROC_TRACE_STATUS: case PROC_TRAPCAP_STATUS: case PROC_NO_NEW_PRIVS_STATUS: + case PROC_WXMAP_STATUS: data = &flags; break; case PROC_PDEATHSIG_CTL: error = copyin(uap->data, &signum, sizeof(signum)); if (error != 0) return (error); data = &signum; break; case PROC_PDEATHSIG_STATUS: data = &signum; break; default: return (EINVAL); } error = kern_procctl(td, uap->idtype, uap->id, uap->com, data); switch (uap->com) { case PROC_REAP_STATUS: if (error == 0) error = copyout(&x.rs, uap->data, sizeof(x.rs)); break; case PROC_REAP_KILL: error1 = copyout(&x.rk, uap->data, sizeof(x.rk)); if (error == 0) error = error1; break; case PROC_ASLR_STATUS: case PROC_PROTMAX_STATUS: case PROC_STACKGAP_STATUS: case PROC_TRACE_STATUS: case PROC_TRAPCAP_STATUS: case PROC_NO_NEW_PRIVS_STATUS: + case PROC_WXMAP_STATUS: if (error == 0) error = copyout(&flags, uap->data, sizeof(flags)); break; case PROC_PDEATHSIG_STATUS: if (error == 0) error = copyout(&signum, uap->data, sizeof(signum)); break; } 
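/*
 * Illustrative userland use of the new W^X knobs (a sketch, not part
 * of the kernel sources): a JIT that needs pages that are writable
 * and executable at the same time can permit them for itself before
 * mapping, while a hardened launcher can request enforcement across
 * the next execve() with PROC_WX_MAPPINGS_DISALLOW_EXEC.
 *
 *	int op, st;
 *
 *	op = PROC_WX_MAPPINGS_PERMIT;
 *	if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &op) == -1)
 *		err(1, "PROC_WXMAP_CTL");
 *	if (procctl(P_PID, getpid(), PROC_WXMAP_STATUS, &st) == 0 &&
 *	    (st & PROC_WXORX_ENFORCE) != 0)
 *		warnx("W^X still enforced on the current vmspace");
 */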
return (error); } static int kern_procctl_single(struct thread *td, struct proc *p, int com, void *data) { PROC_LOCK_ASSERT(p, MA_OWNED); switch (com) { case PROC_ASLR_CTL: return (aslr_ctl(td, p, *(int *)data)); case PROC_ASLR_STATUS: return (aslr_status(td, p, data)); case PROC_SPROTECT: return (protect_set(td, p, *(int *)data)); case PROC_PROTMAX_CTL: return (protmax_ctl(td, p, *(int *)data)); case PROC_PROTMAX_STATUS: return (protmax_status(td, p, data)); case PROC_STACKGAP_CTL: return (stackgap_ctl(td, p, *(int *)data)); case PROC_STACKGAP_STATUS: return (stackgap_status(td, p, data)); case PROC_REAP_ACQUIRE: return (reap_acquire(td, p)); case PROC_REAP_RELEASE: return (reap_release(td, p)); case PROC_REAP_STATUS: return (reap_status(td, p, data)); case PROC_REAP_GETPIDS: return (reap_getpids(td, p, data)); case PROC_REAP_KILL: return (reap_kill(td, p, data)); case PROC_TRACE_CTL: return (trace_ctl(td, p, *(int *)data)); case PROC_TRACE_STATUS: return (trace_status(td, p, data)); case PROC_TRAPCAP_CTL: return (trapcap_ctl(td, p, *(int *)data)); case PROC_TRAPCAP_STATUS: return (trapcap_status(td, p, data)); case PROC_NO_NEW_PRIVS_CTL: return (no_new_privs_ctl(td, p, *(int *)data)); case PROC_NO_NEW_PRIVS_STATUS: return (no_new_privs_status(td, p, data)); + case PROC_WXMAP_CTL: + return (wxmap_ctl(td, p, *(int *)data)); + case PROC_WXMAP_STATUS: + return (wxmap_status(td, p, data)); default: return (EINVAL); } } int kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data) { struct pgrp *pg; struct proc *p; int error, first_error, ok; int signum; bool tree_locked; switch (com) { case PROC_ASLR_CTL: case PROC_ASLR_STATUS: case PROC_PROTMAX_CTL: case PROC_PROTMAX_STATUS: case PROC_REAP_ACQUIRE: case PROC_REAP_RELEASE: case PROC_REAP_STATUS: case PROC_REAP_GETPIDS: case PROC_REAP_KILL: case PROC_STACKGAP_CTL: case PROC_STACKGAP_STATUS: case PROC_TRACE_STATUS: case PROC_TRAPCAP_STATUS: case PROC_PDEATHSIG_CTL: case PROC_PDEATHSIG_STATUS: case PROC_NO_NEW_PRIVS_CTL: case PROC_NO_NEW_PRIVS_STATUS: + case PROC_WXMAP_CTL: + case PROC_WXMAP_STATUS: if (idtype != P_PID) return (EINVAL); } switch (com) { case PROC_PDEATHSIG_CTL: signum = *(int *)data; p = td->td_proc; if ((id != 0 && id != p->p_pid) || (signum != 0 && !_SIG_VALID(signum))) return (EINVAL); PROC_LOCK(p); p->p_pdeathsig = signum; PROC_UNLOCK(p); return (0); case PROC_PDEATHSIG_STATUS: p = td->td_proc; if (id != 0 && id != p->p_pid) return (EINVAL); PROC_LOCK(p); *(int *)data = p->p_pdeathsig; PROC_UNLOCK(p); return (0); } switch (com) { case PROC_SPROTECT: case PROC_REAP_STATUS: case PROC_REAP_GETPIDS: case PROC_REAP_KILL: case PROC_TRACE_CTL: case PROC_TRAPCAP_CTL: case PROC_NO_NEW_PRIVS_CTL: sx_slock(&proctree_lock); tree_locked = true; break; case PROC_REAP_ACQUIRE: case PROC_REAP_RELEASE: sx_xlock(&proctree_lock); tree_locked = true; break; case PROC_ASLR_CTL: case PROC_ASLR_STATUS: case PROC_PROTMAX_CTL: case PROC_PROTMAX_STATUS: case PROC_STACKGAP_CTL: case PROC_STACKGAP_STATUS: case PROC_TRACE_STATUS: case PROC_TRAPCAP_STATUS: case PROC_NO_NEW_PRIVS_STATUS: + case PROC_WXMAP_CTL: + case PROC_WXMAP_STATUS: tree_locked = false; break; default: return (EINVAL); } switch (idtype) { case P_PID: p = pfind(id); if (p == NULL) { error = ESRCH; break; } error = p_cansee(td, p); if (error == 0) error = kern_procctl_single(td, p, com, data); PROC_UNLOCK(p); break; case P_PGID: /* * Attempt to apply the operation to all members of the * group. Ignore processes in the group that can't be * seen. 
Ignore errors so long as at least one process is * able to complete the request successfully. */ pg = pgfind(id); if (pg == NULL) { error = ESRCH; break; } PGRP_UNLOCK(pg); ok = 0; first_error = 0; LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) { PROC_UNLOCK(p); continue; } error = kern_procctl_single(td, p, com, data); PROC_UNLOCK(p); if (error == 0) ok = 1; else if (first_error == 0) first_error = error; } if (ok) error = 0; else if (first_error != 0) error = first_error; else /* * Was not able to see any processes in the * process group. */ error = ESRCH; break; default: error = EINVAL; break; } if (tree_locked) sx_unlock(&proctree_lock); return (error); } diff --git a/sys/sys/proc.h b/sys/sys/proc.h index ddc8392481aa..3d01aae2d06b 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -1,1301 +1,1303 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD$ */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #ifdef _KERNEL #include #endif #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #include #endif #include #include #include #include #include /* Machine-dependent proc substruct. */ #ifdef _KERNEL #include #endif /* * One structure allocated per session. 
* * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. */ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ struct mtx pg_mtx; /* Mutex to protect members */ int pg_flags; /* (m) PGRP_ flags */ }; #define PGRP_ORPHANED 0x00000001 /* Group is orphaned */ /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. */ }; /*- * Description of a process. * * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. * * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * kx- only accessed by curthread and by debugger * l - the attaching proc or attaching proc parent * m - Giant * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9) * t - thread lock * u - process stat lock * w - process timer lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * z - zombie threads lock * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. 
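 *
 * For example, p_flag2 below carries the (c) key: both the fork-time
 * flag inheritance in kern_fork.c and the procctl(2) control paths in
 * kern_procctl.c take the proc mutex before reading or modifying it,
 * as the new wxmap_ctl()/wxmap_status() handlers above also assert.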
*/ struct cpuset; struct filecaps; struct filemon; struct kaioinfo; struct kaudit_record; struct kcov_info; struct kdtrace_proc; struct kdtrace_thread; struct kmsan_td; struct kq_timer_cb_data; struct mqueue_notifier; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct socket; struct syscall_args; struct td_sched; struct thread; struct trapframe; struct turnstile; struct vm_map; struct vm_map_entry; struct epoch_tracker; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. */ struct rusage_ext { uint64_t rux_runtime; /* (cu) Real time. */ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. */ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ union { TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ struct thread *td_zombie; /* Zombie list linkage */ }; TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct domainset_ref td_domain; /* (a) NUMA policy */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. */ u_char td_allocdomain; /* (b) NUMA domain backing this struct thread. */ struct kmsan_td *td_kmsan; /* (k) KMSAN state */ /* Cleared during fork1() */ #define td_startzero td_flags int td_flags; /* (t) TDF_* flags. */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_pflags2; /* (k) Private thread (TDP2_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ const void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ short td_locks; /* (k) Debug: count of non-spin locks */ short td_rw_rlocks; /* (k) Count of rwlock read locks. */ short td_sx_slocks; /* (k) Count of sx shared locks. 
	short		td_lk_slocks;	/* (k) Count of lockmgr shared locks. */
	short		td_stopsched;	/* (k) Scheduler stopped. */
	struct turnstile *td_blocked;	/* (t) Lock thread is blocked on. */
	const char	*td_lockname;	/* (t) Name of lock blocked on. */
	LIST_HEAD(, turnstile) td_contested;	/* (q) Contested locks. */
	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
	int		td_intr_nesting_level; /* (k) Interrupt recursion. */
	int		td_pinned;	/* (k) Temporary cpu pin count. */
	struct ucred	*td_realucred;	/* (k) Reference to credentials. */
	struct ucred	*td_ucred;	/* (k) Used credentials, temporarily
					   switchable. */
	struct plimit	*td_limit;	/* (k) Resource limits. */
	int		td_slptick;	/* (t) Time at sleep. */
	int		td_blktick;	/* (t) Time spent blocked. */
	int		td_swvoltick;	/* (t) Time at last SW_VOL switch. */
	int		td_swinvoltick;	/* (t) Time at last SW_INVOL switch. */
	u_int		td_cow;		/* (*) Number of copy-on-write faults */
	struct rusage	td_ru;		/* (t) rusage information. */
	struct rusage_ext td_rux;	/* (t) Internal rusage information. */
	uint64_t	td_incruntime;	/* (t) Cpu ticks to transfer to proc. */
	uint64_t	td_runtime;	/* (t) How many cpu ticks we've run. */
	u_int		td_pticks;	/* (t) Statclock hits for profiling */
	u_int		td_sticks;	/* (t) Statclock hits in system mode. */
	u_int		td_iticks;	/* (t) Statclock hits in intr mode. */
	u_int		td_uticks;	/* (t) Statclock hits in user mode. */
	int		td_intrval;	/* (t) Return value for sleepq. */
	sigset_t	td_oldsigmask;	/* (k) Saved mask from pre sigpause. */
	volatile u_int	td_generation;	/* (k) For detection of preemption */
	stack_t		td_sigstk;	/* (k) Stack ptr and on-stack flag. */
	int		td_xsig;	/* (c) Signal for ptrace */
	u_long		td_profil_addr;	/* (k) Temporary addr until AST. */
	u_int		td_profil_ticks; /* (k) Temporary ticks until AST. */
	char		td_name[MAXCOMLEN + 1];	/* (*) Thread name. */
	struct file	*td_fpop;	/* (k) file referencing cdev under op */
	int		td_dbgflags;	/* (c) Userland debugger flags */
	siginfo_t	td_si;		/* (c) For debugger or core file */
	int		td_ng_outbound;	/* (k) Thread entered ng from above. */
	struct osd	td_osd;		/* (k) Object specific data. */
	struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
	pid_t		td_dbg_forked;	/* (c) Child pid for debugger. */
	struct vnode	*td_vp_reserved; /* (k) Preallocated vnode. */
	u_int		td_no_sleeping;	/* (k) Sleeping disabled count. */
	void		*td_su;		/* (k) FFS SU private */
	sbintime_t	td_sleeptimo;	/* (t) Sleep timeout. */
	int		td_rtcgen;	/* (s) rtc_generation of abs. sleep */
	int		td_errno;	/* (k) Error from last syscall. */
	size_t		td_vslock_sz;	/* (k) amount of vslock-ed space */
	struct kcov_info *td_kcov_info;	/* (*) Kernel code coverage data */
	u_int		td_ucredref;	/* (k) references on td_realucred */
#define	td_endzero td_sigmask

/* Copied during fork1() or create_thread(). */
#define	td_startcopy td_endzero
	sigset_t	td_sigmask;	/* (c) Current signal mask. */
	u_char		td_rqindex;	/* (t) Run queue index. */
	u_char		td_base_pri;	/* (t) Thread base kernel priority. */
	u_char		td_priority;	/* (t) Thread active priority. */
	u_char		td_pri_class;	/* (t) Scheduling class. */
	u_char		td_user_pri;	/* (t) User pri from estcpu and nice. */
	u_char		td_base_user_pri; /* (t) Base user pri */
	uintptr_t	td_rb_list;	/* (k) Robust list head. */
	uintptr_t	td_rbp_list;	/* (k) Robust priv list head. */
	uintptr_t	td_rb_inact;	/* (k) Current in-action mutex loc. */
	struct syscall_args td_sa;	/* (kx) Syscall parameters. Copied on
					   fork for child tracing. */
	void		*td_sigblock_ptr; /* (k) uptr for fast sigblock. */
	uint32_t	td_sigblock_val;  /* (k) fast sigblock value read at
					     td_sigblock_ptr on kern entry */
#define	td_endcopy td_pcb

/*
 * Fields that must be manually set in fork1() or create_thread()
 * or already have been set in the allocator, constructor, etc.
 */
	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
	enum td_states {
		TDS_INACTIVE = 0x0,
		TDS_INHIBITED,
		TDS_CAN_RUN,
		TDS_RUNQ,
		TDS_RUNNING
	} td_state;			/* (t) thread state */
	/* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */
	union {
		register_t	tdu_retval[2];
		off_t		tdu_off;
	} td_uretoff;			/* (k) Syscall aux returns. */
#define td_retval	td_uretoff.tdu_retval
	u_int		td_cowgen;	/* (k) Generation of COW pointers. */
	/* LP64 hole */
	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
	int		td_kstack_pages; /* (a) Size of the kstack. */
	volatile u_int	td_critnest;	/* (k*) Critical section nest level. */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	struct kaudit_record *td_ar;	/* (k) Active audit record, if any. */
	struct lpohead	td_lprof[2];	/* (a) lock profiling objects. */
	struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
	struct vnet	*td_vnet;	/* (k) Effective vnet. */
	const char	*td_vnet_lpush;	/* (k) Debugging vnet push / pop. */
	struct trapframe *td_intr_frame; /* (k) Frame of the current irq */
	struct proc	*td_rfppwait_p;	/* (k) The vforked child */
	struct vm_page	**td_ma;	/* (k) uio pages held */
	int		td_ma_cnt;	/* (k) size of *td_ma */
	/* LP64 hole */
	void		*td_emuldata;	/* Emulator state data */
	int		td_lastcpu;	/* (t) Last cpu we were on. */
	int		td_oncpu;	/* (t) Which cpu we are on. */
	void		*td_lkpi_task;	/* LinuxKPI task struct pointer */
	int		td_pmcpend;
	void		*td_coredump;	/* (c) coredump request. */
	off_t		td_ktr_io_lim;	/* (k) limit for ktrace file size */
#ifdef EPOCH_TRACE
	SLIST_HEAD(, epoch_tracker) td_epochs;
#endif
};

struct thread0_storage {
	struct thread t0st_thread;
	uint64_t t0st_sched[10];
};

struct mtx *thread_lock_block(struct thread *);
void	thread_lock_block_wait(struct thread *);
void	thread_lock_set(struct thread *, struct mtx *);
void	thread_lock_unblock(struct thread *, struct mtx *);
#define	THREAD_LOCK_ASSERT(td, type)					\
	mtx_assert((td)->td_lock, (type))

#define	THREAD_LOCK_BLOCKED_ASSERT(td, type)				\
do {									\
	struct mtx *__m = (td)->td_lock;				\
	if (__m != &blocked_lock)					\
		mtx_assert(__m, (type));				\
} while (0)

#ifdef INVARIANTS
#define	THREAD_LOCKPTR_ASSERT(td, lock)					\
do {									\
	struct mtx *__m;						\
	__m = (td)->td_lock;						\
	KASSERT(__m == (lock),						\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)				\
do {									\
	struct mtx *__m;						\
	__m = (td)->td_lock;						\
	KASSERT(__m == (lock) || __m == &blocked_lock,			\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td) do {						\
	KASSERT(SCHEDULER_STOPPED_TD(td) || (td)->td_locks > 0,		\
	    ("thread %p owns no locks", (td)));				\
	(td)->td_locks--;						\
} while (0)
#else
#define	THREAD_LOCKPTR_ASSERT(td, lock)
#define	THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)

#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#endif

/*
 * Flags kept in td_flags:
 * To change these you MUST have the scheduler lock.
 */
#define	TDF_BORROWING	0x00000001 /* Thread is borrowing pri from another. */
#define	TDF_INPANIC	0x00000002 /* Caused a panic, let it drive crashdump. */
#define	TDF_INMEM	0x00000004 /* Thread's stack is in memory. */
#define	TDF_SINTR	0x00000008 /* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x00000010 /* Timing out during sleep. */
#define	TDF_IDLETD	0x00000020 /* This is a per-CPU idle thread. */
#define	TDF_CANSWAP	0x00000040 /* Thread can be swapped. */
#define	TDF_UNUSED80	0x00000080 /* unused. */
#define	TDF_KTH_SUSP	0x00000100 /* kthread is suspended */
#define	TDF_ALLPROCSUSP	0x00000200 /* suspended by SINGLE_ALLPROC */
#define	TDF_BOUNDARY	0x00000400 /* Thread suspended at user boundary */
#define	TDF_ASTPENDING	0x00000800 /* Thread has some asynchronous events. */
#define	TDF_KQTICKLED	0x00001000 /* AST drain kqueue taskqueue */
#define	TDF_SBDRY	0x00002000 /* Stop only on usermode boundary. */
#define	TDF_UPIBLOCKED	0x00004000 /* Thread blocked on user PI mutex. */
#define	TDF_NEEDSUSPCHK	0x00008000 /* Thread may need to suspend. */
#define	TDF_NEEDRESCHED	0x00010000 /* Thread needs to yield. */
#define	TDF_NEEDSIGCHK	0x00020000 /* Thread may need signal delivery. */
#define	TDF_NOLOAD	0x00040000 /* Ignore during load avg calculations. */
#define	TDF_SERESTART	0x00080000 /* ERESTART on stop attempts. */
#define	TDF_THRWAKEUP	0x00100000 /* Libthr thread must not suspend itself. */
#define	TDF_SEINTR	0x00200000 /* EINTR on stop attempts. */
#define	TDF_SWAPINREQ	0x00400000 /* Swapin request due to wakeup. */
#define	TDF_UNUSED23	0x00800000 /* --available-- */
#define	TDF_SCHED0	0x01000000 /* Reserved for scheduler private use */
#define	TDF_SCHED1	0x02000000 /* Reserved for scheduler private use */
#define	TDF_SCHED2	0x04000000 /* Reserved for scheduler private use */
#define	TDF_SCHED3	0x08000000 /* Reserved for scheduler private use */
#define	TDF_ALRMPEND	0x10000000 /* Pending SIGVTALRM needs to be posted. */
#define	TDF_PROFPEND	0x20000000 /* Pending SIGPROF needs to be posted. */
#define	TDF_MACPEND	0x40000000 /* AST-based MAC event pending. */

/* Userland debug flags */
#define	TDB_SUSPEND	0x00000001 /* Thread is suspended by debugger */
#define	TDB_XSIG	0x00000002 /* Thread is exchanging signal under trace */
#define	TDB_USERWR	0x00000004 /* Debugger modified memory or registers */
#define	TDB_SCE		0x00000008 /* Thread performs syscall enter */
#define	TDB_SCX		0x00000010 /* Thread performs syscall exit */
#define	TDB_EXEC	0x00000020 /* TDB_SCX from exec(2) family */
#define	TDB_FORK	0x00000040 /* TDB_SCX from fork(2) that created new
				      process */
#define	TDB_STOPATFORK	0x00000080 /* Stop at the return from fork (child
				      only) */
#define	TDB_CHILD	0x00000100 /* New child indicator for ptrace() */
#define	TDB_BORN	0x00000200 /* New LWP indicator for ptrace() */
#define	TDB_EXIT	0x00000400 /* Exiting LWP indicator for ptrace() */
#define	TDB_VFORK	0x00000800 /* vfork indicator for ptrace() */
#define	TDB_FSTP	0x00001000 /* The thread is PT_ATTACH leader */
#define	TDB_STEP	0x00002000 /* (x86) PSL_T set for PT_STEP */
#define	TDB_SSWITCH	0x00004000 /* Suspended in ptracestop */
#define	TDB_COREDUMPRQ	0x00008000 /* Coredump request */

/*
 * "Private" flags kept in td_pflags:
 * These are only written by curthread and thus need no locking.
 */
#define	TDP_OLDMASK	0x00000001 /* Need to restore mask after suspend. */
#define	TDP_INKTR	0x00000002 /* Thread is currently in KTR code. */
#define	TDP_INKTRACE	0x00000004 /* Thread is currently in KTRACE code. */
#define	TDP_BUFNEED	0x00000008 /* Do not recurse into the buf flush */
#define	TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define	TDP_ALTSTACK	0x00000020 /* Have alternate signal stack. */
#define	TDP_DEADLKTREAT	0x00000040 /* Lock acquisition - deadlock treatment. */
#define	TDP_NOFAULTING	0x00000080 /* Do not handle page faults. */
#define	TDP_SIGFASTBLOCK 0x00000100 /* Fast sigblock active */
#define	TDP_OWEUPC	0x00000200 /* Call addupc() at next AST. */
#define	TDP_ITHREAD	0x00000400 /* Thread is an interrupt thread. */
#define	TDP_SYNCIO	0x00000800 /* Local override, disable async i/o. */
#define	TDP_SCHED1	0x00001000 /* Reserved for scheduler private use */
#define	TDP_SCHED2	0x00002000 /* Reserved for scheduler private use */
#define	TDP_SCHED3	0x00004000 /* Reserved for scheduler private use */
#define	TDP_SCHED4	0x00008000 /* Reserved for scheduler private use */
#define	TDP_GEOM	0x00010000 /* Settle GEOM before finishing syscall */
#define	TDP_SOFTDEP	0x00020000 /* Stuck processing softdep worklist */
#define	TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */
#define	TDP_WAKEUP	0x00080000 /* Don't sleep in umtx cond_wait */
#define	TDP_INBDFLUSH	0x00100000 /* Already in BO_BDFLUSH, do not recurse */
#define	TDP_KTHREAD	0x00200000 /* This is an official kernel thread */
#define	TDP_CALLCHAIN	0x00400000 /* Capture thread's callchain */
#define	TDP_IGNSUSP	0x00800000 /* Permission to ignore the MNTK_SUSPEND* */
#define	TDP_AUDITREC	0x01000000 /* Audit record pending on thread */
#define	TDP_RFPPWAIT	0x02000000 /* Handle RFPPWAIT on syscall exit */
#define	TDP_RESETSPUR	0x04000000 /* Reset spurious page fault history. */
#define	TDP_NERRNO	0x08000000 /* Last errno is already in td_errno */
#define	TDP_UIOHELD	0x10000000 /* Current uio has pages held in td_ma */
#define	TDP_FORKING	0x20000000 /* Thread is being created through fork() */
#define	TDP_EXECVMSPC	0x40000000 /* Execve destroyed old vmspace */
#define	TDP_SIGFASTPENDING 0x80000000 /* Pending signal due to sigfastblock */

#define	TDP2_SBPAGES	0x00000001 /* Owns sbusy on some pages */
#define	TDP2_COMPAT32RB	0x00000002 /* compat32 ABI for robust lists */
#define	TDP2_ACCT	0x00000004 /* Doing accounting */
#define	TDP2_SIGWAIT	0x00000008 /* Ignore ignored signals */

/*
 * Reasons that the current thread can not be run yet.
 * More than one may apply.
 */
#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
#define	TDI_SLEEPING	0x0002	/* Actually asleep! (tricky). */
#define	TDI_SWAPPED	0x0004	/* Stack not in mem.  Bad juju if run. */
#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */

#define	TD_IS_SLEEPING(td)	((td)->td_inhibitors & TDI_SLEEPING)
#define	TD_ON_SLEEPQ(td)	((td)->td_wchan != NULL)
#define	TD_IS_SUSPENDED(td)	((td)->td_inhibitors & TDI_SUSPENDED)
#define	TD_IS_SWAPPED(td)	((td)->td_inhibitors & TDI_SWAPPED)
#define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
#define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)

#ifdef _KERNEL
#define	TD_GET_STATE(td)	atomic_load_int(&(td)->td_state)
#else
#define	TD_GET_STATE(td)	((td)->td_state)
#endif
#define	TD_IS_RUNNING(td)	(TD_GET_STATE(td) == TDS_RUNNING)
#define	TD_ON_RUNQ(td)		(TD_GET_STATE(td) == TDS_RUNQ)
#define	TD_CAN_RUN(td)		(TD_GET_STATE(td) == TDS_CAN_RUN)
#define	TD_IS_INHIBITED(td)	(TD_GET_STATE(td) == TDS_INHIBITED)
#define	TD_ON_UPILOCK(td)	((td)->td_flags & TDF_UPIBLOCKED)
#define	TD_IS_IDLETHREAD(td)	((td)->td_flags & TDF_IDLETD)

#define	TD_CAN_ABORT(td)	(TD_ON_SLEEPQ((td)) &&			\
				    ((td)->td_flags & TDF_SINTR) != 0)

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep"  :	\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" :		\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
"suspended" : \ ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \ ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \ ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding") #define TD_SET_INHIB(td, inhib) do { \ TD_SET_STATE(td, TDS_INHIBITED); \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ TD_SET_STATE(td, TDS_CAN_RUN); \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #ifdef _KERNEL #define TD_SET_STATE(td, state) atomic_store_int(&(td)->td_state, state) #else #define TD_SET_STATE(td, state) (td)->td_state = state #endif #define TD_SET_RUNNING(td) TD_SET_STATE(td, TDS_RUNNING) #define TD_SET_RUNQ(td) TD_SET_STATE(td, TDS_RUNQ) #define TD_SET_CAN_RUN(td) TD_SET_STATE(td, TDS_CAN_RUN) #define TD_SBDRY_INTR(td) \ (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0) #define TD_SBDRY_ERRNO(td) \ (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART) /* * Process structure. */ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pwddesc *p_pd; /* (b) Cwd, chroot, jail, umask */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Resource limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum p_states { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ struct proc *p_reaper; /* (e) My reaper. */ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct mtx p_statmtx; /* Lock for the stats */ struct mtx p_itimmtx; /* Lock for the virt/prof timers */ struct mtx p_profmtx; /* Lock for the profiling */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ #define p_siglist p_sigqueue.sq_signals pid_t p_oppid; /* (c + e) Real parent pid. */ /* The following fields are all zeroed upon creation in fork. 
/*
 * Process structure.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
	TAILQ_HEAD(, thread) p_threads;	/* (c) all threads. */
	struct mtx	p_slock;	/* process spin lock */
	struct ucred	*p_ucred;	/* (c) Process owner's identity. */
	struct filedesc	*p_fd;		/* (b) Open files. */
	struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
	struct pwddesc	*p_pd;		/* (b) Cwd, chroot, jail, umask */
	struct pstats	*p_stats;	/* (b) Accounting/statistics (CPU). */
	struct plimit	*p_limit;	/* (c) Resource limits. */
	struct callout	p_limco;	/* (c) Limit callout handle */
	struct sigacts	*p_sigacts;	/* (x) Signal actions, state (CPU). */
	int		p_flag;		/* (c) P_* flags. */
	int		p_flag2;	/* (c) P2_* flags. */
	enum p_states {
		PRS_NEW = 0,		/* In creation */
		PRS_NORMAL,		/* threads can be run. */
		PRS_ZOMBIE
	} p_state;			/* (j/c) Process status. */
	pid_t		p_pid;		/* (b) Process identifier. */
	LIST_ENTRY(proc) p_hash;	/* (d) Hash chain. */
	LIST_ENTRY(proc) p_pglist;	/* (g + e) List of processes in pgrp. */
	struct proc	*p_pptr;	/* (c + e) Pointer to parent process. */
	LIST_ENTRY(proc) p_sibling;	/* (e) List of sibling processes. */
	LIST_HEAD(, proc) p_children;	/* (e) Pointer to list of children. */
	struct proc	*p_reaper;	/* (e) My reaper. */
	LIST_HEAD(, proc) p_reaplist;	/* (e) List of my descendants
					   (if I am reaper). */
	LIST_ENTRY(proc) p_reapsibling;	/* (e) List of siblings - descendants of
					   the same reaper. */
	struct mtx	p_mtx;		/* (n) Lock for this struct. */
	struct mtx	p_statmtx;	/* Lock for the stats */
	struct mtx	p_itimmtx;	/* Lock for the virt/prof timers */
	struct mtx	p_profmtx;	/* Lock for the profiling */
	struct ksiginfo *p_ksi;		/* Locked by parent proc lock */
	sigqueue_t	p_sigqueue;	/* (c) Sigs not delivered to a td. */
#define	p_siglist	p_sigqueue.sq_signals
	pid_t		p_oppid;	/* (c + e) Real parent pid. */

/* The following fields are all zeroed upon creation in fork. */
#define	p_startzero	p_vmspace
	struct vmspace	*p_vmspace;	/* (b) Address space. */
	u_int		p_swtick;	/* (c) Tick when swapped in or out. */
	u_int		p_cowgen;	/* (c) Generation of COW pointers. */
	struct itimerval p_realtimer;	/* (c) Alarm timer. */
	struct rusage	p_ru;		/* (a) Exit information. */
	struct rusage_ext p_rux;	/* (cu) Internal resource usage. */
	struct rusage_ext p_crux;	/* (c) Internal child resource usage. */
	int		p_profthreads;	/* (c) Num threads in addupc_task. */
	volatile int	p_exitthreads;	/* (j) Number of threads exiting */
	int		p_traceflag;	/* (o) Kernel trace points. */
	struct ktr_io_params *p_ktrioparms; /* (c + o) Params for ktrace. */
	struct vnode	*p_textvp;	/* (b) Vnode of executable. */
	u_int		p_lock;		/* (c) Proclock (prevent swap) count. */
	struct sigiolst	p_sigiolst;	/* (c) List of sigio sources. */
	int		p_sigparent;	/* (c) Signal to parent on exit. */
	int		p_sig;		/* (n) For core dump/debugger XXX. */
	u_int		p_ptevents;	/* (c + e) ptrace() event mask. */
	struct kaioinfo	*p_aioinfo;	/* (y) ASYNC I/O info. */
	struct thread	*p_singlethread; /* (c + j) If single threading this is it */
	int		p_suspcount;	/* (j) Num threads in suspended mode. */
	struct thread	*p_xthread;	/* (c) Trap thread */
	int		p_boundary_count; /* (j) Num threads at user boundary */
	int		p_pendingcnt;	/* how many signals are pending */
	struct itimers	*p_itimers;	/* (c) POSIX interval timers. */
	struct procdesc	*p_procdesc;	/* (e) Process descriptor, if any. */
	u_int		p_treeflag;	/* (e) P_TREE flags */
	int		p_pendingexits;	/* (c) Count of pending thread exits. */
	struct filemon	*p_filemon;	/* (c) filemon-specific data. */
	int		p_pdeathsig;	/* (c) Signal from parent on exit. */
/* End area that is zeroed on creation. */
#define	p_endzero	p_magic

/* The following fields are all copied upon creation in fork. */
#define	p_startcopy	p_endzero
	u_int		p_magic;	/* (b) Magic number. */
	int		p_osrel;	/* (x) osreldate for the
					   binary (from ELF note, if any) */
	uint32_t	p_fctl0;	/* (x) ABI feature control, ELF note */
	char		p_comm[MAXCOMLEN + 1];	/* (x) Process name. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs	*p_args;	/* (c) Process arguments. */
	rlim_t		p_cpulimit;	/* (c) Current CPU limit in seconds. */
	signed char	p_nice;		/* (c) Process "nice" value. */
	int		p_fibnum;	/* in this routing domain XXX MRT */
	pid_t		p_reapsubtree;	/* (e) Pid of the direct child of the
					   reaper which spawned
					   our subtree. */
	uint64_t	p_elf_flags;	/* (x) ELF flags */
	void		*p_elf_brandinfo; /* (x) Elf_Brandinfo, NULL for
					     non ELF binaries. */
/* End area that is copied on creation. */
#define	p_endcopy	p_xexit
	u_int		p_xexit;	/* (c) Exit code. */
	u_int		p_xsig;		/* (c) Stop/kill sig. */
	struct pgrp	*p_pgrp;	/* (c + e) Pointer to process group. */
	struct knlist	*p_klist;	/* (c) Knotes attached to this proc. */
	int		p_numthreads;	/* (c) Number of threads. */
	struct mdproc	p_md;		/* Any machine-dependent fields. */
	struct callout	p_itcallout;	/* (h + c) Interval timer callout. */
	u_short		p_acflag;	/* (c) Accounting flags. */
	struct proc	*p_peers;	/* (r) */
	struct proc	*p_leader;	/* (b) */
	void		*p_emuldata;	/* (c) Emulator state data. */
	struct label	*p_label;	/* (*) Proc (not subject) MAC label. */
	STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
	LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
	struct kdtrace_proc *p_dtrace;	/* (*) DTrace-specific data. */
	struct cv	p_pwait;	/* (*) wait cv for exit/exec. */
	uint64_t	p_prev_runtime;	/* (c) Resource usage accounting. */
	struct racct	*p_racct;	/* (b) Resource accounting. */
	int		p_throttled;	/* (c) Flag for racct pcpu throttling */
	/*
	 * An orphan is the child that has been re-parented to the
	 * debugger as a result of attaching to it.  Need to keep
	 * track of them for parent to be able to collect the exit
	 * status of what used to be children.
	 */
	LIST_ENTRY(proc) p_orphan;	/* (e) List of orphan processes. */
	LIST_HEAD(, proc) p_orphans;	/* (e) Pointer to list of orphans. */

	TAILQ_HEAD(, kq_timer_cb_data)	p_kqtim_stop;	/* (c) */
};

#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU		(-1)	/* For when we aren't on a CPU. */
#define	NOCPU_OLD	(255)
#define	MAXCPU_OLD	(254)

#define	PROC_SLOCK(p)	mtx_lock_spin(&(p)->p_slock)
#define	PROC_SUNLOCK(p)	mtx_unlock_spin(&(p)->p_slock)
#define	PROC_SLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_slock, (type))

#define	PROC_STATLOCK(p)	mtx_lock_spin(&(p)->p_statmtx)
#define	PROC_STATUNLOCK(p)	mtx_unlock_spin(&(p)->p_statmtx)
#define	PROC_STATLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_statmtx, (type))

#define	PROC_ITIMLOCK(p)	mtx_lock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMUNLOCK(p)	mtx_unlock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_itimmtx, (type))

#define	PROC_PROFLOCK(p)	mtx_lock_spin(&(p)->p_profmtx)
#define	PROC_PROFUNLOCK(p)	mtx_unlock_spin(&(p)->p_profmtx)
#define	PROC_PROFLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_profmtx, (type))
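The spin locks above guard the statistics and timer substructures. A minimal illustrative sketch (not part of the header, helper name hypothetical): p_rux is keyed (cu), i.e. the process stat lock covers it, so a consistent snapshot of the accumulated runtime can be taken under PROC_STATLOCK().

/* Illustrative sketch only: snapshot p_rux under the process stat lock. */
static uint64_t
proc_runtime_snapshot(struct proc *p)
{
	uint64_t runtime;

	PROC_STATLOCK(p);
	runtime = p->p_rux.rux_runtime;
	PROC_STATUNLOCK(p);
	return (runtime);
}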
/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00000001	/* Process may hold a POSIX advisory
					   lock. */
#define	P_CONTROLT	0x00000002	/* Has a controlling terminal. */
#define	P_KPROC		0x00000004	/* Kernel process. */
#define	P_UNUSED3	0x00000008	/* --available-- */
#define	P_PPWAIT	0x00000010	/* Parent is waiting for child to
					   exec/exit. */
#define	P_PROFIL	0x00000020	/* Has started profiling. */
#define	P_STOPPROF	0x00000040	/* Has thread requesting to stop
					   profiling. */
#define	P_HADTHREADS	0x00000080	/* Has had threads (no cleanup
					   shortcuts) */
#define	P_SUGID		0x00000100	/* Had set id privileges since last
					   exec. */
#define	P_SYSTEM	0x00000200	/* System proc: no sigs, stats or
					   swapping. */
#define	P_SINGLE_EXIT	0x00000400	/* Threads suspending should exit,
					   not wait. */
#define	P_TRACED	0x00000800	/* Debugged process being traced. */
#define	P_WAITED	0x00001000	/* Someone is waiting for us. */
#define	P_WEXIT		0x00002000	/* Working on exiting. */
#define	P_EXEC		0x00004000	/* Process called exec. */
#define	P_WKILLED	0x00008000	/* Killed, go to kernel/user boundary
					   ASAP. */
#define	P_CONTINUED	0x00010000	/* Proc has continued from a stopped
					   state. */
#define	P_STOPPED_SIG	0x00020000	/* Stopped due to SIGSTOP/SIGTSTP. */
#define	P_STOPPED_TRACE	0x00040000	/* Stopped because of tracing. */
#define	P_STOPPED_SINGLE 0x00080000	/* Only 1 thread can continue (not to
					   user). */
#define	P_PROTECTED	0x00100000	/* Do not kill on memory overcommit. */
#define	P_SIGEVENT	0x00200000	/* Process pending signals changed. */
#define	P_SINGLE_BOUNDARY 0x00400000	/* Threads should suspend at user
					   boundary. */
#define	P_HWPMC		0x00800000	/* Process is using HWPMCs */
#define	P_JAILED	0x01000000	/* Process is in jail. */
#define	P_TOTAL_STOP	0x02000000	/* Stopped in stop_all_proc. */
#define	P_INEXEC	0x04000000	/* Process is in execve(). */
#define	P_STATCHILD	0x08000000	/* Child process stopped or exited. */
#define	P_INMEM		0x10000000	/* Loaded into memory. */
#define	P_SWAPPINGOUT	0x20000000	/* Process is being swapped out. */
#define	P_SWAPPINGIN	0x40000000	/* Process is being swapped in. */
#define	P_PPTRACE	0x80000000	/* PT_TRACEME by vforked child. */

#define	P_STOPPED	(P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define	P_SHOULDSTOP(p)	((p)->p_flag & P_STOPPED)
#define	P_KILLED(p)	((p)->p_flag & P_WKILLED)

/* These flags are kept in p_flag2. */
#define	P2_INHERIT_PROTECTED	0x00000001	/* New children get
						   P_PROTECTED. */
#define	P2_NOTRACE		0x00000002	/* No ptrace(2) attach or
						   coredumps. */
#define	P2_NOTRACE_EXEC		0x00000004	/* Keep P2_NOPTRACE on
						   exec(2). */
#define	P2_AST_SU		0x00000008	/* Handles SU ast for
						   kthreads. */
#define	P2_PTRACE_FSTP		0x00000010	/* SIGSTOP from PT_ATTACH not
						   yet handled. */
#define	P2_TRAPCAP		0x00000020	/* SIGTRAP on ENOTCAPABLE */
#define	P2_ASLR_ENABLE		0x00000040	/* Force enable ASLR. */
#define	P2_ASLR_DISABLE		0x00000080	/* Force disable ASLR. */
#define	P2_ASLR_IGNSTART	0x00000100	/* Enable ASLR to consume sbrk
						   area. */
#define	P2_PROTMAX_ENABLE	0x00000200	/* Force enable implied
						   PROT_MAX. */
#define	P2_PROTMAX_DISABLE	0x00000400	/* Force disable implied
						   PROT_MAX. */
#define	P2_STKGAP_DISABLE	0x00000800	/* Disable stack gap for
						   MAP_STACK */
#define	P2_STKGAP_DISABLE_EXEC	0x00001000	/* Stack gap disabled
						   after exec */
#define	P2_ITSTOPPED		0x00002000
#define	P2_PTRACEREQ		0x00004000	/* Active ptrace req */
#define	P2_NO_NEW_PRIVS		0x00008000	/* Ignore setuid */
+#define	P2_WXORX_DISABLE	0x00010000	/* WX mappings enabled */
+#define	P2_WXORX_ENABLE_EXEC	0x00020000	/* WXORX enabled after exec */

/* Flags protected by proctree_lock, kept in p_treeflags. */
#define	P_TREE_ORPHANED		0x00000001	/* Reparented, on orphan list */
#define	P_TREE_FIRST_ORPHAN	0x00000002	/* First element of orphan
						   list */
#define	P_TREE_REAPER		0x00000004	/* Reaper of subtree */
#define	P_TREE_GRPEXITED	0x00000008	/* exit1() done with job ctl */

/*
 * These were process status values (p_stat), now they are only used in
 * legacy conversion code.
 */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SLOCK	7		/* Blocked on a lock. */

#define	P_MAGIC		0xbeefface

#ifdef _KERNEL

/* Types and flags for mi_switch(). */
#define	SW_TYPE_MASK		0xff	/* First 8 bits are switch type */
#define	SWT_NONE		0	/* Unspecified switch. */
#define	SWT_PREEMPT		1	/* Switching due to preemption. */
#define	SWT_OWEPREEMPT		2	/* Switching due to owepreempt. */
#define	SWT_TURNSTILE		3	/* Turnstile contention. */
#define	SWT_SLEEPQ		4	/* Sleepq wait. */
#define	SWT_SLEEPQTIMO		5	/* Sleepq timeout wait. */
#define	SWT_RELINQUISH		6	/* yield call. */
#define	SWT_NEEDRESCHED		7	/* NEEDRESCHED was set. */
#define	SWT_IDLE		8	/* Switching from the idle thread. */
#define	SWT_IWAIT		9	/* Waiting for interrupts. */
#define	SWT_SUSPEND		10	/* Thread suspended. */
#define	SWT_REMOTEPREEMPT	11	/* Remote processor preempted. */
#define	SWT_REMOTEWAKEIDLE	12	/* Remote processor preempted idle. */
#define	SWT_COUNT		13	/* Number of switch types. */
/* Flags */
#define	SW_VOL		0x0100		/* Voluntary switch. */
#define	SW_INVOL	0x0200		/* Involuntary switch. */
#define	SW_PREEMPT	0x0400		/* The invol switch is a preemption */

/* How values for thread_single(). */
#define	SINGLE_NO_EXIT	0
#define	SINGLE_EXIT	1
#define	SINGLE_BOUNDARY	2
#define	SINGLE_ALLPROC	3

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_SESSION);
MALLOC_DECLARE(M_SUBPROC);
#endif

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
#define	FIRST_THREAD_IN_PROC(p)	TAILQ_FIRST(&(p)->p_threads)

/*
 * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit
 * in a pid_t, as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		100000
#define	THREAD0_TID	NO_PID
extern pid_t pid_max;

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_WAIT_UNLOCKED(p)	mtx_wait_unlocked(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))

/* Lock and unlock a process group. */
#define	PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define	PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_LOCK(pg);						\
} while (0)
#define	PGRP_UNLOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_UNLOCK(pg);					\
} while (0)

/* Lock and unlock a session. */
#define	SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define	SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/*
 * Non-zero p_lock ensures that:
 * - exit1() is not performed until p_lock reaches zero;
 * - the process's thread stacks are not swapped out if they are
 *   currently resident (P_INMEM).
 *
 * PHOLD() asserts that the process (except the current process) is
 * not exiting, increments p_lock, and swaps thread stacks into memory,
 * if needed.
 * _PHOLD() is the same as PHOLD(), but it expects the process already locked.
 * _PHOLD_LITE() also expects the process locked, but compared with
 * _PHOLD() it only guarantees that exit1() is not executed;
 * faultin() is not called.
 */
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc,		\
	    ("PHOLD of exiting process %p", p));			\
	(p)->p_lock++;							\
	if (((p)->p_flag & P_INMEM) == 0)				\
		faultin((p));						\
} while (0)
#define	_PHOLD_LITE(p) do {						\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc,		\
	    ("PHOLD of exiting process %p", p));			\
	(p)->p_lock++;							\
} while (0)
#define	PROC_ASSERT_HELD(p) do {					\
	KASSERT((p)->p_lock > 0, ("process %p not held", p));		\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	PROC_ASSERT_HELD(p);						\
	(--(p)->p_lock);						\
	if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0)		\
		wakeup(&(p)->p_lock);					\
} while (0)
#define	PROC_ASSERT_NOT_HELD(p) do {					\
	KASSERT((p)->p_lock == 0, ("process %p held", p));		\
} while (0)

#define	PROC_UPDATE_COW(p) do {						\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	(p)->p_cowgen++;						\
} while (0)

/* Check whether a thread is safe to be swapped out. */
#define	thread_safetoswapout(td)	((td)->td_flags & TDF_CANSWAP)
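The canonical use of the hold count is to keep a target process from exiting (and its stacks resident) across an operation that must drop the proc lock. A minimal illustrative sketch (not part of the header); do_something_sleepy() is a hypothetical helper standing in for whatever work needs the process to stay around:

/*
 * Illustrative sketch only: pfind() returns the process locked,
 * _PHOLD() pins it, and PRELE() relocks, drops the hold, and unlocks.
 */
static int
inspect_process(pid_t pid)
{
	struct proc *p;
	int error;

	p = pfind(pid);			/* returned with proc lock held */
	if (p == NULL)
		return (ESRCH);
	_PHOLD(p);			/* proc lock is already held */
	PROC_UNLOCK(p);

	error = do_something_sleepy(p);	/* hypothetical helper; may sleep */

	PRELE(p);
	return (error);
}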
/* Control whether or not it is safe for curthread to sleep. */
#define	THREAD_NO_SLEEPING()	do {					\
	curthread->td_no_sleeping++;					\
	MPASS(curthread->td_no_sleeping > 0);				\
} while (0)

#define	THREAD_SLEEPING_OK()	do {					\
	MPASS(curthread->td_no_sleeping > 0);				\
	curthread->td_no_sleeping--;					\
} while (0)

#define	THREAD_CAN_SLEEP()	((curthread)->td_no_sleeping == 0)
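Because td_no_sleeping is a counter, the brackets nest: an inner region cannot accidentally re-enable sleeping for an outer one. A minimal illustrative sketch (not part of the header):

/* Illustrative sketch only: bracket a region where sleeping is forbidden. */
static void
touch_state_nosleep(void)
{
	THREAD_NO_SLEEPING();
	/* ... code that must not sleep ... */
	MPASS(!THREAD_CAN_SLEEP());
	THREAD_SLEEPING_OK();
}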
#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
#define	PIDHASHLOCK(pid) (&pidhashtbl_lock[((pid) & pidhashlock)])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern struct sx *pidhashtbl_lock;
extern u_long pidhash;
extern u_long pidhashlock;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct mtx procid_lock;
extern struct proc proc0;		/* Process slot for swapper. */
extern struct thread0_storage thread0_st;	/* Primary thread in proc0. */
#define	thread0 (thread0_st.t0st_thread)
extern struct vmspace vmspace0;		/* VM space for proc0. */
extern int hogticks;			/* Limit on kernel cpu hogs. */
extern int lastpid;
extern int nprocs, maxproc;		/* Current and max number of procs. */
extern int maxprocperuid;		/* Max procs per uid. */
extern u_long ps_arg_cache_limit;

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;		/* List of all processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */

extern struct uma_zone *proc_zone;
extern struct uma_zone *pgrp_zone;

struct	proc *pfind(pid_t);		/* Find process by id. */
struct	proc *pfind_any(pid_t);		/* Find (zombie) process by id. */
struct	proc *pfind_any_locked(pid_t pid); /* Find process by id, locked. */
struct	pgrp *pgfind(pid_t);		/* Find process group by id. */
void	pidhash_slockall(void);		/* Shared lock all pid hash lists. */
void	pidhash_sunlockall(void);	/* Shared unlock all pid hash lists. */

struct	fork_req {
	int		fr_flags;
	int		fr_pages;
	int 		*fr_pidp;
	struct proc 	**fr_procp;
	int 		*fr_pd_fd;
	int 		fr_pd_flags;
	struct filecaps	*fr_pd_fcaps;
	int 		fr_flags2;
#define	FR2_DROPSIG_CAUGHT	0x00000001 /* Drop caught non-DFL signals */
#define	FR2_SHARE_PATHS		0x00000002 /* Invert sense of RFFDG for paths */
#define	FR2_KPROC		0x00000004 /* Create a kernel process */
};

/*
 * pget() flags.
 */
#define	PGET_HOLD	0x00001	/* Hold the process. */
#define	PGET_CANSEE	0x00002	/* Check against p_cansee(). */
#define	PGET_CANDEBUG	0x00004	/* Check against p_candebug(). */
#define	PGET_ISCURRENT	0x00008	/* Check that the found process is current. */
#define	PGET_NOTWEXIT	0x00010	/* Check that the process is not in P_WEXIT. */
#define	PGET_NOTINEXEC	0x00020	/* Check that the process is not in P_INEXEC. */
#define	PGET_NOTID	0x00040	/* Do not assume tid if pid > PID_MAX. */

#define	PGET_WANTREAD	(PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT)

int	pget(pid_t pid, int flags, struct proc **pp);

void	ast(struct trapframe *framep);
struct	thread *choosethread(void);
int	cr_cansee(struct ucred *u1, struct ucred *u2);
int	cr_canseesocket(struct ucred *cred, struct socket *so);
int	cr_canseeothergids(struct ucred *u1, struct ucred *u2);
int	cr_canseeotheruids(struct ucred *u1, struct ucred *u2);
int	cr_canseejailproc(struct ucred *u1, struct ucred *u2);
int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
	    struct session *sess);
int	enterthispgrp(struct proc *p, struct pgrp *pgrp);
void	faultin(struct proc *p);
int	fork1(struct thread *, struct fork_req *);
void	fork_rfppwait(struct thread *);
void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);
void	fork_return(struct thread *, struct trapframe *);
int	inferior(struct proc *p);
void	itimer_proc_continue(struct proc *p);
void	kqtimer_proc_continue(struct proc *p);
void	kern_proc_vmmap_resident(struct vm_map *map,
	    struct vm_map_entry *entry, int *resident_count, bool *super);
void	kern_yield(int);
void	kick_proc0(void);
void	killjobc(void);
int	leavepgrp(struct proc *p);
int	maybe_preempt(struct thread *td);
void	maybe_yield(void);
void	mi_switch(int flags);
int	p_candebug(struct thread *td, struct proc *p);
int	p_cansee(struct thread *td, struct proc *p);
int	p_cansched(struct thread *td, struct proc *p);
int	p_cansignal(struct thread *td, struct proc *p, int signum);
int	p_canwait(struct thread *td, struct proc *p);
struct	pargs *pargs_alloc(int len);
void	pargs_drop(struct pargs *pa);
void	pargs_hold(struct pargs *pa);
int	proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb);
int	proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb);
int	proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
void	procinit(void);
int	proc_iterate(int (*cb)(struct proc *, void *), void *cbarg);
void	proc_linkup0(struct proc *p, struct thread *td);
void	proc_linkup(struct proc *p, struct thread *td);
struct proc *proc_realparent(struct proc *child);
void	proc_reap(struct thread *td, struct proc *p, int *status, int options);
void	proc_reparent(struct proc *child, struct proc *newparent, bool set_oppid);
void	proc_add_orphan(struct proc *child, struct proc *parent);
void	proc_set_traced(struct proc *p, bool stop);
void	proc_wkilled(struct proc *p);
struct	pstats *pstats_alloc(void);
void	pstats_fork(struct pstats *src, struct pstats *dst);
void	pstats_free(struct pstats *ps);
void	proc_clear_orphan(struct proc *p);
void	reaper_abandon_children(struct proc *p, bool exiting);
int	securelevel_ge(struct ucred *cr, int level);
int	securelevel_gt(struct ucred *cr, int level);
void	sess_hold(struct session *);
void	sess_release(struct session *);
int	setrunnable(struct thread *, int);
void	setsugid(struct proc *p);
int	should_yield(void);
int	sigonstack(size_t sp);
void	stopevent(struct proc *, u_int, u_int);
struct	thread *tdfind(lwpid_t, pid_t);
void	threadinit(void);
void	tidhash_add(struct thread *);
void	tidhash_remove(struct thread *);
void	cpu_idle(int);
int	cpu_idle_wakeup(int);
extern	void (*cpu_idle_hook)(sbintime_t);	/* Hook to machdep CPU idler. */
void	cpu_switch(struct thread *, struct thread *, struct mtx *);
void	cpu_throw(struct thread *, struct thread *) __dead2;
void	unsleep(struct thread *);
void	userret(struct thread *, struct trapframe *);

void	cpu_exit(struct thread *);
void	exit1(struct thread *, int, int) __dead2;
void	cpu_copy_thread(struct thread *td, struct thread *td0);
bool	cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
int	cpu_fetch_syscall_args(struct thread *td);
void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
void	cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
int	cpu_procctl(struct thread *td, int idtype, id_t id, int com,
	    void *data);
void	cpu_set_syscall_retval(struct thread *, int);
void	cpu_set_upcall(struct thread *, void (*)(void *), void *,
	    stack_t *);
int	cpu_set_user_tls(struct thread *, void *tls_base);
void	cpu_thread_alloc(struct thread *);
void	cpu_thread_clean(struct thread *);
void	cpu_thread_exit(struct thread *);
void	cpu_thread_free(struct thread *);
void	cpu_thread_swapin(struct thread *);
void	cpu_thread_swapout(struct thread *);
struct	thread *thread_alloc(int pages);
int	thread_alloc_stack(struct thread *, int pages);
int	thread_check_susp(struct thread *td, bool sleep);
void	thread_cow_get_proc(struct thread *newtd, struct proc *p);
void	thread_cow_get(struct thread *newtd, struct thread *td);
void	thread_cow_free(struct thread *td);
void	thread_cow_update(struct thread *td);
int	thread_create(struct thread *td, struct rtprio *rtp,
	    int (*initialize_thread)(struct thread *, void *), void *thunk);
void	thread_exit(void) __dead2;
void	thread_free(struct thread *td);
void	thread_link(struct thread *td, struct proc *p);
void	thread_reap_barrier(void);
int	thread_single(struct proc *p, int how);
void	thread_single_end(struct proc *p, int how);
void	thread_stash(struct thread *td);
void	thread_stopped(struct proc *p);
void	childproc_stopped(struct proc *child, int reason);
void	childproc_continued(struct proc *child);
void	childproc_exited(struct proc *child);
void	thread_run_flash(struct thread *td);
int	thread_suspend_check(int how);
bool	thread_suspend_check_needed(void);
void	thread_suspend_switch(struct thread *, struct proc *p);
void	thread_suspend_one(struct thread *td);
void	thread_unlink(struct thread *td);
void	thread_unsuspend(struct proc *p);
void	thread_wait(struct proc *p);

void	stop_all_proc(void);
void	resume_all_proc(void);

static __inline int
curthread_pflags_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags & flags);
	td->td_pflags |= flags;
	return (save);
}

static __inline void
curthread_pflags_restore(int save)
{

	curthread->td_pflags &= save;
}

static __inline int
curthread_pflags2_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags2 & flags);
	td->td_pflags2 |= flags;
	return (save);
}

static __inline void
curthread_pflags2_restore(int save)
{

	curthread->td_pflags2 &= save;
}

static __inline bool
kstack_contains(struct thread *td, vm_offset_t va, size_t len)
{
	return (va >= td->td_kstack && va + len >= va &&
	    va + len <= td->td_kstack + td->td_kstack_pages * PAGE_SIZE);
}

static __inline __pure2 struct td_sched *
td_get_sched(struct thread *td)
{

	return ((struct td_sched *)&td[1]);
}

extern void (*softdep_ast_cleanup)(struct thread *);
static __inline void
td_softdep_cleanup(struct thread *td)
{

	if (td->td_su != NULL && softdep_ast_cleanup != NULL)
		softdep_ast_cleanup(td);
}
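The save/restore pair above composes under nesting: the save word records which of the requested bits were already set, so an inner user's restore does not clear a flag an outer caller still relies on. A minimal illustrative sketch (not part of the header, helper name hypothetical):

/* Illustrative sketch only: nested-safe private-flag bracketing. */
static void
no_faulting_region(struct thread *td)
{
	int save;

	MPASS(td == curthread);		/* td_pflags is curthread-only */
	save = curthread_pflags_set(TDP_NOFAULTING);
	/* ... access that must not handle page faults ... */
	curthread_pflags_restore(save);
}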
#define	PROC_ID_PID	0
#define	PROC_ID_GROUP	1
#define	PROC_ID_SESSION	2
#define	PROC_ID_REAP	3
void	proc_id_set(int type, pid_t id);
void	proc_id_set_cond(int type, pid_t id);
void	proc_id_clear(int type, pid_t id);

EVENTHANDLER_LIST_DECLARE(process_ctor);
EVENTHANDLER_LIST_DECLARE(process_dtor);
EVENTHANDLER_LIST_DECLARE(process_init);
EVENTHANDLER_LIST_DECLARE(process_fini);
EVENTHANDLER_LIST_DECLARE(process_exit);
EVENTHANDLER_LIST_DECLARE(process_fork);
EVENTHANDLER_LIST_DECLARE(process_exec);

EVENTHANDLER_LIST_DECLARE(thread_ctor);
EVENTHANDLER_LIST_DECLARE(thread_dtor);
EVENTHANDLER_LIST_DECLARE(thread_init);

#endif	/* _KERNEL */

#endif	/* !_SYS_PROC_H_ */
diff --git a/sys/sys/procctl.h b/sys/sys/procctl.h
index cc0279fb0d08..0fcb62e94bb9 100644
--- a/sys/sys/procctl.h
+++ b/sys/sys/procctl.h
@@ -1,156 +1,162 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Hudson River Trading LLC
 * Copyright (c) 2014, 2016 The FreeBSD Foundation
 * Written by: John H. Baldwin
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_PROCCTL_H_
#define	_SYS_PROCCTL_H_

#ifndef _KERNEL
#include <sys/types.h>
#include <sys/wait.h>
#endif

/* MD PROCCTL verbs start at 0x10000000 */
#define	PROC_PROCCTL_MD_MIN	0x10000000
#include <machine/procctl.h>

#define	PROC_SPROTECT		1	/* set protected state */
#define	PROC_REAP_ACQUIRE	2	/* reaping enable */
#define	PROC_REAP_RELEASE	3	/* reaping disable */
#define	PROC_REAP_STATUS	4	/* reaping status */
#define	PROC_REAP_GETPIDS	5	/* get descendants */
#define	PROC_REAP_KILL		6	/* kill descendants */
#define	PROC_TRACE_CTL		7	/* en/dis ptrace and coredumps */
#define	PROC_TRACE_STATUS	8	/* query tracing status */
#define	PROC_TRAPCAP_CTL	9	/* trap capability errors */
#define	PROC_TRAPCAP_STATUS	10	/* query trap capability status */
#define	PROC_PDEATHSIG_CTL	11	/* set parent death signal */
#define	PROC_PDEATHSIG_STATUS	12	/* get parent death signal */
#define	PROC_ASLR_CTL		13	/* en/dis ASLR */
#define	PROC_ASLR_STATUS	14	/* query ASLR status */
#define	PROC_PROTMAX_CTL	15	/* en/dis implicit PROT_MAX */
#define	PROC_PROTMAX_STATUS	16	/* query implicit PROT_MAX status */
#define	PROC_STACKGAP_CTL	17	/* en/dis stack gap on MAP_STACK */
#define	PROC_STACKGAP_STATUS	18	/* query stack gap */
#define	PROC_NO_NEW_PRIVS_CTL	19	/* disable setuid/setgid */
#define	PROC_NO_NEW_PRIVS_STATUS 20	/* query suid/sgid disabled status */
+#define	PROC_WXMAP_CTL		21	/* control W^X */
+#define	PROC_WXMAP_STATUS	22	/* query W^X */

/* Operations for PROC_SPROTECT (passed in integer arg). */
#define	PPROT_OP(x)	((x) & 0xf)
#define	PPROT_SET	1
#define	PPROT_CLEAR	2

/* Flags for PROC_SPROTECT (ORed in with operation). */
#define	PPROT_FLAGS(x)	((x) & ~0xf)
#define	PPROT_DESCEND	0x10
#define	PPROT_INHERIT	0x20

/* Result of PROC_REAP_STATUS (returned by value). */
struct procctl_reaper_status {
	u_int	rs_flags;
	u_int	rs_children;
	u_int	rs_descendants;
	pid_t	rs_reaper;
	pid_t	rs_pid;
	u_int	rs_pad0[15];
};

/* struct procctl_reaper_status rs_flags */
#define	REAPER_STATUS_OWNED	0x00000001
#define	REAPER_STATUS_REALINIT	0x00000002

struct procctl_reaper_pidinfo {
	pid_t	pi_pid;
	pid_t	pi_subtree;
	u_int	pi_flags;
	u_int	pi_pad0[15];
};

#define	REAPER_PIDINFO_VALID	0x00000001
#define	REAPER_PIDINFO_CHILD	0x00000002
#define	REAPER_PIDINFO_REAPER	0x00000004

struct procctl_reaper_pids {
	u_int	rp_count;
	u_int	rp_pad0[15];
	struct procctl_reaper_pidinfo *rp_pids;
};

struct procctl_reaper_kill {
	int	rk_sig;		/* in  - signal to send */
	u_int	rk_flags;	/* in  - REAPER_KILL flags */
	pid_t	rk_subtree;	/* in  - subtree, if REAPER_KILL_SUBTREE */
	u_int	rk_killed;	/* out - count of processes successfully
				   killed */
	pid_t	rk_fpid;	/* out - first failed pid for which error
				   is returned */
	u_int	rk_pad0[15];
};

#define	REAPER_KILL_CHILDREN	0x00000001
#define	REAPER_KILL_SUBTREE	0x00000002

#define	PROC_TRACE_CTL_ENABLE		1
#define	PROC_TRACE_CTL_DISABLE		2
#define	PROC_TRACE_CTL_DISABLE_EXEC	3

#define	PROC_TRAPCAP_CTL_ENABLE		1
#define	PROC_TRAPCAP_CTL_DISABLE	2

#define	PROC_ASLR_FORCE_ENABLE		1
#define	PROC_ASLR_FORCE_DISABLE		2
#define	PROC_ASLR_NOFORCE		3
#define	PROC_ASLR_ACTIVE		0x80000000

#define	PROC_PROTMAX_FORCE_ENABLE	1
#define	PROC_PROTMAX_FORCE_DISABLE	2
#define	PROC_PROTMAX_NOFORCE		3
#define	PROC_PROTMAX_ACTIVE		0x80000000

#define	PROC_STACKGAP_ENABLE		0x0001
#define	PROC_STACKGAP_DISABLE		0x0002
#define	PROC_STACKGAP_ENABLE_EXEC	0x0004
#define	PROC_STACKGAP_DISABLE_EXEC	0x0008

#define	PROC_NO_NEW_PRIVS_ENABLE	1
#define	PROC_NO_NEW_PRIVS_DISABLE	2

+#define	PROC_WX_MAPPINGS_PERMIT		0x0001
+#define	PROC_WX_MAPPINGS_DISALLOW_EXEC	0x0002
+#define	PROC_WXORX_ENFORCE		0x80000000
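A minimal userland sketch of the new verbs (not part of the header): it assumes, as the flag names and the accompanying man page update suggest, that PROC_WXMAP_CTL takes an int flag word like the other PROC_*_CTL commands, and that PROC_WXMAP_STATUS writes the current flags back, with PROC_WXORX_ENFORCE or-ed in when W^X is being enforced.

/* Illustrative sketch only, e.g. for a JIT that needs RWX pages. */
#include <sys/procctl.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int val;

	val = PROC_WX_MAPPINGS_PERMIT;
	if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &val) == -1)
		err(1, "PROC_WXMAP_CTL");
	if (procctl(P_PID, getpid(), PROC_WXMAP_STATUS, &val) == -1)
		err(1, "PROC_WXMAP_STATUS");
	printf("wxmap flags: %#x%s\n", val,
	    (val & PROC_WXORX_ENFORCE) != 0 ? " (W^X enforced)" : "");
	return (0);
}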
+
#ifndef _KERNEL
__BEGIN_DECLS
int	procctl(idtype_t, id_t, int, void *);
__END_DECLS
#endif

#endif	/* !_SYS_PROCCTL_H_ */
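For comparison, here is an equally minimal sketch of an existing verb using the PPROT_* operations and flags defined above; marking a process protected typically requires privilege, so expect EPERM when run unprivileged.

/* Illustrative sketch only: protect this process (and future children)
 * from being killed on memory overcommit. */
#include <sys/procctl.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	int op;

	op = PPROT_SET | PPROT_INHERIT;
	if (procctl(P_PID, getpid(), PROC_SPROTECT, &op) == -1)
		err(1, "PROC_SPROTECT");
	return (0);
}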