D48022.id147789.diff
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1145,6 +1145,188 @@
void ast_sched(struct thread *td, int tda);
void ast_unsched_locked(struct thread *td, int tda);
+/*
+ * These functions provide architecture-specific implementations of
+ * machine-independent abstractions.
+ */
+
+/*
+ * Returns true if exec_new_vmspace() can reuse an existing VM space
+ * that is not shared. If this returns false, exec_new_vmspace() will
+ * always create a new VM space.
+ */
+bool cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
+
+/*
+ * Release machine-dependent resources other than the address space
+ * for a process during process exit.
+ */
+void cpu_exit(struct thread *);
+
+/*
+ * Copy and update machine-dependent state (e.g. the pcb) from the
+ * forking thread in an existing process (td1) to the new process.
+ *
+ * Set up the new thread's kernel stack and pcb so that it calls
+ * fork_exit() when it begins execution, passing fork_return() as the
+ * callout argument to fork_exit().
+ */
+void cpu_fork(struct thread *, struct proc *, struct thread *, int);
+
+/*
+ * Adjust a new thread's initial pcb and/or kernel stack to call the
+ * supplied callback function after starting instead of the default
+ * fork_return() callback.
+ *
+ * This must be called before a new thread is scheduled to run and is
+ * used to set the "main" function for kernel threads.
+ */
+void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
+
+/*
+ * Copy machine-dependent state (e.g. the pcb) from an old thread (td)
+ * to a new thread (td0) when creating a new thread in the same
+ * process.
+ *
+ * Set up the new thread's kernel stack and pcb so that it calls
+ * fork_exit() when it begins execution, passing fork_return() as the
+ * callout argument to fork_exit().
+ */
+void cpu_copy_thread(struct thread *td, struct thread *td0);
+
+/*
+ * Update a new thread's initial userspace register state to call the
+ * supplied callback function (first argument) with the supplied
+ * argument (second argument) using the user stack described in the
+ * third argument.
+ */
+int cpu_set_upcall(struct thread *, void (*)(void *), void *,
+ stack_t *);
+
+/*
+ * Set a new thread's initial userspace thread pointer register to
+ * reference the userspace TLS base pointer (TCB).
+ */
+int cpu_set_user_tls(struct thread *, void *tls_base);
+
+/*
+ * Fetch system call arguments for the native FreeBSD ABI from the
+ * current thread's trapframe and/or userspace stack. The arguments
+ * are saved in td_sa.
+ */
+int cpu_fetch_syscall_args(struct thread *td);
+
+/*
+ * Update thread register state to store system call error and return
+ * values. If the supplied error is 0, indicate success and return
+ * the two values in td_retval. If the supplied error is ERESTART,
+ * adjust the PC to re-invoke the current system call after returning
+ * to user mode. If the supplied error is EJUSTRETURN, leave the
+ * current register state unchanged. For any other error, indicate
+ * error and return the supplied error.
+ */
+void cpu_set_syscall_retval(struct thread *, int);
+
+/*
+ * Wait for the next interrupt to occur on the current CPU. If an
+ * architecture supports low power idling, this should place the CPU
+ * into a low power state while waiting. The sole argument is a hint
+ * from the scheduler. If it is non-zero, the scheduler expects a
+ * short sleep, so the CPU should prefer low latency over maximum
+ * power savings. If the argument is zero, the CPU should maximize
+ * power savings, including deferring unnecessary clock interrupts via
+ * cpu_idleclock().
+ */
+void cpu_idle(int);
+extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */
+
+/* Wake up an idle CPU in a low-power state. */
+int cpu_idle_wakeup(int);
+
+/* Handle machine-dependent procctl(2) requests. */
+int cpu_procctl(struct thread *td, int idtype, id_t id, int com,
+ void *data);
+
+/*
+ * Switch the current CPU between threads by swapping register state.
+ * Save the current CPU register state in the pcb of the old thread
+ * and load register values from the pcb of the new thread before
+ * returning.
+ *
+ * After saving the current CPU register state of the old thread, the
+ * mtx argument should be stored in the td_lock member of the old
+ * thread, transferring ownership of the old thread.
+ *
+ * When SCHED_ULE is being used, this function must wait (via
+ * spinning) for the td_lock member of the new thread to change to a
+ * value not equal to &blocked_lock before loading register values
+ * from the new thread.
+ */
+void cpu_switch(struct thread *, struct thread *, struct mtx *);
+
+/*
+ * Similar to cpu_switch but does not save any state for the old
+ * thread or write to the old thread's td_lock member.
+ */
+void cpu_throw(struct thread *, struct thread *) __dead2;
+
+/*
+ * Ensure that all possible speculation and out-of-order execution is
+ * serialized on the current CPU. Note that this is called from an
+ * IPI handler, so it only has to handle additional serialization beyond
+ * that provided by handling an IPI.
+ */
+void cpu_sync_core(void);
+
+/*
+ * Callouts to support the management of machine-dependent thread
+ * state in conjunction with a kernel thread's lifecycle.
+ *
+ * The general model is that a thread object is allocated each time a
+ * new kernel thread is created, either by system calls such as
+ * fork(2) and thr_new(2) or when kernel-only threads are created via
+ * kproc_create(9), kproc_kthread_add(9), or kthread_add(9). When a
+ * kernel thread exits, the thread object is freed. There is one
+ * special case, however: when a process exits, it does not free its
+ * last thread object. Instead, when the process object is reused for
+ * a new process in fork(2), the kernel recycles that last thread as
+ * the initial thread of the new process. When a thread is recycled,
+ * some of the steps in the thread alloc/free cycle are skipped as an
+ * optimization.
+ */
+
+/*
+ * Initialize machine-dependent fields in a thread after allocating a
+ * new kernel stack. This typically sets the initial PCB and
+ * trapframe pointers. This is called both when allocating a new
+ * thread object and when a recycled thread allocates a new kernel
+ * stack. Note that this function is _not_ called if a recycled
+ * thread reuses its existing kernel stack.
+ */
+void cpu_thread_alloc(struct thread *);
+
+/*
+ * Release any machine-dependent resources for the last thread in a
+ * process during wait(2). The thread is a candidate for recycling, so
+ * it should be reset to run a new thread in case it is recycled by a
+ * future fork(2).
+ */
+void cpu_thread_clean(struct thread *);
+
+/*
+ * Clean any machine-dependent state in a thread object while a thread
+ * is exiting. This is called by the exiting thread, so it cannot free
+ * state needed during in-kernel execution.
+ */
+void cpu_thread_exit(struct thread *);
+
+/*
+ * Free any machine-dependent state in a thread object when it is
+ * being freed. This is called for any thread that was not the last
+ * thread in a process once it has finished execution.
+ */
+void cpu_thread_free(struct thread *);
+
struct thread *choosethread(void);
int cr_bsd_visible(struct ucred *u1, struct ucred *u2);
int cr_cansee(struct ucred *u1, struct ucred *u2);
@@ -1210,32 +1392,10 @@
void threadinit(void);
void tidhash_add(struct thread *);
void tidhash_remove(struct thread *);
-void cpu_idle(int);
-int cpu_idle_wakeup(int);
-extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */
-void cpu_switch(struct thread *, struct thread *, struct mtx *);
-void cpu_sync_core(void);
-void cpu_throw(struct thread *, struct thread *) __dead2;
bool curproc_sigkilled(void);
void userret(struct thread *, struct trapframe *);
-void cpu_exit(struct thread *);
void exit1(struct thread *, int, int) __dead2;
-void cpu_copy_thread(struct thread *td, struct thread *td0);
-bool cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
-int cpu_fetch_syscall_args(struct thread *td);
-void cpu_fork(struct thread *, struct proc *, struct thread *, int);
-void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
-int cpu_procctl(struct thread *td, int idtype, id_t id, int com,
- void *data);
-void cpu_set_syscall_retval(struct thread *, int);
-int cpu_set_upcall(struct thread *, void (*)(void *), void *,
- stack_t *);
-int cpu_set_user_tls(struct thread *, void *tls_base);
-void cpu_thread_alloc(struct thread *);
-void cpu_thread_clean(struct thread *);
-void cpu_thread_exit(struct thread *);
-void cpu_thread_free(struct thread *);
struct thread *thread_alloc(int pages);
int thread_check_susp(struct thread *td, bool sleep);
void thread_cow_get_proc(struct thread *newtd, struct proc *p);
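The sketches below illustrate a few of the contracts documented in this diff. All of them are simplified and hedged; any names not already present in the tree (register fields, placeholder primitives, helper function names) are hypothetical. First, cpu_exec_vmspace_reuse() acts as a machine-dependent veto consulted by exec_new_vmspace(). A minimal caller sketch, assuming the usual vm(9) map accessors and not the actual kern_exec.c logic:

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sysent.h>
#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

/*
 * Hedged sketch of an exec_new_vmspace()-style caller; simplified, not
 * the real kern_exec.c code.
 */
static int
exec_vmspace_sketch(struct proc *p, struct sysentvec *sv)
{
	struct vmspace *vmspace = p->p_vmspace;
	struct vm_map *map = &vmspace->vm_map;

	if (refcount_load(&vmspace->vm_refcnt) == 1 &&
	    vm_map_min(map) == sv->sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		/* Reuse: tear down every existing mapping in place. */
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		return (0);
	}
	/* Shared, mismatched, or vetoed by MD code: build a fresh vmspace. */
	return (vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser));
}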
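cpu_copy_thread() and cpu_fork_kthread_handler() are easiest to see side by side in the kernel-thread creation path. The following is a reduced, hypothetical kthread_add(9)-style sketch; locking, naming, error handling, and the actual scheduling step are omitted, and kthread_add_sketch() is an illustrative name, not a real function.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kthread.h>

/*
 * Hedged, reduced sketch of a kthread_add(9)-style creation path.
 */
static struct thread *
kthread_add_sketch(struct proc *p, void (*func)(void *), void *arg)
{
	struct thread *newtd;

	/* Allocates the kernel stack and calls cpu_thread_alloc(). */
	newtd = thread_alloc(0);

	/* Seed MD state (pcb, trapframe) from the creating thread. */
	cpu_copy_thread(newtd, curthread);

	/* Have the new thread run func(arg) instead of fork_return(). */
	cpu_fork_kthread_handler(newtd, func, arg);

	/* ... link newtd into p and hand it to the scheduler ... */
	return (newtd);
}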
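cpu_set_upcall() and cpu_set_user_tls() are consumed together when creating a userspace thread, as in the thr_new(2) path. A minimal sketch, assuming the new thread has already been allocated and copied, with illustrative parameter names:

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/signal.h>

/*
 * Hedged sketch of thr_new(2)-style use of cpu_set_upcall() and
 * cpu_set_user_tls(); parameter plumbing and validation are trimmed.
 */
static int
thr_upcall_sketch(struct thread *newtd, void (*start)(void *), void *arg,
    void *stack_base, size_t stack_size, void *tls_base)
{
	stack_t stack;
	int error;

	stack.ss_sp = stack_base;
	stack.ss_size = stack_size;
	stack.ss_flags = 0;

	/* Aim the new thread's user registers at start(arg) on 'stack'. */
	error = cpu_set_upcall(newtd, start, arg, &stack);
	if (error != 0)
		return (error);

	/* Point the thread-pointer register at the TCB. */
	return (cpu_set_user_tls(newtd, tls_base));
}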
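The four-way behavior described for cpu_set_syscall_retval() maps directly onto a switch in each port's MD code. The sketch below is for an imaginary RISC-like architecture: the trapframe fields (tf_a0, tf_a1, tf_flags, tf_pc), the TF_CARRY error bit, and the fixed 4-byte syscall instruction are all hypothetical.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <machine/frame.h>	/* imaginary architecture's trapframe layout */

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *tf = td->td_frame;

	switch (error) {
	case 0:
		/* Success: return both values and clear the error flag. */
		tf->tf_a0 = td->td_retval[0];
		tf->tf_a1 = td->td_retval[1];
		tf->tf_flags &= ~TF_CARRY;
		break;
	case ERESTART:
		/* Back the PC up so the syscall instruction re-executes. */
		tf->tf_pc -= 4;
		break;
	case EJUSTRETURN:
		/* Leave the register state exactly as the handler set it. */
		break;
	default:
		/* Failure: return the errno value and set the error flag. */
		tf->tf_a0 = error;
		tf->tf_flags |= TF_CARRY;
		break;
	}
}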
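The busy hint to cpu_idle() decides whether it is worth deferring clock interrupts before waiting. A sketch for an imaginary port, where wait_for_interrupt() is a placeholder for the architecture's low-power wait primitive (WFI, HLT, MWAIT, and so on):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>

void	wait_for_interrupt(void);	/* placeholder low-power wait */

void
cpu_idle(int busy)
{
	spinlock_enter();
	if (!busy) {
		/* Long sleep expected: stop unnecessary clock interrupts. */
		cpu_idleclock();
	}
	if (!sched_runnable()) {
		/* Sleep until the next interrupt arrives. */
		wait_for_interrupt();
	}
	if (!busy) {
		/* Resume normal clock interrupt scheduling. */
		cpu_activeclock();
	}
	spinlock_exit();
}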
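Finally, the td_lock handoff that cpu_switch() must perform is normally written in per-architecture assembly. The fragment below is a hedged C rendering of only that handoff under SCHED_ULE, with the register save/restore elided; cpu_switch_lock_handoff() is an illustrative name, not an existing routine.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <machine/atomic.h>
#include <machine/cpu.h>

/*
 * Hedged C rendering of the cpu_switch() lock handoff; the real code is
 * assembly and also saves/restores the CPU register state.
 */
static void
cpu_switch_lock_handoff(struct thread *old, struct thread *new,
    struct mtx *mtx)
{
	/* ... save old's register state into old->td_pcb ... */

	/* Publish the new lock, transferring ownership of the old thread. */
	atomic_store_rel_ptr((volatile uintptr_t *)&old->td_lock,
	    (uintptr_t)mtx);

#ifdef SCHED_ULE
	/* Spin until the new thread has been fully switched out elsewhere. */
	while (atomic_load_acq_ptr((volatile uintptr_t *)&new->td_lock) ==
	    (uintptr_t)&blocked_lock)
		cpu_spinwait();
#endif

	/* ... load new's register state from new->td_pcb and return ... */
}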