D6711.id17283.diff
D6711: Remove struct proc p_sched and struct thread td_sched.
Index: sys/kern/init_main.c
===================================================================
--- sys/kern/init_main.c
+++ sys/kern/init_main.c
@@ -99,7 +99,7 @@
static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
-struct thread thread0 __aligned(16);
+struct thread0_storage thread0_st __aligned(16);
struct vmspace vmspace0;
struct proc *initproc;
@@ -443,6 +443,9 @@
GIANT_REQUIRED;
p = &proc0;
td = &thread0;
+ KASSERT(sizeof(thread0_st.t0st_sched) >= sched_sizeof_thread(),
+ ("increase thread0_st.t0st_sched array size %zu %u",
+ sizeof(thread0_st.t0st_sched), sched_sizeof_thread()));
/*
* Initialize magic number and osrel.
Index: sys/kern/kern_fork.c
===================================================================
--- sys/kern/kern_fork.c
+++ sys/kern/kern_fork.c
@@ -1011,7 +1011,7 @@
KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
- td, td->td_sched, p->p_pid, td->td_name);
+ td, td_get_sched(td), p->p_pid, td->td_name);
sched_fork_exit(td);
/*
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c
+++ sys/kern/kern_proc.c
@@ -168,7 +168,7 @@
* Initialize global process hashing structures.
*/
void
-procinit()
+procinit(void)
{
sx_init(&allproc_lock, "allproc");
@@ -237,7 +237,6 @@
p = (struct proc *)mem;
SDT_PROBE3(proc, , init, entry, p, size, flags);
- p->p_sched = (struct p_sched *)&p[1];
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK | MTX_NEW);
mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_NEW);
mtx_init(&p->p_statmtx, "pstatl", NULL, MTX_SPIN | MTX_NEW);
Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -441,7 +441,7 @@
PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
+ td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
if (TD_IS_IDLETHREAD(td))
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
@@ -457,7 +457,7 @@
"prio:%d", td->td_priority);
CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
+ td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
/*
* If the last thread was exiting, finish cleaning it up.
Index: sys/kern/kern_thread.c
===================================================================
--- sys/kern/kern_thread.c
+++ sys/kern/kern_thread.c
@@ -211,7 +211,6 @@
td->td_turnstile = turnstile_alloc();
td->td_rlqe = NULL;
EVENTHANDLER_INVOKE(thread_init, td);
- td->td_sched = (struct td_sched *)&td[1];
umtx_thread_init(td);
td->td_kstack = 0;
td->td_sel = NULL;
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c
+++ sys/kern/sched_4bsd.c
@@ -117,7 +117,6 @@
#define THREAD_CAN_SCHED(td, cpu) \
CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
-static struct td_sched td_sched0;
static struct mtx sched_lock;
static int realstathz = 127; /* stathz is sometimes 0 and run off of hz. */
@@ -491,8 +490,8 @@
}
FOREACH_THREAD_IN_PROC(p, td) {
awake = 0;
+ ts = td_get_sched(td);
thread_lock(td);
- ts = td->td_sched;
/*
* Increment sleep time (if sleeping). We
* ignore overflow, as above.
@@ -596,7 +595,7 @@
fixpt_t loadfac;
unsigned int newcpu;
- ts = td->td_sched;
+ ts = td_get_sched(td);
loadfac = loadfactor(averunnable.ldavg[0]);
if (ts->ts_slptime > 5 * loadfac)
ts->ts_estcpu = 0;
@@ -682,13 +681,12 @@
void
schedinit(void)
{
+
/*
- * Set up the scheduler specific parts of proc0.
+ * Set up the scheduler specific parts of thread0.
*/
- proc0.p_sched = NULL; /* XXX */
- thread0.td_sched = &td_sched0;
thread0.td_lock = &sched_lock;
- td_sched0.ts_slice = sched_slice;
+ td_get_sched(&thread0)->ts_slice = sched_slice;
mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}
@@ -731,7 +729,7 @@
struct td_sched *ts;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- ts = td->td_sched;
+ ts = td_get_sched(td);
ts->ts_cpticks++;
ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
@@ -775,8 +773,8 @@
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
"prio:%d", child->td_priority);
thread_lock(td);
- td->td_sched->ts_estcpu = ESTCPULIM(td->td_sched->ts_estcpu +
- child->td_sched->ts_estcpu);
+ td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu +
+ td_get_sched(child)->ts_estcpu);
thread_unlock(td);
thread_lock(child);
if ((child->td_flags & TDF_NOLOAD) == 0)
@@ -800,10 +798,10 @@
childtd->td_lock = &sched_lock;
childtd->td_cpuset = cpuset_ref(td->td_cpuset);
childtd->td_priority = childtd->td_base_pri;
- ts = childtd->td_sched;
+ ts = td_get_sched(childtd);
bzero(ts, sizeof(*ts));
- ts->ts_estcpu = td->td_sched->ts_estcpu;
- ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
+ ts->ts_estcpu = td_get_sched(td)->ts_estcpu;
+ ts->ts_flags |= (td_get_sched(td)->ts_flags & TSF_AFFINITY);
ts->ts_slice = 1;
}
@@ -952,7 +950,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED);
td->td_slptick = ticks;
- td->td_sched->ts_slptime = 0;
+ td_get_sched(td)->ts_slptime = 0;
if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
sched_prio(td, pri);
if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
@@ -968,7 +966,7 @@
int preempted;
tmtx = NULL;
- ts = td->td_sched;
+ ts = td_get_sched(td);
p = td->td_proc;
THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -1095,7 +1093,7 @@
struct td_sched *ts;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- ts = td->td_sched;
+ ts = td_get_sched(td);
td->td_flags &= ~TDF_CANSWAP;
if (ts->ts_slptime > 1) {
updatepri(td);
@@ -1266,7 +1264,7 @@
int forwarded = 0;
int single_cpu = 0;
- ts = td->td_sched;
+ ts = td_get_sched(td);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT((td->td_inhibitors == 0),
("sched_add: trying to run inhibited thread"));
@@ -1361,7 +1359,7 @@
{
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT((td->td_inhibitors == 0),
("sched_add: trying to run inhibited thread"));
@@ -1414,7 +1412,7 @@
{
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
KASSERT(td->td_flags & TDF_INMEM,
("sched_rem: thread swapped out"));
KASSERT(TD_ON_RUNQ(td),
@@ -1527,7 +1525,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
- ts = td->td_sched;
+ ts = td_get_sched(td);
td->td_flags |= TDF_BOUND;
#ifdef SMP
@@ -1586,7 +1584,7 @@
struct td_sched *ts;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- ts = td->td_sched;
+ ts = td_get_sched(td);
return (ts->ts_pctcpu);
}
@@ -1603,7 +1601,7 @@
int realstathz;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- ts = td->td_sched;
+ ts = td_get_sched(td);
delta = 0;
realstathz = stathz ? stathz : hz;
if (ts->ts_cpticks != 0) {
@@ -1628,7 +1626,7 @@
sched_estcpu(struct thread *td)
{
- return (td->td_sched->ts_estcpu);
+ return (td_get_sched(td)->ts_estcpu);
}
/*
@@ -1707,7 +1705,7 @@
#ifdef KTR
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
if (ts->ts_name[0] == '\0')
snprintf(ts->ts_name, sizeof(ts->ts_name),
"%s tid %d", td->td_name, td->td_tid);
@@ -1723,7 +1721,7 @@
{
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
ts->ts_name[0] = '\0';
}
#endif
@@ -1741,7 +1739,7 @@
* Set the TSF_AFFINITY flag if there is at least one CPU this
* thread can't run on.
*/
- ts = td->td_sched;
+ ts = td_get_sched(td);
ts->ts_flags &= ~TSF_AFFINITY;
CPU_FOREACH(cpu) {
if (!THREAD_CAN_SCHED(td, cpu)) {
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c
+++ sys/kern/sched_ule.c
@@ -106,8 +106,6 @@
#define TSF_BOUND 0x0001 /* Thread can not migrate. */
#define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */
-static struct td_sched td_sched0;
-
#define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0)
#define THREAD_CAN_SCHED(td, cpu) \
CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
@@ -460,7 +458,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED);
pri = td->td_priority;
- ts = td->td_sched;
+ ts = td_get_sched(td);
TD_SET_RUNQ(td);
if (THREAD_CAN_MIGRATE(td)) {
tdq->tdq_transferable++;
@@ -506,7 +504,7 @@
{
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
TDQ_LOCK_ASSERT(tdq, MA_OWNED);
KASSERT(ts->ts_runq != NULL,
("tdq_runq_remove: thread %p null ts_runq", td));
@@ -962,7 +960,7 @@
td = tdq_steal(tdq, cpu);
if (td == NULL)
return (0);
- ts = td->td_sched;
+ ts = td_get_sched(td);
/*
* Although the run queue is locked the thread may be blocked. Lock
* it to clear this and acquire the run-queue lock.
@@ -1174,7 +1172,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED);
tdq = TDQ_CPU(cpu);
- td->td_sched->ts_cpu = cpu;
+ td_get_sched(td)->ts_cpu = cpu;
/*
* If the lock matches just return the queue.
*/
@@ -1221,7 +1219,7 @@
int cpu, pri, self;
self = PCPU_GET(cpuid);
- ts = td->td_sched;
+ ts = td_get_sched(td);
if (smp_started == 0)
return (self);
/*
@@ -1472,7 +1470,7 @@
struct td_sched *ts;
int div;
- ts = td->td_sched;
+ ts = td_get_sched(td);
/*
* The score is only needed if this is likely to be an interactive
* task. Don't go through the expense of computing it if there's
@@ -1537,16 +1535,16 @@
pri, score));
} else {
pri = SCHED_PRI_MIN;
- if (td->td_sched->ts_ticks)
- pri += min(SCHED_PRI_TICKS(td->td_sched),
+ if (td_get_sched(td)->ts_ticks)
+ pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
SCHED_PRI_RANGE - 1);
pri += SCHED_PRI_NICE(td->td_proc->p_nice);
KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
("sched_priority: invalid priority %d: nice %d, "
"ticks %d ftick %d ltick %d tick pri %d",
- pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
- td->td_sched->ts_ftick, td->td_sched->ts_ltick,
- SCHED_PRI_TICKS(td->td_sched)));
+ pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
+ td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
+ SCHED_PRI_TICKS(td_get_sched(td))));
}
sched_user_prio(td, pri);
@@ -1564,7 +1562,7 @@
struct td_sched *ts;
u_int sum;
- ts = td->td_sched;
+ ts = td_get_sched(td);
sum = ts->ts_runtime + ts->ts_slptime;
if (sum < SCHED_SLP_RUN_MAX)
return;
@@ -1609,11 +1607,11 @@
int ratio;
int sum;
- sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
+ sum = td_get_sched(td)->ts_runtime + td_get_sched(td)->ts_slptime;
if (sum > SCHED_SLP_RUN_FORK) {
ratio = sum / SCHED_SLP_RUN_FORK;
- td->td_sched->ts_runtime /= ratio;
- td->td_sched->ts_slptime /= ratio;
+ td_get_sched(td)->ts_runtime /= ratio;
+ td_get_sched(td)->ts_slptime /= ratio;
}
}
@@ -1625,13 +1623,11 @@
{
/*
- * Set up the scheduler specific parts of proc0.
+ * Set up the scheduler specific parts of thread0.
*/
- proc0.p_sched = NULL; /* XXX */
- thread0.td_sched = &td_sched0;
- td_sched0.ts_ltick = ticks;
- td_sched0.ts_ftick = ticks;
- td_sched0.ts_slice = 0;
+ td_get_sched(&thread0)->ts_ltick = ticks;
+ td_get_sched(&thread0)->ts_ftick = ticks;
+ td_get_sched(&thread0)->ts_slice = 0;
}
/*
@@ -1694,7 +1690,7 @@
SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
curthread);
}
- ts = td->td_sched;
+ ts = td_get_sched(td);
THREAD_LOCK_ASSERT(td, MA_OWNED);
if (td->td_priority == prio)
return;
@@ -1829,7 +1825,7 @@
{
struct tdq *tdn;
- tdn = TDQ_CPU(td->td_sched->ts_cpu);
+ tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
#ifdef SMP
tdq_load_rem(tdq, td);
/*
@@ -1888,7 +1884,7 @@
cpuid = PCPU_GET(cpuid);
tdq = TDQ_CPU(cpuid);
- ts = td->td_sched;
+ ts = td_get_sched(td);
mtx = td->td_lock;
sched_pctcpu_update(ts, 1);
ts->ts_rltick = ticks;
@@ -1948,7 +1944,7 @@
SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
- sched_pctcpu_update(newtd->td_sched, 0);
+ sched_pctcpu_update(td_get_sched(newtd), 0);
#ifdef KDTRACE_HOOKS
/*
@@ -2038,7 +2034,7 @@
int slptick;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- ts = td->td_sched;
+ ts = td_get_sched(td);
td->td_flags &= ~TDF_CANSWAP;
/*
* If we slept for more than a tick update our interactivity and
@@ -2066,14 +2062,14 @@
sched_fork(struct thread *td, struct thread *child)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
- sched_pctcpu_update(td->td_sched, 1);
+ sched_pctcpu_update(td_get_sched(td), 1);
sched_fork_thread(td, child);
/*
* Penalize the parent and child for forking.
*/
sched_interact_fork(child);
sched_priority(child);
- td->td_sched->ts_runtime += tickincr;
+ td_get_sched(td)->ts_runtime += tickincr;
sched_interact_update(td);
sched_priority(td);
}
@@ -2093,8 +2089,8 @@
/*
* Initialize child.
*/
- ts = td->td_sched;
- ts2 = child->td_sched;
+ ts = td_get_sched(td);
+ ts2 = td_get_sched(child);
child->td_oncpu = NOCPU;
child->td_lastcpu = NOCPU;
child->td_lock = TDQ_LOCKPTR(tdq);
@@ -2169,7 +2165,7 @@
* launch expensive things to mark their children as expensive.
*/
thread_lock(td);
- td->td_sched->ts_runtime += child->td_sched->ts_runtime;
+ td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
sched_interact_update(td);
sched_priority(td);
thread_unlock(td);
@@ -2264,7 +2260,7 @@
if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
tdq->tdq_ridx = tdq->tdq_idx;
}
- ts = td->td_sched;
+ ts = td_get_sched(td);
sched_pctcpu_update(ts, 1);
if (td->td_pri_class & PRI_FIFO_BIT)
return;
@@ -2273,7 +2269,7 @@
* We used a tick; charge it to the thread so
* that we can compute our interactivity.
*/
- td->td_sched->ts_runtime += tickincr;
+ td_get_sched(td)->ts_runtime += tickincr;
sched_interact_update(td);
sched_priority(td);
}
@@ -2455,7 +2451,7 @@
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
"prio:%d", td->td_priority);
SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
- tdq = TDQ_CPU(td->td_sched->ts_cpu);
+ tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
TDQ_LOCK_ASSERT(tdq, MA_OWNED);
MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
KASSERT(TD_ON_RUNQ(td),
@@ -2477,9 +2473,7 @@
struct td_sched *ts;
pctcpu = 0;
- ts = td->td_sched;
- if (ts == NULL)
- return (0);
+ ts = td_get_sched(td);
THREAD_LOCK_ASSERT(td, MA_OWNED);
sched_pctcpu_update(ts, TD_IS_RUNNING(td));
@@ -2505,7 +2499,7 @@
struct td_sched *ts;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- ts = td->td_sched;
+ ts = td_get_sched(td);
if (THREAD_CAN_SCHED(td, ts->ts_cpu))
return;
if (TD_ON_RUNQ(td)) {
@@ -2536,7 +2530,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
- ts = td->td_sched;
+ ts = td_get_sched(td);
if (ts->ts_flags & TSF_BOUND)
sched_unbind(td);
KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
@@ -2559,7 +2553,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
- ts = td->td_sched;
+ ts = td_get_sched(td);
if ((ts->ts_flags & TSF_BOUND) == 0)
return;
ts->ts_flags &= ~TSF_BOUND;
@@ -2570,7 +2564,7 @@
sched_is_bound(struct thread *td)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
- return (td->td_sched->ts_flags & TSF_BOUND);
+ return (td_get_sched(td)->ts_flags & TSF_BOUND);
}
/*
@@ -2761,7 +2755,7 @@
#ifdef KTR
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
if (ts->ts_name[0] == '\0')
snprintf(ts->ts_name, sizeof(ts->ts_name),
"%s tid %d", td->td_name, td->td_tid);
@@ -2777,7 +2771,7 @@
{
struct td_sched *ts;
- ts = td->td_sched;
+ ts = td_get_sched(td);
ts->ts_name[0] = '\0';
}
#endif
Index: sys/sys/proc.h
===================================================================
--- sys/sys/proc.h
+++ sys/sys/proc.h
@@ -325,7 +325,6 @@
int td_kstack_pages; /* (a) Size of the kstack. */
volatile u_int td_critnest; /* (k*) Critical section nest level. */
struct mdthread td_md; /* (k) Any machine-dependent fields. */
- struct td_sched *td_sched; /* (*) Scheduler-specific data. */
struct kaudit_record *td_ar; /* (k) Active audit record, if any. */
struct lpohead td_lprof[2]; /* (a) lock profiling objects. */
struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
@@ -616,7 +615,6 @@
struct proc *p_leader; /* (b) */
void *p_emuldata; /* (c) Emulator state data. */
struct label *p_label; /* (*) Proc (not subject) MAC label. */
- struct p_sched *p_sched; /* (*) Scheduler-specific data. */
STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */
@@ -885,12 +883,17 @@
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;
+struct thread0_storage {
+ struct thread t0st_thread;
+ uint64_t t0st_sched[176];
+};
extern struct sx allproc_lock;
extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0; /* Process slot for swapper. */
-extern struct thread thread0; /* Primary thread in proc0. */
+extern struct thread0_storage thread0_st; /* Primary thread in proc0. */
+#define thread0 (thread0_st.t0st_thread)
extern struct vmspace vmspace0; /* VM space for proc0. */
extern int hogticks; /* Limit on kernel cpu hogs. */
extern int lastpid;
@@ -1065,6 +1068,13 @@
curthread->td_pflags &= save;
}
+static __inline __pure2 struct td_sched *
+td_get_sched(struct thread *td)
+{
+
+ return ((struct td_sched *)&td[1]);
+}
+
#endif /* _KERNEL */
#endif /* !_SYS_PROC_H_ */
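
Note on the pattern used above: the diff removes the td_sched/p_sched pointers and instead relies on one layout invariant, namely that each thread's scheduler-private data is placed immediately after its struct thread in the same allocation, so td_get_sched() can recover it as &td[1]; the statically allocated thread0 gets the same treatment through struct thread0_storage plus the new KASSERT in init_main.c that the reserved array is large enough. The following is a minimal standalone sketch of that co-allocation pattern, not FreeBSD code; every toy_* name is invented purely for illustration.

/*
 * Standalone sketch (assumptions, not kernel code): a thread structure
 * with its scheduler data co-allocated directly behind it.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_thread {
	uint64_t tt_tid;		/* stand-in for struct thread */
};

struct toy_sched {
	uint64_t ts_slice;		/* stand-in for struct td_sched */
};

/* Analogue of td_get_sched(): the sched data starts right past the thread. */
static inline struct toy_sched *
toy_get_sched(struct toy_thread *t)
{

	return ((struct toy_sched *)&t[1]);
}

/*
 * Analogue of struct thread0_storage: statically reserve room for the
 * primordial thread's scheduler data after the thread itself.
 */
static struct toy_thread0_storage {
	struct toy_thread t0st_thread;
	uint64_t          t0st_sched[4];
} toy_thread0_st;
#define	toy_thread0	(toy_thread0_st.t0st_thread)

int
main(void)
{
	struct toy_thread *t;

	/*
	 * Dynamic threads: one allocation sized for both structures,
	 * mirroring how the thread zone would be sized.
	 */
	t = malloc(sizeof(*t) + sizeof(struct toy_sched));
	assert(t != NULL);
	toy_get_sched(t)->ts_slice = 10;

	/* Static thread0 case: the analogue of the KASSERT in init_main.c. */
	assert(sizeof(toy_thread0_st.t0st_sched) >= sizeof(struct toy_sched));
	toy_get_sched(&toy_thread0)->ts_slice = 20;

	printf("%llu %llu\n",
	    (unsigned long long)toy_get_sched(t)->ts_slice,
	    (unsigned long long)toy_get_sched(&toy_thread0)->ts_slice);
	free(t);
	return (0);
}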