D55062: Re-introduce kern.sched.topology_spec
D55062.diff (9 KB)
diff --git a/sys/kern/sched_shim.c b/sys/kern/sched_shim.c
--- a/sys/kern/sched_shim.c
+++ b/sys/kern/sched_shim.c
@@ -16,6 +16,7 @@
#include <sys/runq.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <machine/ifunc.h>
@@ -171,9 +172,12 @@
active_sched->init();
}
+struct cpu_group __read_mostly *cpu_top; /* CPU topology */
+
static void
sched_setup(void *dummy)
{
+ cpu_top = smp_topo();
active_sched->setup();
}
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
@@ -232,3 +236,90 @@
fixpt_t ccpu;
SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0,
"Decay factor used for updating %CPU");
+
+/*
+ * Build the CPU topology dump string. Called recursively to collect
+ * the topology tree.
+ */
+static int
+sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
+ struct cpu_group *cg, int indent)
+{
+ char cpusetbuf[CPUSETBUFSIZ];
+ int i, first;
+
+ if (cpu_top == NULL) {
+ sbuf_printf(sb, "%*s<group level=\"1\" cache-level=\"1\">\n",
+ indent, "");
+ sbuf_printf(sb, "%*s</group>\n", indent, "");
+ return (0);
+ }
+
+ sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
+ "", 1 + indent / 2, cg->cg_level);
+ sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
+ cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
+ first = TRUE;
+ for (i = cg->cg_first; i <= cg->cg_last; i++) {
+ if (CPU_ISSET(i, &cg->cg_mask)) {
+ if (!first)
+ sbuf_cat(sb, ", ");
+ else
+ first = FALSE;
+ sbuf_printf(sb, "%d", i);
+ }
+ }
+ sbuf_cat(sb, "</cpu>\n");
+
+ if (cg->cg_flags != 0) {
+ sbuf_printf(sb, "%*s <flags>", indent, "");
+ if ((cg->cg_flags & CG_FLAG_HTT) != 0)
+ sbuf_cat(sb, "<flag name=\"HTT\">HTT group</flag>");
+ if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
+ sbuf_cat(sb, "<flag name=\"THREAD\">THREAD group</flag>");
+ if ((cg->cg_flags & CG_FLAG_SMT) != 0)
+ sbuf_cat(sb, "<flag name=\"SMT\">SMT group</flag>");
+ if ((cg->cg_flags & CG_FLAG_NODE) != 0)
+ sbuf_cat(sb, "<flag name=\"NODE\">NUMA node</flag>");
+ sbuf_cat(sb, "</flags>\n");
+ }
+
+ if (cg->cg_children > 0) {
+ sbuf_printf(sb, "%*s <children>\n", indent, "");
+ for (i = 0; i < cg->cg_children; i++)
+ sysctl_kern_sched_topology_spec_internal(sb,
+ &cg->cg_child[i], indent + 2);
+ sbuf_printf(sb, "%*s </children>\n", indent, "");
+ }
+ sbuf_printf(sb, "%*s</group>\n", indent, "");
+ return (0);
+}
+
+/*
+ * Sysctl handler for retrieving the topology dump. It's a wrapper for
+ * the recursive sysctl_kern_sched_topology_spec_internal().
+ */
+static int
+sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
+{
+ struct sbuf *topo;
+ int err;
+
+ topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
+ if (topo == NULL)
+ return (ENOMEM);
+
+ sbuf_cat(topo, "<groups>\n");
+ err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
+ sbuf_cat(topo, "</groups>\n");
+
+ if (err == 0)
+ err = sbuf_finish(topo);
+ sbuf_delete(topo);
+ return (err);
+}
+
+SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
+ CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0,
+ sysctl_kern_sched_topology_spec, "A",
+ "XML dump of detected CPU topology");
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -304,7 +304,6 @@
atomic_load_short(&(tdq)->tdq_switchcnt) + 1))
#ifdef SMP
-struct cpu_group __read_mostly *cpu_top; /* CPU topology */
#define SCHED_AFFINITY_DEFAULT (max(1, hz / 1000))
/*
@@ -398,9 +397,6 @@
static bool sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
-static int sysctl_kern_sched_ule_topology_spec(SYSCTL_HANDLER_ARGS);
-static int sysctl_kern_sched_ule_topology_spec_internal(struct sbuf *sb,
- struct cpu_group *cg, int indent);
#endif
/*
@@ -1590,7 +1586,6 @@
struct tdq *tdq;
int i;
- cpu_top = smp_topo();
CPU_FOREACH(i) {
tdq = DPCPU_ID_PTR(i, tdq);
tdq_setup(tdq, i);
@@ -3452,89 +3447,6 @@
};
DECLARE_SCHEDULER(ule_sched_selector, "ULE", &sched_ule_instance);
-#ifdef SMP
-
-/*
- * Build the CPU topology dump string. Is recursively called to collect
- * the topology tree.
- */
-static int
-sysctl_kern_sched_ule_topology_spec_internal(struct sbuf *sb,
- struct cpu_group *cg, int indent)
-{
- char cpusetbuf[CPUSETBUFSIZ];
- int i, first;
-
- sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
- "", 1 + indent / 2, cg->cg_level);
- sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
- cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
- first = TRUE;
- for (i = cg->cg_first; i <= cg->cg_last; i++) {
- if (CPU_ISSET(i, &cg->cg_mask)) {
- if (!first)
- sbuf_cat(sb, ", ");
- else
- first = FALSE;
- sbuf_printf(sb, "%d", i);
- }
- }
- sbuf_cat(sb, "</cpu>\n");
-
- if (cg->cg_flags != 0) {
- sbuf_printf(sb, "%*s <flags>", indent, "");
- if ((cg->cg_flags & CG_FLAG_HTT) != 0)
- sbuf_cat(sb, "<flag name=\"HTT\">HTT group</flag>");
- if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
- sbuf_cat(sb, "<flag name=\"THREAD\">THREAD group</flag>");
- if ((cg->cg_flags & CG_FLAG_SMT) != 0)
- sbuf_cat(sb, "<flag name=\"SMT\">SMT group</flag>");
- if ((cg->cg_flags & CG_FLAG_NODE) != 0)
- sbuf_cat(sb, "<flag name=\"NODE\">NUMA node</flag>");
- sbuf_cat(sb, "</flags>\n");
- }
-
- if (cg->cg_children > 0) {
- sbuf_printf(sb, "%*s <children>\n", indent, "");
- for (i = 0; i < cg->cg_children; i++)
- sysctl_kern_sched_ule_topology_spec_internal(sb,
- &cg->cg_child[i], indent+2);
- sbuf_printf(sb, "%*s </children>\n", indent, "");
- }
- sbuf_printf(sb, "%*s</group>\n", indent, "");
- return (0);
-}
-
-/*
- * Sysctl handler for retrieving topology dump. It's a wrapper for
- * the recursive sysctl_kern_smp_topology_spec_internal().
- */
-static int
-sysctl_kern_sched_ule_topology_spec(SYSCTL_HANDLER_ARGS)
-{
- struct sbuf *topo;
- int err;
-
- if (cpu_top == NULL)
- return (ENOTTY);
-
- topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
- if (topo == NULL)
- return (ENOMEM);
-
- sbuf_cat(topo, "<groups>\n");
- err = sysctl_kern_sched_ule_topology_spec_internal(topo, cpu_top, 1);
- sbuf_cat(topo, "</groups>\n");
-
- if (err == 0) {
- err = sbuf_finish(topo);
- }
- sbuf_delete(topo);
- return (err);
-}
-
-#endif
-
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
@@ -3597,8 +3509,4 @@
SYSCTL_INT(_kern_sched_ule, OID_AUTO, always_steal, CTLFLAG_RWTUN,
&always_steal, 0,
"Always run the stealer from the idle thread");
-SYSCTL_PROC(_kern_sched_ule, OID_AUTO, topology_spec, CTLTYPE_STRING |
- CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0,
- sysctl_kern_sched_ule_topology_spec, "A",
- "XML dump of detected CPU topology");
#endif
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -50,9 +50,43 @@
#include "opt_sched.h"
-#ifdef SMP
MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
+struct cpu_group *
+smp_topo_alloc(u_int count)
+{
+ static struct cpu_group *group = NULL;
+ static u_int index;
+ u_int curr;
+
+ if (group == NULL) {
+ group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
+ sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
+ }
+ curr = index;
+ index += count;
+ return (&group[curr]);
+}
+
+struct cpu_group *
+smp_topo_none(void)
+{
+ struct cpu_group *top;
+
+ top = smp_topo_alloc(1);
+ top->cg_parent = NULL;
+ top->cg_child = NULL;
+ top->cg_mask = all_cpus;
+ top->cg_count = mp_ncpus;
+ top->cg_children = 0;
+ top->cg_level = CG_SHARE_NONE;
+ top->cg_flags = 0;
+
+ return (top);
+}
+
+#ifdef SMP
+
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
@@ -731,39 +765,6 @@
return (top);
}
-struct cpu_group *
-smp_topo_alloc(u_int count)
-{
- static struct cpu_group *group = NULL;
- static u_int index;
- u_int curr;
-
- if (group == NULL) {
- group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
- sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
- }
- curr = index;
- index += count;
- return (&group[curr]);
-}
-
-struct cpu_group *
-smp_topo_none(void)
-{
- struct cpu_group *top;
-
- top = smp_topo_alloc(1);
- top->cg_parent = NULL;
- top->cg_child = NULL;
- top->cg_mask = all_cpus;
- top->cg_count = mp_ncpus;
- top->cg_children = 0;
- top->cg_level = CG_SHARE_NONE;
- top->cg_flags = 0;
-
- return (top);
-}
-
static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
int count, int flags, int start)
@@ -901,6 +902,18 @@
arg);
}
+struct cpu_group *
+smp_topo(void)
+{
+ static struct cpu_group *top = NULL;
+
+ if (top != NULL)
+ return (top);
+
+ top = smp_topo_none();
+ return (top);
+}
+
/*
* Provide dummy SMP support for UP kernels. Modules that need to use SMP
* APIs will still work using this dummy support.
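
The smp_topo_alloc() moved above is a one-shot bump allocator: a pool large
enough for the worst-case topology is allocated on first use, and each call
hands out the next 'count' consecutive cpu_group slots. A minimal userland
sketch of the same pattern (illustrative only; the fixed capacity and node
type are assumptions, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; };

/* Hand out 'count' consecutive slots from a pool allocated on first use. */
static struct node *
pool_alloc(unsigned count)
{
	static struct node *pool = NULL;
	static unsigned index = 0;
	unsigned curr;

	if (pool == NULL) {
		pool = calloc(64, sizeof(*pool));	/* assumed capacity */
		if (pool == NULL)
			abort();
	}
	curr = index;
	index += count;
	return (&pool[curr]);
}

int
main(void)
{
	struct node *a = pool_alloc(2);
	struct node *b = pool_alloc(1);

	/* Slots are handed out consecutively, so b is two entries past a. */
	printf("%td\n", b - a);
	return (0);
}
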
diff --git a/sys/sys/smp.h b/sys/sys/smp.h
--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h
@@ -89,6 +89,8 @@
typedef struct cpu_group *cpu_group_t;
+extern cpu_group_t cpu_top;
+
/*
* Defines common resources for CPUs in the group. The highest level
* resource should be used when multiple are shared.
@@ -147,9 +149,6 @@
#define TOPO_FOREACH(i, root) \
for (i = root; i != NULL; i = topo_next_node(root, i))
-struct cpu_group *smp_topo(void);
-struct cpu_group *smp_topo_alloc(u_int count);
-struct cpu_group *smp_topo_none(void);
struct cpu_group *smp_topo_1level(int l1share, int l1count, int l1flags);
struct cpu_group *smp_topo_2level(int l2share, int l2count, int l1share,
int l1count, int l1flags);
@@ -166,6 +165,10 @@
extern cpuset_t logical_cpus_mask;
#endif /* SMP */
+struct cpu_group *smp_topo(void);
+struct cpu_group *smp_topo_alloc(u_int count);
+struct cpu_group *smp_topo_none(void);
+
extern u_int mp_maxid;
extern int mp_maxcpus;
extern int mp_ncores;
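
As a usage note, a minimal userland sketch (not part of the patch) that
consumes the re-introduced node via sysctlbyname(3); the two-call pattern
first queries the required buffer length, then fetches the string:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len = 0;
	char *buf;

	/* First call reports how large the XML string is. */
	if (sysctlbyname("kern.sched.topology_spec", NULL, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		return (1);
	}
	buf = malloc(len);
	if (buf == NULL)
		return (1);
	/* Second call copies the XML dump out of the kernel. */
	if (sysctlbyname("kern.sched.topology_spec", buf, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		free(buf);
		return (1);
	}
	printf("%s", buf);
	free(buf);
	return (0);
}

The same string is available from the shell with
sysctl -n kern.sched.topology_spec once the patch is applied.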