diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -630,8 +630,6 @@
 	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
 }
 
-static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];
-
 static void
 smp_topo_fill(struct cpu_group *cg)
 {
@@ -647,7 +645,14 @@ smp_topo(void)
 {
 	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
-	struct cpu_group *top;
+	static struct cpu_group *top = NULL;
+
+	/*
+	 * The first call to smp_topo() is guaranteed to occur
+	 * during the kernel boot while we are still single-threaded.
+	 */
+	if (top != NULL)
+		return (top);
 
 	/*
 	 * Check for a fake topology request for debugging purposes.
 	 */
@@ -713,9 +718,14 @@ struct cpu_group *
 smp_topo_alloc(u_int count)
 {
+	static struct cpu_group *group = NULL;
 	static u_int index;
 	u_int curr;
 
+	if (group == NULL) {
+		group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
+		    sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
+	}
 	curr = index;
 	index += count;
 	return (&group[curr]);
 }
@@ -726,7 +736,7 @@
 {
 	struct cpu_group *top;
 
-	top = &group[0];
+	top = smp_topo_alloc(1);
 	top->cg_parent = NULL;
 	top->cg_child = NULL;
 	top->cg_mask = all_cpus;
@@ -780,9 +790,9 @@
 	int i;
 
 	cpu = 0;
-	top = &group[0];
+	top = smp_topo_alloc(1);
 	packages = mp_ncpus / count;
-	top->cg_child = child = &group[1];
+	top->cg_child = child = top + 1;
 	top->cg_level = CG_SHARE_NONE;
 	for (i = 0; i < packages; i++, child++)
 		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
@@ -801,8 +811,8 @@
 	int j;
 
 	cpu = 0;
-	top = &group[0];
-	l2g = &group[1];
+	top = smp_topo_alloc(1);
+	l2g = top + 1;
 	top->cg_child = l2g;
 	top->cg_level = CG_SHARE_NONE;
 	top->cg_children = mp_ncpus / (l2count * l1count);
@@ -975,7 +985,8 @@
 	error = 0;
 	if ((prio & PDROP) == 0) {
-		gen = malloc(sizeof(u_int) * MAXCPU, M_TEMP, M_WAITOK);
+		gen = mallocarray(sizeof(u_int), mp_maxid + 1, M_TEMP,
+		    M_WAITOK);
 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
 			if (!CPU_ISSET(cpu, &map) ||
 			    CPU_ABSENT(cpu))
 				continue;