diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -630,7 +630,7 @@
 	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
 }
 
-static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];
+static struct cpu_group *group;
 
 static void
 smp_topo_fill(struct cpu_group *cg)
@@ -647,7 +647,14 @@ smp_topo(void)
 {
 	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
-	struct cpu_group *top;
+	static struct cpu_group *top = NULL;
+
+	/*
+	 * The first caller of smp_topo() is guaranteed to occur
+	 * during the kernel boot while we are still single-threaded.
+	 */
+	if (top != NULL)
+		return (top);
 
 	/*
 	 * Check for a fake topology request for debugging purposes.
 	 */
@@ -710,12 +717,27 @@
 	return (top);
 }
 
+/*
+ * Allocate the group array sized by the actual number of CPU ids
+ * (mp_maxid + 1) instead of the compile-time MAXCPU bound.
+ * Idempotent: only the first call allocates.
+ */
+static void
+smp_topo_alloc_group(void)
+{
+	if (group != NULL)
+		return;
+	group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
+	    sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
+}
+
 struct cpu_group *
 smp_topo_alloc(u_int count)
 {
 	static u_int index;
 	u_int curr;
 
+	smp_topo_alloc_group();
 	curr = index;
 	index += count;
 	return (&group[curr]);
@@ -726,6 +748,7 @@
 {
 	struct cpu_group *top;
 
+	smp_topo_alloc_group();
 	top = &group[0];
 	top->cg_parent = NULL;
 	top->cg_child = NULL;
@@ -779,6 +802,7 @@
 	int cpu;
 	int i;
 
+	smp_topo_alloc_group();
 	cpu = 0;
 	top = &group[0];
 	packages = mp_ncpus / count;
@@ -800,6 +824,7 @@
 	int i;
 	int j;
 
+	smp_topo_alloc_group();
 	cpu = 0;
 	top = &group[0];
 	l2g = &group[1];
@@ -975,7 +1000,8 @@
 
 	error = 0;
 	if ((prio & PDROP) == 0) {
-		gen = malloc(sizeof(u_int) * MAXCPU, M_TEMP, M_WAITOK);
+		gen = mallocarray(mp_maxid + 1, sizeof(u_int), M_TEMP,
+		    M_WAITOK);
 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
 			if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
 				continue;