diff --git a/UPDATING b/UPDATING
--- a/UPDATING
+++ b/UPDATING
@@ -27,6 +27,13 @@
 	world, or to merely disable the most expensive debugging functionality
 	at runtime, run "ln -s 'abort:false,junk:false' /etc/malloc.conf".)
 
+202112xx:
+	The macros provided for the manipulation of CPU sets (e.g. CPU_AND)
+	have been modified to take 2 source arguments instead of only 1.
+	Externally maintained sources that use these macros will have to
+	be adapted.  The FreeBSD version has been bumped to 1400046 to
+	reflect this change.
+
 20211214:
 	A number of the kernel include files are able to be included by
 	themselves.  A test has been added to buildworld to enforce this.
diff --git a/contrib/ofed/libmlx5/mlx5.c b/contrib/ofed/libmlx5/mlx5.c
--- a/contrib/ofed/libmlx5/mlx5.c
+++ b/contrib/ofed/libmlx5/mlx5.c
@@ -363,8 +363,12 @@
 	mlx5_local_cpu_set(ibdev, &dev_local_cpus);
 
 	/* check if my cpu set is in dev cpu */
+#if __FreeBSD_version < 1400046
 	CPU_OR(&result_set, &my_cpus);
 	CPU_OR(&result_set, &dev_local_cpus);
+#else
+	CPU_OR(&result_set, &my_cpus, &dev_local_cpus);
+#endif
 	stall_enable = CPU_EQUAL(&result_set, &dev_local_cpus) ? 0 : 1;
 
 out:
diff --git a/lib/libc/gen/Makefile.inc b/lib/libc/gen/Makefile.inc
--- a/lib/libc/gen/Makefile.inc
+++ b/lib/libc/gen/Makefile.inc
@@ -30,6 +30,8 @@
 	clock_getcpuclockid.c \
 	closedir.c \
 	confstr.c \
+	cpuset_alloc.c \
+	cpuset_free.c \
 	crypt.c \
 	ctermid.c \
 	daemon.c \
diff --git a/lib/libc/gen/Symbol.map b/lib/libc/gen/Symbol.map
--- a/lib/libc/gen/Symbol.map
+++ b/lib/libc/gen/Symbol.map
@@ -442,6 +442,8 @@
 	sched_getaffinity;
 	sched_setaffinity;
 	sched_getcpu;
+	__cpuset_alloc;
+	__cpuset_free;
 };
 
 FBSDprivate_1.0 {
diff --git a/lib/libc/gen/cpuset_alloc.c b/lib/libc/gen/cpuset_alloc.c
new file mode 100644
--- /dev/null
+++ b/lib/libc/gen/cpuset_alloc.c
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2021 Stefan Esser
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/cpuset.h>
+#include <stdlib.h>
+
+cpuset_t *
+__cpuset_alloc(size_t ncpus)
+{
+        return (malloc(CPU_ALLOC_SIZE(ncpus)));
+}
diff --git a/lib/libc/gen/cpuset_free.c b/lib/libc/gen/cpuset_free.c
new file mode 100644
--- /dev/null
+++ b/lib/libc/gen/cpuset_free.c
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2021 Stefan Esser
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/cpuset.h>
+#include <stdlib.h>
+
+void
+__cpuset_free(cpuset_t *ptr)
+{
+        free(ptr);
+}
diff --git a/lib/libc/gen/sched_getaffinity.c b/lib/libc/gen/sched_getaffinity.c
--- a/lib/libc/gen/sched_getaffinity.c
+++ b/lib/libc/gen/sched_getaffinity.c
@@ -26,7 +26,6 @@
  * SUCH DAMAGE.
  */
 
-#define _WITH_CPU_SET_T
 #include <sched.h>
 
 int
diff --git a/lib/libc/gen/sched_getcpu_gen.c b/lib/libc/gen/sched_getcpu_gen.c
--- a/lib/libc/gen/sched_getcpu_gen.c
+++ b/lib/libc/gen/sched_getcpu_gen.c
@@ -26,7 +26,6 @@
  * SUCH DAMAGE.
  */
 
-#define _WITH_CPU_SET_T
 #include <sched.h>
 
 #include "libc_private.h"
diff --git a/lib/libc/gen/sched_setaffinity.c b/lib/libc/gen/sched_setaffinity.c
--- a/lib/libc/gen/sched_setaffinity.c
+++ b/lib/libc/gen/sched_setaffinity.c
@@ -26,7 +26,6 @@
  * SUCH DAMAGE.
  */
 
-#define _WITH_CPU_SET_T
 #include <sched.h>
 
 int
diff --git a/lib/libc/x86/sys/sched_getcpu_x86.c b/lib/libc/x86/sys/sched_getcpu_x86.c
--- a/lib/libc/x86/sys/sched_getcpu_x86.c
+++ b/lib/libc/x86/sys/sched_getcpu_x86.c
@@ -32,7 +32,6 @@
 #include
 #include
 #include
-#define _WITH_CPU_SET_T
 #include <sched.h>
 
 #include "libc_private.h"
diff --git a/share/man/man9/cpuset.9 b/share/man/man9/cpuset.9
--- a/share/man/man9/cpuset.9
+++ b/share/man/man9/cpuset.9
@@ -49,6 +49,7 @@
 .Nm CPU_OR ,
 .Nm CPU_AND ,
 .Nm CPU_ANDNOT ,
+.Nm CPU_XOR ,
 .Nm CPU_CLR_ATOMIC ,
 .Nm CPU_SET_ATOMIC ,
 .Nm CPU_SET_ATOMIC_ACQ ,
@@ -86,9 +87,10 @@
 .Fn CPU_OVERLAP "cpuset_t *cpuset1" "cpuset_t *cpuset2"
 .Ft bool
 .Fn CPU_CMP "cpuset_t *cpuset1" "cpuset_t *cpuset2"
-.Fn CPU_OR "cpuset_t *dst" "cpuset_t *src"
-.Fn CPU_AND "cpuset_t *dst" "cpuset_t *src"
-.Fn CPU_ANDNOT "cpuset_t *dst" "cpuset_t *src"
+.Fn CPU_OR "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
+.Fn CPU_AND "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
+.Fn CPU_ANDNOT "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
+.Fn CPU_XOR "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
 .\"
 .Fn CPU_CLR_ATOMIC "size_t cpu_idx" "cpuset_t *cpuset"
 .Fn CPU_SET_ATOMIC "size_t cpu_idx" "cpuset_t *cpuset"
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8290,7 +8290,7 @@
         other_cpus = all_cpus;
         critical_enter();
         CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-        CPU_AND(&other_cpus, &pmap->pm_active);
+        CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
         critical_exit();
         KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
 }
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1304,7 +1304,7 @@
         mtx_lock(&vm->rendezvous_mtx);
         while (vm->rendezvous_func != NULL) {
                 /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
-                CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
+                CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
 
                 if (vcpuid != -1 &&
                     CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -1173,7 +1173,7 @@
                         return (error);
                 if (setsize != sizeof(cpuset_t))
                         return (EINVAL);
-                CPU_AND(cpuset, &cpuset_domain[d]);
+                CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
                 return (0);
         default:
                 return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1243,7 +1243,7 @@
                 cpuid = PCPU_GET(cpuid);
                 other_cpus = all_cpus;
                 CPU_CLR(cpuid, &other_cpus);
-                CPU_AND(&other_cpus, &pmap->pm_active);
+                CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
                 mask = &other_cpus;
         }
         smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
@@ -1276,7 +1276,7 @@
                 cpuid = PCPU_GET(cpuid);
                 other_cpus = all_cpus;
                 CPU_CLR(cpuid, &other_cpus);
-                CPU_AND(&other_cpus, &pmap->pm_active);
+                CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
                 mask = &other_cpus;
         }
         smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
@@ -1299,7 +1299,7 @@
                 cpuid = PCPU_GET(cpuid);
                 other_cpus = all_cpus;
                 CPU_CLR(cpuid, &other_cpus);
-                CPU_AND(&other_cpus, &pmap->pm_active);
+                CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
                 mask = &other_cpus;
         }
         smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -604,9 +604,9 @@
         if ((flags & SFB_CPUPRIVATE) == 0) {
                 other_cpus = all_cpus;
                 CPU_CLR(cpuid, &other_cpus);
-                CPU_ANDNOT(&other_cpus, &sf->cpumask);
+                CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
                 if (!CPU_EMPTY(&other_cpus)) {
-                        CPU_OR(&sf->cpumask, &other_cpus);
+                        CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
                         smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
                             sf_buf_shootdown_curcpu_cb);
                 }
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -326,7 +326,7 @@
         set->cs_flags = 0;
         mtx_lock_spin(&cpuset_lock);
         set->cs_domain = domain;
-        CPU_AND(&set->cs_mask, &parent->cs_mask);
+        CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
         set->cs_id = id;
         set->cs_parent = cpuset_ref(parent);
         LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
@@ -645,8 +645,7 @@
         if (set->cs_flags & CPU_SET_RDONLY)
                 return (EPERM);
         if (augment_mask) {
-                CPU_COPY(&set->cs_mask, &newmask);
-                CPU_AND(&newmask, mask);
+                CPU_AND(&newmask, &set->cs_mask, mask);
         } else
                 CPU_COPY(mask, &newmask);
 
@@ -668,7 +667,7 @@
         struct cpuset *nset;
 
         mtx_assert(&cpuset_lock, MA_OWNED);
-        CPU_AND(&set->cs_mask, mask);
+        CPU_AND(&set->cs_mask, &set->cs_mask, mask);
         LIST_FOREACH(nset, &set->cs_children, cs_siblings)
                 cpuset_update(nset, &set->cs_mask);
 
@@ -1083,8 +1082,7 @@
          * restriction to the new set, otherwise take it wholesale.
          */
         if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
-                CPU_COPY(&tdset->cs_mask, mask);
-                CPU_AND(mask, &set->cs_mask);
+                CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
         } else
                 CPU_COPY(&set->cs_mask, mask);
 
@@ -1153,8 +1151,7 @@
         pbase = cpuset_getbase(td->td_cpuset);
 
         /* Copy process mask, then further apply the new root mask. */
-        CPU_COPY(&pbase->cs_mask, &nmask);
-        CPU_AND(&nmask, &nroot->cs_mask);
+        CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
 
         domainset_copy(pbase->cs_domain, &ndomain);
         DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
@@ -1946,7 +1943,7 @@
         case CPU_WHICH_PID:
                 FOREACH_THREAD_IN_PROC(p, ttd) {
                         thread_lock(ttd);
-                        CPU_OR(mask, &ttd->td_cpuset->cs_mask);
+                        CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
                         thread_unlock(ttd);
                 }
                 break;
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -548,8 +548,7 @@
 
         if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                 /* Get all read tokens back */
-                readcpus = all_cpus;
-                CPU_ANDNOT(&readcpus, &rm->rm_writecpus);
+                CPU_ANDNOT(&readcpus, &all_cpus, &rm->rm_writecpus);
                 rm->rm_writecpus = all_cpus;
 
                 /*
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1165,8 +1165,8 @@
                 return (0);
 
         CPU_SETOF(me, &dontuse);
-        CPU_OR(&dontuse, &stopped_cpus);
-        CPU_OR(&dontuse, &hlt_cpus_mask);
+        CPU_OR(&dontuse, &dontuse, &stopped_cpus);
+        CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
         CPU_ZERO(&map2);
         if (forward_wakeup_use_loop) {
                 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
@@ -1179,8 +1179,7 @@
         }
 
         if (forward_wakeup_use_mask) {
-                map = idle_cpus_mask;
-                CPU_ANDNOT(&map, &dontuse);
+                CPU_ANDNOT(&map, &idle_cpus_mask, &dontuse);
 
                 /* If they are both on, compare and use loop if different. */
                 if (forward_wakeup_use_loop) {
@@ -1366,8 +1365,7 @@
                         kick_other_cpu(td->td_priority, cpu);
         } else {
                 if (!single_cpu) {
-                        tidlemsk = idle_cpus_mask;
-                        CPU_ANDNOT(&tidlemsk, &hlt_cpus_mask);
+                        CPU_ANDNOT(&tidlemsk, &idle_cpus_mask, &hlt_cpus_mask);
                         CPU_CLR(cpuid, &tidlemsk);
 
                         if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -707,8 +707,7 @@
 
         if (!SCHEDULER_STOPPED()) {
 #ifdef SMP
-                other_cpus = all_cpus;
-                CPU_ANDNOT(&other_cpus, &stopped_cpus);
+                CPU_ANDNOT(&other_cpus, &all_cpus, &stopped_cpus);
                 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
                 stop_cpus_hard(other_cpus);
 #endif
@@ -746,7 +745,7 @@
         if (did_stop_cpus) {
                 curthread->td_stopsched = 0;
 #ifdef SMP
-                CPU_AND(&other_cpus, &stopped_cpus);
+                CPU_AND(&other_cpus, &other_cpus, &stopped_cpus);
                 restart_cpus(other_cpus);
 #endif
         }
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -761,7 +761,7 @@
             parent,
             cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
             cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
-        CPU_OR(&parent->cg_mask, &child->cg_mask);
+        CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
         parent->cg_count += child->cg_count;
 }
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -4970,7 +4970,7 @@
         for (i = 0; i < scctx->isc_nrxqsets; i++)
                 CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
                     &assigned_cpus);
-        CPU_AND(&assigned_cpus, &ctx->ifc_cpus);
+        CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
         cores_consumed = CPU_COUNT(&assigned_cpus);
 
         mtx_lock(&cpu_offset_mtx);
diff --git a/sys/powerpc/ofw/ofw_pcibus.c b/sys/powerpc/ofw/ofw_pcibus.c
--- a/sys/powerpc/ofw/ofw_pcibus.c
+++ b/sys/powerpc/ofw/ofw_pcibus.c
@@ -405,7 +405,7 @@
                         return (error);
                 if (setsize != sizeof(cpuset_t))
                         return (EINVAL);
-                CPU_AND(cpuset, &cpuset_domain[d]);
+                CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
                 return (0);
         default:
                 return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
diff --git a/sys/sys/_cpuset.h b/sys/sys/_cpuset.h
--- a/sys/sys/_cpuset.h
+++ b/sys/sys/_cpuset.h
@@ -49,4 +49,6 @@
 __BITSET_DEFINE(_cpuset, CPU_SETSIZE);
 typedef struct _cpuset cpuset_t;
 
+extern cpuset_t *__cpuset_alloc(size_t set_size);
+
 #endif /* !_SYS__CPUSET_H_ */
diff --git a/sys/sys/bitset.h b/sys/sys/bitset.h
--- a/sys/sys/bitset.h
+++ b/sys/sys/bitset.h
@@ -312,8 +312,6 @@
 /*
  * Dynamically allocate a bitset.
  */
-#define __BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
-
 #define BIT_AND(_s, d, s) __BIT_AND(_s, d, s)
 #define BIT_AND2(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
 #define BIT_ANDNOT(_s, d, s) __BIT_ANDNOT(_s, d, s)
@@ -351,7 +349,11 @@
 #define BIT_XOR2(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
 #define BIT_ZERO(_s, p) __BIT_ZERO(_s, p)
 
-#define BITSET_ALLOC(_s, mt, mf) __BITSET_ALLOC(_s, mt, mf)
+#if defined(_KERNEL)
+#define BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
+#define BITSET_FREE(p, mt) free(p, mt)
+#endif /* _KERNEL */
+
 #define BITSET_FSET(n) __BITSET_FSET(n)
 #define BITSET_SIZE(_s) __BITSET_SIZE(_s)
 #define BITSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -36,6 +36,7 @@
 
 #include <sys/_cpuset.h>
 
+#include <sys/_bitset.h>
 #include <sys/bitset.h>
 
 #define _NCPUBITS _BITSET_BITS
@@ -56,9 +57,10 @@
 #define CPU_SUBSET(p, c) __BIT_SUBSET(CPU_SETSIZE, p, c)
 #define CPU_OVERLAP(p, c) __BIT_OVERLAP(CPU_SETSIZE, p, c)
 #define CPU_CMP(p, c) __BIT_CMP(CPU_SETSIZE, p, c)
-#define CPU_OR(d, s) __BIT_OR(CPU_SETSIZE, d, s)
-#define CPU_AND(d, s) __BIT_AND(CPU_SETSIZE, d, s)
-#define CPU_ANDNOT(d, s) __BIT_ANDNOT(CPU_SETSIZE, d, s)
+#define CPU_OR(d, s1, s2) __BIT_OR2(CPU_SETSIZE, d, s1, s2)
+#define CPU_AND(d, s1, s2) __BIT_AND2(CPU_SETSIZE, d, s1, s2)
+#define CPU_ANDNOT(d, s1, s2) __BIT_ANDNOT2(CPU_SETSIZE, d, s1, s2)
+#define CPU_XOR(d, s1, s2) __BIT_XOR2(CPU_SETSIZE, d, s1, s2)
 #define CPU_CLR_ATOMIC(n, p) __BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
 #define CPU_SET_ATOMIC(n, p) __BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
 #define CPU_SET_ATOMIC_ACQ(n, p) __BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
@@ -73,6 +75,20 @@
 #define CPUSET_FSET __BITSET_FSET(_NCPUWORDS)
 #define CPUSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
 
+#if !defined(_KERNEL)
+#define CPU_ALLOC_SIZE(_s) __BITSET_SIZE(_s)
+#define CPU_ALLOC(_s) __cpuset_alloc(_s)
+#define CPU_FREE(p) __cpuset_free(p)
+
+#define CPU_ISSET_S(n, _s, p) __BIT_ISSET(_s, n, p)
+#define CPU_SET_S(n, _s, p) __BIT_SET(_s, n, p)
+#define CPU_ZERO_S(_s, p) __BIT_ZERO(_s, p)
+
+#define CPU_OR_S(_s, d, s1, s2) __BIT_OR2(_s, d, s1, s2)
+#define CPU_AND_S(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
+#define CPU_XOR_S(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
+#endif
+
 /*
  * Valid cpulevel_t values.
  */
diff --git a/sys/sys/param.h b/sys/sys/param.h
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -76,7 +76,7 @@
  * cannot include sys/param.h and should only be updated here.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1400045
+#define __FreeBSD_version 1400046
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -475,7 +475,7 @@
         if (smp_started) {
                 map = all_cpus;
                 CPU_CLR(PCPU_GET(cpuid), &map);
-                CPU_ANDNOT(&map, &stopped_cpus);
+                CPU_ANDNOT(&map, &map, &stopped_cpus);
                 if (!CPU_EMPTY(&map)) {
                         printf("cpu_reset: Stopping other CPUs\n");
                         stop_cpus(map);
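
Usage note (editorial sketch, not part of the diff): externally maintained code can adapt to the
three-argument CPU_AND/CPU_OR/CPU_ANDNOT the same way the contrib/ofed/libmlx5/mlx5.c hunk above
does, by keying on __FreeBSD_version 1400046.  The function and variable names below are
illustrative only:

        #include <sys/param.h>          /* defines __FreeBSD_version */
        #include <sys/cpuset.h>

        static void
        restrict_to_active(cpuset_t *mask, cpuset_t *active)
        {
        #if __FreeBSD_version >= 1400046
                /* New form: explicit destination plus two source sets. */
                CPU_AND(mask, mask, active);
        #else
                /* Old form: the destination doubled as the first source. */
                CPU_AND(mask, active);
        #endif
        }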
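
Usage note (editorial sketch, not part of the diff): the !_KERNEL block added to sys/sys/cpuset.h
routes CPU_ALLOC/CPU_FREE to the new libc functions __cpuset_alloc()/__cpuset_free(), which simply
malloc() and free() CPU_ALLOC_SIZE(ncpus) bytes.  A minimal userland example, assuming the
definitions exactly as shown in this patch (note that the size argument of the *_S macros is a CPU
count, passed straight through to the __BIT_* macros):

        #include <sys/cpuset.h>
        #include <stdio.h>

        int
        main(void)
        {
                cpuset_t *set;

                /* Heap-allocated set sized for CPU_SETSIZE CPUs; NULL on failure. */
                set = CPU_ALLOC(CPU_SETSIZE);
                if (set == NULL)
                        return (1);
                CPU_ZERO_S(CPU_SETSIZE, set);   /* malloc'ed memory starts uninitialized */
                CPU_SET_S(0, CPU_SETSIZE, set);
                printf("cpu 0 is %s\n",
                    CPU_ISSET_S(0, CPU_SETSIZE, set) ? "set" : "clear");
                CPU_FREE(set);
                return (0);
        }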