diff --git a/contrib/ofed/libmlx5/mlx5.c b/contrib/ofed/libmlx5/mlx5.c
--- a/contrib/ofed/libmlx5/mlx5.c
+++ b/contrib/ofed/libmlx5/mlx5.c
@@ -363,8 +363,12 @@
 	mlx5_local_cpu_set(ibdev, &dev_local_cpus);
 
 	/* check if my cpu set is in dev cpu */
+#if __FreeBSD_version < 1400045 /* TO BE ADJUSTED */
 	CPU_OR(&result_set, &my_cpus);
 	CPU_OR(&result_set, &dev_local_cpus);
+#else
+	CPU_OR(&result_set, &my_cpus, &dev_local_cpus);
+#endif
 	stall_enable = CPU_EQUAL(&result_set, &dev_local_cpus) ? 0 : 1;
 
 out:
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8290,7 +8290,7 @@
 		other_cpus = all_cpus;
 		critical_enter();
 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		CPU_AND(&other_cpus, &pmap->pm_active);
+		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
 		critical_exit();
 		KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
 	}
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1304,7 +1304,7 @@
 	mtx_lock(&vm->rendezvous_mtx);
 	while (vm->rendezvous_func != NULL) {
 		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
-		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
+		CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
 
 		if (vcpuid != -1 &&
 		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -1173,7 +1173,7 @@
 			return (error);
 		if (setsize != sizeof(cpuset_t))
 			return (EINVAL);
-		CPU_AND(cpuset, &cpuset_domain[d]);
+		CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
 		return (0);
 	default:
 		return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1243,7 +1243,7 @@
 		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
 		CPU_CLR(cpuid, &other_cpus);
-		CPU_AND(&other_cpus, &pmap->pm_active);
+		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
 		mask = &other_cpus;
 	}
 	smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
@@ -1276,7 +1276,7 @@
 		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
 		CPU_CLR(cpuid, &other_cpus);
-		CPU_AND(&other_cpus, &pmap->pm_active);
+		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
 		mask = &other_cpus;
 	}
 	smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
@@ -1299,7 +1299,7 @@
 		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
 		CPU_CLR(cpuid, &other_cpus);
-		CPU_AND(&other_cpus, &pmap->pm_active);
+		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
 		mask = &other_cpus;
 	}
 	smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -604,9 +604,9 @@
 	if ((flags & SFB_CPUPRIVATE) == 0) {
 		other_cpus = all_cpus;
 		CPU_CLR(cpuid, &other_cpus);
-		CPU_ANDNOT(&other_cpus, &sf->cpumask);
+		CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
 		if (!CPU_EMPTY(&other_cpus)) {
-			CPU_OR(&sf->cpumask, &other_cpus);
+			CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
 			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
 			    sf_buf_shootdown_curcpu_cb);
 		}
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -326,7 +326,7 @@
 	set->cs_flags = 0;
 	mtx_lock_spin(&cpuset_lock);
 	set->cs_domain = domain;
-	CPU_AND(&set->cs_mask, &parent->cs_mask);
+	CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
 	set->cs_id = id;
 	set->cs_parent = cpuset_ref(parent);
 	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
@@ -645,8 +645,7 @@
 	if (set->cs_flags & CPU_SET_RDONLY)
 		return (EPERM);
 	if (augment_mask) {
-		CPU_COPY(&set->cs_mask, &newmask);
-		CPU_AND(&newmask, mask);
+		CPU_AND(&newmask, &set->cs_mask, mask);
 	} else
 		CPU_COPY(mask, &newmask);
 
@@ -668,7 +667,7 @@
 	struct cpuset *nset;
 
 	mtx_assert(&cpuset_lock, MA_OWNED);
-	CPU_AND(&set->cs_mask, mask);
+	CPU_AND(&set->cs_mask, &set->cs_mask, mask);
 	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
 		cpuset_update(nset, &set->cs_mask);
 
@@ -1083,8 +1082,7 @@
 	 * restriction to the new set, otherwise take it wholesale.
 	 */
 	if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
-		CPU_COPY(&tdset->cs_mask, mask);
-		CPU_AND(mask, &set->cs_mask);
+		CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
 	} else
 		CPU_COPY(&set->cs_mask, mask);
 
@@ -1153,8 +1151,7 @@
 	pbase = cpuset_getbase(td->td_cpuset);
 
 	/* Copy process mask, then further apply the new root mask. */
-	CPU_COPY(&pbase->cs_mask, &nmask);
-	CPU_AND(&nmask, &nroot->cs_mask);
+	CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
 
 	domainset_copy(pbase->cs_domain, &ndomain);
 	DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
@@ -1946,7 +1943,7 @@
 	case CPU_WHICH_PID:
 		FOREACH_THREAD_IN_PROC(p, ttd) {
 			thread_lock(ttd);
-			CPU_OR(mask, &ttd->td_cpuset->cs_mask);
+			CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
 			thread_unlock(ttd);
 		}
 		break;
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -548,8 +548,7 @@
 
 	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
 		/* Get all read tokens back */
-		readcpus = all_cpus;
-		CPU_ANDNOT(&readcpus, &rm->rm_writecpus);
+		CPU_ANDNOT(&readcpus, &all_cpus, &rm->rm_writecpus);
 		rm->rm_writecpus = all_cpus;
 
 		/*
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1165,8 +1165,8 @@
 		return (0);
 
 	CPU_SETOF(me, &dontuse);
-	CPU_OR(&dontuse, &stopped_cpus);
-	CPU_OR(&dontuse, &hlt_cpus_mask);
+	CPU_OR(&dontuse, &dontuse, &stopped_cpus);
+	CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
 	CPU_ZERO(&map2);
 	if (forward_wakeup_use_loop) {
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
@@ -1179,8 +1179,7 @@
 	}
 
 	if (forward_wakeup_use_mask) {
-		map = idle_cpus_mask;
-		CPU_ANDNOT(&map, &dontuse);
+		CPU_ANDNOT(&map, &idle_cpus_mask, &dontuse);
 
 		/* If they are both on, compare and use loop if different. */
 		if (forward_wakeup_use_loop) {
@@ -1366,8 +1365,7 @@
 			kick_other_cpu(td->td_priority, cpu);
 	} else {
 		if (!single_cpu) {
-			tidlemsk = idle_cpus_mask;
-			CPU_ANDNOT(&tidlemsk, &hlt_cpus_mask);
+			CPU_ANDNOT(&tidlemsk, &idle_cpus_mask, &hlt_cpus_mask);
 			CPU_CLR(cpuid, &tidlemsk);
 
 			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -707,8 +707,7 @@
 
 	if (!SCHEDULER_STOPPED()) {
 #ifdef SMP
-		other_cpus = all_cpus;
-		CPU_ANDNOT(&other_cpus, &stopped_cpus);
+		CPU_ANDNOT(&other_cpus, &all_cpus, &stopped_cpus);
 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 		stop_cpus_hard(other_cpus);
 #endif
@@ -746,7 +745,7 @@
 	if (did_stop_cpus) {
 		curthread->td_stopsched = 0;
 #ifdef SMP
-		CPU_AND(&other_cpus, &stopped_cpus);
+		CPU_AND(&other_cpus, &other_cpus, &stopped_cpus);
 		restart_cpus(other_cpus);
 #endif
 	}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -761,7 +761,7 @@
 		    parent,
 		    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
 		    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
-	CPU_OR(&parent->cg_mask, &child->cg_mask);
+	CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
 	parent->cg_count += child->cg_count;
 }
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -4969,7 +4969,7 @@
 	for (i = 0; i < scctx->isc_nrxqsets; i++)
 		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
 		    &assigned_cpus);
-	CPU_AND(&assigned_cpus, &ctx->ifc_cpus);
+	CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
 	cores_consumed = CPU_COUNT(&assigned_cpus);
 
 	mtx_lock(&cpu_offset_mtx);
diff --git a/sys/powerpc/ofw/ofw_pcibus.c b/sys/powerpc/ofw/ofw_pcibus.c
--- a/sys/powerpc/ofw/ofw_pcibus.c
+++ b/sys/powerpc/ofw/ofw_pcibus.c
@@ -405,7 +405,7 @@
 			return (error);
 		if (setsize != sizeof(cpuset_t))
 			return (EINVAL);
-		CPU_AND(cpuset, &cpuset_domain[d]);
+		CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
 		return (0);
 	default:
 		return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
diff --git a/sys/sys/bitset.h b/sys/sys/bitset.h
--- a/sys/sys/bitset.h
+++ b/sys/sys/bitset.h
@@ -312,7 +312,13 @@
 /*
  * Dynamically allocate a bitset.
  */
+#if defined(_KERNEL)
 #define	__BITSET_ALLOC(_s, mt, mf)	malloc(__BITSET_SIZE((_s)), mt, (mf))
+#define	__BITSET_FREE(p, mt)		free(p, mt)
+#else /* _KERNEL */
+#define	__BITSET_ALLOC(_s)		malloc(__BITSET_SIZE((_s)))
+#define	__BITSET_FREE(p)		free(p)
+#endif /* _KERNEL */
 
 #define	BIT_AND(_s, d, s)		__BIT_AND(_s, d, s)
 #define	BIT_AND2(_s, d, s1, s2)		__BIT_AND2(_s, d, s1, s2)
@@ -352,6 +358,7 @@
 #define	BIT_ZERO(_s, p)			__BIT_ZERO(_s, p)
 
 #define	BITSET_ALLOC(_s, mt, mf)	__BITSET_ALLOC(_s, mt, mf)
+#define	BITSET_FREE(p, mt)		__BITSET_FREE(p, mt)
 #define	BITSET_FSET(n)			__BITSET_FSET(n)
 #define	BITSET_SIZE(_s)			__BITSET_SIZE(_s)
 #define	BITSET_T_INITIALIZER(x)		__BITSET_T_INITIALIZER(x)
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -36,6 +36,7 @@
 
 #include <sys/_cpuset.h>
 
+#include <sys/_bitset.h>
 #include <sys/bitset.h>
 
 #define	_NCPUBITS	_BITSET_BITS
@@ -56,9 +57,10 @@
 #define	CPU_SUBSET(p, c)		__BIT_SUBSET(CPU_SETSIZE, p, c)
 #define	CPU_OVERLAP(p, c)		__BIT_OVERLAP(CPU_SETSIZE, p, c)
 #define	CPU_CMP(p, c)			__BIT_CMP(CPU_SETSIZE, p, c)
-#define	CPU_OR(d, s)			__BIT_OR(CPU_SETSIZE, d, s)
-#define	CPU_AND(d, s)			__BIT_AND(CPU_SETSIZE, d, s)
-#define	CPU_ANDNOT(d, s)		__BIT_ANDNOT(CPU_SETSIZE, d, s)
+#define	CPU_OR(d, s1, s2)		__BIT_OR2(CPU_SETSIZE, d, s1, s2)
+#define	CPU_AND(d, s1, s2)		__BIT_AND2(CPU_SETSIZE, d, s1, s2)
+#define	CPU_ANDNOT(d, s1, s2)		__BIT_ANDNOT2(CPU_SETSIZE, d, s1, s2)
+#define	CPU_XOR(d, s1, s2)		__BIT_XOR2(CPU_SETSIZE, d, s1, s2)
 #define	CPU_CLR_ATOMIC(n, p)		__BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
 #define	CPU_SET_ATOMIC(n, p)		__BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
 #define	CPU_SET_ATOMIC_ACQ(n, p)	__BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
@@ -73,6 +75,20 @@
 #define	CPUSET_FSET			__BITSET_FSET(_NCPUWORDS)
 #define	CPUSET_T_INITIALIZER(x)		__BITSET_T_INITIALIZER(x)
 
+#if !defined(_KERNEL)
+#define	CPU_ALLOC(_s)			__BITSET_ALLOC(_s)
+#define	CPU_ALLOC_SIZE(_s)		__BITSET_SIZE(_s)
+#define	CPU_FREE(p)			__BITSET_FREE(p)
+
+#define	CPU_ISSET_S(n, _s, p)		__BIT_ISSET(_s, n, p)
+#define	CPU_SET_S(n, _s, p)		__BIT_SET(_s, n, p)
+#define	CPU_ZERO_S(_s, p)		__BIT_ZERO(_s, p)
+
+#define	CPU_OR_S(_s, d, s1, s2)		__BIT_OR2(_s, d, s1, s2)
+#define	CPU_AND_S(_s, d, s1, s2)	__BIT_AND2(_s, d, s1, s2)
+#define	CPU_XOR_S(_s, d, s1, s2)	__BIT_XOR2(_s, d, s1, s2)
+#endif
+
 /*
  * Valid cpulevel_t values.
  */
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -475,7 +475,7 @@
 	if (smp_started) {
 		map = all_cpus;
 		CPU_CLR(PCPU_GET(cpuid), &map);
-		CPU_ANDNOT(&map, &stopped_cpus);
+		CPU_ANDNOT(&map, &map, &stopped_cpus);
 		if (!CPU_EMPTY(&map)) {
 			printf("cpu_reset: Stopping other CPUs\n");
 			stop_cpus(map);
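
Net effect for consumers: CPU_OR, CPU_AND and CPU_ANDNOT move from a two-operand, read-modify-write form to the glibc-compatible three-operand form (destination plus two sources), and CPU_XOR is added. A minimal userland sketch of the new calling convention; the test program itself is illustrative and not part of the patch:

#include <sys/param.h>
#include <sys/cpuset.h>

#include <stdio.h>

int
main(void)
{
	cpuset_t a, b, res;

	CPU_ZERO(&a);
	CPU_ZERO(&b);
	CPU_SET(0, &a);
	CPU_SET(0, &b);
	CPU_SET(1, &b);

	/* res = a & b; the destination is no longer an implicit source. */
	CPU_AND(&res, &a, &b);
	printf("and: %d\n", CPU_COUNT(&res));		/* 1 */

	/* res = b & ~a */
	CPU_ANDNOT(&res, &b, &a);
	printf("andnot: %d\n", CPU_COUNT(&res));	/* 1 */

	/* An in-place update now names the destination twice. */
	CPU_OR(&a, &a, &b);
	printf("or: %d\n", CPU_COUNT(&a));		/* 2 */

	return (0);
}

Code that must build against both versions of the header can straddle the change with a __FreeBSD_version check, as the mlx5 hunk above does.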
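The new !_KERNEL block in cpuset.h also exposes allocated sets to userland. A sketch of that API, under the assumption that, as wired to the __BIT_* macros above, the _s argument of the *_S variants is the set size in bits (CPU_SETSIZE below), and that CPU_ALLOC returns unzeroed malloc() memory:

#include <sys/param.h>
#include <sys/cpuset.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	cpuset_t *set, *other;

	set = CPU_ALLOC(CPU_SETSIZE);
	other = CPU_ALLOC(CPU_SETSIZE);
	if (set == NULL || other == NULL)
		err(1, "CPU_ALLOC");

	CPU_ZERO_S(CPU_SETSIZE, set);
	CPU_ZERO_S(CPU_SETSIZE, other);
	CPU_SET_S(2, CPU_SETSIZE, set);
	CPU_SET_S(3, CPU_SETSIZE, other);

	/* The sized variants take the same three-operand shape. */
	CPU_OR_S(CPU_SETSIZE, set, set, other);
	printf("bit 3: %d\n", CPU_ISSET_S(3, CPU_SETSIZE, set));	/* 1 */

	CPU_FREE(set);
	CPU_FREE(other);
	return (0);
}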