diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -10256,6 +10256,12 @@
 	PCPU_SET(ucr3, PMAP_NO_CR3);
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 void
 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -6194,6 +6194,12 @@
 	critical_exit();
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 /*
  * Perform the pmap work for mincore(2). If the page is not both referenced and
  * modified by this pmap, returns its physical address so that the caller can
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -129,6 +129,8 @@
 	    (uint64_t)(asid) << TTBR_ASID_SHIFT;	\
 })
 
+#define	PMAP_WANT_ACTIVE_CPUS_NAIVE
+
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
diff --git a/sys/i386/i386/pmap_base.c b/sys/i386/i386/pmap_base.c
--- a/sys/i386/i386/pmap_base.c
+++ b/sys/i386/i386/pmap_base.c
@@ -944,6 +944,12 @@
 	pmap_methods_ptr->pm_kremove(va);
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
 int pae_mode;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -253,3 +253,9 @@
 		return (FALSE);
 	}
 }
+
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -4687,6 +4687,12 @@
 	PCPU_SET(curpmap, pmap);
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 void
 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 {
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -90,6 +90,7 @@
 #include <machine/pmap.h>
 
 #ifdef _KERNEL
+#include <sys/_cpuset.h>
 struct thread;
 
 /*
@@ -118,6 +119,7 @@
 #define	PMAP_TS_REFERENCED_MAX	5
 
 void		 pmap_activate(struct thread *td);
+void		 pmap_active_cpus(pmap_t pmap, cpuset_t *res);
 void		 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 		    int advice);
 void		 pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -78,6 +78,7 @@
 #include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmem.h>
 #include <sys/vmmeter.h>
@@ -907,6 +908,31 @@
 #endif
 }
 
+#ifdef PMAP_WANT_ACTIVE_CPUS_NAIVE
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	struct thread *td;
+	struct proc *p;
+	struct vmspace *vm;
+	int c;
+
+	CPU_ZERO(res);
+	CPU_FOREACH(c) {
+		td = cpuid_to_pcpu[c]->pc_curthread;
+		p = td->td_proc;
+		if (p == NULL)
+			continue;
+		vm = vmspace_acquire_ref(p);
+		if (vm == NULL)
+			continue;
+		if (pmap == vmspace_pmap(vm))
+			CPU_SET(c, res);
+		vmspace_free(vm);
+	}
+}
+#endif
+
 /*
  * Allow userspace to directly trigger the VM drain routine for testing
  * purposes.