D4346.id10665.diff (17 KB)
D4346: Fix pmap_invalidate_all() for kernel_pmap on x86 CPUs with PG_G support.
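Summary (commentary, not part of the patch): this revision consolidates the duplicated amd64 and i386 SMP TLB-shootdown code into sys/x86/x86/mp_x86.c, renames invltlb_globpcid() to invltlb_glob(), adds an i386 invltlb_glob() that toggles CR4.PGE, and passes the target pmap to smp_masked_invltlb() so the IPI handler can flush global (PG_G) mappings when the kernel pmap is invalidated. A minimal sketch of that flush decision, using only names that appear in the diff below (the helper name itself is hypothetical):

/*
 * Sketch only: invltlb() reloads %cr3, which leaves PG_G (global)
 * entries in the TLB, so a shootdown of the kernel pmap must use
 * invltlb_glob(), which toggles CR4.PGE.  The real logic lives in
 * invltlb_handler() in the patch below.
 */
static void
tlb_flush_for(pmap_t pmap)
{

	if (pmap == kernel_pmap)
		invltlb_glob();		/* also drops PG_G mappings */
	else
		invltlb();		/* non-global mappings only */
}
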
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c
+++ sys/amd64/amd64/mp_machdep.c
@@ -87,11 +87,6 @@
char *doublefault_stack;
char *nmi_stack;
-/* Variables needed for SMP tlb shootdown. */
-static vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
-static pmap_t smp_tlb_pmap;
-volatile int smp_tlb_wait;
-
extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
/*
@@ -410,120 +405,7 @@
return 0; /* return FAILURE */
}
-/*
- * Flush the TLB on other CPU's
- */
-
-static void
-smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
- vm_offset_t addr1, vm_offset_t addr2)
-{
- int cpu, ncpu, othercpus;
-
- othercpus = mp_ncpus - 1; /* does not shootdown self */
-
- /*
- * Check for other cpus. Return if none.
- */
- if (CPU_ISFULLSET(&mask)) {
- if (othercpus < 1)
- return;
- } else {
- CPU_CLR(PCPU_GET(cpuid), &mask);
- if (CPU_EMPTY(&mask))
- return;
- }
-
- if (!(read_rflags() & PSL_I))
- panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_ipi_mtx);
- smp_tlb_addr1 = addr1;
- smp_tlb_addr2 = addr2;
- smp_tlb_pmap = pmap;
- smp_tlb_wait = 0;
- if (CPU_ISFULLSET(&mask)) {
- ncpu = othercpus;
- ipi_all_but_self(vector);
- } else {
- ncpu = 0;
- while ((cpu = CPU_FFS(&mask)) != 0) {
- cpu--;
- CPU_CLR(cpu, &mask);
- CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
- cpu, vector);
- ipi_send_cpu(cpu, vector);
- ncpu++;
- }
- }
- while (smp_tlb_wait < ncpu)
- ia32_pause();
- mtx_unlock_spin(&smp_ipi_mtx);
-}
-
-void
-smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_global++;
-#endif
- }
-}
-
-void
-smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(mask, IPI_INVLPG, NULL, addr, 0);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_page++;
-#endif
- }
-}
-
-void
-smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, NULL,
- addr1, addr2);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_range++;
- ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
-#endif
- }
-}
-
-void
-smp_cache_flush(void)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL,
- 0, 0);
- }
-}
-
-/*
- * Handlers for TLB related IPIs
- */
-void
-invltlb_handler(void)
-{
-#ifdef COUNT_XINVLTLB_HITS
- xhits_gbl[PCPU_GET(cpuid)]++;
-#endif /* COUNT_XINVLTLB_HITS */
-#ifdef COUNT_IPIS
- (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
-#endif /* COUNT_IPIS */
-
- invltlb();
- atomic_add_int(&smp_tlb_wait, 1);
-}
+extern pmap_t smp_tlb_pmap;
void
invltlb_invpcid_handler(void)
@@ -556,7 +438,7 @@
#endif /* COUNT_IPIS */
if (smp_tlb_pmap == kernel_pmap) {
- invltlb_globpcid();
+ invltlb_glob();
} else {
/*
* The current pmap might not be equal to
@@ -572,38 +454,3 @@
}
atomic_add_int(&smp_tlb_wait, 1);
}
-
-void
-invlpg_handler(void)
-{
-#ifdef COUNT_XINVLTLB_HITS
- xhits_pg[PCPU_GET(cpuid)]++;
-#endif /* COUNT_XINVLTLB_HITS */
-#ifdef COUNT_IPIS
- (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
-#endif /* COUNT_IPIS */
-
- invlpg(smp_tlb_addr1);
- atomic_add_int(&smp_tlb_wait, 1);
-}
-
-void
-invlrng_handler(void)
-{
- vm_offset_t addr;
-
-#ifdef COUNT_XINVLTLB_HITS
- xhits_rng[PCPU_GET(cpuid)]++;
-#endif /* COUNT_XINVLTLB_HITS */
-#ifdef COUNT_IPIS
- (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
-#endif /* COUNT_IPIS */
-
- addr = smp_tlb_addr1;
- do {
- invlpg(addr);
- addr += PAGE_SIZE;
- } while (addr < smp_tlb_addr2);
-
- atomic_add_int(&smp_tlb_wait, 1);
-}
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -1321,7 +1321,7 @@
* Promotion: flush every 4KB page mapping from the TLB,
* including any global (PG_G) mappings.
*/
- invltlb_globpcid();
+ invltlb_glob();
}
}
#ifdef SMP
@@ -1482,7 +1482,7 @@
bzero(&d, sizeof(d));
invpcid(&d, INVPCID_CTXGLOB);
} else {
- invltlb_globpcid();
+ invltlb_glob();
}
mask = &all_cpus;
} else {
@@ -1653,7 +1653,7 @@
bzero(&d, sizeof(d));
invpcid(&d, INVPCID_CTXGLOB);
} else {
- invltlb_globpcid();
+ invltlb_glob();
}
} else if (pmap == PCPU_GET(curpmap)) {
if (pmap_pcid_enabled) {
Index: sys/amd64/include/cpufunc.h
===================================================================
--- sys/amd64/include/cpufunc.h
+++ sys/amd64/include/cpufunc.h
@@ -505,7 +505,7 @@
* Operations that Invalidate TLBs and Paging-Structure Caches.
*/
static __inline void
-invltlb_globpcid(void)
+invltlb_glob(void)
{
uint64_t cr4;
Index: sys/i386/i386/mp_machdep.c
===================================================================
--- sys/i386/i386/mp_machdep.c
+++ sys/i386/i386/mp_machdep.c
@@ -132,11 +132,6 @@
extern struct pcpu __pcpu[];
-/* Variables needed for SMP tlb shootdown. */
-vm_offset_t smp_tlb_addr1;
-vm_offset_t smp_tlb_addr2;
-volatile int smp_tlb_wait;
-
/*
* Local data and functions.
*/
@@ -487,201 +482,3 @@
}
return 0; /* return FAILURE */
}
-
-/*
- * Flush the TLB on all other CPU's
- */
-static void
-smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
-{
- u_int ncpu;
-
- ncpu = mp_ncpus - 1; /* does not shootdown self */
- if (ncpu < 1)
- return; /* no other cpus */
- if (!(read_eflags() & PSL_I))
- panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_ipi_mtx);
- smp_tlb_addr1 = addr1;
- smp_tlb_addr2 = addr2;
- smp_tlb_wait = 0;
- ipi_all_but_self(vector);
- while (smp_tlb_wait < ncpu)
- ia32_pause();
- mtx_unlock_spin(&smp_ipi_mtx);
-}
-
-static void
-smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
-{
- int cpu, ncpu, othercpus;
-
- othercpus = mp_ncpus - 1;
- if (CPU_ISFULLSET(&mask)) {
- if (othercpus < 1)
- return;
- } else {
- CPU_CLR(PCPU_GET(cpuid), &mask);
- if (CPU_EMPTY(&mask))
- return;
- }
- if (!(read_eflags() & PSL_I))
- panic("%s: interrupts disabled", __func__);
- mtx_lock_spin(&smp_ipi_mtx);
- smp_tlb_addr1 = addr1;
- smp_tlb_addr2 = addr2;
- atomic_store_rel_int(&smp_tlb_wait, 0);
- if (CPU_ISFULLSET(&mask)) {
- ncpu = othercpus;
- ipi_all_but_self(vector);
- } else {
- ncpu = 0;
- while ((cpu = CPU_FFS(&mask)) != 0) {
- cpu--;
- CPU_CLR(cpu, &mask);
- CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
- vector);
- ipi_send_cpu(cpu, vector);
- ncpu++;
- }
- }
- while (smp_tlb_wait < ncpu)
- ia32_pause();
- mtx_unlock_spin(&smp_ipi_mtx);
-}
-
-void
-smp_invltlb(void)
-{
-
- if (smp_started) {
- smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_global++;
-#endif
- }
-}
-
-void
-smp_invlpg(vm_offset_t addr)
-{
-
- if (smp_started) {
- smp_tlb_shootdown(IPI_INVLPG, addr, 0);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_page++;
-#endif
- }
-}
-
-void
-smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
-{
-
- if (smp_started) {
- smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_range++;
- ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
-#endif
- }
-}
-
-void
-smp_masked_invltlb(cpuset_t mask)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_masked_global++;
-#endif
- }
-}
-
-void
-smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_masked_page++;
-#endif
- }
-}
-
-void
-smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
-{
-
- if (smp_started) {
- smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
-#ifdef COUNT_XINVLTLB_HITS
- ipi_masked_range++;
- ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
-#endif
- }
-}
-
-void
-smp_cache_flush(void)
-{
-
- if (smp_started)
- smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
-}
-
-/*
- * Handlers for TLB related IPIs
- */
-void
-invltlb_handler(void)
-{
- uint64_t cr3;
-#ifdef COUNT_XINVLTLB_HITS
- xhits_gbl[PCPU_GET(cpuid)]++;
-#endif /* COUNT_XINVLTLB_HITS */
-#ifdef COUNT_IPIS
- (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
-#endif /* COUNT_IPIS */
-
- cr3 = rcr3();
- load_cr3(cr3);
- atomic_add_int(&smp_tlb_wait, 1);
-}
-
-void
-invlpg_handler(void)
-{
-#ifdef COUNT_XINVLTLB_HITS
- xhits_pg[PCPU_GET(cpuid)]++;
-#endif /* COUNT_XINVLTLB_HITS */
-#ifdef COUNT_IPIS
- (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
-#endif /* COUNT_IPIS */
-
- invlpg(smp_tlb_addr1);
-
- atomic_add_int(&smp_tlb_wait, 1);
-}
-
-void
-invlrng_handler(void)
-{
- vm_offset_t addr;
-#ifdef COUNT_XINVLTLB_HITS
- xhits_rng[PCPU_GET(cpuid)]++;
-#endif /* COUNT_XINVLTLB_HITS */
-#ifdef COUNT_IPIS
- (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
-#endif /* COUNT_IPIS */
-
- addr = smp_tlb_addr1;
- do {
- invlpg(addr);
- addr += PAGE_SIZE;
- } while (addr < smp_tlb_addr2);
-
- atomic_add_int(&smp_tlb_wait, 1);
-}
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -655,7 +655,7 @@
va = KERNBASE + KERNLOAD;
while (va < endva) {
pdir_pde(PTD, va) |= pgeflag;
- invltlb(); /* Play it safe, invltlb() every time */
+ invltlb_glob(); /* Play it safe, invltlb() every time */
va += NBPDR;
}
} else {
@@ -664,7 +664,7 @@
pte = vtopte(va);
if (*pte)
*pte |= pgeflag;
- invltlb(); /* Play it safe, invltlb() every time */
+ invltlb_glob(); /* Play it safe, invltlb() every time */
va += PAGE_SIZE;
}
}
@@ -973,6 +973,22 @@
load_cr4(cr4 | CR4_PGE);
}
}
+
+void
+invltlb_glob(void)
+{
+ uint64_t cr4;
+
+ if (pgeflag == 0) {
+ invltlb();
+ } else {
+ cr4 = rcr4();
+ load_cr4(cr4 & ~CR4_PGE);
+ load_cr4(cr4 | CR4_PGE);
+ }
+}
+
+
#ifdef SMP
/*
* For SMP, these functions have to use the IPI mechanism for coherence.
@@ -996,13 +1012,13 @@
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
- cpuset_t other_cpus;
+ cpuset_t *mask, other_cpus;
u_int cpuid;
sched_pin();
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
- smp_invlpg(va);
+ mask = &all_cpus;
} else {
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
@@ -1010,16 +1026,16 @@
if (CPU_ISSET(cpuid, &pmap->pm_active))
invlpg(va);
CPU_AND(&other_cpus, &pmap->pm_active);
- if (!CPU_EMPTY(&other_cpus))
- smp_masked_invlpg(other_cpus, va);
+ mask = &other_cpus;
}
+ smp_masked_invlpg(*mask, va);
sched_unpin();
}
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- cpuset_t other_cpus;
+ cpuset_t *mask, other_cpus;
vm_offset_t addr;
u_int cpuid;
@@ -1027,7 +1043,7 @@
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
- smp_invlpg_range(sva, eva);
+ mask = &all_cpus;
} else {
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
@@ -1036,22 +1052,25 @@
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
CPU_AND(&other_cpus, &pmap->pm_active);
- if (!CPU_EMPTY(&other_cpus))
- smp_masked_invlpg_range(other_cpus, sva, eva);
+ mask = &other_cpus;
}
+ smp_masked_invlpg_range(*mask, sva, eva);
sched_unpin();
}
void
pmap_invalidate_all(pmap_t pmap)
{
- cpuset_t other_cpus;
+ cpuset_t *mask, other_cpus;
u_int cpuid;
sched_pin();
- if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
+ if (pmap == kernel_pmap) {
+ invltlb_glob();
+ mask = &all_cpus;
+ } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
- smp_invltlb();
+ mask = &all_cpus;
} else {
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
@@ -1059,9 +1078,9 @@
if (CPU_ISSET(cpuid, &pmap->pm_active))
invltlb();
CPU_AND(&other_cpus, &pmap->pm_active);
- if (!CPU_EMPTY(&other_cpus))
- smp_masked_invltlb(other_cpus);
+ mask = &other_cpus;
}
+ smp_masked_invltlb(*mask, pmap);
sched_unpin();
}
@@ -1193,7 +1212,9 @@
pmap_invalidate_all(pmap_t pmap)
{
- if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
+ if (pmap == kernel_pmap)
+ invltlb_glob();
+ else if (!CPU_EMPTY(&pmap->pm_active))
invltlb();
}
Index: sys/i386/include/pmap.h
===================================================================
--- sys/i386/include/pmap.h
+++ sys/i386/include/pmap.h
@@ -394,6 +394,8 @@
void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
boolean_t force);
+void invltlb_glob(void);
+
#endif /* _KERNEL */
#endif /* !LOCORE */
Index: sys/i386/include/smp.h
===================================================================
--- sys/i386/include/smp.h
+++ sys/i386/include/smp.h
@@ -53,10 +53,6 @@
extern u_int ipi_page;
extern u_int ipi_range;
extern u_int ipi_range_size;
-extern u_int ipi_masked_global;
-extern u_int ipi_masked_page;
-extern u_int ipi_masked_range;
-extern u_int ipi_masked_range_size;
struct cpu_info {
int cpu_present:1;
@@ -105,13 +101,10 @@
u_int mp_bootaddress(u_int);
void set_interrupt_apic_ids(void);
void smp_cache_flush(void);
-void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(cpuset_t mask, vm_offset_t addr);
-void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
void smp_masked_invlpg_range(cpuset_t mask, vm_offset_t startva,
vm_offset_t endva);
-void smp_invltlb(void);
-void smp_masked_invltlb(cpuset_t mask);
+void smp_masked_invltlb(cpuset_t mask, struct pmap *pmap);
void mem_range_AP_init(void);
void topo_probe(void);
void ipi_send_cpu(int cpu, u_int ipi);
Index: sys/x86/x86/mp_x86.c
===================================================================
--- sys/x86/x86/mp_x86.c
+++ sys/x86/x86/mp_x86.c
@@ -713,19 +713,6 @@
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
0, "");
-
-u_int ipi_masked_global;
-u_int ipi_masked_page;
-u_int ipi_masked_range;
-u_int ipi_masked_range_size;
-SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
- &ipi_masked_global, 0, "");
-SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
- &ipi_masked_page, 0, "");
-SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
- &ipi_masked_range, 0, "");
-SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
- &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */
/*
@@ -1090,3 +1077,165 @@
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif
+
+/*
+ * Flush the TLB on other CPU's
+ */
+
+/* Variables needed for SMP tlb shootdown. */
+static vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
+pmap_t smp_tlb_pmap;
+volatile int smp_tlb_wait;
+
+#ifdef __amd64__
+#define read_eflags() read_rflags()
+#endif
+
+static void
+smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
+ vm_offset_t addr1, vm_offset_t addr2)
+{
+ int cpu, ncpu, othercpus;
+
+ othercpus = mp_ncpus - 1; /* does not shootdown self */
+
+ /*
+ * Check for other cpus. Return if none.
+ */
+ if (CPU_ISFULLSET(&mask)) {
+ if (othercpus < 1)
+ return;
+ } else {
+ CPU_CLR(PCPU_GET(cpuid), &mask);
+ if (CPU_EMPTY(&mask))
+ return;
+ }
+
+ if (!(read_eflags() & PSL_I))
+ panic("%s: interrupts disabled", __func__);
+ mtx_lock_spin(&smp_ipi_mtx);
+ smp_tlb_addr1 = addr1;
+ smp_tlb_addr2 = addr2;
+ smp_tlb_pmap = pmap;
+ smp_tlb_wait = 0;
+ if (CPU_ISFULLSET(&mask)) {
+ ncpu = othercpus;
+ ipi_all_but_self(vector);
+ } else {
+ ncpu = 0;
+ while ((cpu = CPU_FFS(&mask)) != 0) {
+ cpu--;
+ CPU_CLR(cpu, &mask);
+ CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
+ cpu, vector);
+ ipi_send_cpu(cpu, vector);
+ ncpu++;
+ }
+ }
+ while (smp_tlb_wait < ncpu)
+ ia32_pause();
+ mtx_unlock_spin(&smp_ipi_mtx);
+}
+
+void
+smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
+{
+
+ if (smp_started) {
+ smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
+#ifdef COUNT_XINVLTLB_HITS
+ ipi_global++;
+#endif
+ }
+}
+
+void
+smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
+{
+
+ if (smp_started) {
+ smp_targeted_tlb_shootdown(mask, IPI_INVLPG, NULL, addr, 0);
+#ifdef COUNT_XINVLTLB_HITS
+ ipi_page++;
+#endif
+ }
+}
+
+void
+smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
+{
+
+ if (smp_started) {
+ smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, NULL,
+ addr1, addr2);
+#ifdef COUNT_XINVLTLB_HITS
+ ipi_range++;
+ ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
+#endif
+ }
+}
+
+void
+smp_cache_flush(void)
+{
+
+ if (smp_started) {
+ smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL,
+ 0, 0);
+ }
+}
+
+/*
+ * Handlers for TLB related IPIs
+ */
+void
+invltlb_handler(void)
+{
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_gbl[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ if (smp_tlb_pmap == kernel_pmap)
+ invltlb_glob();
+ else
+ invltlb();
+ atomic_add_int(&smp_tlb_wait, 1);
+}
+
+void
+invlpg_handler(void)
+{
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_pg[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ invlpg(smp_tlb_addr1);
+ atomic_add_int(&smp_tlb_wait, 1);
+}
+
+void
+invlrng_handler(void)
+{
+ vm_offset_t addr;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_rng[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ addr = smp_tlb_addr1;
+ do {
+ invlpg(addr);
+ addr += PAGE_SIZE;
+ } while (addr < smp_tlb_addr2);
+
+ atomic_add_int(&smp_tlb_wait, 1);
+}