Changeset View
Changeset View
Standalone View
Standalone View
head/sys/amd64/amd64/pmap.c
Show First 20 Lines • Show All 1,715 Lines • ▼ Show 20 Lines | if (pmap == PCPU_GET(curpmap)) { | ||||
} | } | ||||
} else if (pmap_pcid_enabled) | } else if (pmap_pcid_enabled) | ||||
pmap->pm_pcids[cpuid].pm_gen = 0; | pmap->pm_pcids[cpuid].pm_gen = 0; | ||||
if (pmap_pcid_enabled) { | if (pmap_pcid_enabled) { | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
if (cpuid != i) | if (cpuid != i) | ||||
pmap->pm_pcids[i].pm_gen = 0; | pmap->pm_pcids[i].pm_gen = 0; | ||||
} | } | ||||
/* | |||||
* The fence is between stores to pm_gen and the read of | |||||
* the pm_active mask. We need to ensure that it is | |||||
* impossible for us to miss the bit update in pm_active | |||||
* and simultaneously observe a non-zero pm_gen in | |||||
* pmap_activate_sw(), otherwise TLB update is missed. | |||||
* Without the fence, IA32 allows such an outcome. | |||||
* Note that pm_active is updated by a locked operation, | |||||
* which provides the reciprocal fence. | |||||
*/ | |||||
atomic_thread_fence_seq_cst(); | |||||
} | } | ||||
mask = &pmap->pm_active; | mask = &pmap->pm_active; | ||||
} | } | ||||
smp_masked_invlpg(*mask, va, pmap); | smp_masked_invlpg(*mask, va, pmap); | ||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ | /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | if (pmap == kernel_pmap) { | ||||
} else if (pmap_pcid_enabled) { | } else if (pmap_pcid_enabled) { | ||||
pmap->pm_pcids[cpuid].pm_gen = 0; | pmap->pm_pcids[cpuid].pm_gen = 0; | ||||
} | } | ||||
if (pmap_pcid_enabled) { | if (pmap_pcid_enabled) { | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
if (cpuid != i) | if (cpuid != i) | ||||
pmap->pm_pcids[i].pm_gen = 0; | pmap->pm_pcids[i].pm_gen = 0; | ||||
} | } | ||||
/* See comment in pmap_invalidate_page(). */ | |||||
atomic_thread_fence_seq_cst(); | |||||
} | } | ||||
mask = &pmap->pm_active; | mask = &pmap->pm_active; | ||||
} | } | ||||
smp_masked_invlpg_range(*mask, sva, eva, pmap); | smp_masked_invlpg_range(*mask, sva, eva, pmap); | ||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
void | void | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | if (pmap == kernel_pmap) { | ||||
} else if (pmap_pcid_enabled) { | } else if (pmap_pcid_enabled) { | ||||
pmap->pm_pcids[cpuid].pm_gen = 0; | pmap->pm_pcids[cpuid].pm_gen = 0; | ||||
} | } | ||||
if (pmap_pcid_enabled) { | if (pmap_pcid_enabled) { | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
if (cpuid != i) | if (cpuid != i) | ||||
pmap->pm_pcids[i].pm_gen = 0; | pmap->pm_pcids[i].pm_gen = 0; | ||||
} | } | ||||
/* See comment in pmap_invalidate_page(). */ | |||||
atomic_thread_fence_seq_cst(); | |||||
} | } | ||||
mask = &pmap->pm_active; | mask = &pmap->pm_active; | ||||
} | } | ||||
smp_masked_invltlb(*mask, pmap); | smp_masked_invltlb(*mask, pmap); | ||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
void | void | ||||
▲ Show 20 Lines • Show All 6,259 Lines • Show Last 20 Lines |