Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -3010,16 +3010,16 @@
 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
 		/*
-		 * Do per-cache line flush. Use the sfence
+		 * Do per-cache line flush. Use a locked
 		 * instruction to insure that previous stores are
 		 * included in the write-back. The processor
 		 * propagates flush to other processors in the cache
 		 * coherence domain.
 		 */
-		sfence();
+		atomic_thread_fence_seq_cst();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		sfence();
+		atomic_thread_fence_seq_cst();
 	} else {
 		/*
 		 * Writes are ordered by CLFLUSH on Intel CPUs.
 		 */
@@ -3061,7 +3061,7 @@
 		pmap_invalidate_cache();
 	else {
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (i = 0; i < count; i++) {
@@ -3075,7 +3075,7 @@
 			}
 		}
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 	}
@@ -3096,10 +3096,10 @@
 	if (pmap_kextract(sva) == lapic_paddr)
 		return;
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 	for (; sva < eva; sva += cpu_clflush_line_size)
 		clwb(sva);
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 void
@@ -3132,7 +3132,7 @@
 	sched_pin();
 	pte_store(pte, spa | pte_bits);
 	invlpg(vaddr);
-	/* XXXKIB sfences inside flush_cache_range are excessive */
+	/* XXXKIB atomic fences inside flush_cache_range are excessive */
 	pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
 	sched_unpin();
 }
@@ -8695,8 +8695,10 @@
 
 	oldpmap = PCPU_GET(curpmap);
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
-	if (oldpmap == pmap)
+	if (oldpmap == pmap) {
+		mfence();
 		return;
+	}
 	cpuid = PCPU_GET(cpuid);
 #ifdef SMP
 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
@@ -9389,10 +9391,10 @@
 }
 
 static void
-pmap_large_map_wb_fence_sfence(void)
+pmap_large_map_wb_fence_atomic(void)
 {
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 static void
@@ -9407,7 +9409,7 @@
 		return (pmap_large_map_wb_fence_mfence);
 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
 	    CPUID_STDEXT_CLFLUSHOPT)) != 0)
-		return (pmap_large_map_wb_fence_sfence);
+		return (pmap_large_map_wb_fence_atomic);
 	else
 		/* clflush is strongly enough ordered */
 		return (pmap_large_map_wb_fence_nop);
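
For context, the pattern the first hunks change is a fence-bracketed cache-line write-back: CLFLUSHOPT is ordered with respect to older stores to the line being flushed, but not to other stores, so a fence is needed on both sides. A minimal user-space sketch of that pattern, assuming GCC/Clang on x86-64 with -mclflushopt; flush_range() and CACHE_LINE are illustrative names, not the kernel's API (the kernel uses cpu_clflush_line_size and its own clflushopt() wrapper):

```c
#include <stdatomic.h>
#include <immintrin.h>		/* _mm_clflushopt(); build with -mclflushopt */

#define	CACHE_LINE	64	/* assumed; real code reads the size via CPUID */

static void
flush_range(char *sva, char *eva)
{
	/*
	 * A seq_cst fence compiles to MFENCE or a locked RMW on
	 * x86-64; either is documented to order CLFLUSHOPT, so prior
	 * stores become part of the write-back.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	for (; sva < eva; sva += CACHE_LINE)
		_mm_clflushopt(sva);
	/* Keep the flushes ordered before any later access. */
	atomic_thread_fence(memory_order_seq_cst);
}
```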
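The last two hunks rename the fence helper that pmap_large_map_wb_fence() hands out. The underlying idiom is picking a fence implementation once at boot, based on CPU features, and returning it as a function pointer. A hedged sketch of that selection, with hypothetical names (choose_wb_fence() and plain int flags stand in for the kernel's cpu_vendor_id and cpu_stdext_feature checks):

```c
#include <stdatomic.h>

typedef void (*wb_fence_t)(void);

static void
fence_mfence(void)
{
	__asm__ __volatile__("mfence" : : : "memory");
}

static void
fence_atomic(void)
{
	atomic_thread_fence(memory_order_seq_cst);
}

static void
fence_nop(void)
{
}

/* Illustrative stand-in for pmap_large_map_wb_fence(). */
static wb_fence_t
choose_wb_fence(int is_intel, int has_clwb_or_clflushopt)
{
	if (!is_intel)
		return (fence_mfence);	/* CLFLUSH is not store-ordered here */
	else if (has_clwb_or_clflushopt)
		return (fence_atomic);	/* CLWB/CLFLUSHOPT are weakly ordered */
	else
		return (fence_nop);	/* Intel CLFLUSH is strongly ordered */
}
```

The selector is evaluated once and its result cached, so the per-call cost is an indirect call rather than a repeated feature test.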