Index: head/sys/kern/subr_atomic64.c
===================================================================
--- head/sys/kern/subr_atomic64.c
+++ head/sys/kern/subr_atomic64.c
@@ -55,9 +55,12 @@
 };
 
 #ifdef _KERNEL
+#ifdef SMP
+
 #define	A64_POOL_SIZE	MAXCPU
 /* Estimated size of a cacheline */
 #define	CACHE_ALIGN	CACHE_LINE_SIZE
+static struct mtx a64_mtx_pool[A64_POOL_SIZE];
 
 #define GET_MUTEX(p) \
     (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])
@@ -68,6 +71,13 @@
 
 #define UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)
 
+#else	/* !SMP */
+
+#define	LOCK_A64()	{ register_t s = intr_disable()
+#define	UNLOCK_A64()	intr_restore(s); }
+
+#endif	/* SMP */
+
 #define	ATOMIC64_EMU_UN(op, rt, block, ret) \
     rt \
     atomic_##op##_64(volatile u_int64_t *p) {		\
@@ -86,8 +96,6 @@
 	UNLOCK_A64();		\
 	ret; } struct hack
 
-static struct mtx a64_mtx_pool[A64_POOL_SIZE];
-
 ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
 ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
 ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
@@ -126,6 +134,7 @@
 	return (tmp == tmp_old);
 }
 
+#ifdef SMP
 static void
 atomic64_mtxinit(void *x __unused)
 {
@@ -136,5 +145,6 @@
 }
 
 SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
+#endif	/* SMP */
 
 #endif /* _KERNEL */