diff --git a/cmd/zed/Makefile.am b/cmd/zed/Makefile.am index 7d2fe124fb67..884760415f53 100644 --- a/cmd/zed/Makefile.am +++ b/cmd/zed/Makefile.am @@ -1,51 +1,51 @@ include $(top_srcdir)/config/Rules.am AM_CFLAGS += $(LIBUDEV_CFLAGS) $(LIBUUID_CFLAGS) SUBDIRS = zed.d sbin_PROGRAMS = zed ZED_SRC = \ zed.c \ zed.h \ zed_conf.c \ zed_conf.h \ zed_disk_event.c \ zed_disk_event.h \ zed_event.c \ zed_event.h \ zed_exec.c \ zed_exec.h \ zed_file.c \ zed_file.h \ zed_log.c \ zed_log.h \ zed_strings.c \ zed_strings.h FMA_SRC = \ agents/zfs_agents.c \ agents/zfs_agents.h \ agents/zfs_diagnosis.c \ agents/zfs_mod.c \ agents/zfs_retire.c \ agents/fmd_api.c \ agents/fmd_api.h \ agents/fmd_serd.c \ agents/fmd_serd.h zed_SOURCES = $(ZED_SRC) $(FMA_SRC) zed_LDADD = \ $(abs_top_builddir)/lib/libzfs/libzfs.la \ $(abs_top_builddir)/lib/libzfs_core/libzfs_core.la \ $(abs_top_builddir)/lib/libnvpair/libnvpair.la \ $(abs_top_builddir)/lib/libuutil/libuutil.la -zed_LDADD += -lrt $(LIBUDEV_LIBS) $(LIBUUID_LIBS) +zed_LDADD += -lrt $(LIBATOMIC_LIBS) $(LIBUDEV_LIBS) $(LIBUUID_LIBS) zed_LDFLAGS = -pthread EXTRA_DIST = agents/README.md include $(top_srcdir)/config/CppCheck.am diff --git a/config/user-libatomic.m4 b/config/user-libatomic.m4 new file mode 100644 index 000000000000..14a60bbea9d0 --- /dev/null +++ b/config/user-libatomic.m4 @@ -0,0 +1,34 @@ +dnl # +dnl # If -latomic exists, it's needed for __atomic intrinsics. +dnl # +dnl # Some systems (like FreeBSD 13) don't have a libatomic at all because +dnl # their toolchain doesn't ship it – they obviously don't need it. +dnl # +dnl # Others (like sufficiently ancient CentOS) have one, +dnl # but terminally broken or unlinkable (e.g. it's a dangling symlink, +dnl # or a linker script that points to a nonexistent file) – +dnl # most arches affected by this don't actually need -latomic (and if they do, +dnl # then they should have libatomic that actually exists and links, +dnl # so don't fall into this category). +dnl # +dnl # Technically, we could check if the platform *actually* needs -latomic, +dnl # or if it has native support for all the intrinsics we use, +dnl # but it /really/ doesn't matter, and C11 recommends to always link it. 
+dnl # +AC_DEFUN([ZFS_AC_CONFIG_USER_LIBATOMIC], [ + AC_MSG_CHECKING([whether -latomic is present]) + + saved_libs="$LIBS" + LIBS="$LIBS -latomic" + + AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])], [ + LIBATOMIC_LIBS="-latomic" + AC_MSG_RESULT([yes]) + ], [ + LIBATOMIC_LIBS="" + AC_MSG_RESULT([no]) + ]) + + LIBS="$saved_libs" + AC_SUBST([LIBATOMIC_LIBS]) +]) diff --git a/config/user.m4 b/config/user.m4 index c220675514e6..e799faffb61c 100644 --- a/config/user.m4 +++ b/config/user.m4 @@ -1,45 +1,46 @@ dnl # dnl # Default ZFS user configuration dnl # AC_DEFUN([ZFS_AC_CONFIG_USER], [ ZFS_AC_CONFIG_USER_GETTEXT ZFS_AC_CONFIG_USER_MOUNT_HELPER ZFS_AC_CONFIG_USER_SYSVINIT ZFS_AC_CONFIG_USER_DRACUT AM_COND_IF([BUILD_FREEBSD], [ PKG_INSTALLDIR(['${prefix}/libdata/pkgconfig'])], [ PKG_INSTALLDIR ]) ZFS_AC_CONFIG_USER_ZLIB AM_COND_IF([BUILD_LINUX], [ ZFS_AC_CONFIG_USER_UDEV ZFS_AC_CONFIG_USER_SYSTEMD ZFS_AC_CONFIG_USER_LIBUUID ZFS_AC_CONFIG_USER_LIBBLKID ]) ZFS_AC_CONFIG_USER_LIBTIRPC ZFS_AC_CONFIG_USER_LIBUDEV ZFS_AC_CONFIG_USER_LIBCRYPTO ZFS_AC_CONFIG_USER_LIBAIO + ZFS_AC_CONFIG_USER_LIBATOMIC ZFS_AC_CONFIG_USER_CLOCK_GETTIME ZFS_AC_CONFIG_USER_PAM ZFS_AC_CONFIG_USER_RUNSTATEDIR ZFS_AC_CONFIG_USER_MAKEDEV_IN_SYSMACROS ZFS_AC_CONFIG_USER_MAKEDEV_IN_MKDEV ZFS_AC_CONFIG_USER_ZFSEXEC ZFS_AC_TEST_FRAMEWORK AC_CHECK_FUNCS([issetugid mlockall strlcat strlcpy]) ]) dnl # dnl # Setup the environment for the ZFS Test Suite. Currently only dnl # Linux style systems are supported but this infrastructure can dnl # be extended to support other platforms if needed. dnl # AC_DEFUN([ZFS_AC_TEST_FRAMEWORK], [ ZONENAME="echo global" AC_SUBST(ZONENAME) AC_SUBST(RM) ]) diff --git a/lib/libspl/Makefile.am b/lib/libspl/Makefile.am index fbde2b57f08b..8457df6dcdf4 100644 --- a/lib/libspl/Makefile.am +++ b/lib/libspl/Makefile.am @@ -1,68 +1,47 @@ include $(top_srcdir)/config/Rules.am -if TARGET_CPU_I386 -TARGET_CPU_ATOMIC_SOURCE = asm-i386/atomic.S -else -if TARGET_CPU_X86_64 -TARGET_CPU_ATOMIC_SOURCE = asm-x86_64/atomic.S -else -TARGET_CPU_ATOMIC_SOURCE = asm-generic/atomic.c -endif -endif - SUBDIRS = include -AM_CCASFLAGS = \ - $(CFLAGS) - noinst_LTLIBRARIES = libspl_assert.la libspl.la libspl_assert_la_SOURCES = \ assert.c USER_C = \ libspl_impl.h \ + atomic.c \ getexecname.c \ list.c \ mkdirp.c \ page.c \ strlcat.c \ strlcpy.c \ timestamp.c \ include/sys/list.h \ include/sys/list_impl.h if BUILD_LINUX USER_C += \ os/linux/getexecname.c \ os/linux/gethostid.c \ os/linux/getmntany.c \ os/linux/zone.c endif if BUILD_FREEBSD USER_C += \ os/freebsd/getexecname.c \ os/freebsd/gethostid.c \ os/freebsd/getmntany.c \ os/freebsd/mnttab.c \ os/freebsd/zone.c endif -libspl_la_SOURCES = \ - $(USER_C) \ - $(TARGET_CPU_ATOMIC_SOURCE) +libspl_la_SOURCES = $(USER_C) libspl_la_LIBADD = \ libspl_assert.la -libspl_la_LIBADD += $(LIBCLOCK_GETTIME) +libspl_la_LIBADD += $(LIBATOMIC_LIBS) $(LIBCLOCK_GETTIME) include $(top_srcdir)/config/CppCheck.am - -# Override the default SOURCES which includes TARGET_CPU_ATOMIC_SOURCE -# in order to always evaluate the generic asm-generic/atomic.c source. 
-CPPCHECKSRC = $(USER_C) asm-generic/atomic.c -cppcheck: - $(CPPCHECK) -j$(CPU_COUNT) $(CPPCHECKFLAGS) --force \ - $(DEFAULT_INCLUDES) $(CPPCHECKSRC) diff --git a/lib/libspl/asm-generic/.gitignore b/lib/libspl/asm-generic/.gitignore deleted file mode 100644 index 2792cf7b4551..000000000000 --- a/lib/libspl/asm-generic/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/atomic.S diff --git a/lib/libspl/asm-i386/atomic.S b/lib/libspl/asm-i386/atomic.S deleted file mode 100644 index 7a574b0d1729..000000000000 --- a/lib/libspl/asm-i386/atomic.S +++ /dev/null @@ -1,840 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License (the "License"). - * You may not use this file except in compliance with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or http://www.opensolaris.org/os/licensing. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -/* - * Copyright 2007 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. - */ - - .ident "%Z%%M% %I% %E% SMI" - - .file "%M%" - -#define _ASM -#ifdef __linux__ -#include -#elif __FreeBSD__ -#include -#define SET_SIZE(x) -#endif - ENTRY(atomic_inc_8) - ALTENTRY(atomic_inc_uchar) - movl 4(%esp), %eax - lock - incb (%eax) - ret - SET_SIZE(atomic_inc_uchar) - SET_SIZE(atomic_inc_8) - - ENTRY(atomic_inc_16) - ALTENTRY(atomic_inc_ushort) - movl 4(%esp), %eax - lock - incw (%eax) - ret - SET_SIZE(atomic_inc_ushort) - SET_SIZE(atomic_inc_16) - - ENTRY(atomic_inc_32) - ALTENTRY(atomic_inc_uint) - ALTENTRY(atomic_inc_ulong) - movl 4(%esp), %eax - lock - incl (%eax) - ret - SET_SIZE(atomic_inc_ulong) - SET_SIZE(atomic_inc_uint) - SET_SIZE(atomic_inc_32) - - ENTRY(atomic_inc_8_nv) - ALTENTRY(atomic_inc_uchar_nv) - movl 4(%esp), %edx - movb (%edx), %al -1: - leal 1(%eax), %ecx - lock - cmpxchgb %cl, (%edx) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_inc_uchar_nv) - SET_SIZE(atomic_inc_8_nv) - - ENTRY(atomic_inc_16_nv) - ALTENTRY(atomic_inc_ushort_nv) - movl 4(%esp), %edx - movw (%edx), %ax -1: - leal 1(%eax), %ecx - lock - cmpxchgw %cx, (%edx) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_inc_ushort_nv) - SET_SIZE(atomic_inc_16_nv) - - ENTRY(atomic_inc_32_nv) - ALTENTRY(atomic_inc_uint_nv) - ALTENTRY(atomic_inc_ulong_nv) - movl 4(%esp), %edx - movl (%edx), %eax -1: - leal 1(%eax), %ecx - lock - cmpxchgl %ecx, (%edx) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_inc_ulong_nv) - SET_SIZE(atomic_inc_uint_nv) - SET_SIZE(atomic_inc_32_nv) - - /* - * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever - * separated, you need to also edit the libc i386 platform - * specific mapfile and remove the NODYNSORT attribute - * from atomic_inc_64_nv. 
- */ - ENTRY(atomic_inc_64) - ALTENTRY(atomic_inc_64_nv) - pushl %edi - pushl %ebx - movl 12(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx -1: - xorl %ebx, %ebx - xorl %ecx, %ecx - incl %ebx - addl %eax, %ebx - adcl %edx, %ecx - lock - cmpxchg8b (%edi) - jne 1b - movl %ebx, %eax - movl %ecx, %edx - popl %ebx - popl %edi - ret - SET_SIZE(atomic_inc_64_nv) - SET_SIZE(atomic_inc_64) - - ENTRY(atomic_dec_8) - ALTENTRY(atomic_dec_uchar) - movl 4(%esp), %eax - lock - decb (%eax) - ret - SET_SIZE(atomic_dec_uchar) - SET_SIZE(atomic_dec_8) - - ENTRY(atomic_dec_16) - ALTENTRY(atomic_dec_ushort) - movl 4(%esp), %eax - lock - decw (%eax) - ret - SET_SIZE(atomic_dec_ushort) - SET_SIZE(atomic_dec_16) - - ENTRY(atomic_dec_32) - ALTENTRY(atomic_dec_uint) - ALTENTRY(atomic_dec_ulong) - movl 4(%esp), %eax - lock - decl (%eax) - ret - SET_SIZE(atomic_dec_ulong) - SET_SIZE(atomic_dec_uint) - SET_SIZE(atomic_dec_32) - - ENTRY(atomic_dec_8_nv) - ALTENTRY(atomic_dec_uchar_nv) - movl 4(%esp), %edx - movb (%edx), %al -1: - leal -1(%eax), %ecx - lock - cmpxchgb %cl, (%edx) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_dec_uchar_nv) - SET_SIZE(atomic_dec_8_nv) - - ENTRY(atomic_dec_16_nv) - ALTENTRY(atomic_dec_ushort_nv) - movl 4(%esp), %edx - movw (%edx), %ax -1: - leal -1(%eax), %ecx - lock - cmpxchgw %cx, (%edx) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_dec_ushort_nv) - SET_SIZE(atomic_dec_16_nv) - - ENTRY(atomic_dec_32_nv) - ALTENTRY(atomic_dec_uint_nv) - ALTENTRY(atomic_dec_ulong_nv) - movl 4(%esp), %edx - movl (%edx), %eax -1: - leal -1(%eax), %ecx - lock - cmpxchgl %ecx, (%edx) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_dec_ulong_nv) - SET_SIZE(atomic_dec_uint_nv) - SET_SIZE(atomic_dec_32_nv) - - /* - * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever - * separated, it is important to edit the libc i386 platform - * specific mapfile and remove the NODYNSORT attribute - * from atomic_dec_64_nv. 
- */ - ENTRY(atomic_dec_64) - ALTENTRY(atomic_dec_64_nv) - pushl %edi - pushl %ebx - movl 12(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx -1: - xorl %ebx, %ebx - xorl %ecx, %ecx - not %ecx - not %ebx - addl %eax, %ebx - adcl %edx, %ecx - lock - cmpxchg8b (%edi) - jne 1b - movl %ebx, %eax - movl %ecx, %edx - popl %ebx - popl %edi - ret - SET_SIZE(atomic_dec_64_nv) - SET_SIZE(atomic_dec_64) - - ENTRY(atomic_add_8) - ALTENTRY(atomic_add_char) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - addb %cl, (%eax) - ret - SET_SIZE(atomic_add_char) - SET_SIZE(atomic_add_8) - - ENTRY(atomic_add_16) - ALTENTRY(atomic_add_short) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - addw %cx, (%eax) - ret - SET_SIZE(atomic_add_short) - SET_SIZE(atomic_add_16) - - ENTRY(atomic_add_32) - ALTENTRY(atomic_add_int) - ALTENTRY(atomic_add_ptr) - ALTENTRY(atomic_add_long) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - addl %ecx, (%eax) - ret - SET_SIZE(atomic_add_long) - SET_SIZE(atomic_add_ptr) - SET_SIZE(atomic_add_int) - SET_SIZE(atomic_add_32) - - ENTRY(atomic_sub_8) - ALTENTRY(atomic_sub_char) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - subb %cl, (%eax) - ret - SET_SIZE(atomic_sub_char) - SET_SIZE(atomic_sub_8) - - ENTRY(atomic_sub_16) - ALTENTRY(atomic_sub_short) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - subw %cx, (%eax) - ret - SET_SIZE(atomic_sub_short) - SET_SIZE(atomic_sub_16) - - ENTRY(atomic_sub_32) - ALTENTRY(atomic_sub_int) - ALTENTRY(atomic_sub_ptr) - ALTENTRY(atomic_sub_long) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - subl %ecx, (%eax) - ret - SET_SIZE(atomic_sub_long) - SET_SIZE(atomic_sub_ptr) - SET_SIZE(atomic_sub_int) - SET_SIZE(atomic_sub_32) - - ENTRY(atomic_or_8) - ALTENTRY(atomic_or_uchar) - movl 4(%esp), %eax - movb 8(%esp), %cl - lock - orb %cl, (%eax) - ret - SET_SIZE(atomic_or_uchar) - SET_SIZE(atomic_or_8) - - ENTRY(atomic_or_16) - ALTENTRY(atomic_or_ushort) - movl 4(%esp), %eax - movw 8(%esp), %cx - lock - orw %cx, (%eax) - ret - SET_SIZE(atomic_or_ushort) - SET_SIZE(atomic_or_16) - - ENTRY(atomic_or_32) - ALTENTRY(atomic_or_uint) - ALTENTRY(atomic_or_ulong) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - orl %ecx, (%eax) - ret - SET_SIZE(atomic_or_ulong) - SET_SIZE(atomic_or_uint) - SET_SIZE(atomic_or_32) - - ENTRY(atomic_and_8) - ALTENTRY(atomic_and_uchar) - movl 4(%esp), %eax - movb 8(%esp), %cl - lock - andb %cl, (%eax) - ret - SET_SIZE(atomic_and_uchar) - SET_SIZE(atomic_and_8) - - ENTRY(atomic_and_16) - ALTENTRY(atomic_and_ushort) - movl 4(%esp), %eax - movw 8(%esp), %cx - lock - andw %cx, (%eax) - ret - SET_SIZE(atomic_and_ushort) - SET_SIZE(atomic_and_16) - - ENTRY(atomic_and_32) - ALTENTRY(atomic_and_uint) - ALTENTRY(atomic_and_ulong) - movl 4(%esp), %eax - movl 8(%esp), %ecx - lock - andl %ecx, (%eax) - ret - SET_SIZE(atomic_and_ulong) - SET_SIZE(atomic_and_uint) - SET_SIZE(atomic_and_32) - - ENTRY(atomic_add_8_nv) - ALTENTRY(atomic_add_char_nv) - movl 4(%esp), %edx - movb (%edx), %al -1: - movl 8(%esp), %ecx - addb %al, %cl - lock - cmpxchgb %cl, (%edx) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_add_char_nv) - SET_SIZE(atomic_add_8_nv) - - ENTRY(atomic_add_16_nv) - ALTENTRY(atomic_add_short_nv) - movl 4(%esp), %edx - movw (%edx), %ax -1: - movl 8(%esp), %ecx - addw %ax, %cx - lock - cmpxchgw %cx, (%edx) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_add_short_nv) - SET_SIZE(atomic_add_16_nv) - - ENTRY(atomic_add_32_nv) - ALTENTRY(atomic_add_int_nv) - ALTENTRY(atomic_add_ptr_nv) - ALTENTRY(atomic_add_long_nv) - movl 
4(%esp), %edx - movl (%edx), %eax -1: - movl 8(%esp), %ecx - addl %eax, %ecx - lock - cmpxchgl %ecx, (%edx) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_add_long_nv) - SET_SIZE(atomic_add_ptr_nv) - SET_SIZE(atomic_add_int_nv) - SET_SIZE(atomic_add_32_nv) - - ENTRY(atomic_sub_8_nv) - ALTENTRY(atomic_sub_char_nv) - movl 4(%esp), %edx - movb (%edx), %al -1: - movl 8(%esp), %ecx - subb %al, %cl - lock - cmpxchgb %cl, (%edx) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_sub_char_nv) - SET_SIZE(atomic_sub_8_nv) - - ENTRY(atomic_sub_16_nv) - ALTENTRY(atomic_sub_short_nv) - movl 4(%esp), %edx - movw (%edx), %ax -1: - movl 8(%esp), %ecx - subw %ax, %cx - lock - cmpxchgw %cx, (%edx) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_sub_short_nv) - SET_SIZE(atomic_sub_16_nv) - - ENTRY(atomic_sub_32_nv) - ALTENTRY(atomic_sub_int_nv) - ALTENTRY(atomic_sub_ptr_nv) - ALTENTRY(atomic_sub_long_nv) - movl 4(%esp), %edx - movl (%edx), %eax -1: - movl 8(%esp), %ecx - subl %eax, %ecx - lock - cmpxchgl %ecx, (%edx) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_sub_long_nv) - SET_SIZE(atomic_sub_ptr_nv) - SET_SIZE(atomic_sub_int_nv) - SET_SIZE(atomic_sub_32_nv) - - /* - * NOTE: If atomic_add_64 and atomic_add_64_nv are ever - * separated, it is important to edit the libc i386 platform - * specific mapfile and remove the NODYNSORT attribute - * from atomic_add_64_nv. - */ - ENTRY(atomic_add_64) - ALTENTRY(atomic_add_64_nv) - pushl %edi - pushl %ebx - movl 12(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx -1: - movl 16(%esp), %ebx - movl 20(%esp), %ecx - addl %eax, %ebx - adcl %edx, %ecx - lock - cmpxchg8b (%edi) - jne 1b - movl %ebx, %eax - movl %ecx, %edx - popl %ebx - popl %edi - ret - SET_SIZE(atomic_add_64_nv) - SET_SIZE(atomic_add_64) - - ENTRY(atomic_sub_64) - ALTENTRY(atomic_sub_64_nv) - pushl %edi - pushl %ebx - movl 12(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx -1: - movl 16(%esp), %ebx - movl 20(%esp), %ecx - subl %eax, %ebx - sbbl %edx, %ecx - lock - cmpxchg8b (%edi) - jne 1b - movl %ebx, %eax - movl %ecx, %edx - popl %ebx - popl %edi - ret - SET_SIZE(atomic_sub_64_nv) - SET_SIZE(atomic_sub_64) - - ENTRY(atomic_or_8_nv) - ALTENTRY(atomic_or_uchar_nv) - movl 4(%esp), %edx - movb (%edx), %al -1: - movl 8(%esp), %ecx - orb %al, %cl - lock - cmpxchgb %cl, (%edx) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_or_uchar_nv) - SET_SIZE(atomic_or_8_nv) - - ENTRY(atomic_or_16_nv) - ALTENTRY(atomic_or_ushort_nv) - movl 4(%esp), %edx - movw (%edx), %ax -1: - movl 8(%esp), %ecx - orw %ax, %cx - lock - cmpxchgw %cx, (%edx) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_or_ushort_nv) - SET_SIZE(atomic_or_16_nv) - - ENTRY(atomic_or_32_nv) - ALTENTRY(atomic_or_uint_nv) - ALTENTRY(atomic_or_ulong_nv) - movl 4(%esp), %edx - movl (%edx), %eax -1: - movl 8(%esp), %ecx - orl %eax, %ecx - lock - cmpxchgl %ecx, (%edx) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_or_ulong_nv) - SET_SIZE(atomic_or_uint_nv) - SET_SIZE(atomic_or_32_nv) - - /* - * NOTE: If atomic_or_64 and atomic_or_64_nv are ever - * separated, it is important to edit the libc i386 platform - * specific mapfile and remove the NODYNSORT attribute - * from atomic_or_64_nv. 
- */ - ENTRY(atomic_or_64) - ALTENTRY(atomic_or_64_nv) - pushl %edi - pushl %ebx - movl 12(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx -1: - movl 16(%esp), %ebx - movl 20(%esp), %ecx - orl %eax, %ebx - orl %edx, %ecx - lock - cmpxchg8b (%edi) - jne 1b - movl %ebx, %eax - movl %ecx, %edx - popl %ebx - popl %edi - ret - SET_SIZE(atomic_or_64_nv) - SET_SIZE(atomic_or_64) - - ENTRY(atomic_and_8_nv) - ALTENTRY(atomic_and_uchar_nv) - movl 4(%esp), %edx - movb (%edx), %al -1: - movl 8(%esp), %ecx - andb %al, %cl - lock - cmpxchgb %cl, (%edx) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_and_uchar_nv) - SET_SIZE(atomic_and_8_nv) - - ENTRY(atomic_and_16_nv) - ALTENTRY(atomic_and_ushort_nv) - movl 4(%esp), %edx - movw (%edx), %ax -1: - movl 8(%esp), %ecx - andw %ax, %cx - lock - cmpxchgw %cx, (%edx) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_and_ushort_nv) - SET_SIZE(atomic_and_16_nv) - - ENTRY(atomic_and_32_nv) - ALTENTRY(atomic_and_uint_nv) - ALTENTRY(atomic_and_ulong_nv) - movl 4(%esp), %edx - movl (%edx), %eax -1: - movl 8(%esp), %ecx - andl %eax, %ecx - lock - cmpxchgl %ecx, (%edx) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_and_ulong_nv) - SET_SIZE(atomic_and_uint_nv) - SET_SIZE(atomic_and_32_nv) - - /* - * NOTE: If atomic_and_64 and atomic_and_64_nv are ever - * separated, it is important to edit the libc i386 platform - * specific mapfile and remove the NODYNSORT attribute - * from atomic_and_64_nv. - */ - ENTRY(atomic_and_64) - ALTENTRY(atomic_and_64_nv) - pushl %edi - pushl %ebx - movl 12(%esp), %edi - movl (%edi), %eax - movl 4(%edi), %edx -1: - movl 16(%esp), %ebx - movl 20(%esp), %ecx - andl %eax, %ebx - andl %edx, %ecx - lock - cmpxchg8b (%edi) - jne 1b - movl %ebx, %eax - movl %ecx, %edx - popl %ebx - popl %edi - ret - SET_SIZE(atomic_and_64_nv) - SET_SIZE(atomic_and_64) - - ENTRY(atomic_cas_8) - ALTENTRY(atomic_cas_uchar) - movl 4(%esp), %edx - movzbl 8(%esp), %eax - movb 12(%esp), %cl - lock - cmpxchgb %cl, (%edx) - ret - SET_SIZE(atomic_cas_uchar) - SET_SIZE(atomic_cas_8) - - ENTRY(atomic_cas_16) - ALTENTRY(atomic_cas_ushort) - movl 4(%esp), %edx - movzwl 8(%esp), %eax - movw 12(%esp), %cx - lock - cmpxchgw %cx, (%edx) - ret - SET_SIZE(atomic_cas_ushort) - SET_SIZE(atomic_cas_16) - - ENTRY(atomic_cas_32) - ALTENTRY(atomic_cas_uint) - ALTENTRY(atomic_cas_ulong) - ALTENTRY(atomic_cas_ptr) - movl 4(%esp), %edx - movl 8(%esp), %eax - movl 12(%esp), %ecx - lock - cmpxchgl %ecx, (%edx) - ret - SET_SIZE(atomic_cas_ptr) - SET_SIZE(atomic_cas_ulong) - SET_SIZE(atomic_cas_uint) - SET_SIZE(atomic_cas_32) - - ENTRY(atomic_cas_64) - pushl %ebx - pushl %esi - movl 12(%esp), %esi - movl 16(%esp), %eax - movl 20(%esp), %edx - movl 24(%esp), %ebx - movl 28(%esp), %ecx - lock - cmpxchg8b (%esi) - popl %esi - popl %ebx - ret - SET_SIZE(atomic_cas_64) - - ENTRY(atomic_swap_8) - ALTENTRY(atomic_swap_uchar) - movl 4(%esp), %edx - movzbl 8(%esp), %eax - lock - xchgb %al, (%edx) - ret - SET_SIZE(atomic_swap_uchar) - SET_SIZE(atomic_swap_8) - - ENTRY(atomic_swap_16) - ALTENTRY(atomic_swap_ushort) - movl 4(%esp), %edx - movzwl 8(%esp), %eax - lock - xchgw %ax, (%edx) - ret - SET_SIZE(atomic_swap_ushort) - SET_SIZE(atomic_swap_16) - - ENTRY(atomic_swap_32) - ALTENTRY(atomic_swap_uint) - ALTENTRY(atomic_swap_ptr) - ALTENTRY(atomic_swap_ulong) - movl 4(%esp), %edx - movl 8(%esp), %eax - lock - xchgl %eax, (%edx) - ret - SET_SIZE(atomic_swap_ulong) - SET_SIZE(atomic_swap_ptr) - SET_SIZE(atomic_swap_uint) - SET_SIZE(atomic_swap_32) - - ENTRY(atomic_swap_64) - pushl %esi - pushl 
%ebx - movl 12(%esp), %esi - movl 16(%esp), %ebx - movl 20(%esp), %ecx - movl (%esi), %eax - movl 4(%esi), %edx -1: - lock - cmpxchg8b (%esi) - jne 1b - popl %ebx - popl %esi - ret - SET_SIZE(atomic_swap_64) - - ENTRY(atomic_set_long_excl) - movl 4(%esp), %edx - movl 8(%esp), %ecx - xorl %eax, %eax - lock - btsl %ecx, (%edx) - jnc 1f - decl %eax -1: - ret - SET_SIZE(atomic_set_long_excl) - - ENTRY(atomic_clear_long_excl) - movl 4(%esp), %edx - movl 8(%esp), %ecx - xorl %eax, %eax - lock - btrl %ecx, (%edx) - jc 1f - decl %eax -1: - ret - SET_SIZE(atomic_clear_long_excl) - - /* - * NOTE: membar_enter, membar_exit, membar_producer, and - * membar_consumer are all identical routines. We define them - * separately, instead of using ALTENTRY definitions to alias them - * together, so that DTrace and debuggers will see a unique address - * for them, allowing more accurate tracing. - */ - - - ENTRY(membar_enter) - lock - xorl $0, (%esp) - ret - SET_SIZE(membar_enter) - - ENTRY(membar_exit) - lock - xorl $0, (%esp) - ret - SET_SIZE(membar_exit) - - ENTRY(membar_producer) - lock - xorl $0, (%esp) - ret - SET_SIZE(membar_producer) - - ENTRY(membar_consumer) - lock - xorl $0, (%esp) - ret - SET_SIZE(membar_consumer) - -#ifdef __ELF__ -.section .note.GNU-stack,"",%progbits -#endif diff --git a/lib/libspl/asm-x86_64/atomic.S b/lib/libspl/asm-x86_64/atomic.S deleted file mode 100644 index 50edfe878cc9..000000000000 --- a/lib/libspl/asm-x86_64/atomic.S +++ /dev/null @@ -1,691 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License (the "License"). - * You may not use this file except in compliance with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or http://www.opensolaris.org/os/licensing. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -/* - * Copyright 2007 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. 
- */ - - .ident "%Z%%M% %I% %E% SMI" - - .file "%M%" - -#define _ASM -#ifdef __linux__ -#include -#elif __FreeBSD__ -#include -#define SET_SIZE(x) -#endif - ENTRY(atomic_inc_8) - ALTENTRY(atomic_inc_uchar) - lock - incb (%rdi) - ret - SET_SIZE(atomic_inc_uchar) - SET_SIZE(atomic_inc_8) - - ENTRY(atomic_inc_16) - ALTENTRY(atomic_inc_ushort) - lock - incw (%rdi) - ret - SET_SIZE(atomic_inc_ushort) - SET_SIZE(atomic_inc_16) - - ENTRY(atomic_inc_32) - ALTENTRY(atomic_inc_uint) - lock - incl (%rdi) - ret - SET_SIZE(atomic_inc_uint) - SET_SIZE(atomic_inc_32) - - ENTRY(atomic_inc_64) - ALTENTRY(atomic_inc_ulong) - lock - incq (%rdi) - ret - SET_SIZE(atomic_inc_ulong) - SET_SIZE(atomic_inc_64) - - ENTRY(atomic_inc_8_nv) - ALTENTRY(atomic_inc_uchar_nv) - movb (%rdi), %al -1: - leaq 1(%rax), %rcx - lock - cmpxchgb %cl, (%rdi) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_inc_uchar_nv) - SET_SIZE(atomic_inc_8_nv) - - ENTRY(atomic_inc_16_nv) - ALTENTRY(atomic_inc_ushort_nv) - movw (%rdi), %ax -1: - leaq 1(%rax), %rcx - lock - cmpxchgw %cx, (%rdi) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_inc_ushort_nv) - SET_SIZE(atomic_inc_16_nv) - - ENTRY(atomic_inc_32_nv) - ALTENTRY(atomic_inc_uint_nv) - movl (%rdi), %eax -1: - leaq 1(%rax), %rcx - lock - cmpxchgl %ecx, (%rdi) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_inc_uint_nv) - SET_SIZE(atomic_inc_32_nv) - - ENTRY(atomic_inc_64_nv) - ALTENTRY(atomic_inc_ulong_nv) - movq (%rdi), %rax -1: - leaq 1(%rax), %rcx - lock - cmpxchgq %rcx, (%rdi) - jne 1b - movq %rcx, %rax - ret - SET_SIZE(atomic_inc_ulong_nv) - SET_SIZE(atomic_inc_64_nv) - - ENTRY(atomic_dec_8) - ALTENTRY(atomic_dec_uchar) - lock - decb (%rdi) - ret - SET_SIZE(atomic_dec_uchar) - SET_SIZE(atomic_dec_8) - - ENTRY(atomic_dec_16) - ALTENTRY(atomic_dec_ushort) - lock - decw (%rdi) - ret - SET_SIZE(atomic_dec_ushort) - SET_SIZE(atomic_dec_16) - - ENTRY(atomic_dec_32) - ALTENTRY(atomic_dec_uint) - lock - decl (%rdi) - ret - SET_SIZE(atomic_dec_uint) - SET_SIZE(atomic_dec_32) - - ENTRY(atomic_dec_64) - ALTENTRY(atomic_dec_ulong) - lock - decq (%rdi) - ret - SET_SIZE(atomic_dec_ulong) - SET_SIZE(atomic_dec_64) - - ENTRY(atomic_dec_8_nv) - ALTENTRY(atomic_dec_uchar_nv) - movb (%rdi), %al -1: - leaq -1(%rax), %rcx - lock - cmpxchgb %cl, (%rdi) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_dec_uchar_nv) - SET_SIZE(atomic_dec_8_nv) - - ENTRY(atomic_dec_16_nv) - ALTENTRY(atomic_dec_ushort_nv) - movw (%rdi), %ax -1: - leaq -1(%rax), %rcx - lock - cmpxchgw %cx, (%rdi) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_dec_ushort_nv) - SET_SIZE(atomic_dec_16_nv) - - ENTRY(atomic_dec_32_nv) - ALTENTRY(atomic_dec_uint_nv) - movl (%rdi), %eax -1: - leaq -1(%rax), %rcx - lock - cmpxchgl %ecx, (%rdi) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_dec_uint_nv) - SET_SIZE(atomic_dec_32_nv) - - ENTRY(atomic_dec_64_nv) - ALTENTRY(atomic_dec_ulong_nv) - movq (%rdi), %rax -1: - leaq -1(%rax), %rcx - lock - cmpxchgq %rcx, (%rdi) - jne 1b - movq %rcx, %rax - ret - SET_SIZE(atomic_dec_ulong_nv) - SET_SIZE(atomic_dec_64_nv) - - ENTRY(atomic_add_8) - ALTENTRY(atomic_add_char) - lock - addb %sil, (%rdi) - ret - SET_SIZE(atomic_add_char) - SET_SIZE(atomic_add_8) - - ENTRY(atomic_add_16) - ALTENTRY(atomic_add_short) - lock - addw %si, (%rdi) - ret - SET_SIZE(atomic_add_short) - SET_SIZE(atomic_add_16) - - ENTRY(atomic_add_32) - ALTENTRY(atomic_add_int) - lock - addl %esi, (%rdi) - ret - SET_SIZE(atomic_add_int) - SET_SIZE(atomic_add_32) - - ENTRY(atomic_add_64) - ALTENTRY(atomic_add_ptr) - 
ALTENTRY(atomic_add_long) - lock - addq %rsi, (%rdi) - ret - SET_SIZE(atomic_add_long) - SET_SIZE(atomic_add_ptr) - SET_SIZE(atomic_add_64) - - ENTRY(atomic_sub_8) - ALTENTRY(atomic_sub_char) - lock - subb %sil, (%rdi) - ret - SET_SIZE(atomic_sub_char) - SET_SIZE(atomic_sub_8) - - ENTRY(atomic_sub_16) - ALTENTRY(atomic_sub_short) - lock - subw %si, (%rdi) - ret - SET_SIZE(atomic_sub_short) - SET_SIZE(atomic_sub_16) - - ENTRY(atomic_sub_32) - ALTENTRY(atomic_sub_int) - lock - subl %esi, (%rdi) - ret - SET_SIZE(atomic_sub_int) - SET_SIZE(atomic_sub_32) - - ENTRY(atomic_sub_64) - ALTENTRY(atomic_sub_ptr) - ALTENTRY(atomic_sub_long) - lock - subq %rsi, (%rdi) - ret - SET_SIZE(atomic_sub_long) - SET_SIZE(atomic_sub_ptr) - SET_SIZE(atomic_sub_64) - - ENTRY(atomic_or_8) - ALTENTRY(atomic_or_uchar) - lock - orb %sil, (%rdi) - ret - SET_SIZE(atomic_or_uchar) - SET_SIZE(atomic_or_8) - - ENTRY(atomic_or_16) - ALTENTRY(atomic_or_ushort) - lock - orw %si, (%rdi) - ret - SET_SIZE(atomic_or_ushort) - SET_SIZE(atomic_or_16) - - ENTRY(atomic_or_32) - ALTENTRY(atomic_or_uint) - lock - orl %esi, (%rdi) - ret - SET_SIZE(atomic_or_uint) - SET_SIZE(atomic_or_32) - - ENTRY(atomic_or_64) - ALTENTRY(atomic_or_ulong) - lock - orq %rsi, (%rdi) - ret - SET_SIZE(atomic_or_ulong) - SET_SIZE(atomic_or_64) - - ENTRY(atomic_and_8) - ALTENTRY(atomic_and_uchar) - lock - andb %sil, (%rdi) - ret - SET_SIZE(atomic_and_uchar) - SET_SIZE(atomic_and_8) - - ENTRY(atomic_and_16) - ALTENTRY(atomic_and_ushort) - lock - andw %si, (%rdi) - ret - SET_SIZE(atomic_and_ushort) - SET_SIZE(atomic_and_16) - - ENTRY(atomic_and_32) - ALTENTRY(atomic_and_uint) - lock - andl %esi, (%rdi) - ret - SET_SIZE(atomic_and_uint) - SET_SIZE(atomic_and_32) - - ENTRY(atomic_and_64) - ALTENTRY(atomic_and_ulong) - lock - andq %rsi, (%rdi) - ret - SET_SIZE(atomic_and_ulong) - SET_SIZE(atomic_and_64) - - ENTRY(atomic_add_8_nv) - ALTENTRY(atomic_add_char_nv) - movb (%rdi), %al -1: - movb %sil, %cl - addb %al, %cl - lock - cmpxchgb %cl, (%rdi) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_add_char_nv) - SET_SIZE(atomic_add_8_nv) - - ENTRY(atomic_add_16_nv) - ALTENTRY(atomic_add_short_nv) - movw (%rdi), %ax -1: - movw %si, %cx - addw %ax, %cx - lock - cmpxchgw %cx, (%rdi) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_add_short_nv) - SET_SIZE(atomic_add_16_nv) - - ENTRY(atomic_add_32_nv) - ALTENTRY(atomic_add_int_nv) - movl (%rdi), %eax -1: - movl %esi, %ecx - addl %eax, %ecx - lock - cmpxchgl %ecx, (%rdi) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_add_int_nv) - SET_SIZE(atomic_add_32_nv) - - ENTRY(atomic_add_64_nv) - ALTENTRY(atomic_add_ptr_nv) - ALTENTRY(atomic_add_long_nv) - movq (%rdi), %rax -1: - movq %rsi, %rcx - addq %rax, %rcx - lock - cmpxchgq %rcx, (%rdi) - jne 1b - movq %rcx, %rax - ret - SET_SIZE(atomic_add_long_nv) - SET_SIZE(atomic_add_ptr_nv) - SET_SIZE(atomic_add_64_nv) - - ENTRY(atomic_sub_8_nv) - ALTENTRY(atomic_sub_char_nv) - movb (%rdi), %al -1: - movb %sil, %cl - subb %al, %cl - lock - cmpxchgb %cl, (%rdi) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_sub_char_nv) - SET_SIZE(atomic_sub_8_nv) - - ENTRY(atomic_sub_16_nv) - ALTENTRY(atomic_sub_short_nv) - movw (%rdi), %ax -1: - movw %si, %cx - subw %ax, %cx - lock - cmpxchgw %cx, (%rdi) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_sub_short_nv) - SET_SIZE(atomic_sub_16_nv) - - ENTRY(atomic_sub_32_nv) - ALTENTRY(atomic_sub_int_nv) - movl (%rdi), %eax -1: - movl %esi, %ecx - subl %eax, %ecx - lock - cmpxchgl %ecx, (%rdi) - jne 1b - movl %ecx, %eax - ret - 
SET_SIZE(atomic_sub_int_nv) - SET_SIZE(atomic_sub_32_nv) - - ENTRY(atomic_sub_64_nv) - ALTENTRY(atomic_sub_ptr_nv) - ALTENTRY(atomic_sub_long_nv) - movq (%rdi), %rax -1: - movq %rsi, %rcx - subq %rax, %rcx - lock - cmpxchgq %rcx, (%rdi) - jne 1b - movq %rcx, %rax - ret - SET_SIZE(atomic_sub_long_nv) - SET_SIZE(atomic_sub_ptr_nv) - SET_SIZE(atomic_sub_64_nv) - - ENTRY(atomic_and_8_nv) - ALTENTRY(atomic_and_uchar_nv) - movb (%rdi), %al -1: - movb %sil, %cl - andb %al, %cl - lock - cmpxchgb %cl, (%rdi) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_and_uchar_nv) - SET_SIZE(atomic_and_8_nv) - - ENTRY(atomic_and_16_nv) - ALTENTRY(atomic_and_ushort_nv) - movw (%rdi), %ax -1: - movw %si, %cx - andw %ax, %cx - lock - cmpxchgw %cx, (%rdi) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_and_ushort_nv) - SET_SIZE(atomic_and_16_nv) - - ENTRY(atomic_and_32_nv) - ALTENTRY(atomic_and_uint_nv) - movl (%rdi), %eax -1: - movl %esi, %ecx - andl %eax, %ecx - lock - cmpxchgl %ecx, (%rdi) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_and_uint_nv) - SET_SIZE(atomic_and_32_nv) - - ENTRY(atomic_and_64_nv) - ALTENTRY(atomic_and_ulong_nv) - movq (%rdi), %rax -1: - movq %rsi, %rcx - andq %rax, %rcx - lock - cmpxchgq %rcx, (%rdi) - jne 1b - movq %rcx, %rax - ret - SET_SIZE(atomic_and_ulong_nv) - SET_SIZE(atomic_and_64_nv) - - ENTRY(atomic_or_8_nv) - ALTENTRY(atomic_or_uchar_nv) - movb (%rdi), %al -1: - movb %sil, %cl - orb %al, %cl - lock - cmpxchgb %cl, (%rdi) - jne 1b - movzbl %cl, %eax - ret - SET_SIZE(atomic_and_uchar_nv) - SET_SIZE(atomic_and_8_nv) - - ENTRY(atomic_or_16_nv) - ALTENTRY(atomic_or_ushort_nv) - movw (%rdi), %ax -1: - movw %si, %cx - orw %ax, %cx - lock - cmpxchgw %cx, (%rdi) - jne 1b - movzwl %cx, %eax - ret - SET_SIZE(atomic_or_ushort_nv) - SET_SIZE(atomic_or_16_nv) - - ENTRY(atomic_or_32_nv) - ALTENTRY(atomic_or_uint_nv) - movl (%rdi), %eax -1: - movl %esi, %ecx - orl %eax, %ecx - lock - cmpxchgl %ecx, (%rdi) - jne 1b - movl %ecx, %eax - ret - SET_SIZE(atomic_or_uint_nv) - SET_SIZE(atomic_or_32_nv) - - ENTRY(atomic_or_64_nv) - ALTENTRY(atomic_or_ulong_nv) - movq (%rdi), %rax -1: - movq %rsi, %rcx - orq %rax, %rcx - lock - cmpxchgq %rcx, (%rdi) - jne 1b - movq %rcx, %rax - ret - SET_SIZE(atomic_or_ulong_nv) - SET_SIZE(atomic_or_64_nv) - - ENTRY(atomic_cas_8) - ALTENTRY(atomic_cas_uchar) - movzbl %sil, %eax - lock - cmpxchgb %dl, (%rdi) - ret - SET_SIZE(atomic_cas_uchar) - SET_SIZE(atomic_cas_8) - - ENTRY(atomic_cas_16) - ALTENTRY(atomic_cas_ushort) - movzwl %si, %eax - lock - cmpxchgw %dx, (%rdi) - ret - SET_SIZE(atomic_cas_ushort) - SET_SIZE(atomic_cas_16) - - ENTRY(atomic_cas_32) - ALTENTRY(atomic_cas_uint) - movl %esi, %eax - lock - cmpxchgl %edx, (%rdi) - ret - SET_SIZE(atomic_cas_uint) - SET_SIZE(atomic_cas_32) - - ENTRY(atomic_cas_64) - ALTENTRY(atomic_cas_ulong) - ALTENTRY(atomic_cas_ptr) - movq %rsi, %rax - lock - cmpxchgq %rdx, (%rdi) - ret - SET_SIZE(atomic_cas_ptr) - SET_SIZE(atomic_cas_ulong) - SET_SIZE(atomic_cas_64) - - ENTRY(atomic_swap_8) - ALTENTRY(atomic_swap_uchar) - movzbl %sil, %eax - lock - xchgb %al, (%rdi) - ret - SET_SIZE(atomic_swap_uchar) - SET_SIZE(atomic_swap_8) - - ENTRY(atomic_swap_16) - ALTENTRY(atomic_swap_ushort) - movzwl %si, %eax - lock - xchgw %ax, (%rdi) - ret - SET_SIZE(atomic_swap_ushort) - SET_SIZE(atomic_swap_16) - - ENTRY(atomic_swap_32) - ALTENTRY(atomic_swap_uint) - movl %esi, %eax - lock - xchgl %eax, (%rdi) - ret - SET_SIZE(atomic_swap_uint) - SET_SIZE(atomic_swap_32) - - ENTRY(atomic_swap_64) - ALTENTRY(atomic_swap_ulong) - 
ALTENTRY(atomic_swap_ptr) - movq %rsi, %rax - lock - xchgq %rax, (%rdi) - ret - SET_SIZE(atomic_swap_ptr) - SET_SIZE(atomic_swap_ulong) - SET_SIZE(atomic_swap_64) - - ENTRY(atomic_set_long_excl) - xorl %eax, %eax - lock - btsq %rsi, (%rdi) - jnc 1f - decl %eax -1: - ret - SET_SIZE(atomic_set_long_excl) - - ENTRY(atomic_clear_long_excl) - xorl %eax, %eax - lock - btrq %rsi, (%rdi) - jc 1f - decl %eax -1: - ret - SET_SIZE(atomic_clear_long_excl) - - /* - * NOTE: membar_enter, and membar_exit are identical routines. - * We define them separately, instead of using an ALTENTRY - * definitions to alias them together, so that DTrace and - * debuggers will see a unique address for them, allowing - * more accurate tracing. - */ - - ENTRY(membar_enter) - mfence - ret - SET_SIZE(membar_enter) - - ENTRY(membar_exit) - mfence - ret - SET_SIZE(membar_exit) - - ENTRY(membar_producer) - sfence - ret - SET_SIZE(membar_producer) - - ENTRY(membar_consumer) - lfence - ret - SET_SIZE(membar_consumer) - -#ifdef __ELF__ -.section .note.GNU-stack,"",%progbits -#endif diff --git a/lib/libspl/asm-generic/atomic.c b/lib/libspl/atomic.c similarity index 58% rename from lib/libspl/asm-generic/atomic.c rename to lib/libspl/atomic.c index 35535ea49c79..d1a71b77710f 100644 --- a/lib/libspl/asm-generic/atomic.c +++ b/lib/libspl/atomic.c @@ -1,450 +1,355 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License, Version 1.0 only * (the "License"). You may not use this file except in compliance * with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2009 by Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include -#include -#include - -/* - * All operations are implemented by serializing them through a global - * pthread mutex. This provides a correct generic implementation. - * However all supported architectures are encouraged to provide a - * native implementation is assembly for performance reasons. 
- */ -pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER; /* * These are the void returning variants */ /* BEGIN CSTYLED */ #define ATOMIC_INC(name, type) \ void atomic_inc_##name(volatile type *target) \ { \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - (*target)++; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ + (void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \ } ATOMIC_INC(8, uint8_t) ATOMIC_INC(uchar, uchar_t) ATOMIC_INC(16, uint16_t) ATOMIC_INC(ushort, ushort_t) ATOMIC_INC(32, uint32_t) ATOMIC_INC(uint, uint_t) ATOMIC_INC(ulong, ulong_t) ATOMIC_INC(64, uint64_t) #define ATOMIC_DEC(name, type) \ void atomic_dec_##name(volatile type *target) \ { \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - (*target)--; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ + (void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \ } ATOMIC_DEC(8, uint8_t) ATOMIC_DEC(uchar, uchar_t) ATOMIC_DEC(16, uint16_t) ATOMIC_DEC(ushort, ushort_t) ATOMIC_DEC(32, uint32_t) ATOMIC_DEC(uint, uint_t) ATOMIC_DEC(ulong, ulong_t) ATOMIC_DEC(64, uint64_t) #define ATOMIC_ADD(name, type1, type2) \ void atomic_add_##name(volatile type1 *target, type2 bits) \ { \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - *target += bits; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ + (void) __atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \ } ATOMIC_ADD(8, uint8_t, int8_t) ATOMIC_ADD(char, uchar_t, signed char) ATOMIC_ADD(16, uint16_t, int16_t) ATOMIC_ADD(short, ushort_t, short) ATOMIC_ADD(32, uint32_t, int32_t) ATOMIC_ADD(int, uint_t, int) ATOMIC_ADD(long, ulong_t, long) ATOMIC_ADD(64, uint64_t, int64_t) void atomic_add_ptr(volatile void *target, ssize_t bits) { - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - *(caddr_t *)target += bits; - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); + (void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST); } #define ATOMIC_SUB(name, type1, type2) \ void atomic_sub_##name(volatile type1 *target, type2 bits) \ { \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - *target -= bits; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ + (void) __atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \ } ATOMIC_SUB(8, uint8_t, int8_t) ATOMIC_SUB(char, uchar_t, signed char) ATOMIC_SUB(16, uint16_t, int16_t) ATOMIC_SUB(short, ushort_t, short) ATOMIC_SUB(32, uint32_t, int32_t) ATOMIC_SUB(int, uint_t, int) ATOMIC_SUB(long, ulong_t, long) ATOMIC_SUB(64, uint64_t, int64_t) void atomic_sub_ptr(volatile void *target, ssize_t bits) { - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - *(caddr_t *)target -= bits; - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); + (void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST); } #define ATOMIC_OR(name, type) \ void atomic_or_##name(volatile type *target, type bits) \ { \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - *target |= bits; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ + (void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \ } ATOMIC_OR(8, uint8_t) ATOMIC_OR(uchar, uchar_t) ATOMIC_OR(16, uint16_t) ATOMIC_OR(ushort, ushort_t) ATOMIC_OR(32, uint32_t) ATOMIC_OR(uint, uint_t) ATOMIC_OR(ulong, ulong_t) ATOMIC_OR(64, uint64_t) #define ATOMIC_AND(name, type) \ void atomic_and_##name(volatile type *target, type bits) \ { \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - *target &= bits; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ + (void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); 
\ } ATOMIC_AND(8, uint8_t) ATOMIC_AND(uchar, uchar_t) ATOMIC_AND(16, uint16_t) ATOMIC_AND(ushort, ushort_t) ATOMIC_AND(32, uint32_t) ATOMIC_AND(uint, uint_t) ATOMIC_AND(ulong, ulong_t) ATOMIC_AND(64, uint64_t) /* * New value returning variants */ #define ATOMIC_INC_NV(name, type) \ type atomic_inc_##name##_nv(volatile type *target) \ { \ - type rc; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - rc = (++(*target)); \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (rc); \ + return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \ } ATOMIC_INC_NV(8, uint8_t) ATOMIC_INC_NV(uchar, uchar_t) ATOMIC_INC_NV(16, uint16_t) ATOMIC_INC_NV(ushort, ushort_t) ATOMIC_INC_NV(32, uint32_t) ATOMIC_INC_NV(uint, uint_t) ATOMIC_INC_NV(ulong, ulong_t) ATOMIC_INC_NV(64, uint64_t) #define ATOMIC_DEC_NV(name, type) \ type atomic_dec_##name##_nv(volatile type *target) \ { \ - type rc; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - rc = (--(*target)); \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (rc); \ + return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \ } ATOMIC_DEC_NV(8, uint8_t) ATOMIC_DEC_NV(uchar, uchar_t) ATOMIC_DEC_NV(16, uint16_t) ATOMIC_DEC_NV(ushort, ushort_t) ATOMIC_DEC_NV(32, uint32_t) ATOMIC_DEC_NV(uint, uint_t) ATOMIC_DEC_NV(ulong, ulong_t) ATOMIC_DEC_NV(64, uint64_t) #define ATOMIC_ADD_NV(name, type1, type2) \ - type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits)\ + type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \ { \ - type1 rc; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - rc = (*target += bits); \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (rc); \ + return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \ } ATOMIC_ADD_NV(8, uint8_t, int8_t) ATOMIC_ADD_NV(char, uchar_t, signed char) ATOMIC_ADD_NV(16, uint16_t, int16_t) ATOMIC_ADD_NV(short, ushort_t, short) ATOMIC_ADD_NV(32, uint32_t, int32_t) ATOMIC_ADD_NV(int, uint_t, int) ATOMIC_ADD_NV(long, ulong_t, long) ATOMIC_ADD_NV(64, uint64_t, int64_t) void * atomic_add_ptr_nv(volatile void *target, ssize_t bits) { - void *ptr; - - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - ptr = (*(caddr_t *)target += bits); - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - - return (ptr); + return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST)); } #define ATOMIC_SUB_NV(name, type1, type2) \ - type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\ + type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \ { \ - type1 rc; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - rc = (*target -= bits); \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (rc); \ + return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \ } ATOMIC_SUB_NV(8, uint8_t, int8_t) ATOMIC_SUB_NV(char, uchar_t, signed char) ATOMIC_SUB_NV(16, uint16_t, int16_t) ATOMIC_SUB_NV(short, ushort_t, short) ATOMIC_SUB_NV(32, uint32_t, int32_t) ATOMIC_SUB_NV(int, uint_t, int) ATOMIC_SUB_NV(long, ulong_t, long) ATOMIC_SUB_NV(64, uint64_t, int64_t) void * atomic_sub_ptr_nv(volatile void *target, ssize_t bits) { - void *ptr; - - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - ptr = (*(caddr_t *)target -= bits); - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - - return (ptr); + return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST)); } #define ATOMIC_OR_NV(name, type) \ type atomic_or_##name##_nv(volatile type *target, type bits) \ { \ - type rc; \ - 
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - rc = (*target |= bits); \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (rc); \ + return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \ } ATOMIC_OR_NV(8, uint8_t) ATOMIC_OR_NV(uchar, uchar_t) ATOMIC_OR_NV(16, uint16_t) ATOMIC_OR_NV(ushort, ushort_t) ATOMIC_OR_NV(32, uint32_t) ATOMIC_OR_NV(uint, uint_t) ATOMIC_OR_NV(ulong, ulong_t) ATOMIC_OR_NV(64, uint64_t) #define ATOMIC_AND_NV(name, type) \ type atomic_and_##name##_nv(volatile type *target, type bits) \ { \ - type rc; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - rc = (*target &= bits); \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (rc); \ + return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \ } ATOMIC_AND_NV(8, uint8_t) ATOMIC_AND_NV(uchar, uchar_t) ATOMIC_AND_NV(16, uint16_t) ATOMIC_AND_NV(ushort, ushort_t) ATOMIC_AND_NV(32, uint32_t) ATOMIC_AND_NV(uint, uint_t) ATOMIC_AND_NV(ulong, ulong_t) ATOMIC_AND_NV(64, uint64_t) /* - * If *arg1 == arg2, set *arg1 = arg3; return old value + * If *tgt == exp, set *tgt = des; return old value + * + * This may not look right on the first pass (or the sixteenth), but, + * from https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html: + * > If they are not equal, the operation is a read + * > and the current contents of *ptr are written into *expected. + * And, in the converse case, exp is already *target by definition. */ #define ATOMIC_CAS(name, type) \ - type atomic_cas_##name(volatile type *target, type arg1, type arg2) \ + type atomic_cas_##name(volatile type *target, type exp, type des) \ { \ - type old; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - old = *target; \ - if (old == arg1) \ - *target = arg2; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (old); \ + __atomic_compare_exchange_n(target, &exp, des, B_FALSE, \ + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ + return (exp); \ } ATOMIC_CAS(8, uint8_t) ATOMIC_CAS(uchar, uchar_t) ATOMIC_CAS(16, uint16_t) ATOMIC_CAS(ushort, ushort_t) ATOMIC_CAS(32, uint32_t) ATOMIC_CAS(uint, uint_t) ATOMIC_CAS(ulong, ulong_t) ATOMIC_CAS(64, uint64_t) void * -atomic_cas_ptr(volatile void *target, void *arg1, void *arg2) +atomic_cas_ptr(volatile void *target, void *exp, void *des) { - void *old; - - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - old = *(void **)target; - if (old == arg1) - *(void **)target = arg2; - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - return (old); + __atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE, + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return (exp); } /* * Swap target and return old value */ #define ATOMIC_SWAP(name, type) \ type atomic_swap_##name(volatile type *target, type bits) \ { \ - type old; \ - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \ - old = *target; \ - *target = bits; \ - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \ - return (old); \ + return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \ } ATOMIC_SWAP(8, uint8_t) ATOMIC_SWAP(uchar, uchar_t) ATOMIC_SWAP(16, uint16_t) ATOMIC_SWAP(ushort, ushort_t) ATOMIC_SWAP(32, uint32_t) ATOMIC_SWAP(uint, uint_t) ATOMIC_SWAP(ulong, ulong_t) ATOMIC_SWAP(64, uint64_t) /* END CSTYLED */ void * atomic_swap_ptr(volatile void *target, void *bits) { - void *old; - - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - old = *(void **)target; - *(void **)target = bits; - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - - return (old); + return 
(__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST)); } int atomic_set_long_excl(volatile ulong_t *target, uint_t value) { - ulong_t bit; - - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - bit = (1UL << value); - if ((*target & bit) != 0) { - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - return (-1); - } - *target |= bit; - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - - return (0); + ulong_t bit = 1UL << value; + ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST); + return ((old & bit) ? -1 : 0); } int atomic_clear_long_excl(volatile ulong_t *target, uint_t value) { - ulong_t bit; - - VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); - bit = (1UL << value); - if ((*target & bit) == 0) { - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - return (-1); - } - *target &= ~bit; - VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); - - return (0); + ulong_t bit = 1UL << value; + ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST); + return ((old & bit) ? 0 : -1); } void membar_enter(void) { - /* XXX - Implement me */ + __atomic_thread_fence(__ATOMIC_SEQ_CST); } void membar_exit(void) { - /* XXX - Implement me */ + __atomic_thread_fence(__ATOMIC_SEQ_CST); } void membar_producer(void) { - /* XXX - Implement me */ + __atomic_thread_fence(__ATOMIC_RELEASE); } void membar_consumer(void) { - /* XXX - Implement me */ + __atomic_thread_fence(__ATOMIC_ACQUIRE); }
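---

Not part of the patch: a small standalone sketch of the semantics the rewritten helpers are expected to preserve, since the compare-and-swap return convention is the one piece that is easy to misread (the comment above atomic_cas_##name in lib/libspl/atomic.c calls this out). The _demo names, the main() harness, and the build line are illustrative assumptions only; the function bodies simply mirror the __atomic_compare_exchange_n and __atomic_fetch_or patterns used in the new atomic.c.

/*
 * Sketch of the CAS and exclusive-bit semantics used in lib/libspl/atomic.c.
 * The *_demo helpers are hypothetical stand-ins, not libspl symbols.
 *
 * Build (assumed): cc -std=c11 -O2 cas_demo.c -o cas_demo   # add -latomic if needed
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Mirrors ATOMIC_CAS(32, uint32_t): always returns the pre-call value of *target. */
static uint32_t
atomic_cas_32_demo(volatile uint32_t *target, uint32_t exp, uint32_t des)
{
	/*
	 * On failure, __atomic_compare_exchange_n writes the current contents
	 * of *target into exp; on success, exp already held them.  Either way,
	 * exp is the old value, which is what the caller gets back.
	 */
	__atomic_compare_exchange_n(target, &exp, des, false,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return (exp);
}

/* Mirrors atomic_set_long_excl(): -1 if the bit was already set, 0 otherwise. */
static int
atomic_set_long_excl_demo(volatile unsigned long *target, unsigned int value)
{
	unsigned long bit = 1UL << value;
	unsigned long old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
	return ((old & bit) ? -1 : 0);
}

int
main(void)
{
	volatile uint32_t v = 5;

	/* Successful exchange: old value returned, target updated. */
	assert(atomic_cas_32_demo(&v, 5, 7) == 5 && v == 7);

	/* Failed exchange: old value still returned, target untouched. */
	assert(atomic_cas_32_demo(&v, 5, 9) == 7 && v == 7);

	volatile unsigned long bits = 0;
	assert(atomic_set_long_excl_demo(&bits, 3) == 0);	/* newly set */
	assert(atomic_set_long_excl_demo(&bits, 3) == -1);	/* already set */

	puts("semantics match the old mutex-serialized implementation");
	return (0);
}

On typical 64-bit hosts these builtins lower to ordinary lock-prefixed instructions and -latomic contributes nothing extra, which is why ZFS_AC_CONFIG_USER_LIBATOMIC only asks whether -latomic links at all rather than probing which individual operations would need it.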