[cairo-commit] build/aclocal.cairo.m4 src/cairo-atomic-private.h

Bryce Harrington bryce at kemper.freedesktop.org
Thu Jun 4 13:32:10 PDT 2015


 build/aclocal.cairo.m4     |   12 ++++++
 src/cairo-atomic-private.h |   90 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+)

New commits:
commit 5d150ee111c222f09e78f4f88540964476327844
Author: Nathan Froyd <froydnj at mozilla.com>
Date:   Mon May 4 13:38:41 2015 -0400

    Support new-style __atomic_* primitives
    
    Recent versions of GCC/clang feature a new set of compiler intrinsics
    for performing atomic operations, motivated by the operations needed to
    support the C++11 memory model.  These intrinsics are more flexible than
    the old __sync_* intrinsics and offer efficient support for atomic load
    and store operations.
    
    Having the load appear atomic to the compiler is particularly important
    for tools like ThreadSanitizer so they don't report false positives on
    memory operations that we intend to be atomic.
    
    Patch from Nathan Froyd <froydnj at mozilla.com>
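
For illustration (this sketch is not part of the commit), the contrast the
message describes, assuming a GCC/clang toolchain that provides the new
intrinsics; the names counter, read_plain and read_atomic are invented for
the example:

/* Illustrative only: the __sync_* family offers no load primitive, so
 * readers fall back to a plain load that ThreadSanitizer cannot tell
 * apart from a racy access.  __atomic_load_n makes the atomicity
 * visible to the compiler and to such tools.
 */
static int counter;

int
read_plain (void)
{
    return counter;	/* plain load: indistinguishable from a race */
}

int
read_atomic (void)
{
    return __atomic_load_n (&counter, __ATOMIC_SEQ_CST);
}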

diff --git a/build/aclocal.cairo.m4 b/build/aclocal.cairo.m4
index 2f4873b..44ba5fd 100644
--- a/build/aclocal.cairo.m4
+++ b/build/aclocal.cairo.m4
@@ -168,6 +168,13 @@ int atomic_cmpxchg(int i, int j, int k) { return __sync_val_compare_and_swap (&i
 		  cairo_cv_atomic_primitives="Intel"
 		  )
 
+		AC_TRY_LINK([
+int atomic_add(int i) { return __atomic_fetch_add(&i, 1, __ATOMIC_SEQ_CST); }
+int atomic_cmpxchg(int i, int j, int k) { return __atomic_compare_exchange_n(&i, &j, k, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
+], [],
+		   cairo_cv_atomic_primitives="cxx11"
+		   )
+
 		if test "x$cairo_cv_atomic_primitives" = "xnone"; then
 			AC_CHECK_HEADER([atomic_ops.h],
 					cairo_cv_atomic_primitives="libatomic-ops")
@@ -178,6 +185,11 @@ int atomic_cmpxchg(int i, int j, int k) { return __sync_val_compare_and_swap (&i
 					cairo_cv_atomic_primitives="OSAtomic")
 		fi
 	])
+	if test "x$cairo_cv_atomic_primitives" = xcxx11; then
+		AC_DEFINE(HAVE_CXX11_ATOMIC_PRIMITIVES, 1,
+			  [Enable if your compiler supports the GCC __atomic_* atomic primitives])
+	fi
+
 	if test "x$cairo_cv_atomic_primitives" = xIntel; then
 		AC_DEFINE(HAVE_INTEL_ATOMIC_PRIMITIVES, 1,
 			  [Enable if your compiler supports the Intel __sync_* atomic primitives])
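
For reference, a sketch of the standalone program the AC_TRY_LINK probe
above effectively compiles and links (the conftest scaffolding autoconf
generates is simplified away); if this builds, cairo_cv_atomic_primitives
is set to "cxx11" and HAVE_CXX11_ATOMIC_PRIMITIVES gets defined:

/* Equivalent of the cxx11 link test above, as a plain C file. */
int atomic_add (int i) { return __atomic_fetch_add (&i, 1, __ATOMIC_SEQ_CST); }

int atomic_cmpxchg (int i, int j, int k)
{
    return __atomic_compare_exchange_n (&i, &j, k, 0,
					__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int
main (void)
{
    return 0;
}
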
diff --git a/src/cairo-atomic-private.h b/src/cairo-atomic-private.h
index 327fed1..11b2887 100644
--- a/src/cairo-atomic-private.h
+++ b/src/cairo-atomic-private.h
@@ -53,6 +53,96 @@
 
 CAIRO_BEGIN_DECLS
 
+/* C++11 atomic primitives were designed to be more flexible than the
+ * __sync_* family of primitives.  Despite the name, they are available
+ * in C as well as C++.  The motivating reason for using them is that
+ * for _cairo_atomic_{int,ptr}_get, the compiler is able to see that
+ * the load is intended to be atomic, as opposed to the __sync_*
+ * version, below, where the load looks like a plain load.  Having
+ * the load appear atomic to the compiler is particularly important for
+ * tools like ThreadSanitizer so they don't report false positives on
+ * memory operations that we intend to be atomic.
+ */
+#if HAVE_CXX11_ATOMIC_PRIMITIVES
+
+#define HAS_ATOMIC_OPS 1
+
+typedef int cairo_atomic_int_t;
+
+static cairo_always_inline cairo_atomic_int_t
+_cairo_atomic_int_get (cairo_atomic_int_t *x)
+{
+    return __atomic_load_n(x, __ATOMIC_SEQ_CST);
+}
+
+static cairo_always_inline void *
+_cairo_atomic_ptr_get (void **x)
+{
+    return __atomic_load_n(x, __ATOMIC_SEQ_CST);
+}
+
+# define _cairo_atomic_int_inc(x) ((void) __atomic_fetch_add(x, 1, __ATOMIC_SEQ_CST))
+# define _cairo_atomic_int_dec(x) ((void) __atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST))
+# define _cairo_atomic_int_dec_and_test(x) (__atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST) == 1)
+
+#if SIZEOF_VOID_P==SIZEOF_INT
+typedef int cairo_atomic_intptr_t;
+#elif SIZEOF_VOID_P==SIZEOF_LONG
+typedef long cairo_atomic_intptr_t;
+#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG
+typedef long long cairo_atomic_intptr_t;
+#else
+#error No matching integer pointer type
+#endif
+
+static cairo_always_inline cairo_bool_t
+_cairo_atomic_int_cmpxchg_impl(cairo_atomic_int_t *x,
+			       cairo_atomic_int_t oldv,
+			       cairo_atomic_int_t newv)
+{
+    cairo_atomic_int_t expected = oldv;
+    return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
+
+#define _cairo_atomic_int_cmpxchg(x, oldv, newv) \
+  _cairo_atomic_int_cmpxchg_impl(x, oldv, newv)
+
+static cairo_always_inline cairo_atomic_int_t
+_cairo_atomic_int_cmpxchg_return_old_impl(cairo_atomic_int_t *x,
+					  cairo_atomic_int_t oldv,
+					  cairo_atomic_int_t newv)
+{
+    cairo_atomic_int_t expected = oldv;
+    (void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+    return expected;
+}
+
+#define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) \
+  _cairo_atomic_int_cmpxchg_return_old_impl(x, oldv, newv)
+
+static cairo_always_inline cairo_bool_t
+_cairo_atomic_ptr_cmpxchg_impl(void **x, void *oldv, void *newv)
+{
+    void *expected = oldv;
+    return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
+
+#define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \
+  _cairo_atomic_ptr_cmpxchg_impl(x, oldv, newv)
+
+static cairo_always_inline void *
+_cairo_atomic_ptr_cmpxchg_return_old_impl(void **x, void *oldv, void *newv)
+{
+    void *expected = oldv;
+    (void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+    return expected;
+}
+
+#define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) \
+  _cairo_atomic_ptr_cmpxchg_return_old_impl(x, oldv, newv)
+
+#endif
+
 #if HAVE_INTEL_ATOMIC_PRIMITIVES
 
 #define HAS_ATOMIC_OPS 1
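
A hypothetical usage sketch (not from the commit): the *_cmpxchg_return_old
helpers work because __atomic_compare_exchange_n writes the value actually
observed back into `expected' on failure, which lets a caller retry without
reloading; atomic_increment below is an invented example of that idiom:

/* Hypothetical caller: atomically increment via the same
 * compare-exchange idiom the new helpers wrap.  On failure,
 * __atomic_compare_exchange_n refreshes `old' with the value
 * observed in *x, so each retry starts from the current state.
 */
static void
atomic_increment (int *x)
{
    int old = __atomic_load_n (x, __ATOMIC_SEQ_CST);
    while (! __atomic_compare_exchange_n (x, &old, old + 1, 0,
					  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
	; /* `old' now holds the observed value; try again */
}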

