summaryrefslogtreecommitdiff
path: root/includes
diff options
context:
space:
mode:
authorNicolas "Pixel" Noble <pixel@nobis-crew.org>2011-10-04 00:37:17 +0200
committerNicolas "Pixel" Noble <pixel@nobis-crew.org>2011-10-04 00:37:17 +0200
commit04dbe446e821ccabfb765d8a4381994dfd9916c2 (patch)
tree145d882904df4d1cefdabe5976f06787ee6cb689 /includes
parent342b273234405ab76dc159d2e402bfb1ddfa1d8f (diff)
Adding atomics.
Diffstat (limited to 'includes')
-rw-r--r--includes/Atomic.h71
1 files changed, 71 insertions, 0 deletions
diff --git a/includes/Atomic.h b/includes/Atomic.h
new file mode 100644
index 0000000..d079967
--- /dev/null
+++ b/includes/Atomic.h
@@ -0,0 +1,71 @@
+#pragma once
+
+namespace Balau {
+
+namespace Atomic {
+
+#if (__GNUC__ >= 5) || ((__GNUC__ == 4) && ((__GNUC_MINOR__ >= 1)))
+// gcc version of the atomic operations
// Post-operation atomics: each applies the operation to *ptr atomically and
// returns the NEW value (after the operation), mirroring gcc's
// __sync_*_and_fetch builtins.
// Bug fixed: the original bodies lacked the 'return' keyword, so flowing off
// the end of a non-void function was undefined behavior and callers received
// garbage.
// NOTE(review): Nand computes ~(*ptr & mask) on gcc >= 4.4 / clang; the
// pre-4.4 builtin had different semantics — confirm the minimum toolchain.
template <class T> T Or(volatile T * ptr, T mask) { return __sync_or_and_fetch(ptr, mask); }
template <class T> T And(volatile T * ptr, T mask) { return __sync_and_and_fetch(ptr, mask); }
template <class T> T Xor(volatile T * ptr, T mask) { return __sync_xor_and_fetch(ptr, mask); }
template <class T> T Nand(volatile T * ptr, T mask) { return __sync_nand_and_fetch(ptr, mask); }
template <class T> T Increment(volatile T * ptr, T delta = 1) { return __sync_add_and_fetch(ptr, delta); }
template <class T> T Decrement(volatile T * ptr, T delta = 1) { return __sync_sub_and_fetch(ptr, delta); }
+
// "Prefetch" (fetch-before) variants: each applies the operation to *ptr
// atomically but returns the OLD value (before the operation), mirroring
// gcc's __sync_fetch_and_* builtins.
// Bugs fixed: the original bodies lacked 'return' statements (UB, garbage
// results), and the namespace was closed with a stray ';'.
namespace Prefetch {
template <class T> T Or(volatile T * ptr, T mask) { return __sync_fetch_and_or(ptr, mask); }
template <class T> T And(volatile T * ptr, T mask) { return __sync_fetch_and_and(ptr, mask); }
template <class T> T Xor(volatile T * ptr, T mask) { return __sync_fetch_and_xor(ptr, mask); }
template <class T> T Nand(volatile T * ptr, T mask) { return __sync_fetch_and_nand(ptr, mask); }
template <class T> T Increment(volatile T * ptr, T delta = 1) { return __sync_fetch_and_add(ptr, delta); }
template <class T> T Decrement(volatile T * ptr, T delta = 1) { return __sync_fetch_and_sub(ptr, delta); }
}
+
// Atomic compare-and-swap wrappers over gcc's builtins.
// Careful with the argument order: these take (ptr, new-value, expected),
// while the underlying builtins take (ptr, expected, new-value).
//
// CmpXChgVal: if *ptr == cmp, atomically stores xch. Returns the value
// *ptr held just before the call, so (result == cmp) signals success.
template <class T>
T CmpXChgVal(volatile T * ptr, const T xch, const T cmp) {
    return __sync_val_compare_and_swap(ptr, cmp, xch);
}

// CmpXChgBool: same operation, but reports success as a bool instead of
// returning the previous value.
template <class T>
bool CmpXChgBool(volatile T * ptr, const T xch, const T cmp) {
    return __sync_bool_compare_and_swap(ptr, cmp, xch);
}
+
// Atomically stores 'exchange' into *ptr and returns the previous value.
// Intended for 32-bit-wide T (the x86 fast path uses the 32-bit xchgl);
// the generic CAS fallback is width-agnostic.
template <class T> T Exchange32(volatile T * ptr, const T exchange) {
#if defined(i386) || defined (__x86_64)
    // x86 'xchg' with a memory operand is implicitly locked, so no 'lock'
    // prefix is required. Bugs fixed vs. the original:
    //  - 'exchange' is const and cannot be a "+r" (read-write) asm operand;
    //    a mutable local carries the value in and the old value out.
    //  - the memory operand is expressed as "+m"(*ptr) with a "memory"
    //    clobber so the compiler knows *ptr is read and written and does
    //    not cache values across the swap.
    T value = exchange;
    __asm__ __volatile__("xchgl %0, %1" : "+r"(value), "+m"(*ptr) : : "memory");
    return value;
#else
    // Portable fallback: CAS loop returning the old value, like xchg.
    T p;
    do { p = *ptr; } while (!__sync_bool_compare_and_swap(ptr, p, exchange));
    return p;
#endif
}
+
// Atomically stores 'exchange' into *ptr and returns the previous value.
// Intended for 64-bit-wide T; the generic CAS fallback is width-agnostic.
template <class T> T Exchange64(volatile T * ptr, const T exchange) {
#if defined (__x86_64)
    // Bugs fixed vs. the original:
    //  - the fast path was also enabled for i386, but 'xchgq' is a 64-bit
    //    instruction that does not exist on 32-bit x86; i386 now takes the
    //    CAS fallback below.
    //  - 'exchange' is const and cannot be a "+r" asm operand; a mutable
    //    local carries the value in and the old value out.
    //  - "+m"(*ptr) + "memory" clobber tell the compiler *ptr is modified.
    // xchg with a memory operand is implicitly locked.
    T value = exchange;
    __asm__ __volatile__("xchgq %0, %1" : "+r"(value), "+m"(*ptr) : : "memory");
    return value;
#else
    // Portable fallback: CAS loop returning the old value, like xchg.
    T p;
    do { p = *ptr; } while (!__sync_bool_compare_and_swap(ptr, p, exchange));
    return p;
#endif
}
+
+#else
+#ifdef _MSVC
+// Visual Studio version of the atomic operations
+
+#error MSVC not yet implemented.
+
+#else
+#error No known platform for atomic operations.
+#endif
+#endif
+
// Atomically swaps the pointer stored at *ptr for 'exchange' and returns
// the previous pointer, dispatching to the pointer-sized primitive
// (Exchange64 on x86_64; elsewhere Exchange32, whose non-x86 branch is a
// width-agnostic CAS loop). Only reachable on platforms that passed the
// compiler check above — other platforms already hit #error.
// Bug fixed: the parameter was 'const T * exchange' (pointer to const T),
// which contradicted the 'T *' return type and broke template deduction in
// the nested Exchange32/64 call (T* vs const T*), so the template could
// never be instantiated. It is now 'T * const' (a const pointer to
// mutable T), which deduces cleanly and accepts every call a plain 'T *'
// argument would make.
template <class T> T * ExchangePtr(T * volatile * ptr, T * const exchange) {
#if defined (__x86_64)
    return Exchange64(ptr, exchange);
#else
    return Exchange32(ptr, exchange);
#endif
}
+
+};
+
+};