///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Electronic Arts Inc. All rights reserved.
///////////////////////////////////////////////////////////////////////////////

#ifndef EATHREAD_ATOMIC_STANDALONE_GCC_H
#define EATHREAD_ATOMIC_STANDALONE_GCC_H

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
#endif
namespace EA
{
namespace Thread
{

// TODO(rparolin): Consider use of clang builtin __sync_swap.
// https://clang.llvm.org/docs/LanguageExtensions.html#sync-swap
// TODO(rparolin): Consider use of C11 atomics.
// https://clang.llvm.org/docs/LanguageExtensions.html#c11-atomic-builtins
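
// A minimal sketch (illustrative and commented out, not part of this header) of what
// a migration to the newer __atomic builtins could look like. Unlike the __sync
// builtins used below, these take an explicit memory-order argument; __ATOMIC_SEQ_CST
// is chosen here to conservatively match full-barrier behavior.
//
//     template<typename T> inline T AtomicGetValue(volatile T* ptr)
//         { return __atomic_load_n(ptr, __ATOMIC_SEQ_CST); }
//     template<typename T> inline T AtomicFetchSwap(volatile T* dest, T value)
//         { return __atomic_exchange_n(dest, value, __ATOMIC_SEQ_CST); }
//     template<typename T> inline bool AtomicSetValueConditional(volatile T* dest, T value, T condition)
//         { return __atomic_compare_exchange_n(dest, &condition, value, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }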

// Forward declaration of the generic "get" helper; it is defined at the bottom of
// this file because its fallback path uses the AtomicFetchAdd overloads declared below.
namespace detail
{
	template<class T>
	inline T AtomicGetValue(volatile T* ptr);
} // namespace detail

// Note: the AtomicFetch* operations below return the value that *dest held
// immediately before the operation was applied.

// int
inline int AtomicGetValue(volatile int* ptr) { return detail::AtomicGetValue(ptr); }
inline int AtomicGetValue(const volatile int* ptr) { return AtomicGetValue(const_cast<volatile int*>(ptr)); }
inline int AtomicSetValue(volatile int* dest, int value) { return __sync_lock_test_and_set(dest, value); }
inline int AtomicFetchIncrement(volatile int* dest) { return __sync_fetch_and_add(dest, int(1)); }
inline int AtomicFetchDecrement(volatile int* dest) { return __sync_fetch_and_add(dest, int(-1)); }
inline int AtomicFetchAdd(volatile int* dest, int value) { return __sync_fetch_and_add(dest, value); }
inline int AtomicFetchSub(volatile int* dest, int value) { return __sync_fetch_and_sub(dest, value); }
inline int AtomicFetchOr(volatile int* dest, int value) { return __sync_fetch_and_or(dest, value); }
inline int AtomicFetchAnd(volatile int* dest, int value) { return __sync_fetch_and_and(dest, value); }
inline int AtomicFetchXor(volatile int* dest, int value) { return __sync_fetch_and_xor(dest, value); }
inline int AtomicFetchSwap(volatile int* dest, int value) { return __sync_lock_test_and_set(dest, value); }
inline int AtomicFetchSwapConditional(volatile int* dest, int value, int condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile int* dest, int value, int condition) { return __sync_bool_compare_and_swap(dest, condition, value); }
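
// Example usage (illustrative only; gLock, LockSpin and UnlockSpin are names invented
// for this sketch): a minimal test-and-set spinlock built from the int overloads above.
//
//     volatile int gLock = 0; // 0 = unlocked, 1 = locked
//
//     void LockSpin()
//     {
//         // Only the thread that transitions gLock from 0 to 1 acquires the lock.
//         while(!AtomicSetValueConditional(&gLock, 1, 0))
//             { /* spin */ }
//     }
//
//     void UnlockSpin()
//     {
//         AtomicSetValue(&gLock, 0);
//     }
//
// Caveat: GCC documents __sync_lock_test_and_set (which backs AtomicSetValue and
// AtomicFetchSwap) as an acquire barrier rather than a full barrier, while the
// __sync_fetch_and_* and compare-and-swap builtins are full barriers; a production
// unlock would want release semantics (e.g. __sync_lock_release).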

// unsigned int
inline unsigned int AtomicGetValue(volatile unsigned int* ptr) { return detail::AtomicGetValue(ptr); }
inline unsigned int AtomicGetValue(const volatile unsigned int* ptr) { return AtomicGetValue(const_cast<volatile unsigned int*>(ptr)); }
inline unsigned int AtomicSetValue(volatile unsigned int* dest, unsigned int value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned int AtomicFetchIncrement(volatile unsigned int* dest) { return __sync_fetch_and_add(dest, (unsigned int)(1)); }
inline unsigned int AtomicFetchDecrement(volatile unsigned int* dest) { return __sync_fetch_and_add(dest, (unsigned int)(-1)); }
inline unsigned int AtomicFetchAdd(volatile unsigned int* dest, unsigned int value) { return __sync_fetch_and_add(dest, value); }
inline unsigned int AtomicFetchSub(volatile unsigned int* dest, unsigned int value) { return __sync_fetch_and_sub(dest, value); }
inline unsigned int AtomicFetchOr(volatile unsigned int* dest, unsigned int value) { return __sync_fetch_and_or(dest, value); }
inline unsigned int AtomicFetchAnd(volatile unsigned int* dest, unsigned int value) { return __sync_fetch_and_and(dest, value); }
inline unsigned int AtomicFetchXor(volatile unsigned int* dest, unsigned int value) { return __sync_fetch_and_xor(dest, value); }
inline unsigned int AtomicFetchSwap(volatile unsigned int* dest, unsigned int value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned int AtomicFetchSwapConditional(volatile unsigned int* dest, unsigned int value, unsigned int condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile unsigned int* dest, unsigned int value, unsigned int condition) { return __sync_bool_compare_and_swap(dest, condition, value); }
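
// Example usage (illustrative only; AtomicStoreMax is a name invented for this
// sketch): the classic compare-and-swap retry loop, here raising a shared value
// to at least 'value'.
//
//     inline void AtomicStoreMax(volatile unsigned int* dest, unsigned int value)
//     {
//         for(;;)
//         {
//             const unsigned int current = AtomicGetValue(dest);
//             if(value <= current)
//                 return; // Already large enough; nothing to do.
//             if(AtomicSetValueConditional(dest, value, current))
//                 return; // Swapped 'value' in while *dest still equaled 'current'.
//             // Another thread changed *dest between the read and the swap; retry.
//         }
//     }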

// short
// (Note that the compare-and-swap forms below round-trip through unsigned short;
// presumably this sidesteps sign-related issues with the 16-bit builtins on some compilers.)
inline short AtomicGetValue(volatile short* ptr) { return detail::AtomicGetValue(ptr); }
inline short AtomicGetValue(const volatile short* ptr) { return AtomicGetValue(const_cast<volatile short*>(ptr)); }
inline short AtomicSetValue(volatile short* dest, short value) { return __sync_lock_test_and_set(dest, value); }
inline short AtomicFetchIncrement(volatile short* dest) { return __sync_fetch_and_add(dest, short(1)); }
inline short AtomicFetchDecrement(volatile short* dest) { return __sync_fetch_and_add(dest, short(-1)); }
inline short AtomicFetchAdd(volatile short* dest, short value) { return __sync_fetch_and_add(dest, value); }
inline short AtomicFetchSub(volatile short* dest, short value) { return __sync_fetch_and_sub(dest, value); }
inline short AtomicFetchOr(volatile short* dest, short value) { return __sync_fetch_and_or(dest, value); }
inline short AtomicFetchAnd(volatile short* dest, short value) { return __sync_fetch_and_and(dest, value); }
inline short AtomicFetchXor(volatile short* dest, short value) { return __sync_fetch_and_xor(dest, value); }
inline short AtomicFetchSwap(volatile short* dest, short value) { return __sync_lock_test_and_set(dest, value); }
inline short AtomicFetchSwapConditional(volatile short* dest, short value, short condition) { return __sync_val_compare_and_swap(reinterpret_cast<volatile unsigned short*>(dest), static_cast<unsigned short>(condition), static_cast<unsigned short>(value)); }
inline bool AtomicSetValueConditional(volatile short* dest, short value, short condition) { return __sync_bool_compare_and_swap(reinterpret_cast<volatile unsigned short*>(dest), static_cast<unsigned short>(condition), static_cast<unsigned short>(value)); }

// unsigned short
inline unsigned short AtomicGetValue(volatile unsigned short* ptr) { return detail::AtomicGetValue(ptr); }
inline unsigned short AtomicGetValue(const volatile unsigned short* ptr) { return AtomicGetValue(const_cast<volatile unsigned short*>(ptr)); }
inline unsigned short AtomicSetValue(volatile unsigned short* dest, unsigned short value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned short AtomicFetchIncrement(volatile unsigned short* dest) { return __sync_fetch_and_add(dest, (unsigned short)(1)); }
inline unsigned short AtomicFetchDecrement(volatile unsigned short* dest) { return __sync_fetch_and_add(dest, (unsigned short)(-1)); }
inline unsigned short AtomicFetchAdd(volatile unsigned short* dest, unsigned short value) { return __sync_fetch_and_add(dest, value); }
inline unsigned short AtomicFetchSub(volatile unsigned short* dest, unsigned short value) { return __sync_fetch_and_sub(dest, value); }
inline unsigned short AtomicFetchOr(volatile unsigned short* dest, unsigned short value) { return __sync_fetch_and_or(dest, value); }
inline unsigned short AtomicFetchAnd(volatile unsigned short* dest, unsigned short value) { return __sync_fetch_and_and(dest, value); }
inline unsigned short AtomicFetchXor(volatile unsigned short* dest, unsigned short value) { return __sync_fetch_and_xor(dest, value); }
inline unsigned short AtomicFetchSwap(volatile unsigned short* dest, unsigned short value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned short AtomicFetchSwapConditional(volatile unsigned short* dest, unsigned short value, unsigned short condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile unsigned short* dest, unsigned short value, unsigned short condition) { return __sync_bool_compare_and_swap(dest, condition, value); }

// long
inline long AtomicGetValue(volatile long* ptr) { return detail::AtomicGetValue(ptr); }
inline long AtomicGetValue(const volatile long* ptr) { return AtomicGetValue(const_cast<volatile long*>(ptr)); }
inline long AtomicSetValue(volatile long* dest, long value) { return __sync_lock_test_and_set(dest, value); }
inline long AtomicFetchIncrement(volatile long* dest) { return __sync_fetch_and_add(dest, long(1)); }
inline long AtomicFetchDecrement(volatile long* dest) { return __sync_fetch_and_add(dest, long(-1)); }
inline long AtomicFetchAdd(volatile long* dest, long value) { return __sync_fetch_and_add(dest, value); }
inline long AtomicFetchSub(volatile long* dest, long value) { return __sync_fetch_and_sub(dest, value); }
inline long AtomicFetchOr(volatile long* dest, long value) { return __sync_fetch_and_or(dest, value); }
inline long AtomicFetchAnd(volatile long* dest, long value) { return __sync_fetch_and_and(dest, value); }
inline long AtomicFetchXor(volatile long* dest, long value) { return __sync_fetch_and_xor(dest, value); }
inline long AtomicFetchSwap(volatile long* dest, long value) { return __sync_lock_test_and_set(dest, value); }
inline long AtomicFetchSwapConditional(volatile long* dest, long value, long condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile long* dest, long value, long condition) { return __sync_bool_compare_and_swap(dest, condition, value); }

// unsigned long
inline unsigned long AtomicGetValue(volatile unsigned long* ptr) { return detail::AtomicGetValue(ptr); }
inline unsigned long AtomicGetValue(const volatile unsigned long* ptr) { return AtomicGetValue(const_cast<volatile unsigned long*>(ptr)); }
inline unsigned long AtomicSetValue(volatile unsigned long* dest, unsigned long value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned long AtomicFetchIncrement(volatile unsigned long* dest) { return __sync_fetch_and_add(dest, (unsigned long)(1)); }
inline unsigned long AtomicFetchDecrement(volatile unsigned long* dest) { return __sync_fetch_and_add(dest, (unsigned long)(-1)); }
inline unsigned long AtomicFetchAdd(volatile unsigned long* dest, unsigned long value) { return __sync_fetch_and_add(dest, value); }
inline unsigned long AtomicFetchSub(volatile unsigned long* dest, unsigned long value) { return __sync_fetch_and_sub(dest, value); }
inline unsigned long AtomicFetchOr(volatile unsigned long* dest, unsigned long value) { return __sync_fetch_and_or(dest, value); }
inline unsigned long AtomicFetchAnd(volatile unsigned long* dest, unsigned long value) { return __sync_fetch_and_and(dest, value); }
inline unsigned long AtomicFetchXor(volatile unsigned long* dest, unsigned long value) { return __sync_fetch_and_xor(dest, value); }
inline unsigned long AtomicFetchSwap(volatile unsigned long* dest, unsigned long value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned long AtomicFetchSwapConditional(volatile unsigned long* dest, unsigned long value, unsigned long condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile unsigned long* dest, unsigned long value, unsigned long condition) { return __sync_bool_compare_and_swap(dest, condition, value); }

// char32_t
#if EA_CHAR32_NATIVE
inline char32_t AtomicGetValue(volatile char32_t* ptr) { return detail::AtomicGetValue(ptr); }
inline char32_t AtomicGetValue(const volatile char32_t* ptr) { return AtomicGetValue(const_cast<volatile char32_t*>(ptr)); }
inline char32_t AtomicSetValue(volatile char32_t* dest, char32_t value) { return __sync_lock_test_and_set(dest, value); }
inline char32_t AtomicFetchIncrement(volatile char32_t* dest) { return __sync_fetch_and_add(dest, char32_t(1)); }
inline char32_t AtomicFetchDecrement(volatile char32_t* dest) { return __sync_fetch_and_add(dest, char32_t(-1)); }
inline char32_t AtomicFetchAdd(volatile char32_t* dest, char32_t value) { return __sync_fetch_and_add(dest, value); }
inline char32_t AtomicFetchSub(volatile char32_t* dest, char32_t value) { return __sync_fetch_and_sub(dest, value); }
inline char32_t AtomicFetchOr(volatile char32_t* dest, char32_t value) { return __sync_fetch_and_or(dest, value); }
inline char32_t AtomicFetchAnd(volatile char32_t* dest, char32_t value) { return __sync_fetch_and_and(dest, value); }
inline char32_t AtomicFetchXor(volatile char32_t* dest, char32_t value) { return __sync_fetch_and_xor(dest, value); }
inline char32_t AtomicFetchSwap(volatile char32_t* dest, char32_t value) { return __sync_lock_test_and_set(dest, value); }
inline char32_t AtomicFetchSwapConditional(volatile char32_t* dest, char32_t value, char32_t condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile char32_t* dest, char32_t value, char32_t condition) { return __sync_bool_compare_and_swap(dest, condition, value); }
#endif

// long long
inline long long AtomicGetValue(volatile long long* ptr) { return detail::AtomicGetValue(ptr); }
inline long long AtomicGetValue(const volatile long long* ptr) { return AtomicGetValue(const_cast<volatile long long*>(ptr)); }
inline long long AtomicSetValue(volatile long long* dest, long long value) { return __sync_lock_test_and_set(dest, value); }
inline long long AtomicFetchIncrement(volatile long long* dest) { return __sync_fetch_and_add(dest, (long long)(1)); }
inline long long AtomicFetchDecrement(volatile long long* dest) { return __sync_fetch_and_add(dest, (long long)(-1)); }
inline long long AtomicFetchAdd(volatile long long* dest, long long value) { return __sync_fetch_and_add(dest, value); }
inline long long AtomicFetchSub(volatile long long* dest, long long value) { return __sync_fetch_and_sub(dest, value); }
inline long long AtomicFetchOr(volatile long long* dest, long long value) { return __sync_fetch_and_or(dest, value); }
inline long long AtomicFetchAnd(volatile long long* dest, long long value) { return __sync_fetch_and_and(dest, value); }
inline long long AtomicFetchXor(volatile long long* dest, long long value) { return __sync_fetch_and_xor(dest, value); }
inline long long AtomicFetchSwap(volatile long long* dest, long long value) { return __sync_lock_test_and_set(dest, value); }
inline long long AtomicFetchSwapConditional(volatile long long* dest, long long value, long long condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile long long* dest, long long value, long long condition) { return __sync_bool_compare_and_swap(dest, condition, value); }
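
// Note on the 64-bit overloads (here and below): on 32-bit targets the 8-byte __sync
// builtins may compile to out-of-line library calls, or be unavailable entirely,
// depending on whether the target supports an 8-byte compare-and-swap (e.g. cmpxchg8b
// on x86 requires an appropriate -march setting). This header does not detect that.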

// unsigned long long
inline unsigned long long AtomicGetValue(volatile unsigned long long* ptr) { return detail::AtomicGetValue(ptr); }
inline unsigned long long AtomicGetValue(const volatile unsigned long long* ptr) { return AtomicGetValue(const_cast<volatile unsigned long long*>(ptr)); }
inline unsigned long long AtomicSetValue(volatile unsigned long long* dest, unsigned long long value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned long long AtomicFetchIncrement(volatile unsigned long long* dest) { return __sync_fetch_and_add(dest, (unsigned long long)(1)); }
inline unsigned long long AtomicFetchDecrement(volatile unsigned long long* dest) { return __sync_fetch_and_add(dest, (unsigned long long)(-1)); }
inline unsigned long long AtomicFetchAdd(volatile unsigned long long* dest, unsigned long long value) { return __sync_fetch_and_add(dest, value); }
inline unsigned long long AtomicFetchSub(volatile unsigned long long* dest, unsigned long long value) { return __sync_fetch_and_sub(dest, value); }
inline unsigned long long AtomicFetchOr(volatile unsigned long long* dest, unsigned long long value) { return __sync_fetch_and_or(dest, value); }
inline unsigned long long AtomicFetchAnd(volatile unsigned long long* dest, unsigned long long value) { return __sync_fetch_and_and(dest, value); }
inline unsigned long long AtomicFetchXor(volatile unsigned long long* dest, unsigned long long value) { return __sync_fetch_and_xor(dest, value); }
inline unsigned long long AtomicFetchSwap(volatile unsigned long long* dest, unsigned long long value) { return __sync_lock_test_and_set(dest, value); }
inline unsigned long long AtomicFetchSwapConditional(volatile unsigned long long* dest, unsigned long long value, unsigned long long condition) { return __sync_val_compare_and_swap(dest, condition, value); }
inline bool AtomicSetValueConditional(volatile unsigned long long* dest, unsigned long long value, unsigned long long condition) { return __sync_bool_compare_and_swap(dest, condition, value); }

//
// You cannot simply replace the overloads above with a single template, because of
// the explicit 128-bit overloads below: during overload resolution the compiler
// prefers a viable non-template overload to the template and will convert argument
// temporaries in order to call it.
//
// template<typename T> inline T AtomicGetValue(volatile T* source) { return __sync_fetch_and_add(source, (T)(0)); }
// template<typename T> inline void AtomicSetValue(volatile T* dest, T value) { __sync_lock_test_and_set(dest, value); }
// template<typename T> inline T AtomicFetchIncrement(volatile T* dest) { return __sync_fetch_and_add(dest, (T)(1)); }
// template<typename T> inline T AtomicFetchDecrement(volatile T* dest) { return __sync_fetch_and_add(dest, (T)(-1)); }
// template<typename T> inline T AtomicFetchAdd(volatile T* dest, T value) { return __sync_fetch_and_add(dest, value); }
// template<typename T> inline T AtomicFetchOr(volatile T* dest, T value) { return __sync_fetch_and_or(dest, value); }
// template<typename T> inline T AtomicFetchAnd(volatile T* dest, T value) { return __sync_fetch_and_and(dest, value); }
// template<typename T> inline T AtomicFetchXor(volatile T* dest, T value) { return __sync_fetch_and_xor(dest, value); }
// template<typename T> inline T AtomicFetchSwap(volatile T* dest, T value) { return __sync_lock_test_and_set(dest, value); }
// template<typename T> inline bool AtomicSetValueConditional(volatile T* dest, T value, T condition) { return __sync_bool_compare_and_swap(dest, condition, value); }
//
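// For illustration (int128_t here is a hypothetical stand-in for the 128-bit type
// used by the overloads mentioned above, not something defined at this point):
//
//     template<typename T> T AtomicFetchAdd(volatile T* dest, T value);    // (1) template
//     int128_t AtomicFetchAdd(volatile int128_t* dest, int128_t value);    // (2) non-template
//
//     int128_t v;
//     AtomicFetchAdd(&v, int128_t(0)); // calls (2): a matching non-template beats the template.
//     AtomicFetchAdd(&v, 0);           // also calls (2): deduction for (1) fails (T would be
//                                      // both int128_t and int), so the int temporary converts.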

namespace detail
{
	template<class T>
	inline T AtomicGetValue(volatile T* ptr)
	{
		#if EA_PLATFORM_WORD_SIZE >= 8 && defined(EA_PROCESSOR_X86_64)
			// On x86-64, an aligned load of a value up to word size is inherently atomic,
			// so a plain volatile read bracketed by compiler barriers is sufficient.
			EATHREAD_ALIGNMENT_CHECK(ptr);
			EACompilerMemoryBarrier();
			T value = *ptr;
			EACompilerMemoryBarrier();
			return value;
		#else
			// Elsewhere, perform the read as an atomic add of zero.
			return AtomicFetchAdd(ptr, T(0));
		#endif
	}
} // namespace detail

} // namespace Thread
} // namespace EA

#endif // EATHREAD_ATOMIC_STANDALONE_GCC_H