///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Electronic Arts Inc. All rights reserved.
///////////////////////////////////////////////////////////////////////////////

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
    #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
#endif

// Note: this header expects the EABase/EAThread configuration macros it uses
// (EA_PLATFORM_MICROSOFT, EA_PROCESSOR_X86, EA_CHAR32_NATIVE, EATHREAD_ALIGNMENT_CHECK,
// EACompilerMemoryBarrier, etc.) to have been defined by the header that includes it.

/////////////////////////////////////////////////////////////////////////////
// InterlockedXXX intrinsics
//
#if defined(EA_PLATFORM_MICROSOFT)
    EA_DISABLE_ALL_VC_WARNINGS()
    #include <xatomic.h>
    EA_RESTORE_ALL_VC_WARNINGS()

    extern "C" long    _InterlockedIncrement(long volatile* Addend);
    extern "C" long    _InterlockedDecrement(long volatile* Addend);
    extern "C" long    _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp);
    extern "C" long    _InterlockedExchange(long volatile* Target, long Value);
    extern "C" long    _InterlockedExchangeAdd(long volatile* Addend, long Value);
    extern "C" int64_t _InterlockedCompareExchange64(int64_t volatile* Dest, int64_t Exchange, int64_t Comp);

    #pragma intrinsic (_InterlockedCompareExchange)
    #define InterlockedCompareExchangeImp _InterlockedCompareExchange

    #pragma intrinsic (_InterlockedExchange)
    #define InterlockedExchangeImp _InterlockedExchange

    #pragma intrinsic (_InterlockedExchangeAdd)
    #define InterlockedExchangeAddImp _InterlockedExchangeAdd

    #pragma intrinsic (_InterlockedIncrement)
    #define InterlockedIncrementImp _InterlockedIncrement

    #pragma intrinsic (_InterlockedDecrement)
    #define InterlockedDecrementImp _InterlockedDecrement

    #pragma intrinsic (_InterlockedCompareExchange64)
    #define InterlockedCompareExchange64Imp _InterlockedCompareExchange64

    inline bool InterlockedSetIfEqual(volatile int64_t* dest, int64_t newValue, int64_t condition)
    {
        return (InterlockedCompareExchange64Imp(dest, newValue, condition) == condition);
    }

    inline bool InterlockedSetIfEqual(volatile uint64_t* dest, uint64_t newValue, uint64_t condition)
    {
        return (InterlockedCompareExchange64Imp((int64_t volatile*)dest, (int64_t)newValue, (int64_t)condition) == (int64_t)condition);
    }
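
    // Illustrative usage of InterlockedSetIfEqual (a minimal sketch; 'gValue64' is a
    // hypothetical variable, not part of this header): a compare-and-swap loop that
    // atomically doubles a 64-bit value, retrying until the swap succeeds.
    //
    //     volatile int64_t gValue64 = 1;
    //
    //     int64_t observed, desired;
    //     do {
    //         observed = gValue64;
    //         desired  = observed * 2;
    //     } while(!InterlockedSetIfEqual(&gValue64, desired, observed));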

    #ifndef InterlockedCompareExchangeImp // If the above intrinsics aren't used...
        extern "C" __declspec(dllimport) long __stdcall InterlockedIncrement(long volatile* pAddend);
        extern "C" __declspec(dllimport) long __stdcall InterlockedDecrement(long volatile* pAddend);
        extern "C" __declspec(dllimport) long __stdcall InterlockedExchange(long volatile* pTarget, long value);
        extern "C" __declspec(dllimport) long __stdcall InterlockedExchangeAdd(long volatile* pAddend, long value);
        extern "C" __declspec(dllimport) long __stdcall InterlockedCompareExchange(long volatile* pDestination, long value, long compare);

        #define InterlockedCompareExchangeImp InterlockedCompareExchange
        #define InterlockedExchangeImp        InterlockedExchange
        #define InterlockedExchangeAddImp     InterlockedExchangeAdd
        #define InterlockedIncrementImp       InterlockedIncrement
        #define InterlockedDecrementImp       InterlockedDecrement
    #endif

    #if defined(EA_PROCESSOR_X86)
        // 32-bit x86 natively provides only _InterlockedCompareExchange64 among the 64-bit
        // interlocked intrinsics, so route the remaining 64-bit operations to their _INLINE
        // emulation variants (defined elsewhere).
        #define _InterlockedExchange64    _InterlockedExchange64_INLINE
        #define _InterlockedExchangeAdd64 _InterlockedExchangeAdd64_INLINE
        #define _InterlockedAnd64         _InterlockedAnd64_INLINE
        #define _InterlockedOr64          _InterlockedOr64_INLINE
        #define _InterlockedXor64         _InterlockedXor64_INLINE
    #endif

#endif // EA_PLATFORM_MICROSOFT

namespace EA
{
namespace Thread
{
    namespace detail
    {
        // Forward declaration; the definition appears at the end of this file because it
        // relies on the AtomicFetchAdd overloads declared below.
        template <class T>
        inline T AtomicGetValue(volatile T* ptr);
    } // namespace detail

    // Note: the AtomicFetch* operations below return the value held before the operation was applied.

    // int
    inline int AtomicGetValue(volatile int* ptr) { return detail::AtomicGetValue(ptr); }
    inline int AtomicGetValue(const volatile int* ptr) { return AtomicGetValue(const_cast<volatile int*>(ptr)); }
    inline int AtomicSetValue(volatile int* ptr, int value) { return static_cast<int>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline int AtomicFetchIncrement(volatile int* ptr) { return static_cast<int>(_InterlockedIncrement((long*)ptr)) - 1; }
    inline int AtomicFetchDecrement(volatile int* ptr) { return static_cast<int>(_InterlockedDecrement((long*)ptr)) + 1; }
    inline int AtomicFetchAdd(volatile int* ptr, int value) { return static_cast<int>(_InterlockedExchangeAdd((long*)ptr, (long)value)); }
    inline int AtomicFetchSub(volatile int* ptr, int value) { return static_cast<int>(_InterlockedExchangeAdd((long*)ptr, -(long)value)); }
    inline int AtomicFetchOr(volatile int* ptr, int value) { return static_cast<int>(_InterlockedOr((long*)ptr, (long)value)); }
    inline int AtomicFetchAnd(volatile int* ptr, int value) { return static_cast<int>(_InterlockedAnd((long*)ptr, (long)value)); }
    inline int AtomicFetchXor(volatile int* ptr, int value) { return static_cast<int>(_InterlockedXor((long*)ptr, (long)value)); }
    inline int AtomicFetchSwap(volatile int* ptr, int value) { return static_cast<int>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline int AtomicFetchSwapConditional(volatile int* ptr, int value, int condition) { return static_cast<int>(_InterlockedCompareExchange((long*)ptr, (long)value, (long)condition)); }
    inline bool AtomicSetValueConditional(volatile int* ptr, int value, int condition) { return _InterlockedCompareExchange((long*)ptr, (long)value, (long)condition) == (long)condition; }

    // unsigned int
    inline unsigned int AtomicGetValue(volatile unsigned int* ptr) { return detail::AtomicGetValue(ptr); }
    inline unsigned int AtomicGetValue(const volatile unsigned int* ptr) { return AtomicGetValue(const_cast<volatile unsigned int*>(ptr)); }
    inline unsigned int AtomicSetValue(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline unsigned int AtomicFetchIncrement(volatile unsigned int* ptr) { return static_cast<unsigned int>(_InterlockedExchangeAdd((long*)ptr, (long)1)); }
    inline unsigned int AtomicFetchDecrement(volatile unsigned int* ptr) { return static_cast<unsigned int>(_InterlockedExchangeAdd((long*)ptr, (long)-1)); }
    inline unsigned int AtomicFetchAdd(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedExchangeAdd((long*)ptr, (long)value)); }
    inline unsigned int AtomicFetchSub(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedExchangeAdd((long*)ptr, -(long)value)); }
    inline unsigned int AtomicFetchOr(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedOr((long*)ptr, (long)value)); }
    inline unsigned int AtomicFetchAnd(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedAnd((long*)ptr, (long)value)); }
    inline unsigned int AtomicFetchXor(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedXor((long*)ptr, (long)value)); }
    inline unsigned int AtomicFetchSwap(volatile unsigned int* ptr, unsigned int value) { return static_cast<unsigned int>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline unsigned int AtomicFetchSwapConditional(volatile unsigned int* ptr, unsigned int value, unsigned int condition) { return static_cast<unsigned int>(_InterlockedCompareExchange((long*)ptr, (long)value, (long)condition)); }
    inline bool AtomicSetValueConditional(volatile unsigned int* ptr, unsigned int value, unsigned int condition) { return _InterlockedCompareExchange((long*)ptr, (long)value, (long)condition) == (long)condition; }

    // short
    inline short AtomicGetValue(volatile short* ptr) { return detail::AtomicGetValue(ptr); }
    inline short AtomicGetValue(const volatile short* ptr) { return AtomicGetValue(const_cast<volatile short*>(ptr)); }
    inline short AtomicSetValue(volatile short* ptr, short value) { return static_cast<short>(_InterlockedExchange16((short*)ptr, (short)value)); }
    inline short AtomicFetchIncrement(volatile short* ptr) { return static_cast<short>(_InterlockedExchangeAdd16((short*)ptr, (short)1)); }
    inline short AtomicFetchDecrement(volatile short* ptr) { return static_cast<short>(_InterlockedExchangeAdd16((short*)ptr, (short)-1)); }
    inline short AtomicFetchAdd(volatile short* ptr, short value) { return static_cast<short>(_InterlockedExchangeAdd16((short*)ptr, (short)value)); }
    inline short AtomicFetchSub(volatile short* ptr, short value) { return static_cast<short>(_InterlockedExchangeAdd16((short*)ptr, -value)); }
    inline short AtomicFetchOr(volatile short* ptr, short value) { return static_cast<short>(_InterlockedOr16((short*)ptr, (short)value)); }
    inline short AtomicFetchAnd(volatile short* ptr, short value) { return static_cast<short>(_InterlockedAnd16((short*)ptr, (short)value)); }
    inline short AtomicFetchXor(volatile short* ptr, short value) { return static_cast<short>(_InterlockedXor16((short*)ptr, (short)value)); }
    inline short AtomicFetchSwap(volatile short* ptr, short value) { return static_cast<short>(_InterlockedExchange16((short*)ptr, (short)value)); }
    inline short AtomicFetchSwapConditional(volatile short* ptr, short value, short condition) { return _InterlockedCompareExchange16(ptr, value, condition); }
    inline bool AtomicSetValueConditional(volatile short* ptr, short value, short condition) { return _InterlockedCompareExchange16(ptr, value, condition) == condition; }

    // unsigned short
    inline unsigned short AtomicGetValue(volatile unsigned short* ptr) { return detail::AtomicGetValue(ptr); }
    inline unsigned short AtomicGetValue(const volatile unsigned short* ptr) { return AtomicGetValue(const_cast<volatile unsigned short*>(ptr)); }
    inline unsigned short AtomicSetValue(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedExchange16((short*)ptr, (short)value)); }
    inline unsigned short AtomicFetchIncrement(volatile unsigned short* ptr) { return static_cast<unsigned short>(_InterlockedExchangeAdd16((short*)ptr, (short)1)); }
    inline unsigned short AtomicFetchDecrement(volatile unsigned short* ptr) { return static_cast<unsigned short>(_InterlockedExchangeAdd16((short*)ptr, (short)-1)); }
    inline unsigned short AtomicFetchAdd(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedExchangeAdd16((short*)ptr, (short)value)); }
    inline unsigned short AtomicFetchSub(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedExchangeAdd16((short*)ptr, -(short)value)); }
    inline unsigned short AtomicFetchOr(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedOr16((short*)ptr, (short)value)); }
    inline unsigned short AtomicFetchAnd(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedAnd16((short*)ptr, (short)value)); }
    inline unsigned short AtomicFetchXor(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedXor16((short*)ptr, (short)value)); }
    inline unsigned short AtomicFetchSwap(volatile unsigned short* ptr, unsigned short value) { return static_cast<unsigned short>(_InterlockedExchange16((short*)ptr, (short)value)); }
    inline unsigned short AtomicFetchSwapConditional(volatile unsigned short* ptr, unsigned short value, unsigned short condition) { return static_cast<unsigned short>(_InterlockedCompareExchange16((short*)ptr, (short)value, (short)condition)); }
    inline bool AtomicSetValueConditional(volatile unsigned short* ptr, unsigned short value, unsigned short condition) { return _InterlockedCompareExchange16((short*)ptr, (short)value, (short)condition) == (short)condition; }

    // long
    inline long AtomicGetValue(volatile long* ptr) { return detail::AtomicGetValue(ptr); }
    inline long AtomicGetValue(const volatile long* ptr) { return AtomicGetValue(const_cast<volatile long*>(ptr)); }
    inline long AtomicSetValue(volatile long* ptr, long value) { return _InterlockedExchange(ptr, value); }
    inline long AtomicFetchIncrement(volatile long* ptr) { return _InterlockedIncrement(ptr) - 1; }
    inline long AtomicFetchDecrement(volatile long* ptr) { return _InterlockedDecrement(ptr) + 1; }
    inline long AtomicFetchAdd(volatile long* ptr, long value) { return _InterlockedExchangeAdd(ptr, value); }
    inline long AtomicFetchSub(volatile long* ptr, long value) { return _InterlockedExchangeAdd(ptr, -value); }
    inline long AtomicFetchOr(volatile long* ptr, long value) { return _InterlockedOr(ptr, value); }
    inline long AtomicFetchAnd(volatile long* ptr, long value) { return _InterlockedAnd(ptr, value); }
    inline long AtomicFetchXor(volatile long* ptr, long value) { return _InterlockedXor(ptr, value); }
    inline long AtomicFetchSwap(volatile long* ptr, long value) { return _InterlockedExchange(ptr, value); }
    inline long AtomicFetchSwapConditional(volatile long* ptr, long value, long condition) { return _InterlockedCompareExchange(ptr, value, condition); }
    inline bool AtomicSetValueConditional(volatile long* ptr, long value, long condition) { return _InterlockedCompareExchange(ptr, value, condition) == condition; }

    // unsigned long
    inline unsigned long AtomicGetValue(volatile unsigned long* ptr) { return detail::AtomicGetValue(ptr); }
    inline unsigned long AtomicGetValue(const volatile unsigned long* ptr) { return AtomicGetValue(const_cast<volatile unsigned long*>(ptr)); }
    inline unsigned long AtomicSetValue(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline unsigned long AtomicFetchIncrement(volatile unsigned long* ptr) { return static_cast<unsigned long>(_InterlockedIncrement((long*)ptr)) - 1; }
    inline unsigned long AtomicFetchDecrement(volatile unsigned long* ptr) { return static_cast<unsigned long>(_InterlockedDecrement((long*)ptr)) + 1; }
    inline unsigned long AtomicFetchAdd(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedExchangeAdd((long*)ptr, (long)value)); }
    inline unsigned long AtomicFetchSub(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedExchangeAdd((long*)ptr, -(long)value)); }
    inline unsigned long AtomicFetchOr(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedOr((long*)ptr, (long)value)); }
    inline unsigned long AtomicFetchAnd(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedAnd((long*)ptr, (long)value)); }
    inline unsigned long AtomicFetchXor(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedXor((long*)ptr, (long)value)); }
    inline unsigned long AtomicFetchSwap(volatile unsigned long* ptr, unsigned long value) { return static_cast<unsigned long>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline unsigned long AtomicFetchSwapConditional(volatile unsigned long* ptr, unsigned long value, unsigned long condition) { return static_cast<unsigned long>(_InterlockedCompareExchange((long*)ptr, (long)value, (long)condition)); }
    inline bool AtomicSetValueConditional(volatile unsigned long* ptr, unsigned long value, unsigned long condition) { return static_cast<unsigned long>(_InterlockedCompareExchange((long*)ptr, (long)value, (long)condition)) == condition; }

    // char32_t
    #if EA_CHAR32_NATIVE
    inline char32_t AtomicGetValue(volatile char32_t* ptr) { return detail::AtomicGetValue(ptr); }
    inline char32_t AtomicGetValue(const volatile char32_t* ptr) { return AtomicGetValue(const_cast<volatile char32_t*>(ptr)); }
    inline char32_t AtomicSetValue(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline char32_t AtomicFetchIncrement(volatile char32_t* ptr) { return static_cast<char32_t>(_InterlockedExchangeAdd((long*)ptr, (long)1)); }
    inline char32_t AtomicFetchDecrement(volatile char32_t* ptr) { return static_cast<char32_t>(_InterlockedExchangeAdd((long*)ptr, (long)-1)); }
    inline char32_t AtomicFetchAdd(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedExchangeAdd((long*)ptr, (long)value)); }
    inline char32_t AtomicFetchSub(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedExchangeAdd((long*)ptr, -(long)value)); }
    inline char32_t AtomicFetchOr(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedOr((long*)ptr, (long)value)); }
    inline char32_t AtomicFetchAnd(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedAnd((long*)ptr, (long)value)); }
    inline char32_t AtomicFetchXor(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedXor((long*)ptr, (long)value)); }
    inline char32_t AtomicFetchSwap(volatile char32_t* ptr, char32_t value) { return static_cast<char32_t>(_InterlockedExchange((long*)ptr, (long)value)); }
    inline char32_t AtomicFetchSwapConditional(volatile char32_t* ptr, char32_t value, unsigned int condition) { return static_cast<char32_t>(_InterlockedCompareExchange((long*)ptr, (long)value, (long)condition)); }
    inline bool AtomicSetValueConditional(volatile char32_t* ptr, char32_t value, unsigned int condition) { return _InterlockedCompareExchange((long*)ptr, (long)value, (long)condition) == (long)condition; }
    #endif

    // long long
    inline long long AtomicGetValue(volatile long long* ptr) { return detail::AtomicGetValue(ptr); }
    inline long long AtomicGetValue(const volatile long long* ptr) { return AtomicGetValue(const_cast<volatile long long*>(ptr)); }
    inline long long AtomicSetValue(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedExchange64(ptr, value)); }
    inline long long AtomicFetchIncrement(volatile long long* ptr) { return static_cast<long long>(_InterlockedExchangeAdd64(ptr, (long long)1)); }
    inline long long AtomicFetchDecrement(volatile long long* ptr) { return static_cast<long long>(_InterlockedExchangeAdd64(ptr, (long long)-1)); }
    inline long long AtomicFetchAdd(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedExchangeAdd64(ptr, value)); }
    inline long long AtomicFetchSub(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedExchangeAdd64(ptr, -(long long)value)); }
    inline long long AtomicFetchOr(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedOr64(ptr, value)); }
    inline long long AtomicFetchAnd(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedAnd64(ptr, value)); }
    inline long long AtomicFetchXor(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedXor64(ptr, value)); }
    inline long long AtomicFetchSwap(volatile long long* ptr, long long value) { return static_cast<long long>(_InterlockedExchange64(ptr, value)); }
    inline long long AtomicFetchSwapConditional(volatile long long* ptr, long long value, long long condition) { return _InterlockedCompareExchange64(ptr, value, condition); }
    inline bool AtomicSetValueConditional(volatile long long* ptr, long long value, long long condition) { return _InterlockedCompareExchange64(ptr, value, condition) == condition; }

    // unsigned long long
    inline unsigned long long AtomicGetValue(volatile unsigned long long* ptr) { return detail::AtomicGetValue(ptr); }
    inline unsigned long long AtomicGetValue(const volatile unsigned long long* ptr) { return AtomicGetValue(const_cast<volatile unsigned long long*>(ptr)); }
    inline unsigned long long AtomicSetValue(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedExchange64(reinterpret_cast<volatile long long*>(ptr), (long long)value)); }
    inline unsigned long long AtomicFetchIncrement(volatile unsigned long long* ptr) { return static_cast<unsigned long long>(_InterlockedExchangeAdd64(reinterpret_cast<volatile long long*>(ptr), (long long)1)); }
    inline unsigned long long AtomicFetchDecrement(volatile unsigned long long* ptr) { return static_cast<unsigned long long>(_InterlockedExchangeAdd64(reinterpret_cast<volatile long long*>(ptr), (long long)-1)); }
    inline unsigned long long AtomicFetchAdd(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedExchangeAdd64(reinterpret_cast<volatile long long*>(ptr), (long long)value)); }
    inline unsigned long long AtomicFetchSub(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedExchangeAdd64(reinterpret_cast<volatile long long*>(ptr), -(long long)value)); }
    inline unsigned long long AtomicFetchOr(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedOr64(reinterpret_cast<volatile long long*>(ptr), (long long)value)); }
    inline unsigned long long AtomicFetchAnd(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedAnd64(reinterpret_cast<volatile long long*>(ptr), (long long)value)); }
    inline unsigned long long AtomicFetchXor(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedXor64(reinterpret_cast<volatile long long*>(ptr), (long long)value)); }
    inline unsigned long long AtomicFetchSwap(volatile unsigned long long* ptr, unsigned long long value) { return static_cast<unsigned long long>(_InterlockedExchange64(reinterpret_cast<volatile long long*>(ptr), (long long)value)); }
    inline unsigned long long AtomicFetchSwapConditional(volatile unsigned long long* ptr, unsigned long long value, unsigned long long condition) { return static_cast<unsigned long long>(_InterlockedCompareExchange64(reinterpret_cast<volatile long long*>(ptr), (long long)value, (long long)condition)); }
    inline bool AtomicSetValueConditional(volatile unsigned long long* ptr, unsigned long long value, unsigned long long condition) { return static_cast<unsigned long long>(_InterlockedCompareExchange64(reinterpret_cast<volatile long long*>(ptr), (long long)value, (long long)condition)) == condition; }
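
    // Illustrative helper (a minimal sketch; 'AtomicStoreMax' is hypothetical and not part of
    // this header): raise a shared high-water mark using a compare-and-swap retry loop.
    //
    //     inline void AtomicStoreMax(volatile unsigned long long* ptr, unsigned long long candidate)
    //     {
    //         unsigned long long observed = AtomicGetValue(ptr);
    //         while((candidate > observed) && !AtomicSetValueConditional(ptr, candidate, observed))
    //             observed = AtomicGetValue(ptr);
    //     }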

    namespace detail
    {
        template <class T>
        inline T AtomicGetValue(volatile T* ptr)
        {
            #if EA_PLATFORM_WORD_SIZE >= 8 && defined(EA_PROCESSOR_X86_64)
                // On x86-64, a naturally aligned load of a value up to 8 bytes is atomic; the
                // surrounding compiler barriers keep the compiler from reordering the read.
                EATHREAD_ALIGNMENT_CHECK(ptr);
                EACompilerMemoryBarrier();
                T value = *ptr;
                EACompilerMemoryBarrier();
                return value;
            #else
                // Elsewhere, read the value via an atomic no-op add.
                return AtomicFetchAdd(ptr, T(0));
            #endif
        }
    } // namespace detail

} // namespace Thread
} // namespace EA
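
// Illustrative usage of the standalone atomics (a minimal sketch; 'refCount' is a
// hypothetical variable, not part of this header):
//
//     volatile int refCount = 0;
//
//     int prev = EA::Thread::AtomicFetchIncrement(&refCount);  // prev == 0, refCount == 1
//     EA::Thread::AtomicSetValueConditional(&refCount, 5, 1);  // CAS: becomes 5 only if currently 1
//     int cur  = EA::Thread::AtomicGetValue(&refCount);        // cur == 5
//     EA::Thread::AtomicFetchDecrement(&refCount);             // refCount == 4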